pub_fms.bib

@inproceedings{Schleif2001a,
  author = {A. Simmel and T. D{\"o}rfler and F.-M. Schleif and E. Sommerfeld},
  booktitle = {Proceedings of the 17th Meeting of the International Society for Psychophysics},
  pages = {602--607},
  title = {An analysis of connections between internal and external learning process indicators using EEG coherence analysis},
  publisher = {Pabst Publishing},
  year = {2001}
}
@inproceedings{Schleif2001b,
  author = {T. D{\"o}rfler and A. Simmel and F.-M. Schleif and E. Sommerfeld},
  booktitle = {Proceedings of the 17th Meeting of the International Society for Psychophysics},
  pages = {343--348},
  title = {Complexity - dependent synchronization of brain subsystems during memorization},
  publisher = {Pabst Publishing},
  year = {2001}
}
@inproceedings{Schleif2002a,
  author = {M. K{\"o}hler and K. Buchta and F.-M. and F.-M. Schleif and E. Sommerfeld},
  booktitle = {Proceedings of the 18th Meeting of the International Society for Psychophysics},
  pages = {433--439},
  title = {Complexity and difficulty in memory based comparison},
  publisher = {Pabst Publishing},
  editor = {J.A. da Silva and N.P.R. Filho and E.H. Matsushima},
  year = {2002}
}
@mastersthesis{Schleif2002b,
  author = {F.-M.~Schleif},
  title = {Momentbasierte Methoden zur Schriftzeichenerkennung},
  year = 2002,
  school = {University of Leipzig},
  publisher = {University of Leipzig},
  note = {Dokumentenserver Universit{\"a}t Leipzig},
  url = {http://dol.uni-leipzig.de/pub/2002-33}
}
@inproceedings{Schleif2003a,
  author = {Th. Villmann and F.-M. Schleif and B. Hammer},
  booktitle = {Proceedings of the 4th Workshop on Self Organizing Maps (WSOM) 2003},
  pages = {47--52},
  title = {Supervised Neural Gas and Relevance Learning in Learning Vector Quantisation},
  editor = {Takeshi Yamakawa},
  publisher = {Kyushu Institute of Technology on CD-ROM (C) 2003 WSOM'03 Organizing Committee},
  address = {Hibikino, Kitakyushu, Japan},
  year = {2003}
}
@inproceedings{Schleif2003b,
  author = {V. Gruhn and M. H{\"u}lder and R. Ijoui and F.-M. Schleif},
  booktitle = {Proceedings of ISD 2003 - Constructing the Infrastructure for the Knowledge Economy - Methods and Tools, Theory and Practice},
  pages = {705--713},
  title = {A distributed logistic support communication system},
  editor = {H. Linger and J. Fisher and W.G. Wojtkowski and J. Zupancic and K. Vigo and J. Arnold},
  publisher = {Kluwer Academic Publishers, London},
  year = {2003}
}
@article{Schleif2003c,
  author = {T. D{\"o}rfler and A. Simmel and F.-M. Schleif and E. Sommerfeld},
  title = {Working memory load and EEG coherence},
  pages = {269},
  volume = 15,
  number = 4,
  issn = {0896-0267},
  journal = {Brain Topography},
  year = 2003
}
@article{Schleif2003d,
  author = {M. K{\"o}hler and K. Buchta and F.-M. and F.-M. Schleif and E. Sommerfeld},
  title = {A mission for the EEG coherence analysis: Is the task complex or difficult?},
  pages = {271},
  volume = 15,
  number = 4,
  issn = {0896-0267},
  journal = {Brain Topography},
  year = 2003
}
@inproceedings{Schleif2004a,
  author = {T. Villmann and B. Hammer and F.-M. Schleif},
  booktitle = {Proceedings of Selbstorganisation von adaptivem Verhalten (SOAVE'2004)},
  pages = {592--597},
  editor = {H.-M. Gro{\ss} and K. Debes and H.-J. B{\"o}hme},
  isbn = {3-18-374310-8},
  title = {Metrik Adaptation for Optimal Feature Classification in Learning Vector Quantization Applied to Environment Detection},
  publisher = {Fortschritts-Berichte VDI Reihe 10, Nr. 742, VDI Verlag, Germany},
  year = {2004}
}
@inproceedings{Schleif2004b,
  author = {F.-M. Schleif and U. Clauss and Th. Villmann and B. Hammer},
  booktitle = {Proceedings of the 3rd International Conference on Machine Learning and Applications (ICMLA) 2004},
  pages = {374--379},
  title = {Supervised Relevance Neural Gas and Unified Maximum Separability Analysis for Classification of Mass Spectrometric Data},
  publisher = {IEEE Press},
  address = {Los Alamitos, CA, USA},
  editor = {M. Arif Wani and Krzysztof J. Cios and Khalid Hafeez},
  month = {December},
  year = {2004},
  isbn = {0-7803-9923-2}
}
@inproceedings{Schleif2005a,
  author = {F.-M. Schleif and Th. Villmann and B. Hammer},
  booktitle = {Proceedings of the 6th Workshop on Fuzzy Logic and Applications (WILF) 2005},
  pages = {290--296},
  title = {Local Metric Adaptation for Soft Nearest Prototype Classification to Classify Proteomic Data},
  editor = {Isabelle Bloch and Alfredo Petrosino and Andrea G.B. Tettamanzi},
  publisher = {Springer},
  address = {Berlin Heidelberg, Germany},
  year = {2005}
}
@inproceedings{Schleif2005b,
  author = {Th. Villmann and B. Hammer and F.-M. Schleif and T. Geweniger},
  title = {Fuzzy Labeled Neural Gas for Fuzzy Classification},
  booktitle = {Proceedings of the 5th Workshop on Self-Organizing Maps (WSOM) 2005},
  pages = {283--290},
  editor = {Marie Cottrell},
  publisher = {University Paris-1-Pantheon-Sorbonne on CD-ROM (C) 2005 WSOM'05 Organizing Committee},
  address = {Paris, France},
  year = {2005}
}
@inproceedings{Schleif2005c,
  author = {F.-M. Schleif and Th. Villmann and B. Hammer},
  booktitle = {Proceedings of the 4th International Conference on Machine Learning and Applications (ICMLA) 2005},
  pages = {11--15},
  title = {Fuzzy Labeled Soft Nearest Neighbor Classification with Relevance Learning},
  publisher = {IEEE Press},
  address = {Los Alamitos, CA, USA},
  editor = {M. Arif Wani and Krzysztof J. Cios and Khalid Hafeez},
  year = {2005}
}
@article{Schleif2006a,
  author = {Th. Villmann and F.-M. Schleif and B. Hammer},
  title = {Comparison of Relevance Learning Vector Quantization with other Metric Adaptive Classification Methods},
  pages = {610--622},
  volume = 19,
  number = 5,
  journal = {Neural Networks},
  year = 2006
}
@inproceedings{Schleif2006b,
  author = {C. Br{\"u}{\ss} and F. Bollenbeck and F.-M. Schleif and W. Weschke and T. Villmann and U. Seiffert},
  booktitle = {Proc. of ESANN 2006},
  pages = {563--569},
  title = {Fuzzy Image Segmentation with Fuzzy Labelled Neural Gas},
  isbn = {2-930307-06-4},
  year = {2006}
}
@inproceedings{Schleif2006c,
  author = {B. Hammer and T. Villmann and F.-M. Schleif and C. Albani and W. Hermann},
  booktitle = {Proc. of ICAISC 2006, LNAI 4029},
  pages = {603--612},
  title = {Learning vector quantization classification with local relevance determination for medical data},
  isbn = {3-540-35748-3},
  publisher = {Springer},
  year = 2006
}
@inproceedings{Schleif2006d,
  author = {F.-M. Schleif and B. Hammer and Th. Villmann},
  booktitle = {Proc. of ESANN 2006},
  pages = {539--545},
  isbn = {2-930307-06-4},
  title = {Margin based {A}ctive {L}earning for {LVQ} {N}etworks},
  year = {2006}
}
@inproceedings{Schleif2006e,
  author = {F.-M. Schleif and T. Elssner and M. Kostrzewa and T. Villmann and B. Hammer},
  booktitle = {Proc. of CBMS 2006},
  pages = {919--924},
  title = {Analysis and Visualization of Proteomic Data by Fuzzy labeled Self Organizing Maps},
  isbn = {0-7695-2517-1},
  year = {2006}
}
@inproceedings{Schleif2006f,
  author = {T. Villmann and U. Seiffert and F.-M. Schleif and C. Br{\"u}{\ss} and T. Geweniger and B. Hammer},
  booktitle = {Proc. of ANNPR 2006},
  pages = {46--56},
  title = {Fuzzy Labeled Self-Organizing Map with Label-Adjusted Prototypes},
  isbn = {3-540-37951-7},
  year = {2006}
}
@inproceedings{Schleif2006g,
  author = {T. Villmann and B. Hammer and F.-M. Schleif and T. Geweniger and M. Cottrell},
  booktitle = {Proc. of ICONIP 2006},
  pages = {40--49},
  title = {Prototype Based Classification Using Information Theoretic Learning},
  isbn = {978-3-540-46481-5},
  year = {2006}
}
@inproceedings{Schleif2006h,
  author = {B. Hammer and A. Hasenfuss and F.-M. Schleif and T. Villmann},
  booktitle = {Proc. of ANNPR 2006},
  pages = {33--45},
  title = {Supervised batch neural gas},
  isbn = {3-540-37951-7},
  year = {2006}
}
@inproceedings{Schleif2006i,
  author = {B. Hammer and A. Hasenfuss and F.-M. Schleif and T. Villmann},
  booktitle = {Proc. of ANNIE 2006},
  pages = {623--632},
  title = {Supervised median clustering},
  isbn = {0-7918-0256-6},
  year = {2006}
}
@inproceedings{Schleif2006k,
  author = {F.-M. Schleif and T. Elssner and M. Kostrzewa and T. Villmann and B. Hammer},
  booktitle = {Proc. of FLINS 2006},
  pages = {541--548},
  title = {Machine Learning and Soft-Computing in Bioinformatics - A Short Journey},
  isbn = {981-256-690-2},
  publisher = {World Scientific Press},
  year = {2006}
}
@article{Schleif2006l,
  author = {T. Villmann and F.-M. Schleif and B. Hammer},
  title = {Prototype-based fuzzy classification with local relevance for proteomics},
  pages = {2425--2428},
  journal = {Neurocomputing},
  volume = 69,
  year = 2006
}
@article{Schleif2006m,
  author = {T. Villmann and B. Hammer and F.-M. Schleif and T. Geweniger and W. Herrmann},
  title = {Fuzzy Classification by Fuzzy Labeled Neural Gas},
  pages = {772--779},
  journal = {Neural Networks},
  volume = 19,
  number = {6-7},
  year = 2006
}
@phdthesis{Schleif2006n,
  author = {F.-M.~Schleif},
  title = {Prototype based Machine Learning for Clinical Proteomics},
  year = 2006,
  school = {Technical University Clausthal},
  publisher = {Technical University Clausthal},
  address = {Technical University Clausthal, Clausthal-Zellerfeld, Germany}
}
@article{Schleif2007a,
  author = {F.-M. Schleif and B. Hammer and Th. Villmann},
  title = {Margin based {A}ctive {L}earning for {LVQ} {N}etworks},
  pages = {1215--1224},
  volume = 70,
  number = {7-9},
  journal = {Neurocomputing},
  year = 2007
}
@inproceedings{Schleif2007b,
  author = {F.-M. Schleif and B. Hammer and Th. Villmann},
  booktitle = {Proceedings of the 9th International Work-Conference on Artificial Neural Networks (IWANN) 2007},
  editor = {Francisco Sandoval and Alberto Prieto and Joan Cabestany and Manuel Gra{\~n}a},
  publisher = {Springer},
  address = {Berlin, Heidelberg, Germany},
  pages = {1036--1044},
  title = {Supervised Neural Gas for Functional Data and its Application to the Analysis of Clinical Proteom Spectra},
  isbn = {978-3-540-73006-4},
  year = {2007}
}
@inproceedings{Schleif2007c,
  author = {T.~Villmann and M.~Strickert and C.~Br{\"u}{\ss} and F.-M. Schleif and U.~Seiffert},
  booktitle = {Proceedings of the 15th European Symposium on Artificial Neural Networks (ESANN) 2007},
  editor = {Michel Verleysen},
  publisher = {d-side publications},
  address = {Evere, Belgium},
  pages = {103--108},
  title = {Visualization of fuzzy information in fuzzy-classification for image segmentation using {MDS}},
  isbn = {2-930307-07-2},
  year = {2007}
}
@inproceedings{Schleif2007d,
  author = {A. Hasenfuss and B. Hammer and F.-M. Schleif and T.~Villmann},
  booktitle = {Proceedings of the 9th International Work-Conference on Artificial Neural Networks (IWANN) 2007},
  editor = {Francisco Sandoval and Alberto Prieto and Joan Cabestany and Manuel Gra{\~n}a},
  publisher = {Springer},
  address = {Berlin, Heidelberg, Germany},
  pages = {539--546},
  title = {Neural gas clustering for sparse proximity data},
  isbn = {978-3-540-73006-4},
  year = {2007}
}
@inproceedings{Schleif2007e,
  author = {T. Villmann and F.-M. Schleif and E. Mer{\'e}nyi and B. Hammer},
  booktitle = {Proceedings of the 9th International Work-Conference on Artificial Neural Networks (IWANN) 2007},
  editor = {Francisco Sandoval and Alberto Prieto and Joan Cabestany and Manuel Gra{\~n}a},
  publisher = {Springer},
  address = {Berlin, Heidelberg, Germany},
  pages = {556--563},
  title = {Fuzzy Labeled Self Organizing Map for Classification of Spectra},
  isbn = {978-3-540-73006-4},
  year = {2007}
}
@inproceedings{Schleif2007f,
  author = {B. Hammer and A. Hasenfuss and F.-M. Schleif and T.~Villmann and M. Strickert and U. Seiffert},
  booktitle = {Proc. of IJCNN 2007},
  pages = {1877--1882},
  title = {Intuitive Clustering of Biological Data},
  publisher = {IEEE},
  isbn = {978-1-4244-1380-5},
  year = {2007}
}
@inproceedings{Schleif2007g,
  author = {S.-O. Deininger and M. Gerhard and F.-M. Schleif},
  booktitle = {Proc. of CBMS 2007},
  pages = {403--405},
  title = {Statistical Classification and Visualization of MALDI-Imaging Data},
  year = {2007}
}
@inproceedings{Schleif2007h,
  author = {F.-M. Schleif and T. Villmann and B. Hammer},
  booktitle = {Proceedings of the 7th International Workshop on Fuzzy Logic and Applications (WILF) 2007},
  editor = {Francesco Masulli and Sushmita Mitra and Gabriella Pasi},
  publisher = {Springer},
  address = {Berlin, Heidelberg, Germany},
  pages = {563--570},
  title = {Analysis of Proteomic Spectral Data by Multi Resolution Analysis and Self-Organizing-Maps},
  isbn = {978-3-540-73399-7},
  year = {2007}
}
@inproceedings{Schleif2007i,
  author = {F.-M. Schleif},
  booktitle = {Ausgezeichnete Informatikdissertationen 2006},
  pages = {179--188},
  title = {Prototypen basiertes maschinelles Lernen in der klinischen Proteomik},
  publisher = {GI-Edition Lecture Notes in Informatics (LNI)},
  isbn = {978-3-88579-411-0},
  year = {2007}
}
@inproceedings{Schleif2007j,
  author = {T. Villmann and F.-M. Schleif and B. Hammer and M. Strickert and E. Mer{\'e}nyi},
  booktitle = {Proc. of WSOM 2007},
  url = {http://biecoll.ub.uni-bielefeld.de//frontdoor.php?source\_opus=128\&la=en},
  title = {Class imaging of hyperspectral satellite remote sensing data using Fuzzy labeled Self Organizing Maps},
  publisher = {Bielefeld University Press},
  isbn = {9783000224},
  year = {2007}
}
@inproceedings{Schleif2007k,
  author = {P. Schneider and M. Biehl and F.-M. Schleif and B. Hammer},
  booktitle = {Proc. of WSOM 2007},
  url = {http://biecoll.ub.uni-bielefeld.de//frontdoor.php?source\_opus=125\&la=en},
  title = {Advanced metric adaptation in General LVQ for classification of mass spectrometry data},
  publisher = {Bielefeld University Press},
  isbn = {9783000224},
  year = {2007}
}
@inproceedings{Schleif2007l,
  author = {M. Strickert and F.-M. Schleif and U. Seiffert},
  booktitle = {Proc. of ASAI 2007},
  pages = {139--150},
  title = {Gradients of Pearson Correlation for Analysis of Biomedical Data},
  issn = {1850-2784},
  year = {2007}
}
@inproceedings{Schleif2007m,
  author = {M. Strickert and F.-M. Schleif},
  booktitle = {Proc. of MLSB 2007},
  pages = {81--86},
  title = {Supervised Attribute Relevance Determination for Protein Identification in Stress Experiments},
  year = {2007}
}
@inproceedings{Schleif2007n,
  author = {T. Villmann and F.-M. Schleif and M. v.d. Werff and A. Deelder and R. Tollenaar},
  booktitle = {Proc. of ICMLA 2007},
  pages = {581--586},
  title = {Associative learning in SOMs for Fuzzy-Classification},
  isbn = {0-7695-3069-9},
  year = {2007}
}
@article{Schleif2007o,
  author = {F.-M. Schleif},
  title = {Maschinelles {L}ernen mit {P}rototypmethoden in der klinischen {P}roteomik},
  pages = {65--67},
  volume = {4/07},
  journal = {K\"unstliche Intelligenz (KI)},
  url = {pdf/ki_2007.pdf},
  abstract = {Die klinische Proteomik untersucht proteinbasierte Krankheitsprozesse 
in klinischen Proben. Die Messung der Probe erfolgt
dabei typischer Weise durch ein Massenspektrometer. 
Dabei entstehen hochdimensionale Spektren, die die Expressivit\"at 
von bestimmten Proteinfragmenten anzeigen. Eine weitere Herausforderung ist die eher geringe Anzahl von Proben.
Zudem ist die G\"ute und Interpretierbarkeit der Klassifikationsentscheidung von besonderer Bedeutung und die 
Adaptierbarkeit der generischen Klassifikationsmodelle bei Nachmessungen. Entsprechend werden die Spektren zur Weiterverarbeitung
geeignet reduziert. Nach geeigneter Evaluierung, k\"onnen diese
f\"ur die Analyse und Diagnostik von Krankheitsprozessen in Frage
kommen. Wir betrachten kurz die Aufbereitung der Spektren,
nachfolgend werden Konzepte prototypischer Klassifikationsverfahren 
beschrieben und deren Erweiterungen f\"ur die klinische Proteomik skizziert.
Im Ergebnisteil wird die entwickelte Algorithmik zur Bildung von Klassifikationsmodellen f\"ur 
verschiedene klinische Datens\"atze eingesetzt und bewertet.
},
  issn = {0933-1875},
  year = 2007
}
@inproceedings{Schleif2007p,
  author = {M. Strickert and F.-M. Schleif and T. Villmann and U. Seiffert},
  booktitle = {Similarity based Clustering, LNCS},
  title = {Derivatives of Pearson Correlation for Gradient based Analysis of Biomedical Data},
  year = {2007}
}
@inproceedings{Schleif2007q,
  author = {F.-M. Schleif},
  booktitle = {Dagstuhl online proceedings - Seminar Similarity based Clustering},
  title = {},
  publisher = {Schloss Dagstuhl - Leibniz Center for Informatics},
  year = {2007}
}
@article{Schleif2008a,
  author = {T. Villmann and B. Hammer and F.-M. Schleif and W. Herrmann and M. Cottrell},
  title = {Fuzzy Classification Using Information Theoretic Learning Vector Quantization},
  volume = 71,
  number = {16-18},
  pages = {3070--3076},
  journal = {Neurocomputing},
  year = 2008
}
@article{Schleif2008b,
  author = {T. Villmann and F.-M. Schleif and B. Hammer and M. Kostrzewa},
  title = {Exploration of Mass-Spectrometric Data in Clinical Proteomics Using Learning Vector Quantization Methods},
  volume = 9,
  number = 2,
  pages = {129--143},
  journal = {Briefings in Bioinformatics},
  url = {url/bib_2008.pdf},
  abstract = {In the present contribution we present two recently developed classification algorithms
for analysis of mass-spectrometric data - the supervised neural gas and the fuzzy labeled
self-organizing map. The algorithms are inherently regularizing, which is recommended for
these spectral data because of their high dimensionality and the sparseness for specific
problems. The algorithms are both prototype based such that the principle of characteristic
representants is realized. This leads to an easy interpretation of the generated
classification model. Further, the fuzzy labeled self-organizing map is able to process
uncertainty in data, and classification results can be obtained as fuzzy decisions. Moreover,
this fuzzy classification together with the property of topographic mapping offers the
possibility of class similarity detection, which can be used for class visualization. We
demonstrate the power of both methods for two exemplary applications: the classification of
bacteria (listeria types) and neoplastic and non-neoplastic cell populations in breast cancer
tissue sections.
},
  year = 2008
}
@inproceedings{Schleif2008c,
  author = {M. Strickert and F.-M. Schleif and T. Villmann},
  title = {Metric adaptation for supervised attribute rating},
  booktitle = {Proceedings of the 16th European Symposium on Artificial Neural Networks (ESANN) 2008},
  editor = {Michel Verleysen},
  publisher = {d-side publications},
  address = {Evere, Belgium},
  pages = {31--36},
  isbn = {2-930307-08-0},
  year = {2008}
}
@inproceedings{Schleif2008d,
  author = {P. Schneider and F.-M. Schleif and T. Villmann and M. Biehl},
  title = {Generalized Matrix Learning Vector Quantizer for the Analysis of Spectral Data},
  booktitle = {Proceedings of the 16th European Symposium on Artificial Neural Networks (ESANN) 2008},
  editor = {Michel Verleysen},
  publisher = {d-side publications},
  address = {Evere, Belgium},
  pages = {451--456},
  isbn = {2-930307-08-0},
  url = {pdf/esann_2008.pdf},
  abstract = {The analysis of spectral data constitutes new challenges for machine
learning algorithms due to the functional nature of the data. Special attention is
given to the used metric in such analysis. Recently a prototype based algorithm has
been proposed which allows the integration of a full adaptive matrix in the metric.
In this contribution we analyse this approach with respect to band matrices and its
usage for the analysis of functional spectral data. The approach is tested on satellite
data and data taken from food chemistry.
},
  year = {2008}
}
@inproceedings{Schleif2008e,
  author = {F.-M. Schleif and T. Riemer and M. Cross and T. Villmann},
  title = {Automatic Identification and Quantification of Metabolites in H-NMR Measurements},
  booktitle = {Proceedings of the Workshop on Computational Systems Biology (WCSB) 2008},
  pages = {165--168},
  isbn = {978-952-15-1988-8},
  year = {2008}
}
@inproceedings{Schleif2008f,
  author = {F.-M. Schleif and M. Ongyerth and T. Villmann},
  title = {Sparse coding Neural Gas for analysis of Nuclear Magnetic Resonance Spectroscopy},
  booktitle = {Proceedings of CBMS 2008},
  pages = {620--625},
  isbn = {978-0-7695-3165-6},
  url = {pdf/cbms_2008.pdf},
  abstract = {Nuclear Magnetic Resonance Spectroscopy is a technique for the analysis of complex
biochemical materials. Thereby the identification of known sub-patterns is important. These
measurements require an accurate preprocessing and analysis to meet clinical standards. Here
we present a method for an appropriate sparse encoding of NMR spectral data combined with a
fuzzy classification system allowing the identification of sub-patterns including mixtures
thereof. The method is evaluated in contrast to an alternative approach using simulated
metabolic spectra.
},
  year = {2008}
}
@inproceedings{Schleif2008g,
  author = {T. Geweniger and F.-M. Schleif and A. Hasenfuss and B. Hammer and T. Villmann},
  title = {Comparison of cluster algorithms for the analysis of text data using Kolmogorov complexity},
  booktitle = {Proceedings of ICONIP 2008},
  pages = {CD-Publication},
  url = {pdf/iconip_2008.pdf},
  abstract = {In this paper we present a comparison of multiple cluster algorithms and their
suitability for clustering text data. The clustering is based on similarities only, employing
the Kolmogorov complexity as a similarity measure. This motivates the set of considered
clustering algorithms which take into account the similarity between objects exclusively.
Compared cluster algorithms are Median kMeans, Median Neural Gas, Relational Neural Gas,
Spectral Clustering and Affinity Propagation.
Keywords: cluster algorithm, similarity data, neural gas, spectral clustering, message
passing, kMeans, Kolmogorov complexity
},
  year = {2008}
}
@incollection{Schleif2008h,
  author = {F.-M. Schleif and T. Villmann and B. Hammer},
  title = {Pattern Recognition by Supervised Relevance Neural Gas and its Application to Spectral Data in Bioinformatics},
  booktitle = {Encyclopedia of Artificial Intelligence},
  isbn = {978-1-59904-849-9},
  year = {2008}
}
@incollection{Schleif2008i,
  author = {F.-M. Schleif and B. Hammer and T. Villmann and M. v.d. Werff and A. Deelder and R. Tollenaar},
  title = {Analysis of Spectral Data in Clinical Proteomics by use of Learning Vector Quantizers},
  booktitle = {Computational Intelligence in Biomedicine and Bioinformatics: Current Trends and Applications},
  pages = {141--167},
  chapter = {6},
  isbn = {978-3-540-70776-9},
  year = {2008}
}
@article{Schleif2008j,
  author = {F.-M. Schleif and T. Villmann and B. Hammer},
  title = {Prototype based Fuzzy Classification in Clinical Proteomics},
  pages = {4--16},
  volume = 47,
  number = 1,
  journal = {International Journal of Approximate Reasoning},
  url = {pdf/japr_2008.pdf},
  abstract = {Proteomic profiling based on mass spectrometry is an important tool for studies at
the protein and peptide level in medicine and health care. Thereby, the identification
of relevant masses, which are characteristic for specific sample states e.g. a disease
state is complicated. Further, the classification accuracy and safety is especially
important in medicine. The determination of classification models for such high dimensional clinical data is a complex task. Specific methods, which are robust with
respect to the large number of dimensions and fit to clinical needs, are required. In
this contribution two such methods for the construction of nearest prototype classifiers are compared in the context of clinical proteomic studies, which are specifically
suited to deal with such high-dimensional functional data. Both methods are suitable for the adaptation of the underlying metric, which is useful in proteomic research
to get a problem adequate representation of the clinical data. In addition they allow
fuzzy classification and one of them allows fuzzy classified training data. Both
algorithms are investigated in detail with respect to their specific properties. A performance analysis is carried out on real clinical proteomic cancer data in a comparative
manner.
},
  year = 2008
}
@article{Schleif2008k,
  author = {M. Strickert and F.-M. Schleif and U. Seiffert},
  title = {Derivatives of Pearson Correlation for Gradient-based Analysis of Biomedical Data},
  pages = {37--44},
  volume = 37,
  number = 12,
  journal = {Ibero-American Journal of Artificial Intelligence},
  year = 2008
}
@article{Schleif2009a,
  author = {F.-M. Schleif and M. Lindemann and P. Maass and M. Diaz and J. Decker and T. Elssner and M. Kuhn and H. Thiele},
  title = {Support Vector Classification of Proteomic Profile Spectra based on Feature Extraction with the Bi-orthogonal Discrete Wavelet Transform},
  pages = {189--199},
  volume = 12,
  journal = {Computing and Visualization in Science},
  url = {pdf/sc_2009.pdf},
  abstract = {Automatic classification of high-resolution mass spectrometry data has increasing
potential to support physicians in the diagnosis of diseases like cancer. The proteomic data
exhibit variations among different disease states. A precise and reliable classification of
mass spectra is essential for a successful diagnosis and treatment. The underlying process to
obtain such reliable classification results is a crucial point. In this paper such a method
is explained and a corresponding semi-automatic parametrization procedure is derived. Thereby
a simple straightforward classification procedure to assign mass spectra to a particular
disease state is derived. The method is based on an initial preprocessing stage of the whole
set of spectra followed by the bi-orthogonal discrete wavelet transform (DWT) for feature
extraction. The approximation coefficients calculated from the scaling function exhibit a
high peak pattern matching property and feature a denoising of the spectrum. The
discriminating coefficients, selected by the Kolmogorov-Smirnov test, are finally used as
features for training and testing a support vector machine with both a linear and a radial
basis kernel. For comparison the peak areas obtained with the ClinProt-System [33] were
analyzed using the same support vector machines. The introduced approach was evaluated on
clinical MALDI-MS data sets with two classes each originating from cancer studies. The
cross-validated error rates using the wavelet coefficients were better than those obtained
from the peak areas.
},
  year = 2009
}
@inproceedings{Schleif2009b,
  author = {F.-M. Schleif and T. Villmann},
  title = {Neural Maps and Learning Vector Quantization - Theory and Applications},
  booktitle = {Proceedings of ESANN 2009},
  pages = {509--516},
  isbn = {2-930307-09-9},
  url = {pdf/esann_2012.pdf},
  abstract = {Neural maps and Learning Vector Quantizers are fundamental paradigms in neural vector
quantization based on Hebbian learning. The beginning of this field dates back over twenty
years with strong progress in theory and outstanding applications. Their success lies in
their robustness and simplicity in application whereas the mathematics behind is rather
difficult. We provide an overview on recent achievements and current trends of ongoing
research.
},
  year = {2009}
}
@inproceedings{Schleif2009c,
  author = {S. Simmuteit and F.-M. Schleif and T. Villmann and M. Kostrzewa},
  title = {Hierarchical PCA using Tree-SOM for the Identification of Bacteria},
  booktitle = {Proceedings of the 7th International Workshop on Self Organizing Maps (WSOM) 2009},
  pages = {272--280},
  isbn = {978-3-642-02396-5},
  url = {pdf/wsom_2009.pdf},
  abstract = {In this paper we present an extended version of Evolving Trees using Oja's rule.
Evolving Trees are extensions of Self-Organizing Maps developed for hierarchical
classification systems. Therefore they are well suited for taxonomic problems like the
identification of bacteria. The paper focuses on clustering and visualization of bacteria
measurements. A modified variant of the Evolving Tree is developed and applied to obtain a
hierarchical clustering. The method provides an inherent PCA analysis which is analyzed in
combination with the tree based visualization. The obtained loadings support insights in the
classification decision and can be used to identify features which are relevant for the
cluster separation.
},
  year = {2009}
}
@inproceedings{Schleif2009d,
  author = {M. Strickert and J. Keilwagen and F.-M. Schleif and T. Villmann and M. Biehl},
  title = {Matrix metric adaptation for improved linear discriminant analysis of biomedical data},
  booktitle = {Bio-Inspired Systems: Computational and Ambient Intelligence, 10th International Work-Conference on Artificial Neural Networks, IWANN 2009, Proceedings, Part I. LNCS 5517},
  pages = {933--940},
  isbn = {978-3-642-02477-1},
  publisher = {Springer},
  year = {2009}
}
@inproceedings{Schleif2009e,
  author = {T. Villmann and F.-M. Schleif},
  title = {Functional Vector Quantization by Neural Maps},
  booktitle = {Proceedings of Whispers 2009},
  pages = {CD},
  year = {2009}
}
@inproceedings{Schleif2009f,
  author = {S. Simmuteit and F.-M. Schleif and T. Villmann and T. Elssner},
  title = {Tanimoto metric in Tree-SOM for improved representation of mass spectrometry data with an underlying taxonomic structure},
  booktitle = {Proceedings of ICMLA 2009},
  pages = {563--567},
  publisher = {IEEE Press},
  isbn = {978-0-7695-3926-3},
  url = {pdf/icmla_2009.pdf},
  abstract = {In this paper we develop a Tanimoto metric variant of the Evolving Tree for the
analysis of mass spectrometric data of animal fur. The Evolving Tree is an extension of
Self-Organizing Maps developed to analyze hierarchical clustering problems. Together with the
Tanimoto similarity measure, which is intended to work with taxonomic structured data, the
Evolving Tree is well suited for the identification of animal hair based on mass spectrometry
fingerprints. Results show a suitable hierarchical clustering of the test data and also a
good retrieval capability with a logarithmic number of comparisons.
},
  year = {2009}
}
@incollection{Schleif2009g,
  author = {M. Strickert and F.-M. Schleif and T. Villmann and U. Seiffert},
  editor = {M. Biehl and B. Hammer and M. Verleysen and T. Villmann},
  title = {Unleashing Pearson Correlation for Faithful Analysis of Biomedical Data},
  booktitle = {Similarity-based Clustering},
  pages = {70--91},
  isbn = {978-3-642-01804-6},
  publisher = {Springer},
  series = {LNAI 5400},
  year = {2009}
}
@article{Schleif2009h,
  author = {F.-M. Schleif and M. Ongyerth and T. Villmann},
  title = {Supervised data analysis and reliability estimation for spectral data},
  pages = {3590--3601},
  volume = 72,
  number = {16-18},
  journal = {Neurocomputing},
  url = {pdf/nc_2009.pdf},
  abstract = {The analysis and classification of data is a common task in multiple fields of
experimental research such as bioinformatics, medicine, satellite remote sensing or
chemometrics, leading to new challenges for an appropriate analysis. For this purpose
different machine learning methods have been proposed. These methods usually do not provide
information about the reliability of the classification. This however is a common requirement
in e.g. medicine and biology. In this line the present contribution offers an approach to
enhance classifiers with reliability estimates in the context of prototype vector
quantization. This extension can also be used to optimize precision or recall of the
classifier system and to determine items which are not classifiable. This can lead to
significantly improved classification results. The method is exemplarily presented on
satellite remote spectral data but is applicable to a wider range of data sets.
},
  year = 2009
}
@article{Schleif2009i,
  author = {F.-M. Schleif and A. Vellido and M. Biehl},
  title = {Advances in machine learning and computational intelligence},
  pages = {7--9},
  volume = 72,
  journal = {Neurocomputing},
  year = 2009
}
@article{Schleif2009j,
  author = {F.-M. Schleif and T. Villmann and M. Kostrzewa and B. Hammer and A. Gammerman},
  title = {Cancer Informatics by Prototype-networks in Mass Spectrometry},
  pages = {215--228},
  volume = 45,
  journal = {Artificial Intelligence in Medicine},
  url = {pdf/aim_2009.pdf},
  abstract = {Mass spectrometry has become a standard technique to analyse clinical samples in
cancer research. The obtained spectrometric measurements reveal a lot of information about
the clinical sample at the peptide and protein level. The spectra are high dimensional and,
due to the small number of samples, a sparse coverage of the population is very common. In
clinical research the calculation and evaluation of classification models is important. For
classical statistics this is achieved by hypothesis testing with respect to a chosen level of
confidence. In clinical proteomics the application of statistical tests is limited due to the
small number of samples and the high dimensionality of the data. Typically soft methods from
the field of machine learning like prototype based vector quantizers [17], Support Vector
Machines (SVM) [32], Self-Organizing Maps (SOMs) [17] and respective variants are used to
generate such models. However for these methods the classification decision is crisp in
general and no or only few additional information about the safety of the decision is
available.
},
  year = 2009
}
@inproceedings{Schleif2010a,
  author = {E. Mwebaze and P. Schneider and F.-M. Schleif and S. Haase and T. Villmann and M. Biehl},
  title = {Divergence based Learning Vector Quantization},
  booktitle = {Proceedings of ESANN 2010},
  pages = {247--252},
  year = {2010}
}
@inproceedings{Schleif2010b,
  author = {D. Z{\"u}hle and F.-M. Schleif and T. Geweniger and T. Villmann},
  title = {Learning vector quantization for heterogeneous structured data},
  booktitle = {Proceedings of ESANN 2010},
  pages = {271--276},
  year = {2010}
}
@inproceedings{Schleif2010c,
  author = {T. Villmann and S. Haase and F.-M. Schleif and B. Hammer and M. Biehl},
  title = {The Mathematics of Divergence Based Online Learning in Vector Quantization},
  booktitle = {Proceedings of ANNPR 2010},
  pages = {108--119},
  year = {2010}
}
@inproceedings{Schleif2010d,
  author = {T. Villmann and S. Haase and F.-M. Schleif and B. Hammer},
  title = {Divergence based online learning in vector quantization},
  booktitle = {Proceedings of ICAISC 2010},
  pages = {479--486},
  year = {2010}
}
@article{Schleif2010e,
  author = {C. Angulo and J. A. Lee and F.-M. Schleif},
  title = {Advances in computational intelligence and learning},
  pages = {1049--1050},
  volume = 73,
  number = {7-9},
  journal = {Neurocomputing},
  year = 2010
}
@article{Schleif2010f,
  author = {S. Simmuteit and F.-M. Schleif and T. Villmann and B. Hammer},
  title = {Evolving Trees for the Retrieval of Mass Spectrometry based Bacteria Fingerprints},
  pages = {327--343},
  journal = {Knowledge and Information Systems},
  volume = 25,
  number = 2,
  url = {pdf/kais_2010.pdf},
  abstract = {In this paper we investigate the application of Evolving Trees for the analysis of
mass spectrometric data of bacteria. Evolving Trees are extensions of Self-Organizing Maps
developed for hierarchical classification systems. Therefore they are well suited for
taxonomic problems like the identification of bacteria. Here we focus on three topics, an
appropriate pre-processing and encoding of the spectra, an adequate data model by means of a
hierarchical Evolving Tree and an interpretable visualization. First the high-dimensionality
of the data is reduced by a compact representation. Here we employ sparse coding,
specifically tailored for the processing of mass spectra. In the second step the topographic
information which is expected in the fingerprints is used for advanced tree evaluation and
analysis. We adapted the original topographic product for Self-Organizing Maps for Evolving
Trees to achieve a judgment of topography. Additionally we transferred the concept of the
U-matrix for evaluation of the separability of Self-Organizing Maps to their analog in
Evolving Trees. We demonstrate these extensions for two mass spectrometric data sets of
bacteria fingerprints and show their classification and evaluation capabilities in comparison
to state of the art techniques.
},
  year = 2010
}
@inproceedings{Schleif2010g,
  author = {F.-M. Schleif and T. Villmann and B. Hammer and P. Schneider and M. Biehl},
  title = {Generalized derivative based Kernelized learning vector quantization},
  booktitle = {Proceedings of IDEAL 2010},
  pages = {21--28},
  year = {2010},
  url = {pdf/ideal_2010.pdf},
  abstract = {We derive a novel derivative based version of kernelized Generalized Learning Vector Quantization (KGLVQ) as an effective, easy to
interpret, prototype based and kernelized classifier. It is called D-KGLVQ
and we provide generalization error bounds, experimental results on real
world data, showing that D-KGLVQ is competitive with KGLVQ and
the SVM on UCI data and additionally show that automatic parameter
adaptation for the used kernels simplifies the learning.
}
}
@article{Schleif2010h,
  author = {E. Mwebaze and P. Schneider and F.-M. Schleif and J.R. Aduwo and J.A. Quinn and S. Haase and T. Villmann and M. Biehl},
  title = {Divergence based classification in Learning Vector Quantization},
  pages = {1429--1435},
  volume = 74,
  journal = {Neurocomputing},
  year = 2010,
  url = {pdf/nc_2010},
  abstract = {We discuss the use of divergences in dissimilarity-based classification. Divergences
can be employed whenever vectorial data consists of non-negative, potentially normalized
features. This is, for instance, the case in spectral data or histograms. In particular, we
introduce and study divergence based learning vector quantization (DLVQ). We derive cost
function based DLVQ schemes for the family of g-divergences which includes the well-known
Kullback--Leibler divergence and the so-called Cauchy--Schwarz divergence as special cases.
The corresponding training schemes are applied to two different real world data sets. The
first one, a benchmark data set (Wisconsin Breast Cancer), is available in the public domain.
In the second problem, color histograms of leaf images are used to detect the presence of
cassava mosaic disease in cassava plants. We compare the use of standard Euclidean distances
with DLVQ for different parameter settings. We show that DLVQ can yield superior
classification accuracies and Receiver Operating Characteristics.
}
}
@inproceedings{Schleif2010i,
  author = {T. Villmann and F.-M. Schleif and B. Hammer},
  title = {Sparse representation of data},
  booktitle = {Proceedings of ESANN 2010},
  pages = {225--234},
  year = {2010},
  url = {pdf/esann_2012.pdf},
  abstract = {The amount of data available for investigation and analysis is rapidly growing in
various areas of research like biology, medicine, (bio-)chemistry or physics. Many of these
data sets are very complex but have also a simple inherent structure which allows an
appropriate sparse representation and modeling of such data with little or no information
loss. Advanced methods are needed to extract this inherent but hidden information. The task
of sparse data representation and modeling can be approached using very different models.
Some focus on the encoding and reconstruction of the data by means of sparse basis function
sets, like wavelets, while others identify more complex underlying structures by means of
deconvolution approaches such as non-negative matrix factorization. But also feature
reduction, feature extraction and sparse clustering techniques, often employing data specific
knowledge, can be employed to obtain sparse models of high dimensional data. All these fields
have a long tradition but due to the increasing amount of data, sparse representation
techniques have received tremendous attention in the last decade with strong progress in
theory and outstanding applications. We provide an overview on recent achievements and
current trends of ongoing research.
}
}
@inproceedings{Schleif2010j,
  author = {S. Simmuteit and F.-M. Schleif and T. Villmann},
  title = {Hierarchical evolving trees together with global and local learning for large data sets in MALDI imaging},
  booktitle = {Proceedings of WCSB 2010},
  pages = {103--106},
  year = {2010},
  url = {pdf/wcsb_2010b.pdf},
  abstract = {The analysis of very large sets of data with multiple thousand measurements is an
increasing problem. High-throughput approaches in the life sciences lead to large amounts of
data which need to be analyzed by data mining approaches. Focusing on clustering and
visualization approaches, a common problem is very large similarity matrices. Standard
techniques suffer from memory and runtime limitations for such complex settings or are not
applicable at all. Here we present a hierarchical composite clustering employing data
specific properties to deal with this problem for data with an inherent hierarchical order.
As an additional advantage our algorithm allows easy control of the clustering depth. The
method is a prototype based approach leading to sparse, compact and interpretable models. We
derive the algorithm and present it on data taken from tissue slices of high resolution MALDI
Imaging. Results show an effective clustering as well as significant improvements of the
computational complexity for this type of data.
}
}
@inproceedings{Schleif2010k,
  author = {F.-M. Schleif and T. Riemer and U. B{\"o}rner and L. Schnapka-Hille and M. Cross},
  title = {Efficient identification and quantification of metabolites in 1-H NMR measurements by a novel data encoding approach },
  booktitle = {Proceedings of WCSB 2010},
  pages = {91--94},
  year = {2010},
  url = {pdf/wcsb_2010.pdf},
  abstract = {The analysis of metabolic processes is becoming increasingly important to our
understanding of complex biological systems and disease states. Nuclear magnetic resonance
(NMR) spectroscopy is a particularly relevant technology in this respect, since the NMR
signals provide a quantitative measure of metabolite concentrations. However, due to the
complexity of the spectra typical of biological samples, the demands of clinical and high
throughput analysis will only be fully met by a system capable of reliable, automatic
processing of the spectra. We present here a novel data representation strategy for the
measured spectra which simplifies the pre-processing of the data and supports the automatic
identification and quantification of metabolites. The approach is combined with an extended
targeted profiling strategy to allow the highly automated processing of 1-H NMR spectra,
generating readouts suitable for the derivation of system biological models. The parallel
application of both manual expert analysis and the automated approach to 1-H NMR spectra
obtained from stem cell extracts shows that the results obtained are highly comparable. Use
of the automated system therefore significantly reduces the effort normally associated with
manual processing and paves the way for reliable, high throughput analysis of complex NMR
spectra.}
}
@inproceedings{Schleif2011a,
  author = {F.-M. Schleif and S. Simmuteit and T. Villmann},
  title = {Hierarchical deconvolution of linear mixtures of high-dimensional mass spectra in micro-biology},
  booktitle = {Proceedings of AIA 2011},
  pages = {CD-publication},
  doi = {10.2316/P.2011.717-011},
  year = {2011},
  url = {pdf/aia_2012.pdf},
  abstract = {This paper introduces a hierarchical model for the description and deconvolution of
composite patterns. The patterns are described in a basis system of spectral basis functions.
The mixture coefficients for the composite patterns are determined by solving a linear
mixture model with nonnegative coefficients. In life science research, wet-lab mixed samples
of possibly known basis substances occur regularly and cause a challenge for identification
tasks. Also in case of known basis functions the problem is still complex, if the used basis
is very sparse and the number of basis functions is very large. Simple approaches either try
combining different basis spectra or incorporate blind source separation. Our proposed method
is to use nonnegative least squares combined with a hierarchical prototype based learning
model. We evaluate our method on mixtures of real and simulated composite patterns of mass
spectrometry data from bacteria. Results show remarkable success and can be taken as a
promising step in the new field of automatic unmixing of mixed cultures.
}
}
@inproceedings{Schleif2011c,
  author = {A. Gisbrecht and B. Hammer and F.-M. Schleif and X. Zhu},
  title = {Accelerating kernel clustering for biomedical data analysis},
  booktitle = {Proceedings of CIBCB 2011},
  pages = {154--161},
  year = {2011},
  url = {pdf/cibcb_2012.pdf},
  abstract = {The increasing size and complexity of modern data
sets turns modern data mining techniques into indispensable tools
when inspecting biomedical data sets. Thereby, dedicated data
formats and detailed information often cause the need for problem specific similarities or dissimilarities instead of the standard
Euclidean norm. Therefore, a number of clustering techniques
which rely on similarities or dissimilarities only have recently
been proposed. In this contribution, we review some of the most
popular dissimilarity based clustering techniques and we discuss
possibilities how to get around the usually squared complexity
of the models due to their dependency on the full dissimilarity
matrix. We evaluate the techniques on two benchmarks from the
biomedical domain.
}
}
@inproceedings{Schleif2011d,
  author = {F.-M. Schleif and A. Gisbrecht and B. Hammer},
  booktitle = {Artificial Neural Networks and Machine Learning -- ICANN 2011},
  date-added = {2011-06-27 13:20:26 +0200},
  date-modified = {2011-06-27 13:20:50 +0200},
  editor = {Timo Honkela and W{\l}odzis{\l}aw Duch and Mark Girolami and Samuel Kaski},
  isbn = {978-3-642-21734-0},
  location = {Heidelberg},
  pages = {150--158},
  publisher = {Springer},
  series = {Lecture Notes in Computer Science},
  title = {Accelerating Kernel Neural Gas},
  volume = {6791},
  year = {2011},
  url = {pdf/icann_2011.pdf},
  abstract = {Clustering approaches constitute important methods for unsupervised data analysis. 
Traditionally, many clustering models focus on
spherical or ellipsoidal clusters in Euclidean space. Kernel methods extend these approaches to more complex cluster forms, and they have
been recently integrated into several clustering techniques. While leading to very flexible representations, kernel clustering has the drawback
of high memory and time complexity due to its dependency on the full
Gram matrix and its implicit representation of clusters in terms of feature vectors. In this contribution, we accelerate the kernelized Neural
Gas algorithm by incorporating a Nystr{\"o}m approximation scheme and
active learning, and we arrive at sparse solutions by integration of a sparsity constraint. We provide experimental results which show that these
accelerations do not lead to a deterioration in accuracy while improving
time and memory complexity.
}
}
@inproceedings{Schleif2011e,
  author = {K. Bunte and F.-M. Schleif and T. Villmann},
  title = {Mathematical Foundations of the Self Organized Neighbor Embedding (SONE) for Dimension Reduction and Visualization},
  booktitle = {Proceedings of ESANN 2011},
  pages = {29--34},
  isbn = {978-2-87419-044-5},
  year = {2011},
  url = {pdf/esann_2011.pdf},
  abstract = {In this paper we propose the generalization of the recently
introduced Neighbor Embedding Exploratory Observation Machine (NEXOM) for dimension reduction and visualization. We provide a general mathematical framework called Self Organized Neighbor Embedding
(SONE). It treats the components, like data similarity measures and neighborhood functions, independently and easily changeable. And it enables
the utilization of different divergences, based on the theory of Fr{\'e}chet
derivatives. In this way we propose a new dimension reduction and visualization algorithm, which can be easily adapted to the user specific request
and the actual problem.
}
}
@inproceedings{Schleif2011f,
  author = {U. Seiffert and F.-M. Schleif and D. Z\"uhlke},
  title = {Recent Trends in Computational Intelligence in Life Science},
  booktitle = {Proceedings of ESANN 2011},
  pages = {77--86},
  isbn = {978-2-87419-044-5},
  year = {2011},
  url = {pdf/esann_2011c.pdf},
  abstract = {Computational intelligence generally comprises a rather large
set of – in a wider sense – adaptive and human-like data analysis and
modelling methods. Due to some superior features - such as generalisation, trainability,
coping with incomplete and inconsistent data, etc. -
computational intelligence has found its way into numerous applications
in almost all scientific disciplines. A very prominent field among them are
life sciences that are characterised by some unique requirements in terms
of data structure and analysis.
}
}
@inproceedings{Schleif2011g,
  author = {P. Schneider and T. Geweniger and F.-M. Schleif and M. Biehl and T. Villmann},
  title = {Multivariate class labeling in Robust Soft {LVQ}},
  booktitle = {Proceedings of ESANN 2011},
  pages = {17--22},
  isbn = {978-2-87419-044-5},
  year = {2011},
  url = {pdf/esann_2011b.pdf},
  abstract = {We introduce a generalization of Robust Soft Learning Vector Quantization (RSLVQ). 
This algorithm for nearest prototype classification is derived from an explicit cost function and follows the dynamics
of a stochastic gradient ascent. We generalize the RSLVQ cost function
with respect to vectorial class labelings. This approach allows to realize
multivariate class memberships for prototypes and training samples, and
the prototype labels can be learned from the data during training. We
present experiments to demonstrate the new algorithm in practice.}
}
@inproceedings{Schleif2011h,
  author = {F.-M. Schleif},
  title = {Sparse Kernel Vector Quantization with Local Dependencies },
  booktitle = {Proceedings of IJCNN 2011},
  pages = {1538--1545},
  isbn = {978-1-4244-9635-8},
  year = {2011},
  url = {pdf/ijcnn_2011.pdf},
  abstract = {Clustering approaches are very important methods to analyze data sets in an initial unsupervised setting.
Traditionally many clustering approaches assume data points
to be independent. Here we present a method to make use
of local dependencies to improve clustering under guaranteed
distortions. Such local dependencies are very common for data
generated by imaging technologies with an underlying topographic support of the measured data. We provide experimental
results on artificial and real world data of clustering tasks.
}
}
@inproceedings{Schleif2011i,
  author = {B. Hammer and A. Gisbrecht and A. Hasenfuss and B. Mokbel and F.-M. Schleif and X. Zhu},
  booktitle = {Advances in Self-Organizing Maps, WSOM 2011},
  date-added = {2011-04-08 13:48:18 +0200},
  date-modified = {2011-06-20 15:48:51 +0200},
  editor = {Jorma Laaksonen and Timo Honkela},
  pages = {1--15},
  publisher = {Springer},
  series = {Lecture Notes in Computer Science 6731},
  title = {Topographic Mapping of Dissimilarity Data},
  year = {2011},
  url = {pdf/wsom_2011.pdf},
  abstract = {Topographic mapping offers a very flexible tool to inspect
large quantities of high-dimensional data in an intuitive way. Often, electronic data are inherently non-Euclidean and modern data formats are
connected to dedicated non-Euclidean dissimilarity measures for which
classical topographic mapping cannot be used. We give an overview
about extensions of topographic mapping to general dissimilarities by
means of median or relational extensions. Further, we discuss efficient
approximations to avoid the usually squared time complexity.
}
}
@inproceedings{Schleif2011j,
  author = {A. Gisbrecht and
               F.-M. Schleif and
               X. Zhu and
               B. Hammer},
  title = {Linear Time Heuristics for Topographic Mapping of Dissimilarity
               Data},
  booktitle = {IDEAL},
  year = {2011},
  pages = {25--33},
  crossref = {DBLP:conf/ideal/2011},
  bibsource = {DBLP, http://dblp.uni-trier.de},
  url = {pdf/ideal_2011.pdf},
  abstract = {Topographic mapping offers an intuitive interface to inspect
large quantities of electronic data. Recently, it has been extended to data
described by general dissimilarities rather than Euclidean vectors. Unlike
its Euclidean counterpart, the technique has quadratic time complexity
due to the underlying quadratic dissimilarity matrix. Thus, it is infeasible
already for medium sized data sets. We introduce two approximation
techniques which speed up the complexity to linear time algorithms: the
Nystr{\"o}m approximation and patch processing, respectively. We evaluate
the techniques on three examples from the biomedical domain.
}
}
@proceedings{DBLP:conf/ideal/2011,
  editor = {Hujun Yin and
               Wenjia Wang and
               Victor J. Rayward-Smith},
  title = {Intelligent Data Engineering and Automated Learning - IDEAL
               2011 - 12th International Conference, Norwich, UK, September
               7-9, 2011. Proceedings},
  booktitle = {IDEAL},
  publisher = {Springer},
  series = {Lecture Notes in Computer Science},
  volume = {6936},
  year = {2011},
  isbn = {978-3-642-23877-2},
  ee = {http://dx.doi.org/10.1007/978-3-642-23878-9},
  bibsource = {DBLP, http://dblp.uni-trier.de}
}
@inproceedings{Schleif2011m,
  editor = {Bao-Liang Lu and Liqing Zhang and James Kwok},
  booktitle = {Neural Information Processing},
  publisher = {Springer},
  location = {Heidelberg},
  series = {Lecture Notes in Computer Science},
  volume = {7063},
  year = {2011},
  isbn = {978-3-642-24957-0},
  author = {B. Hammer and F.-M. Schleif and X. Zhu},
  title = {Relational Extensions of Learning Vector Quantization},
  pages = {481--489},
  url = {pdf/iconip_2011.pdf},
  abstract = {Prototype based models offer an intuitive interface to given
data sets by means of an inspection of the model prototypes. Supervised
classification can be achieved by popular techniques such as learning
vector quantization (LVQ) and extensions derived from cost functions
such as generalized LVQ (GLVQ) and robust soft LVQ (RSLVQ). These
methods, however, are restricted to Euclidean vectors and they cannot
be used if data are characterized by a general dissimilarity matrix. In
this approach, we propose relational extensions of GLVQ and RSLVQ
which can directly be applied to general possibly non-Euclidean data
sets characterized by a symmetric dissimilarity matrix.
}
}
@inproceedings{Schleif2011n,
  author = {Barbara Hammer and
               Bassam Mokbel and
               F.-M. Schleif and
               Xibin Zhu},
  title = {Prototype-Based Classification of Dissimilarity Data},
  booktitle = {IDA},
  year = {2011},
  pages = {185--197},
  ee = {http://dx.doi.org/10.1007/978-3-642-24800-9_19},
  crossref = {DBLP:conf/ida/2011},
  bibsource = {DBLP, http://dblp.uni-trier.de},
  url = {pdf/ida_2011.pdf},
  abstract = {Unlike many black-box algorithms in machine learning, prototype based models offer an intuitive interface to given data sets since
prototypes can directly be inspected by experts in the field. Most techniques rely on Euclidean vectors such that their suitability for complex
scenarios is limited. Recently, several unsupervised approaches have successfully been
extended to general possibly non-Euclidean data characterized by pairwise dissimilarities.
In this paper, we shortly review a
general approach to extend unsupervised prototype-based techniques to
dissimilarities, and we transfer this approach to supervised prototype-based classification for general dissimilarity data.
}
}
@proceedings{DBLP:conf/ida/2011,
  editor = {Jo{\~a}o Gama and
               Elizabeth Bradley and
               Jaakko Hollm{\'e}n},
  title = {Advances in Intelligent Data Analysis X - 10th International
               Symposium, IDA 2011, Porto, Portugal, October 29-31, 2011.
               Proceedings},
  booktitle = {IDA},
  publisher = {Springer},
  series = {Lecture Notes in Computer Science},
  volume = {7014},
  year = {2011},
  isbn = {978-3-642-24799-6},
  ee = {http://dx.doi.org/10.1007/978-3-642-24800-9},
  bibsource = {DBLP, http://dblp.uni-trier.de}
}
@article{Schleif2011b,
  author = {F.-M. Schleif and T. Riemer and U. B{\"o}rner and L. Schnapka-Hille and M. Cross},
  title = {Genetic algorithm for shift-uncertainty correction in {1-D} {NMR} based metabolite identifications and quantifications},
  pages = {524--533},
  journal = {Bioinformatics},
  volume = {27},
  number = {4},
  year = 2011,
  url = {pdf/bioinfo_2011.pdf},
  abstract = {Motivation: The analysis of metabolic processes is becoming
increasingly important to our understanding of complex biological
systems and disease states. Nuclear magnetic resonance
spectroscopy (NMR) is a particularly relevant technology in this
respect, since the NMR signals provide a quantitative measure of
metabolite concentrations. However, due to the complexity of the
spectra typical of biological samples, the demands of clinical and
high throughput analysis will only be fully met by a system capable
of reliable, automatic processing of the spectra. An initial step in this
direction has been taken by Targeted Profiling (TP), employing a
set of known and predicted metabolite signatures fitted against the
signal. However, an accurate fitting procedure for 1H NMR data is
complicated by shift uncertainties in the peak systems caused by
measurement imperfections. These uncertainties have a large impact
on the accuracy of identification and quantification and currently
require compensation by very time consuming manual interactions.
Here, we present an approach, termed Extended Targeted Profiling
(ETP), that estimates shift uncertainties based on a genetic algorithm
(GA) combined with a least squares optimization (LSQO). The
estimated shifts are used to correct the known metabolite signatures
leading to significantly improved identification and quantification. In
this way, use of the automated system significantly reduces the effort
normally associated with manual processing and paves the way for
reliable, high throughput analysis of complex NMR spectra.
Results: The results indicate that using simultaneous shift
uncertainty correction and least squares fitting significantly improves
the identification and quantification results for 1H NMR data
in comparison to the standard targeted profiling approach and
compares favorably with the results obtained by manual expert
analysis. Preservation of the functional structure of the NMR spectra
makes this approach more realistic than simple binning strategies.
}
}
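The interplay of the genetic algorithm and the least-squares fit described above can be illustrated on a toy problem. Everything below is a simplified stand-in: the Gaussian 'signatures', the population size, the truncation selection and the mutation scale are assumptions chosen for brevity, not the ETP implementation.

import numpy as np

rng = np.random.default_rng(0)

# toy spectrum: each metabolite signature is a single Gaussian peak on a ppm axis
ppm = np.linspace(0.0, 10.0, 1000)
def peak(center):
    return np.exp(-0.5 * ((ppm - center) / 0.05) ** 2)

centers = np.array([2.0, 5.0, 7.5])          # nominal signature positions
true_shift = np.array([0.03, -0.04, 0.02])   # unknown shift uncertainties
true_conc = np.array([1.0, 0.5, 2.0])
spectrum = sum(c * peak(m + s) for c, m, s in zip(true_conc, centers, true_shift))

def fitness(shifts):
    """Least-squares fit of the shifted signatures; the residual is the GA objective."""
    A = np.stack([peak(m + s) for m, s in zip(centers, shifts)], axis=1)
    conc, *_ = np.linalg.lstsq(A, spectrum, rcond=None)
    return np.sum((A @ conc - spectrum) ** 2)

# minimal genetic algorithm over candidate shift vectors
pop = rng.uniform(-0.1, 0.1, size=(40, len(centers)))
for _ in range(60):
    order = np.argsort([fitness(ind) for ind in pop])
    parents = pop[order[:10]]                                    # truncation selection
    children = parents[rng.integers(0, 10, size=30)] \
               + rng.normal(0.0, 0.01, size=(30, len(centers)))  # mutated offspring
    pop = np.vstack([parents, children])                         # elitism

best = min(pop, key=fitness)
print("estimated shifts:", np.round(best, 3))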
@article{Schleif2011k,
  author = {F.-M. Schleif and T. Villmann and B. Hammer and P. Schneider},
  title = {Efficient Kernelized Prototype-based Classification},
  pages = {443-457},
  journal = {International Journal of Neural Systems},
  volume = {21},
  number = {6},
  year = 2011,
  url = {pdf/jnns_2011.pdf},
  abstract = {Prototype based classifiers are effective algorithms in modeling classification problems and have been
applied in multiple domains. While many supervised learning algorithms have been successfully extended
to kernels to improve the discrimination power by means of the kernel concept, prototype based classifiers
are typically still used with Euclidean distance measures. Kernelized variants of prototype based classifiers
are currently too complex to be applied for larger data sets. Here we propose an extension of Kernelized
Generalized Learning Vector Quantization (KGLVQ) employing a sparsity and approximation technique
to reduce the learning complexity. We provide generalization error bounds and experimental results on
real world data, showing that the extended approach is comparable to SVM on different public data.
}
}
@article{Schleif2011l,
  author = {K. Bunte and P. Schneider and B. Hammer and F.-M. Schleif and T. Villmann and M. Biehl},
  title = {Limited Rank Matrix Learning, Discriminative Dimension Reduction and Visualization},
  pages = {159--173},
  journal = {Neural Networks},
  volume = {26},
  year = 2012,
  url = {pdf/nn_2012.pdf},
  abstract = {We present an extension of the recently introduced Generalized Matrix Learning Vector Quantization
algorithm. In the original scheme, adaptive square matrices of relevance factors parameterize a
discriminative distance measure. We extend the scheme to matrices of limited rank corresponding to
low-dimensional representations of the data. This allows us to incorporate prior knowledge of the intrinsic
dimension and to reduce the number of adaptive parameters efficiently.
In particular, for very large dimensional data, the limitation of the rank can reduce computation
time and memory requirements significantly. Furthermore, two- or three-dimensional representations
constitute an efficient visualization method for labeled data sets. The identification of a suitable projection
is not treated as a pre-processing step but as an integral part of the supervised training. Several real world
data sets serve as an illustration and demonstrate the usefulness of the suggested method.
}
}
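The limited-rank relevance metric summarised above amounts to d(x, w) = ||Omega (x - w)||^2 with a rectangular matrix Omega of M rows, where M is far below the data dimension; for M = 2 the same Omega serves as the discriminative projection. A minimal sketch of both uses (no training loop; names are illustrative):

import numpy as np

def limited_rank_distance(x, w, Omega):
    """Adaptive squared distance d(x, w) = ||Omega @ (x - w)||^2 with Omega of shape (M, N), M << N."""
    diff = Omega @ (x - w)
    return float(diff @ diff)

def project(X, Omega):
    """Map data to the row space of Omega; with M = 2 this yields the discriminative 2-D visualization."""
    return X @ Omega.T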
@inproceedings{Schleif2012a,
  author = {B. Hammer and B. Mokbel and F.-M. Schleif and X. Zhu},
  affiliation = {CITEC Centre of Excellence, Bielefeld University, 33615 Bielefeld, Germany},
  title = {White Box Classification of Dissimilarity Data},
  booktitle = {Hybrid Artificial Intelligent Systems},
  series = {Lecture Notes in Computer Science},
  editor = {Corchado, Emilio and Snášel, Václav and Abraham, Ajith and Wozniak, Michal and Graña, Manuel and Cho, Sung-Bae},
  publisher = {Springer Berlin / Heidelberg},
  isbn = {978-3-642-28941-5},
  keyword = {Computer Science},
  pages = {309-321},
  volume = {7208},
  location = {Salamanca, Spain},
  url = {pdf/hais_2012.pdf},
  abstract = {While state-of-the-art classifiers such as support vector machines offer efficient classification for kernel data, they suffer from two
drawbacks: the underlying classifier acts as a black box which can hardly
be inspected by humans, and non-positive definite Gram matrices require additional preprocessing steps to arrive at a valid kernel. In this
approach, we extend prototype-based classification towards general dissimilarity data resulting in a technology which (i) can deal with dissimilarity data characterized by an arbitrary symmetric dissimilarity matrix,
(ii) offers intuitive classification in terms of prototypical class representatives,
and (iii) leads to state-of-the-art classification results.
},
  year = {2012}
}
@article{Schleif2012b,
  author = {X. Zhu and A. Gisbrecht and F.-M. Schleif and B. Hammer},
  title = {Approximation techniques for clustering dissimilarity data},
  pages = {72--84},
  journal = {Neurocomputing},
  volume = {90},
  year = 2012,
  url = {pdf/nc_2012.pdf},
  abstract = {Recently, diverse high quality prototype-based clustering techniques
have been developed which can directly deal with data sets given by general pairwise dissimilarities rather than standard Euclidean vectors.
Examples include affinity propagation, relational neural gas, or relational
generative topographic mapping. Corresponding to the size of the dissimilarity matrix, these techniques scale quadratically with the size of
the training set, such that training becomes prohibitive for large data
volumes. In this contribution, we investigate two different linear time
approximation techniques, patch processing and the Nystr{\"o}m approximation. We apply these approximations to several representative clustering
techniques for dissimilarities, where possible, and compare the results for
diverse data sets.
}
}
@inproceedings{Schleif2012c,
  author = {K. Bunte and F.-M. Schleif and M. Biehl},
  title = {Adaptive Learning for complex-valued data},
  booktitle = {Proceedings of ESANN 2012},
  pages = {387-392},
  url = {pdf/esann_2012.pdf},
  abstract = {In this paper we propose a variant of the Generalized Matrix Learning Vector Quantization (GMLVQ) for dissimilarity learning on
complex-valued data. Complex features can be encountered in various
data domains, e.g. Fourier transformed mass spectrometry or image analysis data. Current approaches deal with complex inputs by ignoring the
imaginary parts or concatenating real and imaginary parts in one real
valued vector. In this contribution we propose a prototype based classification method, which allows us to deal with complex-valued data directly.
The algorithm is tested on a benchmark data set and for leaf recognition
using Zernike moments. We observe that the complex version converges
much faster than the original GMLVQ evaluated on the real parts only.
The complex version has fewer free parameters than using a concatenated
vector and is thus computationally more efficient than the original GMLVQ.
},
  year = {2012}
}
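For the complex-valued variant above, the essential change is that the quadratic form uses the conjugate transpose, which keeps the distance real and non-negative. A one-function sketch of that assumed form (not the authors' code):

import numpy as np

def complex_adaptive_distance(x, w, Omega):
    """d(x, w) = (x - w)^H Omega^H Omega (x - w) for complex vectors x, w and a complex matrix Omega."""
    diff = Omega @ (x - w)
    return float(np.real(np.vdot(diff, diff)))   # vdot conjugates its first argument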
@inproceedings{Schleif2012d,
  author = {F.-M. Schleif and A. Gisbrecht and B. Hammer},
  title = {Relevance learning for short high-dimensional time series in the life sciences},
  booktitle = {Proceedings of IJCNN 2012},
  pages = {2069-2076},
  year = {2012},
  url = {pdf/ijcnn_2012.pdf},
  abstract = {Digital data characterizing physiological processes
over time are becoming increasingly important such as spectrometric data or gene expression profiles. Typical characteristics
of such data are high dimensionality due to a fine grained
measurement, but usually only few time points of the series. Due
to the short length, classical time series models cannot be used.
At the same time, due to the high dimensionality, data cannot
be treated by means of time windows using simple vectorial
techniques.
Here, we consider the generative topographic mapping through
time (GTM-TT) as a highly regularized model for time series
inspection in the unsupervised setting, based on hidden Markov
models enhanced with topographic mapping facilities. We extend
the model such that supervised classification can be built on top
of GTM-TT, resulting in supervised GTM-TT, and we extend the
technique by supervised relevance learning. The latter adapts the
metric according to given auxiliary information resulting in an
interpretable form which can deal with high dimensional inputs.
We demonstrate the technique in simulated data as well as an
example from the biomedical domain, reaching state of the art
classification accuracy in both cases.
}
}
@inproceedings{Schleif2012e,
  author = {M. Biehl and K. Bunte and F.-M. Schleif and P. Schneider and T. Villmann},
  title = {Large Margin Linear Discriminative Visualization by Matrix Relevance Learning},
  booktitle = {Proceedings of IJCNN 2012},
  pages = {1873-1880},
  url = {pdf/ijcnn_2012b.pdf},
  abstract = {We suggest and investigate the use of Generalized
Matrix Relevance Learning (GMLVQ) in the context of discriminative visualization. This prototype-based, supervised learning
scheme parameterizes an adaptive distance measure in terms of
a matrix of relevance factors. By means of a few benchmark
problems, we demonstrate that the training process yields low
rank matrices which can be used efficiently for the discriminative
visualization of labeled data. Comparison with well-known
standard methods illustrates the flexibility and discriminative
power of the novel approach. The mathematical analysis of
GMLVQ shows that the corresponding stationarity condition
can be formulated as an eigenvalue problem with one or several
strongly dominating eigenvectors. We also study the inclusion of
a penalty term which enforces non-singularity of the relevance
matrix and can be used to control the role of higher order
eigenvalues efficiently.},
  year = {2012}
}
@inproceedings{Schleif2012f,
  author = {X. Zhu and F.-M. Schleif and B. Hammer},
  title = {Patch Processing for Relational Learning Vector Quantization},
  booktitle = {Advances in Neural Networks -- ISNN 2012},
  year = {2012},
  editor = {Jun Wang and Gary G. Yen and Marios M. Polycarpou},
  volume = {7367},
  series = {Lecture Notes in Computer Science},
  pages = {55--63},
  publisher = {Springer},
  isbn = {978-3-642-31345-5},
  location = {Heidelberg},
  url = {pdf/isnn_2012.pdf},
  abstract = {Recently, an extension of popular learning vector quantization (LVQ) to general dissimilarity data has been proposed, relational
generalized LVQ (RGLVQ) [10, 9]. An intuitive prototype based classification scheme results which can divide data characterized by pairwise
dissimilarities into priorly given categories. However, the technique relies
on the full dissimilarity matrix and, thus, has squared time complexity
and linear space complexity. In this contribution, we propose an intuitive
linear time and constant space approximation of RGLVQ by means of
patch processing. An efficient heuristic which maintains the good classification accuracy and interpretability of RGLVQ results, as demonstrated
in three examples from the biomedical domain.
}
}
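Patch processing, as applied to RGLVQ above, keeps memory constant by clustering one patch at a time together with the prototypes carried over from earlier patches. The sketch below substitutes plain k-means for the relational learner purely to stay self-contained, and it omits the weighting of carried prototypes by the number of points they compress; it illustrates the scheme, not the paper's algorithm.

import numpy as np

def kmeans(X, k, iters=20, seed=None):
    """Plain k-means, standing in for the quadratic-cost base learner."""
    rng = np.random.default_rng(seed)
    centers = X[rng.choice(len(X), size=k, replace=False)].copy()
    for _ in range(iters):
        labels = ((X[:, None, :] - centers[None, :, :]) ** 2).sum(axis=2).argmin(axis=1)
        for j in range(k):
            if np.any(labels == j):
                centers[j] = X[labels == j].mean(axis=0)
    return centers

def patch_processing(X, k, patch_size, seed=None):
    """Stream the data in patches (patch_size must be at least k); each patch is clustered
    together with the prototypes carried over from the previous patches, so only
    patch_size + k points are ever held in memory."""
    prototypes = np.empty((0, X.shape[1]))
    for start in range(0, len(X), patch_size):
        block = np.vstack([prototypes, X[start:start + patch_size]])
        prototypes = kmeans(block, k, seed=seed)
    return prototypes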
@inproceedings{Schleif2012g,
  author = {F.-M. Schleif and X. Zhu and B. Hammer},
  title = {A conformal classifier for dissimilarity data},
  booktitle = {Proceedings of AIAI 2012},
  isbn = {978-3-642-33411-5},
  pages = {234-243},
  url = {pdf/aiai_2012.pdf},
  abstract = {Current classification algorithms focus on vectorial data, given in Euclidean or kernel spaces. Many real world data, like biological sequences, are not
vectorial and often non-Euclidean, given by (dis-)similarities only, calling for
efficient and interpretable models. Current classifiers for such data require complex transformations and provide only crisp classification without any measure
of confidence, which is a standard requirement in the life sciences. In this paper we propose a prototype-based conformal classifier for dissimilarity data. It
effectively deals with dissimilarity data. The model complexity is automatically
adjusted and confidence measures are provided. In experiments on dissimilarity
data we investigate the effectiveness with respect to accuracy and model complexity in comparison to different state-of-the-art classifiers.
},
  year = {2012}
}
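The confidence measures mentioned above come from conformal prediction: every candidate label receives a p-value computed from calibration nonconformity scores, and confidence and credibility are read off the two largest p-values. A minimal sketch under these standard definitions; the concrete nonconformity score (for a prototype model, e.g. a ratio of distances to the closest correct and closest wrong prototype) is left abstract here.

import numpy as np

def p_value(calibration_scores, test_score):
    """Fraction of calibration nonconformity scores at least as large as the test score."""
    s = np.asarray(calibration_scores)
    return (np.sum(s >= test_score) + 1) / (len(s) + 1)

def predict_with_confidence(p_values):
    """p_values: dict label -> p-value. Returns the predicted label, its confidence
    (one minus the second largest p-value) and its credibility (the largest p-value)."""
    ranked = sorted(p_values, key=p_values.get, reverse=True)
    best = ranked[0]
    second_p = p_values[ranked[1]] if len(ranked) > 1 else 0.0
    return best, 1.0 - second_p, p_values[best]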
@inproceedings{Schleif2012h,
  author = {F.-M. Schleif and X. Zhu and A. Gisbrecht and B. Hammer},
  title = {Fast approximated relational and kernel clustering},
  booktitle = {Proceedings of ICPR 2012},
  pages = {1229--1232},
  publisher = {IEEE},
  isbn = {978-4-990644-1-6},
  url = {pdf/icpr_2012.pdf},
  abstract = {The large amount of digital data calls for scalable tools like efficient clustering algorithms. Many
algorithms for large data sets require linear separability in a Euclidean space. Kernel approaches can
capture the non-linear structure but do not scale well
for large data sets. Alternatively, data are often represented implicitly by dissimilarities like for protein
sequences, whose methods also often do not scale to
large problems. We propose a single algorithm for both
type of data, based on a batch approximation of relational soft competitive learning, termed fast generic
soft-competitive learning. The algorithm has linear
computational and memory requirements and performs
favorably compared to traditional techniques.
},
  year = {2012}
}
@inproceedings{Schleif2012i,
  author = {F.-M. Schleif and X. Zhu and B. Hammer},
  title = {Soft Competitive Learning for large data sets},
  booktitle = {Proceedings of MCSD 2012},
  pages = {141-151},
  isbn = {978-3-642-32517-5},
  year = {2012},
  url = {pdf/mcsd_2012.pdf},
  abstract = {Soft competitive learning is an advanced k-means like clustering approach
overcoming some severe drawbacks of k-means, like initialization dependence and
sticking to local minima. It achieves lower distortion error than k-means and has
shown very good performance in the clustering of complex data sets, using various
metrics or kernels. While very effective, it does not scale to large data sets, which is
even more severe in the case of kernels, due to a dense prototype model. In this paper,
we propose a novel soft-competitive learning algorithm using core-sets, significantly
accelerating the original method in practice with natural sparsity. It effectively deals
with very large data sets of up to several million points. Our method also provides an
alternative fast kernelization of soft-competitive learning. In contrast to many other
clustering methods the obtained model is based on only few prototypes and shows
natural sparsity. It is the first natural sparse kernelized soft competitive learning
approach. Numerical experiments on synthetic and benchmark data sets show the
efficiency of the proposed method.
}
}
@inproceedings{Schleif2012j,
  author = {F.-M. Schleif and B. Mokbel and A. Gisbrecht and L. Theunissen and V. D{\"u}rr and B. Hammer},
  title = {Learning relevant time points for time-series data in the life sciences},
  booktitle = {Proceedings of ICANN 2012},
  pages = {531-539},
  year = {2012},
  url = {pdf/icann_2012.pdf},
  abstract = {In the life sciences, short time series with high dimensional
entries are becoming more and more popular such as spectrometric data
or gene expression profiles taken over time. Data characteristics rule out
classical time series analysis due to the few time points, and they prevent a simple vectorial treatment due to the high dimensionality. In this
contribution, we successfully use the generative topographic mapping
through time (GTM-TT) which is based on hidden Markov models enhanced with a topographic mapping to model such data. We propose an
extension of GTM-TT by relevance learning which automatically adapts
the model such that the most relevant input variables and time points
are emphasized by means of an automatic relevance weighting scheme.
We demonstrate the technique in two applications from the life sciences.
}
}
@article{Schleif2012k,
  author = {A. Gisbrecht and B. Mokbel and F.-M. Schleif and X. Zhu and B. Hammer},
  title = {Linear time relational prototype based learning},
  pages = {online},
  journal = {International Journal of Neural Systems},
  volume = {22},
  number = {5},
  year = 2012,
  url = {pdf/ijns_2012.pdf},
  abstract = {Prototype based learning offers an intuitive interface to inspect large quantities of electronic data in
    supervised or unsupervised settings. Recently, many techniques have been extended to data described
    by general dissimilarities rather than Euclidean vectors, so-called relational data settings. Unlike the
    Euclidean counterparts, the techniques have quadratic time complexity due to the underlying quadratic
    dissimilarity matrix. Thus, they are infeasible already for medium sized data sets. The contribution
    of this article is twofold: on the one hand we propose a novel supervised prototype based classification
    technique for dissimilarity data based on popular learning vector quantization, on the other hand we
    transfer a linear time approximation technique, the Nystr{\"o}m approximation, to this algorithm and an
    unsupervised counterpart, the relational generative topographic mapping. This way, linear time and
    space methods result. We evaluate the techniques on three examples from the biomedical domain.
    }
}
@inproceedings{Schleif2013a,
  author = {X. Zhu and F.-M. Schleif and B. Hammer},
  title = {Semi-Supervised Vector Quantization for proximity data},
  booktitle = {Proceedings of ESANN 2013},
  pages = {89-94},
  url = {pdf/esann_2013.pdf},
  abstract = {Semi-supervised learning (SSL) is focused on learning from
labeled and unlabeled data by incorporating structural and statistical
information of the available unlabeled data. The amount of data is
dramatically increasing, but few of them are fully labeled, due to cost and
time constraints. This is even more challenging for non-vectorial, proximity
data, given by pairwise proximity values. Only few methods provide
SSL for this data, limited to positive-semi-definite (psd) data. They also
lack interpretable models, which is a relevant aspect in life-sciences where
most of these data are found. This paper provides a prototype based SSL
approach for proximity data.
},
  year = {2013}
}
@inproceedings{Schleif2013b,
  author = {F.-M. Schleif and A. Gisbrecht},
  title = {Data analysis of (non-)metric proximities at linear costs},
  booktitle = {Proceedings of SIMBAD 2013},
  pages = {59--74},
  url = {pdf/simbad_2013.pdf},
  abstract = {Domain specific (dis-)similarity or proximity measures, employed
e.g. in alignment algorithms in bio-informatics, are often used to compare complex
data objects and to cover domain specific data properties. Lacking an underlying
vector space, data are given as pairwise (dis-)similarities. The few available
methods for such data do not scale well to very large data sets. Kernel methods
easily deal with metric similarity matrices, also at large scale, but costly transformations
are needed starting from non-metric (dis-)similarities. We propose an
integrative combination of Nystr{\"o}m approximation, potential double centering
and eigenvalue correction to obtain valid kernel matrices at linear costs.
Accordingly, effective kernel approaches become accessible for these data. Evaluation on
several larger (dis-)similarity data sets shows that the proposed method achieves
much better runtime performance than the standard strategy while keeping competitive
model accuracy. Our main contribution is an efficient linear technique to
convert (potentially non-metric) large scale dissimilarity matrices into approximated
psd kernel matrices.
},
  year = {2013}
}
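Two of the ingredients named above, double centering and eigenvalue correction, are small enough to spell out. The sketch shows their exact, quadratic-cost form on a full matrix; the contribution of the paper is to carry them out on the Nystr{\"o}m factors at linear cost, which this illustration deliberately does not attempt.

import numpy as np

def double_centering(D):
    """Turn a matrix of squared dissimilarities into a similarity (inner-product) matrix: S = -0.5 * J D J."""
    n = len(D)
    J = np.eye(n) - np.full((n, n), 1.0 / n)
    return -0.5 * J @ D @ J

def eigenvalue_correction(S, mode="clip"):
    """Make a symmetric similarity matrix positive semi-definite: 'clip' zeroes the
    negative eigenvalues, 'flip' replaces them by their absolute values."""
    vals, vecs = np.linalg.eigh(S)
    vals = np.maximum(vals, 0.0) if mode == "clip" else np.abs(vals)
    return (vecs * vals) @ vecs.T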
@inproceedings{Schleif2013c,
  author = {X. Zhu and F.-M. Schleif and B. Hammer},
  title = {Secure Semi-Supervised Vector Quantization for dissimilarity data},
  booktitle = {Proceedings of IWANN 2013},
  pages = {347--356},
  url = {pdf/iwann_2013.pdf},
  abstract = {The amount and complexity of data increase rapidly; however,
due to time and cost, only few of them are fully labeled. In this
context non-vectorial relational data given by pairwise (dis-)similarities
without explicit vectorial representation, like score values in sequence
alignments, are particularly challenging. Existing semi-supervised learning
(SSL) algorithms focus on vectorial data given in Euclidean space. In
this paper we extend a prototype-based classifier for dissimilarity data
to non i.i.d. semi-supervised tasks. Using conformal prediction the ’secure
region’ of unlabeled data can be used to improve the trained model
based on labeled data while adapting the model complexity to cover the
’insecure region’ of labeled data. The proposed method is evaluated on
some benchmarks from the SSL domain.
},
  year = {2013}
}
@article{Schleif2013d,
  author = {A. Micheli and F.-M. Schleif and P. Tino},
  title = {Novel approaches in machine learning and computational intelligence},
  pages = {1-3},
  volume = 112,
  journal = {Neurocomputing},
  year = 2013
}
@article{Schleif2013e,
  author = {B. Hammer and D. Hoffmann and F.-M. Schleif and X. Zhu},
  title = {Learning vector quantization for (dis-)similarities},
  pages = {43--51},
  volume = 131,
  journal = {Neurocomputing},
  url = {pdf/nc_diss_2014.pdf},
  year = 2014
}
@inproceedings{Schleif2013f,
  author = {F.-M. Schleif and X. Zhu and B. Hammer},
  title = {Sparse prototype representation by core sets},
  booktitle = {Proceedings of IDEAL 2013},
  pages = {302--309},
  url = {pdf/ideal_2013.pdf},
  abstract = {Due to the increasing amount of large data sets, efficient learning 
    algorithms are necessary. Also the interpretation of the final model is desirable to
    draw efficient conclusions from the model results. Prototype based learning algorithms 
    have been extended recently to proximity learners to analyze data given
    in non-standard data formats. The supervised methods of this type are of special
    interest but suffer from a large number of optimization parameters to model the
    prototypes. In this contribution we derive an efficient core set based preprocessing 
    to restrict the number of model parameters to $O(n/\epsilon^2)$ with $n$ as the number
    of prototypes. Accordingly, the number of model parameters gets independent of
    the size of the data sets but scales with the requested precision of the core sets.
    Experimental results show that our approach does not significantly degrade the
    performance while significantly reducing the memory complexity.
    },
  year = {2013}
}
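Core sets of the kind used above go back to the minimum enclosing ball construction of Badoiu and Clarkson, in which roughly 1/eps^2 rounds suffice regardless of the data size. The sketch below shows that classic construction; whether the paper relies on exactly this variant is an assumption, the relevant property is only that the core-set size depends on eps rather than on the number of points.

import numpy as np

def meb_coreset(X, eps):
    """(1+eps)-approximate minimum enclosing ball: in round i the point furthest from the
    current center c is added to the core set and c moves towards it, c += (p - c)/(i + 1).
    About 1/eps^2 rounds are needed, independently of the number of points."""
    c = X[0].astype(float)
    core = [0]
    for i in range(1, int(np.ceil(1.0 / eps ** 2)) + 1):
        p = int(np.linalg.norm(X - c, axis=1).argmax())
        core.append(p)
        c += (X[p] - c) / (i + 1)
    return c, sorted(set(core))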
@article{Schleif2014a,
  author = {D. Hofmann and F.-M. Schleif and B. Hammer},
  title = {Learning interpretable kernelized prototype-based models},
  pages = {43-51},
  volume = {131},
  journal = {Neurocomputing},
  url = {pdf/nc_int_2014.pdf},
  year = 2014
}
@article{Schleif2014b,
  author = {M. Strickert and K. Bunte and F.-M. Schleif and E. H{\"u}llermeier},
  title = {Correlation-based Neighbor Embedding},
  pages = {online},
  volume = {},
  journal = {Neurocomputing},
  url = {pdf/nc_corr_2014.pdf},
  year = 2014
}
@article{Schleif2014c,
  author = {F.-M. Schleif and X. Zhu and B. Hammer},
  title = {Sparse conformal prediction for dissimilarity data},
  pages = {to appear},
  volume = {},
  journal = {Annals of Mathematics and Artificial Intelligence},
  url = {pdf/amai_j_2014.pdf},
  year = 2014
}
@inproceedings{Schleif2014d,
  author = {F.-M. Schleif and P. Tino and T. Villmann},
  title = {Recent trends in learning of structured and non-standard data},
  booktitle = {Proceedings of ESANN 2014},
  pages = {243--252},
  url = {pdf/esann_2014a.pdf},
  year = {2014}
}
@inproceedings{Schleif2014e,
  author = {F.-M. Schleif},
  title = {Proximity learning for non-standard big data},
  booktitle = {Proceedings of ESANN 2014},
  pages = {359--364},
  url = {pdf/esann_2014b.pdf},
  year = {2014}
}
@article{Schleif2014f,
  author = {M. J. Embrechts and F. Rossi and F.-M. Schleif and J. A. Lee},
  title = {Advances in artificial neural networks, machine learning, and computational intelligence (ESANN 2013)},
  pages = {to appear},
  volume = {},
  journal = {Neurocomputing},
  url = {pdf/nc_editorial_2014.pdf},
  year = 2014
}
@inproceedings{Schleif2014g,
  author = {F.-M. Schleif},
  title = {Discriminative Fast Soft Competitive Learning},
  booktitle = {Proceedings of ICANN 2014},
  pages = {to appear},
  url = {pdf/icann_2014.pdf},
  year = {2014}
}
@article{Schleif2014h,
  author = {Xibin Zhu and
               Frank{-}Michael Schleif and
               Barbara Hammer},
  title = {Adaptive conformal semi-supervised vector quantization for dissimilarity
               data},
  journal = {Pattern Recognition Letters},
  year = {2014},
  volume = {49},
  pages = {138--145},
  url = {pdf/patrec_2014.pdf},
  doi = {10.1016/j.patrec.2014.07.009},
  timestamp = {Thu, 25 Sep 2014 21:52:21 +0200},
  biburl = {http://dblp.uni-trier.de/rec/bib/journals/prl/ZhuSH14},
  bibsource = {dblp computer science bibliography, http://dblp.org}
}
@techreport{Schleif2005X,
  author = {B. Hammer and F.-M. Schleif and Th. Villmann},
  title = {On the Generalization Ability of Prototype-Based Classifiers with Local Relevance Determination},
  number = {Technical Reports University of Clausthal IfI-05-14},
  year = 2005
}
@techreport{Schleif2006X,
  author = {B. Hammer and A. Hasenfuss and F.-M. Schleif and Th. Villmann},
  title = {Supervised Median Clustering},
  number = {Technical Reports University of Clausthal IfI-09-06},
  year = 2006
}
@techreport{Schleif2007X,
  author = {F.-M. Schleif and A. Hasenfuss and B. Hammer},
  title = {Aggregation of multiple peak lists by use of an improved neural gas network},
  number = {MLR-02-2007, Machine Learning Reports, ISSN:1865-3960 http://www.uni-leipzig.de/$\tilde{ }$compint/mlr/mlr\_02\_2007.pdf},
  year = 2007
}
@techreport{Schleif2007Y,
  author = {F.-M. Schleif},
  title = {Preprocessing of Nuclear Magnetic Resonance Spectrometry Data},
  number = {MLR-01-2007, Machine Learning Reports, ISSN:1865-3960 http://www.uni-leipzig.de/$\tilde{ }$compint/mlr/mlr\_01\_2007.pdf},
  year = 2007
}
@techreport{Schleif2008X,
  author = {K. Bunte and P. Schneider and B. Hammer and F.-M. Schleif and T. Villmann and M. Biehl},
  title = {Discriminative Visualization by Limited Rank Matrix Learning},
  number = {MLR-03-2008, Machine Learning Reports, ISSN:1865-3960 http://www.uni-leipzig.de/$\tilde{ }$compint/mlr/mlr\_03\_2008.pdf},
  year = 2008
}
@techreport{Schleif2009X,
  author = {F.-M. Schleif and T. Riemer and U. B{\"o}rner and M. Cross},
  title = {Extended Targeted Profiling to Identify and Quantify Metabolites in 1-H NMR measurements},
  pages = {to appear},
  booktitle = {Proc. of ICOLE 2009, IfI-05-14},
  publisher = {Technical University of Clausthal},
  year = 2009
}
@techreport{Schleif2009Y,
  author = {S. Simmuteit and J. Simmuteit and F.-M. Schleif and T. Villmann},
  title = {Deconvolution and Identification of Mass Spectra from mixed and pure colonies of bacteria},
  pages = {to appear},
  booktitle = {Proc. of ICOLE 2009, IfI-05-14},
  publisher = {Technical University of Clausthal},
  year = 2009
}
@techreport{Schleif2009Z,
  author = {M. Biehl and B. Hammer and F.-M. Schleif and P. Schneider and T. Villmann},
  title = {Stationarity of Matrix Relevance Learning Vector Quantization},
  number = {MLR-01-2009, Machine Learning Reports, ISSN:1865-3960 http://www.uni-leipzig.de/$\tilde{ }$compint/mlr/mlr\_01\_2009.pdf},
  year = 2009
}
@techreport{Schleif2012Z,
  author = {F.-M. Schleif and A. Gisbrecht},
  title = {Data analysis of (non-)metric (dis-)similarities at linear costs},
  number = {MLR-04-2012, Machine Learning Reports, ISSN:1865-3960 http://www.uni-leipzig.de/$\tilde{ }$compint/mlr/mlr\_04\_2012.pdf},
  year = 2012
}
@techreport{Schleif2013Zb,
  author = {F.-M. Schleif and T. Villmann},
  title = {Analysis of temporal Kinect motion capturing data},
  number = {MLR-05-2013, Machine Learning Reports, ISSN:1865-3960 http://www.uni-leipzig.de/$\tilde{ }$compint/mlr/mlr\_05\_2013.pdf},
  year = 2013
}
@techreport{Schleif2013Z,
  author = {F.-M. Schleif},
  title = {Large scale Nystr{\"o}m approximation for non-metric similarity and dissimilarity data},
  number = {MLR-03-2013, Machine Learning Reports, ISSN:1865-3960 http://www.uni-leipzig.de/$\tilde{ }$compint/mlr/mlr\_03\_2013.pdf},
  year = 2013
}
@article{Schleif2002X,
  author = {F.-M. Schleif},
  title = {OCR mit statistischen Momenten},
  pages = {15--17},
  journal = {Gaotenblatt},
  isbn = {1619-0114},
  year = 2002
}
@article{Schleif2002Y,
  author = {F.-M. Schleif and H. Stamer},
  title = {{LaTeX} im studentischen Alltag},
  pages = {3--10},
  journal = {Gaotenblatt},
  isbn = {1619-0114},
  year = 2002
}
@article{Schleif2005Y,
  author = {F.-M. Schleif},
  title = {Plugins mit wxWidgets},
  pages = {5--10},
  volume = {01/05},
  journal = {Offene Systeme},
  isbn = {1619-0114},
  year = 2005
}

This file was generated by bibtex2html 1.97.