In this paper, we investigate how to scale hierarchical clustering methods (such as OPTICS) to extremely large databases by utilizing data compression methods (such as BIRCH or random sampling). We propose a three step procedure: 1) compress the data into suitable representative objects; 2) apply the hierarchical clustering algorithm only to these objects; 3) recover the clustering structure for the whole data set, based on the result for the compressed data. The key issue in this approach is to design compressed data items such that not only a hierarchical clustering algorithm can be applied, but also that they contain enough information to infer the clustering structure of the original data set in the third step. This is crucial because the results of hierarchical clustering algorithms, when applied naively to a random sample or to the clustering features (CFs) generated by BIRCH, deteriorate rapidly for higher compression rates. This is due to three key problems, which we identify. To solve these problems, we propose an efficient post-processing step and the concept of a Data Bubble as a special kind of compressed data item. Applying OPTICS to these Data Bubbles allows us to recover a very accurate approximation of the clustering structure of a large data set even for very high compression rates. A comprehensive performance and quality evaluation shows that we only trade very little quality of the clustering result for a great increase in performance.
%0 Conference Paper
%1 Breunig2001Data
%A Breunig, Markus M.
%A Kriegel, Hans P.
%A Kröger, Peer
%A Sander, Jörg
%B SIGMOD '01: Proceedings of the 2001 ACM SIGMOD international conference on Management of data
%C New York, NY, USA
%D 2001
%I ACM
%K clustering entityguides summary
%P 79--90
%R 10.1145/375663.375672
%T Data bubbles: quality preserving performance boosting for hierarchical clustering
%U https://doi.org/10.1145/375663.375672
%X In this paper, we investigate how to scale hierarchical clustering methods (such as OPTICS) to extremely large databases by utilizing data compression methods (such as BIRCH or random sampling). We propose a three step procedure: 1) compress the data into suitable representative objects; 2) apply the hierarchical clustering algorithm only to these objects; 3) recover the clustering structure for the whole data set, based on the result for the compressed data. The key issue in this approach is to design compressed data items such that not only a hierarchical clustering algorithm can be applied, but also that they contain enough information to infer the clustering structure of the original data set in the third step. This is crucial because the results of hierarchical clustering algorithms, when applied naively to a random sample or to the clustering features (CFs) generated by BIRCH, deteriorate rapidly for higher compression rates. This is due to three key problems, which we identify. To solve these problems, we propose an efficient post-processing step and the concept of a Data Bubble as a special kind of compressed data item. Applying OPTICS to these Data Bubbles allows us to recover a very accurate approximation of the clustering structure of a large data set even for very high compression rates. A comprehensive performance and quality evaluation shows that we only trade very little quality of the clustering result for a great increase in performance.
%@ 1-58113-332-4
@inproceedings{Breunig2001Data,
  abstract             = {In this paper, we investigate how to scale hierarchical clustering methods (such as OPTICS) to extremely large databases by utilizing data compression methods (such as BIRCH or random sampling). We propose a three step procedure: 1) compress the data into suitable representative objects; 2) apply the hierarchical clustering algorithm only to these objects; 3) recover the clustering structure for the whole data set, based on the result for the compressed data. The key issue in this approach is to design compressed data items such that not only a hierarchical clustering algorithm can be applied, but also that they contain enough information to infer the clustering structure of the original data set in the third step. This is crucial because the results of hierarchical clustering algorithms, when applied naively to a random sample or to the clustering features (CFs) generated by BIRCH, deteriorate rapidly for higher compression rates. This is due to three key problems, which we identify. To solve these problems, we propose an efficient post-processing step and the concept of a Data Bubble as a special kind of compressed data item. Applying OPTICS to these Data Bubbles allows us to recover a very accurate approximation of the clustering structure of a large data set even for very high compression rates. A comprehensive performance and quality evaluation shows that we only trade very little quality of the clustering result for a great increase in performance.},
  added-at             = {2009-03-12T15:42:50.000+0100},
  address              = {New York, NY, USA},
  author               = {Breunig, Markus M. and Kriegel, Hans P. and Kr{\"o}ger, Peer and Sander, J{\"o}rg},
  biburl               = {https://www.bibsonomy.org/bibtex/29ef5fde9db618bbb66f7299ef329deb1/lillejul},
  booktitle            = {SIGMOD '01: Proceedings of the 2001 ACM SIGMOD international conference on Management of data},
  citeulike-article-id = {4028520},
  doi                  = {10.1145/375663.375672},
  interhash            = {7d7b79de83c7006c0ee767ad98f5c49b},
  intrahash            = {9ef5fde9db618bbb66f7299ef329deb1},
  isbn                 = {1-58113-332-4},
  keywords             = {clustering entityguides summary},
  location             = {Santa Barbara, California, United States},
  pages                = {79--90},
  posted-at            = {2009-02-10 10:56:05},
  priority             = {2},
  publisher            = {ACM},
  timestamp            = {2009-03-12T15:42:50.000+0100},
  title                = {Data bubbles: quality preserving performance boosting for hierarchical clustering},
  url                  = {https://doi.org/10.1145/375663.375672},
  year                 = {2001},
}