This paper presents an experimental study of the automatic correction of broken (dead) Web links focusing, in particular, on links broken by the relocation of Web pages. Our first contribution is that we developed an algorithm that incorporates a comprehensive set of heuristics, some of which are novel, in a single unified framework. The second contribution is that we conducted a relatively large-scale experiment, and analysis of our results revealed the characteristics of the problem of finding moved Web pages. We demonstrated empirically that the problem of searching for moved pages is different from typical information retrieval problems. First, it is impossible to identify the final destination until the page is moved, so the index-server approach is not necessarily effective. Secondly, there is a large bias about where the new address is likely to be and crawler-based solutions can be effectively implemented, avoiding the need to search the entire Web. We analyzed the experimental results in detail to show how important each heuristic is in real Web settings, and conducted statistical analyses to show that our algorithm succeeds in correctly finding new links for more than 70\% of broken links at 95\% confidence level.
(private-note)Very interesting approach to finding dead links. It uses a link chaser, which, in turn, relies on several heuristics and also on known link authorities - pages which refer to links and happen to have them updated.
%0 Conference Paper
%1 citeulike:5019954
%A Morishima, Atsuyuki
%A Nakamizo, Akiyoshi
%A Iida, Toshinari
%A Sugimoto, Shigeo
%A Kitagawa, Hiroyuki
%B HT '09: Proceedings of the 20th ACM conference on Hypertext and hypermedia
%C New York, NY, USA
%D 2009
%I ACM
%K dead-link www
%P 15--24
%R 10.1145/1557914.1557921
%T Bringing your dead links back to life: a comprehensive approach and lessons learned
%U http://dx.doi.org/10.1145/1557914.1557921
%X This paper presents an experimental study of the automatic correction of broken (dead) Web links focusing, in particular, on links broken by the relocation of Web pages. Our first contribution is that we developed an algorithm that incorporates a comprehensive set of heuristics, some of which are novel, in a single unified framework. The second contribution is that we conducted a relatively large-scale experiment, and analysis of our results revealed the characteristics of the problem of finding moved Web pages. We demonstrated empirically that the problem of searching for moved pages is different from typical information retrieval problems. First, it is impossible to identify the final destination until the page is moved, so the index-server approach is not necessarily effective. Secondly, there is a large bias about where the new address is likely to be and crawler-based solutions can be effectively implemented, avoiding the need to search the entire Web. We analyzed the experimental results in detail to show how important each heuristic is in real Web settings, and conducted statistical analyses to show that our algorithm succeeds in correctly finding new links for more than 70\% of broken links at 95\% confidence level.
%@ 978-1-60558-486-7
@inproceedings{citeulike:5019954,
  abstract             = {This paper presents an experimental study of the automatic correction of broken (dead) Web links focusing, in particular, on links broken by the relocation of Web pages. Our first contribution is that we developed an algorithm that incorporates a comprehensive set of heuristics, some of which are novel, in a single unified framework. The second contribution is that we conducted a relatively large-scale experiment, and analysis of our results revealed the characteristics of the problem of finding moved Web pages. We demonstrated empirically that the problem of searching for moved pages is different from typical information retrieval problems. First, it is impossible to identify the final destination until the page is moved, so the index-server approach is not necessarily effective. Secondly, there is a large bias about where the new address is likely to be and crawler-based solutions can be effectively implemented, avoiding the need to search the entire Web. We analyzed the experimental results in detail to show how important each heuristic is in real Web settings, and conducted statistical analyses to show that our algorithm succeeds in correctly finding new links for more than 70\% of broken links at 95\% confidence level.},
  added-at             = {2018-03-19T12:24:51.000+0100},
  address              = {New York, NY, USA},
  author               = {Morishima, Atsuyuki and Nakamizo, Akiyoshi and Iida, Toshinari and Sugimoto, Shigeo and Kitagawa, Hiroyuki},
  biburl               = {https://www.bibsonomy.org/bibtex/240e07fec49693cc3861ecf3fb25aa815/aho},
  booktitle            = {HT '09: Proceedings of the 20th ACM conference on Hypertext and hypermedia},
  citeulike-article-id = {5019954},
  citeulike-linkout-0  = {http://portal.acm.org/citation.cfm?id=1557914.1557921},
  citeulike-linkout-1  = {http://dx.doi.org/10.1145/1557914.1557921},
  comment              = {(private-note)Very interesting approach to finding dead links. It uses a link chaser, which, in turn, relies on several heuristics and also on known link authorities - pages which refer to links and happen to have them updated.},
  doi                  = {10.1145/1557914.1557921},
  interhash            = {4ee86291d78698b239edee7c54f967fe},
  intrahash            = {40e07fec49693cc3861ecf3fb25aa815},
  isbn                 = {978-1-60558-486-7},
  keywords             = {dead-link www},
  location             = {Torino, Italy},
  pages                = {15--24},
  posted-at            = {2009-06-30 09:12:43},
  priority             = {0},
  publisher            = {ACM},
  timestamp            = {2018-03-19T12:24:51.000+0100},
  title                = {Bringing Your Dead Links Back to Life: A Comprehensive Approach and Lessons Learned},
  url                  = {http://dx.doi.org/10.1145/1557914.1557921},
  year                 = {2009}
}