Comparing the performance of programming languages is difficult because they differ in many aspects including preferred programming abstractions, available frameworks, and their runtime systems. Nonetheless, the question about relative performance comes up repeatedly in the research community, industry, and wider audience of enthusiasts.
This paper presents 14 benchmarks and a novel methodology to assess the compiler effectiveness across language implementations. Using a set of common language abstractions, the benchmarks are implemented in Java, JavaScript, Ruby, Crystal, Newspeak, and Smalltalk. We show that the benchmarks exhibit a wide range of characteristics using language-agnostic metrics. Using four different languages on top of the same compiler, we show that the benchmarks perform similarly and therefore allow for a comparison of compiler effectiveness across languages. Based on anecdotes, we argue that these benchmarks help language implementers to identify performance bugs and optimization potential by comparing to other language implementations.
%0 Conference Paper
%1 Marr:2016:AWFY
%A Marr, Stefan
%A Daloze, Benoit
%A Mössenböck, Hanspeter
%B Proceedings of the 12th Symposium on Dynamic Languages
%D 2016
%I ACM
%K Benchmark Compiler Crystal Graal Java JavaScript MeMyPublication Metrics Newspeak NodeJS Performance Ruby Smalltalk Truffle myown
%P 120--131
%R 10.1145/2989225.2989232
%T Cross-Language Compiler Benchmarking---Are We Fast Yet?
%X Comparing the performance of programming languages is difficult because they differ in many aspects including preferred programming abstractions, available frameworks, and their runtime systems. Nonetheless, the question about relative performance comes up repeatedly in the research community, industry, and wider audience of enthusiasts.
This paper presents 14 benchmarks and a novel methodology to assess the compiler effectiveness across language implementations. Using a set of common language abstractions, the benchmarks are implemented in Java, JavaScript, Ruby, Crystal, Newspeak, and Smalltalk. We show that the benchmarks exhibit a wide range of characteristics using language-agnostic metrics. Using four different languages on top of the same compiler, we show that the benchmarks perform similarly and therefore allow for a comparison of compiler effectiveness across languages. Based on anecdotes, we argue that these benchmarks help language implementers to identify performance bugs and optimization potential by comparing to other language implementations.
%@ 978-1-4503-4445-6
@inproceedings{Marr:2016:AWFY,
  abstract       = {Comparing the performance of programming languages is difficult because they differ in many aspects including preferred programming abstractions, available frameworks, and their runtime systems. Nonetheless, the question about relative performance comes up repeatedly in the research community, industry, and wider audience of enthusiasts.
This paper presents 14 benchmarks and a novel methodology to assess the compiler effectiveness across language implementations. Using a set of common language abstractions, the benchmarks are implemented in Java, JavaScript, Ruby, Crystal, Newspeak, and Smalltalk. We show that the benchmarks exhibit a wide range of characteristics using language-agnostic metrics. Using four different languages on top of the same compiler, we show that the benchmarks perform similarly and therefore allow for a comparison of compiler effectiveness across languages. Based on anecdotes, we argue that these benchmarks help language implementers to identify performance bugs and optimization potential by comparing to other language implementations.},
  acceptancerate = {0.55},
  added-at       = {2016-08-13T22:53:41.000+0200},
  appendix       = {https://github.com/smarr/are-we-fast-yet#readme},
  author         = {Marr, Stefan and Daloze, Benoit and M{\"o}ssenb{\"o}ck, Hanspeter},
  biburl         = {https://www.bibsonomy.org/bibtex/20177b6515342452e425fd81ffb06eae9/gron},
  blog           = {https://stefan-marr.de/2016/10/cross-language-compiler-benchmarking-are-we-fast-yet/},
  booktitle      = {Proceedings of the 12th Symposium on Dynamic Languages},
  day            = 1,
  doi            = {10.1145/2989225.2989232},
  html           = {https://stefan-marr.de/papers/dls-marr-et-al-cross-language-compiler-benchmarking-are-we-fast-yet/},
  interhash      = {b19a3027d653a0a8a59199c0f576571c},
  intrahash      = {0177b6515342452e425fd81ffb06eae9},
  isbn           = {978-1-4503-4445-6},
  keywords       = {Benchmark Compiler Crystal Graal Java JavaScript MeMyPublication Metrics Newspeak NodeJS Performance Ruby Smalltalk Truffle myown},
  location       = {Amsterdam, Netherlands},
  month          = nov,
  note           = {(acceptance rate 55\%)},
  numpages       = {12},
  pages          = {120--131},
  pdf            = {https://stefan-marr.de/downloads/dls16-marr-et-al-cross-language-compiler-benchmarking-are-we-fast-yet.pdf},
  publisher      = {ACM},
  series         = {DLS'16},
  timestamp      = {2022-08-29T20:35:21.000+0200},
  title          = {Cross-Language Compiler Benchmarking---{Are We Fast Yet?}},
  year           = {2016}
}