Merge #18011: Replace current benchmarking framework with nanobench

78c312c983 Replace current benchmarking framework with nanobench (Martin Ankerl)

Pull request description:

  Replace current benchmarking framework with nanobench

  This replaces the current benchmarking framework with nanobench [1], an
  MIT-licensed single-header benchmarking library of which I am the
  author. In my opinion this has several advantages, especially on Linux:

  * Fast: running all benchmarks takes ~6 seconds instead of 4m13s on
    an Intel i7-8700 CPU @ 3.20GHz.

  * Accurate: for example, I ran the SipHash_32b benchmark 10 times and
    calculated the coefficient of variation (standard deviation / mean);
    see the short sketch after this list:

    * 0.57% CV for the old benchmarking framework
    * 0.20% CV for nanobench

    So the benchmark results with nanobench seem to vary less than with
    the old framework.

  * It automatically determines the runtime based on clock precision, so
    there is no need to specify the number of evaluations.

  * It measures instructions, cycles, branches, instructions per cycle, and
    branch misses (Linux only, when performance counters are available).

  * Output is in Markdown table format.

  * It warns about an unstable environment (frequency scaling, turbo, ...).

  * For better profiling, it is possible to set the environment variable
    NANOBENCH_ENDLESS to force endless running of a particular benchmark
    without the need to recompile. This makes it easy to e.g. run "perf top"
    and look at hotspots.
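
  For reference, the coefficient of variation quoted above is simply the
  sample standard deviation of the per-run results divided by their mean.
  A minimal sketch of that calculation (the helper and the numbers below are
  illustrative, not taken from this PR):

  ```cpp
  // Illustrative only: coefficient of variation over repeated benchmark results.
  #include <cmath>
  #include <cstdio>
  #include <vector>

  static double CoefficientOfVariation(const std::vector<double>& xs)
  {
      double mean = 0.0;
      for (double x : xs) mean += x;
      mean /= xs.size();

      double var = 0.0;
      for (double x : xs) var += (x - mean) * (x - mean);
      var /= xs.size() - 1; // sample variance

      return std::sqrt(var) / mean;
  }

  int main()
  {
      // e.g. ns/op reported by 10 runs of the same benchmark (made-up numbers)
      std::vector<double> runs{7.21, 7.25, 7.19, 7.23, 7.22, 7.24, 7.20, 7.26, 7.23, 7.22};
      std::printf("CV = %.2f%%\n", 100.0 * CoefficientOfVariation(runs));
  }
  ```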

  Here is an example copied and pasted from the terminal output:

  |             ns/byte |              byte/s |    err% |        ins/byte |        cyc/byte |    IPC |       bra/byte |   miss% |     total | benchmark
  |--------------------:|--------------------:|--------:|----------------:|----------------:|-------:|---------------:|--------:|----------:|:----------
  |                2.52 |      396,529,415.94 |    0.6% |           25.42 |            8.02 |  3.169 |           0.06 |    0.0% |      0.03 | `bench/crypto_hash.cpp RIPEMD160`
  |                1.87 |      535,161,444.83 |    0.3% |           21.36 |            5.95 |  3.589 |           0.06 |    0.0% |      0.02 | `bench/crypto_hash.cpp SHA1`
  |                3.22 |      310,344,174.79 |    1.1% |           36.80 |           10.22 |  3.601 |           0.09 |    0.0% |      0.04 | `bench/crypto_hash.cpp SHA256`
  |                2.01 |      496,375,796.23 |    0.0% |           18.72 |            6.43 |  2.911 |           0.01 |    1.0% |      0.00 | `bench/crypto_hash.cpp SHA256D64_1024`
  |                7.23 |      138,263,519.35 |    0.1% |           82.66 |           23.11 |  3.577 |           1.63 |    0.1% |      0.00 | `bench/crypto_hash.cpp SHA256_32b`
  |                3.04 |      328,780,166.40 |    0.3% |           35.82 |            9.69 |  3.696 |           0.03 |    0.0% |      0.03 | `bench/crypto_hash.cpp SHA512`

  [1] https://github.com/martinus/nanobench
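
  For context, here is roughly what a benchmark body looks like against
  nanobench's public API. This is a standalone sketch that assumes only the
  upstream nanobench.h header from [1]; it is not the exact macro-based
  wrapper used in this tree:

  ```cpp
  // Standalone nanobench example (assumes the single header from [1]).
  #define ANKERL_NANOBENCH_IMPLEMENT
  #include <nanobench.h>

  #include <cstdint>
  #include <vector>

  int main()
  {
      std::vector<uint64_t> v(1000, 1);
      uint64_t sum = 0;
      // nanobench picks the number of iterations automatically based on
      // clock precision, so no evaluation count needs to be specified.
      ankerl::nanobench::Bench().run("sum of 1000 uint64_t", [&] {
          for (uint64_t x : v) sum += x;
          ankerl::nanobench::doNotOptimizeAway(sum);
      });
  }
  ```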

ACKs for top commit:
  laanwj:
    ACK 78c312c983

Tree-SHA512: 9e18770b18b6f95a7d0105a4a5497d31cf4eb5efe6574f4482f6f1b4c88d7e0946b9a4a1e9e8e6ecbf41a3f2d7571240677dcb45af29a6f0584e89b25f32e49e
Wladimir J. van der Laan committed on 2020-07-30 15:20:19 +02:00
38 changed files with 3656 additions and 585 deletions


@@ -10,26 +10,30 @@
 #include <memory>
-static const int64_t DEFAULT_BENCH_EVALUATIONS = 5;
 static const char* DEFAULT_BENCH_FILTER = ".*";
-static const char* DEFAULT_BENCH_SCALING = "1.0";
-static const char* DEFAULT_BENCH_PRINTER = "console";
-static const char* DEFAULT_PLOT_PLOTLYURL = "https://cdn.plot.ly/plotly-latest.min.js";
-static const int64_t DEFAULT_PLOT_WIDTH = 1024;
-static const int64_t DEFAULT_PLOT_HEIGHT = 768;
 static void SetupBenchArgs(ArgsManager& argsman)
 {
     SetupHelpOptions(argsman);
-    argsman.AddArg("-list", "List benchmarks without executing them. Can be combined with -scaling and -filter", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
-    argsman.AddArg("-evals=<n>", strprintf("Number of measurement evaluations to perform. (default: %u)", DEFAULT_BENCH_EVALUATIONS), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+    argsman.AddArg("-list", "List benchmarks without executing them", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
     argsman.AddArg("-filter=<regex>", strprintf("Regular expression filter to select benchmark by name (default: %s)", DEFAULT_BENCH_FILTER), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
-    argsman.AddArg("-scaling=<n>", strprintf("Scaling factor for benchmark's runtime (default: %u)", DEFAULT_BENCH_SCALING), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
-    argsman.AddArg("-printer=(console|plot)", strprintf("Choose printer format. console: print data to console. plot: Print results as HTML graph (default: %s)", DEFAULT_BENCH_PRINTER), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
-    argsman.AddArg("-plot-plotlyurl=<uri>", strprintf("URL to use for plotly.js (default: %s)", DEFAULT_PLOT_PLOTLYURL), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
-    argsman.AddArg("-plot-width=<x>", strprintf("Plot width in pixel (default: %u)", DEFAULT_PLOT_WIDTH), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
-    argsman.AddArg("-plot-height=<x>", strprintf("Plot height in pixel (default: %u)", DEFAULT_PLOT_HEIGHT), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+    argsman.AddArg("-asymptote=n1,n2,n3,...", strprintf("Test asymptotic growth of the runtime of an algorithm, if supported by the benchmark"), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+    argsman.AddArg("-output_csv=<output.csv>", "Generate CSV file with the most important benchmark results.", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+    argsman.AddArg("-output_json=<output.json>", "Generate JSON file with all benchmark results.", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
 }
+// parses a comma separated list like "10,20,30,50"
+static std::vector<double> parseAsymptote(const std::string& str) {
+    std::stringstream ss(str);
+    std::vector<double> numbers;
+    double d;
+    char c;
+    while (ss >> d) {
+        numbers.push_back(d);
+        ss >> c;
+    }
+    return numbers;
+}
 int main(int argc, char** argv)
@@ -49,34 +53,14 @@ int main(int argc, char** argv)
         return EXIT_SUCCESS;
     }
-    int64_t evaluations = argsman.GetArg("-evals", DEFAULT_BENCH_EVALUATIONS);
-    std::string regex_filter = argsman.GetArg("-filter", DEFAULT_BENCH_FILTER);
-    std::string scaling_str = argsman.GetArg("-scaling", DEFAULT_BENCH_SCALING);
-    bool is_list_only = argsman.GetBoolArg("-list", false);
+    benchmark::Args args;
+    args.regex_filter = argsman.GetArg("-filter", DEFAULT_BENCH_FILTER);
+    args.is_list_only = argsman.GetBoolArg("-list", false);
+    args.asymptote = parseAsymptote(argsman.GetArg("-asymptote", ""));
+    args.output_csv = argsman.GetArg("-output_csv", "");
+    args.output_json = argsman.GetArg("-output_json", "");
-    if (evaluations == 0) {
-        return EXIT_SUCCESS;
-    } else if (evaluations < 0) {
-        tfm::format(std::cerr, "Error parsing evaluations argument: %d\n", evaluations);
-        return EXIT_FAILURE;
-    }
-    double scaling_factor;
-    if (!ParseDouble(scaling_str, &scaling_factor)) {
-        tfm::format(std::cerr, "Error parsing scaling factor as double: %s\n", scaling_str);
-        return EXIT_FAILURE;
-    }
-    std::unique_ptr<benchmark::Printer> printer = MakeUnique<benchmark::ConsolePrinter>();
-    std::string printer_arg = argsman.GetArg("-printer", DEFAULT_BENCH_PRINTER);
-    if ("plot" == printer_arg) {
-        printer.reset(new benchmark::PlotlyPrinter(
-            argsman.GetArg("-plot-plotlyurl", DEFAULT_PLOT_PLOTLYURL),
-            argsman.GetArg("-plot-width", DEFAULT_PLOT_WIDTH),
-            argsman.GetArg("-plot-height", DEFAULT_PLOT_HEIGHT)));
-    }
-    benchmark::BenchRunner::RunAll(*printer, evaluations, scaling_factor, regex_filter, is_list_only);
+    benchmark::BenchRunner::RunAll(args);
     return EXIT_SUCCESS;
 }
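
For completeness, the comma-separated parsing done by parseAsymptote above can
be exercised on its own. A small, hypothetical test harness (not part of the
diff), using the same stringstream technique:

```cpp
#include <iostream>
#include <sstream>
#include <string>
#include <vector>

// Same technique as parseAsymptote above: extract a double, then consume the
// separator character, until extraction fails.
static std::vector<double> ParseCommaSeparated(const std::string& str)
{
    std::stringstream ss(str);
    std::vector<double> numbers;
    double d;
    char c;
    while (ss >> d) {
        numbers.push_back(d);
        ss >> c;
    }
    return numbers;
}

int main()
{
    for (double n : ParseCommaSeparated("10,20,30,50")) {
        std::cout << n << '\n'; // prints 10, 20, 30, 50 on separate lines
    }
}
```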