Replace current benchmarking framework with nanobench
This replaces the current benchmarking framework with nanobench [1], an MIT-licensed single-header benchmarking library, of which I am the author. In my opinion this has several advantages, especially on Linux:

* Fast: running all benchmarks takes ~6 seconds instead of 4m13s on an Intel i7-8700 CPU @ 3.20GHz.
* Accurate: for example, I ran the benchmark for SipHash_32b 10 times and calculated standard deviation / mean, the coefficient of variation (CV):
  * 0.57% CV for the old benchmarking framework
  * 0.20% CV for nanobench

  So the benchmark results with nanobench seem to vary less than with the old framework.
* It automatically determines the runtime based on clock precision; there is no need to specify the number of evaluations.
* It measures instructions, cycles, branches, instructions per cycle, and branch misses (Linux only, when performance counters are available).
* Output is in markdown table format.
* It warns about an unstable environment (frequency scaling, turbo, ...).
* For better profiling, the environment variable NANOBENCH_ENDLESS can be set to force endless running of a particular benchmark without the need to recompile. This makes it easy to e.g. run `perf top` and look at hotspots.

Here is an example, copy & pasted from the terminal output:

| ns/byte | byte/s | err% | ins/byte | cyc/byte | IPC | bra/byte | miss% | total | benchmark
|--------------------:|--------------------:|--------:|----------------:|----------------:|-------:|---------------:|--------:|----------:|:----------
| 2.52 | 396,529,415.94 | 0.6% | 25.42 | 8.02 | 3.169 | 0.06 | 0.0% | 0.03 | `bench/crypto_hash.cpp RIPEMD160`
| 1.87 | 535,161,444.83 | 0.3% | 21.36 | 5.95 | 3.589 | 0.06 | 0.0% | 0.02 | `bench/crypto_hash.cpp SHA1`
| 3.22 | 310,344,174.79 | 1.1% | 36.80 | 10.22 | 3.601 | 0.09 | 0.0% | 0.04 | `bench/crypto_hash.cpp SHA256`
| 2.01 | 496,375,796.23 | 0.0% | 18.72 | 6.43 | 2.911 | 0.01 | 1.0% | 0.00 | `bench/crypto_hash.cpp SHA256D64_1024`
| 7.23 | 138,263,519.35 | 0.1% | 82.66 | 23.11 | 3.577 | 1.63 | 0.1% | 0.00 | `bench/crypto_hash.cpp SHA256_32b`
| 3.04 | 328,780,166.40 | 0.3% | 35.82 | 9.69 | 3.696 | 0.03 | 0.0% | 0.03 | `bench/crypto_hash.cpp SHA512`

[1] https://github.com/martinus/nanobench

* Adds support for asymptotes

This adds support for calculating the asymptotic complexity of a benchmark. This is similar to #17375, but currently only one asymptote is supported, and I have added support in the benchmark `ComplexMemPool` as an example.

Usage is e.g. like this:

```
./bench_bitcoin -filter=ComplexMemPool -asymptote=25,50,100,200,400,600,800
```

This runs the benchmark `ComplexMemPool` several times, each time with a different `complexityN` setting. The benchmark can extract that number and use it accordingly; here, it is used for `childTxs`.
The output is this:

| complexityN | ns/op | op/s | err% | ins/op | cyc/op | IPC | total | benchmark
|------------:|--------------------:|--------------------:|--------:|----------------:|----------------:|-------:|----------:|:----------
| 25 | 1,064,241.00 | 939.64 | 1.4% | 3,960,279.00 | 2,829,708.00 | 1.400 | 0.01 | `ComplexMemPool`
| 50 | 1,579,530.00 | 633.10 | 1.0% | 6,231,810.00 | 4,412,674.00 | 1.412 | 0.02 | `ComplexMemPool`
| 100 | 4,022,774.00 | 248.58 | 0.6% | 16,544,406.00 | 11,889,535.00 | 1.392 | 0.04 | `ComplexMemPool`
| 200 | 15,390,986.00 | 64.97 | 0.2% | 63,904,254.00 | 47,731,705.00 | 1.339 | 0.17 | `ComplexMemPool`
| 400 | 69,394,711.00 | 14.41 | 0.1% | 272,602,461.00 | 219,014,691.00 | 1.245 | 0.76 | `ComplexMemPool`
| 600 | 168,977,165.00 | 5.92 | 0.1% | 639,108,082.00 | 535,316,887.00 | 1.194 | 1.86 | `ComplexMemPool`
| 800 | 310,109,077.00 | 3.22 | 0.1% | 1,149,134,246.00 | 984,620,812.00 | 1.167 | 3.41 | `ComplexMemPool`

| coefficient | err% | complexity
|--------------:|-------:|------------
| 4.78486e-07 | 4.5% | O(n^2)
| 6.38557e-10 | 21.7% | O(n^3)
| 3.42338e-05 | 38.0% | O(n log n)
| 0.000313914 | 46.9% | O(n)
| 0.0129823 | 114.4% | O(log n)
| 0.0815055 | 133.8% | O(1)

The best-fitting curve is O(n^2), so the algorithm seems to scale quadratically with `childTxs` in the range 25 to 800.
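For illustration only (this sketch is not part of the commit), the complexity fit above is produced by nanobench's documented `complexityN()` / `complexityBigO()` API, which the `-asymptote` option feeds. The `std::set` workload below is an arbitrary stand-in for which an O(log n) fit would be expected:

```
// Standalone sketch of nanobench's asymptotic complexity fitting.
#include <nanobench.h>

#include <cstdint>
#include <iostream>
#include <set>

int main()
{
    ankerl::nanobench::Bench bench;
    for (size_t n : {25U, 50U, 100U, 200U, 400U, 600U, 800U}) {
        // Setup outside the measured lambda: a set with n elements.
        std::set<uint64_t> s;
        for (uint64_t i = 0; i < n; ++i) s.insert(i);

        // Tag each measurement with its problem size, then time the lookup.
        bench.complexityN(n).run("std::set lookup", [&] {
            ankerl::nanobench::doNotOptimizeAway(s.find(n / 2));
        });
    }
    // Prints the fitted curves (O(1), O(n), O(n log n), ...), best fit first.
    std::cout << bench.complexityBigO() << std::endl;
}
```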
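More generally, the markdown tables shown earlier are what nanobench prints for any measured snippet. A minimal standalone sketch (again not code from this commit; the integer-mixing workload is arbitrary):

```
#include <nanobench.h>

#include <cstdint>

int main()
{
    uint64_t x = 123;
    // Measures the lambda and prints ns/op, instructions, cycles, IPC,
    // branches and branch misses as a markdown table row.
    ankerl::nanobench::Bench().run("shift-xor mix", [&] {
        x ^= x << 13;
        x ^= x >> 7;
        ankerl::nanobench::doNotOptimizeAway(x);
    });
}
```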
src/bench/bench.h:

@@ -11,131 +11,53 @@
 #include <string>
 #include <vector>
 
+#include <bench/nanobench.h>
 #include <boost/preprocessor/cat.hpp>
 #include <boost/preprocessor/stringize.hpp>
 
-// Simple micro-benchmarking framework; API mostly matches a subset of the Google Benchmark
-// framework (see https://github.com/google/benchmark)
-// Why not use the Google Benchmark framework? Because adding Yet Another Dependency
-// (that uses cmake as its build system and has lots of features we don't need) isn't
-// worth it.
-
 /*
  * Usage:
 
-static void CODE_TO_TIME(benchmark::State& state)
+static void CODE_TO_TIME(benchmark::Bench& bench)
 {
     ... do any setup needed...
-    while (state.KeepRunning()) {
+    nanobench::Config().run([&] {
        ... do stuff you want to time...
-    }
+    });
     ... do any cleanup needed...
 }
 
-// default to running benchmark for 5000 iterations
-BENCHMARK(CODE_TO_TIME, 5000);
+BENCHMARK(CODE_TO_TIME);
 
  */
 
 namespace benchmark {
-// In case high_resolution_clock is steady, prefer that, otherwise use steady_clock.
-struct best_clock {
-    using hi_res_clock = std::chrono::high_resolution_clock;
-    using steady_clock = std::chrono::steady_clock;
-    using type = std::conditional<hi_res_clock::is_steady, hi_res_clock, steady_clock>::type;
-};
-using clock = best_clock::type;
-using time_point = clock::time_point;
-using duration = clock::duration;
-
-class Printer;
-
-class State
-{
-public:
-    std::string m_name;
-    uint64_t m_num_iters_left;
-    const uint64_t m_num_iters;
-    const uint64_t m_num_evals;
-    std::vector<double> m_elapsed_results;
-    time_point m_start_time;
-
-    bool UpdateTimer(time_point finish_time);
-
-    State(std::string name, uint64_t num_evals, double num_iters, Printer& printer) : m_name(name), m_num_iters_left(0), m_num_iters(num_iters), m_num_evals(num_evals)
-    {
-    }
-
-    inline bool KeepRunning()
-    {
-        if (m_num_iters_left--) {
-            return true;
-        }
-
-        bool result = UpdateTimer(clock::now());
-        // measure again so runtime of UpdateTimer is not included
-        m_start_time = clock::now();
-        return result;
-    }
-};
-
-typedef std::function<void(State&)> BenchFunction;
+
+using ankerl::nanobench::Bench;
+
+typedef std::function<void(Bench&)> BenchFunction;
+
+struct Args {
+    std::string regex_filter;
+    bool is_list_only;
+    std::vector<double> asymptote;
+    std::string output_csv;
+    std::string output_json;
+};
 
 class BenchRunner
 {
-    struct Bench {
-        BenchFunction func;
-        uint64_t num_iters_for_one_second;
-    };
-    typedef std::map<std::string, Bench> BenchmarkMap;
+    typedef std::map<std::string, BenchFunction> BenchmarkMap;
     static BenchmarkMap& benchmarks();
 
 public:
-    BenchRunner(std::string name, BenchFunction func, uint64_t num_iters_for_one_second);
+    BenchRunner(std::string name, BenchFunction func);
 
-    static void RunAll(Printer& printer, uint64_t num_evals, double scaling, const std::string& filter, bool is_list_only);
+    static void RunAll(const Args& args);
 };
-
-// interface to output benchmark results.
-class Printer
-{
-public:
-    virtual ~Printer() {}
-    virtual void header() = 0;
-    virtual void result(const State& state) = 0;
-    virtual void footer() = 0;
-};
-
-// default printer to console, shows min, max, median.
-class ConsolePrinter : public Printer
-{
-public:
-    void header() override;
-    void result(const State& state) override;
-    void footer() override;
-};
-
-// creates box plot with plotly.js
-class PlotlyPrinter : public Printer
-{
-public:
-    PlotlyPrinter(std::string plotly_url, int64_t width, int64_t height);
-    void header() override;
-    void result(const State& state) override;
-    void footer() override;
-
-private:
-    std::string m_plotly_url;
-    int64_t m_width;
-    int64_t m_height;
-};
 }
 
-// BENCHMARK(foo, num_iters_for_one_second) expands to: benchmark::BenchRunner bench_11foo("foo", num_iterations);
-// Choose a num_iters_for_one_second that takes roughly 1 second. The goal is that all benchmarks should take approximately
-// the same time, and scaling factor can be used that the total time is appropriate for your system.
-#define BENCHMARK(n, num_iters_for_one_second) \
-    benchmark::BenchRunner BOOST_PP_CAT(bench_, BOOST_PP_CAT(__LINE__, n))(BOOST_PP_STRINGIZE(n), n, (num_iters_for_one_second));
+// BENCHMARK(foo) expands to: benchmark::BenchRunner bench_11foo("foo");
+#define BENCHMARK(n) \
+    benchmark::BenchRunner BOOST_PP_CAT(bench_, BOOST_PP_CAT(__LINE__, n))(BOOST_PP_STRINGIZE(n), n);
 
 #endif // BITCOIN_BENCH_BENCH_H
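To illustrate the new interface end to end, here is a hedged sketch of a converted benchmark in the style of this commit's examples (names such as `UninterruptibleSleep` and its `util/time.h` location are assumed from Bitcoin Core's util headers). The old `while (state.KeepRunning())` loop becomes a single `bench.run()` call, and the per-benchmark iteration count disappears because nanobench tunes it automatically:

```
#include <bench/bench.h>
#include <util/time.h>

#include <chrono>

// Before: static void Sleep100ms(benchmark::State& state) with a
// KeepRunning() loop, registered as BENCHMARK(Sleep100ms, 10);
static void Sleep100ms(benchmark::Bench& bench)
{
    bench.run([&] {
        UninterruptibleSleep(std::chrono::milliseconds{100});
    });
}
BENCHMARK(Sleep100ms);
```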