Changeset View
Changeset View
Standalone View
Standalone View
src/bench/bench.cpp
// Copyright (c) 2015-2019 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <bench/bench.h>

#include <chainparams.h>
#include <test/util/setup_common.h>
#include <validation.h>

#include <algorithm>
#include <cassert>
#include <fstream>
#include <iomanip>
#include <iostream>
#include <numeric>
#include <regex>
void benchmark::ConsolePrinter::header() { | namespace { | ||||
std::cout << "# Benchmark, evals, iterations, total, min, max, median" | |||||
<< std::endl; | |||||
} | |||||
void benchmark::ConsolePrinter::result(const State &state) { | |||||
auto results = state.m_elapsed_results; | |||||
std::sort(results.begin(), results.end()); | |||||
double total = state.m_num_iters * | |||||
std::accumulate(results.begin(), results.end(), 0.0); | |||||
double front = 0; | |||||
double back = 0; | |||||
double median = 0; | |||||
if (!results.empty()) { | |||||
front = results.front(); | |||||
back = results.back(); | |||||
size_t mid = results.size() / 2; | void GenerateTemplateResults( | ||||
median = results[mid]; | const std::vector<ankerl::nanobench::Result> &benchmarkResults, | ||||
if (0 == results.size() % 2) { | const std::string &filename, const char *tpl) { | ||||
median = (results[mid] + results[mid + 1]) / 2; | if (benchmarkResults.empty() || filename.empty()) { | ||||
// nothing to write, bail out | |||||
return; | |||||
} | } | ||||
std::ofstream fout(filename); | |||||
if (fout.is_open()) { | |||||
ankerl::nanobench::render(tpl, benchmarkResults, fout); | |||||
} else { | |||||
std::cout << "Could write to file '" << filename << "'" << std::endl; | |||||
} | } | ||||
std::cout << std::setprecision(6); | std::cout << "Created '" << filename << "'" << std::endl; | ||||
std::cout << state.m_name << ", " << state.m_num_evals << ", " | |||||
<< state.m_num_iters << ", " << total << ", " << front << ", " | |||||
<< back << ", " << median << std::endl; | |||||
} | } | ||||
void benchmark::ConsolePrinter::footer() {} | } // namespace | ||||
benchmark::PlotlyPrinter::PlotlyPrinter(std::string plotly_url, int64_t width, | |||||
int64_t height) | |||||
: m_plotly_url(plotly_url), m_width(width), m_height(height) {} | |||||
void benchmark::PlotlyPrinter::header() { | |||||
std::cout << "<html><head>" | |||||
<< "<script src=\"" << m_plotly_url << "\"></script>" | |||||
<< "</head><body><div id=\"myDiv\" style=\"width:" << m_width | |||||
<< "px; height:" << m_height << "px\"></div>" | |||||
<< "<script> var data = [" << std::endl; | |||||
} | |||||
void benchmark::PlotlyPrinter::result(const State &state) { | |||||
std::cout << "{ " << std::endl | |||||
<< " name: '" << state.m_name << "', " << std::endl | |||||
<< " y: ["; | |||||
const char *prefix = ""; | |||||
for (const auto &e : state.m_elapsed_results) { | |||||
std::cout << prefix << std::setprecision(6) << e; | |||||
prefix = ", "; | |||||
} | |||||
std::cout << "]," << std::endl | |||||
<< " boxpoints: 'all', jitter: 0.3, pointpos: 0, type: 'box'," | |||||
<< std::endl | |||||
<< "}," << std::endl; | |||||
} | |||||
void benchmark::PlotlyPrinter::footer() { | |||||
std::cout << "]; var layout = { showlegend: false, yaxis: { rangemode: " | |||||
"'tozero', autorange: true } };" | |||||
<< "Plotly.newPlot('myDiv', data, layout);" | |||||
<< "</script></body></html>"; | |||||
} | |||||
void benchmark::JunitPrinter::header() { | |||||
std::cout << "<?xml version='1.0' encoding='UTF-8'?>" << std::endl; | |||||
} | |||||
void benchmark::JunitPrinter::result(const State &state) { | |||||
auto results = state.m_elapsed_results; | |||||
double bench_duration = | |||||
state.m_num_iters * | |||||
std::accumulate(results.begin(), results.end(), 0.0); | |||||
/* | |||||
* Don't print the results now, we need them all to build the <testsuite> | |||||
* node. | |||||
*/ | |||||
bench_results.emplace_back(std::move(state.m_name), bench_duration); | |||||
total_duration += bench_duration; | |||||
} | |||||
void benchmark::JunitPrinter::footer() { | |||||
std::cout << std::setprecision(6); | |||||
std::cout << "<testsuite tests=\"" << bench_results.size() << "\" time=\"" | |||||
<< total_duration | |||||
<< "\" name=\"Bitcoin ABC benchmarks\" id=\"0\">" << std::endl; | |||||
for (const auto &result : bench_results) { | |||||
std::cout << "<testcase classname=\"" << result.first << "\" name=\"" | |||||
<< result.first << "\" time=\"" << result.second | |||||
<< "\"></testcase>" << std::endl; | |||||
} | |||||
std::cout << "</testsuite>" << std::endl; | |||||
} | |||||
/**
 * Registry of all benchmarks, keyed (and therefore run) by name.
 *
 * Function-local static ("Meyers singleton") so registration from static
 * BenchRunner constructors in other translation units is safe regardless of
 * static initialization order.
 */
benchmark::BenchRunner::BenchmarkMap &benchmark::BenchRunner::benchmarks() {
    static std::map<std::string, BenchFunction> benchmarks_map;
    return benchmarks_map;
}

//! Register a benchmark; invoked via static BENCHMARK(...) objects.
benchmark::BenchRunner::BenchRunner(std::string name,
                                    benchmark::BenchFunction func) {
    // Fix: both parameters arrive by value; move them into the map instead
    // of copying again via insert(make_pair(...)).
    benchmarks().emplace(std::move(name), std::move(func));
}
void benchmark::BenchRunner::RunAll(Printer &printer, uint64_t num_evals, | |||||
double scaling, const std::string &filter, | |||||
bool is_list_only) { | |||||
if (!std::ratio_less_equal<benchmark::clock::period, std::micro>::value) { | |||||
std::cerr << "WARNING: Clock precision is worse than microsecond - " | |||||
"benchmarks may be less accurate!\n"; | |||||
} | |||||
#ifdef DEBUG | |||||
std::cerr << "WARNING: This is a debug build - may result in slower " | |||||
"benchmarks.\n"; | |||||
#endif | |||||
std::regex reFilter(filter); | void benchmark::BenchRunner::RunAll(const Args &args) { | ||||
std::regex reFilter(args.regex_filter); | |||||
std::smatch baseMatch; | std::smatch baseMatch; | ||||
printer.header(); | std::vector<ankerl::nanobench::Result> benchmarkResults; | ||||
for (const auto &p : benchmarks()) { | for (const auto &p : benchmarks()) { | ||||
if (!std::regex_match(p.first, baseMatch, reFilter)) { | if (!std::regex_match(p.first, baseMatch, reFilter)) { | ||||
continue; | continue; | ||||
} | } | ||||
uint64_t num_iters = | if (args.is_list_only) { | ||||
static_cast<uint64_t>(p.second.num_iters_for_one_second * scaling); | std::cout << p.first << std::endl; | ||||
if (0 == num_iters) { | continue; | ||||
num_iters = 1; | |||||
} | |||||
State state(p.first, num_evals, num_iters, printer); | |||||
if (!is_list_only) { | |||||
p.second.func(state); | |||||
} | |||||
printer.result(state); | |||||
} | |||||
printer.footer(); | |||||
} | |||||
bool benchmark::State::UpdateTimer(const benchmark::time_point current_time) { | |||||
if (m_start_time != time_point()) { | |||||
std::chrono::duration<double> diff = current_time - m_start_time; | |||||
m_elapsed_results.push_back(diff.count() / m_num_iters); | |||||
if (m_elapsed_results.size() == m_num_evals) { | |||||
return false; | |||||
} | |||||
} | } | ||||
m_num_iters_left = m_num_iters - 1; | Bench bench; | ||||
return true; | bench.name(p.first); | ||||
if (args.asymptote.empty()) { | |||||
p.second(bench); | |||||
} else { | |||||
for (auto n : args.asymptote) { | |||||
bench.complexityN(n); | |||||
p.second(bench); | |||||
} | |||||
std::cout << bench.complexityBigO() << std::endl; | |||||
} | |||||
benchmarkResults.push_back(bench.results().back()); | |||||
} | |||||
GenerateTemplateResults( | |||||
benchmarkResults, args.output_csv, | |||||
"# Benchmark, evals, iterations, total, min, max, median\n" | |||||
"{{#result}}{{name}}, {{epochs}}, {{average(iterations)}}, " | |||||
"{{sumProduct(iterations, elapsed)}}, {{minimum(elapsed)}}, " | |||||
"{{maximum(elapsed)}}, {{median(elapsed)}}\n" | |||||
"{{/result}}"); | |||||
GenerateTemplateResults(benchmarkResults, args.output_json, | |||||
ankerl::nanobench::templates::json()); | |||||
} | } |