Benchmarks

This commit is contained in:
Piotr Wójcik 2025-07-13 10:25:06 +02:00
commit 0e9d1c2807
14 changed files with 2006 additions and 6 deletions

View file

@ -312,10 +312,10 @@ spdlog::level::level_enum nano::logger::to_spdlog_level (nano::log::level level)
* logging config presets
*/
nano::log_config nano::log_config::cli_default ()
nano::log_config nano::log_config::cli_default (nano::log::level default_level)
{
log_config config{};
config.default_level = nano::log::level::critical;
config.default_level = default_level;
config.console.colors = false;
config.console.to_cerr = true; // Use cerr to avoid interference with CLI output that goes to stdout
config.file.enable = false;

View file

@ -134,7 +134,7 @@ public:
nano::log::tracing_format tracing_format{ nano::log::tracing_format::standard };
public: // Predefined defaults
static log_config cli_default ();
static log_config cli_default (nano::log::level default_level = nano::log::level::critical);
static log_config daemon_default ();
static log_config tests_default ();
static log_config dummy_default (); // For empty logger

View file

@ -1,4 +1,14 @@
add_executable(nano_node daemon.cpp daemon.hpp entry.cpp)
add_executable(
nano_node
benchmarks/benchmarks.cpp
benchmarks/benchmarks.hpp
benchmarks/benchmark_block_processing.cpp
benchmarks/benchmark_cementing.cpp
benchmarks/benchmark_elections.cpp
benchmarks/benchmark_pipeline.cpp
daemon.cpp
daemon.hpp
entry.cpp)
target_link_libraries(nano_node node Boost::process ${PLATFORM_LIBS})

View file

@ -0,0 +1,253 @@
#include <nano/lib/config.hpp>
#include <nano/lib/locks.hpp>
#include <nano/lib/thread_runner.hpp>
#include <nano/lib/timer.hpp>
#include <nano/lib/work.hpp>
#include <nano/lib/work_version.hpp>
#include <nano/nano_node/benchmarks/benchmarks.hpp>
#include <nano/node/cli.hpp>
#include <nano/node/daemonconfig.hpp>
#include <nano/node/ledger_notifications.hpp>
#include <boost/asio/io_context.hpp>
#include <atomic>
#include <chrono>
#include <iostream>
#include <limits>
#include <memory>
#include <thread>
#include <unordered_set>
#include <fmt/format.h>
namespace nano::cli
{
/*
* Block Processing Benchmark
*
* Measures the performance of the block processor - the component responsible for validating
* and inserting blocks into the ledger. This benchmark tests raw block processing throughput
* without elections or confirmation.
*
* How it works:
* 1. Setup: Creates a node with unlimited queue sizes and disabled work requirements
* 2. Generate: Creates random transfer transactions (send/receive pairs) between accounts
* 3. Submit: Adds all blocks to the block processor queue via block_processor.add()
* 4. Measure: Tracks time from submission until all blocks are processed into the ledger
* 5. Report: Calculates blocks/sec throughput and final account states
*
* What is tested:
* - Block validation speed (signature verification, balance checks, etc.)
* - Ledger write performance (database insertion)
* - Block processor queue management
* - Unchecked block handling for out-of-order blocks
*
* What is NOT tested:
* - Elections or voting (blocks are not confirmed)
* - Cementing (blocks remain unconfirmed)
* - Network communication (local-only testing)
*/
// Benchmark driver that feeds batches of blocks to the block processor and
// measures raw ledger-insertion throughput (no elections, no cementing)
class block_processing_benchmark : public benchmark_base
{
private:
	// Hashes submitted to the block processor that have not yet been reported
	// back via the blocks_processed notification (mutex-guarded set)
	nano::locked<std::unordered_set<nano::block_hash>> current_blocks;
	// Metrics updated from the blocks_processed handler (block processor
	// thread) and read from the benchmark thread, hence atomics
	std::atomic<size_t> processed_blocks_count{ 0 }; // status == progress
	std::atomic<size_t> failed_blocks_count{ 0 }; // any unexpected status
	std::atomic<size_t> old_blocks_count{ 0 }; // status == old (duplicate)
	std::atomic<size_t> gap_previous_count{ 0 }; // status == gap_previous
	std::atomic<size_t> gap_source_count{ 0 }; // status == gap_source
public:
	block_processing_benchmark (std::shared_ptr<nano::node> node_a, benchmark_config const & config_a);
	// Generates accounts, distributes genesis funds and runs all iterations
	void run ();
	// Submits one batch of blocks and waits until all of them are processed
	void run_iteration (std::deque<std::shared_ptr<nano::block>> & blocks);
	// Prints final counters once all iterations are done
	void print_statistics ();
};
// CLI entry point: sets up a throwaway dev-network node tuned for maximum
// block-processor throughput and runs the block processing benchmark.
// NOTE(review): data_path is currently unused — the node runs from a unique
// temporary path (nano::unique_path) instead; confirm this is intentional.
void run_block_processing_benchmark (boost::program_options::variables_map const & vm, std::filesystem::path const & data_path)
{
	// Parse benchmark parameters (accounts, iterations, batch size) from CLI options
	auto config = benchmark_config::parse (vm);
	std::cout << "=== BENCHMARK: Block Processing ===\n";
	std::cout << "Configuration:\n";
	std::cout << fmt::format (" Accounts: {}\n", config.num_accounts);
	std::cout << fmt::format (" Iterations: {}\n", config.num_iterations);
	std::cout << fmt::format (" Batch size: {}\n", config.batch_size);
	// Setup node directly in run method
	nano::network_constants::set_active_network ("dev");
	// Log at warn level so benchmark output is not drowned by node logging
	nano::logger::initialize (nano::log_config::cli_default (nano::log::level::warn));
	nano::node_flags node_flags;
	nano::update_flags (node_flags, vm);
	auto io_ctx = std::make_shared<boost::asio::io_context> ();
	// Unlimited work pool threads; work thresholds are zeroed below anyway
	nano::work_pool work_pool{ nano::dev::network_params.network, std::numeric_limits<unsigned>::max () };
	// Load configuration from current working directory (if exists) and cli config overrides
	auto daemon_config = nano::load_config_file<nano::daemon_config> (nano::node_config_filename, {}, node_flags.config_overrides);
	auto node_config = daemon_config.node;
	// Disable proof-of-work requirements so block generation is not the bottleneck
	node_config.network_params.work = nano::work_thresholds{ 0, 0, 0 };
	node_config.peering_port = 0; // Use random available port
	node_config.max_backlog = 0; // Disable bounded backlog
	node_config.block_processor.max_system_queue = std::numeric_limits<size_t>::max (); // Unlimited queue size
	node_config.max_unchecked_blocks = 1024 * 1024; // Large unchecked blocks cache to avoid dropping blocks
	auto node = std::make_shared<nano::node> (io_ctx, nano::unique_path (), node_config, work_pool, node_flags);
	node->start ();
	// Runner drives the io_context; must outlive the node's async operations
	nano::thread_runner runner (io_ctx, nano::default_logger (), node->config.io_threads);
	std::cout << "\nSystem Info:\n";
	std::cout << fmt::format (" Backend: {}\n", node->store.vendor_get ());
	std::cout << fmt::format (" Block processor threads: {}\n", 1); // TODO: Log number of block processor threads when upstreamed
	std::cout << fmt::format (" Block processor batch size: {}\n", node->config.block_processor.batch_size);
	std::cout << "\n";
	// Wait for node to be ready
	std::this_thread::sleep_for (500ms);
	// Run benchmark
	block_processing_benchmark benchmark{ node, config };
	benchmark.run ();
	node->stop ();
}
block_processing_benchmark::block_processing_benchmark (std::shared_ptr<nano::node> node_a, benchmark_config const & config_a) :
	benchmark_base (node_a, config_a)
{
	// Observe block processing results so completion can be tracked and each
	// outcome tallied into the appropriate counter
	node->ledger_notifications.blocks_processed.add ([this] (std::deque<std::pair<nano::block_status, nano::block_context>> const & batch) {
		auto tracked = current_blocks.lock ();
		for (auto const & [status, context] : batch)
		{
			switch (status)
			{
				case nano::block_status::progress:
					// Successfully inserted into the ledger; stop tracking it
					tracked->erase (context.block->hash ());
					processed_blocks_count++;
					break;
				case nano::block_status::old:
					// Block already exists in ledger
					old_blocks_count++;
					break;
				case nano::block_status::gap_previous:
					// Missing previous block, should be handled by unchecked map
					gap_previous_count++;
					break;
				case nano::block_status::gap_source:
					// Missing source block, should be handled by unchecked map
					gap_source_count++;
					break;
				default:
					std::cout << fmt::format ("Block processing failed: {} for block {}\n", to_string (status), context.block->hash ().to_string ());
					failed_blocks_count++;
					break;
			}
		}
	});
}
void block_processing_benchmark::run ()
{
// Create account pool and distribute genesis funds to a random account
std::cout << fmt::format ("Generating {} accounts...\n", config.num_accounts);
pool.generate_accounts (config.num_accounts);
setup_genesis_distribution ();
// Run multiple iterations to measure consistent performance
for (size_t iteration = 0; iteration < config.num_iterations; ++iteration)
{
std::cout << fmt::format ("\n--- Iteration {}/{} --------------------------------------------------------------\n", iteration + 1, config.num_iterations);
std::cout << fmt::format ("Generating {} random transfers...\n", config.batch_size / 2);
auto blocks = generate_random_transfers ();
std::cout << fmt::format ("Processing {} blocks...\n", blocks.size ());
run_iteration (blocks);
}
print_statistics ();
}
// Submits one batch of blocks to the block processor and waits until the
// notification handler has ticked every hash off the tracking set, then
// reports the measured throughput. Consumes the input deque.
void block_processing_benchmark::run_iteration (std::deque<std::shared_ptr<nano::block>> & blocks)
{
	auto const total_blocks = blocks.size ();
	// Register every hash up-front so the blocks_processed handler can tick
	// them off as results arrive
	{
		auto current_l = current_blocks.lock ();
		for (auto const & block : blocks)
		{
			current_l->insert (block->hash ());
		}
	}
	auto const time_begin = std::chrono::high_resolution_clock::now ();
	// Submit all blocks to the processor queue
	while (!blocks.empty ())
	{
		auto block = blocks.front ();
		blocks.pop_front ();
		bool added = node->block_processor.add (block, nano::block_source::test);
		release_assert (added, "failed to add block to processor");
	}
	// Poll until the tracking set drains, printing progress every 3 seconds
	// (and once more when it empties, so the final state is always shown)
	nano::interval progress_interval;
	while (true)
	{
		{
			auto current_l = current_blocks.lock ();
			if (current_l->empty () || progress_interval.elapse (3s))
			{
				std::cout << fmt::format ("Blocks remaining: {:>9} (block processor: {:>9} | unchecked: {:>5})\n",
				current_l->size (),
				node->block_processor.size (),
				node->unchecked.count ());
			}
			if (current_l->empty ())
			{
				break;
			}
		}
		std::this_thread::sleep_for (1ms);
	}
	auto const time_end = std::chrono::high_resolution_clock::now ();
	auto const time_us = std::chrono::duration_cast<std::chrono::microseconds> (time_end - time_begin).count ();
	// Guard against division by zero when a tiny batch completes in under 1us
	auto const elapsed_us = time_us > 0 ? time_us : 1;
	std::cout << fmt::format ("\nPerformance: {} blocks/sec [{:.2f}s] {} blocks processed\n",
	total_blocks * 1000000 / elapsed_us, elapsed_us / 1000000.0, total_blocks);
	std::cout << "─────────────────────────────────────────────────────────────────\n";
	// Reset node stats so each iteration starts from a clean slate
	node->stats.clear ();
}
// Prints the counters accumulated across all iterations plus final account
// pool state
void block_processing_benchmark::print_statistics ()
{
	std::cout << "\n--- SUMMARY ---------------------------------------------------------------------\n\n";
	std::cout << fmt::format ("Blocks processed: {:>10}\n", processed_blocks_count.load ());
	std::cout << fmt::format ("Blocks failed: {:>10}\n", failed_blocks_count.load ());
	std::cout << fmt::format ("Blocks old: {:>10}\n", old_blocks_count.load ());
	std::cout << fmt::format ("Blocks gap_previous: {:>10}\n", gap_previous_count.load ());
	std::cout << fmt::format ("Blocks gap_source: {:>10}\n", gap_source_count.load ());
	std::cout << fmt::format ("\n");
	auto const total_accounts = pool.total_accounts ();
	auto const funded_accounts = pool.accounts_with_balance_count ();
	// Guard against division by zero when no accounts were generated
	auto const funded_pct = total_accounts > 0 ? 100.0 * funded_accounts / total_accounts : 0.0;
	std::cout << fmt::format ("Accounts total: {:>10}\n", total_accounts);
	std::cout << fmt::format ("Accounts with balance: {:>10} ({:.1f}%)\n", funded_accounts, funded_pct);
}
}

View file

@ -0,0 +1,285 @@
#include <nano/lib/config.hpp>
#include <nano/lib/locks.hpp>
#include <nano/lib/thread_runner.hpp>
#include <nano/lib/timer.hpp>
#include <nano/nano_node/benchmarks/benchmarks.hpp>
#include <nano/node/active_elections.hpp>
#include <nano/node/cli.hpp>
#include <nano/node/daemonconfig.hpp>
#include <nano/node/ledger_notifications.hpp>
#include <nano/node/node_observers.hpp>
#include <nano/secure/ledger.hpp>
#include <boost/asio/io_context.hpp>
#include <atomic>
#include <chrono>
#include <iostream>
#include <limits>
#include <memory>
#include <thread>
#include <unordered_map>
#include <fmt/format.h>
namespace nano::cli
{
/*
* Cementing Benchmark
*
* Measures the performance of the cementing subsystem - the component that marks blocks
* as confirmed/immutable in the ledger.
*
* How it works:
* 1. Setup: Creates a node and generates random transfer blocks
* 2. Process: Inserts blocks directly into ledger (bypassing block processor)
* 3. Submit: Adds blocks to cementing set for confirmation
* 4. Measure: Tracks time from submission until all blocks are cemented
* 5. Report: Calculates cementing throughput in blocks/sec
*
* Two modes:
* - Sequential mode: Each block is submitted to cementing set individually
* - Root mode: Only the final block is submitted, which triggers cascading cementing
* of all dependent blocks (tests dependency resolution performance)
*
* What is tested:
* - Cementing set processing speed
* - Database write performance for confirmation marks
* - Dependency resolution (root mode only)
*
* What is NOT tested:
* - Block processing (blocks inserted directly into ledger)
* - Elections or voting (blocks pre-confirmed)
* - Network communication
*/
// Benchmark driver that inserts blocks directly into the ledger and measures
// how fast the cementing subsystem confirms them (sequential or root mode)
class cementing_benchmark : public benchmark_base
{
private:
	// Blocks submitted for cementing that have not yet been reported cemented,
	// keyed by hash with their submission timestamp (mutex-guarded map)
	nano::locked<std::unordered_map<nano::block_hash, std::chrono::steady_clock::time_point>> pending_cementing;
	// Metrics updated from notification handlers (other threads), read from
	// the benchmark thread
	std::atomic<size_t> processed_blocks_count{ 0 }; // ledger insertions (status == progress)
	std::atomic<size_t> cemented_blocks_count{ 0 }; // blocks reported via batch_cemented
public:
	cementing_benchmark (std::shared_ptr<nano::node> node_a, benchmark_config const & config_a);
	// Generates accounts, seeds genesis funds and runs all iterations
	void run ();
	// Inserts blocks into the ledger, submits them for cementing and waits
	void run_iteration (std::deque<std::shared_ptr<nano::block>> & blocks);
	// Prints final counters once all iterations are done
	void print_statistics ();
};
// CLI entry point: sets up a throwaway dev-network node and runs the
// cementing benchmark in the mode selected by the parsed config.
// NOTE(review): data_path is currently unused — the node runs from a unique
// temporary path (nano::unique_path) instead; confirm this is intentional.
void run_cementing_benchmark (boost::program_options::variables_map const & vm, std::filesystem::path const & data_path)
{
	// Parse benchmark parameters (mode, accounts, iterations, batch size)
	auto config = benchmark_config::parse (vm);
	std::cout << "=== BENCHMARK: Cementing ===\n";
	std::cout << "Configuration:\n";
	std::cout << fmt::format (" Mode: {}\n", config.cementing_mode == cementing_mode::root ? "root" : "sequential");
	std::cout << fmt::format (" Accounts: {}\n", config.num_accounts);
	std::cout << fmt::format (" Iterations: {}\n", config.num_iterations);
	std::cout << fmt::format (" Batch size: {}\n", config.batch_size);
	// Setup node directly in run method
	nano::network_constants::set_active_network ("dev");
	// Log at warn level so benchmark output is not drowned by node logging
	nano::logger::initialize (nano::log_config::cli_default (nano::log::level::warn));
	nano::node_flags node_flags;
	nano::update_flags (node_flags, vm);
	auto io_ctx = std::make_shared<boost::asio::io_context> ();
	nano::work_pool work_pool{ nano::dev::network_params.network, std::numeric_limits<unsigned>::max () };
	// Load configuration from current working directory (if exists) and cli config overrides
	auto daemon_config = nano::load_config_file<nano::daemon_config> (nano::node_config_filename, {}, node_flags.config_overrides);
	auto node_config = daemon_config.node;
	// Disable proof-of-work requirements so block generation is not the bottleneck
	node_config.network_params.work = nano::work_thresholds{ 0, 0, 0 };
	node_config.peering_port = 0; // Use random available port
	node_config.max_backlog = 0; // Disable bounded backlog
	node_config.block_processor.max_system_queue = std::numeric_limits<size_t>::max (); // Unlimited queue size
	node_config.max_unchecked_blocks = 1024 * 1024; // Large unchecked blocks cache to avoid dropping blocks
	auto node = std::make_shared<nano::node> (io_ctx, nano::unique_path (), node_config, work_pool, node_flags);
	node->start ();
	// Runner drives the io_context; must outlive the node's async operations
	nano::thread_runner runner (io_ctx, nano::default_logger (), node->config.io_threads);
	std::cout << "\nSystem Info:\n";
	std::cout << fmt::format (" Backend: {}\n", node->store.vendor_get ());
	std::cout << "\n";
	// Wait for node to be ready
	std::this_thread::sleep_for (500ms);
	// Run benchmark
	cementing_benchmark benchmark{ node, config };
	benchmark.run ();
	node->stop ();
}
cementing_benchmark::cementing_benchmark (std::shared_ptr<nano::node> node_a, benchmark_config const & config_a) :
	benchmark_base (node_a, config_a)
{
	// Count successful ledger insertions reported by the block processor
	node->ledger_notifications.blocks_processed.add ([this] (std::deque<std::pair<nano::block_status, nano::block_context>> const & batch) {
		size_t successes{ 0 };
		for (auto const & entry : batch)
		{
			if (entry.first == nano::block_status::progress)
			{
				++successes;
			}
		}
		processed_blocks_count += successes;
	});
	// Remove cemented blocks from the pending set as confirmations arrive
	node->cementing_set.batch_cemented.add ([this] (auto const & cemented) {
		auto pending_l = pending_cementing.lock ();
		for (auto const & entry : cemented)
		{
			pending_l->erase (entry.block->hash ());
			++cemented_blocks_count;
		}
	});
}
void cementing_benchmark::run ()
{
std::cout << fmt::format ("Generating {} accounts...\n", config.num_accounts);
pool.generate_accounts (config.num_accounts);
setup_genesis_distribution ();
std::cout << fmt::format ("Cementing mode: {}\n", config.cementing_mode == cementing_mode::root ? "root" : "sequential");
for (size_t iteration = 0; iteration < config.num_iterations; ++iteration)
{
std::cout << fmt::format ("\n--- Iteration {}/{} --------------------------------------------------------------\n", iteration + 1, config.num_iterations);
std::deque<std::shared_ptr<nano::block>> blocks;
if (config.cementing_mode == cementing_mode::root)
{
std::cout << fmt::format ("Generating dependent chain topology...\n");
blocks = generate_dependent_chain ();
}
else
{
std::cout << fmt::format ("Generating {} random transfers...\n", config.batch_size / 2);
blocks = generate_random_transfers ();
}
std::cout << fmt::format ("Cementing {} blocks...\n", blocks.size ());
run_iteration (blocks);
}
print_statistics ();
}
// Inserts one batch of blocks directly into the ledger (bypassing the block
// processor), submits them for cementing per the configured mode, waits for
// the batch_cemented handler to drain the pending set, and reports throughput.
// Consumes the input deque in sequential mode.
void cementing_benchmark::run_iteration (std::deque<std::shared_ptr<nano::block>> & blocks)
{
	auto const total_blocks = blocks.size ();
	// Register every hash up-front so the batch_cemented handler can tick
	// them off as confirmations arrive
	{
		auto now = std::chrono::steady_clock::now ();
		auto pending_l = pending_cementing.lock ();
		for (auto const & block : blocks)
		{
			pending_l->emplace (block->hash (), now);
		}
	}
	std::cout << fmt::format ("Processing {} blocks directly into the ledger...\n", blocks.size ());
	// Process all blocks directly into the ledger under one write transaction
	{
		auto transaction = node->ledger.tx_begin_write ();
		for (auto const & block : blocks)
		{
			auto result = node->ledger.process (transaction, block);
			release_assert (result == nano::block_status::progress, to_string (result));
		}
	}
	std::cout << "All blocks processed, starting cementing...\n";
	auto const time_begin = std::chrono::high_resolution_clock::now ();
	// Mode-specific cementing
	size_t blocks_submitted = 0;
	if (config.cementing_mode == cementing_mode::root)
	{
		// In root mode, only submit the final block which depends on all
		// others; cementing must then cascade through the dependency chain
		if (!blocks.empty ())
		{
			auto final_block = blocks.back ();
			bool added = node->cementing_set.add (final_block->hash ());
			release_assert (added, "failed to add final block to cementing set");
			blocks_submitted = 1;
			std::cout << fmt::format ("Submitted 1 root block to cement {} dependent blocks\n",
			total_blocks);
		}
	}
	else
	{
		// Sequential mode - submit each block separately
		while (!blocks.empty ())
		{
			auto block = blocks.front ();
			blocks.pop_front ();
			bool added = node->cementing_set.add (block->hash ());
			release_assert (added, "failed to add block to cementing set");
			blocks_submitted++;
		}
		std::cout << fmt::format ("Submitted {} blocks to cementing set\n",
		blocks_submitted);
	}
	// Poll until the pending set drains, printing progress every 3 seconds
	// (and once more when it empties, so the final state is always shown)
	nano::interval progress_interval;
	while (true)
	{
		{
			auto pending_l = pending_cementing.lock ();
			if (pending_l->empty () || progress_interval.elapse (3s))
			{
				std::cout << fmt::format ("Blocks remaining: {:>9} (cementing set: {:>5} | deferred: {:>5})\n",
				pending_l->size (),
				node->cementing_set.size (),
				node->cementing_set.deferred_size ());
			}
			if (pending_l->empty ())
			{
				break;
			}
		}
		std::this_thread::sleep_for (1ms);
	}
	auto const time_end = std::chrono::high_resolution_clock::now ();
	auto const time_us = std::chrono::duration_cast<std::chrono::microseconds> (time_end - time_begin).count ();
	// Guard against division by zero when a tiny batch completes in under 1us
	auto const elapsed_us = time_us > 0 ? time_us : 1;
	std::cout << fmt::format ("\nPerformance: {} blocks/sec [{:.2f}s] {} blocks processed\n",
	total_blocks * 1000000 / elapsed_us, elapsed_us / 1000000.0, total_blocks);
	std::cout << "─────────────────────────────────────────────────────────────────\n";
	// Reset node stats so each iteration starts from a clean slate
	node->stats.clear ();
}
// Prints the mode, accumulated counters and final account pool state
void cementing_benchmark::print_statistics ()
{
	std::cout << "\n--- SUMMARY ---------------------------------------------------------------------\n\n";
	std::cout << fmt::format ("Mode: {:>10}\n", config.cementing_mode == cementing_mode::root ? "root" : "sequential");
	std::cout << fmt::format ("Blocks processed: {:>10}\n", processed_blocks_count.load ());
	std::cout << fmt::format ("Blocks cemented: {:>10}\n", cemented_blocks_count.load ());
	std::cout << fmt::format ("\n");
	auto const total_accounts = pool.total_accounts ();
	auto const funded_accounts = pool.accounts_with_balance_count ();
	// Guard against division by zero when no accounts were generated
	auto const funded_pct = total_accounts > 0 ? 100.0 * funded_accounts / total_accounts : 0.0;
	std::cout << fmt::format ("Accounts total: {:>10}\n", total_accounts);
	std::cout << fmt::format ("Accounts with balance: {:>10} ({:.1f}%)\n", funded_accounts, funded_pct);
}
}

View file

@ -0,0 +1,344 @@
#include <nano/lib/config.hpp>
#include <nano/lib/locks.hpp>
#include <nano/lib/thread_runner.hpp>
#include <nano/lib/timer.hpp>
#include <nano/nano_node/benchmarks/benchmarks.hpp>
#include <nano/node/active_elections.hpp>
#include <nano/node/cli.hpp>
#include <nano/node/daemonconfig.hpp>
#include <nano/node/election.hpp>
#include <nano/node/ledger_notifications.hpp>
#include <nano/node/node_observers.hpp>
#include <nano/node/scheduler/component.hpp>
#include <nano/node/scheduler/manual.hpp>
#include <nano/secure/ledger.hpp>
#include <boost/asio/io_context.hpp>
#include <chrono>
#include <iostream>
#include <limits>
#include <thread>
#include <fmt/format.h>
namespace nano::cli
{
/*
* Elections Benchmark
*
* Measures the performance of the election subsystem - the component that runs voting
* consensus to cement blocks. Tests how quickly the node can start elections, collect
* votes, reach quorum, and cement blocks.
*
* How it works:
* 1. Setup: Creates a node with genesis representative key for voting
* 2. Prepare: Generates independent open blocks (send blocks are pre-cemented)
* 3. Process: Inserts open blocks directly into ledger (bypassing block processor)
* 4. Start: Manually triggers elections for all open blocks
* 5. Measure: Tracks time from election start until blocks are confirmed and cemented
* 6. Report: Calculates election throughput and timing statistics
*
* What is tested:
* - Election startup performance
* - Vote generation and processing speed (with one local rep running on the same node)
* - Quorum detection and confirmation logic
* - Cementing after confirmation
* - Concurrent election handling
*
* What is NOT tested:
* - Block processing (blocks inserted directly)
* - Network vote propagation (local voting only)
* - Election schedulers (elections started manually)
*/
// Benchmark driver that starts elections manually for pre-inserted open
// blocks and measures election start/confirm/cement timing per block
class elections_benchmark : public benchmark_base
{
private:
	// Per-block timestamps for each stage of the election pipeline; a
	// default-constructed time_point means the stage has not happened yet
	struct block_timing
	{
		std::chrono::steady_clock::time_point submitted;
		std::chrono::steady_clock::time_point election_started;
		std::chrono::steady_clock::time_point election_stopped;
		std::chrono::steady_clock::time_point cemented;
	};
	// Track timing for each block through the election pipeline
	nano::locked<std::unordered_map<nano::block_hash, block_timing>> block_timings;
	// Blocks whose elections have not yet stopped (erased by election_erased)
	nano::locked<std::unordered_set<nano::block_hash>> pending_confirmation;
	// Blocks not yet reported cemented (erased by batch_cemented)
	nano::locked<std::unordered_set<nano::block_hash>> pending_cementing;
	// Metrics updated from notification handlers (other threads)
	std::atomic<size_t> elections_started{ 0 };
	std::atomic<size_t> elections_stopped{ 0 };
	std::atomic<size_t> elections_confirmed{ 0 };
	std::atomic<size_t> blocks_cemented{ 0 };
public:
	elections_benchmark (std::shared_ptr<nano::node> node_a, benchmark_config const & config_a);
	// Generates accounts (keeping most weight on the local rep) and runs all iterations
	void run ();
	// Pre-cements sends, inserts opens, starts elections and waits for cementing
	void run_iteration (std::deque<std::shared_ptr<nano::block>> & sends, std::deque<std::shared_ptr<nano::block>> & opens);
	// Prints counters and average per-block timing statistics
	void print_statistics ();
};
// CLI entry point: sets up a throwaway dev-network node with schedulers
// disabled (elections are started manually) and a local genesis rep for
// voting, then runs the elections benchmark.
// NOTE(review): data_path is currently unused — the node runs from a unique
// temporary path (nano::unique_path) instead; confirm this is intentional.
void run_elections_benchmark (boost::program_options::variables_map const & vm, std::filesystem::path const & data_path)
{
	// Parse benchmark parameters (accounts, iterations, batch size)
	auto config = benchmark_config::parse (vm);
	std::cout << "=== BENCHMARK: Elections ===\n";
	std::cout << "Configuration:\n";
	std::cout << fmt::format (" Accounts: {}\n", config.num_accounts);
	std::cout << fmt::format (" Iterations: {}\n", config.num_iterations);
	std::cout << fmt::format (" Batch size: {}\n", config.batch_size);
	// Setup node directly in run method
	nano::network_constants::set_active_network ("dev");
	// Log at warn level so benchmark output is not drowned by node logging
	nano::logger::initialize (nano::log_config::cli_default (nano::log::level::warn));
	nano::node_flags node_flags;
	nano::update_flags (node_flags, vm);
	auto io_ctx = std::make_shared<boost::asio::io_context> ();
	nano::work_pool work_pool{ nano::dev::network_params.network, std::numeric_limits<unsigned>::max () };
	// Load configuration from current working directory (if exists) and cli config overrides
	auto daemon_config = nano::load_config_file<nano::daemon_config> (nano::node_config_filename, {}, node_flags.config_overrides);
	auto node_config = daemon_config.node;
	// Disable proof-of-work requirements so block generation is not the bottleneck
	node_config.network_params.work = nano::work_thresholds{ 0, 0, 0 };
	node_config.peering_port = 0; // Use random available port
	node_config.max_backlog = 0; // Disable bounded backlog
	// Disable election schedulers and backlog scanning
	node_config.hinted_scheduler.enable = false;
	node_config.optimistic_scheduler.enable = false;
	node_config.priority_scheduler.enable = false;
	node_config.backlog_scan.enable = false;
	node_config.block_processor.max_peer_queue = std::numeric_limits<size_t>::max (); // Unlimited queue size
	node_config.block_processor.max_system_queue = std::numeric_limits<size_t>::max (); // Unlimited queue size
	node_config.max_unchecked_blocks = 1024 * 1024; // Large unchecked blocks cache to avoid dropping blocks
	node_config.vote_processor.max_pr_queue = std::numeric_limits<size_t>::max (); // Unlimited vote processing queue
	auto node = std::make_shared<nano::node> (io_ctx, nano::unique_path (), node_config, work_pool, node_flags);
	node->start ();
	// Runner drives the io_context; must outlive the node's async operations
	nano::thread_runner runner (io_ctx, nano::default_logger (), node->config.io_threads);
	std::cout << "\nSystem Info:\n";
	std::cout << fmt::format (" Backend: {}\n", node->store.vendor_get ());
	std::cout << "\n";
	// Insert dev genesis representative key for voting
	auto wallet = node->wallets.create (nano::random_wallet_id ());
	wallet->insert_adhoc (nano::dev::genesis_key.prv);
	// Wait for node to be ready
	std::this_thread::sleep_for (500ms);
	// Run benchmark
	elections_benchmark benchmark{ node, config };
	benchmark.run ();
	node->stop ();
}
// Wires up the notification handlers that stamp per-block timings and update
// the atomic counters. Lock acquisition orders used by the handlers:
// election_erased takes block_timings -> pending_confirmation, and
// batch_cemented takes pending_cementing -> block_timings; any other code
// locking several of these sets must follow a compatible order
// (pending_cementing -> block_timings -> pending_confirmation) to avoid deadlock.
elections_benchmark::elections_benchmark (std::shared_ptr<nano::node> node_a, benchmark_config const & config_a) :
	benchmark_base (node_a, config_a)
{
	// Track when elections start
	node->active.election_started.add ([this] (std::shared_ptr<nano::election> const & election, nano::bucket_index const & bucket, nano::priority_timestamp const & priority) {
		auto now = std::chrono::steady_clock::now ();
		// NOTE(review): assumes election->winner () is non-null whenever this
		// notification fires — confirm against active_elections
		auto hash = election->winner ()->hash ();
		auto timings_l = block_timings.lock ();
		if (auto it = timings_l->find (hash); it != timings_l->end ())
		{
			it->second.election_started = now;
		}
		elections_started++;
	});
	// Track when elections stop (regardless of confirmation)
	node->active.election_erased.add ([this] (std::shared_ptr<nano::election> const & election) {
		auto now = std::chrono::steady_clock::now ();
		auto hash = election->winner ()->hash ();
		auto timings_l = block_timings.lock ();
		auto pending_confirmation_l = pending_confirmation.lock ();
		if (auto it = timings_l->find (hash); it != timings_l->end ())
		{
			it->second.election_stopped = now;
		}
		pending_confirmation_l->erase (hash);
		elections_stopped++;
		elections_confirmed += election->confirmed () ? 1 : 0;
	});
	// Track when blocks get cemented
	node->cementing_set.batch_cemented.add ([this] (auto const & hashes) {
		auto now = std::chrono::steady_clock::now ();
		auto pending_l = pending_cementing.lock ();
		auto timings_l = block_timings.lock ();
		for (auto const & ctx : hashes)
		{
			auto hash = ctx.block->hash ();
			if (auto it = timings_l->find (hash); it != timings_l->end ())
			{
				it->second.cemented = now;
			}
			pending_l->erase (hash);
			blocks_cemented++;
		}
	});
}
void elections_benchmark::run ()
{
std::cout << fmt::format ("Generating {} accounts...\n", config.num_accounts);
pool.generate_accounts (config.num_accounts);
setup_genesis_distribution (0.1); // Only distribute 10%, keep 90% for voting weight
for (size_t iteration = 0; iteration < config.num_iterations; ++iteration)
{
std::cout << fmt::format ("\n--- Iteration {}/{} --------------------------------------------------------------\n", iteration + 1, config.num_iterations);
std::cout << fmt::format ("Generating independent blocks...\n");
auto [sends, opens] = generate_independent_blocks ();
std::cout << fmt::format ("Measuring elections performance for {} opens...\n", opens.size ());
run_iteration (sends, opens);
}
print_statistics ();
}
// Pre-cements the send blocks, inserts the open blocks unconfirmed, starts
// elections for the opens via the manual scheduler, then waits until every
// open is both confirmed and cemented before reporting throughput.
void elections_benchmark::run_iteration (std::deque<std::shared_ptr<nano::block>> & sends, std::deque<std::shared_ptr<nano::block>> & opens)
{
	auto const total_opens = opens.size ();
	// Process and cement all send blocks directly so opens have confirmed sources
	std::cout << fmt::format ("Processing and cementing {} send blocks...\n", sends.size ());
	{
		auto transaction = node->ledger.tx_begin_write ();
		for (auto const & send : sends)
		{
			auto result = node->ledger.process (transaction, send);
			release_assert (result == nano::block_status::progress, to_string (result));
			// Confirm directly in the ledger, bypassing the cementing set
			auto cemented = node->ledger.confirm (transaction, send->hash ());
			release_assert (!cemented.empty () && cemented.back ()->hash () == send->hash ());
		}
	}
	// Process open blocks into ledger without confirming
	std::cout << fmt::format ("Processing {} open blocks into ledger...\n", opens.size ());
	{
		auto transaction = node->ledger.tx_begin_write ();
		for (auto const & open : opens)
		{
			auto result = node->ledger.process (transaction, open);
			release_assert (result == nano::block_status::progress, to_string (result));
		}
	}
	// Initialize timing entries for open blocks only.
	// Locks are taken pending_cementing -> block_timings -> pending_confirmation
	// to stay consistent with the notification handlers (batch_cemented locks
	// pending_cementing then block_timings; election_erased locks block_timings
	// then pending_confirmation) — acquiring block_timings first here could
	// deadlock against a concurrently firing batch_cemented callback.
	{
		auto now = std::chrono::steady_clock::now ();
		auto pending_cementing_l = pending_cementing.lock ();
		auto timings_l = block_timings.lock ();
		auto pending_confirmation_l = pending_confirmation.lock ();
		for (auto const & open : opens)
		{
			pending_cementing_l->emplace (open->hash ());
			pending_confirmation_l->emplace (open->hash ());
			timings_l->emplace (open->hash (), block_timing{ now });
		}
	}
	auto const time_begin = std::chrono::high_resolution_clock::now ();
	// Manually start elections for open blocks only
	std::cout << fmt::format ("Starting elections manually for {} open blocks...\n", opens.size ());
	for (auto const & open : opens)
	{
		// Use manual scheduler to start election
		node->scheduler.manual.push (open);
	}
	// Wait for all elections to complete and blocks to be cemented, printing
	// progress every 3 seconds (and once more when both sets empty)
	nano::interval progress_interval;
	while (true)
	{
		{
			auto pending_cementing_l = pending_cementing.lock ();
			auto pending_confirmation_l = pending_confirmation.lock ();
			if ((pending_cementing_l->empty () && pending_confirmation_l->empty ()) || progress_interval.elapse (3s))
			{
				std::cout << fmt::format ("Confirming elections: {:>9} remaining | cementing: {:>9} remaining (active: {:>5} | cementing: {:>5} | deferred: {:>5})\n",
				pending_confirmation_l->size (),
				pending_cementing_l->size (),
				node->active.size (),
				node->cementing_set.size (),
				node->cementing_set.deferred_size ());
			}
			if (pending_cementing_l->empty () && pending_confirmation_l->empty ())
			{
				break;
			}
		}
		std::this_thread::sleep_for (1ms);
	}
	auto const time_end = std::chrono::high_resolution_clock::now ();
	auto const time_us = std::chrono::duration_cast<std::chrono::microseconds> (time_end - time_begin).count ();
	// Guard against division by zero when a tiny batch completes in under 1us
	auto const elapsed_us = time_us > 0 ? time_us : 1;
	std::cout << fmt::format ("\nPerformance: {} blocks/sec [{:.2f}s] {} blocks processed\n",
	total_opens * 1000000 / elapsed_us, elapsed_us / 1000000.0, total_opens);
	std::cout << "─────────────────────────────────────────────────────────────────\n";
	// Reset node stats so each iteration starts from a clean slate
	node->stats.clear ();
}
// Prints election counters and average per-block election/confirmation times
// derived from the timing map populated by the notification handlers
void elections_benchmark::print_statistics ()
{
	std::cout << "\n--- SUMMARY ---------------------------------------------------------------------\n\n";
	std::cout << fmt::format ("Elections started: {:>10}\n", elections_started.load ());
	std::cout << fmt::format ("Elections stopped: {:>10}\n", elections_stopped.load ());
	std::cout << fmt::format ("Elections confirmed: {:>10}\n", elections_confirmed.load ());
	std::cout << fmt::format ("\n");
	// Calculate timing statistics from raw data
	auto timings_l = block_timings.lock ();
	uint64_t total_election_time = 0;
	uint64_t total_confirmation_time = 0;
	size_t election_count = 0;
	size_t confirmed_count = 0;
	for (auto const & [hash, timing] : *timings_l)
	{
		// Every tracked block must have completed the full pipeline
		release_assert (timing.election_started != std::chrono::steady_clock::time_point{});
		release_assert (timing.election_stopped != std::chrono::steady_clock::time_point{});
		release_assert (timing.cemented != std::chrono::steady_clock::time_point{});
		total_election_time += std::chrono::duration_cast<std::chrono::microseconds> (timing.election_stopped - timing.election_started).count ();
		election_count++;
		total_confirmation_time += std::chrono::duration_cast<std::chrono::microseconds> (timing.cemented - timing.election_started).count ();
		confirmed_count++;
	}
	std::cout << "\n";
	// Guard against division by zero (prints 0.00 instead of inf/nan) when no
	// timings were recorded
	auto const avg_election_ms = election_count > 0 ? total_election_time / (election_count * 1000.0) : 0.0;
	auto const avg_total_ms = confirmed_count > 0 ? total_confirmation_time / (confirmed_count * 1000.0) : 0.0;
	std::cout << fmt::format ("Election time (activated > confirmed): {:>8.2f} ms/block avg\n", avg_election_ms);
	std::cout << fmt::format ("Total time (activated > cemented): {:>8.2f} ms/block avg\n", avg_total_ms);
}
}

View file

@ -0,0 +1,359 @@
#include <nano/lib/config.hpp>
#include <nano/lib/locks.hpp>
#include <nano/lib/thread_runner.hpp>
#include <nano/lib/timer.hpp>
#include <nano/nano_node/benchmarks/benchmarks.hpp>
#include <nano/node/active_elections.hpp>
#include <nano/node/cli.hpp>
#include <nano/node/daemonconfig.hpp>
#include <nano/node/election.hpp>
#include <nano/node/ledger_notifications.hpp>
#include <nano/node/node_observers.hpp>
#include <nano/node/scheduler/component.hpp>
#include <boost/asio/io_context.hpp>
#include <chrono>
#include <iostream>
#include <limits>
#include <thread>
#include <fmt/format.h>
namespace nano::cli
{
/*
* Full Pipeline Benchmark
*
* Measures the complete block confirmation pipeline from submission through processing,
* elections, and cementing. Tests all stages together including inter-component coordination.
*
* How it works:
* 1. Setup: Creates a node with genesis representative key for voting
* 2. Generate: Creates random transfer transactions (send/receive pairs)
* 3. Submit: Adds blocks via process_active() which triggers the full pipeline
* 4. Measure: Tracks time from submission through processing, election, and cementing
* 5. Report: Calculates overall throughput and timing breakdown for each stage
*
* Pipeline stages measured:
* - Block processing: submission -> ledger insertion
* - Election activation: ledger insertion -> election start
* - Election confirmation: election start -> block cemented
* - Total pipeline: submission -> cemented
*
* What is tested:
* - Block processor throughput
* - Election startup and scheduling
* - Vote generation and processing (with one local rep)
* - Quorum detection and confirmation
* - Cementing performance
* - Inter-component coordination and queueing
*
* What is NOT tested:
* - Network communication (local-only)
* - Multiple remote representatives
*/
// Benchmark driver for the full confirmation pipeline: submits blocks via
// process_active() and timestamps each lifecycle stage through node observers.
class pipeline_benchmark : public benchmark_base
{
private:
	// Per-block lifecycle timestamps; a default-constructed time_point means
	// the corresponding stage has not been observed (yet)
	struct block_timing
	{
		std::chrono::steady_clock::time_point submitted;
		std::chrono::steady_clock::time_point processed;
		std::chrono::steady_clock::time_point election_started;
		std::chrono::steady_clock::time_point election_stopped;
		std::chrono::steady_clock::time_point confirmed;
		std::chrono::steady_clock::time_point cemented;
	};
	// Track timing for each block through the pipeline
	nano::locked<std::unordered_map<nano::block_hash, block_timing>> block_timings;
	// Track blocks waiting to be cemented
	nano::locked<std::unordered_map<nano::block_hash, std::chrono::steady_clock::time_point>> pending_cementing;
	// Metrics, incremented from observer callbacks (hence atomic)
	std::atomic<size_t> elections_started{ 0 };
	std::atomic<size_t> elections_stopped{ 0 };
	std::atomic<size_t> elections_confirmed{ 0 };
	std::atomic<size_t> blocks_cemented{ 0 };
public:
	// Registers lifecycle observers on the node
	pipeline_benchmark (std::shared_ptr<nano::node> node_a, benchmark_config const & config_a);
	// Generates accounts, runs all configured iterations, then prints the summary
	void run ();
	// Submits one batch of blocks and waits until all of them are cemented
	void run_iteration (std::deque<std::shared_ptr<nano::block>> & blocks);
	// Prints counters and per-stage timing averages collected across all iterations
	void print_statistics ();
};
// CLI entry point: builds a dev-network node with effectively unbounded queues
// (so queue limits don't skew measurements), inserts the genesis key for voting,
// then runs the full-pipeline benchmark against it.
// NOTE(review): data_path is currently unused; the node runs from nano::unique_path ().
void run_pipeline_benchmark (boost::program_options::variables_map const & vm, std::filesystem::path const & data_path)
{
	auto config = benchmark_config::parse (vm);
	std::cout << "=== BENCHMARK: Full Pipeline ===\n";
	std::cout << "Configuration:\n";
	std::cout << fmt::format (" Accounts: {}\n", config.num_accounts);
	std::cout << fmt::format (" Iterations: {}\n", config.num_iterations);
	std::cout << fmt::format (" Batch size: {}\n", config.batch_size);
	// Setup node directly in run method
	nano::network_constants::set_active_network ("dev");
	nano::logger::initialize (nano::log_config::cli_default (nano::log::level::warn));
	nano::node_flags node_flags;
	nano::update_flags (node_flags, vm);
	auto io_ctx = std::make_shared<boost::asio::io_context> ();
	// Unlimited work pool threads; work thresholds are zeroed below so this is cheap
	nano::work_pool work_pool{ nano::dev::network_params.network, std::numeric_limits<unsigned>::max () };
	// Load configuration from current working directory (if exists) and cli config overrides
	auto daemon_config = nano::load_config_file<nano::daemon_config> (nano::node_config_filename, {}, node_flags.config_overrides);
	auto node_config = daemon_config.node;
	node_config.network_params.work = nano::work_thresholds{ 0, 0, 0 }; // Disable proof-of-work so it doesn't dominate the measurement
	node_config.peering_port = 0; // Use random available port
	node_config.max_backlog = 0; // Disable bounded backlog
	node_config.block_processor.max_peer_queue = std::numeric_limits<size_t>::max (); // Unlimited queue size
	node_config.block_processor.max_system_queue = std::numeric_limits<size_t>::max (); // Unlimited queue size
	node_config.max_unchecked_blocks = 1024 * 1024; // Large unchecked blocks cache to avoid dropping blocks
	node_config.vote_processor.max_pr_queue = std::numeric_limits<size_t>::max (); // Unlimited vote processing queue
	node_config.priority_bucket.max_blocks = std::numeric_limits<size_t>::max (); // Unlimited priority bucket
	node_config.priority_bucket.max_elections = std::numeric_limits<size_t>::max (); // Unlimited bucket elections
	node_config.priority_bucket.reserved_elections = std::numeric_limits<size_t>::max (); // Unlimited bucket elections
	auto node = std::make_shared<nano::node> (io_ctx, nano::unique_path (), node_config, work_pool, node_flags);
	node->start ();
	nano::thread_runner runner (io_ctx, nano::default_logger (), node->config.io_threads);
	std::cout << "\nSystem Info:\n";
	std::cout << fmt::format (" Backend: {}\n", node->store.vendor_get ());
	std::cout << fmt::format (" Block processor threads: {}\n", 1); // TODO: Log number of block processor threads when upstreamed
	std::cout << fmt::format (" Vote processor threads: {}\n", node->config.vote_processor.threads);
	std::cout << fmt::format (" Active elections limit: {}\n", node->config.active_elections.size);
	std::cout << fmt::format (" Priority bucket max blocks: {}\n", node->config.priority_bucket.max_blocks);
	std::cout << fmt::format (" Priority bucket max elections: {}\n", node->config.priority_bucket.max_elections);
	std::cout << fmt::format (" Block processor max peer queue: {}\n", node->config.block_processor.max_peer_queue);
	std::cout << fmt::format (" Block processor max system queue: {}\n", node->config.block_processor.max_system_queue);
	std::cout << fmt::format (" Vote processor max pr queue: {}\n", node->config.vote_processor.max_pr_queue);
	std::cout << fmt::format (" Max unchecked blocks: {}\n", node->config.max_unchecked_blocks);
	std::cout << "\n";
	// Insert dev genesis representative key for voting
	wallet->insert_adhoc (nano::dev::genesis_key.prv);
	// Wait for node to be ready
	std::this_thread::sleep_for (500ms);
	// Run benchmark
	pipeline_benchmark benchmark{ node, config };
	benchmark.run ();
	node->stop ();
}
// Wires up node observers so every stage transition of a block's lifetime is timestamped.
// All handlers may run concurrently on node worker threads; shared maps are nano::locked.
pipeline_benchmark::pipeline_benchmark (std::shared_ptr<nano::node> node_a, benchmark_config const & config_a) :
	benchmark_base (node_a, config_a)
{
	// Track when blocks get processed
	node->ledger_notifications.blocks_processed.add ([this] (std::deque<std::pair<nano::block_status, nano::block_context>> const & batch) {
		auto now = std::chrono::steady_clock::now ();
		auto timings_l = block_timings.lock ();
		for (auto const & [status, context] : batch)
		{
			if (status == nano::block_status::progress)
			{
				if (auto it = timings_l->find (context.block->hash ()); it != timings_l->end ())
				{
					it->second.processed = now;
				}
				processed_blocks_count++;
			}
		}
	});
	// Track when elections start
	node->active.election_started.add ([this] (std::shared_ptr<nano::election> const & election, nano::bucket_index const & bucket, nano::priority_timestamp const & priority) {
		auto now = std::chrono::steady_clock::now ();
		auto hash = election->winner ()->hash ();
		auto timings_l = block_timings.lock ();
		if (auto it = timings_l->find (hash); it != timings_l->end ())
		{
			it->second.election_started = now;
		}
		elections_started++;
	});
	// Track when elections stop (regardless of confirmation)
	node->active.election_erased.add ([this] (std::shared_ptr<nano::election> const & election) {
		auto now = std::chrono::steady_clock::now ();
		auto hash = election->winner ()->hash ();
		auto timings_l = block_timings.lock ();
		if (auto it = timings_l->find (hash); it != timings_l->end ())
		{
			it->second.election_stopped = now;
		}
		elections_stopped++;
		elections_confirmed += election->confirmed () ? 1 : 0;
	});
	// Track when blocks get cemented
	node->cementing_set.batch_cemented.add ([this] (auto const & hashes) {
		auto now = std::chrono::steady_clock::now ();
		// Acquire block_timings before pending_cementing — the same order used by
		// run_iteration's setup section. The previous reversed order here was a
		// lock-order inversion that could deadlock against the iteration thread.
		auto timings_l = block_timings.lock ();
		auto pending_l = pending_cementing.lock ();
		for (auto const & ctx : hashes)
		{
			auto hash = ctx.block->hash ();
			if (auto it = timings_l->find (hash); it != timings_l->end ())
			{
				it->second.cemented = now;
			}
			pending_l->erase (hash);
			blocks_cemented++;
		}
	});
}
void pipeline_benchmark::run ()
{
std::cout << fmt::format ("Generating {} accounts...\n", config.num_accounts);
pool.generate_accounts (config.num_accounts);
setup_genesis_distribution (0.1); // Only distribute 10%, keep 90% for voting weight
for (size_t iteration = 0; iteration < config.num_iterations; ++iteration)
{
std::cout << fmt::format ("\n--- Iteration {}/{} --------------------------------------------------------------\n", iteration + 1, config.num_iterations);
std::cout << fmt::format ("Generating {} random transfers...\n", config.batch_size / 2);
auto blocks = generate_random_transfers ();
std::cout << fmt::format ("Measuring full confirmation pipeline for {} blocks...\n", blocks.size ());
run_iteration (blocks);
}
print_statistics ();
}
// Submits one batch of blocks through process_active() and busy-waits (with 1ms naps)
// until every block has been cemented, printing progress every 3 seconds.
// Consumes the passed deque.
void pipeline_benchmark::run_iteration (std::deque<std::shared_ptr<nano::block>> & blocks)
{
	auto const total_blocks = blocks.size ();
	// Initialize timing entries for all blocks
	{
		auto now = std::chrono::steady_clock::now ();
		auto timings_l = block_timings.lock ();
		auto pending_l = pending_cementing.lock ();
		for (auto const & block : blocks)
		{
			timings_l->emplace (block->hash (), block_timing{ now });
			pending_l->emplace (block->hash (), now);
		}
	}
	auto const time_begin = std::chrono::high_resolution_clock::now ();
	// Submit all blocks through the full pipeline
	while (!blocks.empty ())
	{
		auto block = blocks.front ();
		blocks.pop_front ();
		// Process block through full confirmation pipeline
		node->process_active (block);
	}
	// Wait for all blocks to be confirmed and cemented
	nano::interval progress_interval;
	while (true)
	{
		{
			auto pending_l = pending_cementing.lock ();
			// Log queue depths periodically, and once more on completion
			if (pending_l->empty () || progress_interval.elapse (3s))
			{
				std::cout << fmt::format ("Blocks remaining: {:>9} (block processor: {:>9} | active: {:>5} | cementing: {:>5} | pool: {:>5})\n",
				pending_l->size (),
				node->block_processor.size (),
				node->active.size (),
				node->cementing_set.size (),
				node->scheduler.priority.size ());
			}
			if (pending_l->empty ())
			{
				break;
			}
		}
		std::this_thread::sleep_for (1ms);
	}
	auto const time_end = std::chrono::high_resolution_clock::now ();
	auto const elapsed_us = std::chrono::duration_cast<std::chrono::microseconds> (time_end - time_begin).count ();
	// Clamp to at least 1us to avoid division by zero on degenerate (empty/instant) runs
	auto const time_us = elapsed_us > 0 ? elapsed_us : 1;
	std::cout << fmt::format ("\nPerformance: {} blocks/sec [{:.2f}s] {} blocks processed\n",
	total_blocks * 1000000 / time_us, time_us / 1000000.0, total_blocks);
	std::cout << "─────────────────────────────────────────────────────────────────\n";
	node->stats.clear ();
}
// Prints the benchmark summary: global counters, account-pool state, and per-stage
// timing averages derived from the raw timestamps gathered by the observers.
void pipeline_benchmark::print_statistics ()
{
	std::cout << "\n--- SUMMARY ---------------------------------------------------------------------\n\n";
	std::cout << fmt::format ("Blocks processed: {:>10}\n", processed_blocks_count.load ());
	std::cout << fmt::format ("Elections started: {:>10}\n", elections_started.load ());
	std::cout << fmt::format ("Elections stopped: {:>10}\n", elections_stopped.load ());
	std::cout << fmt::format ("Elections confirmed: {:>10}\n", elections_confirmed.load ());
	std::cout << fmt::format ("\n");
	auto const total = pool.total_accounts ();
	std::cout << fmt::format ("Accounts total: {:>10}\n", total);
	// Guard against division by zero when the account pool is empty
	if (total > 0)
	{
		std::cout << fmt::format ("Accounts with balance: {:>10} ({:.1f}%)\n",
		pool.accounts_with_balance_count (),
		100.0 * pool.accounts_with_balance_count () / total);
	}
	// Calculate timing statistics from raw data
	auto timings_l = block_timings.lock ();
	uint64_t total_processing_time = 0;
	uint64_t total_activation_time = 0;
	uint64_t total_election_time = 0;
	uint64_t total_cementing_time = 0;
	size_t processed_count = 0;
	size_t activation_count = 0;
	size_t election_count = 0;
	size_t cemented_count = 0;
	for (auto const & [hash, timing] : *timings_l)
	{
		// All tracked blocks must have completed the full pipeline before stats are printed
		release_assert (timing.submitted != std::chrono::steady_clock::time_point{});
		release_assert (timing.election_started != std::chrono::steady_clock::time_point{});
		release_assert (timing.election_stopped != std::chrono::steady_clock::time_point{});
		release_assert (timing.cemented != std::chrono::steady_clock::time_point{});
		total_processing_time += std::chrono::duration_cast<std::chrono::microseconds> (timing.processed - timing.submitted).count ();
		processed_count++;
		total_activation_time += std::chrono::duration_cast<std::chrono::microseconds> (timing.election_started - timing.processed).count ();
		activation_count++;
		total_election_time += std::chrono::duration_cast<std::chrono::microseconds> (timing.cemented - timing.election_started).count ();
		election_count++;
		total_cementing_time += std::chrono::duration_cast<std::chrono::microseconds> (timing.cemented - timing.submitted).count ();
		cemented_count++;
	}
	std::cout << "\n";
	// Guard against division by zero (inf output) when no blocks were tracked
	if (processed_count > 0)
	{
		std::cout << fmt::format ("Block processing (submitted > processed): {:>8.2f} ms/block avg\n", total_processing_time / (processed_count * 1000.0));
	}
	if (activation_count > 0)
	{
		std::cout << fmt::format ("Election activation (processed > activated): {:>8.2f} ms/block avg\n", total_activation_time / (activation_count * 1000.0));
	}
	if (election_count > 0)
	{
		std::cout << fmt::format ("Election time (activated > confirmed): {:>8.2f} ms/block avg\n", total_election_time / (election_count * 1000.0));
	}
	if (cemented_count > 0)
	{
		std::cout << fmt::format ("Total pipeline (submitted > cemented): {:>8.2f} ms/block avg\n", total_cementing_time / (cemented_count * 1000.0));
	}
}
}

View file

@ -0,0 +1,616 @@
#include <nano/lib/blockbuilders.hpp>
#include <nano/lib/config.hpp>
#include <nano/lib/thread_runner.hpp>
#include <nano/lib/timer.hpp>
#include <nano/nano_node/benchmarks/benchmarks.hpp>
#include <nano/node/cli.hpp>
#include <nano/node/daemonconfig.hpp>
#include <boost/asio/io_context.hpp>
#include <chrono>
#include <iostream>
#include <limits>
#include <set>
#include <thread>
#include <fmt/format.h>
namespace nano::cli
{
// Seeds the Mersenne Twister engine with entropy from the system random device
account_pool::account_pool () :
	gen (rd ())
{
}
// Discards any previous pool state and generates `count` fresh keypairs,
// each registered with a zero balance and no frontier.
void account_pool::generate_accounts (size_t count)
{
	// Reset all tracking state first
	keys.clear ();
	keys.reserve (count);
	account_to_keypair.clear ();
	balances.clear ();
	accounts_with_balance.clear ();
	balance_lookup.clear ();
	frontiers.clear ();
	for (size_t n = 0; n < count; ++n)
	{
		auto & key = keys.emplace_back ();
		account_to_keypair[key.pub] = key;
		balances[key.pub] = 0;
	}
}
// Returns a uniformly random account that currently holds a non-zero balance.
// Precondition: at least one such account exists.
nano::account account_pool::get_random_account_with_balance ()
{
	debug_assert (!accounts_with_balance.empty ());
	auto const index = std::uniform_int_distribution<size_t>{ 0, accounts_with_balance.size () - 1 } (gen);
	return accounts_with_balance[index];
}
// Returns a uniformly random account from the whole pool (funded or not).
// Precondition: the pool is non-empty.
nano::account account_pool::get_random_account ()
{
	debug_assert (!keys.empty ());
	auto const index = std::uniform_int_distribution<size_t>{ 0, keys.size () - 1 } (gen);
	return keys[index].pub;
}
// Looks up the keypair for an account created by generate_accounts.
// Precondition: the account exists in the pool.
nano::keypair const & account_pool::get_keypair (nano::account const & account)
{
	auto const found = account_to_keypair.find (account);
	debug_assert (found != account_to_keypair.end ());
	return found->second;
}
// Records a new balance for `account` and keeps the funded-accounts index
// (accounts_with_balance + balance_lookup) consistent with it.
void account_pool::update_balance (nano::account const & account, nano::uint128_t new_balance)
{
	balances[account] = new_balance;
	bool had_balance = balance_lookup.count (account) > 0;
	bool has_balance_now = new_balance > 0;
	if (!had_balance && has_balance_now)
	{
		// Account gained balance
		accounts_with_balance.push_back (account);
		balance_lookup.insert (account);
	}
	else if (had_balance && !has_balance_now)
	{
		// Account lost balance; linear erase is acceptable for benchmark-sized pools
		auto it = std::find (accounts_with_balance.begin (), accounts_with_balance.end (), account);
		if (it != accounts_with_balance.end ())
		{
			accounts_with_balance.erase (it);
		}
		balance_lookup.erase (account);
	}
}
// Returns the tracked balance for `account`; unknown accounts report zero.
nano::uint128_t account_pool::get_balance (nano::account const & account)
{
	if (auto found = balances.find (account); found != balances.end ())
	{
		return found->second;
	}
	return 0;
}
// True iff the account is in the funded-accounts index (i.e. has a non-zero balance).
bool account_pool::has_balance (nano::account const & account)
{
	return balance_lookup.find (account) != balance_lookup.end ();
}
// Number of accounts currently holding a non-zero balance
size_t account_pool::accounts_with_balance_count () const
{
	return accounts_with_balance.size ();
}
// Total number of generated accounts, funded or not
size_t account_pool::total_accounts () const
{
	return keys.size ();
}
// Snapshot (by value) of all accounts currently holding a non-zero balance
std::vector<nano::account> account_pool::get_accounts_with_balance () const
{
	return accounts_with_balance;
}
// Records the starting balance for `account` and, when it is non-zero,
// registers the account in the funded-accounts index (idempotently).
void account_pool::set_initial_balance (nano::account const & account, nano::uint128_t balance)
{
	balances[account] = balance;
	if (balance > 0 && balance_lookup.count (account) == 0)
	{
		accounts_with_balance.push_back (account);
		balance_lookup.insert (account);
	}
}
// Records the latest block hash (frontier) for `account`
void account_pool::set_frontier (nano::account const & account, nano::block_hash const & frontier)
{
	frontiers[account] = frontier;
}
// Returns the recorded frontier for `account`; accounts with no blocks yet
// report the zero hash.
nano::block_hash account_pool::get_frontier (nano::account const & account) const
{
	if (auto found = frontiers.find (account); found != frontiers.end ())
	{
		return found->second;
	}
	return nano::block_hash (0);
}
/*
 * benchmark_config
 */
// Builds a benchmark_config from CLI options. Numeric options arrive as strings
// and are converted with std::stoull; absent options keep their defaults.
benchmark_config benchmark_config::parse (boost::program_options::variables_map const & vm)
{
	benchmark_config config;
	// Assigns a numeric option into `target` only when the option was supplied
	auto assign_numeric = [&vm] (char const * option, auto & target) {
		if (vm.count (option))
		{
			target = std::stoull (vm[option].as<std::string> ());
		}
	};
	assign_numeric ("accounts", config.num_accounts);
	assign_numeric ("iterations", config.num_iterations);
	assign_numeric ("batch_size", config.batch_size);
	if (vm.count ("cementing_mode"))
	{
		auto mode_text = vm["cementing_mode"].as<std::string> ();
		if (mode_text == "root")
		{
			config.cementing_mode = cementing_mode::root;
		}
		else if (mode_text == "sequential")
		{
			config.cementing_mode = cementing_mode::sequential;
		}
		else
		{
			// Unknown value: warn and keep the default rather than aborting
			std::cerr << "Invalid cementing mode: " << mode_text << ". Using default (sequential).\n";
		}
	}
	return config;
}
// Stores the node under test and the parsed benchmark configuration
benchmark_base::benchmark_base (std::shared_ptr<nano::node> node_a, benchmark_config const & config_a) :
	node (node_a), config (config_a)
{
}
/*
 * Prepares the ledger for benchmarking by transferring a configurable fraction of the
 * genesis funds to a single random account.
 * This creates a clean starting state where:
 * - One account holds the distributed balance (simulating a funded account)
 * - All other accounts start with zero balance
 * - The genesis account retains the remainder (e.g. as voting weight)
 * - The funded account can then distribute funds to other accounts during the benchmark
 *
 * Algorithm:
 * 1. Select a random account from the pool to be the initial holder
 * 2. Create a send block from the genesis account sending the requested fraction of its balance
 * 3. Create an open block for the selected account to receive those funds
 * 4. Process both blocks to establish the initial state
 */
// Distributes `distribution_percentage` (0.0-1.0) of the genesis balance to one
// random pool account via a send + open block pair, then records the new state
// in the account pool.
void benchmark_base::setup_genesis_distribution (double distribution_percentage)
{
	std::cout << "Setting up genesis distribution...\n";
	// Get genesis balance and latest block
	nano::block_hash genesis_latest (node->latest (nano::dev::genesis_key.pub));
	// NOTE(review): assumes an untouched dev-network genesis holding the full supply — confirm
	nano::uint128_t genesis_balance (std::numeric_limits<nano::uint128_t>::max ());
	// Calculate amount to send using 256-bit arithmetic to avoid precision loss
	// (percentage is scaled to an integer multiplier with 1e6 resolution)
	nano::uint256_t genesis_balance_256 = genesis_balance;
	nano::uint256_t multiplier = static_cast<nano::uint256_t> (distribution_percentage * 1000000);
	nano::uint256_t send_amount_256 = (genesis_balance_256 * multiplier) / 1000000;
	release_assert (send_amount_256 <= std::numeric_limits<nano::uint128_t>::max (), "send amount overflows uint128_t");
	nano::uint128_t send_amount = static_cast<nano::uint128_t> (send_amount_256);
	nano::uint128_t remaining_balance = genesis_balance - send_amount;
	// Select random account to receive genesis funds
	nano::account target_account = pool.get_random_account ();
	auto & target_keypair = pool.get_keypair (target_account);
	// Create send block from genesis to target account
	nano::block_builder builder;
	auto send = builder.state ()
	.account (nano::dev::genesis_key.pub)
	.previous (genesis_latest)
	.representative (nano::dev::genesis_key.pub)
	.balance (remaining_balance)
	.link (target_account)
	.sign (nano::dev::genesis_key.prv, nano::dev::genesis_key.pub)
	.work (0)
	.build ();
	// Create open block for target account
	auto open = builder.state ()
	.account (target_account)
	.previous (0)
	.representative (target_account)
	.balance (send_amount)
	.link (send->hash ())
	.sign (target_keypair.prv, target_keypair.pub)
	.work (0)
	.build ();
	// Process blocks; both must succeed or the benchmark setup is invalid
	auto result1 = node->process (send);
	release_assert (result1 == nano::block_status::progress, to_string (result1));
	auto result2 = node->process (open);
	release_assert (result2 == nano::block_status::progress, to_string (result2));
	// Update pool balance tracking
	pool.set_initial_balance (target_account, send_amount);
	// Initialize frontier for target account
	pool.set_frontier (target_account, open->hash ());
	std::cout << fmt::format ("Genesis distribution complete: {:.1f}% distributed, {:.1f}% retained for voting\n",
	distribution_percentage * 100.0, (1.0 - distribution_percentage) * 100.0);
}
/*
* Generates random transfer transactions between accounts with no specific dependency pattern.
* This simulates typical network activity with independent transactions.
*
* Algorithm:
* 1. For each transfer (batch_size/2 transfers, since each creates 2 blocks):
* a. Select a random sender account that has balance
* b. Select a random receiver account (can be any account)
* c. Generate a random transfer amount (up to sender's balance)
* d. Create a send block from sender
* e. Create a receive/open block for receiver
* 2. Update account balances and frontiers after each transfer
* 3. Continue until batch_size blocks are generated or no accounts have balance
*
* The resulting blocks have no intentional dependency structure beyond the natural
* send->receive pairs, making this suitable for testing sequential block processing.
*/
// Generates up to batch_size blocks as random send/receive pairs between pool
// accounts, updating pool balances and frontiers as it goes. See the comment
// block above for the full algorithm description.
// Note: the original computed work-root locals (work_root / receiver_work_root)
// that were never used, since work is hard-coded to 0; that dead code is removed.
std::deque<std::shared_ptr<nano::block>> benchmark_base::generate_random_transfers ()
{
	std::deque<std::shared_ptr<nano::block>> blocks;
	std::random_device rd;
	std::mt19937 gen (rd ());
	// Generate batch_size number of transfer pairs (send + receive = 2 blocks each)
	size_t transfers_generated = 0;
	nano::block_builder builder;
	while (transfers_generated < config.batch_size / 2) // Divide by 2 since each transfer creates 2 blocks
	{
		if (pool.accounts_with_balance_count () == 0)
		{
			std::cout << "No accounts with balance remaining, stopping...\n";
			break;
		}
		// Get random sender with balance
		nano::account sender = pool.get_random_account_with_balance ();
		auto & sender_keypair = pool.get_keypair (sender);
		nano::uint128_t sender_balance = pool.get_balance (sender);
		if (sender_balance == 0)
			continue;
		// Get random receiver (may be any account, including unopened ones)
		nano::account receiver = pool.get_random_account ();
		auto & receiver_keypair = pool.get_keypair (receiver);
		// Random transfer amount (but not more than sender balance)
		// NOTE(review): convert_to<uint64_t> truncates balances above 2^64 — assumed acceptable here
		std::uniform_int_distribution<uint64_t> amount_dist (1, sender_balance.convert_to<uint64_t> ());
		nano::uint128_t transfer_amount = std::min (static_cast<nano::uint128_t> (amount_dist (gen)), sender_balance);
		// Sender frontier; the zero hash means this is the account's first block
		nano::block_hash sender_frontier = pool.get_frontier (sender);
		// Create send block
		nano::uint128_t new_sender_balance = sender_balance - transfer_amount;
		auto send = builder.state ()
		.account (sender)
		.previous (sender_frontier)
		.representative (sender)
		.balance (new_sender_balance)
		.link (receiver)
		.sign (sender_keypair.prv, sender_keypair.pub)
		.work (0)
		.build ();
		blocks.push_back (send);
		pool.set_frontier (sender, send->hash ());
		pool.update_balance (sender, new_sender_balance);
		// Create receive block (an open block when the receiver has no frontier yet)
		nano::uint128_t receiver_balance = pool.get_balance (receiver);
		nano::uint128_t new_receiver_balance = receiver_balance + transfer_amount;
		nano::block_hash receiver_frontier = pool.get_frontier (receiver);
		auto receive = builder.state ()
		.account (receiver)
		.previous (receiver_frontier)
		.representative (receiver)
		.balance (new_receiver_balance)
		.link (send->hash ())
		.sign (receiver_keypair.prv, receiver_keypair.pub)
		.work (0)
		.build ();
		blocks.push_back (receive);
		pool.set_frontier (receiver, receive->hash ());
		pool.update_balance (receiver, new_receiver_balance);
		transfers_generated++;
	}
	std::cout << fmt::format ("Generated {} blocks\n", blocks.size ());
	return blocks;
}
/*
* Generates blocks in a dependency tree structure optimized for root mode cementing.
* All blocks are organized so they become dependencies of a single root block.
*
* Algorithm:
* 1. Random transfer phase (80% of blocks):
* - Generate random transfers between accounts (same as generate_random_transfers)
* - Creates a natural web of dependencies through send/receive pairs
* 2. Convergence phase (20% of blocks):
* - All accounts with balance send their entire balance to a collector account
* - The collector receives all these sends in sequence
* - The final receive block becomes the root that depends on all previous blocks
*
* The last block in the returned deque is the ultimate root that depends on all others.
* Cementing this single block will cascade and cement all blocks in the tree.
*/
// Generates blocks whose dependency structure converges onto a single collector
// account, so cementing the collector's final receive cascades to all prior blocks.
// See the comment block above for the full two-phase algorithm description.
std::deque<std::shared_ptr<nano::block>> benchmark_base::generate_dependent_chain ()
{
	std::deque<std::shared_ptr<nano::block>> blocks;
	std::random_device rd;
	std::mt19937 gen (rd ());
	nano::block_builder builder;
	// Phase 1: Random transfers (80% of blocks)
	size_t random_transfer_blocks = config.batch_size * 0.8;
	size_t transfers_to_generate = random_transfer_blocks / 2; // Each transfer creates 2 blocks
	std::cout << fmt::format ("Generating dependent chain: {} random transfers, then convergence\n",
	transfers_to_generate);
	// Phase 1: Generate random transfers (same logic as generate_random_transfers)
	size_t transfers_generated = 0;
	while (transfers_generated < transfers_to_generate && pool.accounts_with_balance_count () > 0)
	{
		// Get random sender with balance
		nano::account sender = pool.get_random_account_with_balance ();
		auto & sender_keypair = pool.get_keypair (sender);
		nano::uint128_t sender_balance = pool.get_balance (sender);
		if (sender_balance == 0)
			continue;
		// Get random receiver
		nano::account receiver = pool.get_random_account ();
		auto & receiver_keypair = pool.get_keypair (receiver);
		// Random transfer amount (but not more than sender balance)
		std::uniform_int_distribution<uint64_t> amount_dist (1, sender_balance.convert_to<uint64_t> ());
		nano::uint128_t transfer_amount = std::min (static_cast<nano::uint128_t> (amount_dist (gen)), sender_balance);
		// Get or initialize sender frontier (zero hash means first block for the account)
		nano::block_hash sender_frontier = pool.get_frontier (sender);
		// Create send block
		nano::uint128_t new_sender_balance = sender_balance - transfer_amount;
		auto send = builder.state ()
		.account (sender)
		.previous (sender_frontier)
		.representative (sender)
		.balance (new_sender_balance)
		.link (receiver)
		.sign (sender_keypair.prv, sender_keypair.pub)
		.work (0)
		.build ();
		blocks.push_back (send);
		pool.set_frontier (sender, send->hash ());
		pool.update_balance (sender, new_sender_balance);
		// Create receive block (an open block when the receiver has no frontier yet)
		nano::uint128_t receiver_balance = pool.get_balance (receiver);
		nano::uint128_t new_receiver_balance = receiver_balance + transfer_amount;
		nano::block_hash receiver_frontier = pool.get_frontier (receiver);
		auto receive = builder.state ()
		.account (receiver)
		.previous (receiver_frontier)
		.representative (receiver)
		.balance (new_receiver_balance)
		.link (send->hash ())
		.sign (receiver_keypair.prv, receiver_keypair.pub)
		.work (0)
		.build ();
		blocks.push_back (receive);
		pool.set_frontier (receiver, receive->hash ());
		pool.update_balance (receiver, new_receiver_balance);
		transfers_generated++;
	}
	// Phase 2: Convergence - all accounts with balance send to a collector
	std::cout << fmt::format ("Converging {} accounts to collector account\n",
	pool.accounts_with_balance_count ());
	// Select a collector account (can be new or existing)
	nano::account collector = pool.get_random_account ();
	auto & collector_keypair = pool.get_keypair (collector);
	nano::block_hash collector_frontier = pool.get_frontier (collector);
	nano::uint128_t collector_balance = pool.get_balance (collector);
	// Collect all accounts with balance (except collector)
	std::vector<std::pair<nano::account, nano::uint128_t>> accounts_to_drain;
	auto accounts_with_balance = pool.get_accounts_with_balance ();
	for (auto const & account : accounts_with_balance)
	{
		if (account != collector)
		{
			nano::uint128_t balance = pool.get_balance (account);
			accounts_to_drain.push_back ({ account, balance });
		}
	}
	// All accounts send a random amount to collector
	std::vector<std::pair<nano::block_hash, nano::uint128_t>> convergence_sends;
	for (auto const & [account, balance] : accounts_to_drain)
	{
		auto & account_keypair = pool.get_keypair (account);
		nano::block_hash account_frontier = pool.get_frontier (account);
		// Send random amount to collector (between 1 and full balance)
		std::uniform_int_distribution<uint64_t> amount_dist (1, balance.convert_to<uint64_t> ());
		nano::uint128_t send_amount = static_cast<nano::uint128_t> (amount_dist (gen));
		nano::uint128_t remaining_balance = balance - send_amount;
		auto send = builder.state ()
		.account (account)
		.previous (account_frontier)
		.representative (account)
		.balance (remaining_balance)
		.link (collector)
		.sign (account_keypair.prv, account_keypair.pub)
		.work (0)
		.build ();
		blocks.push_back (send);
		convergence_sends.push_back ({ send->hash (), send_amount });
		pool.set_frontier (account, send->hash ());
		pool.update_balance (account, remaining_balance);
	}
	// Collector receives all sends (these become the root blocks); each receive
	// chains on the previous one, so the last receive depends on everything above
	for (auto const & [send_hash, amount] : convergence_sends)
	{
		collector_balance += amount;
		auto receive = builder.state ()
		.account (collector)
		.previous (collector_frontier)
		.representative (collector)
		.balance (collector_balance)
		.link (send_hash)
		.sign (collector_keypair.prv, collector_keypair.pub)
		.work (0)
		.build ();
		blocks.push_back (receive);
		collector_frontier = receive->hash ();
	}
	// Update collector state
	pool.set_frontier (collector, collector_frontier);
	pool.update_balance (collector, collector_balance);
	std::cout << fmt::format ("Generated {} blocks in dependent chain topology\n", blocks.size ());
	return blocks;
}
/*
* Generates independent blocks - one block per account with no dependencies.
* Returns sends and opens separately so sends can be confirmed first, then opens processed for elections.
*/
// Generates up to batch_size independent send/open pairs: each open targets a
// brand-new (untracked) account, so opens have no dependencies on each other.
// Returns { sends, opens } so sends can be confirmed before opens are processed.
std::pair<std::deque<std::shared_ptr<nano::block>>, std::deque<std::shared_ptr<nano::block>>> benchmark_base::generate_independent_blocks ()
{
	std::deque<std::shared_ptr<nano::block>> sends;
	std::deque<std::shared_ptr<nano::block>> opens;
	nano::block_builder builder;
	// Find accounts with balance to send from
	auto accounts_with_balance = pool.get_accounts_with_balance ();
	if (accounts_with_balance.empty ())
	{
		std::cout << "No accounts with balance available\n";
		return { sends, opens };
	}
	// Generate independent blocks up to batch_size
	// Note: accounts_with_balance is a snapshot; balances are re-read fresh from the pool
	for (size_t i = 0; i < config.batch_size && !accounts_with_balance.empty (); ++i)
	{
		// Pick a sender with balance (round-robin over the snapshot)
		nano::account sender = accounts_with_balance[i % accounts_with_balance.size ()];
		auto & sender_keypair = pool.get_keypair (sender);
		nano::uint128_t sender_balance = pool.get_balance (sender);
		if (sender_balance == 0)
			continue;
		// Create a brand new receiver account
		nano::keypair receiver_keypair;
		nano::account receiver = receiver_keypair.pub;
		// Send a small amount to the new account
		nano::uint128_t transfer_amount = std::min (sender_balance, nano::uint128_t (1000000)); // Small fixed amount
		nano::block_hash sender_frontier = pool.get_frontier (sender);
		nano::uint128_t new_sender_balance = sender_balance - transfer_amount;
		// Create send block
		auto send = builder.state ()
		.account (sender)
		.previous (sender_frontier)
		.representative (sender)
		.balance (new_sender_balance)
		.link (receiver)
		.sign (sender_keypair.prv, sender_keypair.pub)
		.work (0)
		.build ();
		// Create open block for new receiver (this is the independent block)
		auto open = builder.state ()
		.previous (0) // First block for this account
		.representative (receiver)
		.balance (transfer_amount)
		.link (send->hash ())
		.sign (receiver_keypair.prv, receiver_keypair.pub)
		.work (0)
		.build ();
		// Separate sends and opens
		sends.push_back (send);
		opens.push_back (open);
		// Update pool state for sender only (receiver is new account not tracked)
		pool.set_frontier (sender, send->hash ());
		pool.update_balance (sender, new_sender_balance);
	}
	std::cout << fmt::format ("Generated {} sends and {} opens\n", sends.size (), opens.size ());
	return { sends, opens };
}
}

View file

@ -0,0 +1,96 @@
#pragma once
#include <nano/lib/blocks.hpp>
#include <nano/node/node.hpp>
#include <nano/secure/common.hpp>
#include <boost/program_options.hpp>
#include <atomic>
#include <memory>
#include <random>
#include <unordered_map>
#include <unordered_set>
#include <vector>
namespace nano::cli
{
// Cementing strategy for benchmarks; selected via the --cementing_mode CLI option ('sequential' or 'root')
enum class cementing_mode
{
// Cement confirmed blocks one by one, in order
sequential,
// Cement via a chain's root/top block — presumably dependents cement implicitly; confirm against the benchmark implementations
root
};
// In-memory bookkeeping of benchmark accounts: key pairs, balances and chain
// frontiers. Lets block generators pick senders and maintain local ledger
// state (via set_frontier / update_balance) without querying the node.
class account_pool
{
private:
std::vector<nano::keypair> keys;
// Maps an account (public key) back to its full key pair for signing
std::unordered_map<nano::account, nano::keypair> account_to_keypair;
// Last known balance per account
std::unordered_map<nano::account, nano::uint128_t> balances;
// Accounts believed to hold a balance; balance_lookup presumably mirrors this for O(1) membership tests — confirm in the implementation
std::vector<nano::account> accounts_with_balance;
std::unordered_set<nano::account> balance_lookup;
// Head block hash of each account's chain
std::unordered_map<nano::account, nano::block_hash> frontiers;
// Randomness source for the get_random_* helpers
std::random_device rd;
std::mt19937 gen;
public:
account_pool ();
// Registers `count` accounts in the pool
void generate_accounts (size_t count);
nano::account get_random_account_with_balance ();
nano::account get_random_account ();
// Returns the signing key pair for a pooled account
nano::keypair const & get_keypair (nano::account const & account);
// Records the account's new balance after a generated block
void update_balance (nano::account const & account, nano::uint128_t new_balance);
nano::uint128_t get_balance (nano::account const & account);
bool has_balance (nano::account const & account);
size_t accounts_with_balance_count () const;
size_t total_accounts () const;
std::vector<nano::account> get_accounts_with_balance () const;
void set_initial_balance (nano::account const & account, nano::uint128_t balance);
// Records the account's new head block after a generated block
void set_frontier (nano::account const & account, nano::block_hash const & frontier);
nano::block_hash get_frontier (nano::account const & account) const;
};
// Tunable parameters shared by all benchmarks, populated from the CLI options
// (--accounts, --iterations, --batch_size, --cementing_mode) via parse ().
struct benchmark_config
{
// NOTE(review): the CLI help text advertises defaults of 500000 accounts / 10 iterations, which disagrees with these values — confirm which are intended.
size_t num_accounts{ 150000 };
size_t num_iterations{ 5 };
size_t batch_size{ 250000 };
nano::cli::cementing_mode cementing_mode{ nano::cli::cementing_mode::sequential };
// Builds a config from parsed program options
static benchmark_config parse (boost::program_options::variables_map const & vm);
};
// Shared scaffolding for all benchmarks: owns the account pool, the node
// under test, the parsed config and the common block generation helpers.
class benchmark_base
{
protected:
account_pool pool;
std::shared_ptr<nano::node> node;
benchmark_config config;
// Common metrics
// Atomic since it is presumably updated from node processing callbacks on other threads — confirm in the benchmark implementations
std::atomic<size_t> processed_blocks_count{ 0 };
public:
benchmark_base (std::shared_ptr<nano::node> node_a, benchmark_config const & config_a);
virtual ~benchmark_base () = default;
// Transfers genesis balance to a random account to prepare for benchmarking
void setup_genesis_distribution (double distribution_percentage = 1.0);
// Generates random transfer pairs between accounts with no specific dependency structure
std::deque<std::shared_ptr<nano::block>> generate_random_transfers ();
// Generates blocks that are dependencies of a single root block (last in deque)
std::deque<std::shared_ptr<nano::block>> generate_dependent_chain ();
// Generates independent blocks - returns sends and opens separately
std::pair<std::deque<std::shared_ptr<nano::block>>, std::deque<std::shared_ptr<nano::block>>> generate_independent_blocks ();
};
// Benchmark entry points - individual implementations are in separate cpp files
// Each takes the parsed CLI options and the node data directory; dispatched from main () by the corresponding --benchmark_* flag.
void run_block_processing_benchmark (boost::program_options::variables_map const & vm, std::filesystem::path const & data_path);
void run_cementing_benchmark (boost::program_options::variables_map const & vm, std::filesystem::path const & data_path);
void run_elections_benchmark (boost::program_options::variables_map const & vm, std::filesystem::path const & data_path);
void run_pipeline_benchmark (boost::program_options::variables_map const & vm, std::filesystem::path const & data_path);
}

View file

@ -6,6 +6,7 @@
#include <nano/lib/thread_runner.hpp>
#include <nano/lib/utility.hpp>
#include <nano/lib/work_version.hpp>
#include <nano/nano_node/benchmarks/benchmarks.hpp>
#include <nano/nano_node/daemon.hpp>
#include <nano/node/active_elections.hpp>
#include <nano/node/cementing_set.hpp>
@ -132,6 +133,10 @@ int main (int argc, char * const * argv)
("debug_profile_bootstrap", "Profile bootstrap style blocks processing (at least 10GB of free storage space required)")
("debug_profile_sign", "Profile signature generation")
("debug_profile_process", "Profile active blocks processing (only for nano_dev_network)")
("benchmark_block_processing", "Run block processing throughput benchmark")
("benchmark_cementing", "Run cementing throughput benchmark")
("benchmark_elections", "Run elections confirmation and cementing benchmark")
("benchmark_pipeline", "Run full confirmation pipeline benchmark")
("debug_profile_votes", "Profile votes processing (only for nano_dev_network)")
("debug_profile_frontiers_confirmation", "Profile frontiers confirmation speed (only for nano_dev_network)")
("debug_random_feed", "Generates output to RNG test suites")
@ -149,6 +154,10 @@ int main (int argc, char * const * argv)
("difficulty", boost::program_options::value<std::string> (), "Defines <difficulty> for OpenCL command, HEX")
("multiplier", boost::program_options::value<std::string> (), "Defines <multiplier> for work generation. Overrides <difficulty>")
("count", boost::program_options::value<std::string> (), "Defines <count> for various commands")
("accounts", boost::program_options::value<std::string> (), "Defines <accounts> for throughput benchmark (default 500000)")
("iterations", boost::program_options::value<std::string> (), "Defines <iterations> for throughput benchmark (default 10)")
("batch_size", boost::program_options::value<std::string> (), "Defines <batch_size> for throughput benchmark (default 250000)")
("cementing_mode", boost::program_options::value<std::string> (), "Defines cementing mode for benchmark: 'sequential' or 'root' (default sequential)")
("pow_sleep_interval", boost::program_options::value<std::string> (), "Defines the amount to sleep inbetween each pow calculation attempt")
("address_column", boost::program_options::value<std::string> (), "Defines which column the addresses are located, 0 indexed (check --debug_output_last_backtrace_dump output)")
("silent", "Silent command execution")
@ -1047,6 +1056,22 @@ int main (int argc, char * const * argv)
std::cout << boost::str (boost::format ("%|1$ 12d| us \n%2% blocks per second\n") % time % (max_blocks * 1000000 / time));
release_assert (node->ledger.block_count () == max_blocks + 1);
}
else if (vm.count ("benchmark_block_processing"))
{
nano::cli::run_block_processing_benchmark (vm, data_path);
}
else if (vm.count ("benchmark_cementing"))
{
nano::cli::run_cementing_benchmark (vm, data_path);
}
else if (vm.count ("benchmark_elections"))
{
nano::cli::run_elections_benchmark (vm, data_path);
}
else if (vm.count ("benchmark_pipeline"))
{
nano::cli::run_pipeline_benchmark (vm, data_path);
}
else if (vm.count ("debug_profile_votes"))
{
nano::block_builder builder;

View file

@ -17,6 +17,7 @@ enum class block_source
local,
forced,
election,
test,
};
std::string_view to_string (block_source);

View file

@ -54,7 +54,7 @@ nano::cementing_set::~cementing_set ()
debug_assert (!thread.joinable ());
}
void nano::cementing_set::add (nano::block_hash const & hash, std::shared_ptr<nano::election> const & election)
bool nano::cementing_set::add (nano::block_hash const & hash, std::shared_ptr<nano::election> const & election)
{
bool added = false;
{
@ -71,6 +71,7 @@ void nano::cementing_set::add (nano::block_hash const & hash, std::shared_ptr<na
{
stats.inc (nano::stat::type::cementing_set, nano::stat::detail::duplicate);
}
return added;
}
void nano::cementing_set::start ()
@ -117,6 +118,12 @@ std::size_t nano::cementing_set::size () const
return set.size () + current.size ();
}
std::size_t nano::cementing_set::deferred_size () const
{
std::lock_guard lock{ mutex };
return deferred.size ();
}
void nano::cementing_set::run ()
{
std::unique_lock lock{ mutex };

View file

@ -61,10 +61,12 @@ public:
void stop ();
// Adds a block to the set of blocks to be confirmed
void add (nano::block_hash const & hash, std::shared_ptr<nano::election> const & election = nullptr);
bool add (nano::block_hash const & hash, std::shared_ptr<nano::election> const & election = nullptr);
// Added blocks will remain in this set until after ledger has them marked as confirmed.
bool contains (nano::block_hash const & hash) const;
std::size_t size () const;
std::size_t deferred_size () const;
nano::container_info container_info () const;
@ -119,6 +121,7 @@ private:
ordered_entries set;
// Blocks that could not be cemented immediately (e.g. waiting for rollbacks to complete)
ordered_entries deferred;
// Blocks that are being cemented in the current batch
std::unordered_set<nano::block_hash> current;

View file

@ -1,5 +1,6 @@
#pragma once
#include <nano/lib/numbers.hpp>
#include <nano/node/fwd.hpp>
#include <memory>