From ca6e4f171d2c268f16d1a0cdb182acb88834e3ac Mon Sep 17 00:00:00 2001
From: Sergey Kroshnin
Date: Wed, 6 Nov 2019 22:59:08 +0300
Subject: [PATCH] Limit bulk pulls memory usage in lazy bootstrap

flush less pulls from lazy_pulls to pulls
---
 nano/node/bootstrap/bootstrap.cpp | 53 +++++++++++++++++--------------
 1 file changed, 30 insertions(+), 23 deletions(-)

diff --git a/nano/node/bootstrap/bootstrap.cpp b/nano/node/bootstrap/bootstrap.cpp
index 68e238b05..9df1f7392 100644
--- a/nano/node/bootstrap/bootstrap.cpp
+++ b/nano/node/bootstrap/bootstrap.cpp
@@ -819,33 +819,40 @@ void nano::bootstrap_attempt::lazy_requeue (nano::block_hash const & hash_a, nan
 void nano::bootstrap_attempt::lazy_pull_flush ()
 {
 	assert (!mutex.try_lock ());
-	last_lazy_flush = std::chrono::steady_clock::now ();
-	nano::lock_guard<std::mutex> lazy_lock (lazy_mutex);
-	assert (node->network_params.bootstrap.lazy_max_pull_blocks <= std::numeric_limits<nano::pull_info::count_t>::max ());
-	nano::pull_info::count_t batch_count (node->network_params.bootstrap.lazy_max_pull_blocks);
-	if (total_blocks > nano::bootstrap_limits::lazy_batch_pull_count_resize_blocks_limit && !lazy_blocks.empty ())
+	static size_t const max_pulls (nano::bootstrap_limits::bootstrap_connection_scale_target_blocks_lazy * 3);
+	if (pulls.size () < max_pulls)
 	{
-		double lazy_blocks_ratio (total_blocks / lazy_blocks.size ());
-		if (lazy_blocks_ratio > nano::bootstrap_limits::lazy_batch_pull_count_resize_ratio)
+		last_lazy_flush = std::chrono::steady_clock::now ();
+		nano::lock_guard<std::mutex> lazy_lock (lazy_mutex);
+		assert (node->network_params.bootstrap.lazy_max_pull_blocks <= std::numeric_limits<nano::pull_info::count_t>::max ());
+		nano::pull_info::count_t batch_count (node->network_params.bootstrap.lazy_max_pull_blocks);
+		if (total_blocks > nano::bootstrap_limits::lazy_batch_pull_count_resize_blocks_limit && !lazy_blocks.empty ())
 		{
-			// Increasing blocks ratio weight as more important (^3). Small batch count should lower blocks ratio below target
-			double lazy_blocks_factor (std::pow (lazy_blocks_ratio / nano::bootstrap_limits::lazy_batch_pull_count_resize_ratio, 3.0));
-			// Decreasing total block count weight as less important (sqrt)
-			double total_blocks_factor (std::sqrt (total_blocks / nano::bootstrap_limits::lazy_batch_pull_count_resize_blocks_limit));
-			uint32_t batch_count_min (node->network_params.bootstrap.lazy_max_pull_blocks / (lazy_blocks_factor * total_blocks_factor));
-			batch_count = std::max (node->network_params.bootstrap.lazy_min_pull_blocks, batch_count_min);
+			double lazy_blocks_ratio (total_blocks / lazy_blocks.size ());
+			if (lazy_blocks_ratio > nano::bootstrap_limits::lazy_batch_pull_count_resize_ratio)
+			{
+				// Increasing blocks ratio weight as more important (^3). Small batch count should lower blocks ratio below target
+				double lazy_blocks_factor (std::pow (lazy_blocks_ratio / nano::bootstrap_limits::lazy_batch_pull_count_resize_ratio, 3.0));
+				// Decreasing total block count weight as less important (sqrt)
+				double total_blocks_factor (std::sqrt (total_blocks / nano::bootstrap_limits::lazy_batch_pull_count_resize_blocks_limit));
+				uint32_t batch_count_min (node->network_params.bootstrap.lazy_max_pull_blocks / (lazy_blocks_factor * total_blocks_factor));
+				batch_count = std::max (node->network_params.bootstrap.lazy_min_pull_blocks, batch_count_min);
+			}
+		}
+		size_t count (0);
+		auto transaction (node->store.tx_begin_read ());
+		while (!lazy_pulls.empty () && count < max_pulls)
+		{
+			auto const & pull_start (lazy_pulls.front ());
+			// Recheck if block was already processed
+			if (lazy_blocks.find (pull_start.first) == lazy_blocks.end () && !node->store.block_exists (transaction, pull_start.first))
+			{
+				pulls.emplace_back (pull_start.first, pull_start.first, nano::block_hash (0), batch_count, pull_start.second);
+				++count;
+			}
+			lazy_pulls.pop_front ();
 		}
 	}
-	auto transaction (node->store.tx_begin_read ());
-	for (auto & pull_start : lazy_pulls)
-	{
-		// Recheck if block was already processed
-		if (lazy_blocks.find (pull_start.first) == lazy_blocks.end () && !node->store.block_exists (transaction, pull_start.first))
-		{
-			pulls.emplace_back (pull_start.first, pull_start.first, nano::block_hash (0), batch_count, pull_start.second);
-		}
-	}
-	lazy_pulls.clear ();
 }
 
 bool nano::bootstrap_attempt::lazy_finished ()
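Note for reviewers: below is a minimal standalone sketch of the adaptive batch sizing that this hunk keeps inside the new max_pulls guard. As the total number of downloaded blocks outgrows the set of tracked lazy blocks, each queued pull requests fewer blocks. The constants and the free function batch_count_for are illustrative stand-ins assumed only for this example; the real values come from network_params.bootstrap and nano::bootstrap_limits.

// Illustrative sketch (not part of the patch): adaptive batch sizing used when
// flushing lazy_pulls into pulls. Constant values are assumptions for demonstration.
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <iostream>

constexpr double resize_blocks_limit = 10000.0; // stand-in for lazy_batch_pull_count_resize_blocks_limit
constexpr double resize_ratio = 2.0;            // stand-in for lazy_batch_pull_count_resize_ratio
constexpr uint32_t max_pull_blocks = 512;       // stand-in for lazy_max_pull_blocks
constexpr uint32_t min_pull_blocks = 32;        // stand-in for lazy_min_pull_blocks

uint32_t batch_count_for (double total_blocks, double lazy_blocks)
{
	uint32_t batch_count = max_pull_blocks;
	if (total_blocks > resize_blocks_limit && lazy_blocks > 0.0)
	{
		double ratio = total_blocks / lazy_blocks;
		if (ratio > resize_ratio)
		{
			// Blocks ratio weighted heavily (^3): many downloaded blocks per tracked
			// lazy block means each pull should request fewer blocks
			double blocks_factor = std::pow (ratio / resize_ratio, 3.0);
			// Total block count weighted lightly (sqrt)
			double total_factor = std::sqrt (total_blocks / resize_blocks_limit);
			auto candidate = static_cast<uint32_t> (max_pull_blocks / (blocks_factor * total_factor));
			batch_count = std::max (min_pull_blocks, candidate);
		}
	}
	return batch_count;
}

int main ()
{
	std::cout << batch_count_for (5000.0, 4000.0) << '\n';   // below resize limit: stays at the maximum
	std::cout << batch_count_for (30000.0, 12000.0) << '\n'; // ratio 2.5: batch size shrinks
	std::cout << batch_count_for (200000.0, 10000.0) << '\n'; // large ratio: clamps to the minimum
}

With the assumed constants, the batch size stays at the maximum until the resize limit is crossed, shrinks as the blocks ratio grows, and clamps at the minimum. The patch then queues at most max_pulls such pulls per flush and leaves the rest in lazy_pulls, instead of flushing the whole deque and clearing it.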