Various consistency changes (#3058)

* Various consistency changes

* Address Colin's review comments
Wesley Shillingford 2021-02-17 15:36:27 +00:00 committed by GitHub
commit 0a64feb49e
65 changed files with 559 additions and 610 deletions
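The most frequent change in the hunks below is mechanical: parameters written `const std::string &` become `std::string const &`, moving the qualifier to the right of the type ("east const"). The two spellings declare the identical type, so the change is purely stylistic. A minimal sketch illustrating the equivalence; the `greet_*` functions are hypothetical and not part of this commit:

#include <iostream>
#include <string>
#include <type_traits>

// Hypothetical example: the two parameter spellings below name the same type,
// only the placement of const differs.
void greet_west (const std::string & name)
{
	std::cout << "hello " << name << '\n';
}
void greet_east (std::string const & name)
{
	std::cout << "hello " << name << '\n';
}

static_assert (std::is_same<const std::string &, std::string const &>::value, "west-const and east-const spell the same type");

int main ()
{
	greet_west ("alice");
	greet_east ("bob");
}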

View file

@ -1892,7 +1892,7 @@ size_t nano::block_uniquer::size ()
return blocks.size ();
}
std::unique_ptr<nano::container_info_component> nano::collect_container_info (block_uniquer & block_uniquer, const std::string & name)
std::unique_ptr<nano::container_info_component> nano::collect_container_info (block_uniquer & block_uniquer, std::string const & name)
{
auto count = block_uniquer.size ();
auto sizeof_element = sizeof (block_uniquer::value_type);

View file

@ -413,7 +413,7 @@ private:
static unsigned constexpr cleanup_count = 2;
};
std::unique_ptr<container_info_component> collect_container_info (block_uniquer & block_uniquer, const std::string & name);
std::unique_ptr<container_info_component> collect_container_info (block_uniquer & block_uniquer, std::string const & name);
std::shared_ptr<nano::block> deserialize_block (nano::stream &);
std::shared_ptr<nano::block> deserialize_block (nano::stream &, nano::block_type, nano::block_uniquer * = nullptr);

View file

@ -81,7 +81,7 @@ nano::uint128_t nano::rep_weights::get (nano::account const & account_a) const
}
}
std::unique_ptr<nano::container_info_component> nano::collect_container_info (nano::rep_weights const & rep_weights, const std::string & name)
std::unique_ptr<nano::container_info_component> nano::collect_container_info (nano::rep_weights const & rep_weights, std::string const & name)
{
size_t rep_amounts_count;

View file

@ -29,7 +29,7 @@
#endif
#endif
nano::container_info_composite::container_info_composite (const std::string & name) :
nano::container_info_composite::container_info_composite (std::string const & name) :
name (name)
{
}

View file

@ -73,7 +73,7 @@ public:
class container_info_composite : public container_info_component
{
public:
container_info_composite (const std::string & name);
container_info_composite (std::string const & name);
bool is_composite () const override;
void add_component (std::unique_ptr<container_info_component> child);
const std::vector<std::unique_ptr<container_info_component>> & get_children () const;
@ -154,7 +154,7 @@ public:
};
template <typename... T>
std::unique_ptr<container_info_component> collect_container_info (observer_set<T...> & observer_set, const std::string & name)
std::unique_ptr<container_info_component> collect_container_info (observer_set<T...> & observer_set, std::string const & name)
{
size_t count = 0;
{

View file

@ -408,7 +408,7 @@ size_t nano::work_pool::size ()
return pending.size ();
}
std::unique_ptr<nano::container_info_component> nano::collect_container_info (work_pool & work_pool, const std::string & name)
std::unique_ptr<nano::container_info_component> nano::collect_container_info (work_pool & work_pool, std::string const & name)
{
size_t count;
{

View file

@ -82,5 +82,5 @@ public:
nano::observer_set<bool> work_observers;
};
std::unique_ptr<container_info_component> collect_container_info (work_pool & work_pool, const std::string & name);
std::unique_ptr<container_info_component> collect_container_info (work_pool & work_pool, std::string const & name);
}

View file

@ -22,10 +22,7 @@ void my_abort_signal_handler (int signum)
nano::dump_crash_stacktrace ();
nano::create_load_memory_address_files ();
}
}
namespace
{
volatile sig_atomic_t sig_int_or_term = 0;
}

View file

@ -141,17 +141,6 @@ int main (int argc, char * const * argv)
}
auto data_path_it = vm.find ("data_path");
if (data_path_it == vm.end ())
{
std::string error_string;
if (!nano::migrate_working_path (error_string))
{
std::cerr << error_string << std::endl;
return 1;
}
}
boost::filesystem::path data_path ((data_path_it != vm.end ()) ? data_path_it->second.as<std::string> () : nano::working_path ());
auto ec = nano::handle_node_options (vm);
if (ec == nano::error_cli::unknown_command)

View file

@ -122,17 +122,6 @@ int main (int argc, char * const * argv)
}
auto data_path_it = vm.find ("data_path");
if (data_path_it == vm.end ())
{
std::string error_string;
if (!nano::migrate_working_path (error_string))
{
std::cerr << error_string << std::endl;
return 1;
}
}
boost::filesystem::path data_path ((data_path_it != vm.end ()) ? data_path_it->second.as<std::string> () : nano::working_path ());
if (vm.count ("daemon") > 0)
{

View file

@ -272,13 +272,10 @@ int main (int argc, char * const * argv)
}
}
if (!vm.count ("data_path"))
std::vector<std::string> config_overrides;
if (vm.count ("config"))
{
std::string error_string;
if (!nano::migrate_working_path (error_string))
{
throw std::runtime_error (error_string);
}
config_overrides = vm["config"].as<std::vector<std::string>> ();
}
auto ec = nano::handle_node_options (vm);
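
The hunk above removes the RaiBlocks-to-Nano working-path migration from this entry point; what remains collects repeatable --config overrides out of the parsed boost::program_options map. A minimal, self-contained sketch of that collection pattern: only the config_overrides extraction mirrors the diff, while the option registration line is an assumption since this hunk does not show it.

#include <boost/program_options.hpp>
#include <iostream>
#include <string>
#include <vector>

int main (int argc, char * argv[])
{
	namespace po = boost::program_options;
	po::options_description description ("options");
	// Assumed registration: a repeatable --config key=value option collected into a vector
	description.add_options () ("config", po::value<std::vector<std::string>> ()->multitoken (), "configuration overrides");
	po::variables_map vm;
	po::store (po::parse_command_line (argc, argv, description), vm);
	po::notify (vm);
	// Mirrors the hunk: copy any --config values into config_overrides
	std::vector<std::string> config_overrides;
	if (vm.count ("config"))
	{
		config_overrides = vm["config"].as<std::vector<std::string>> ();
	}
	for (auto const & entry : config_overrides)
	{
		std::cout << entry << '\n';
	}
}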

View file

@ -22,15 +22,6 @@ int main (int argc, char * const * argv)
boost::program_options::notify (vm);
int result (0);
if (!vm.count ("data_path"))
{
std::string error_string;
if (!nano::migrate_working_path (error_string))
{
throw std::runtime_error (error_string);
}
}
auto ec = nano::handle_node_options (vm);
if (ec == nano::error_cli::unknown_command && vm.count ("help") != 0)
{

View file

@ -30,6 +30,8 @@ add_library(
bootstrap/bootstrap_frontier.cpp
bootstrap/bootstrap_lazy.hpp
bootstrap/bootstrap_lazy.cpp
bootstrap/bootstrap_legacy.hpp
bootstrap/bootstrap_legacy.cpp
bootstrap/bootstrap_server.hpp
bootstrap/bootstrap_server.cpp
bootstrap/bootstrap.hpp

View file

@ -1552,7 +1552,7 @@ bool nano::frontiers_confirmation_info::can_start_elections () const
return max_elections > 0;
}
std::unique_ptr<nano::container_info_component> nano::collect_container_info (active_transactions & active_transactions, const std::string & name)
std::unique_ptr<nano::container_info_component> nano::collect_container_info (active_transactions & active_transactions, std::string const & name)
{
size_t roots_count;
size_t blocks_count;

View file

@ -369,5 +369,5 @@ private:
};
bool purge_singleton_inactive_votes_cache_pool_memory ();
std::unique_ptr<container_info_component> collect_container_info (active_transactions & active_transactions, const std::string & name);
std::unique_ptr<container_info_component> collect_container_info (active_transactions & active_transactions, std::string const & name);
}

View file

@ -561,7 +561,7 @@ void nano::block_processor::requeue_invalid (nano::block_hash const & hash_a, na
node.bootstrap_initiator.lazy_requeue (hash_a, info_a.block->previous (), info_a.confirmed);
}
std::unique_ptr<nano::container_info_component> nano::collect_container_info (block_processor & block_processor, const std::string & name)
std::unique_ptr<nano::container_info_component> nano::collect_container_info (block_processor & block_processor, std::string const & name)
{
size_t blocks_count;
size_t forced_count;

View file

@ -89,7 +89,7 @@ private:
std::mutex mutex;
nano::state_block_signature_verification state_block_signature_verification;
friend std::unique_ptr<container_info_component> collect_container_info (block_processor & block_processor, const std::string & name);
friend std::unique_ptr<container_info_component> collect_container_info (block_processor & block_processor, std::string const & name);
};
std::unique_ptr<nano::container_info_component> collect_container_info (block_processor & block_processor, const std::string & name);
std::unique_ptr<nano::container_info_component> collect_container_info (block_processor & block_processor, std::string const & name);
}

View file

@ -1,7 +1,7 @@
#include <nano/lib/threading.hpp>
#include <nano/node/bootstrap/bootstrap.hpp>
#include <nano/node/bootstrap/bootstrap_attempt.hpp>
#include <nano/node/bootstrap/bootstrap_lazy.hpp>
#include <nano/node/bootstrap/bootstrap_legacy.hpp>
#include <nano/node/common.hpp>
#include <nano/node/node.hpp>
@ -288,7 +288,7 @@ void nano::bootstrap_initiator::notify_listeners (bool in_progress_a)
}
}
std::unique_ptr<nano::container_info_component> nano::collect_container_info (bootstrap_initiator & bootstrap_initiator, const std::string & name)
std::unique_ptr<nano::container_info_component> nano::collect_container_info (bootstrap_initiator & bootstrap_initiator, std::string const & name)
{
size_t count;
size_t cache_count;

View file

@ -113,10 +113,10 @@ private:
std::vector<std::function<void(bool)>> observers;
std::vector<boost::thread> bootstrap_initiator_threads;
friend std::unique_ptr<container_info_component> collect_container_info (bootstrap_initiator & bootstrap_initiator, const std::string & name);
friend std::unique_ptr<container_info_component> collect_container_info (bootstrap_initiator & bootstrap_initiator, std::string const & name);
};
std::unique_ptr<container_info_component> collect_container_info (bootstrap_initiator & bootstrap_initiator, const std::string & name);
std::unique_ptr<container_info_component> collect_container_info (bootstrap_initiator & bootstrap_initiator, std::string const & name);
class bootstrap_limits final
{
public:

View file

@ -197,434 +197,3 @@ size_t nano::bootstrap_attempt::wallet_size ()
debug_assert (mode == nano::bootstrap_mode::wallet_lazy);
return 0;
}
nano::bootstrap_attempt_legacy::bootstrap_attempt_legacy (std::shared_ptr<nano::node> const & node_a, uint64_t incremental_id_a, std::string const & id_a) :
nano::bootstrap_attempt (node_a, nano::bootstrap_mode::legacy, incremental_id_a, id_a)
{
node->bootstrap_initiator.notify_listeners (true);
}
bool nano::bootstrap_attempt_legacy::consume_future (std::future<bool> & future_a)
{
bool result;
try
{
result = future_a.get ();
}
catch (std::future_error &)
{
result = true;
}
return result;
}
void nano::bootstrap_attempt_legacy::stop ()
{
nano::unique_lock<std::mutex> lock (mutex);
stopped = true;
lock.unlock ();
condition.notify_all ();
lock.lock ();
if (auto i = frontiers.lock ())
{
try
{
i->promise.set_value (true);
}
catch (std::future_error &)
{
}
}
if (auto i = push.lock ())
{
try
{
i->promise.set_value (true);
}
catch (std::future_error &)
{
}
}
lock.unlock ();
node->bootstrap_initiator.connections->clear_pulls (incremental_id);
}
void nano::bootstrap_attempt_legacy::request_push (nano::unique_lock<std::mutex> & lock_a)
{
bool error (false);
lock_a.unlock ();
auto connection_l (node->bootstrap_initiator.connections->find_connection (endpoint_frontier_request));
lock_a.lock ();
if (connection_l)
{
std::future<bool> future;
{
auto this_l (shared_from_this ());
auto client (std::make_shared<nano::bulk_push_client> (connection_l, this_l));
client->start ();
push = client;
future = client->promise.get_future ();
}
lock_a.unlock ();
error = consume_future (future); // This is out of scope of `client' so when the last reference via boost::asio::io_context is lost and the client is destroyed, the future throws an exception.
lock_a.lock ();
}
if (node->config.logging.network_logging ())
{
node->logger.try_log ("Exiting bulk push client");
if (error)
{
node->logger.try_log ("Bulk push client failed");
}
}
}
void nano::bootstrap_attempt_legacy::add_frontier (nano::pull_info const & pull_a)
{
nano::pull_info pull (pull_a);
nano::lock_guard<std::mutex> lock (mutex);
frontier_pulls.push_back (pull);
}
void nano::bootstrap_attempt_legacy::add_bulk_push_target (nano::block_hash const & head, nano::block_hash const & end)
{
nano::lock_guard<std::mutex> lock (mutex);
bulk_push_targets.emplace_back (head, end);
}
bool nano::bootstrap_attempt_legacy::request_bulk_push_target (std::pair<nano::block_hash, nano::block_hash> & current_target_a)
{
nano::lock_guard<std::mutex> lock (mutex);
auto empty (bulk_push_targets.empty ());
if (!empty)
{
current_target_a = bulk_push_targets.back ();
bulk_push_targets.pop_back ();
}
return empty;
}
void nano::bootstrap_attempt_legacy::add_recent_pull (nano::block_hash const & head_a)
{
nano::lock_guard<std::mutex> lock (mutex);
recent_pulls_head.push_back (head_a);
if (recent_pulls_head.size () > nano::bootstrap_limits::bootstrap_max_confirm_frontiers)
{
recent_pulls_head.pop_front ();
}
}
void nano::bootstrap_attempt_legacy::restart_condition ()
{
/* Conditions to start frontiers confirmation:
- not completed frontiers confirmation
- more than 256 pull retries usually indicating issues with requested pulls
- or 128k processed blocks indicating large bootstrap */
if (!frontiers_confirmation_pending && !frontiers_confirmed && (requeued_pulls > (!node->network_params.network.is_dev_network () ? nano::bootstrap_limits::requeued_pulls_limit : nano::bootstrap_limits::requeued_pulls_limit_dev) || total_blocks > nano::bootstrap_limits::frontier_confirmation_blocks_limit))
{
frontiers_confirmation_pending = true;
}
}
void nano::bootstrap_attempt_legacy::attempt_restart_check (nano::unique_lock<std::mutex> & lock_a)
{
if (frontiers_confirmation_pending)
{
auto confirmed (confirm_frontiers (lock_a));
debug_assert (lock_a.owns_lock ());
if (!confirmed)
{
node->stats.inc (nano::stat::type::bootstrap, nano::stat::detail::frontier_confirmation_failed, nano::stat::dir::in);
auto score (node->network.excluded_peers.add (endpoint_frontier_request, node->network.size ()));
if (score >= nano::peer_exclusion::score_limit)
{
node->logger.always_log (boost::str (boost::format ("Adding peer %1% to excluded peers list with score %2% after %3% seconds bootstrap attempt") % endpoint_frontier_request % score % std::chrono::duration_cast<std::chrono::seconds> (std::chrono::steady_clock::now () - attempt_start).count ()));
auto channel = node->network.find_channel (nano::transport::map_tcp_to_endpoint (endpoint_frontier_request));
if (channel != nullptr)
{
node->network.erase (*channel);
}
}
lock_a.unlock ();
stop ();
lock_a.lock ();
// Start new bootstrap connection
auto node_l (node->shared ());
auto this_l (shared_from_this ());
node->background ([node_l, this_l]() {
node_l->bootstrap_initiator.remove_attempt (this_l);
// Delay after removing current attempt
node_l->workers.add_timed_task (std::chrono::steady_clock::now () + std::chrono::milliseconds (50), [node_l]() {
node_l->bootstrap_initiator.bootstrap (true);
});
});
}
else
{
node->stats.inc (nano::stat::type::bootstrap, nano::stat::detail::frontier_confirmation_successful, nano::stat::dir::in);
}
frontiers_confirmed = confirmed;
frontiers_confirmation_pending = false;
}
}
bool nano::bootstrap_attempt_legacy::confirm_frontiers (nano::unique_lock<std::mutex> & lock_a)
{
bool confirmed (false);
debug_assert (!frontiers_confirmed);
condition.wait (lock_a, [& stopped = stopped] { return !stopped; });
auto this_l (shared_from_this ());
std::vector<nano::block_hash> frontiers;
lock_a.unlock ();
nano::unique_lock<std::mutex> pulls_lock (node->bootstrap_initiator.connections->mutex);
for (auto i (node->bootstrap_initiator.connections->pulls.begin ()), end (node->bootstrap_initiator.connections->pulls.end ()); i != end && frontiers.size () != nano::bootstrap_limits::bootstrap_max_confirm_frontiers; ++i)
{
if (!i->head.is_zero () && i->bootstrap_id == incremental_id && std::find (frontiers.begin (), frontiers.end (), i->head) == frontiers.end ())
{
frontiers.push_back (i->head);
}
}
pulls_lock.unlock ();
lock_a.lock ();
for (auto i (recent_pulls_head.begin ()), end (recent_pulls_head.end ()); i != end && frontiers.size () != nano::bootstrap_limits::bootstrap_max_confirm_frontiers; ++i)
{
if (!i->is_zero () && std::find (frontiers.begin (), frontiers.end (), *i) == frontiers.end ())
{
frontiers.push_back (*i);
}
}
lock_a.unlock ();
auto frontiers_count (frontiers.size ());
if (frontiers_count > 0)
{
const size_t reps_limit = 20;
auto representatives (node->rep_crawler.representatives ());
auto reps_weight (node->rep_crawler.total_weight ());
auto representatives_copy (representatives);
nano::uint128_t total_weight (0);
// Select random peers from bottom 50% of principal representatives
if (representatives.size () > 1)
{
std::reverse (representatives.begin (), representatives.end ());
representatives.resize (representatives.size () / 2);
for (auto i = static_cast<CryptoPP::word32> (representatives.size () - 1); i > 0; --i)
{
auto k = nano::random_pool::generate_word32 (0, i);
std::swap (representatives[i], representatives[k]);
}
if (representatives.size () > reps_limit)
{
representatives.resize (reps_limit);
}
}
for (auto const & rep : representatives)
{
total_weight += rep.weight.number ();
}
// Select peers with total 25% of reps stake from top 50% of principal representatives
representatives_copy.resize (representatives_copy.size () / 2);
while (total_weight < reps_weight / 4) // 25%
{
auto k = nano::random_pool::generate_word32 (0, static_cast<CryptoPP::word32> (representatives_copy.size () - 1));
auto rep (representatives_copy[k]);
if (std::find (representatives.begin (), representatives.end (), rep) == representatives.end ())
{
representatives.push_back (rep);
total_weight += rep.weight.number ();
}
}
// Start requests
for (auto i (0), max_requests (20); i <= max_requests && !confirmed && !stopped; ++i)
{
std::unordered_map<std::shared_ptr<nano::transport::channel>, std::deque<std::pair<nano::block_hash, nano::root>>> batched_confirm_req_bundle;
std::deque<std::pair<nano::block_hash, nano::root>> request;
// Find confirmed frontiers (tally > 12.5% of reps stake, 60% of requested reps responded)
for (auto ii (frontiers.begin ()); ii != frontiers.end ();)
{
if (node->ledger.block_or_pruned_exists (*ii))
{
ii = frontiers.erase (ii);
}
else
{
auto existing (node->active.find_inactive_votes_cache (*ii));
nano::uint128_t tally;
for (auto & voter : existing.voters)
{
tally += node->ledger.weight (voter);
}
if (existing.status.confirmed || (tally > reps_weight / 8 && existing.voters.size () >= representatives.size () * 0.6)) // 12.5% of weight, 60% of reps
{
ii = frontiers.erase (ii);
}
else
{
for (auto const & rep : representatives)
{
if (std::find (existing.voters.begin (), existing.voters.end (), rep.account) == existing.voters.end ())
{
release_assert (!ii->is_zero ());
auto rep_request (batched_confirm_req_bundle.find (rep.channel));
if (rep_request == batched_confirm_req_bundle.end ())
{
std::deque<std::pair<nano::block_hash, nano::root>> insert_root_hash = { std::make_pair (*ii, *ii) };
batched_confirm_req_bundle.emplace (rep.channel, insert_root_hash);
}
else
{
rep_request->second.emplace_back (*ii, *ii);
}
}
}
++ii;
}
}
}
auto confirmed_count (frontiers_count - frontiers.size ());
if (confirmed_count >= frontiers_count * nano::bootstrap_limits::required_frontier_confirmation_ratio) // 80% of frontiers confirmed
{
confirmed = true;
}
else if (i < max_requests)
{
node->network.broadcast_confirm_req_batched_many (batched_confirm_req_bundle);
std::this_thread::sleep_for (std::chrono::milliseconds (!node->network_params.network.is_dev_network () ? 500 : 25));
}
}
if (!confirmed)
{
node->logger.always_log (boost::str (boost::format ("Failed to confirm frontiers for bootstrap attempt. %1% of %2% frontiers were not confirmed") % frontiers.size () % frontiers_count));
}
}
lock_a.lock ();
return confirmed;
}
bool nano::bootstrap_attempt_legacy::request_frontier (nano::unique_lock<std::mutex> & lock_a, bool first_attempt)
{
auto result (true);
lock_a.unlock ();
auto connection_l (node->bootstrap_initiator.connections->connection (shared_from_this (), first_attempt));
lock_a.lock ();
if (connection_l && !stopped)
{
endpoint_frontier_request = connection_l->channel->get_tcp_endpoint ();
std::future<bool> future;
{
auto this_l (shared_from_this ());
auto client (std::make_shared<nano::frontier_req_client> (connection_l, this_l));
client->run ();
frontiers = client;
future = client->promise.get_future ();
}
lock_a.unlock ();
result = consume_future (future); // This is out of scope of `client' so when the last reference via boost::asio::io_context is lost and the client is destroyed, the future throws an exception.
lock_a.lock ();
if (result)
{
frontier_pulls.clear ();
}
else
{
account_count = nano::narrow_cast<unsigned int> (frontier_pulls.size ());
// Shuffle pulls
release_assert (std::numeric_limits<CryptoPP::word32>::max () > frontier_pulls.size ());
if (!frontier_pulls.empty ())
{
for (auto i = static_cast<CryptoPP::word32> (frontier_pulls.size () - 1); i > 0; --i)
{
auto k = nano::random_pool::generate_word32 (0, i);
std::swap (frontier_pulls[i], frontier_pulls[k]);
}
}
// Add to regular pulls
while (!frontier_pulls.empty ())
{
auto pull (frontier_pulls.front ());
lock_a.unlock ();
node->bootstrap_initiator.connections->add_pull (pull);
lock_a.lock ();
++pulling;
frontier_pulls.pop_front ();
}
}
if (node->config.logging.network_logging ())
{
if (!result)
{
node->logger.try_log (boost::str (boost::format ("Completed frontier request, %1% out of sync accounts according to %2%") % account_count % connection_l->channel->to_string ()));
}
else
{
node->stats.inc (nano::stat::type::error, nano::stat::detail::frontier_req, nano::stat::dir::out);
}
}
}
return result;
}
void nano::bootstrap_attempt_legacy::run_start (nano::unique_lock<std::mutex> & lock_a)
{
frontiers_received = false;
frontiers_confirmed = false;
total_blocks = 0;
requeued_pulls = 0;
recent_pulls_head.clear ();
auto frontier_failure (true);
uint64_t frontier_attempts (0);
while (!stopped && frontier_failure)
{
++frontier_attempts;
frontier_failure = request_frontier (lock_a, frontier_attempts == 1);
}
frontiers_received = true;
}
void nano::bootstrap_attempt_legacy::run ()
{
debug_assert (started);
debug_assert (!node->flags.disable_legacy_bootstrap);
node->bootstrap_initiator.connections->populate_connections (false);
nano::unique_lock<std::mutex> lock (mutex);
run_start (lock);
while (still_pulling ())
{
while (still_pulling ())
{
// clang-format off
condition.wait (lock, [&stopped = stopped, &pulling = pulling, &frontiers_confirmation_pending = frontiers_confirmation_pending] { return stopped || pulling == 0 || frontiers_confirmation_pending; });
// clang-format on
attempt_restart_check (lock);
}
// Flushing may resolve forks which can add more pulls
node->logger.try_log ("Flushing unchecked blocks");
lock.unlock ();
node->block_processor.flush ();
lock.lock ();
node->logger.try_log ("Finished flushing unchecked blocks");
}
if (!stopped)
{
node->logger.try_log ("Completed legacy pulls");
if (!node->flags.disable_bootstrap_bulk_push_client)
{
request_push (lock);
}
if (!stopped)
{
node->unchecked_cleanup ();
}
}
lock.unlock ();
stop ();
condition.notify_all ();
}
void nano::bootstrap_attempt_legacy::get_information (boost::property_tree::ptree & tree_a)
{
nano::lock_guard<std::mutex> lock (mutex);
tree_a.put ("frontier_pulls", std::to_string (frontier_pulls.size ()));
tree_a.put ("frontiers_received", static_cast<bool> (frontiers_received));
tree_a.put ("frontiers_confirmed", static_cast<bool> (frontiers_confirmed));
tree_a.put ("frontiers_confirmation_pending", static_cast<bool> (frontiers_confirmation_pending));
}

View file

@ -56,31 +56,4 @@ public:
std::mutex mutex;
nano::condition_variable condition;
};
class bootstrap_attempt_legacy : public bootstrap_attempt
{
public:
explicit bootstrap_attempt_legacy (std::shared_ptr<nano::node> const & node_a, uint64_t incremental_id_a, std::string const & id_a = "");
void run () override;
bool consume_future (std::future<bool> &);
void stop () override;
bool request_frontier (nano::unique_lock<std::mutex> &, bool = false);
void request_push (nano::unique_lock<std::mutex> &);
void add_frontier (nano::pull_info const &) override;
void add_bulk_push_target (nano::block_hash const &, nano::block_hash const &) override;
bool request_bulk_push_target (std::pair<nano::block_hash, nano::block_hash> &) override;
void add_recent_pull (nano::block_hash const &) override;
void run_start (nano::unique_lock<std::mutex> &);
void restart_condition () override;
void attempt_restart_check (nano::unique_lock<std::mutex> &);
bool confirm_frontiers (nano::unique_lock<std::mutex> &);
void get_information (boost::property_tree::ptree &) override;
nano::tcp_endpoint endpoint_frontier_request;
std::weak_ptr<nano::frontier_req_client> frontiers;
std::weak_ptr<nano::bulk_push_client> push;
std::deque<nano::pull_info> frontier_pulls;
std::deque<nano::block_hash> recent_pulls_head;
std::vector<std::pair<nano::block_hash, nano::block_hash>> bulk_push_targets;
std::atomic<unsigned> account_count{ 0 };
std::atomic<bool> frontiers_confirmation_pending{ false };
};
}

View file

@ -46,10 +46,6 @@ bulk_push_cost (0)
next ();
}
nano::frontier_req_client::~frontier_req_client ()
{
}
void nano::frontier_req_client::receive_frontier ()
{
auto this_l (shared_from_this ());

View file

@ -13,7 +13,6 @@ class frontier_req_client final : public std::enable_shared_from_this<nano::fron
{
public:
explicit frontier_req_client (std::shared_ptr<nano::bootstrap_client> const &, std::shared_ptr<nano::bootstrap_attempt> const &);
~frontier_req_client ();
void run ();
void receive_frontier ();
void received_frontier (boost::system::error_code const &, size_t);

View file

@ -0,0 +1,437 @@
#include <nano/node/bootstrap/bootstrap_bulk_push.hpp>
#include <nano/node/bootstrap/bootstrap_frontier.hpp>
#include <nano/node/bootstrap/bootstrap_legacy.hpp>
#include <nano/node/node.hpp>
#include <boost/format.hpp>
nano::bootstrap_attempt_legacy::bootstrap_attempt_legacy (std::shared_ptr<nano::node> const & node_a, uint64_t incremental_id_a, std::string const & id_a) :
nano::bootstrap_attempt (node_a, nano::bootstrap_mode::legacy, incremental_id_a, id_a)
{
node->bootstrap_initiator.notify_listeners (true);
}
bool nano::bootstrap_attempt_legacy::consume_future (std::future<bool> & future_a)
{
bool result;
try
{
result = future_a.get ();
}
catch (std::future_error &)
{
result = true;
}
return result;
}
void nano::bootstrap_attempt_legacy::stop ()
{
nano::unique_lock<std::mutex> lock (mutex);
stopped = true;
lock.unlock ();
condition.notify_all ();
lock.lock ();
if (auto i = frontiers.lock ())
{
try
{
i->promise.set_value (true);
}
catch (std::future_error &)
{
}
}
if (auto i = push.lock ())
{
try
{
i->promise.set_value (true);
}
catch (std::future_error &)
{
}
}
lock.unlock ();
node->bootstrap_initiator.connections->clear_pulls (incremental_id);
}
void nano::bootstrap_attempt_legacy::request_push (nano::unique_lock<std::mutex> & lock_a)
{
bool error (false);
lock_a.unlock ();
auto connection_l (node->bootstrap_initiator.connections->find_connection (endpoint_frontier_request));
lock_a.lock ();
if (connection_l)
{
std::future<bool> future;
{
auto this_l (shared_from_this ());
auto client (std::make_shared<nano::bulk_push_client> (connection_l, this_l));
client->start ();
push = client;
future = client->promise.get_future ();
}
lock_a.unlock ();
error = consume_future (future); // This is out of scope of `client' so when the last reference via boost::asio::io_context is lost and the client is destroyed, the future throws an exception.
lock_a.lock ();
}
if (node->config.logging.network_logging ())
{
node->logger.try_log ("Exiting bulk push client");
if (error)
{
node->logger.try_log ("Bulk push client failed");
}
}
}
void nano::bootstrap_attempt_legacy::add_frontier (nano::pull_info const & pull_a)
{
nano::pull_info pull (pull_a);
nano::lock_guard<std::mutex> lock (mutex);
frontier_pulls.push_back (pull);
}
void nano::bootstrap_attempt_legacy::add_bulk_push_target (nano::block_hash const & head, nano::block_hash const & end)
{
nano::lock_guard<std::mutex> lock (mutex);
bulk_push_targets.emplace_back (head, end);
}
bool nano::bootstrap_attempt_legacy::request_bulk_push_target (std::pair<nano::block_hash, nano::block_hash> & current_target_a)
{
nano::lock_guard<std::mutex> lock (mutex);
auto empty (bulk_push_targets.empty ());
if (!empty)
{
current_target_a = bulk_push_targets.back ();
bulk_push_targets.pop_back ();
}
return empty;
}
void nano::bootstrap_attempt_legacy::add_recent_pull (nano::block_hash const & head_a)
{
nano::lock_guard<std::mutex> lock (mutex);
recent_pulls_head.push_back (head_a);
if (recent_pulls_head.size () > nano::bootstrap_limits::bootstrap_max_confirm_frontiers)
{
recent_pulls_head.pop_front ();
}
}
void nano::bootstrap_attempt_legacy::restart_condition ()
{
/* Conditions to start frontiers confirmation:
- not completed frontiers confirmation
- more than 256 pull retries usually indicating issues with requested pulls
- or 128k processed blocks indicating large bootstrap */
if (!frontiers_confirmation_pending && !frontiers_confirmed && (requeued_pulls > (!node->network_params.network.is_dev_network () ? nano::bootstrap_limits::requeued_pulls_limit : nano::bootstrap_limits::requeued_pulls_limit_dev) || total_blocks > nano::bootstrap_limits::frontier_confirmation_blocks_limit))
{
frontiers_confirmation_pending = true;
}
}
void nano::bootstrap_attempt_legacy::attempt_restart_check (nano::unique_lock<std::mutex> & lock_a)
{
if (frontiers_confirmation_pending)
{
auto confirmed (confirm_frontiers (lock_a));
debug_assert (lock_a.owns_lock ());
if (!confirmed)
{
node->stats.inc (nano::stat::type::bootstrap, nano::stat::detail::frontier_confirmation_failed, nano::stat::dir::in);
auto score (node->network.excluded_peers.add (endpoint_frontier_request, node->network.size ()));
if (score >= nano::peer_exclusion::score_limit)
{
node->logger.always_log (boost::str (boost::format ("Adding peer %1% to excluded peers list with score %2% after %3% seconds bootstrap attempt") % endpoint_frontier_request % score % std::chrono::duration_cast<std::chrono::seconds> (std::chrono::steady_clock::now () - attempt_start).count ()));
auto channel = node->network.find_channel (nano::transport::map_tcp_to_endpoint (endpoint_frontier_request));
if (channel != nullptr)
{
node->network.erase (*channel);
}
}
lock_a.unlock ();
stop ();
lock_a.lock ();
// Start new bootstrap connection
auto node_l (node->shared ());
auto this_l (shared_from_this ());
node->background ([node_l, this_l]() {
node_l->bootstrap_initiator.remove_attempt (this_l);
// Delay after removing current attempt
node_l->workers.add_timed_task (std::chrono::steady_clock::now () + std::chrono::milliseconds (50), [node_l]() {
node_l->bootstrap_initiator.bootstrap (true);
});
});
}
else
{
node->stats.inc (nano::stat::type::bootstrap, nano::stat::detail::frontier_confirmation_successful, nano::stat::dir::in);
}
frontiers_confirmed = confirmed;
frontiers_confirmation_pending = false;
}
}
bool nano::bootstrap_attempt_legacy::confirm_frontiers (nano::unique_lock<std::mutex> & lock_a)
{
bool confirmed (false);
debug_assert (!frontiers_confirmed);
condition.wait (lock_a, [& stopped = stopped] { return !stopped; });
auto this_l (shared_from_this ());
std::vector<nano::block_hash> frontiers;
lock_a.unlock ();
nano::unique_lock<std::mutex> pulls_lock (node->bootstrap_initiator.connections->mutex);
for (auto i (node->bootstrap_initiator.connections->pulls.begin ()), end (node->bootstrap_initiator.connections->pulls.end ()); i != end && frontiers.size () != nano::bootstrap_limits::bootstrap_max_confirm_frontiers; ++i)
{
if (!i->head.is_zero () && i->bootstrap_id == incremental_id && std::find (frontiers.begin (), frontiers.end (), i->head) == frontiers.end ())
{
frontiers.push_back (i->head);
}
}
pulls_lock.unlock ();
lock_a.lock ();
for (auto i (recent_pulls_head.begin ()), end (recent_pulls_head.end ()); i != end && frontiers.size () != nano::bootstrap_limits::bootstrap_max_confirm_frontiers; ++i)
{
if (!i->is_zero () && std::find (frontiers.begin (), frontiers.end (), *i) == frontiers.end ())
{
frontiers.push_back (*i);
}
}
lock_a.unlock ();
auto frontiers_count (frontiers.size ());
if (frontiers_count > 0)
{
const size_t reps_limit = 20;
auto representatives (node->rep_crawler.representatives ());
auto reps_weight (node->rep_crawler.total_weight ());
auto representatives_copy (representatives);
nano::uint128_t total_weight (0);
// Select random peers from bottom 50% of principal representatives
if (representatives.size () > 1)
{
std::reverse (representatives.begin (), representatives.end ());
representatives.resize (representatives.size () / 2);
for (auto i = static_cast<CryptoPP::word32> (representatives.size () - 1); i > 0; --i)
{
auto k = nano::random_pool::generate_word32 (0, i);
std::swap (representatives[i], representatives[k]);
}
if (representatives.size () > reps_limit)
{
representatives.resize (reps_limit);
}
}
for (auto const & rep : representatives)
{
total_weight += rep.weight.number ();
}
// Select peers with total 25% of reps stake from top 50% of principal representatives
representatives_copy.resize (representatives_copy.size () / 2);
while (total_weight < reps_weight / 4) // 25%
{
auto k = nano::random_pool::generate_word32 (0, static_cast<CryptoPP::word32> (representatives_copy.size () - 1));
auto rep (representatives_copy[k]);
if (std::find (representatives.begin (), representatives.end (), rep) == representatives.end ())
{
representatives.push_back (rep);
total_weight += rep.weight.number ();
}
}
// Start requests
for (auto i (0), max_requests (20); i <= max_requests && !confirmed && !stopped; ++i)
{
std::unordered_map<std::shared_ptr<nano::transport::channel>, std::deque<std::pair<nano::block_hash, nano::root>>> batched_confirm_req_bundle;
std::deque<std::pair<nano::block_hash, nano::root>> request;
// Find confirmed frontiers (tally > 12.5% of reps stake, 60% of requested reps responded)
for (auto ii (frontiers.begin ()); ii != frontiers.end ();)
{
if (node->ledger.block_or_pruned_exists (*ii))
{
ii = frontiers.erase (ii);
}
else
{
auto existing (node->active.find_inactive_votes_cache (*ii));
nano::uint128_t tally;
for (auto & voter : existing.voters)
{
tally += node->ledger.weight (voter);
}
if (existing.status.confirmed || (tally > reps_weight / 8 && existing.voters.size () >= representatives.size () * 0.6)) // 12.5% of weight, 60% of reps
{
ii = frontiers.erase (ii);
}
else
{
for (auto const & rep : representatives)
{
if (std::find (existing.voters.begin (), existing.voters.end (), rep.account) == existing.voters.end ())
{
release_assert (!ii->is_zero ());
auto rep_request (batched_confirm_req_bundle.find (rep.channel));
if (rep_request == batched_confirm_req_bundle.end ())
{
std::deque<std::pair<nano::block_hash, nano::root>> insert_root_hash = { std::make_pair (*ii, *ii) };
batched_confirm_req_bundle.emplace (rep.channel, insert_root_hash);
}
else
{
rep_request->second.emplace_back (*ii, *ii);
}
}
}
++ii;
}
}
}
auto confirmed_count (frontiers_count - frontiers.size ());
if (confirmed_count >= frontiers_count * nano::bootstrap_limits::required_frontier_confirmation_ratio) // 80% of frontiers confirmed
{
confirmed = true;
}
else if (i < max_requests)
{
node->network.broadcast_confirm_req_batched_many (batched_confirm_req_bundle);
std::this_thread::sleep_for (std::chrono::milliseconds (!node->network_params.network.is_dev_network () ? 500 : 25));
}
}
if (!confirmed)
{
node->logger.always_log (boost::str (boost::format ("Failed to confirm frontiers for bootstrap attempt. %1% of %2% frontiers were not confirmed") % frontiers.size () % frontiers_count));
}
}
lock_a.lock ();
return confirmed;
}
bool nano::bootstrap_attempt_legacy::request_frontier (nano::unique_lock<std::mutex> & lock_a, bool first_attempt)
{
auto result (true);
lock_a.unlock ();
auto connection_l (node->bootstrap_initiator.connections->connection (shared_from_this (), first_attempt));
lock_a.lock ();
if (connection_l && !stopped)
{
endpoint_frontier_request = connection_l->channel->get_tcp_endpoint ();
std::future<bool> future;
{
auto this_l (shared_from_this ());
auto client (std::make_shared<nano::frontier_req_client> (connection_l, this_l));
client->run ();
frontiers = client;
future = client->promise.get_future ();
}
lock_a.unlock ();
result = consume_future (future); // This is out of scope of `client' so when the last reference via boost::asio::io_context is lost and the client is destroyed, the future throws an exception.
lock_a.lock ();
if (result)
{
frontier_pulls.clear ();
}
else
{
account_count = nano::narrow_cast<unsigned int> (frontier_pulls.size ());
// Shuffle pulls
release_assert (std::numeric_limits<CryptoPP::word32>::max () > frontier_pulls.size ());
if (!frontier_pulls.empty ())
{
for (auto i = static_cast<CryptoPP::word32> (frontier_pulls.size () - 1); i > 0; --i)
{
auto k = nano::random_pool::generate_word32 (0, i);
std::swap (frontier_pulls[i], frontier_pulls[k]);
}
}
// Add to regular pulls
while (!frontier_pulls.empty ())
{
auto pull (frontier_pulls.front ());
lock_a.unlock ();
node->bootstrap_initiator.connections->add_pull (pull);
lock_a.lock ();
++pulling;
frontier_pulls.pop_front ();
}
}
if (node->config.logging.network_logging ())
{
if (!result)
{
node->logger.try_log (boost::str (boost::format ("Completed frontier request, %1% out of sync accounts according to %2%") % account_count % connection_l->channel->to_string ()));
}
else
{
node->stats.inc (nano::stat::type::error, nano::stat::detail::frontier_req, nano::stat::dir::out);
}
}
}
return result;
}
void nano::bootstrap_attempt_legacy::run_start (nano::unique_lock<std::mutex> & lock_a)
{
frontiers_received = false;
frontiers_confirmed = false;
total_blocks = 0;
requeued_pulls = 0;
recent_pulls_head.clear ();
auto frontier_failure (true);
uint64_t frontier_attempts (0);
while (!stopped && frontier_failure)
{
++frontier_attempts;
frontier_failure = request_frontier (lock_a, frontier_attempts == 1);
}
frontiers_received = true;
}
void nano::bootstrap_attempt_legacy::run ()
{
debug_assert (started);
debug_assert (!node->flags.disable_legacy_bootstrap);
node->bootstrap_initiator.connections->populate_connections (false);
nano::unique_lock<std::mutex> lock (mutex);
run_start (lock);
while (still_pulling ())
{
while (still_pulling ())
{
// clang-format off
condition.wait (lock, [&stopped = stopped, &pulling = pulling, &frontiers_confirmation_pending = frontiers_confirmation_pending] { return stopped || pulling == 0 || frontiers_confirmation_pending; });
// clang-format on
attempt_restart_check (lock);
}
// Flushing may resolve forks which can add more pulls
node->logger.try_log ("Flushing unchecked blocks");
lock.unlock ();
node->block_processor.flush ();
lock.lock ();
node->logger.try_log ("Finished flushing unchecked blocks");
}
if (!stopped)
{
node->logger.try_log ("Completed legacy pulls");
if (!node->flags.disable_bootstrap_bulk_push_client)
{
request_push (lock);
}
if (!stopped)
{
node->unchecked_cleanup ();
}
}
lock.unlock ();
stop ();
condition.notify_all ();
}
void nano::bootstrap_attempt_legacy::get_information (boost::property_tree::ptree & tree_a)
{
nano::lock_guard<std::mutex> lock (mutex);
tree_a.put ("frontier_pulls", std::to_string (frontier_pulls.size ()));
tree_a.put ("frontiers_received", static_cast<bool> (frontiers_received));
tree_a.put ("frontiers_confirmed", static_cast<bool> (frontiers_confirmed));
tree_a.put ("frontiers_confirmation_pending", static_cast<bool> (frontiers_confirmation_pending));
}

View file

@ -0,0 +1,43 @@
#pragma once
#include <nano/node/bootstrap/bootstrap_attempt.hpp>
#include <boost/property_tree/ptree_fwd.hpp>
#include <atomic>
#include <deque>
#include <memory>
#include <vector>
namespace nano
{
class node;
class bootstrap_attempt_legacy : public bootstrap_attempt
{
public:
explicit bootstrap_attempt_legacy (std::shared_ptr<nano::node> const & node_a, uint64_t incremental_id_a, std::string const & id_a = "");
void run () override;
bool consume_future (std::future<bool> &);
void stop () override;
bool request_frontier (nano::unique_lock<std::mutex> &, bool = false);
void request_push (nano::unique_lock<std::mutex> &);
void add_frontier (nano::pull_info const &) override;
void add_bulk_push_target (nano::block_hash const &, nano::block_hash const &) override;
bool request_bulk_push_target (std::pair<nano::block_hash, nano::block_hash> &) override;
void add_recent_pull (nano::block_hash const &) override;
void run_start (nano::unique_lock<std::mutex> &);
void restart_condition () override;
void attempt_restart_check (nano::unique_lock<std::mutex> &);
bool confirm_frontiers (nano::unique_lock<std::mutex> &);
void get_information (boost::property_tree::ptree &) override;
nano::tcp_endpoint endpoint_frontier_request;
std::weak_ptr<nano::frontier_req_client> frontiers;
std::weak_ptr<nano::bulk_push_client> push;
std::deque<nano::pull_info> frontier_pulls;
std::deque<nano::block_hash> recent_pulls_head;
std::vector<std::pair<nano::block_hash, nano::block_hash>> bulk_push_targets;
std::atomic<unsigned> account_count{ 0 };
std::atomic<bool> frontiers_confirmation_pending{ false };
};
}

View file

@ -95,7 +95,7 @@ boost::asio::ip::tcp::endpoint nano::bootstrap_listener::endpoint ()
}
}
std::unique_ptr<nano::container_info_component> nano::collect_container_info (bootstrap_listener & bootstrap_listener, const std::string & name)
std::unique_ptr<nano::container_info_component> nano::collect_container_info (bootstrap_listener & bootstrap_listener, std::string const & name)
{
auto sizeof_element = sizeof (decltype (bootstrap_listener.connections)::value_type);
auto composite = std::make_unique<container_info_composite> (name);

View file

@ -31,7 +31,7 @@ private:
uint16_t port;
};
std::unique_ptr<container_info_component> collect_container_info (bootstrap_listener & bootstrap_listener, const std::string & name);
std::unique_ptr<container_info_component> collect_container_info (bootstrap_listener & bootstrap_listener, std::string const & name);
class message;
enum class bootstrap_server_type

View file

@ -583,7 +583,7 @@ iterated_frontier (iterated_frontier_a)
{
}
std::unique_ptr<nano::container_info_component> nano::collect_container_info (confirmation_height_bounded & confirmation_height_bounded, const std::string & name_a)
std::unique_ptr<nano::container_info_component> nano::collect_container_info (confirmation_height_bounded & confirmation_height_bounded, std::string const & name_a)
{
auto composite = std::make_unique<container_info_composite> (name_a);
composite->add_component (std::make_unique<container_info_leaf> (container_info{ "pending_writes", confirmation_height_bounded.pending_writes_size, sizeof (decltype (confirmation_height_bounded.pending_writes)::value_type) }));

View file

@ -131,8 +131,8 @@ private:
std::function<uint64_t ()> awaiting_processing_size_callback;
nano::network_params network_params;
friend std::unique_ptr<nano::container_info_component> collect_container_info (confirmation_height_bounded &, const std::string & name_a);
friend std::unique_ptr<nano::container_info_component> collect_container_info (confirmation_height_bounded &, std::string const & name_a);
};
std::unique_ptr<nano::container_info_component> collect_container_info (confirmation_height_bounded &, const std::string & name_a);
std::unique_ptr<nano::container_info_component> collect_container_info (confirmation_height_bounded &, std::string const & name_a);
}

View file

@ -201,7 +201,7 @@ void nano::confirmation_height_processor::notify_observers (nano::block_hash con
}
}
std::unique_ptr<nano::container_info_component> nano::collect_container_info (confirmation_height_processor & confirmation_height_processor_a, const std::string & name_a)
std::unique_ptr<nano::container_info_component> nano::collect_container_info (confirmation_height_processor & confirmation_height_processor_a, std::string const & name_a)
{
auto composite = std::make_unique<container_info_composite> (name_a);

View file

@ -478,7 +478,7 @@ iterated_height (iterated_height_a)
{
}
std::unique_ptr<nano::container_info_component> nano::collect_container_info (confirmation_height_unbounded & confirmation_height_unbounded, const std::string & name_a)
std::unique_ptr<nano::container_info_component> nano::collect_container_info (confirmation_height_unbounded & confirmation_height_unbounded, std::string const & name_a)
{
auto composite = std::make_unique<container_info_composite> (name_a);
composite->add_component (std::make_unique<container_info_leaf> (container_info{ "confirmed_iterated_pairs", confirmation_height_unbounded.confirmed_iterated_pairs_size, sizeof (decltype (confirmation_height_unbounded.confirmed_iterated_pairs)::value_type) }));

View file

@ -110,8 +110,8 @@ private:
std::function<uint64_t ()> awaiting_processing_size_callback;
friend class confirmation_height_dynamic_algorithm_no_transition_while_pending_Test;
friend std::unique_ptr<nano::container_info_component> collect_container_info (confirmation_height_unbounded &, const std::string & name_a);
friend std::unique_ptr<nano::container_info_component> collect_container_info (confirmation_height_unbounded &, std::string const & name_a);
};
std::unique_ptr<nano::container_info_component> collect_container_info (confirmation_height_unbounded &, const std::string & name_a);
std::unique_ptr<nano::container_info_component> collect_container_info (confirmation_height_unbounded &, std::string const & name_a);
}

View file

@ -94,7 +94,7 @@ size_t nano::distributed_work_factory::size () const
return items.size ();
}
std::unique_ptr<nano::container_info_component> nano::collect_container_info (distributed_work_factory & distributed_work, const std::string & name)
std::unique_ptr<nano::container_info_component> nano::collect_container_info (distributed_work_factory & distributed_work, std::string const & name)
{
auto item_count = distributed_work.size ();
auto sizeof_item_element = sizeof (decltype (nano::distributed_work_factory::items)::value_type);

View file

@ -38,5 +38,5 @@ private:
friend std::unique_ptr<container_info_component> collect_container_info (distributed_work_factory &, const std::string &);
};
std::unique_ptr<container_info_component> collect_container_info (distributed_work_factory & distributed_work, const std::string & name);
std::unique_ptr<container_info_component> collect_container_info (distributed_work_factory & distributed_work, std::string const & name);
}

View file

@ -127,7 +127,7 @@ size_t nano::gap_cache::size ()
return blocks.size ();
}
std::unique_ptr<nano::container_info_component> nano::collect_container_info (gap_cache & gap_cache, const std::string & name)
std::unique_ptr<nano::container_info_component> nano::collect_container_info (gap_cache & gap_cache, std::string const & name)
{
auto count = gap_cache.size ();
auto sizeof_element = sizeof (decltype (gap_cache.blocks)::value_type);

View file

@ -58,5 +58,5 @@ public:
nano::node & node;
};
std::unique_ptr<container_info_component> collect_container_info (gap_cache & gap_cache, const std::string & name);
std::unique_ptr<container_info_component> collect_container_info (gap_cache & gap_cache, std::string const & name);
}

View file

@ -260,20 +260,25 @@ bool nano::mdb_store::do_upgrades (nano::write_transaction & transaction_a, bool
break;
case 14:
upgrade_v14_to_v15 (transaction_a);
needs_vacuuming = true;
[[fallthrough]];
// Upgrades to version 16, 17 & 18 are all part of the v21 node release
case 15:
// Upgrades to v16, v17 & v18 are all part of the v21 node release
upgrade_v15_to_v16 (transaction_a);
[[fallthrough]];
case 16:
upgrade_v16_to_v17 (transaction_a);
[[fallthrough]];
case 17:
upgrade_v17_to_v18 (transaction_a);
needs_vacuuming = true;
[[fallthrough]];
// Upgrades to version 19 & 20 are both part of the v22 node release
case 18:
upgrade_v18_to_v19 (transaction_a);
needs_vacuuming = true;
[[fallthrough]];
case 19:
upgrade_v19_to_v20 (transaction_a);
[[fallthrough]];
case 20:
break;
default:
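
The switch above is the store's incremental upgrade chain: execution enters at the database's current version and deliberately falls through every later case, so all remaining upgrade steps run in order, with needs_vacuuming flagged by the steps that rewrite tables. A stripped-down sketch of that fallthrough pattern; the version numbers and printed step names are illustrative only, not the node's real upgrade steps.

#include <cstdio>

// Illustrative only: apply every upgrade step from the current version onward.
// Returning true signals an unknown (unsupported) version.
bool do_upgrades (int version, bool & needs_vacuuming)
{
	switch (version)
	{
		case 18:
			std::puts ("upgrade v18 -> v19");
			needs_vacuuming = true; // this step rewrites data, so compact afterwards
			[[fallthrough]];
		case 19:
			std::puts ("upgrade v19 -> v20");
			[[fallthrough]];
		case 20:
			break; // already at the latest version
		default:
			return true;
	}
	return false;
}

int main ()
{
	bool needs_vacuuming (false);
	bool error (do_upgrades (18, needs_vacuuming));
	std::printf ("error: %d, needs_vacuuming: %d\n", error, needs_vacuuming);
}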

View file

@ -1018,7 +1018,7 @@ size_t nano::syn_cookies::cookies_size ()
return cookies.size ();
}
std::unique_ptr<nano::container_info_component> nano::collect_container_info (network & network, const std::string & name)
std::unique_ptr<nano::container_info_component> nano::collect_container_info (network & network, std::string const & name)
{
auto composite = std::make_unique<container_info_composite> (name);
composite->add_component (network.tcp_channels.collect_container_info ("tcp_channels"));

View file

@ -199,5 +199,5 @@ public:
static size_t const confirm_req_hashes_max = 7;
static size_t const confirm_ack_hashes_max = 12;
};
std::unique_ptr<container_info_component> collect_container_info (network & network, const std::string & name);
std::unique_ptr<container_info_component> collect_container_info (network & network, std::string const & name);
}

View file

@ -65,7 +65,7 @@ void nano::node::keepalive (std::string const & address_a, uint16_t port_a)
});
}
std::unique_ptr<nano::container_info_component> nano::collect_container_info (rep_crawler & rep_crawler, const std::string & name)
std::unique_ptr<nano::container_info_component> nano::collect_container_info (rep_crawler & rep_crawler, std::string const & name)
{
size_t count;
{
@ -574,7 +574,7 @@ void nano::node::process_fork (nano::transaction const & transaction_a, std::sha
}
}
std::unique_ptr<nano::container_info_component> nano::collect_container_info (node & node, const std::string & name)
std::unique_ptr<nano::container_info_component> nano::collect_container_info (node & node, std::string const & name)
{
auto composite = std::make_unique<container_info_composite> (name);
composite->add_component (collect_container_info (node.work, "work"));
@ -1401,7 +1401,7 @@ bool nano::block_arrival::recent (nano::block_hash const & hash_a)
return arrival.get<tag_hash> ().find (hash_a) != arrival.get<tag_hash> ().end ();
}
std::unique_ptr<nano::container_info_component> nano::collect_container_info (block_arrival & block_arrival, const std::string & name)
std::unique_ptr<nano::container_info_component> nano::collect_container_info (block_arrival & block_arrival, std::string const & name)
{
size_t count = 0;
{

View file

@ -78,9 +78,9 @@ public:
static std::chrono::seconds constexpr arrival_time_min = std::chrono::seconds (300);
};
std::unique_ptr<container_info_component> collect_container_info (block_arrival & block_arrival, const std::string & name);
std::unique_ptr<container_info_component> collect_container_info (block_arrival & block_arrival, std::string const & name);
std::unique_ptr<container_info_component> collect_container_info (rep_crawler & rep_crawler, const std::string & name);
std::unique_ptr<container_info_component> collect_container_info (rep_crawler & rep_crawler, std::string const & name);
class node final : public std::enable_shared_from_this<nano::node>
{
@ -211,7 +211,7 @@ private:
nano::locked<std::future<void>> epoch_upgrading;
};
std::unique_ptr<container_info_component> collect_container_info (node & node, const std::string & name);
std::unique_ptr<container_info_component> collect_container_info (node & node, std::string const & name);
nano::node_flags const & inactive_node_flag_defaults ();

View file

@ -1,6 +1,6 @@
#include <nano/node/node_observers.hpp>
std::unique_ptr<nano::container_info_component> nano::collect_container_info (nano::node_observers & node_observers, const std::string & name)
std::unique_ptr<nano::container_info_component> nano::collect_container_info (nano::node_observers & node_observers, std::string const & name)
{
auto composite = std::make_unique<nano::container_info_composite> (name);
composite->add_component (collect_container_info (node_observers.blocks, "blocks"));

View file

@ -24,5 +24,5 @@ public:
nano::observer_set<nano::telemetry_data const &, nano::endpoint const &> telemetry;
};
std::unique_ptr<container_info_component> collect_container_info (node_observers & node_observers, const std::string & name);
std::unique_ptr<container_info_component> collect_container_info (node_observers & node_observers, std::string const & name);
}

View file

@ -116,7 +116,7 @@ void nano::online_reps::clear ()
online_m = 0;
}
std::unique_ptr<nano::container_info_component> nano::collect_container_info (online_reps & online_reps, const std::string & name)
std::unique_ptr<nano::container_info_component> nano::collect_container_info (online_reps & online_reps, std::string const & name)
{
size_t count;
{

View file

@ -69,8 +69,8 @@ private:
nano::uint128_t minimum;
friend class election_quorum_minimum_update_weight_before_quorum_checks_Test;
friend std::unique_ptr<container_info_component> collect_container_info (online_reps & online_reps, const std::string & name);
friend std::unique_ptr<container_info_component> collect_container_info (online_reps & online_reps, std::string const & name);
};
std::unique_ptr<container_info_component> collect_container_info (online_reps & online_reps, const std::string & name);
std::unique_ptr<container_info_component> collect_container_info (online_reps & online_reps, std::string const & name);
}

View file

@ -83,7 +83,7 @@ size_t nano::peer_exclusion::size () const
return peers.size ();
}
std::unique_ptr<nano::container_info_component> nano::collect_container_info (nano::peer_exclusion const & excluded_peers, const std::string & name)
std::unique_ptr<nano::container_info_component> nano::collect_container_info (nano::peer_exclusion const & excluded_peers, std::string const & name)
{
auto composite = std::make_unique<container_info_composite> (name);

View file

@ -57,5 +57,5 @@ public:
friend class telemetry_remove_peer_invalid_signature_Test;
friend class peer_exclusion_validate_Test;
};
std::unique_ptr<container_info_component> collect_container_info (peer_exclusion const & excluded_peers, const std::string & name);
std::unique_ptr<container_info_component> collect_container_info (peer_exclusion const & excluded_peers, std::string const & name);
}

View file

@ -54,7 +54,7 @@ public:
*/
class rep_crawler final
{
friend std::unique_ptr<container_info_component> collect_container_info (rep_crawler & rep_crawler, const std::string & name);
friend std::unique_ptr<container_info_component> collect_container_info (rep_crawler & rep_crawler, std::string const & name);
// clang-format off
class tag_account {};

View file

@ -246,7 +246,7 @@ std::vector<std::shared_ptr<nano::block>> nano::request_aggregator::aggregate (s
return to_generate;
}
std::unique_ptr<nano::container_info_component> nano::collect_container_info (nano::request_aggregator & aggregator, const std::string & name)
std::unique_ptr<nano::container_info_component> nano::collect_container_info (nano::request_aggregator & aggregator, std::string const & name)
{
auto pools_count = aggregator.size ();
auto sizeof_element = sizeof (decltype (aggregator.requests)::value_type);

View file

@ -160,7 +160,7 @@ void nano::state_block_signature_verification::verify_state_blocks (std::deque<s
}
}
std::unique_ptr<nano::container_info_component> nano::collect_container_info (state_block_signature_verification & state_block_signature_verification, const std::string & name)
std::unique_ptr<nano::container_info_component> nano::collect_container_info (state_block_signature_verification & state_block_signature_verification, std::string const & name)
{
auto composite = std::make_unique<container_info_composite> (name);
composite->add_component (std::make_unique<container_info_leaf> (container_info{ "state_blocks", state_block_signature_verification.size (), sizeof (nano::unchecked_info) }));

View file

@ -45,5 +45,5 @@ private:
void verify_state_blocks (std::deque<std::pair<nano::unchecked_info, bool>> &);
};
std::unique_ptr<nano::container_info_component> collect_container_info (state_block_signature_verification & state_block_signature_verification, const std::string & name);
std::unique_ptr<nano::container_info_component> collect_container_info (state_block_signature_verification & state_block_signature_verification, std::string const & name);
}

View file

@ -466,7 +466,7 @@ bool nano::telemetry_info::awaiting_first_response () const
return data == nano::telemetry_data ();
}
std::unique_ptr<nano::container_info_component> nano::collect_container_info (telemetry & telemetry, const std::string & name)
std::unique_ptr<nano::container_info_component> nano::collect_container_info (telemetry & telemetry, std::string const & name)
{
auto composite = std::make_unique<container_info_composite> (name);
size_t callbacks_count;

View file

@ -146,7 +146,7 @@ private:
friend class telemetry_remove_peer_invalid_signature_Test;
};
std::unique_ptr<nano::container_info_component> collect_container_info (telemetry & telemetry, const std::string & name);
std::unique_ptr<nano::container_info_component> collect_container_info (telemetry & telemetry, std::string const & name);
nano::telemetry_data consolidate_telemetry_data (std::vector<telemetry_data> const & telemetry_data);
nano::telemetry_data local_telemetry_data (nano::ledger const & ledger_a, nano::network &, uint64_t, nano::network_params const &, std::chrono::steady_clock::time_point, uint64_t, nano::keypair const &);

View file

@ -280,7 +280,7 @@ void nano::vote_processor::calculate_weights ()
}
}
std::unique_ptr<nano::container_info_component> nano::collect_container_info (vote_processor & vote_processor, const std::string & name)
std::unique_ptr<nano::container_info_component> nano::collect_container_info (vote_processor & vote_processor, std::string const & name)
{
size_t votes_count;
size_t representatives_1_count;

View file

@ -75,9 +75,9 @@ private:
bool is_active;
std::thread thread;
friend std::unique_ptr<container_info_component> collect_container_info (vote_processor & vote_processor, const std::string & name);
friend std::unique_ptr<container_info_component> collect_container_info (vote_processor & vote_processor, std::string const & name);
friend class vote_processor_weights_Test;
};
std::unique_ptr<container_info_component> collect_container_info (vote_processor & vote_processor, const std::string & name);
std::unique_ptr<container_info_component> collect_container_info (vote_processor & vote_processor, std::string const & name);
}

View file

@ -139,7 +139,7 @@ size_t nano::local_vote_history::size () const
return history.size ();
}
std::unique_ptr<nano::container_info_component> nano::collect_container_info (nano::local_vote_history & history, const std::string & name)
std::unique_ptr<nano::container_info_component> nano::collect_container_info (nano::local_vote_history & history, std::string const & name)
{
size_t history_count = history.size ();
auto sizeof_element = sizeof (decltype (history.history)::value_type);
@ -412,7 +412,7 @@ void nano::vote_generator_session::flush ()
}
}
std::unique_ptr<nano::container_info_component> nano::collect_container_info (nano::vote_generator & vote_generator, const std::string & name)
std::unique_ptr<nano::container_info_component> nano::collect_container_info (nano::vote_generator & vote_generator, std::string const & name)
{
size_t candidates_count = 0;
size_t requests_count = 0;

View file

@ -105,11 +105,11 @@ private:
bool consistency_check (nano::root const &) const;
mutable std::mutex mutex;
friend std::unique_ptr<container_info_component> collect_container_info (local_vote_history & history, const std::string & name);
friend std::unique_ptr<container_info_component> collect_container_info (local_vote_history & history, std::string const & name);
friend class local_vote_history_basic_Test;
};
std::unique_ptr<container_info_component> collect_container_info (local_vote_history & history, const std::string & name);
std::unique_ptr<container_info_component> collect_container_info (local_vote_history & history, std::string const & name);
class vote_generator final
{
@ -151,10 +151,10 @@ private:
bool started{ false };
std::thread thread;
friend std::unique_ptr<container_info_component> collect_container_info (vote_generator & vote_generator, const std::string & name);
friend std::unique_ptr<container_info_component> collect_container_info (vote_generator & vote_generator, std::string const & name);
};
std::unique_ptr<container_info_component> collect_container_info (vote_generator & generator, const std::string & name);
std::unique_ptr<container_info_component> collect_container_info (vote_generator & generator, std::string const & name);
class vote_generator_session final
{

View file

@ -1945,7 +1945,7 @@ MDB_txn * nano::wallet_store::tx (nano::transaction const & transaction_a) const
return static_cast<MDB_txn *> (transaction_a.get_handle ());
}
std::unique_ptr<nano::container_info_component> nano::collect_container_info (wallets & wallets, const std::string & name)
std::unique_ptr<nano::container_info_component> nano::collect_container_info (wallets & wallets, std::string const & name)
{
size_t items_count;
size_t actions_count;

View file

@ -262,7 +262,7 @@ private:
nano::wallet_representatives representatives;
};
std::unique_ptr<container_info_component> collect_container_info (wallets & wallets, const std::string & name);
std::unique_ptr<container_info_component> collect_container_info (wallets & wallets, std::string const & name);
class wallets_store
{

View file

@ -792,7 +792,7 @@ size_t nano::vote_uniquer::size ()
return votes.size ();
}
std::unique_ptr<nano::container_info_component> nano::collect_container_info (vote_uniquer & vote_uniquer, const std::string & name)
std::unique_ptr<nano::container_info_component> nano::collect_container_info (vote_uniquer & vote_uniquer, std::string const & name)
{
auto count = vote_uniquer.size ();
auto sizeof_element = sizeof (vote_uniquer::value_type);

View file

@ -292,7 +292,7 @@ private:
static unsigned constexpr cleanup_count = 2;
};
std::unique_ptr<container_info_component> collect_container_info (vote_uniquer & vote_uniquer, const std::string & name);
std::unique_ptr<container_info_component> collect_container_info (vote_uniquer & vote_uniquer, std::string const & name);
enum class vote_code
{

View file

@ -1529,7 +1529,7 @@ cemented_frontier (cemented_frontier), frontier (frontier), account (account)
{
}
std::unique_ptr<nano::container_info_component> nano::collect_container_info (ledger & ledger, const std::string & name)
std::unique_ptr<nano::container_info_component> nano::collect_container_info (ledger & ledger, std::string const & name)
{
auto count = ledger.bootstrap_weights_size.load ();
auto sizeof_element = sizeof (decltype (ledger.bootstrap_weights)::value_type);

View file

@ -83,5 +83,5 @@ private:
void initialize (nano::generate_cache const &);
};
std::unique_ptr<container_info_component> collect_container_info (ledger & ledger, const std::string & name);
std::unique_ptr<container_info_component> collect_container_info (ledger & ledger, std::string const & name);
}

View file

@ -56,42 +56,6 @@ boost::filesystem::path nano::working_path (bool legacy)
return result;
}
bool nano::migrate_working_path (std::string & error_string)
{
bool result (true);
auto old_path (nano::working_path (true));
auto new_path (nano::working_path ());
if (old_path != new_path)
{
boost::system::error_code status_error;
auto old_path_status (boost::filesystem::status (old_path, status_error));
if (status_error == boost::system::errc::success && boost::filesystem::exists (old_path_status) && boost::filesystem::is_directory (old_path_status))
{
auto new_path_status (boost::filesystem::status (new_path, status_error));
if (!boost::filesystem::exists (new_path_status))
{
boost::system::error_code rename_error;
boost::filesystem::rename (old_path, new_path, rename_error);
if (rename_error != boost::system::errc::success)
{
std::stringstream error_string_stream;
error_string_stream << "Unable to migrate data from " << old_path << " to " << new_path;
error_string = error_string_stream.str ();
result = false;
}
}
}
}
return result;
}
boost::filesystem::path nano::unique_path ()
{
auto result (working_path () / boost::filesystem::unique_path ());

View file

@ -8,8 +8,6 @@ namespace nano
{
// OS-specific way of finding a path to a home directory.
boost::filesystem::path working_path (bool = false);
// Function to migrate working_path() from above from RaiBlocks to Nano
bool migrate_working_path (std::string &);
// Get a unique path within the home directory, used for testing.
// Any directories created at this location will be removed when a test finishes.
boost::filesystem::path unique_path ();