Active elections loop tuning (#2306)

* Active elections tuning and removing per-block confirm_req

Co-authored-by: Srayman <nanofaucet@gmail.com>

* Don't broadcast and request confirmation in the same loop for the same election (see the scheduling sketch after the change summary below)

* Revert socket queue_size_max to original 128, to be increased via another PR

* Use non-doxygen in-definition

* Consistent loop initialization

* Move the alternating condition check upwards

* Fix incorrect documentation on max 16 representatives from rep_crawler

* Cleanup rep_crawler public methods

* Weight-ordered confirmation requests

* Rename confirm_frontiers to search_frontiers and split request_confirm into escalate/broadcast/confirm_req methods

* clang-format

* Add missing bundle insertion check and fix test

* More block broadcasting in tests
commit ab50aff66d by Guilherme Lawless, 2019-10-11 17:08:39 +01:00 (committed by GitHub)
16 changed files with 334 additions and 343 deletions
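
The first two bullets boil down to the reworked nano::active_transactions::request_confirm () loop further below: each request-loop round an election gets at most one action, chosen by a modulo on confirmation_request_count, so a single election is never broadcast and queried for confirmation in the same pass. A minimal standalone sketch of that selection, reusing the modulo constants from the diff; the loop driver and printed output are illustrative only, not node code.

// Sketch of the per-round action selection in request_confirm ().
// The real loop additionally gates on skip_delay / election_request_delay and on
// the long_election_threshold age check before escalating.
#include <cstdio>

int main ()
{
	bool const is_test_network (false);
	unsigned confirmation_request_count (0);
	for (unsigned round (0); round < 16; ++round)
	{
		char const * action ("none (incoming votes can still confirm)");
		if (confirmation_request_count % 8 == 1 || is_test_network)
		{
			action = "broadcast winner (election_broadcast)";
		}
		else if (confirmation_request_count % 4 == 0)
		{
			action = "confirm_req (election_request_confirm)";
		}
		bool const escalation_eligible (confirmation_request_count > 4);
		std::printf ("round %2u count %2u escalate=%d action=%s\n", round, confirmation_request_count, escalation_eligible, action);
		// The real loop skips this increment when the confirm_req bundles were full
		++confirmation_request_count;
	}
}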

View file

@ -22,15 +22,15 @@ TEST (active_transactions, bounded_active_elections)
{
node.process_active (send);
node.active.start (send);
ASSERT_NO_ERROR (system.poll ());
ASSERT_FALSE (node.active.empty ());
ASSERT_LE (node.active.size (), node.config.active_elections_size);
++count;
done = count > node.active.size ();
count++;
ASSERT_NO_ERROR (system.poll ());
auto previous_hash = send->hash ();
send = std::make_shared<nano::state_block> (nano::test_genesis_key.pub, previous_hash, nano::test_genesis_key.pub, nano::genesis_amount - count * nano::xrb_ratio, nano::test_genesis_key.pub, nano::test_genesis_key.prv, nano::test_genesis_key.pub, *system.work.generate (previous_hash));
//sleep this thread for the max delay between request loop rounds possible for such a small active_elections_size
std::this_thread::sleep_for (std::chrono::milliseconds (node.network_params.network.request_interval_ms + (node_config.active_elections_size * 20)));
//sleep this thread between request loop rounds
std::this_thread::sleep_for (std::chrono::milliseconds (2 * node.network_params.network.request_interval_ms));
}
}
@ -246,51 +246,50 @@ TEST (active_transactions, keep_local)
nano::system system;
nano::node_config node_config (24000, system.logging);
node_config.enable_voting = false;
node_config.active_elections_size = 3; //bound to 3, wont drop wallet created transactions, but good to test dropping remote
node_config.active_elections_size = 2; //bound to 2, wont drop wallet created transactions, but good to test dropping remote
// Disable frontier confirmation to allow the test to finish before
node_config.frontiers_confirmation = nano::frontiers_confirmation_mode::disabled;
auto & node1 = *system.add_node (node_config);
auto & node = *system.add_node (node_config);
auto & wallet (*system.wallet (0));
nano::genesis genesis;
//key 1/2 will be managed by the wallet
nano::keypair key1, key2, key3, key4;
nano::keypair key1, key2, key3, key4, key5, key6;
wallet.insert_adhoc (nano::test_genesis_key.prv);
wallet.insert_adhoc (key1.prv);
wallet.insert_adhoc (key2.prv);
auto send1 (wallet.send_action (nano::test_genesis_key.pub, key1.pub, node1.config.receive_minimum.number ()));
auto send2 (wallet.send_action (nano::test_genesis_key.pub, key2.pub, node1.config.receive_minimum.number ()));
auto send3 (wallet.send_action (nano::test_genesis_key.pub, key3.pub, node1.config.receive_minimum.number ()));
auto send4 (wallet.send_action (nano::test_genesis_key.pub, key4.pub, node1.config.receive_minimum.number ()));
auto send1 (wallet.send_action (nano::test_genesis_key.pub, key1.pub, node.config.receive_minimum.number ()));
auto send2 (wallet.send_action (nano::test_genesis_key.pub, key2.pub, node.config.receive_minimum.number ()));
auto send3 (wallet.send_action (nano::test_genesis_key.pub, key3.pub, node.config.receive_minimum.number ()));
auto send4 (wallet.send_action (nano::test_genesis_key.pub, key4.pub, node.config.receive_minimum.number ()));
auto send5 (wallet.send_action (nano::test_genesis_key.pub, key5.pub, node.config.receive_minimum.number ()));
auto send6 (wallet.send_action (nano::test_genesis_key.pub, key6.pub, node.config.receive_minimum.number ()));
system.deadline_set (10s);
while (node1.active.size () != 4)
// should not drop wallet created transactions
while (node.active.size () != 6)
{
ASSERT_NO_ERROR (system.poll ());
}
while (node1.active.size () != 0)
while (!node.active.empty ())
{
nano::lock_guard<std::mutex> active_guard (node1.active.mutex);
auto it (node1.active.roots.begin ());
while (!node1.active.roots.empty () && it != node1.active.roots.end ())
nano::lock_guard<std::mutex> active_guard (node.active.mutex);
auto it (node.active.roots.begin ());
while (!node.active.roots.empty () && it != node.active.roots.end ())
{
(it->election)->confirm_once ();
it = node1.active.roots.begin ();
it = node.active.roots.begin ();
}
}
auto open1 (std::make_shared<nano::state_block> (key3.pub, 0, key3.pub, nano::xrb_ratio, send3->hash (), key3.prv, key3.pub, *system.work.generate (key3.pub)));
node1.process_active (open1);
auto open2 (std::make_shared<nano::state_block> (key4.pub, 0, key4.pub, nano::xrb_ratio, send4->hash (), key4.prv, key4.pub, *system.work.generate (key4.pub)));
node1.process_active (open2);
//none are dropped since none are long_unconfirmed
auto open1 (std::make_shared<nano::state_block> (key1.pub, 0, key1.pub, node.config.receive_minimum.number (), send1->hash (), key1.prv, key1.pub, *system.work.generate (key1.pub)));
node.process_active (open1);
node.active.start (open1);
auto open2 (std::make_shared<nano::state_block> (key2.pub, 0, key2.pub, node.config.receive_minimum.number (), send2->hash (), key2.prv, key2.pub, *system.work.generate (key2.pub)));
node.process_active (open2);
node.active.start (open2);
auto open3 (std::make_shared<nano::state_block> (key3.pub, 0, key3.pub, node.config.receive_minimum.number (), send3->hash (), key3.prv, key3.pub, *system.work.generate (key3.pub)));
node.process_active (open3);
node.active.start (open3);
ASSERT_EQ (3, node.active.size ());
system.deadline_set (10s);
while (node1.active.size () != 4)
{
ASSERT_NO_ERROR (system.poll ());
}
auto send5 (wallet.send_action (nano::test_genesis_key.pub, key1.pub, node1.config.receive_minimum.number ()));
node1.active.start (send5);
//drop two lowest non-wallet managed active_transactions before inserting a new into active as all are long_unconfirmed
system.deadline_set (10s);
while (node1.active.size () != 3)
// bound elections, should drop after one loop
while (node.active.size () != node_config.active_elections_size)
{
ASSERT_NO_ERROR (system.poll ());
}
@ -301,7 +300,7 @@ TEST (active_transactions, prioritize_chains)
nano::system system;
nano::node_config node_config (24000, system.logging);
node_config.enable_voting = false;
node_config.active_elections_size = 4; //bound to 3, wont drop wallet created transactions, but good to test dropping remote
node_config.active_elections_size = 4; //bound to 4, wont drop wallet created transactions, but good to test dropping remote
// Disable frontier confirmation to allow the test to finish before
node_config.frontiers_confirmation = nano::frontiers_confirmation_mode::disabled;
auto & node1 = *system.add_node (node_config);
@ -352,16 +351,6 @@ TEST (active_transactions, prioritize_chains)
ASSERT_NO_ERROR (system.poll ());
}
system.deadline_set (10s);
bool done (false);
//wait for all to be long_unconfirmed
while (!done)
{
{
nano::lock_guard<std::mutex> guard (node1.active.mutex);
done = node1.active.long_unconfirmed_size == 4;
}
ASSERT_NO_ERROR (system.poll ());
}
std::this_thread::sleep_for (1s);
node1.process_active (open2);
system.deadline_set (10s);
@ -369,17 +358,6 @@ TEST (active_transactions, prioritize_chains)
{
ASSERT_NO_ERROR (system.poll ());
}
//wait for all to be long_unconfirmed
done = false;
system.deadline_set (10s);
while (!done)
{
{
nano::lock_guard<std::mutex> guard (node1.active.mutex);
done = node1.active.long_unconfirmed_size == 4;
}
ASSERT_NO_ERROR (system.poll ());
}
size_t seen (0);
{
auto it (node1.active.roots.get<1> ().begin ());

View file

@ -895,7 +895,7 @@ TEST (confirmation_height, prioritize_frontiers)
transaction.refresh ();
node->active.prioritize_frontiers_for_confirmation (transaction, std::chrono::seconds (1), std::chrono::seconds (1));
ASSERT_TRUE (priority_orders_match (node->active.priority_wallet_cementable_frontiers, std::array<nano::account, num_accounts>{ key3.pub, nano::genesis_account, key4.pub, key1.pub, key2.pub }));
node->active.confirm_frontiers (transaction);
node->active.search_frontiers (transaction);
// Check that the active transactions roots contains the frontiers
system.deadline_set (std::chrono::seconds (10));

View file

@ -205,7 +205,7 @@ TEST (node, node_receive_quorum)
nano::lock_guard<std::mutex> guard (system.nodes[0]->active.mutex);
auto info (system.nodes[0]->active.roots.find (nano::qualified_root (previous, previous)));
ASSERT_NE (system.nodes[0]->active.roots.end (), info);
done = info->election->confirmation_request_count > nano::active_transactions::minimum_confirmation_request_count;
done = info->election->confirmation_request_count > 2;
}
ASSERT_NO_ERROR (system.poll ());
}
@ -850,11 +850,21 @@ TEST (node_config, v17_v18_upgrade)
auto upgraded (false);
nano::node_config config;
config.logging.init (path);
// Initial values for configs that should be upgraded
config.active_elections_size = 50000;
config.vote_generator_delay = 500ms;
// These config options should not be present
ASSERT_FALSE (tree.get_optional_child ("backup_before_upgrade"));
ASSERT_FALSE (tree.get_optional_child ("work_watcher_period"));
config.deserialize_json (upgraded, tree);
// These configs should have been upgraded
ASSERT_EQ (100, tree.get<unsigned> ("vote_generator_delay"));
ASSERT_EQ (10000, tree.get<unsigned long long> ("active_elections_size"));
// The config options should be added after the upgrade
ASSERT_TRUE (!!tree.get_optional_child ("backup_before_upgrade"));
ASSERT_TRUE (!!tree.get_optional_child ("work_watcher_period"));
@ -878,6 +888,7 @@ TEST (node_config, v18_values)
// Check config is correct
{
tree.put ("active_elections_size", 10000);
tree.put ("vote_generator_delay", 100);
tree.put ("backup_before_upgrade", true);
tree.put ("work_watcher_period", 5);
@ -885,11 +896,13 @@ TEST (node_config, v18_values)
config.deserialize_json (upgraded, tree);
ASSERT_FALSE (upgraded);
ASSERT_EQ (config.active_elections_size, 10000);
ASSERT_EQ (config.vote_generator_delay.count (), 100);
ASSERT_EQ (config.backup_before_upgrade, true);
ASSERT_EQ (config.work_watcher_period.count (), 5);
// Check config is correct with other values
tree.put ("active_elections_size", 5);
tree.put ("vote_generator_delay", std::numeric_limits<unsigned long>::max () - 100);
tree.put ("backup_before_upgrade", false);
tree.put ("work_watcher_period", 999);
@ -897,6 +910,7 @@ TEST (node_config, v18_values)
upgraded = false;
config.deserialize_json (upgraded, tree);
ASSERT_FALSE (upgraded);
ASSERT_EQ (config.active_elections_size, 5);
ASSERT_EQ (config.vote_generator_delay.count (), std::numeric_limits<unsigned long>::max () - 100);
ASSERT_EQ (config.backup_before_upgrade, false);
ASSERT_EQ (config.work_watcher_period.count (), 999);
@ -2315,7 +2329,7 @@ TEST (node, confirm_quorum)
nano::lock_guard<std::mutex> guard (system.nodes[0]->active.mutex);
auto info (system.nodes[0]->active.roots.find (nano::qualified_root (send1->hash (), send1->hash ())));
ASSERT_NE (system.nodes[0]->active.roots.end (), info);
done = info->election->confirmation_request_count > nano::active_transactions::minimum_confirmation_request_count;
done = info->election->confirmation_request_count > 2;
}
ASSERT_NO_ERROR (system.poll ());
}

View file

@ -69,7 +69,7 @@ public:
default_rpc_port = is_live_network () ? 7076 : is_beta_network () ? 55000 : 45000;
default_ipc_port = is_live_network () ? 7077 : is_beta_network () ? 56000 : 46000;
default_websocket_port = is_live_network () ? 7078 : is_beta_network () ? 57000 : 47000;
request_interval_ms = is_test_network () ? (is_sanitizer_build ? 100 : 20) : 16000;
request_interval_ms = is_test_network () ? (is_sanitizer_build ? 100 : 20) : 500;
}
/** Network work thresholds. ~5 seconds of work for the live network */

View file

@ -5,12 +5,13 @@
#include <numeric>
size_t constexpr nano::active_transactions::max_broadcast_queue;
using namespace std::chrono;
nano::active_transactions::active_transactions (nano::node & node_a) :
node (node_a),
long_election_threshold (node.network_params.network.is_test_network () ? 2s : 24s),
election_request_delay (node.network_params.network.is_test_network () ? 0s : 1s),
election_time_to_live (node.network_params.network.is_test_network () ? 0s : 10s),
multipliers_cb (20, 1.),
trended_active_difficulty (node.network_params.network.publish_threshold),
next_frontier_check (steady_clock::now ()),
@ -28,27 +29,27 @@ nano::active_transactions::~active_transactions ()
stop ();
}
void nano::active_transactions::confirm_frontiers (nano::transaction const & transaction_a)
void nano::active_transactions::search_frontiers (nano::transaction const & transaction_a)
{
// Limit maximum count of elections to start
bool representative (node.config.enable_voting && node.wallets.reps_count > 0);
bool half_princpal_representative (representative && node.wallets.half_principal_reps_count > 0);
/* Check less frequently for regular nodes in auto mode */
bool agressive_mode (half_princpal_representative || node.config.frontiers_confirmation == nano::frontiers_confirmation_mode::always);
auto agressive_factor = agressive_mode ? 3min : 15min;
auto request_interval (std::chrono::milliseconds (node.network_params.network.request_interval_ms));
auto agressive_factor = request_interval * (agressive_mode ? 20 : 100);
// Decrease check time for test network
auto is_test_network = node.network_params.network.is_test_network ();
int test_network_factor = is_test_network ? 1000 : 1;
auto roots_size = size ();
auto max_elections = (max_broadcast_queue / 4);
nano::unique_lock<std::mutex> lk (mutex);
auto check_time_exceeded = std::chrono::steady_clock::now () >= next_frontier_check;
lk.unlock ();
auto max_elections = (node.config.active_elections_size / 20);
auto low_active_elections = roots_size < max_elections;
bool wallets_check_required = (!skip_wallets || !priority_wallet_cementable_frontiers.empty ()) && !agressive_mode;
// To minimise dropping real-time transactions, set the maximum number of elections
// for cementing frontiers to half the total active election maximum.
const auto max_active = node.config.active_elections_size / 2;
// Minimise dropping real-time transactions, set the number of frontiers added to a factor of the total number of active elections
auto max_active = node.config.active_elections_size / 5;
if (roots_size <= max_active && (check_time_exceeded || wallets_check_required || (!is_test_network && low_active_elections && agressive_mode)))
{
// When the number of active elections is low increase max number of elections for setting confirmation height.
@ -58,8 +59,8 @@ void nano::active_transactions::confirm_frontiers (nano::transaction const & tra
}
// Spend time prioritizing accounts to reduce voting traffic
auto time_spent_prioritizing_ledger_accounts = (std::chrono::seconds (2));
auto time_spent_prioritizing_wallet_accounts = std::chrono::milliseconds (50);
auto time_spent_prioritizing_ledger_accounts = request_interval / 10;
auto time_spent_prioritizing_wallet_accounts = request_interval / 25;
prioritize_frontiers_for_confirmation (transaction_a, is_test_network ? std::chrono::milliseconds (50) : time_spent_prioritizing_ledger_accounts, time_spent_prioritizing_wallet_accounts);
size_t elections_count (0);
@ -82,7 +83,7 @@ void nano::active_transactions::confirm_frontiers (nano::transaction const & tra
if (info.block_count > confirmation_height && !this->node.pending_confirmation_height.is_processing_block (info.head))
{
auto block (this->node.store.block_get (transaction_a, info.head));
if (!this->start (block))
if (!this->start (block, true))
{
++elections_count;
// Calculate votes for local representatives
@ -148,189 +149,258 @@ void nano::active_transactions::post_confirmation_height_set (nano::transaction
}
}
void nano::active_transactions::request_confirm (nano::unique_lock<std::mutex> & lock_a)
void nano::active_transactions::election_escalate (std::shared_ptr<nano::election> & election_l, nano::transaction const & transaction_l, size_t const & roots_size_l)
{
std::unordered_set<nano::qualified_root> inactive_l;
auto transaction_l (node.store.tx_begin_read ());
unsigned unconfirmed_count_l (0);
unsigned unconfirmed_request_count_l (0);
unsigned could_fit_delay_l = node.network_params.network.is_test_network () ? high_confirmation_request_count - 1 : 1;
std::deque<std::shared_ptr<nano::block>> blocks_bundle_l;
std::unordered_map<std::shared_ptr<nano::transport::channel>, std::deque<std::pair<nano::block_hash, nano::root>>> batched_confirm_req_bundle_l;
std::deque<std::pair<std::shared_ptr<nano::block>, std::shared_ptr<std::vector<std::shared_ptr<nano::transport::channel>>>>> single_confirm_req_bundle_l;
/* Confirm frontiers when there aren't many confirmations already pending and node finished initial bootstrap
In auto mode start confirm only if node contains almost principal representative (half of required for principal weight)
The confirmation height processor works asynchronously, compressing several roots into one frontier, so probably_unconfirmed_frontiers is not always correct*/
lock_a.unlock ();
auto pending_confirmation_height_size (node.pending_confirmation_height.size ());
bool probably_unconfirmed_frontiers (node.ledger.block_count_cache > node.ledger.cemented_count + roots.size () + pending_confirmation_height_size);
bool bootstrap_weight_reached (node.ledger.block_count_cache >= node.ledger.bootstrap_weight_max_blocks);
if (node.config.frontiers_confirmation != nano::frontiers_confirmation_mode::disabled && bootstrap_weight_reached && probably_unconfirmed_frontiers && pending_confirmation_height_size < confirmed_frontiers_max_pending_cut_off)
static unsigned constexpr high_confirmation_request_count{ 16 };
// Log votes for very long unconfirmed elections
if (election_l->confirmation_request_count % (4 * high_confirmation_request_count) == 1)
{
confirm_frontiers (transaction_l);
auto tally_l (election_l->tally ());
election_l->log_votes (tally_l);
}
lock_a.lock ();
auto representatives_l (node.rep_crawler.representatives (std::numeric_limits<size_t>::max ()));
auto roots_size_l (roots.size ());
for (auto i (roots.get<1> ().begin ()), n (roots.get<1> ().end ()); i != n; ++i)
/*
* Escalation for long unconfirmed elections
* Start new elections for previous block & source if there are less than 100 active elections
*/
if (election_l->confirmation_request_count % high_confirmation_request_count == 1 && roots_size_l < 100 && !node.network_params.network.is_test_network ())
{
auto root_l (i->root);
auto election_l (i->election);
if ((election_l->confirmed || election_l->stopped) && election_l->confirmation_request_count >= minimum_confirmation_request_count - 1)
bool escalated_l (false);
std::shared_ptr<nano::block> previous_l;
auto previous_hash_l (election_l->status.winner->previous ());
if (!previous_hash_l.is_zero ())
{
if (election_l->stopped)
previous_l = node.store.block_get (transaction_l, previous_hash_l);
if (previous_l != nullptr && blocks.find (previous_hash_l) == blocks.end () && !node.block_confirmed_or_being_confirmed (transaction_l, previous_hash_l))
{
inactive_l.insert (root_l);
add (std::move (previous_l), true);
escalated_l = true;
}
}
else
{
if (election_l->confirmation_request_count > high_confirmation_request_count)
{
++unconfirmed_count_l;
unconfirmed_request_count_l += election_l->confirmation_request_count;
// Log votes for very long unconfirmed elections
if (election_l->confirmation_request_count % 50 == 1)
{
auto tally_l (election_l->tally ());
election_l->log_votes (tally_l);
}
/* Escalation for long unconfirmed elections
Start new elections for previous block & source
if there are less than 100 active elections */
if (election_l->confirmation_request_count % high_confirmation_request_count == 1 && roots_size_l < 100 && !node.network_params.network.is_test_network ())
{
bool escalated_l (false);
std::shared_ptr<nano::block> previous_l;
auto previous_hash_l (election_l->status.winner->previous ());
if (!previous_hash_l.is_zero ())
{
previous_l = node.store.block_get (transaction_l, previous_hash_l);
if (previous_l != nullptr && blocks.find (previous_hash_l) == blocks.end () && !node.block_confirmed_or_being_confirmed (transaction_l, previous_hash_l))
{
add (std::move (previous_l));
escalated_l = true;
}
}
/* If previous block not existing/not commited yet, block_source can cause segfault for state blocks
/* If previous block not existing/not commited yet, block_source can cause segfault for state blocks
So source check can be done only if previous != nullptr or previous is 0 (open account) */
if (previous_hash_l.is_zero () || previous_l != nullptr)
{
auto source_hash (node.ledger.block_source (transaction_l, *election_l->status.winner));
if (!source_hash.is_zero () && source_hash != previous_hash_l && blocks.find (source_hash) == blocks.end ())
{
auto source (node.store.block_get (transaction_l, source_hash));
if (source != nullptr && !node.block_confirmed_or_being_confirmed (transaction_l, source_hash))
{
add (std::move (source));
escalated_l = true;
}
}
}
if (escalated_l)
{
election_l->update_dependent ();
}
if (previous_hash_l.is_zero () || previous_l != nullptr)
{
auto source_hash_l (node.ledger.block_source (transaction_l, *election_l->status.winner));
if (!source_hash_l.is_zero () && source_hash_l != previous_hash_l && blocks.find (source_hash_l) == blocks.end ())
{
auto source_l (node.store.block_get (transaction_l, source_hash_l));
if (source_l != nullptr && !node.block_confirmed_or_being_confirmed (transaction_l, source_hash_l))
{
add (std::move (source_l), true);
escalated_l = true;
}
}
if (election_l->confirmation_request_count < high_confirmation_request_count || election_l->confirmation_request_count % high_confirmation_request_count == could_fit_delay_l)
{
if (node.ledger.could_fit (transaction_l, *election_l->status.winner))
{
// Broadcast winner
if (blocks_bundle_l.size () < max_broadcast_queue)
{
blocks_bundle_l.push_back (election_l->status.winner);
}
}
else
{
if (election_l->confirmation_request_count != 0)
{
election_l->stop ();
inactive_l.insert (root_l);
}
}
}
std::unordered_set<std::shared_ptr<nano::transport::channel>> rep_channels_missing_vote_l;
// Add all rep endpoints that haven't already voted
for (auto & rep : representatives_l)
{
if (election_l->last_votes.find (rep.account) == election_l->last_votes.end ())
{
rep_channels_missing_vote_l.insert (rep.channel);
}
if (escalated_l)
{
election_l->update_dependent ();
}
}
}
if (node.config.logging.vote_logging ())
void nano::active_transactions::election_broadcast (std::shared_ptr<nano::election> & election_l, nano::transaction const & transaction_l, std::deque<std::shared_ptr<nano::block>> & blocks_bundle_l, std::unordered_set<nano::qualified_root> & inactive_l, nano::qualified_root & root_l)
{
if (node.ledger.could_fit (transaction_l, *election_l->status.winner))
{
// Broadcast current winner
if (blocks_bundle_l.size () < max_block_broadcasts)
{
blocks_bundle_l.push_back (election_l->status.winner);
}
}
else if (election_l->confirmation_request_count != 0)
{
election_l->stop ();
inactive_l.insert (root_l);
}
}
bool nano::active_transactions::election_request_confirm (std::shared_ptr<nano::election> & election_l, std::vector<nano::representative> const & representatives_l, size_t const & roots_size_l,
std::deque<std::pair<std::shared_ptr<nano::block>, std::shared_ptr<std::vector<std::shared_ptr<nano::transport::channel>>>>> & single_confirm_req_bundle_l,
std::unordered_map<std::shared_ptr<nano::transport::channel>, std::deque<std::pair<nano::block_hash, nano::root>>> & batched_confirm_req_bundle_l)
{
bool inserted_into_any_bundle{ false };
std::vector<std::shared_ptr<nano::transport::channel>> rep_channels_missing_vote_l;
// Add all rep endpoints that haven't already voted
for (const auto & rep : representatives_l)
{
if (election_l->last_votes.find (rep.account) == election_l->last_votes.end ())
{
rep_channels_missing_vote_l.push_back (rep.channel);
if (node.config.logging.vote_logging () && election_l->confirmation_request_count > 0)
{
node.logger.try_log ("Representative did not respond to confirm_req, retrying: ", rep.account.to_account ());
}
}
}
// Unique channels as there can be multiple reps per channel
rep_channels_missing_vote_l.erase (std::unique (rep_channels_missing_vote_l.begin (), rep_channels_missing_vote_l.end ()), rep_channels_missing_vote_l.end ());
bool low_reps_weight (rep_channels_missing_vote_l.empty () || node.rep_crawler.total_weight () < node.config.online_weight_minimum.number ());
if (low_reps_weight && roots_size_l <= 5 && !node.network_params.network.is_test_network ())
{
// Spam mode
auto deque_l (node.network.udp_channels.random_set (100));
auto vec (std::make_shared<std::vector<std::shared_ptr<nano::transport::channel>>> ());
for (auto i : deque_l)
{
vec->push_back (i);
}
single_confirm_req_bundle_l.push_back (std::make_pair (election_l->status.winner, vec));
inserted_into_any_bundle = true;
}
else
{
auto single_confirm_req_channels_l (std::make_shared<std::vector<std::shared_ptr<nano::transport::channel>>> ());
for (auto & rep : rep_channels_missing_vote_l)
{
if (rep->get_network_version () >= node.network_params.protocol.tcp_realtime_protocol_version_min)
{
// Send batch request to peers supporting confirm_req by hash + root
auto rep_request_l (batched_confirm_req_bundle_l.find (rep));
auto block_l (election_l->status.winner);
auto root_hash_l (std::make_pair (block_l->hash (), block_l->root ()));
if (rep_request_l == batched_confirm_req_bundle_l.end ())
{
// Maximum number of representatives
if (batched_confirm_req_bundle_l.size () < max_confirm_representatives)
{
node.logger.try_log ("Representative did not respond to confirm_req, retrying: ", rep.account.to_account ());
std::deque<std::pair<nano::block_hash, nano::root>> insert_root_hash = { root_hash_l };
batched_confirm_req_bundle_l.insert (std::make_pair (rep, insert_root_hash));
inserted_into_any_bundle = true;
}
}
}
bool low_reps_weight (rep_channels_missing_vote_l.empty () || node.rep_crawler.total_weight () < node.config.online_weight_minimum.number ());
if (low_reps_weight && roots_size_l <= 5 && !node.network_params.network.is_test_network ())
{
// Spam mode
auto deque_l (node.network.udp_channels.random_set (100));
auto vec (std::make_shared<std::vector<std::shared_ptr<nano::transport::channel>>> ());
for (auto i : deque_l)
// Maximum number of hashes
else if (rep_request_l->second.size () < max_confirm_req_batches * nano::network::confirm_req_hashes_max)
{
vec->push_back (i);
rep_request_l->second.push_back (root_hash_l);
inserted_into_any_bundle = true;
}
single_confirm_req_bundle_l.push_back (std::make_pair (election_l->status.winner, vec));
}
else
{
auto single_confirm_req_channels (std::make_shared<std::vector<std::shared_ptr<nano::transport::channel>>> ());
for (auto & rep : rep_channels_missing_vote_l)
{
if (rep->get_network_version () >= node.network_params.protocol.tcp_realtime_protocol_version_min)
{
// Send batch request to peers supporting confirm_req by hash + root
auto rep_request (batched_confirm_req_bundle_l.find (rep));
auto block (election_l->status.winner);
auto root_hash (std::make_pair (block->hash (), block->root ()));
if (rep_request == batched_confirm_req_bundle_l.end ())
{
if (batched_confirm_req_bundle_l.size () < max_broadcast_queue)
{
std::deque<std::pair<nano::block_hash, nano::root>> insert_root_hash = { root_hash };
batched_confirm_req_bundle_l.insert (std::make_pair (rep, insert_root_hash));
}
}
else if (rep_request->second.size () < max_broadcast_queue * nano::network::confirm_req_hashes_max)
{
rep_request->second.push_back (root_hash);
}
}
else
{
single_confirm_req_channels->push_back (rep);
}
}
// broadcast_confirm_req_base modifies reps, so we clone it once to avoid aliasing
if (single_confirm_req_bundle_l.size () < max_broadcast_queue && !single_confirm_req_channels->empty ())
{
single_confirm_req_bundle_l.push_back (std::make_pair (election_l->status.winner, single_confirm_req_channels));
}
single_confirm_req_channels_l->push_back (rep);
}
}
// broadcast_confirm_req_base modifies reps, so we clone it once to avoid aliasing
if (single_confirm_req_bundle_l.size () < max_confirm_req && !single_confirm_req_channels_l->empty ())
{
single_confirm_req_bundle_l.push_back (std::make_pair (election_l->status.winner, single_confirm_req_channels_l));
inserted_into_any_bundle = true;
}
}
return inserted_into_any_bundle;
}
void nano::active_transactions::request_confirm (nano::unique_lock<std::mutex> & lock_a)
{
assert (!mutex.try_lock ());
auto transaction_l (node.store.tx_begin_read ());
std::unordered_set<nano::qualified_root> inactive_l;
std::deque<std::shared_ptr<nano::block>> blocks_bundle_l;
std::unordered_map<std::shared_ptr<nano::transport::channel>, std::deque<std::pair<nano::block_hash, nano::root>>> batched_confirm_req_bundle_l;
std::deque<std::pair<std::shared_ptr<nano::block>, std::shared_ptr<std::vector<std::shared_ptr<nano::transport::channel>>>>> single_confirm_req_bundle_l;
lock_a.unlock ();
/*
* Confirm frontiers when there aren't many confirmations already pending and node finished initial bootstrap
* In auto mode start confirm only if node contains almost principal representative (half of required for principal weight)
*/
// Due to the confirmation height processor working asynchronously and compressing several roots into one frontier, probably_unconfirmed_frontiers can be wrong
{
auto pending_confirmation_height_size (node.pending_confirmation_height.size ());
bool probably_unconfirmed_frontiers (node.ledger.block_count_cache > node.ledger.cemented_count + roots.size () + pending_confirmation_height_size);
bool bootstrap_weight_reached (node.ledger.block_count_cache >= node.ledger.bootstrap_weight_max_blocks);
if (node.config.frontiers_confirmation != nano::frontiers_confirmation_mode::disabled && bootstrap_weight_reached && probably_unconfirmed_frontiers && pending_confirmation_height_size < confirmed_frontiers_max_pending_cut_off)
{
search_frontiers (transaction_l);
}
}
lock_a.lock ();
// Any new election started from process_live only gets requests after at least 1 second
auto cutoff_l (std::chrono::steady_clock::now () - election_request_delay);
// Elections taking too long get escalated
auto long_election_cutoff_l (std::chrono::steady_clock::now () - long_election_threshold);
// The lowest PoW difficulty elections have a maximum time to live if they are beyond the soft threshold size for the container
auto election_ttl_cutoff_l (std::chrono::steady_clock::now () - election_time_to_live);
auto const representatives_l (node.rep_crawler.representatives (std::numeric_limits<size_t>::max ()));
auto roots_size_l (roots.size ());
auto & sorted_roots_l = roots.get<1> ();
size_t count_l{ 0 };
/*
* Loop through active elections in descending order of proof-of-work difficulty, requesting confirmation
*
* Only up to a certain amount of elections are queued for confirmation request and block rebroadcasting. The remaining elections can still be confirmed if votes arrive
* We avoid selecting the same elections repeatedly in the next loops, through a modulo on confirmation_request_count
* An election only gets confirmation_request_count increased after the first confirm_req; after that it is increased every loop unless they don't fit in the queues
* Elections extending the soft config.active_elections_size limit are flushed after a certain time-to-live cutoff
* Flushed elections are later re-activated via frontier confirmation
*/
for (auto i = sorted_roots_l.begin (), n = sorted_roots_l.end (); i != n; ++i, ++count_l)
{
auto election_l (i->election);
auto root_l (i->root);
// Erase finished elections
if ((election_l->confirmed || election_l->stopped))
{
inactive_l.insert (root_l);
}
// Drop elections
else if (count_l >= node.config.active_elections_size && election_l->election_start < election_ttl_cutoff_l && !node.wallets.watcher->is_watched (root_l))
{
election_l->stop ();
inactive_l.insert (root_l);
}
// Broadcast and request confirmation
else if (election_l->skip_delay || election_l->election_start < cutoff_l)
{
bool increment_counter_l{ true };
// Escalate long election after a certain time and number of requests performed
if (election_l->confirmation_request_count > 4 && election_l->election_start < long_election_cutoff_l)
{
election_escalate (election_l, transaction_l, roots_size_l);
}
// Block broadcasting
if (election_l->confirmation_request_count % 8 == 1 || node.network_params.network.is_test_network ())
{
election_broadcast (election_l, transaction_l, blocks_bundle_l, inactive_l, root_l);
}
// Confirmation requesting
else if (election_l->confirmation_request_count % 4 == 0)
{
// If failed to insert into any of the bundles (capped), don't increment the counter so that the same root is sent for confirmation in the next loop
if (!election_request_confirm (election_l, representatives_l, roots_size_l, single_confirm_req_bundle_l, batched_confirm_req_bundle_l))
{
increment_counter_l = false;
}
}
if (increment_counter_l)
{
++election_l->confirmation_request_count;
}
}
++election_l->confirmation_request_count;
}
ongoing_broadcasts = !blocks_bundle_l.empty () + !batched_confirm_req_bundle_l.empty () + !single_confirm_req_bundle_l.empty ();
lock_a.unlock ();
// Rebroadcast unconfirmed blocks
if (!blocks_bundle_l.empty ())
{
node.network.flood_block_many (std::move (blocks_bundle_l), [this]() {
node.network.flood_block_many (
std::move (blocks_bundle_l), [this]() {
{
nano::lock_guard<std::mutex> guard_l (this->mutex);
--this->ongoing_broadcasts;
}
this->condition.notify_all ();
});
},
10); // 500ms / (10ms / 1 block) > 30 blocks
}
// Batched confirmation requests
// Batch confirmation request
if (!batched_confirm_req_bundle_l.empty ())
{
node.network.broadcast_confirm_req_batched_many (
@ -341,18 +411,20 @@ void nano::active_transactions::request_confirm (nano::unique_lock<std::mutex> &
}
this->condition.notify_all ();
},
50);
20); // 500ms / (20ms / 5 batch size) > (20*7 = 140) batches
}
// Single confirmation requests
if (!single_confirm_req_bundle_l.empty ())
{
node.network.broadcast_confirm_req_many (single_confirm_req_bundle_l, [this]() {
node.network.broadcast_confirm_req_many (
single_confirm_req_bundle_l, [this]() {
{
nano::lock_guard<std::mutex> guard_l (this->mutex);
--this->ongoing_broadcasts;
}
this->condition.notify_all ();
});
},
10); // 500ms / (10-20ms / 1 req) > 15 reqs
}
lock_a.lock ();
// Erase inactive elections
@ -366,11 +438,6 @@ void nano::active_transactions::request_confirm (nano::unique_lock<std::mutex> &
roots.erase (root_it);
}
}
long_unconfirmed_size = unconfirmed_count_l;
if (unconfirmed_count_l > 0)
{
node.logger.try_log (boost::str (boost::format ("%1% blocks have been unconfirmed averaging %2% confirmation requests") % unconfirmed_count_l % (unconfirmed_request_count_l / unconfirmed_count_l)));
}
}
void nano::active_transactions::request_loop ()
@ -388,12 +455,12 @@ void nano::active_transactions::request_loop ()
while (!stopped)
{
// Account for the time spent in request_confirm by defining the wakeup point beforehand
const auto wakeup_l (std::chrono::steady_clock::now () + std::chrono::milliseconds (node.network_params.network.request_interval_ms));
request_confirm (lock);
update_active_difficulty (lock);
const auto extra_delay_l (std::min (roots.size (), max_broadcast_queue) * node.network.broadcast_interval_ms * 2);
const auto wakeup_l (std::chrono::steady_clock::now () + std::chrono::milliseconds (node.network_params.network.request_interval_ms + extra_delay_l));
// Sleep until all broadcasts are done, plus the remaining loop time
while (!stopped && ongoing_broadcasts)
{
@ -583,13 +650,13 @@ void nano::active_transactions::stop ()
roots.clear ();
}
bool nano::active_transactions::start (std::shared_ptr<nano::block> block_a, std::function<void(std::shared_ptr<nano::block>)> const & confirmation_action_a)
bool nano::active_transactions::start (std::shared_ptr<nano::block> block_a, bool const skip_delay_a, std::function<void(std::shared_ptr<nano::block>)> const & confirmation_action_a)
{
nano::lock_guard<std::mutex> lock (mutex);
return add (block_a, confirmation_action_a);
return add (block_a, skip_delay_a, confirmation_action_a);
}
bool nano::active_transactions::add (std::shared_ptr<nano::block> block_a, std::function<void(std::shared_ptr<nano::block>)> const & confirmation_action_a)
bool nano::active_transactions::add (std::shared_ptr<nano::block> block_a, bool const skip_delay_a, std::function<void(std::shared_ptr<nano::block>)> const & confirmation_action_a)
{
auto error (true);
if (!stopped)
@ -599,7 +666,7 @@ bool nano::active_transactions::add (std::shared_ptr<nano::block> block_a, std::
if (existing == roots.end () && confirmed_set.get<1> ().find (root) == confirmed_set.get<1> ().end ())
{
auto hash (block_a->hash ());
auto election (nano::make_shared<nano::election> (node, block_a, confirmation_action_a));
auto election (nano::make_shared<nano::election> (node, block_a, skip_delay_a, confirmation_action_a));
uint64_t difficulty (0);
error = nano::work_validate (*block_a, &difficulty);
release_assert (!error);
@ -608,10 +675,6 @@ bool nano::active_transactions::add (std::shared_ptr<nano::block> block_a, std::
adjust_difficulty (hash);
election->insert_inactive_votes_cache ();
}
if (roots.size () >= node.config.active_elections_size)
{
flush_lowest ();
}
}
return error;
}
@ -801,8 +864,7 @@ void nano::active_transactions::update_active_difficulty (nano::unique_lock<std:
{
std::vector<uint64_t> active_root_difficulties;
active_root_difficulties.reserve (roots.size ());
auto min_election_time (std::chrono::milliseconds (node.network_params.network.request_interval_ms));
auto cutoff (std::chrono::steady_clock::now () - min_election_time);
auto cutoff (std::chrono::steady_clock::now () - election_request_delay - 1s);
for (auto & root : roots)
{
if (!root.election->confirmed && !root.election->stopped && root.election->election_start < cutoff)
@ -886,36 +948,6 @@ void nano::active_transactions::erase (nano::block const & block_a)
}
}
void nano::active_transactions::flush_lowest ()
{
size_t count (0);
assert (!roots.empty ());
auto & sorted_roots = roots.get<1> ();
for (auto it = sorted_roots.rbegin (); it != sorted_roots.rend ();)
{
if (count != 2)
{
auto election = it->election;
if (election->confirmation_request_count > high_confirmation_request_count && !election->confirmed && !election->stopped && !node.wallets.watcher->is_watched (it->root))
{
it = decltype (it){ sorted_roots.erase (std::next (it).base ()) };
election->stop ();
election->clear_blocks ();
election->clear_dependent ();
count++;
}
else
{
++it;
}
}
else
{
break;
}
}
}
bool nano::active_transactions::empty ()
{
nano::lock_guard<std::mutex> lock (mutex);
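
The request_loop hunk above now fixes the wakeup point before request_confirm runs, and each broadcast helper decrements ongoing_broadcasts from its completion callback so the loop can wait for a round's traffic to drain before sleeping out the rest of the interval. A self-contained sketch of that wait pattern, assuming a 500ms interval and one simulated 50ms broadcast per round; the worker thread is invented for illustration and is not how the node sends traffic.

#include <chrono>
#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <thread>

int main ()
{
	std::mutex mutex;
	std::condition_variable condition;
	unsigned ongoing_broadcasts (0);
	bool stopped (false);
	auto const request_interval (std::chrono::milliseconds (500));
	std::unique_lock<std::mutex> lock (mutex);
	for (int round (0); round < 3 && !stopped; ++round)
	{
		// Account for the time spent below by defining the wakeup point beforehand
		auto const wakeup (std::chrono::steady_clock::now () + request_interval);
		// request_confirm () would queue broadcasts here; simulate one finishing after 50ms
		++ongoing_broadcasts;
		std::thread ([&] {
			std::this_thread::sleep_for (std::chrono::milliseconds (50));
			{
				std::lock_guard<std::mutex> guard (mutex);
				--ongoing_broadcasts;
			}
			condition.notify_all ();
		})
		.detach ();
		// Sleep until all broadcasts are done, plus the remaining loop time
		condition.wait (lock, [&] { return stopped || ongoing_broadcasts == 0; });
		condition.wait_until (lock, wakeup, [&] { return stopped; });
		std::printf ("round %d complete\n", round);
	}
}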

View file

@ -3,6 +3,8 @@
#include <nano/lib/numbers.hpp>
#include <nano/lib/timer.hpp>
#include <nano/node/gap_cache.hpp>
#include <nano/node/repcrawler.hpp>
#include <nano/node/transport/transport.hpp>
#include <nano/secure/common.hpp>
#include <boost/circular_buffer.hpp>
@ -86,7 +88,7 @@ public:
// Start an election for a block
// Call action with confirmed block, may be different than what we started with
// clang-format off
bool start (std::shared_ptr<nano::block>, std::function<void(std::shared_ptr<nano::block>)> const & = [](std::shared_ptr<nano::block>) {});
bool start (std::shared_ptr<nano::block>, bool const = false, std::function<void(std::shared_ptr<nano::block>)> const & = [](std::shared_ptr<nano::block>) {});
// clang-format on
// If this returns true, the vote is a replay
// If this returns false, the vote may or may not be a replay
@ -101,8 +103,6 @@ public:
uint64_t limited_active_difficulty ();
std::deque<std::shared_ptr<nano::block>> list_blocks (bool = false);
void erase (nano::block const &);
//drop 2 from roots based on adjusted_difficulty
void flush_lowest ();
bool empty ();
size_t size ();
void stop ();
@ -126,12 +126,15 @@ public:
nano::gap_information find_inactive_votes_cache (nano::block_hash const &);
nano::node & node;
std::mutex mutex;
// Minimum number of confirmation requests
static unsigned constexpr minimum_confirmation_request_count = 2;
// Threshold for considering confirmation request count high
static unsigned constexpr high_confirmation_request_count = 2;
size_t long_unconfirmed_size = 0;
static size_t constexpr max_broadcast_queue = 1000;
std::chrono::seconds const long_election_threshold;
// Delay until requesting confirmation for an election
std::chrono::milliseconds const election_request_delay;
// Maximum time an election can be kept active if it is extending the container
std::chrono::seconds const election_time_to_live;
static size_t constexpr max_block_broadcasts = 30;
static size_t constexpr max_confirm_representatives = 30;
static size_t constexpr max_confirm_req_batches = 20;
static size_t constexpr max_confirm_req = 15;
boost::circular_buffer<double> multipliers_cb;
uint64_t trended_active_difficulty;
size_t priority_cementable_frontiers_size ();
@ -144,11 +147,16 @@ public:
private:
// Call action with confirmed block, may be different than what we started with
// clang-format off
bool add (std::shared_ptr<nano::block>, std::function<void(std::shared_ptr<nano::block>)> const & = [](std::shared_ptr<nano::block>) {});
bool add (std::shared_ptr<nano::block>, bool const = false, std::function<void(std::shared_ptr<nano::block>)> const & = [](std::shared_ptr<nano::block>) {});
// clang-format on
void request_loop ();
void search_frontiers (nano::transaction const &);
void election_escalate (std::shared_ptr<nano::election> &, nano::transaction const &, size_t const &);
void election_broadcast (std::shared_ptr<nano::election> &, nano::transaction const &, std::deque<std::shared_ptr<nano::block>> &, std::unordered_set<nano::qualified_root> &, nano::qualified_root &);
bool election_request_confirm (std::shared_ptr<nano::election> &, std::vector<nano::representative> const &, size_t const &,
std::deque<std::pair<std::shared_ptr<nano::block>, std::shared_ptr<std::vector<std::shared_ptr<nano::transport::channel>>>>> & single_confirm_req_bundle_l,
std::unordered_map<std::shared_ptr<nano::transport::channel>, std::deque<std::pair<nano::block_hash, nano::root>>> & batched_confirm_req_bundle_l);
void request_confirm (nano::unique_lock<std::mutex> &);
void confirm_frontiers (nano::transaction const &);
nano::account next_frontier_account{ 0 };
std::chrono::steady_clock::time_point next_frontier_check{ std::chrono::steady_clock::now () };
nano::condition_variable condition;
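
With flush_lowest () removed, overflow handling moves into the request loop itself: elections are walked in descending difficulty order and only those past the soft active_elections_size limit that are both older than election_time_to_live and not watched by the wallet work watcher get dropped. A sketch of that rule on toy data; the election_stub type, difficulties and limits here are illustrative, not the node's containers.

#include <algorithm>
#include <chrono>
#include <cstdint>
#include <cstdio>
#include <vector>

struct election_stub
{
	std::uint64_t difficulty;
	std::chrono::steady_clock::time_point election_start;
	bool watched;
};

int main ()
{
	using namespace std::chrono;
	size_t const active_elections_size (2); // soft limit, 10000 by default after this change
	seconds const election_time_to_live (10);
	auto const now (steady_clock::now ());
	std::vector<election_stub> roots{
		{ 900, now - seconds (30), false },
		{ 800, now - seconds (1), false },
		{ 300, now - seconds (30), true }, // watched: kept even beyond the limit
		{ 200, now - seconds (30), false }, // beyond the limit and stale: dropped
		{ 100, now - seconds (1), false } // beyond the limit but fresh: kept
	};
	// Mirror the difficulty-descending iteration order of the roots container
	std::sort (roots.begin (), roots.end (), [](auto const & a, auto const & b) { return a.difficulty > b.difficulty; });
	auto const ttl_cutoff (now - election_time_to_live);
	size_t count (0);
	for (auto const & election : roots)
	{
		bool const drop (count >= active_elections_size && election.election_start < ttl_cutoff && !election.watched);
		std::printf ("difficulty %3llu -> %s\n", static_cast<unsigned long long> (election.difficulty), drop ? "drop" : "keep");
		++count;
	}
}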

View file

@ -369,7 +369,7 @@ void nano::block_processor::process_batch (nano::unique_lock<std::mutex> & lock_
void nano::block_processor::process_live (nano::block_hash const & hash_a, std::shared_ptr<nano::block> block_a, const bool watch_work_a)
{
// Start collecting quorum on block
node.active.start (block_a);
node.active.start (block_a, false);
//add block to watcher if desired after block has been added to active
if (watch_work_a)
{
@ -382,28 +382,6 @@ void nano::block_processor::process_live (nano::block_hash const & hash_a, std::
// Announce our weighted vote to the network
generator.add (hash_a);
}
// Request confirmation for new block with delay
std::weak_ptr<nano::node> node_w (node.shared ());
node.alarm.add (std::chrono::steady_clock::now () + confirmation_request_delay, [node_w, block_a]() {
if (auto node_l = node_w.lock ())
{
// Check if votes were already requested
bool send_request (false);
{
nano::lock_guard<std::mutex> lock (node_l->active.mutex);
auto existing (node_l->active.blocks.find (block_a->hash ()));
if (existing != node_l->active.blocks.end () && !existing->second->confirmed && !existing->second->stopped && existing->second->confirmation_request_count == 0)
{
send_request = true;
}
}
// Request votes
if (send_request)
{
node_l->network.broadcast_confirm_req (block_a);
}
}
});
}
nano::process_return nano::block_processor::process_one (nano::write_transaction const & transaction_a, nano::unchecked_info info_a, const bool watch_work_a)

View file

@ -7,14 +7,14 @@ nano::election_vote_result::election_vote_result (bool replay_a, bool processed_
processed = processed_a;
}
nano::election::election (nano::node & node_a, std::shared_ptr<nano::block> block_a, std::function<void(std::shared_ptr<nano::block>)> const & confirmation_action_a) :
nano::election::election (nano::node & node_a, std::shared_ptr<nano::block> block_a, bool const skip_delay_a, std::function<void(std::shared_ptr<nano::block>)> const & confirmation_action_a) :
confirmation_action (confirmation_action_a),
node (node_a),
election_start (std::chrono::steady_clock::now ()),
status ({ block_a, 0, std::chrono::duration_cast<std::chrono::milliseconds> (std::chrono::system_clock::now ().time_since_epoch ()), std::chrono::duration_values<std::chrono::milliseconds>::zero (), 0, nano::election_status_type::ongoing }),
skip_delay (skip_delay_a),
confirmed (false),
stopped (false),
confirmation_request_count (0)
stopped (false)
{
last_votes.insert (std::make_pair (node.network_params.random.not_an_account, nano::vote_info{ std::chrono::steady_clock::now (), 0, block_a->hash () }));
blocks.insert (std::make_pair (block_a->hash (), block_a));
@ -47,10 +47,6 @@ void nano::election::confirm_once (nano::election_status_type type_a)
node_l->process_confirmed (status_l);
confirmation_action_l (status_l.winner);
});
if (confirmation_request_count > node.active.high_confirmation_request_count)
{
--node.active.long_unconfirmed_size;
}
auto root (status.winner->qualified_root ());
node.active.pending_conf_height.emplace (status.winner->hash (), shared_from_this ());
clear_blocks ();

View file

@ -34,7 +34,7 @@ class election final : public std::enable_shared_from_this<nano::election>
std::function<void(std::shared_ptr<nano::block>)> confirmation_action;
public:
election (nano::node &, std::shared_ptr<nano::block>, std::function<void(std::shared_ptr<nano::block>)> const &);
election (nano::node &, std::shared_ptr<nano::block>, bool const, std::function<void(std::shared_ptr<nano::block>)> const &);
nano::election_vote_result vote (nano::account, uint64_t, nano::block_hash);
nano::tally_t tally ();
// Check if we have vote quorum
@ -57,10 +57,11 @@ public:
std::unordered_map<nano::block_hash, std::shared_ptr<nano::block>> blocks;
std::chrono::steady_clock::time_point election_start;
nano::election_status status;
bool skip_delay;
std::atomic<bool> confirmed;
bool stopped;
std::unordered_map<nano::block_hash, nano::uint128_t> last_tally;
unsigned confirmation_request_count;
unsigned confirmation_request_count{ 0 };
std::unordered_set<nano::block_hash> dependent_blocks;
std::chrono::seconds late_blocks_delay{ 5 };
};
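
The new skip_delay flag is what stands in for the per-block delayed confirm_req removed from the block processor above: live elections (start (block, false), the process_live path) wait at least election_request_delay (1s outside the test network) before their first broadcast or confirm_req, while elections started by frontier confirmation (start (block, true)) are eligible immediately. A hedged sketch of that gate as a free function; the real check lives inside request_confirm ().

#include <chrono>
#include <cstdio>

bool eligible_for_requests (bool skip_delay, std::chrono::steady_clock::time_point election_start,
std::chrono::milliseconds election_request_delay = std::chrono::milliseconds (1000))
{
	auto const cutoff (std::chrono::steady_clock::now () - election_request_delay);
	return skip_delay || election_start < cutoff;
}

int main ()
{
	auto const now (std::chrono::steady_clock::now ());
	std::printf ("frontier election, just started: %d\n", eligible_for_requests (true, now)); // 1
	std::printf ("live election, just started: %d\n", eligible_for_requests (false, now)); // 0
	std::printf ("live election, 2s old: %d\n", eligible_for_requests (false, now - std::chrono::seconds (2))); // 1
}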

View file

@ -1911,7 +1911,7 @@ void nano::json_handler::confirmation_quorum ()
if (request.get<bool> ("peer_details", false))
{
boost::property_tree::ptree peers;
for (auto & peer : node.rep_crawler.representatives_by_weight ())
for (auto & peer : node.rep_crawler.representatives ())
{
boost::property_tree::ptree peer_node;
peer_node.put ("account", peer.account.to_account ());

View file

@ -325,20 +325,19 @@ void nano::network::broadcast_confirm_req_base (std::shared_ptr<nano::block> blo
}
}
void nano::network::broadcast_confirm_req_batched_many (std::unordered_map<std::shared_ptr<nano::transport::channel>, std::deque<std::pair<nano::block_hash, nano::root>>> request_bundle_a, std::function<void()> callback_a, unsigned delay_a, bool resumption)
void nano::network::broadcast_confirm_req_batched_many (std::unordered_map<std::shared_ptr<nano::transport::channel>, std::deque<std::pair<nano::block_hash, nano::root>>> request_bundle_a, std::function<void()> callback_a, unsigned delay_a, bool resumption_a)
{
const size_t max_reps = 50;
if (!resumption && node.config.logging.network_logging ())
const size_t burst_size_l{ 5 };
if (!resumption_a && node.config.logging.network_logging ())
{
node.logger.try_log (boost::str (boost::format ("Broadcasting batch confirm req to %1% representatives") % request_bundle_a.size ()));
}
auto count_l (0);
while (!request_bundle_a.empty () && count_l < max_reps)
for (size_t count_l (0); !request_bundle_a.empty () && count_l < burst_size_l; ++count_l)
{
auto j (request_bundle_a.begin ());
while (j != request_bundle_a.end ())
{
++count_l;
std::vector<std::pair<nano::block_hash, nano::root>> roots_hashes_l;
// Limit max request size hash + root to 7 pairs
while (roots_hashes_l.size () < confirm_req_hashes_max && !j->second.empty ())
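
Rough capacity numbers for the batched confirm_req path: the bundle built by election_request_confirm () is capped at max_confirm_representatives channels, each holding up to max_confirm_req_batches * confirm_req_hashes_max hash/root pairs (the 7-pair limit is the one noted above). These are upper bounds on what one request loop can queue, assuming full bundles, not measured traffic.

#include <cstdio>

int main ()
{
	unsigned const max_confirm_representatives (30); // distinct channels per loop
	unsigned const max_confirm_req_batches (20); // batches of hashes per channel
	unsigned const confirm_req_hashes_max (7); // hash/root pairs per confirm_req message
	unsigned const per_rep (max_confirm_req_batches * confirm_req_hashes_max);
	std::printf ("hash/root pairs queued per representative per loop: %u\n", per_rep); // 140
	std::printf ("upper bound across all representatives per loop: %u\n", per_rep * max_confirm_representatives); // 4200
}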

View file

@ -557,7 +557,7 @@ void nano::node::process_fork (nano::transaction const & transaction_a, std::sha
if (ledger_block && !block_confirmed_or_being_confirmed (transaction_a, ledger_block->hash ()))
{
std::weak_ptr<nano::node> this_w (shared_from_this ());
if (!active.start (ledger_block, [this_w, root](std::shared_ptr<nano::block>) {
if (!active.start (ledger_block, false, [this_w, root](std::shared_ptr<nano::block>) {
if (auto this_l = this_w.lock ())
{
auto attempt (this_l->bootstrap_initiator.current_attempt ());
@ -1042,7 +1042,7 @@ void nano::node::add_initial_peers ()
void nano::node::block_confirm (std::shared_ptr<nano::block> block_a)
{
active.start (block_a);
active.start (block_a, false);
network.broadcast_confirm_req (block_a);
// Calculate votes for local representatives
if (config.enable_voting && active.active (*block_a))

View file

@ -587,7 +587,8 @@ bool nano::node_config::upgrade_json (unsigned version_a, nano::jsonconfig & jso
}
case 17:
{
json.put ("vote_generator_delay", vote_generator_delay.count ()); // Update value
json.put ("active_elections_size", 10000); // Update value
json.put ("vote_generator_delay", 100); // Update value
json.put ("backup_before_upgrade", backup_before_upgrade);
json.put ("work_watcher_period", work_watcher_period.count ());
}
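
The case 17 upgrade above rewrites two existing keys and appends two new ones, matching the node_config assertions earlier in this commit. A small boost::property_tree sketch of the resulting json; the backup_before_upgrade and work_watcher_period values are assumed defaults, not taken from the diff.

#include <boost/property_tree/ptree.hpp>
#include <cstdio>

int main ()
{
	boost::property_tree::ptree tree;
	// v17 values on disk
	tree.put ("active_elections_size", 50000);
	tree.put ("vote_generator_delay", 500);
	// What the upgrade writes
	tree.put ("active_elections_size", 10000);
	tree.put ("vote_generator_delay", 100);
	tree.put ("backup_before_upgrade", false); // assumed default
	tree.put ("work_watcher_period", 5); // assumed default, seconds
	std::printf ("active_elections_size=%llu vote_generator_delay=%u\n",
	tree.get<unsigned long long> ("active_elections_size"), tree.get<unsigned> ("vote_generator_delay"));
}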

View file

@ -80,7 +80,7 @@ public:
/** Timeout for initiated async operations */
std::chrono::seconds tcp_io_timeout{ (network_params.network.is_test_network () && !is_sanitizer_build) ? std::chrono::seconds (5) : std::chrono::seconds (15) };
std::chrono::nanoseconds pow_sleep_interval{ 0 };
size_t active_elections_size{ 50000 };
size_t active_elections_size{ 10000 };
/** Default maximum incoming TCP connections, including realtime network & bootstrap */
unsigned tcp_incoming_connections_max{ 1024 };
bool use_memory_pools{ true };

View file

@ -163,18 +163,6 @@ nano::uint128_t nano::rep_crawler::total_weight () const
return result;
}
std::vector<nano::representative> nano::rep_crawler::representatives_by_weight ()
{
std::vector<nano::representative> result;
nano::lock_guard<std::mutex> lock (probable_reps_mutex);
for (auto i (probable_reps.get<tag_weight> ().begin ()), n (probable_reps.get<tag_weight> ().end ()); i != n; ++i)
{
assert (i->weight.number () > 0);
result.push_back (*i);
}
return result;
}
void nano::rep_crawler::on_rep_request (std::shared_ptr<nano::transport::channel> channel_a)
{
nano::lock_guard<std::mutex> lock (probable_reps_mutex);
@ -257,7 +245,6 @@ void nano::rep_crawler::update_weights ()
std::vector<nano::representative> nano::rep_crawler::representatives (size_t count_a)
{
std::vector<representative> result;
result.reserve (std::min (count_a, size_t (16)));
nano::lock_guard<std::mutex> lock (probable_reps_mutex);
for (auto i (probable_reps.get<tag_weight> ().begin ()), n (probable_reps.get<tag_weight> ().end ()); i != n && result.size () < count_a; ++i)
{

View file

@ -100,15 +100,12 @@ public:
/** Get total available weight from representatives */
nano::uint128_t total_weight () const;
/** Request a list of the top \p count_a known representatives. The maximum number of reps returned is 16. */
std::vector<representative> representatives (size_t count_a);
/** Request a list of the top \p count_a known representatives in descending order of weight. */
std::vector<representative> representatives (size_t count_a = std::numeric_limits<size_t>::max ());
/** Request a list of the top \p count_a known representative endpoints. The maximum number of reps returned is 16. */
/** Request a list of the top \p count_a known representative endpoints. */
std::vector<std::shared_ptr<nano::transport::channel>> representative_endpoints (size_t count_a);
/** Returns all representatives registered with weight in descending order */
std::vector<nano::representative> representatives_by_weight ();
/** Total number of representatives */
size_t representative_count ();
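
rep_crawler::representatives () now returns every known representative in descending weight order by default, which is what gives request_confirm its weight-ordered confirmation requests. A sketch of that selection using std::vector and std::sort in place of the weight-indexed boost multi_index container; the stub type and sample weights are illustrative.

#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <limits>
#include <string>
#include <vector>

struct representative_stub
{
	std::string account;
	std::uint64_t weight;
};

std::vector<representative_stub> representatives (std::vector<representative_stub> probable_reps, size_t count_a = std::numeric_limits<size_t>::max ())
{
	// Walk in descending weight order, mirroring the tag_weight index
	std::sort (probable_reps.begin (), probable_reps.end (), [](auto const & a, auto const & b) { return a.weight > b.weight; });
	if (probable_reps.size () > count_a)
	{
		probable_reps.resize (count_a);
	}
	return probable_reps;
}

int main ()
{
	std::vector<representative_stub> reps{ { "rep_c", 10 }, { "rep_a", 900 }, { "rep_b", 50 } };
	for (auto const & rep : representatives (reps)) // highest weight first: rep_a, rep_b, rep_c
	{
		std::printf ("%s weight=%llu\n", rep.account.c_str (), static_cast<unsigned long long> (rep.weight));
	}
}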