Allow filtering a specific mutex for tracking with NANO_TIMED_LOCKS (#2765)

* Allow filtering a specific mutex for tracking with NANO_TIMED_LOCKS

* Cmake format

* Re-add execute permissions

* Fix formatting

* Update new std::mutex to nano::mutex
commit b0f81d5e3e
Wesley Shillingford, 2021-03-04 18:04:11 +00:00, committed by GitHub
122 changed files with 928 additions and 735 deletions


@@ -81,6 +81,33 @@ set(NANO_TIMED_LOCKS
 set(NANO_TIMED_LOCKS_IGNORE_BLOCKED
     OFF
     CACHE BOOL "")
+set(NANO_TIMED_LOCKS_FILTER
+    ""
+    CACHE
+      STRING
+      "Selects which mutex should be the only one to have stacktraces generated, empty string means report on all"
+)
+set_property(
+  CACHE NANO_TIMED_LOCKS_FILTER
+  PROPERTY STRINGS
+           active
+           block_arrival
+           block_processor
+           block_uniquer
+           confirmation_height_processor
+           dropped_elections,
+           election_winner_details
+           gap_cache
+           network_filter
+           observer_set
+           request_aggregator
+           state_block_signature_verification
+           telemetry
+           vote_generator
+           vote_processor
+           vote_uniquer
+           votes_cache
+           work_pool)
 set(NANO_FUZZER_TEST
     OFF
     CACHE BOOL "")
@@ -103,6 +130,7 @@ endif()
 if(${NANO_TIMED_LOCKS} GREATER 0)
   add_definitions(-DNANO_TIMED_LOCKS=${NANO_TIMED_LOCKS})
+  add_definitions(-DNANO_TIMED_LOCKS_FILTER=${NANO_TIMED_LOCKS_FILTER})
   if(NANO_TIMED_LOCKS_IGNORE_BLOCKED)
     add_definitions(-DNANO_TIMED_LOCKS_IGNORE_BLOCKED)
   endif()
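
(Reviewer note, not part of the diff.) The two definitions above reach the code as bare preprocessor tokens. A minimal sketch of how the filter is presumably consumed, based on the xstr (NANO_TIMED_LOCKS_FILTER) usages in the tests later in this commit and on the "const char* get_name ()" requirement stated in locks.cpp at the end of this diff; xstr and USING_NANO_TIMED_LOCKS are assumed to come from nano/lib/config.hpp, and should_report is a made-up helper name, not something this commit adds:

#include <nano/lib/config.hpp> // assumed to provide xstr and USING_NANO_TIMED_LOCKS
#include <nano/lib/locks.hpp>

#include <cstring>

#if USING_NANO_TIMED_LOCKS
// NANO_TIMED_LOCKS_FILTER arrives as a bare token (for example: active), so it is
// stringified with xstr before it can be compared against a mutex name at runtime.
bool should_report (nano::mutex & mutex_a)
{
	static char const * filter = xstr (NANO_TIMED_LOCKS_FILTER);
	// An empty filter means stacktraces are generated for every tracked mutex,
	// otherwise only for the mutex whose name matches the filter.
	return *filter == '\0' || std::strcmp (filter, mutex_a.get_name ()) == 0;
}
#endif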


@@ -13,9 +13,9 @@ if [[ $(grep -rl --exclude="*asio.hpp" "asio::async_write" ./nano) ]]; then
   exit 1
 fi
-# prevent unsolicited use of std::lock_guard, std::unique_lock & std::condition_variable outside of allowed areas
-if [[ $(grep -rl --exclude={"*random_pool.cpp","*random_pool.hpp","*random_pool_shuffle.hpp","*locks.hpp","*locks.cpp"} "std::unique_lock\|std::lock_guard\|std::condition_variable" ./nano) ]]; then
-  echo "Using std::unique_lock, std::lock_guard or std::condition_variable is not permitted (except in nano/lib/locks.hpp and non-nano dependent libraries). Use the nano::* versions instead"
+# prevent unsolicited use of std::lock_guard, std::unique_lock, std::condition_variable & std::mutex outside of allowed areas
+if [[ $(grep -rl --exclude={"*random_pool.cpp","*random_pool.hpp","*random_pool_shuffle.hpp","*locks.hpp","*locks.cpp"} "std::unique_lock\|std::lock_guard\|std::condition_variable\|std::mutex" ./nano) ]]; then
+  echo "Using std::unique_lock, std::lock_guard, std::condition_variable or std::mutex is not permitted (except in nano/lib/locks.hpp and non-nano dependent libraries). Use the nano::* versions instead"
   exit 1
 fi
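
(Reviewer note, not part of the diff.) The practical effect of the widened check: outside nano/lib/locks.hpp, nano/lib/locks.cpp, the random_pool files and non-nano dependent libraries, the standard primitives have to be spelled through the nano wrappers, for example:

#include <nano/lib/locks.hpp>

// Rejected by the script above when found outside the allowed files:
//   std::mutex m;
//   std::lock_guard<std::mutex> guard (m);
// Accepted form, as used throughout the rest of this commit:
nano::mutex m;
nano::lock_guard<nano::mutex> guard (m);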


@@ -48,7 +48,7 @@ TEST (active_transactions, confirm_active)
 auto peers (node2.network.random_set (1));
 ASSERT_FALSE (peers.empty ());
 {
-nano::lock_guard<std::mutex> guard (node2.rep_crawler.probable_reps_mutex);
+nano::lock_guard<nano::mutex> guard (node2.rep_crawler.probable_reps_mutex);
 node2.rep_crawler.probable_reps.emplace (nano::dev_genesis_key.pub, nano::genesis_amount, *peers.begin ());
 }
 ASSERT_TIMELY (10s, node2.ledger.cache.cemented_count == 2 && node2.active.empty ());
@@ -91,7 +91,7 @@ TEST (active_transactions, confirm_frontier)
 auto peers (node2.network.random_set (1));
 ASSERT_FALSE (peers.empty ());
 {
-nano::lock_guard<std::mutex> guard (node2.rep_crawler.probable_reps_mutex);
+nano::lock_guard<nano::mutex> guard (node2.rep_crawler.probable_reps_mutex);
 node2.rep_crawler.probable_reps.emplace (nano::dev_genesis_key.pub, nano::genesis_amount, *peers.begin ());
 }
 ASSERT_TIMELY (5s, node2.ledger.cache.cemented_count == 2 && node2.active.empty ());
@@ -271,7 +271,7 @@ TEST (active_transactions, inactive_votes_cache_existing_vote)
 ASSERT_EQ (send->hash (), last_vote1.hash);
 ASSERT_EQ (1, last_vote1.timestamp);
 // Attempt to change vote with inactive_votes_cache
-nano::unique_lock<std::mutex> active_lock (node.active.mutex);
+nano::unique_lock<nano::mutex> active_lock (node.active.mutex);
 node.active.add_inactive_votes_cache (active_lock, send->hash (), key.pub);
 active_lock.unlock ();
 auto cache (node.active.find_inactive_votes_cache (send->hash ()));
@@ -554,13 +554,13 @@ TEST (active_transactions, update_difficulty)
 {
 {
 // node1
-nano::lock_guard<std::mutex> guard1 (node1.active.mutex);
+nano::lock_guard<nano::mutex> guard1 (node1.active.mutex);
 auto const existing1 (node1.active.roots.find (send1->qualified_root ()));
 ASSERT_NE (existing1, node1.active.roots.end ());
 auto const existing2 (node1.active.roots.find (send2->qualified_root ()));
 ASSERT_NE (existing2, node1.active.roots.end ());
 // node2
-nano::lock_guard<std::mutex> guard2 (node2.active.mutex);
+nano::lock_guard<nano::mutex> guard2 (node2.active.mutex);
 auto const existing3 (node2.active.roots.find (send1->qualified_root ()));
 ASSERT_NE (existing3, node2.active.roots.end ());
 auto const existing4 (node2.active.roots.find (send2->qualified_root ()));
@@ -657,7 +657,7 @@ TEST (active_transactions, vote_replays)
 // Removing blocks as recently confirmed makes every vote indeterminate
 {
-nano::lock_guard<std::mutex> guard (node.active.mutex);
+nano::lock_guard<nano::mutex> guard (node.active.mutex);
 node.active.recently_confirmed.clear ();
 }
 ASSERT_EQ (nano::vote_code::indeterminate, node.active.vote (vote_send1));
@@ -1015,7 +1015,7 @@ TEST (active_transactions, confirmation_consistency)
 ASSERT_NO_ERROR (system.poll (5ms));
 }
 ASSERT_NO_ERROR (system.poll_until_true (1s, [&node, &block, i] {
-nano::lock_guard<std::mutex> guard (node.active.mutex);
+nano::lock_guard<nano::mutex> guard (node.active.mutex);
 EXPECT_EQ (i + 1, node.active.recently_confirmed.size ());
 EXPECT_EQ (block->qualified_root (), node.active.recently_confirmed.back ().first);
 return i + 1 == node.active.recently_cemented.size (); // done after a callback
@@ -1106,7 +1106,7 @@ TEST (active_transactions, insertion_prioritization)
 std::sort (blocks.begin (), blocks.end (), [](auto const & blockl, auto const & blockr) { return blockl->difficulty () > blockr->difficulty (); });
 auto update_active_multiplier = [&node] {
-nano::unique_lock<std::mutex> lock (node.active.mutex);
+nano::unique_lock<nano::mutex> lock (node.active.mutex);
 node.active.update_active_multiplier (lock);
 };
@@ -1132,7 +1132,7 @@ TEST (active_multiplier, less_than_one)
 {
 nano::system system (1);
 auto & node (*system.nodes[0]);
-nano::unique_lock<std::mutex> lock (node.active.mutex);
+nano::unique_lock<nano::mutex> lock (node.active.mutex);
 auto base_active_difficulty = node.network_params.network.publish_thresholds.epoch_1;
 auto base_active_multiplier = 1.0;
 auto min_active_difficulty = node.network_params.network.publish_thresholds.entry;
@@ -1241,7 +1241,7 @@ TEST (active_transactions, election_difficulty_update_old)
 ASSERT_EQ (1, node.active.size ());
 auto multiplier = node.active.roots.begin ()->multiplier;
 {
-nano::lock_guard<std::mutex> guard (node.active.mutex);
+nano::lock_guard<nano::mutex> guard (node.active.mutex);
 ASSERT_EQ (node.active.normalized_multiplier (*send1), multiplier);
 }
 // Should not update with a lower difficulty
@@ -1675,7 +1675,7 @@ TEST (active_transactions, difficulty_update_observer)
 auto & node (*system.nodes[0]);
 std::atomic<bool> update_received (false);
 node.observers.difficulty.add ([& mutex = node.active.mutex, &update_received](uint64_t difficulty_a) {
-nano::unique_lock<std::mutex> lock (mutex, std::defer_lock);
+nano::unique_lock<nano::mutex> lock (mutex, std::defer_lock);
 EXPECT_TRUE (lock.try_lock ());
 update_received = true;
 });


@@ -333,7 +333,7 @@ TEST (bootstrap_processor, DISABLED_pull_requeue_network_error)
 ASSERT_TIMELY (2s, attempt->frontiers_received);
 // Add non-existing pull & stop remote peer
 {
-nano::unique_lock<std::mutex> lock (node1->bootstrap_initiator.connections->mutex);
+nano::unique_lock<nano::mutex> lock (node1->bootstrap_initiator.connections->mutex);
 ASSERT_FALSE (attempt->stopped);
 ++attempt->pulling;
 node1->bootstrap_initiator.connections->pulls.push_back (nano::pull_info (nano::dev_genesis_key.pub, send1->hash (), genesis.hash (), attempt->incremental_id));


@@ -10,13 +10,13 @@ using namespace std::chrono_literals;
 namespace
 {
-void add_callback_stats (nano::node & node, std::vector<nano::block_hash> * observer_order = nullptr, std::mutex * mutex = nullptr)
+void add_callback_stats (nano::node & node, std::vector<nano::block_hash> * observer_order = nullptr, nano::mutex * mutex = nullptr)
 {
 node.observers.blocks.add ([& stats = node.stats, observer_order, mutex](nano::election_status const & status_a, nano::account const &, nano::amount const &, bool) {
 stats.inc (nano::stat::type::http_callback, nano::stat::detail::http_callback, nano::stat::dir::out);
 if (mutex)
 {
-nano::lock_guard<std::mutex> guard (*mutex);
+nano::lock_guard<nano::mutex> guard (*mutex);
 debug_assert (observer_order);
 observer_order->push_back (status_a.winner->hash ());
 }
@@ -1006,7 +1006,7 @@ TEST (confirmation_height, callback_confirmed_history)
 ASSERT_TIMELY (10s, node->active.size () == 0);
 ASSERT_EQ (0, node->active.list_recently_cemented ().size ());
 {
-nano::lock_guard<std::mutex> guard (node->active.mutex);
+nano::lock_guard<nano::mutex> guard (node->active.mutex);
 ASSERT_EQ (0, node->active.blocks.size ());
 }
@@ -1151,7 +1151,7 @@ TEST (confirmation_height, cemented_gap_below_receive)
 }
 std::vector<nano::block_hash> observer_order;
-std::mutex mutex;
+nano::mutex mutex;
 add_callback_stats (*node, &observer_order, &mutex);
 node->block_confirm (open1);
@@ -1172,7 +1172,7 @@ TEST (confirmation_height, cemented_gap_below_receive)
 // Check that the order of callbacks is correct
 std::vector<nano::block_hash> expected_order = { send.hash (), open.hash (), send1.hash (), receive1.hash (), send2.hash (), dummy_send.hash (), receive2.hash (), dummy_send1.hash (), send3.hash (), open1->hash () };
-nano::lock_guard<std::mutex> guard (mutex);
+nano::lock_guard<nano::mutex> guard (mutex);
 ASSERT_EQ (observer_order, expected_order);
 };


@@ -31,7 +31,7 @@ TEST (confirmation_solicitor, batches)
 auto send (std::make_shared<nano::send_block> (nano::genesis_hash, nano::keypair ().pub, nano::genesis_amount - 100, nano::dev_genesis_key.prv, nano::dev_genesis_key.pub, *system.work.generate (nano::genesis_hash)));
 send->sideband_set ({});
 {
-nano::lock_guard<std::mutex> guard (node2.active.mutex);
+nano::lock_guard<nano::mutex> guard (node2.active.mutex);
 for (size_t i (0); i < nano::network::confirm_req_hashes_max; ++i)
 {
 auto election (std::make_shared<nano::election> (node2, send, nullptr, nullptr, false, nano::election_behavior::normal));
@@ -115,7 +115,7 @@ TEST (confirmation_solicitor, bypass_max_requests_cap)
 // Add a vote for something else, not the winner
 for (auto const & rep : representatives)
 {
-nano::lock_guard<std::mutex> guard (election->mutex);
+nano::lock_guard<nano::mutex> guard (election->mutex);
 election->last_votes[rep.account] = { std::chrono::steady_clock::now (), 1, 1 };
 }
 ASSERT_FALSE (solicitor.add (*election));


@@ -166,7 +166,7 @@ TEST (conflicts, reprioritize)
 node1.process_active (send1);
 node1.block_processor.flush ();
 {
-nano::lock_guard<std::mutex> guard (node1.active.mutex);
+nano::lock_guard<nano::mutex> guard (node1.active.mutex);
 auto existing1 (node1.active.roots.find (send1->qualified_root ()));
 ASSERT_NE (node1.active.roots.end (), existing1);
 ASSERT_EQ (multiplier1, existing1->multiplier);
@@ -177,7 +177,7 @@ TEST (conflicts, reprioritize)
 node1.process_active (std::make_shared<nano::send_block> (send1_copy));
 node1.block_processor.flush ();
 {
-nano::lock_guard<std::mutex> guard (node1.active.mutex);
+nano::lock_guard<nano::mutex> guard (node1.active.mutex);
 auto existing2 (node1.active.roots.find (send1->qualified_root ()));
 ASSERT_NE (node1.active.roots.end (), existing2);
 ASSERT_EQ (multiplier2, existing2->multiplier);


@@ -239,7 +239,7 @@ TEST (election, quorum_minimum_update_weight_before_quorum_checks)
 ASSERT_TIMELY (10s, !node1.rep_crawler.response (channel, vote2));
 ASSERT_FALSE (election.election->confirmed ());
 {
-nano::lock_guard<std::mutex> guard (node1.online_reps.mutex);
+nano::lock_guard<nano::mutex> guard (node1.online_reps.mutex);
 // Modify online_m for online_reps to more than is available, this checks that voting below updates it to current online reps.
 node1.online_reps.online_m = node_config.online_weight_minimum.number () + 20;
 }


@@ -168,7 +168,7 @@ TEST (frontiers_confirmation, prioritize_frontiers_max_optimistic_elections)
 }
 {
-nano::unique_lock<std::mutex> lk (node->active.mutex);
+nano::unique_lock<nano::mutex> lk (node->active.mutex);
 node->active.frontiers_confirmation (lk);
 }
@@ -179,7 +179,7 @@ TEST (frontiers_confirmation, prioritize_frontiers_max_optimistic_elections)
 // Call frontiers confirmation again and confirm that next_frontier_account hasn't changed
 {
-nano::unique_lock<std::mutex> lk (node->active.mutex);
+nano::unique_lock<nano::mutex> lk (node->active.mutex);
 node->active.frontiers_confirmation (lk);
 }


@@ -19,7 +19,7 @@ TEST (gap_cache, add_existing)
 nano::gap_cache cache (*system.nodes[0]);
 auto block1 (std::make_shared<nano::send_block> (0, 1, 2, nano::keypair ().prv, 4, 5));
 cache.add (block1->hash ());
-nano::unique_lock<std::mutex> lock (cache.mutex);
+nano::unique_lock<nano::mutex> lock (cache.mutex);
 auto existing1 (cache.blocks.get<1> ().find (block1->hash ()));
 ASSERT_NE (cache.blocks.get<1> ().end (), existing1);
 auto arrival (existing1->arrival);
@@ -39,7 +39,7 @@ TEST (gap_cache, comparison)
 nano::gap_cache cache (*system.nodes[0]);
 auto block1 (std::make_shared<nano::send_block> (1, 0, 2, nano::keypair ().prv, 4, 5));
 cache.add (block1->hash ());
-nano::unique_lock<std::mutex> lock (cache.mutex);
+nano::unique_lock<nano::mutex> lock (cache.mutex);
 auto existing1 (cache.blocks.get<1> ().find (block1->hash ()));
 ASSERT_NE (cache.blocks.get<1> ().end (), existing1);
 auto arrival (existing1->arrival);


@@ -798,7 +798,7 @@ TEST (votes, add_one)
 auto existing1 (votes1.find (nano::dev_genesis_key.pub));
 ASSERT_NE (votes1.end (), existing1);
 ASSERT_EQ (send1->hash (), existing1->second.hash);
-nano::lock_guard<std::mutex> guard (node1.active.mutex);
+nano::lock_guard<nano::mutex> guard (node1.active.mutex);
 auto winner (*election1.election->tally ().begin ());
 ASSERT_EQ (*send1, *winner.second);
 ASSERT_EQ (nano::genesis_amount - 100, winner.first);
@@ -872,7 +872,7 @@ TEST (votes, add_existing)
 node1.work_generate_blocking (*send2);
 auto vote2 (std::make_shared<nano::vote> (nano::dev_genesis_key.pub, nano::dev_genesis_key.prv, 2, send2));
 // Pretend we've waited the timeout
-nano::unique_lock<std::mutex> lock (election1.election->mutex);
+nano::unique_lock<nano::mutex> lock (election1.election->mutex);
 election1.election->last_votes[nano::dev_genesis_key.pub].time = std::chrono::steady_clock::now () - std::chrono::seconds (20);
 lock.unlock ();
 ASSERT_EQ (nano::vote_code::vote, node1.active.vote (vote2));
@@ -911,7 +911,7 @@ TEST (votes, add_old)
 node1.work_generate_blocking (*send2);
 auto vote2 (std::make_shared<nano::vote> (nano::dev_genesis_key.pub, nano::dev_genesis_key.prv, 1, send2));
 {
-nano::lock_guard<std::mutex> lock (election1.election->mutex);
+nano::lock_guard<nano::mutex> lock (election1.election->mutex);
 election1.election->last_votes[nano::dev_genesis_key.pub].time = std::chrono::steady_clock::now () - std::chrono::seconds (20);
 }
 node1.vote_processor.vote_blocking (vote2, channel);


@@ -1,3 +1,4 @@
+#include <nano/lib/config.hpp>
 #include <nano/lib/locks.hpp>
 #include <nano/test_common/testutil.hpp>
@@ -6,7 +7,7 @@
 #include <future>
 #include <regex>
-#if NANO_TIMED_LOCKS > 0
+#if USING_NANO_TIMED_LOCKS
 namespace
 {
 unsigned num_matches (std::string const & str)
@@ -30,11 +31,11 @@ TEST (locks, no_conflicts)
 std::stringstream ss;
 nano::cout_redirect (ss.rdbuf ());
-std::mutex guard_mutex;
-nano::lock_guard<std::mutex> guard (guard_mutex);
-std::mutex lk_mutex;
-nano::unique_lock<std::mutex> lk (lk_mutex);
+nano::mutex guard_mutex;
+nano::lock_guard<nano::mutex> guard (guard_mutex);
+nano::mutex lk_mutex;
+nano::unique_lock<nano::mutex> lk (lk_mutex);
 // This could fail if NANO_TIMED_LOCKS is such a low value that the above mutexes are held longer than that before reaching this statement
 ASSERT_EQ (ss.str (), "");
@@ -48,24 +49,21 @@ TEST (locks, lock_guard)
 std::stringstream ss;
 nano::cout_redirect redirect (ss.rdbuf ());
-std::mutex mutex;
+nano::mutex mutex{ xstr (NANO_TIMED_LOCKS_FILTER) };
 // Depending on timing the mutex could be reached first in
 std::promise<void> promise;
-std::thread t;
-{
-t = std::thread ([&mutex, &promise] {
-nano::lock_guard<std::mutex> guard (mutex);
-promise.set_value ();
-// Tries to make sure that the other guard to held for a minimum of NANO_TIMED_LOCKS, may need to increase this for low NANO_TIMED_LOCKS values
-std::this_thread::sleep_for (std::chrono::milliseconds (NANO_TIMED_LOCKS * 2));
-});
-}
+std::thread t ([&mutex, &promise] {
+nano::lock_guard<nano::mutex> guard (mutex);
+promise.set_value ();
+// Tries to make sure that the other guard to held for a minimum of NANO_TIMED_LOCKS, may need to increase this for low NANO_TIMED_LOCKS values
+std::this_thread::sleep_for (std::chrono::milliseconds (NANO_TIMED_LOCKS * 2));
+});
 // Wait until the lock_guard has been reached in the other thread
 promise.get_future ().wait ();
 {
-nano::lock_guard<std::mutex> guard (mutex);
+nano::lock_guard<nano::mutex> guard (mutex);
 t.join ();
 }
@@ -85,12 +83,12 @@ TEST (locks, unique_lock)
 std::stringstream ss;
 nano::cout_redirect redirect (ss.rdbuf ());
-std::mutex mutex;
+nano::mutex mutex{ xstr (NANO_TIMED_LOCKS_FILTER) };
 // Depending on timing the mutex could be reached first in
 std::promise<void> promise;
 std::thread t ([&mutex, &promise] {
-nano::unique_lock<std::mutex> lk (mutex);
+nano::unique_lock<nano::mutex> lk (mutex);
 std::this_thread::sleep_for (std::chrono::milliseconds (NANO_TIMED_LOCKS));
 lk.unlock ();
 lk.lock ();
@@ -103,7 +101,7 @@ TEST (locks, unique_lock)
 // Wait until the lock_guard has been reached in the other thread
 promise.get_future ().wait ();
 {
-nano::unique_lock<std::mutex> lk (mutex);
+nano::unique_lock<nano::mutex> lk (mutex);
 t.join ();
 }
@@ -124,7 +122,7 @@ TEST (locks, condition_variable_wait)
 nano::cout_redirect redirect (ss.rdbuf ());
 nano::condition_variable cv;
-std::mutex mutex;
+nano::mutex mutex;
 std::atomic<bool> notified{ false };
 std::atomic<bool> finished{ false };
 std::thread t ([&] {
@@ -136,7 +134,7 @@ TEST (locks, condition_variable_wait)
 }
 });
-nano::unique_lock<std::mutex> lk (mutex);
+nano::unique_lock<nano::mutex> lk (mutex);
 std::this_thread::sleep_for (std::chrono::milliseconds (NANO_TIMED_LOCKS));
 cv.wait (lk, [&notified] {
 return notified.load ();
@@ -157,11 +155,11 @@ TEST (locks, condition_variable_wait_until)
 nano::cout_redirect redirect (ss.rdbuf ());
 nano::condition_variable cv;
-std::mutex mutex;
+nano::mutex mutex;
 auto impl = [&](auto time_to_sleep) {
 std::atomic<bool> notified{ false };
 std::atomic<bool> finished{ false };
-nano::unique_lock<std::mutex> lk (mutex);
+nano::unique_lock<nano::mutex> lk (mutex);
 std::this_thread::sleep_for (std::chrono::milliseconds (time_to_sleep));
 std::thread t ([&] {
 while (!finished)
@@ -189,8 +187,8 @@ TEST (locks, condition_variable_wait_until)
 TEST (locks, defer_lock)
 {
-std::mutex mutex;
-nano::unique_lock<std::mutex> lock (mutex, std::defer_lock);
+nano::mutex mutex;
+nano::unique_lock<nano::mutex> lock (mutex, std::defer_lock);
 ASSERT_FALSE (lock.owns_lock ());
 ASSERT_TRUE (lock.try_lock ());
 ASSERT_TRUE (lock.owns_lock ());


@@ -762,7 +762,7 @@ TEST (tcp_listener, tcp_listener_timeout_empty)
 while (!disconnected)
 {
 {
-nano::lock_guard<std::mutex> guard (node0->bootstrap.mutex);
+nano::lock_guard<nano::mutex> guard (node0->bootstrap.mutex);
 disconnected = node0->bootstrap.connections.empty ();
 }
 ASSERT_NO_ERROR (system.poll ());
@@ -786,7 +786,7 @@ TEST (tcp_listener, tcp_listener_timeout_node_id_handshake)
 });
 ASSERT_TIMELY (5s, node0->stats.count (nano::stat::type::message, nano::stat::detail::node_id_handshake) != 0);
 {
-nano::lock_guard<std::mutex> guard (node0->bootstrap.mutex);
+nano::lock_guard<nano::mutex> guard (node0->bootstrap.mutex);
 ASSERT_EQ (node0->bootstrap.connections.size (), 1);
 }
 bool disconnected (false);
@@ -794,7 +794,7 @@ TEST (tcp_listener, tcp_listener_timeout_node_id_handshake)
 while (!disconnected)
 {
 {
-nano::lock_guard<std::mutex> guard (node0->bootstrap.mutex);
+nano::lock_guard<nano::mutex> guard (node0->bootstrap.mutex);
 disconnected = node0->bootstrap.connections.empty ();
 }
 ASSERT_NO_ERROR (system.poll ());


@@ -424,7 +424,7 @@ TEST (node, search_pending_confirmed)
 system.wallet (0)->insert_adhoc (key2.prv);
 ASSERT_FALSE (system.wallet (0)->search_pending (system.wallet (0)->wallets.tx_begin_read ()));
 {
-nano::lock_guard<std::mutex> guard (node->active.mutex);
+nano::lock_guard<nano::mutex> guard (node->active.mutex);
 auto existing1 (node->active.blocks.find (send1->hash ()));
 ASSERT_EQ (node->active.blocks.end (), existing1);
 auto existing2 (node->active.blocks.find (send2->hash ()));
@@ -470,7 +470,7 @@ TEST (node, search_pending_pruned)
 system.wallet (1)->insert_adhoc (key2.prv);
 ASSERT_FALSE (system.wallet (1)->search_pending (system.wallet (1)->wallets.tx_begin_read ()));
 {
-nano::lock_guard<std::mutex> guard (node2->active.mutex);
+nano::lock_guard<nano::mutex> guard (node2->active.mutex);
 auto existing1 (node2->active.blocks.find (send1->hash ()));
 ASSERT_EQ (node2->active.blocks.end (), existing1);
 auto existing2 (node2->active.blocks.find (send2->hash ()));
@@ -2018,7 +2018,7 @@ TEST (node, bootstrap_confirm_frontiers)
 ASSERT_NO_ERROR (system1.poll ());
 }
 {
-nano::lock_guard<std::mutex> guard (node1->active.mutex);
+nano::lock_guard<nano::mutex> guard (node1->active.mutex);
 auto existing1 (node1->active.blocks.find (send0.hash ()));
 ASSERT_NE (node1->active.blocks.end (), existing1);
 }
@@ -2445,7 +2445,7 @@ TEST (node, online_reps_rep_crawler)
 ASSERT_EQ (0, node1.online_reps.online ());
 // After inserting to rep crawler
 {
-nano::lock_guard<std::mutex> guard (node1.rep_crawler.probable_reps_mutex);
+nano::lock_guard<nano::mutex> guard (node1.rep_crawler.probable_reps_mutex);
 node1.rep_crawler.active.insert (nano::genesis_hash);
 }
 node1.vote_processor.vote_blocking (vote, std::make_shared<nano::transport::channel_loopback> (node1));
@@ -3101,7 +3101,7 @@ TEST (node, epoch_conflict_confirm)
 nano::blocks_confirm (*node0, { change, epoch_open });
 ASSERT_EQ (2, node0->active.size ());
 {
-nano::lock_guard<std::mutex> lock (node0->active.mutex);
+nano::lock_guard<nano::mutex> lock (node0->active.mutex);
 ASSERT_TRUE (node0->active.blocks.find (change->hash ()) != node0->active.blocks.end ());
 ASSERT_TRUE (node0->active.blocks.find (epoch_open->hash ()) != node0->active.blocks.end ());
 }
@@ -3894,7 +3894,7 @@ TEST (active_difficulty, recalculate_work)
 ASSERT_TIMELY (2s, !node1.active.empty ());
 auto sum (std::accumulate (node1.active.multipliers_cb.begin (), node1.active.multipliers_cb.end (), double(0)));
 ASSERT_EQ (node1.active.active_difficulty (), nano::difficulty::from_multiplier (sum / node1.active.multipliers_cb.size (), node1.network_params.network.publish_thresholds.epoch_2));
-nano::unique_lock<std::mutex> lock (node1.active.mutex);
+nano::unique_lock<nano::mutex> lock (node1.active.mutex);
 // Fake history records to force work recalculation
 for (auto i (0); i < node1.active.multipliers_cb.size (); i++)
 {
@@ -4178,7 +4178,7 @@ TEST (node, dependency_graph)
 ASSERT_NO_ERROR (system.poll_until_true (15s, [&] {
 // Not many blocks should be active simultaneously
 EXPECT_LT (node.active.size (), 6);
-nano::lock_guard<std::mutex> guard (node.active.mutex);
+nano::lock_guard<nano::mutex> guard (node.active.mutex);
 // Ensure that active blocks have their ancestors confirmed
 auto error = std::any_of (dependency_graph.cbegin (), dependency_graph.cend (), [&](auto entry) {
@@ -4456,7 +4456,7 @@ TEST (node, deferred_dependent_elections)
 // Frontier confirmation also starts elections
 ASSERT_NO_ERROR (system.poll_until_true (5s, [&node, &send2] {
-nano::unique_lock<std::mutex> lock (node.active.mutex);
+nano::unique_lock<nano::mutex> lock (node.active.mutex);
 node.active.frontiers_confirmation (lock);
 lock.unlock ();
 return node.active.election (send2->qualified_root ()) != nullptr;
@@ -4536,7 +4536,7 @@ TEST (rep_crawler, local)
 auto loopback = std::make_shared<nano::transport::channel_loopback> (node);
 auto vote = std::make_shared<nano::vote> (nano::dev_genesis_key.pub, nano::dev_genesis_key.prv, 0, std::vector{ nano::genesis_hash });
 {
-nano::lock_guard<std::mutex> guard (node.rep_crawler.probable_reps_mutex);
+nano::lock_guard<nano::mutex> guard (node.rep_crawler.probable_reps_mutex);
 node.rep_crawler.active.insert (nano::genesis_hash);
 node.rep_crawler.responses.emplace_back (loopback, vote);
 }


@@ -565,7 +565,7 @@ TEST (telemetry, remove_peer_different_genesis)
 ASSERT_EQ (node0->stats.count (nano::stat::type::message, nano::stat::detail::node_id_handshake, nano::stat::dir::out), 1);
 ASSERT_EQ (node1->stats.count (nano::stat::type::message, nano::stat::detail::node_id_handshake, nano::stat::dir::out), 1);
-nano::lock_guard<std::mutex> guard (node0->network.excluded_peers.mutex);
+nano::lock_guard<nano::mutex> guard (node0->network.excluded_peers.mutex);
 ASSERT_EQ (1, node0->network.excluded_peers.peers.get<nano::peer_exclusion::tag_endpoint> ().count (node1->network.endpoint ().address ()));
 ASSERT_EQ (1, node1->network.excluded_peers.peers.get<nano::peer_exclusion::tag_endpoint> ().count (node0->network.endpoint ().address ()));
 }
@@ -609,7 +609,7 @@ TEST (telemetry, remove_peer_different_genesis_udp)
 ASSERT_EQ (node0->network.tcp_channels.size (), 0);
 ASSERT_EQ (node1->network.tcp_channels.size (), 0);
-nano::lock_guard<std::mutex> guard (node0->network.excluded_peers.mutex);
+nano::lock_guard<nano::mutex> guard (node0->network.excluded_peers.mutex);
 ASSERT_EQ (1, node0->network.excluded_peers.peers.get<nano::peer_exclusion::tag_endpoint> ().count (node1->network.endpoint ().address ()));
 ASSERT_EQ (1, node1->network.excluded_peers.peers.get<nano::peer_exclusion::tag_endpoint> ().count (node0->network.endpoint ().address ()));
 }
@@ -636,7 +636,7 @@ TEST (telemetry, remove_peer_invalid_signature)
 ASSERT_TIMELY (10s, node->stats.count (nano::stat::type::telemetry, nano::stat::detail::invalid_signature) > 0);
 ASSERT_NO_ERROR (system.poll_until_true (3s, [&node, address = channel->get_endpoint ().address ()]() -> bool {
-nano::lock_guard<std::mutex> guard (node->network.excluded_peers.mutex);
+nano::lock_guard<nano::mutex> guard (node->network.excluded_peers.mutex);
 return node->network.excluded_peers.peers.get<nano::peer_exclusion::tag_endpoint> ().count (address);
 }));
 }


@@ -120,16 +120,16 @@ TEST (thread_pool_alarm, one)
 {
 nano::thread_pool workers (1u, nano::thread_role::name::unknown);
 std::atomic<bool> done (false);
-std::mutex mutex;
+nano::mutex mutex;
 nano::condition_variable condition;
 workers.add_timed_task (std::chrono::steady_clock::now (), [&]() {
 {
-nano::lock_guard<std::mutex> lock (mutex);
+nano::lock_guard<nano::mutex> lock (mutex);
 done = true;
 }
 condition.notify_one ();
 });
-nano::unique_lock<std::mutex> unique (mutex);
+nano::unique_lock<nano::mutex> unique (mutex);
 condition.wait (unique, [&]() { return !!done; });
 }
@@ -137,19 +137,19 @@ TEST (thread_pool_alarm, many)
 {
 nano::thread_pool workers (50u, nano::thread_role::name::unknown);
 std::atomic<int> count (0);
-std::mutex mutex;
+nano::mutex mutex;
 nano::condition_variable condition;
 for (auto i (0); i < 50; ++i)
 {
 workers.add_timed_task (std::chrono::steady_clock::now (), [&]() {
 {
-nano::lock_guard<std::mutex> lock (mutex);
+nano::lock_guard<nano::mutex> lock (mutex);
 count += 1;
 }
 condition.notify_one ();
 });
 }
-nano::unique_lock<std::mutex> unique (mutex);
+nano::unique_lock<nano::mutex> unique (mutex);
 condition.wait (unique, [&]() { return count == 50; });
 }
@@ -158,20 +158,20 @@ TEST (thread_pool_alarm, top_execution)
 nano::thread_pool workers (1u, nano::thread_role::name::unknown);
 int value1 (0);
 int value2 (0);
-std::mutex mutex;
+nano::mutex mutex;
 std::promise<bool> promise;
 workers.add_timed_task (std::chrono::steady_clock::now (), [&]() {
-nano::lock_guard<std::mutex> lock (mutex);
+nano::lock_guard<nano::mutex> lock (mutex);
 value1 = 1;
 value2 = 1;
 });
 workers.add_timed_task (std::chrono::steady_clock::now () + std::chrono::milliseconds (1), [&]() {
-nano::lock_guard<std::mutex> lock (mutex);
+nano::lock_guard<nano::mutex> lock (mutex);
 value2 = 2;
 promise.set_value (false);
 });
 promise.get_future ().get ();
-nano::lock_guard<std::mutex> lock (mutex);
+nano::lock_guard<nano::mutex> lock (mutex);
 ASSERT_EQ (1, value1);
 ASSERT_EQ (2, value2);
 }


@@ -208,23 +208,23 @@ TEST (vote_spacing, rapid)
 wallet.insert_adhoc (nano::dev_genesis_key.prv);
 nano::state_block_builder builder;
 auto send1 = builder.make_block ()
 .account (nano::dev_genesis_key.pub)
 .previous (nano::genesis_hash)
 .representative (nano::dev_genesis_key.pub)
 .balance (nano::genesis_amount - nano::Gxrb_ratio)
 .link (nano::dev_genesis_key.pub)
 .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub)
 .work (*system.work.generate (nano::genesis_hash))
 .build_shared ();
 auto send2 = builder.make_block ()
 .account (nano::dev_genesis_key.pub)
 .previous (nano::genesis_hash)
 .representative (nano::dev_genesis_key.pub)
 .balance (nano::genesis_amount - nano::Gxrb_ratio - 1)
 .link (nano::dev_genesis_key.pub)
 .sign (nano::dev_genesis_key.prv, nano::dev_genesis_key.pub)
 .work (*system.work.generate (nano::genesis_hash))
 .build_shared ();
 ASSERT_EQ (nano::process_result::progress, node.ledger.process (node.store.tx_begin_write (), *send1).code);
 node.active.generator.add (nano::genesis_hash, send1->hash ());
 ASSERT_TIMELY (3s, node.stats.count (nano::stat::type::vote_generator, nano::stat::detail::generator_broadcasts) == 1);


@@ -1014,7 +1014,7 @@ TEST (wallet, limited_difficulty)
 wallet.insert_adhoc (nano::dev_genesis_key.prv, false);
 {
 // Force active difficulty to an impossibly high value
-nano::lock_guard<std::mutex> guard (node.active.mutex);
+nano::lock_guard<nano::mutex> guard (node.active.mutex);
 node.active.trended_active_multiplier = 1024 * 1024 * 1024;
 }
 ASSERT_EQ (node.max_work_generate_difficulty (nano::work_version::work_1), node.active.limited_active_difficulty (*genesis.open));
@@ -1102,7 +1102,7 @@ TEST (wallet, epoch_2_receive_propagation)
 // Receiving should use the lower difficulty
 {
-nano::lock_guard<std::mutex> guard (node.active.mutex);
+nano::lock_guard<nano::mutex> guard (node.active.mutex);
 node.active.trended_active_multiplier = 1.0;
 }
 auto receive2 = wallet.receive_action (send2->hash (), key.pub, amount, send2->link ().as_account (), 1);
@@ -1152,7 +1152,7 @@ TEST (wallet, epoch_2_receive_unopened)
 // Receiving should use the lower difficulty
 {
-nano::lock_guard<std::mutex> guard (node.active.mutex);
+nano::lock_guard<nano::mutex> guard (node.active.mutex);
 node.active.trended_active_multiplier = 1.0;
 }
 auto receive1 = wallet.receive_action (send1->hash (), key.pub, amount, send1->link ().as_account (), 1);


@@ -106,7 +106,7 @@ TEST (wallets, reload)
 ASSERT_FALSE (error);
 ASSERT_EQ (1, node1.wallets.items.size ());
 {
-nano::lock_guard<std::mutex> lock_wallet (node1.wallets.mutex);
+nano::lock_guard<nano::mutex> lock_wallet (node1.wallets.mutex);
 nano::inactive_node node (node1.application_path, nano::inactive_node_flag_defaults ());
 auto wallet (node.node->wallets.create (one));
 ASSERT_NE (wallet, nullptr);
@@ -132,7 +132,7 @@ TEST (wallets, vote_minimum)
 nano::state_block open2 (key2.pub, 0, key2.pub, node1.config.vote_minimum.number () - 1, send2.hash (), key2.prv, key2.pub, *system.work.generate (key2.pub));
 ASSERT_EQ (nano::process_result::progress, node1.process (open2).code);
 auto wallet (node1.wallets.items.begin ()->second);
-nano::unique_lock<std::mutex> representatives_lk (wallet->representatives_mutex);
+nano::unique_lock<nano::mutex> representatives_lk (wallet->representatives_mutex);
 ASSERT_EQ (0, wallet->representatives.size ());
 representatives_lk.unlock ();
 wallet->insert_adhoc (nano::dev_genesis_key.prv);
@@ -180,7 +180,7 @@ TEST (wallets, search_pending)
 flags.disable_search_pending = true;
 auto & node (*system.add_node (config, flags));
-nano::unique_lock<std::mutex> lk (node.wallets.mutex);
+nano::unique_lock<nano::mutex> lk (node.wallets.mutex);
 auto wallets = node.wallets.get_wallets ();
 lk.unlock ();
 ASSERT_EQ (1, wallets.size ());


@@ -79,7 +79,7 @@ TEST (websocket, active_difficulty)
 // Fake history records and force a trended_active_multiplier change
 {
-nano::unique_lock<std::mutex> lock (node1->active.mutex);
+nano::unique_lock<nano::mutex> lock (node1->active.mutex);
 node1->active.multipliers_cb.push_front (10.);
 node1->active.update_active_multiplier (lock);
 }


@@ -26,14 +26,14 @@ TEST (work_watcher, update)
 auto multiplier2 (nano::normalized_multiplier (nano::difficulty::to_multiplier (difficulty2, nano::work_threshold (block2->work_version (), nano::block_details (nano::epoch::epoch_0, true, false, false))), node.network_params.network.publish_thresholds.epoch_1));
 double updated_multiplier1{ multiplier1 }, updated_multiplier2{ multiplier2 }, target_multiplier{ std::max (multiplier1, multiplier2) + 1e-6 };
 {
-nano::lock_guard<std::mutex> guard (node.active.mutex);
+nano::lock_guard<nano::mutex> guard (node.active.mutex);
 node.active.trended_active_multiplier = target_multiplier;
 }
 system.deadline_set (20s);
 while (updated_multiplier1 == multiplier1 || updated_multiplier2 == multiplier2)
 {
 {
-nano::lock_guard<std::mutex> guard (node.active.mutex);
+nano::lock_guard<nano::mutex> guard (node.active.mutex);
 {
 auto const existing (node.active.roots.find (block1->qualified_root ()));
 //if existing is junk the block has been confirmed already
@@ -74,7 +74,7 @@ TEST (work_watcher, propagate)
 auto updated_multiplier{ multiplier };
 auto propagated_multiplier{ multiplier };
 {
-nano::lock_guard<std::mutex> guard (node.active.mutex);
+nano::lock_guard<nano::mutex> guard (node.active.mutex);
 node.active.trended_active_multiplier = multiplier * 1.001;
 }
 bool updated{ false };
@@ -83,7 +83,7 @@ TEST (work_watcher, propagate)
 while (!(updated && propagated))
 {
 {
-nano::lock_guard<std::mutex> guard (node.active.mutex);
+nano::lock_guard<nano::mutex> guard (node.active.mutex);
 {
 auto const existing (node.active.roots.find (block->qualified_root ()));
 ASSERT_NE (existing, node.active.roots.end ());
@@ -91,7 +91,7 @@ TEST (work_watcher, propagate)
 }
 }
 {
-nano::lock_guard<std::mutex> guard (node_passive.active.mutex);
+nano::lock_guard<nano::mutex> guard (node_passive.active.mutex);
 {
 auto const existing (node_passive.active.roots.find (block->qualified_root ()));
 ASSERT_NE (existing, node_passive.active.roots.end ());
@@ -165,13 +165,13 @@ TEST (work_watcher, generation_disabled)
 auto multiplier = nano::normalized_multiplier (nano::difficulty::to_multiplier (difficulty, nano::work_threshold (block->work_version (), nano::block_details (nano::epoch::epoch_0, true, false, false))), node.network_params.network.publish_thresholds.epoch_1);
 double updated_multiplier{ multiplier };
 {
-nano::lock_guard<std::mutex> guard (node.active.mutex);
+nano::lock_guard<nano::mutex> guard (node.active.mutex);
 node.active.trended_active_multiplier = multiplier * 10;
 }
 std::this_thread::sleep_for (2s);
 ASSERT_TRUE (node.wallets.watcher->is_watched (block->qualified_root ()));
 {
-nano::lock_guard<std::mutex> guard (node.active.mutex);
+nano::lock_guard<nano::mutex> guard (node.active.mutex);
 auto const existing (node.active.roots.find (block->qualified_root ()));
 ASSERT_NE (existing, node.active.roots.end ());
 updated_multiplier = existing->multiplier;
@@ -194,7 +194,7 @@ TEST (work_watcher, cancel)
 auto work1 (node.work_generate_blocking (nano::dev_genesis_key.pub));
 auto const block1 (wallet.send_action (nano::dev_genesis_key.pub, key.pub, 100, *work1, false));
 {
-nano::unique_lock<std::mutex> lock (node.active.mutex);
+nano::unique_lock<nano::mutex> lock (node.active.mutex);
 // Prevent active difficulty repopulating multipliers
 node.network_params.network.request_interval_ms = 10000;
 // Fill multipliers_cb and update active difficulty;
@@ -240,7 +240,7 @@ TEST (work_watcher, confirm_while_generating)
 auto work1 (node.work_generate_blocking (nano::dev_genesis_key.pub));
 auto const block1 (wallet.send_action (nano::dev_genesis_key.pub, key.pub, 100, *work1, false));
 {
-nano::unique_lock<std::mutex> lock (node.active.mutex);
+nano::unique_lock<nano::mutex> lock (node.active.mutex);
 // Prevent active difficulty repopulating multipliers
 node.network_params.network.request_interval_ms = 10000;
 // Fill multipliers_cb and update active difficulty;


@@ -7,21 +7,21 @@ std::mutex nano::random_pool::mutex;
 void nano::random_pool::generate_block (unsigned char * output, size_t size)
 {
 auto & pool = get_pool ();
-std::lock_guard<std::mutex> guard (mutex);
+std::lock_guard guard (mutex);
 pool.GenerateBlock (output, size);
 }
 unsigned nano::random_pool::generate_word32 (unsigned min, unsigned max)
 {
 auto & pool = get_pool ();
-std::lock_guard<std::mutex> guard (mutex);
+std::lock_guard guard (mutex);
 return pool.GenerateWord32 (min, max);
 }
 unsigned char nano::random_pool::generate_byte ()
 {
 auto & pool = get_pool ();
-std::lock_guard<std::mutex> guard (mutex);
+std::lock_guard guard (mutex);
 return pool.GenerateByte ();
 }


@ -9,7 +9,7 @@ namespace nano
template <class Iter> template <class Iter>
void random_pool_shuffle (Iter begin, Iter end) void random_pool_shuffle (Iter begin, Iter end)
{ {
std::lock_guard<std::mutex> guard (random_pool::mutex); std::lock_guard guard (random_pool::mutex);
random_pool::get_pool ().Shuffle (begin, end); random_pool::get_pool ().Shuffle (begin, end);
} }
} }


@ -1851,7 +1851,7 @@ std::shared_ptr<nano::block> nano::block_uniquer::unique (std::shared_ptr<nano::
if (result != nullptr) if (result != nullptr)
{ {
nano::uint256_union key (block_a->full_hash ()); nano::uint256_union key (block_a->full_hash ());
nano::lock_guard<std::mutex> lock (mutex); nano::lock_guard<nano::mutex> lock (mutex);
auto & existing (blocks[key]); auto & existing (blocks[key]);
if (auto block_l = existing.lock ()) if (auto block_l = existing.lock ())
{ {
@ -1888,7 +1888,7 @@ std::shared_ptr<nano::block> nano::block_uniquer::unique (std::shared_ptr<nano::
size_t nano::block_uniquer::size () size_t nano::block_uniquer::size ()
{ {
nano::lock_guard<std::mutex> lock (mutex); nano::lock_guard<nano::mutex> lock (mutex);
return blocks.size (); return blocks.size ();
} }


@ -408,7 +408,7 @@ public:
size_t size (); size_t size ();
private: private:
std::mutex mutex; nano::mutex mutex{ mutex_identifier (mutexes::block_uniquer) };
std::unordered_map<std::remove_const_t<value_type::first_type>, value_type::second_type> blocks; std::unordered_map<std::remove_const_t<value_type::first_type>, value_type::second_type> blocks;
static unsigned constexpr cleanup_count = 2; static unsigned constexpr cleanup_count = 2;
}; };


@ -1,20 +1,25 @@
#if NANO_TIMED_LOCKS > 0 #include <nano/lib/config.hpp>
#include <nano/lib/locks.hpp> #include <nano/lib/locks.hpp>
#include <nano/lib/utility.hpp> #include <nano/lib/utility.hpp>
#include <boost/format.hpp>
#include <cstring>
#include <iostream> #include <iostream>
#if USING_NANO_TIMED_LOCKS
namespace nano namespace nano
{ {
// These mutexes must have std::mutex interface in addition to "const char* get_name ()" method
template <typename Mutex> template <typename Mutex>
void output (const char * str, std::chrono::milliseconds time, Mutex & mutex) void output (const char * str, std::chrono::milliseconds time, Mutex & mutex)
{ {
static std::mutex cout_mutex; static nano::mutex cout_mutex;
auto stacktrace = nano::generate_stacktrace (); auto stacktrace = nano::generate_stacktrace ();
// Guard standard out to keep the output from being interleaved // Guard standard out to keep the output from being interleaved
std::lock_guard<std::mutex> guard (cout_mutex); std::lock_guard guard (cout_mutex);
std::cout << std::addressof (mutex) << " Mutex " << str << " for: " << time.count () << "ms\n" std::cout << (boost::format ("%1% Mutex %2% %3% for %4%ms\n%5%") % std::addressof (mutex) % mutex.get_name () % str % time.count () % stacktrace).str ()
<< stacktrace << std::endl; << std::endl;
} }
template <typename Mutex> template <typename Mutex>
@ -23,7 +28,12 @@ void output_if_held_long_enough (nano::timer<std::chrono::milliseconds> & timer,
auto time_held = timer.since_start (); auto time_held = timer.since_start ();
if (time_held >= std::chrono::milliseconds (NANO_TIMED_LOCKS)) if (time_held >= std::chrono::milliseconds (NANO_TIMED_LOCKS))
{ {
output ("held", time_held, mutex); std::unique_lock lk (nano::mutex_to_filter_mutex);
if (!nano::any_filters_registered () || (nano::mutex_to_filter == &mutex))
{
lk.unlock ();
output ("held", time_held, mutex);
}
} }
if (timer.current_state () != nano::timer_state::stopped) if (timer.current_state () != nano::timer_state::stopped)
{ {
@ -38,19 +48,17 @@ void output_if_blocked_long_enough (nano::timer<std::chrono::milliseconds> & tim
auto time_blocked = timer.since_start (); auto time_blocked = timer.since_start ();
if (time_blocked >= std::chrono::milliseconds (NANO_TIMED_LOCKS)) if (time_blocked >= std::chrono::milliseconds (NANO_TIMED_LOCKS))
{ {
output ("blocked", time_blocked, mutex); std::unique_lock lk (nano::mutex_to_filter_mutex);
if (!nano::any_filters_registered () || (nano::mutex_to_filter == &mutex))
{
lk.unlock ();
output ("blocked", time_blocked, mutex);
}
} }
} }
#endif #endif
// Explicit instantations lock_guard<nano::mutex>::lock_guard (nano::mutex & mutex) :
template void output (const char * str, std::chrono::milliseconds time, std::mutex & mutex);
template void output_if_held_long_enough (nano::timer<std::chrono::milliseconds> & timer, std::mutex & mutex);
#ifndef NANO_TIMED_LOCKS_IGNORE_BLOCKED
template void output_if_blocked_long_enough (nano::timer<std::chrono::milliseconds> & timer, std::mutex & mutex);
#endif
lock_guard<std::mutex>::lock_guard (std::mutex & mutex) :
mut (mutex) mut (mutex)
{ {
timer.start (); timer.start ();
@ -61,7 +69,7 @@ mut (mutex)
#endif #endif
} }
lock_guard<std::mutex>::~lock_guard () noexcept lock_guard<nano::mutex>::~lock_guard () noexcept
{ {
mut.unlock (); mut.unlock ();
output_if_held_long_enough (timer, mut); output_if_held_long_enough (timer, mut);
@ -195,7 +203,7 @@ void unique_lock<Mutex, U>::validate () const
} }
// Explicit instantiations for allowed types // Explicit instantiations for allowed types
template class unique_lock<std::mutex>; template class unique_lock<nano::mutex>;
void condition_variable::notify_one () noexcept void condition_variable::notify_one () noexcept
{ {
@ -207,7 +215,7 @@ void condition_variable::notify_all () noexcept
cnd.notify_all (); cnd.notify_all ();
} }
void condition_variable::wait (nano::unique_lock<std::mutex> & lk) void condition_variable::wait (nano::unique_lock<nano::mutex> & lk)
{ {
if (!lk.mut || !lk.owns) if (!lk.mut || !lk.owns)
{ {
@ -220,5 +228,66 @@ void condition_variable::wait (nano::unique_lock<std::mutex> & lk)
cnd.wait (lk); cnd.wait (lk);
lk.timer.restart (); lk.timer.restart ();
} }
template class unique_lock<nano::mutex>;
nano::mutex * mutex_to_filter{ nullptr };
nano::mutex mutex_to_filter_mutex;
bool should_be_filtered (const char * name)
{
return std::strcmp (name, xstr (NANO_TIMED_LOCKS_FILTER)) == 0;
}
bool any_filters_registered ()
{
return std::strcmp ("", xstr (NANO_TIMED_LOCKS_FILTER)) != 0;
}
} }
#endif #endif
char const * nano::mutex_identifier (mutexes mutex)
{
switch (mutex)
{
case mutexes::active:
return "active";
case mutexes::block_arrival:
return "block_arrival";
case mutexes::block_processor:
return "block_processor";
case mutexes::block_uniquer:
return "block_uniquer";
case mutexes::blockstore_cache:
return "blockstore_cache";
case mutexes::confirmation_height_processor:
return "confirmation_height_processor";
case mutexes::dropped_elections:
return "dropped_elections";
case mutexes::election_winner_details:
return "election_winner_details";
case mutexes::gap_cache:
return "gap_cache";
case mutexes::network_filter:
return "network_filter";
case mutexes::observer_set:
return "observer_set";
case mutexes::request_aggregator:
return "request_aggregator";
case mutexes::state_block_signature_verification:
return "state_block_signature_verification";
case mutexes::telemetry:
return "telemetry";
case mutexes::vote_generator:
return "vote_generator";
case mutexes::vote_processor:
return "vote_processor";
case mutexes::vote_uniquer:
return "vote_uniquer";
case mutexes::votes_cache:
return "votes_cache";
case mutexes::work_pool:
return "work_pool";
}
throw std::runtime_error ("Invalid mutexes enum specified");
}
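For readers who want the mechanism in one place, here is a standalone sketch (illustrative only, not the project's code) of the filter registration implemented above: a mutex constructed with a name compares that name against the stringized NANO_TIMED_LOCKS_FILTER value and, on a match, records itself as the single mutex whose long holds and blocks are reported. The named_mutex type and the str_impl/xstr stringize macros below are stand-ins for the real definitions.

// Standalone sketch of the filtering idea (illustrative only, not the project's code).
// A mutex constructed with a name registers itself as the one filtered mutex when that
// name equals the stringized NANO_TIMED_LOCKS_FILTER; an empty filter means report all.
#include <cstring>
#include <iostream>
#include <mutex>

#ifndef NANO_TIMED_LOCKS_FILTER
#define NANO_TIMED_LOCKS_FILTER active
#endif
#define str_impl(s) #s
#define xstr(s) str_impl (s)

class named_mutex;
static named_mutex * mutex_to_filter{ nullptr };
static std::mutex mutex_to_filter_mutex;

bool should_be_filtered (char const * name)
{
	return std::strcmp (name, xstr (NANO_TIMED_LOCKS_FILTER)) == 0;
}

bool any_filters_registered ()
{
	return std::strcmp ("", xstr (NANO_TIMED_LOCKS_FILTER)) != 0;
}

class named_mutex
{
public:
	explicit named_mutex (char const * name_a) :
	name (name_a)
	{
		if (name && should_be_filtered (name))
		{
			std::lock_guard<std::mutex> guard (mutex_to_filter_mutex);
			mutex_to_filter = this;
		}
	}
	char const * name;
	std::mutex inner;
};

int main ()
{
	named_mutex active_mutex ("active");
	named_mutex gap_cache_mutex ("gap_cache");
	std::lock_guard<std::mutex> guard (mutex_to_filter_mutex);
	// With the filter set to "active", only active_mutex is selected for reporting
	std::cout << std::boolalpha << (mutex_to_filter == &active_mutex) << '\n'; // true
	std::cout << (mutex_to_filter == &gap_cache_mutex) << '\n'; // false
}

In the real header, nano::mutex plays the role of named_mutex and its destructor clears the registration again, so a destroyed mutex stops being filtered; when no filter name is configured, every tracked mutex that exceeds the NANO_TIMED_LOCKS threshold is still reported.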


@ -1,6 +1,8 @@
#pragma once #pragma once
#if NANO_TIMED_LOCKS > 0 #define USING_NANO_TIMED_LOCKS (NANO_TIMED_LOCKS > 0)
#if USING_NANO_TIMED_LOCKS
#include <nano/lib/timer.hpp> #include <nano/lib/timer.hpp>
#endif #endif
@ -9,15 +11,110 @@
namespace nano namespace nano
{ {
#if NANO_TIMED_LOCKS > 0 class mutex;
extern nano::mutex * mutex_to_filter;
extern nano::mutex mutex_to_filter_mutex;
bool should_be_filtered (const char * name);
bool any_filters_registered ();
enum class mutexes
{
active,
block_arrival,
block_processor,
block_uniquer,
blockstore_cache,
confirmation_height_processor,
dropped_elections,
election_winner_details,
gap_cache,
network_filter,
observer_set,
request_aggregator,
state_block_signature_verification,
telemetry,
vote_generator,
vote_processor,
vote_uniquer,
votes_cache,
work_pool
};
char const * mutex_identifier (mutexes mutex);
class mutex
{
public:
mutex () = default;
mutex (const char * name_a)
#if USING_NANO_TIMED_LOCKS
:
name (name_a)
#endif
{
#if USING_NANO_TIMED_LOCKS
// This mutex should be filtered
if (name && should_be_filtered (name))
{
std::lock_guard guard (mutex_to_filter_mutex);
mutex_to_filter = this;
}
#endif
}
#if USING_NANO_TIMED_LOCKS
~mutex ()
{
// Unfilter this destroyed mutex
if (name && should_be_filtered (name))
{
// Unregister the mutex
std::lock_guard guard (mutex_to_filter_mutex);
mutex_to_filter = nullptr;
}
}
#endif
void lock ()
{
mutex_m.lock ();
}
void unlock ()
{
mutex_m.unlock ();
}
bool try_lock ()
{
return mutex_m.try_lock ();
}
#if USING_NANO_TIMED_LOCKS
const char * get_name () const
{
return name ? name : "";
}
#endif
private:
#if USING_NANO_TIMED_LOCKS
const char * name{ nullptr };
#endif
std::mutex mutex_m;
};
#if USING_NANO_TIMED_LOCKS
template <typename Mutex> template <typename Mutex>
void output (const char * str, std::chrono::milliseconds time, Mutex & mutex); void output (const char * str, std::chrono::milliseconds time, Mutex & mutex);
template <typename Mutex> template <typename Mutex>
void output_if_held_long_enough (nano::timer<std::chrono::milliseconds> & timer, Mutex & mutex); void output_if_held_long_enough (nano::timer<std::chrono::milliseconds> & timer, Mutex & mutex);
#ifndef NANO_TIMED_LOCKS_IGNORE_BLOCKED
template <typename Mutex> template <typename Mutex>
void output_if_blocked_long_enough (nano::timer<std::chrono::milliseconds> & timer, Mutex & mutex); void output_if_blocked_long_enough (nano::timer<std::chrono::milliseconds> & timer, Mutex & mutex);
#endif
template <typename Mutex> template <typename Mutex>
class lock_guard final class lock_guard final
@ -36,21 +133,21 @@ private:
}; };
template <> template <>
class lock_guard<std::mutex> final class lock_guard<nano::mutex> final
{ {
public: public:
explicit lock_guard (std::mutex & mutex_a); explicit lock_guard (nano::mutex & mutex_a);
~lock_guard () noexcept; ~lock_guard () noexcept;
lock_guard (const lock_guard &) = delete; lock_guard (const lock_guard &) = delete;
lock_guard & operator= (const lock_guard &) = delete; lock_guard & operator= (const lock_guard &) = delete;
private: private:
std::mutex & mut; nano::mutex & mut;
nano::timer<std::chrono::milliseconds> timer; nano::timer<std::chrono::milliseconds> timer;
}; };
template <typename Mutex, typename = std::enable_if_t<std::is_same<Mutex, std::mutex>::value>> template <typename Mutex, typename = std::enable_if_t<std::is_same<Mutex, nano::mutex>::value>>
class unique_lock final class unique_lock final
{ {
public: public:
@ -94,10 +191,10 @@ public:
void notify_one () noexcept; void notify_one () noexcept;
void notify_all () noexcept; void notify_all () noexcept;
void wait (nano::unique_lock<std::mutex> & lt); void wait (nano::unique_lock<nano::mutex> & lt);
template <typename Pred> template <typename Pred>
void wait (nano::unique_lock<std::mutex> & lk, Pred pred) void wait (nano::unique_lock<nano::mutex> & lk, Pred pred)
{ {
while (!pred ()) while (!pred ())
{ {
@ -106,7 +203,7 @@ public:
} }
template <typename Clock, typename Duration> template <typename Clock, typename Duration>
std::cv_status wait_until (nano::unique_lock<std::mutex> & lk, std::chrono::time_point<Clock, Duration> const & timeout_time) std::cv_status wait_until (nano::unique_lock<nano::mutex> & lk, std::chrono::time_point<Clock, Duration> const & timeout_time)
{ {
if (!lk.mut || !lk.owns) if (!lk.mut || !lk.owns)
{ {
@ -122,7 +219,7 @@ public:
} }
template <typename Clock, typename Duration, typename Pred> template <typename Clock, typename Duration, typename Pred>
bool wait_until (nano::unique_lock<std::mutex> & lk, std::chrono::time_point<Clock, Duration> const & timeout_time, Pred pred) bool wait_until (nano::unique_lock<nano::mutex> & lk, std::chrono::time_point<Clock, Duration> const & timeout_time, Pred pred)
{ {
while (!pred ()) while (!pred ())
{ {
@ -135,13 +232,13 @@ public:
} }
template <typename Rep, typename Period> template <typename Rep, typename Period>
void wait_for (nano::unique_lock<std::mutex> & lk, std::chrono::duration<Rep, Period> const & rel_time) void wait_for (nano::unique_lock<nano::mutex> & lk, std::chrono::duration<Rep, Period> const & rel_time)
{ {
wait_until (lk, std::chrono::steady_clock::now () + rel_time); wait_until (lk, std::chrono::steady_clock::now () + rel_time);
} }
template <typename Rep, typename Period, typename Pred> template <typename Rep, typename Period, typename Pred>
bool wait_for (nano::unique_lock<std::mutex> & lk, std::chrono::duration<Rep, Period> const & rel_time, Pred pred) bool wait_for (nano::unique_lock<nano::mutex> & lk, std::chrono::duration<Rep, Period> const & rel_time, Pred pred)
{ {
return wait_until (lk, std::chrono::steady_clock::now () + rel_time, std::move (pred)); return wait_until (lk, std::chrono::steady_clock::now () + rel_time, std::move (pred));
} }
@ -212,7 +309,7 @@ public:
T & operator= (T const & other) T & operator= (T const & other)
{ {
nano::unique_lock<std::mutex> lk (mutex); nano::unique_lock<nano::mutex> lk (mutex);
obj = other; obj = other;
return obj; return obj;
} }
@ -230,6 +327,6 @@ public:
private: private:
T obj; T obj;
std::mutex mutex; nano::mutex mutex;
}; };
} }
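Assuming the interface declared above is reachable through <nano/lib/locks.hpp>, a minimal usage sketch of the new wrappers follows; the worker thread and the choice of the active identifier are illustrative, not taken from the commit.

#include <nano/lib/locks.hpp>

#include <thread>

int main ()
{
	// Naming the mutex with an identifier lets NANO_TIMED_LOCKS_FILTER single it out
	nano::mutex mutex{ nano::mutex_identifier (nano::mutexes::active) };
	nano::condition_variable condition;
	bool ready{ false };

	std::thread worker ([&] {
		// lock_guard<nano::mutex> times how long the lock is held when NANO_TIMED_LOCKS > 0
		nano::lock_guard<nano::mutex> guard (mutex);
		ready = true;
		condition.notify_one ();
	});

	nano::unique_lock<nano::mutex> lock (mutex);
	// condition_variable::wait restarts the hold timer after each wake-up
	condition.wait (lock, [&ready] { return ready; });
	worker.join ();
}

Keeping the wrappers drop-in compatible with their std counterparts is what allows the rest of this commit to be a largely mechanical std:: to nano:: rename.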


@ -108,7 +108,7 @@ public:
{ {
auto error (true); auto error (true);
auto time_now = std::chrono::steady_clock::now (); auto time_now = std::chrono::steady_clock::now ();
nano::unique_lock<std::mutex> lk (last_log_time_mutex); nano::unique_lock<nano::mutex> lk (last_log_time_mutex);
if (((time_now - last_log_time) > min_log_delta) || last_log_time == std::chrono::steady_clock::time_point{}) if (((time_now - last_log_time) > min_log_delta) || last_log_time == std::chrono::steady_clock::time_point{})
{ {
last_log_time = time_now; last_log_time = time_now;
@ -132,7 +132,7 @@ public:
std::chrono::milliseconds min_log_delta{ 0 }; std::chrono::milliseconds min_log_delta{ 0 };
private: private:
std::mutex last_log_time_mutex; nano::mutex last_log_time_mutex;
std::chrono::steady_clock::time_point last_log_time; std::chrono::steady_clock::time_point last_log_time;
boost::log::sources::severity_logger_mt<severity_level> boost_logger_mt; boost::log::sources::severity_logger_mt<severity_level> boost_logger_mt;
}; };


@ -20,7 +20,7 @@ nano::rate::token_bucket::token_bucket (size_t max_token_count_a, size_t refill_
bool nano::rate::token_bucket::try_consume (unsigned tokens_required_a) bool nano::rate::token_bucket::try_consume (unsigned tokens_required_a)
{ {
debug_assert (tokens_required_a <= 1e9); debug_assert (tokens_required_a <= 1e9);
nano::lock_guard<std::mutex> lk (bucket_mutex); nano::lock_guard<nano::mutex> lk (bucket_mutex);
refill (); refill ();
bool possible = current_size >= tokens_required_a; bool possible = current_size >= tokens_required_a;
if (possible) if (possible)
@ -48,6 +48,6 @@ void nano::rate::token_bucket::refill ()
size_t nano::rate::token_bucket::largest_burst () const size_t nano::rate::token_bucket::largest_burst () const
{ {
nano::lock_guard<std::mutex> lk (bucket_mutex); nano::lock_guard<nano::mutex> lk (bucket_mutex);
return max_token_count - smallest_size; return max_token_count - smallest_size;
} }


@ -49,7 +49,7 @@ namespace rate
/** The minimum observed bucket size, from which the largest burst can be derived */ /** The minimum observed bucket size, from which the largest burst can be derived */
size_t smallest_size{ 0 }; size_t smallest_size{ 0 };
std::chrono::steady_clock::time_point last_refill; std::chrono::steady_clock::time_point last_refill;
mutable std::mutex bucket_mutex; mutable nano::mutex bucket_mutex;
}; };
} }
} }


@ -3,7 +3,7 @@
void nano::rep_weights::representation_add (nano::account const & source_rep_a, nano::uint128_t const & amount_a) void nano::rep_weights::representation_add (nano::account const & source_rep_a, nano::uint128_t const & amount_a)
{ {
nano::lock_guard<std::mutex> guard (mutex); nano::lock_guard<nano::mutex> guard (mutex);
auto source_previous (get (source_rep_a)); auto source_previous (get (source_rep_a));
put (source_rep_a, source_previous + amount_a); put (source_rep_a, source_previous + amount_a);
} }
@ -12,7 +12,7 @@ void nano::rep_weights::representation_add_dual (nano::account const & source_re
{ {
if (source_rep_1 != source_rep_2) if (source_rep_1 != source_rep_2)
{ {
nano::lock_guard<std::mutex> guard (mutex); nano::lock_guard<nano::mutex> guard (mutex);
auto source_previous_1 (get (source_rep_1)); auto source_previous_1 (get (source_rep_1));
put (source_rep_1, source_previous_1 + amount_1); put (source_rep_1, source_previous_1 + amount_1);
auto source_previous_2 (get (source_rep_2)); auto source_previous_2 (get (source_rep_2));
@ -26,27 +26,27 @@ void nano::rep_weights::representation_add_dual (nano::account const & source_re
void nano::rep_weights::representation_put (nano::account const & account_a, nano::uint128_union const & representation_a) void nano::rep_weights::representation_put (nano::account const & account_a, nano::uint128_union const & representation_a)
{ {
nano::lock_guard<std::mutex> guard (mutex); nano::lock_guard<nano::mutex> guard (mutex);
put (account_a, representation_a); put (account_a, representation_a);
} }
nano::uint128_t nano::rep_weights::representation_get (nano::account const & account_a) const nano::uint128_t nano::rep_weights::representation_get (nano::account const & account_a) const
{ {
nano::lock_guard<std::mutex> lk (mutex); nano::lock_guard<nano::mutex> lk (mutex);
return get (account_a); return get (account_a);
} }
/** Makes a copy */ /** Makes a copy */
std::unordered_map<nano::account, nano::uint128_t> nano::rep_weights::get_rep_amounts () const std::unordered_map<nano::account, nano::uint128_t> nano::rep_weights::get_rep_amounts () const
{ {
nano::lock_guard<std::mutex> guard (mutex); nano::lock_guard<nano::mutex> guard (mutex);
return rep_amounts; return rep_amounts;
} }
void nano::rep_weights::copy_from (nano::rep_weights & other_a) void nano::rep_weights::copy_from (nano::rep_weights & other_a)
{ {
nano::lock_guard<std::mutex> guard_this (mutex); nano::lock_guard<nano::mutex> guard_this (mutex);
nano::lock_guard<std::mutex> guard_other (other_a.mutex); nano::lock_guard<nano::mutex> guard_other (other_a.mutex);
for (auto const & entry : other_a.rep_amounts) for (auto const & entry : other_a.rep_amounts)
{ {
auto prev_amount (get (entry.first)); auto prev_amount (get (entry.first));
@ -86,7 +86,7 @@ std::unique_ptr<nano::container_info_component> nano::collect_container_info (na
size_t rep_amounts_count; size_t rep_amounts_count;
{ {
nano::lock_guard<std::mutex> guard (rep_weights.mutex); nano::lock_guard<nano::mutex> guard (rep_weights.mutex);
rep_amounts_count = rep_weights.rep_amounts.size (); rep_amounts_count = rep_weights.rep_amounts.size ();
} }
auto sizeof_element = sizeof (decltype (rep_weights.rep_amounts)::value_type); auto sizeof_element = sizeof (decltype (rep_weights.rep_amounts)::value_type);


@ -23,7 +23,7 @@ public:
void copy_from (rep_weights & other_a); void copy_from (rep_weights & other_a);
private: private:
mutable std::mutex mutex; mutable nano::mutex mutex;
std::unordered_map<nano::account, nano::uint128_t> rep_amounts; std::unordered_map<nano::account, nano::uint128_t> rep_amounts;
void put (nano::account const & account_a, nano::uint128_union const & representation_a); void put (nano::account const & account_a, nano::uint128_union const & representation_a);
nano::uint128_t get (nano::account const & account_a) const; nano::uint128_t get (nano::account const & account_a) const;


@ -203,7 +203,7 @@ std::shared_ptr<nano::stat_entry> nano::stat::get_entry (uint32_t key)
std::shared_ptr<nano::stat_entry> nano::stat::get_entry (uint32_t key, size_t interval, size_t capacity) std::shared_ptr<nano::stat_entry> nano::stat::get_entry (uint32_t key, size_t interval, size_t capacity)
{ {
nano::unique_lock<std::mutex> lock (stat_mutex); nano::unique_lock<nano::mutex> lock (stat_mutex);
return get_entry_impl (key, interval, capacity); return get_entry_impl (key, interval, capacity);
} }
@ -230,7 +230,7 @@ std::unique_ptr<nano::stat_log_sink> nano::stat::log_sink_json () const
void nano::stat::log_counters (stat_log_sink & sink) void nano::stat::log_counters (stat_log_sink & sink)
{ {
nano::unique_lock<std::mutex> lock (stat_mutex); nano::unique_lock<nano::mutex> lock (stat_mutex);
log_counters_impl (sink); log_counters_impl (sink);
} }
@ -265,7 +265,7 @@ void nano::stat::log_counters_impl (stat_log_sink & sink)
void nano::stat::log_samples (stat_log_sink & sink) void nano::stat::log_samples (stat_log_sink & sink)
{ {
nano::unique_lock<std::mutex> lock (stat_mutex); nano::unique_lock<nano::mutex> lock (stat_mutex);
log_samples_impl (sink); log_samples_impl (sink);
} }
@ -308,7 +308,7 @@ void nano::stat::update (uint32_t key_a, uint64_t value)
auto now (std::chrono::steady_clock::now ()); auto now (std::chrono::steady_clock::now ());
nano::unique_lock<std::mutex> lock (stat_mutex); nano::unique_lock<nano::mutex> lock (stat_mutex);
if (!stopped) if (!stopped)
{ {
auto entry (get_entry_impl (key_a, config.interval, config.capacity)); auto entry (get_entry_impl (key_a, config.interval, config.capacity));
@ -360,20 +360,20 @@ void nano::stat::update (uint32_t key_a, uint64_t value)
std::chrono::seconds nano::stat::last_reset () std::chrono::seconds nano::stat::last_reset ()
{ {
nano::unique_lock<std::mutex> lock (stat_mutex); nano::unique_lock<nano::mutex> lock (stat_mutex);
auto now (std::chrono::steady_clock::now ()); auto now (std::chrono::steady_clock::now ());
return std::chrono::duration_cast<std::chrono::seconds> (now - timestamp); return std::chrono::duration_cast<std::chrono::seconds> (now - timestamp);
} }
void nano::stat::stop () void nano::stat::stop ()
{ {
nano::lock_guard<std::mutex> guard (stat_mutex); nano::lock_guard<nano::mutex> guard (stat_mutex);
stopped = true; stopped = true;
} }
void nano::stat::clear () void nano::stat::clear ()
{ {
nano::unique_lock<std::mutex> lock (stat_mutex); nano::unique_lock<nano::mutex> lock (stat_mutex);
entries.clear (); entries.clear ();
timestamp = std::chrono::steady_clock::now (); timestamp = std::chrono::steady_clock::now ();
} }
@ -785,14 +785,14 @@ std::string nano::stat::dir_to_string (uint32_t key)
nano::stat_datapoint::stat_datapoint (stat_datapoint const & other_a) nano::stat_datapoint::stat_datapoint (stat_datapoint const & other_a)
{ {
nano::lock_guard<std::mutex> lock (other_a.datapoint_mutex); nano::lock_guard<nano::mutex> lock (other_a.datapoint_mutex);
value = other_a.value; value = other_a.value;
timestamp = other_a.timestamp; timestamp = other_a.timestamp;
} }
nano::stat_datapoint & nano::stat_datapoint::operator= (stat_datapoint const & other_a) nano::stat_datapoint & nano::stat_datapoint::operator= (stat_datapoint const & other_a)
{ {
nano::lock_guard<std::mutex> lock (other_a.datapoint_mutex); nano::lock_guard<nano::mutex> lock (other_a.datapoint_mutex);
value = other_a.value; value = other_a.value;
timestamp = other_a.timestamp; timestamp = other_a.timestamp;
return *this; return *this;
@ -800,32 +800,32 @@ nano::stat_datapoint & nano::stat_datapoint::operator= (stat_datapoint const & o
uint64_t nano::stat_datapoint::get_value () const uint64_t nano::stat_datapoint::get_value () const
{ {
nano::lock_guard<std::mutex> lock (datapoint_mutex); nano::lock_guard<nano::mutex> lock (datapoint_mutex);
return value; return value;
} }
void nano::stat_datapoint::set_value (uint64_t value_a) void nano::stat_datapoint::set_value (uint64_t value_a)
{ {
nano::lock_guard<std::mutex> lock (datapoint_mutex); nano::lock_guard<nano::mutex> lock (datapoint_mutex);
value = value_a; value = value_a;
} }
std::chrono::system_clock::time_point nano::stat_datapoint::get_timestamp () const std::chrono::system_clock::time_point nano::stat_datapoint::get_timestamp () const
{ {
nano::lock_guard<std::mutex> lock (datapoint_mutex); nano::lock_guard<nano::mutex> lock (datapoint_mutex);
return timestamp; return timestamp;
} }
void nano::stat_datapoint::set_timestamp (std::chrono::system_clock::time_point timestamp_a) void nano::stat_datapoint::set_timestamp (std::chrono::system_clock::time_point timestamp_a)
{ {
nano::lock_guard<std::mutex> lock (datapoint_mutex); nano::lock_guard<nano::mutex> lock (datapoint_mutex);
timestamp = timestamp_a; timestamp = timestamp_a;
} }
/** Add \addend to the current value and optionally update the timestamp */ /** Add \addend to the current value and optionally update the timestamp */
void nano::stat_datapoint::add (uint64_t addend, bool update_timestamp) void nano::stat_datapoint::add (uint64_t addend, bool update_timestamp)
{ {
nano::lock_guard<std::mutex> lock (datapoint_mutex); nano::lock_guard<nano::mutex> lock (datapoint_mutex);
value += addend; value += addend;
if (update_timestamp) if (update_timestamp)
{ {


@ -71,7 +71,7 @@ public:
void add (uint64_t addend, bool update_timestamp = true); void add (uint64_t addend, bool update_timestamp = true);
private: private:
mutable std::mutex datapoint_mutex; mutable nano::mutex datapoint_mutex;
/** Value of the sample interval */ /** Value of the sample interval */
uint64_t value{ 0 }; uint64_t value{ 0 };
/** When the sample was added. This is wall time (system_clock), suitable for display purposes. */ /** When the sample was added. This is wall time (system_clock), suitable for display purposes. */
@ -545,6 +545,6 @@ private:
bool stopped{ false }; bool stopped{ false };
/** All access to stat is thread safe, including calls from observers on the same thread */ /** All access to stat is thread safe, including calls from observers on the same thread */
std::mutex stat_mutex; nano::mutex stat_mutex;
}; };
} }


@ -210,7 +210,7 @@ nano::thread_pool::~thread_pool ()
void nano::thread_pool::stop () void nano::thread_pool::stop ()
{ {
nano::unique_lock<std::mutex> lk (mutex); nano::unique_lock<nano::mutex> lk (mutex);
if (!stopped) if (!stopped)
{ {
stopped = true; stopped = true;
@ -229,7 +229,7 @@ void nano::thread_pool::stop ()
void nano::thread_pool::push_task (std::function<void()> task) void nano::thread_pool::push_task (std::function<void()> task)
{ {
++num_tasks; ++num_tasks;
nano::lock_guard<std::mutex> guard (mutex); nano::lock_guard<nano::mutex> guard (mutex);
if (!stopped) if (!stopped)
{ {
boost::asio::post (*thread_pool_m, [this, task]() { boost::asio::post (*thread_pool_m, [this, task]() {
@ -241,7 +241,7 @@ void nano::thread_pool::push_task (std::function<void()> task)
void nano::thread_pool::add_timed_task (std::chrono::steady_clock::time_point const & expiry_time, std::function<void()> task) void nano::thread_pool::add_timed_task (std::chrono::steady_clock::time_point const & expiry_time, std::function<void()> task)
{ {
nano::lock_guard<std::mutex> guard (mutex); nano::lock_guard<nano::mutex> guard (mutex);
if (!stopped && thread_pool_m) if (!stopped && thread_pool_m)
{ {
auto timer = std::make_shared<boost::asio::steady_timer> (thread_pool_m->get_executor (), expiry_time); auto timer = std::make_shared<boost::asio::steady_timer> (thread_pool_m->get_executor (), expiry_time);


@ -184,7 +184,7 @@ public:
uint64_t num_queued_tasks () const; uint64_t num_queued_tasks () const;
private: private:
std::mutex mutex; nano::mutex mutex;
std::atomic<bool> stopped{ false }; std::atomic<bool> stopped{ false };
unsigned num_threads; unsigned num_threads;
std::unique_ptr<boost::asio::thread_pool> thread_pool_m; std::unique_ptr<boost::asio::thread_pool> thread_pool_m;


@ -138,18 +138,18 @@ class observer_set final
public: public:
void add (std::function<void(T...)> const & observer_a) void add (std::function<void(T...)> const & observer_a)
{ {
nano::lock_guard<std::mutex> lock (mutex); nano::lock_guard<nano::mutex> lock (mutex);
observers.push_back (observer_a); observers.push_back (observer_a);
} }
void notify (T... args) void notify (T... args)
{ {
nano::lock_guard<std::mutex> lock (mutex); nano::lock_guard<nano::mutex> lock (mutex);
for (auto & i : observers) for (auto & i : observers)
{ {
i (args...); i (args...);
} }
} }
std::mutex mutex; nano::mutex mutex{ mutex_identifier (mutexes::observer_set) };
std::vector<std::function<void(T...)>> observers; std::vector<std::function<void(T...)>> observers;
}; };
@ -158,7 +158,7 @@ std::unique_ptr<container_info_component> collect_container_info (observer_set<T
{ {
size_t count = 0; size_t count = 0;
{ {
nano::lock_guard<std::mutex> lock (observer_set.mutex); nano::lock_guard<nano::mutex> lock (observer_set.mutex);
count = observer_set.observers.size (); count = observer_set.observers.size ();
} }
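A short usage sketch for the observer_set changed above; the <nano/lib/utility.hpp> include path and the nano namespace qualification are assumptions, since the file name is not visible in this diff.

#include <nano/lib/utility.hpp> // assumed header for observer_set

#include <iostream>

int main ()
{
	// add () and notify () both take the set's internal mutex, named "observer_set" for filtering
	nano::observer_set<int> observers;
	observers.add ([] (int value) {
		std::cout << "observed " << value << std::endl;
	});
	observers.notify (42);
}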


@ -241,7 +241,7 @@ void nano::work_pool::loop (uint64_t thread)
uint64_t output; uint64_t output;
blake2b_state hash; blake2b_state hash;
blake2b_init (&hash, sizeof (output)); blake2b_init (&hash, sizeof (output));
nano::unique_lock<std::mutex> lock (mutex); nano::unique_lock<nano::mutex> lock (mutex);
auto pow_sleep = pow_rate_limiter; auto pow_sleep = pow_rate_limiter;
while (!done) while (!done)
{ {
@ -321,7 +321,7 @@ void nano::work_pool::loop (uint64_t thread)
void nano::work_pool::cancel (nano::root const & root_a) void nano::work_pool::cancel (nano::root const & root_a)
{ {
nano::lock_guard<std::mutex> lock (mutex); nano::lock_guard<nano::mutex> lock (mutex);
if (!done) if (!done)
{ {
if (!pending.empty ()) if (!pending.empty ())
@ -349,7 +349,7 @@ void nano::work_pool::cancel (nano::root const & root_a)
void nano::work_pool::stop () void nano::work_pool::stop ()
{ {
{ {
nano::lock_guard<std::mutex> lock (mutex); nano::lock_guard<nano::mutex> lock (mutex);
done = true; done = true;
++ticket; ++ticket;
} }
@ -362,7 +362,7 @@ void nano::work_pool::generate (nano::work_version const version_a, nano::root c
if (!threads.empty ()) if (!threads.empty ())
{ {
{ {
nano::lock_guard<std::mutex> lock (mutex); nano::lock_guard<nano::mutex> lock (mutex);
pending.emplace_back (version_a, root_a, difficulty_a, callback_a); pending.emplace_back (version_a, root_a, difficulty_a, callback_a);
} }
producer_condition.notify_all (); producer_condition.notify_all ();
@ -404,7 +404,7 @@ boost::optional<uint64_t> nano::work_pool::generate (nano::work_version const ve
size_t nano::work_pool::size () size_t nano::work_pool::size ()
{ {
nano::lock_guard<std::mutex> lock (mutex); nano::lock_guard<nano::mutex> lock (mutex);
return pending.size (); return pending.size ();
} }
@ -412,7 +412,7 @@ std::unique_ptr<nano::container_info_component> nano::collect_container_info (wo
{ {
size_t count; size_t count;
{ {
nano::lock_guard<std::mutex> guard (work_pool.mutex); nano::lock_guard<nano::mutex> guard (work_pool.mutex);
count = work_pool.pending.size (); count = work_pool.pending.size ();
} }
auto sizeof_element = sizeof (decltype (work_pool.pending)::value_type); auto sizeof_element = sizeof (decltype (work_pool.pending)::value_type);


@ -75,7 +75,7 @@ public:
bool done; bool done;
std::vector<boost::thread> threads; std::vector<boost::thread> threads;
std::list<nano::work_item> pending; std::list<nano::work_item> pending;
std::mutex mutex; nano::mutex mutex{ mutex_identifier (mutexes::work_pool) };
nano::condition_variable producer_condition; nano::condition_variable producer_condition;
std::chrono::nanoseconds pow_rate_limiter; std::chrono::nanoseconds pow_rate_limiter;
std::function<boost::optional<uint64_t> (nano::work_version const, nano::root const &, uint64_t, std::atomic<int> &)> opencl; std::function<boost::optional<uint64_t> (nano::work_version const, nano::root const &, uint64_t, std::atomic<int> &)> opencl;


@ -72,7 +72,6 @@ int main (int argc, char * const * argv)
("help", "Print out options") ("help", "Print out options")
("version", "Prints out version") ("version", "Prints out version")
("config", boost::program_options::value<std::vector<nano::config_key_value_pair>>()->multitoken(), "Pass node configuration values. This takes precedence over any values in the configuration file. This option can be repeated multiple times.") ("config", boost::program_options::value<std::vector<nano::config_key_value_pair>>()->multitoken(), "Pass node configuration values. This takes precedence over any values in the configuration file. This option can be repeated multiple times.")
("rpcconfig", boost::program_options::value<std::vector<nano::config_key_value_pair>>()->multitoken(), "Pass RPC configuration values. This takes precedence over any values in the RPC configuration file. This option can be repeated multiple times.")
("daemon", "Start node daemon") ("daemon", "Start node daemon")
("compare_rep_weights", "Display a summarized comparison between the hardcoded bootstrap weights and representative weights from the ledger. Full comparison is output to logs") ("compare_rep_weights", "Display a summarized comparison between the hardcoded bootstrap weights and representative weights from the ledger. Full comparison is output to logs")
("debug_block_count", "Display the number of blocks") ("debug_block_count", "Display the number of blocks")
@ -1363,7 +1362,7 @@ int main (int argc, char * const * argv)
} }
threads_count = std::max (1u, threads_count); threads_count = std::max (1u, threads_count);
std::vector<std::thread> threads; std::vector<std::thread> threads;
std::mutex mutex; nano::mutex mutex;
nano::condition_variable condition; nano::condition_variable condition;
std::atomic<bool> finished (false); std::atomic<bool> finished (false);
std::deque<std::pair<nano::account, nano::account_info>> accounts; std::deque<std::pair<nano::account, nano::account_info>> accounts;
@ -1374,8 +1373,8 @@ int main (int argc, char * const * argv)
auto print_error_message = [&silent, &errors](std::string const & error_message_a) { auto print_error_message = [&silent, &errors](std::string const & error_message_a) {
if (!silent) if (!silent)
{ {
static std::mutex cerr_mutex; static nano::mutex cerr_mutex;
nano::lock_guard<std::mutex> lock (cerr_mutex); nano::lock_guard<nano::mutex> lock (cerr_mutex);
std::cerr << error_message_a; std::cerr << error_message_a;
} }
++errors; ++errors;
@ -1386,7 +1385,7 @@ int main (int argc, char * const * argv)
{ {
threads.emplace_back ([&function_a, node, &mutex, &condition, &finished, &deque_a]() { threads.emplace_back ([&function_a, node, &mutex, &condition, &finished, &deque_a]() {
auto transaction (node->store.tx_begin_read ()); auto transaction (node->store.tx_begin_read ());
nano::unique_lock<std::mutex> lock (mutex); nano::unique_lock<nano::mutex> lock (mutex);
while (!deque_a.empty () || !finished) while (!deque_a.empty () || !finished)
{ {
while (deque_a.empty () && !finished) while (deque_a.empty () && !finished)
@ -1638,7 +1637,7 @@ int main (int argc, char * const * argv)
for (auto i (node->store.accounts_begin (transaction)), n (node->store.accounts_end ()); i != n; ++i) for (auto i (node->store.accounts_begin (transaction)), n (node->store.accounts_end ()); i != n; ++i)
{ {
{ {
nano::unique_lock<std::mutex> lock (mutex); nano::unique_lock<nano::mutex> lock (mutex);
if (accounts.size () > accounts_deque_overflow) if (accounts.size () > accounts_deque_overflow)
{ {
auto wait_ms (250 * accounts.size () / accounts_deque_overflow); auto wait_ms (250 * accounts.size () / accounts_deque_overflow);
@ -1650,7 +1649,7 @@ int main (int argc, char * const * argv)
condition.notify_all (); condition.notify_all ();
} }
{ {
nano::lock_guard<std::mutex> lock (mutex); nano::lock_guard<nano::mutex> lock (mutex);
finished = true; finished = true;
} }
condition.notify_all (); condition.notify_all ();
@ -1749,7 +1748,7 @@ int main (int argc, char * const * argv)
for (auto i (node->store.pending_begin (transaction)), n (node->store.pending_end ()); i != n; ++i) for (auto i (node->store.pending_begin (transaction)), n (node->store.pending_end ()); i != n; ++i)
{ {
{ {
nano::unique_lock<std::mutex> lock (mutex); nano::unique_lock<nano::mutex> lock (mutex);
if (pending.size () > pending_deque_overflow) if (pending.size () > pending_deque_overflow)
{ {
auto wait_ms (50 * pending.size () / pending_deque_overflow); auto wait_ms (50 * pending.size () / pending_deque_overflow);
@ -1761,7 +1760,7 @@ int main (int argc, char * const * argv)
condition.notify_all (); condition.notify_all ();
} }
{ {
nano::lock_guard<std::mutex> lock (mutex); nano::lock_guard<nano::mutex> lock (mutex);
finished = true; finished = true;
} }
condition.notify_all (); condition.notify_all ();


@ -43,7 +43,7 @@ thread ([this]() {
this->block_already_cemented_callback (hash_a); this->block_already_cemented_callback (hash_a);
}); });
nano::unique_lock<std::mutex> lock (mutex); nano::unique_lock<nano::mutex> lock (mutex);
condition.wait (lock, [& started = started] { return started; }); condition.wait (lock, [& started = started] { return started; });
} }
@ -55,7 +55,7 @@ nano::active_transactions::~active_transactions ()
bool nano::active_transactions::insert_election_from_frontiers_confirmation (std::shared_ptr<nano::block> const & block_a, nano::account const & account_a, nano::uint128_t previous_balance_a, nano::election_behavior election_behavior_a) bool nano::active_transactions::insert_election_from_frontiers_confirmation (std::shared_ptr<nano::block> const & block_a, nano::account const & account_a, nano::uint128_t previous_balance_a, nano::election_behavior election_behavior_a)
{ {
bool inserted{ false }; bool inserted{ false };
nano::unique_lock<std::mutex> lock (mutex); nano::unique_lock<nano::mutex> lock (mutex);
if (roots.get<tag_root> ().find (block_a->qualified_root ()) == roots.get<tag_root> ().end ()) if (roots.get<tag_root> ().find (block_a->qualified_root ()) == roots.get<tag_root> ().end ())
{ {
std::function<void(std::shared_ptr<nano::block> const &)> election_confirmation_cb; std::function<void(std::shared_ptr<nano::block> const &)> election_confirmation_cb;
@ -124,7 +124,7 @@ void nano::active_transactions::set_next_frontier_check (bool agressive_mode_a)
void nano::active_transactions::confirm_prioritized_frontiers (nano::transaction const & transaction_a, uint64_t max_elections_a, uint64_t & elections_count_a) void nano::active_transactions::confirm_prioritized_frontiers (nano::transaction const & transaction_a, uint64_t max_elections_a, uint64_t & elections_count_a)
{ {
nano::unique_lock<std::mutex> lk (mutex); nano::unique_lock<nano::mutex> lk (mutex);
auto start_elections_for_prioritized_frontiers = [&transaction_a, &elections_count_a, max_elections_a, &lk, this](prioritize_num_uncemented & cementable_frontiers) { auto start_elections_for_prioritized_frontiers = [&transaction_a, &elections_count_a, max_elections_a, &lk, this](prioritize_num_uncemented & cementable_frontiers) {
while (!cementable_frontiers.empty () && !this->stopped && elections_count_a < max_elections_a && optimistic_elections_count < max_optimistic ()) while (!cementable_frontiers.empty () && !this->stopped && elections_count_a < max_elections_a && optimistic_elections_count < max_optimistic ())
{ {
@ -193,7 +193,7 @@ void nano::active_transactions::block_cemented_callback (std::shared_ptr<nano::b
else else
{ {
auto hash (block_a->hash ()); auto hash (block_a->hash ());
nano::unique_lock<std::mutex> election_winners_lk (election_winner_details_mutex); nano::unique_lock<nano::mutex> election_winners_lk (election_winner_details_mutex);
auto existing (election_winner_details.find (hash)); auto existing (election_winner_details.find (hash));
if (existing != election_winner_details.end ()) if (existing != election_winner_details.end ())
{ {
@ -202,7 +202,7 @@ void nano::active_transactions::block_cemented_callback (std::shared_ptr<nano::b
election_winners_lk.unlock (); election_winners_lk.unlock ();
if (election->confirmed () && election->winner ()->hash () == hash) if (election->confirmed () && election->winner ()->hash () == hash)
{ {
nano::unique_lock<std::mutex> election_lk (election->mutex); nano::unique_lock<nano::mutex> election_lk (election->mutex);
auto status_l = election->status; auto status_l = election->status;
election_lk.unlock (); election_lk.unlock ();
add_recently_cemented (status_l); add_recently_cemented (status_l);
@ -260,13 +260,13 @@ void nano::active_transactions::block_cemented_callback (std::shared_ptr<nano::b
void nano::active_transactions::add_election_winner_details (nano::block_hash const & hash_a, std::shared_ptr<nano::election> const & election_a) void nano::active_transactions::add_election_winner_details (nano::block_hash const & hash_a, std::shared_ptr<nano::election> const & election_a)
{ {
nano::lock_guard<std::mutex> guard (election_winner_details_mutex); nano::lock_guard<nano::mutex> guard (election_winner_details_mutex);
election_winner_details.emplace (hash_a, election_a); election_winner_details.emplace (hash_a, election_a);
} }
void nano::active_transactions::remove_election_winner_details (nano::block_hash const & hash_a) void nano::active_transactions::remove_election_winner_details (nano::block_hash const & hash_a)
{ {
nano::lock_guard<std::mutex> guard (election_winner_details_mutex); nano::lock_guard<nano::mutex> guard (election_winner_details_mutex);
election_winner_details.erase (hash_a); election_winner_details.erase (hash_a);
} }
@ -279,7 +279,7 @@ void nano::active_transactions::block_already_cemented_callback (nano::block_has
remove_election_winner_details (hash_a); remove_election_winner_details (hash_a);
} }
void nano::active_transactions::request_confirm (nano::unique_lock<std::mutex> & lock_a) void nano::active_transactions::request_confirm (nano::unique_lock<nano::mutex> & lock_a)
{ {
debug_assert (lock_a.owns_lock ()); debug_assert (lock_a.owns_lock ());
@ -351,7 +351,7 @@ void nano::active_transactions::request_confirm (nano::unique_lock<std::mutex> &
} }
} }
void nano::active_transactions::cleanup_election (nano::unique_lock<std::mutex> & lock_a, nano::election_cleanup_info const & info_a) void nano::active_transactions::cleanup_election (nano::unique_lock<nano::mutex> & lock_a, nano::election_cleanup_info const & info_a)
{ {
debug_assert (lock_a.owns_lock ()); debug_assert (lock_a.owns_lock ());
@ -388,7 +388,7 @@ void nano::active_transactions::cleanup_election (nano::unique_lock<std::mutex>
std::vector<std::shared_ptr<nano::election>> nano::active_transactions::list_active (size_t max_a) std::vector<std::shared_ptr<nano::election>> nano::active_transactions::list_active (size_t max_a)
{ {
nano::lock_guard<std::mutex> guard (mutex); nano::lock_guard<nano::mutex> guard (mutex);
return list_active_impl (max_a); return list_active_impl (max_a);
} }
@ -409,7 +409,7 @@ std::vector<std::shared_ptr<nano::election>> nano::active_transactions::list_act
void nano::active_transactions::add_expired_optimistic_election (nano::election const & election_a) void nano::active_transactions::add_expired_optimistic_election (nano::election const & election_a)
{ {
nano::lock_guard<std::mutex> guard (mutex); nano::lock_guard<nano::mutex> guard (mutex);
auto account = election_a.status.winner->account (); auto account = election_a.status.winner->account ();
if (account.is_zero ()) if (account.is_zero ())
{ {
@ -443,7 +443,7 @@ unsigned nano::active_transactions::max_optimistic ()
return node.ledger.cache.cemented_count < node.ledger.bootstrap_weight_max_blocks ? std::numeric_limits<unsigned>::max () : 50u; return node.ledger.cache.cemented_count < node.ledger.bootstrap_weight_max_blocks ? std::numeric_limits<unsigned>::max () : 50u;
} }
void nano::active_transactions::frontiers_confirmation (nano::unique_lock<std::mutex> & lock_a) void nano::active_transactions::frontiers_confirmation (nano::unique_lock<nano::mutex> & lock_a)
{ {
// Spend some time prioritizing accounts with the most uncemented blocks to reduce voting traffic // Spend some time prioritizing accounts with the most uncemented blocks to reduce voting traffic
auto request_interval = std::chrono::milliseconds (node.network_params.network.request_interval_ms); auto request_interval = std::chrono::milliseconds (node.network_params.network.request_interval_ms);
@ -560,7 +560,7 @@ bool nano::active_transactions::should_do_frontiers_confirmation () const
void nano::active_transactions::request_loop () void nano::active_transactions::request_loop ()
{ {
nano::unique_lock<std::mutex> lock (mutex); nano::unique_lock<nano::mutex> lock (mutex);
started = true; started = true;
lock.unlock (); lock.unlock ();
condition.notify_all (); condition.notify_all ();
@ -606,7 +606,7 @@ bool nano::active_transactions::prioritize_account_for_confirmation (nano::activ
if (info_a.block_count > confirmation_height_a && !confirmation_height_processor.is_processing_block (info_a.head)) if (info_a.block_count > confirmation_height_a && !confirmation_height_processor.is_processing_block (info_a.head))
{ {
auto num_uncemented = info_a.block_count - confirmation_height_a; auto num_uncemented = info_a.block_count - confirmation_height_a;
nano::lock_guard<std::mutex> guard (mutex); nano::lock_guard<nano::mutex> guard (mutex);
auto it = cementable_frontiers_a.get<tag_account> ().find (account_a); auto it = cementable_frontiers_a.get<tag_account> ().find (account_a);
if (it != cementable_frontiers_a.get<tag_account> ().end ()) if (it != cementable_frontiers_a.get<tag_account> ().end ())
{ {
@ -652,7 +652,7 @@ void nano::active_transactions::prioritize_frontiers_for_confirmation (nano::tra
size_t priority_cementable_frontiers_size; size_t priority_cementable_frontiers_size;
size_t priority_wallet_cementable_frontiers_size; size_t priority_wallet_cementable_frontiers_size;
{ {
nano::lock_guard<std::mutex> guard (mutex); nano::lock_guard<nano::mutex> guard (mutex);
priority_cementable_frontiers_size = priority_cementable_frontiers.size (); priority_cementable_frontiers_size = priority_cementable_frontiers.size ();
priority_wallet_cementable_frontiers_size = priority_wallet_cementable_frontiers.size (); priority_wallet_cementable_frontiers_size = priority_wallet_cementable_frontiers.size ();
} }
@ -673,7 +673,7 @@ void nano::active_transactions::prioritize_frontiers_for_confirmation (nano::tra
{ {
// Prioritize wallet accounts first // Prioritize wallet accounts first
{ {
nano::lock_guard<std::mutex> lock (node.wallets.mutex); nano::lock_guard<nano::mutex> lock (node.wallets.mutex);
auto wallet_transaction (node.wallets.tx_begin_read ()); auto wallet_transaction (node.wallets.tx_begin_read ());
auto const & items = node.wallets.items; auto const & items = node.wallets.items;
if (items.empty ()) if (items.empty ())
@ -707,7 +707,7 @@ void nano::active_transactions::prioritize_frontiers_for_confirmation (nano::tra
auto it = priority_cementable_frontiers.find (account); auto it = priority_cementable_frontiers.find (account);
if (it != priority_cementable_frontiers.end ()) if (it != priority_cementable_frontiers.end ())
{ {
nano::lock_guard<std::mutex> guard (mutex); nano::lock_guard<nano::mutex> guard (mutex);
priority_cementable_frontiers.erase (it); priority_cementable_frontiers.erase (it);
priority_cementable_frontiers_size = priority_cementable_frontiers.size (); priority_cementable_frontiers_size = priority_cementable_frontiers.size ();
} }
@ -780,7 +780,7 @@ void nano::active_transactions::prioritize_frontiers_for_confirmation (nano::tra
void nano::active_transactions::stop () void nano::active_transactions::stop ()
{ {
nano::unique_lock<std::mutex> lock (mutex); nano::unique_lock<nano::mutex> lock (mutex);
if (!started) if (!started)
{ {
condition.wait (lock, [& started = started] { return started; }); condition.wait (lock, [& started = started] { return started; });
@ -797,7 +797,7 @@ void nano::active_transactions::stop ()
roots.clear (); roots.clear ();
} }
nano::election_insertion_result nano::active_transactions::insert_impl (nano::unique_lock<std::mutex> & lock_a, std::shared_ptr<nano::block> const & block_a, boost::optional<nano::uint128_t> const & previous_balance_a, nano::election_behavior election_behavior_a, std::function<void(std::shared_ptr<nano::block> const &)> const & confirmation_action_a) nano::election_insertion_result nano::active_transactions::insert_impl (nano::unique_lock<nano::mutex> & lock_a, std::shared_ptr<nano::block> const & block_a, boost::optional<nano::uint128_t> const & previous_balance_a, nano::election_behavior election_behavior_a, std::function<void(std::shared_ptr<nano::block> const &)> const & confirmation_action_a)
{ {
debug_assert (lock_a.owns_lock ()); debug_assert (lock_a.owns_lock ());
debug_assert (block_a->has_sideband ()); debug_assert (block_a->has_sideband ());
@ -861,7 +861,7 @@ nano::election_insertion_result nano::active_transactions::insert_impl (nano::un
nano::election_insertion_result nano::active_transactions::insert (std::shared_ptr<nano::block> const & block_a, boost::optional<nano::uint128_t> const & previous_balance_a, nano::election_behavior election_behavior_a, std::function<void(std::shared_ptr<nano::block> const &)> const & confirmation_action_a) nano::election_insertion_result nano::active_transactions::insert (std::shared_ptr<nano::block> const & block_a, boost::optional<nano::uint128_t> const & previous_balance_a, nano::election_behavior election_behavior_a, std::function<void(std::shared_ptr<nano::block> const &)> const & confirmation_action_a)
{ {
nano::unique_lock<std::mutex> lock (mutex); nano::unique_lock<nano::mutex> lock (mutex);
return insert_impl (lock, block_a, previous_balance_a, election_behavior_a, confirmation_action_a); return insert_impl (lock, block_a, previous_balance_a, election_behavior_a, confirmation_action_a);
} }
@ -873,7 +873,7 @@ nano::vote_code nano::active_transactions::vote (std::shared_ptr<nano::vote> con
unsigned recently_confirmed_counter (0); unsigned recently_confirmed_counter (0);
std::vector<std::pair<std::shared_ptr<nano::election>, nano::block_hash>> process; std::vector<std::pair<std::shared_ptr<nano::election>, nano::block_hash>> process;
{ {
nano::unique_lock<std::mutex> lock (mutex); nano::unique_lock<nano::mutex> lock (mutex);
for (auto vote_block : vote_a->blocks) for (auto vote_block : vote_a->blocks)
{ {
auto & recently_confirmed_by_hash (recently_confirmed.get<tag_hash> ()); auto & recently_confirmed_by_hash (recently_confirmed.get<tag_hash> ());
@ -945,20 +945,20 @@ nano::vote_code nano::active_transactions::vote (std::shared_ptr<nano::vote> con
bool nano::active_transactions::active (nano::qualified_root const & root_a) bool nano::active_transactions::active (nano::qualified_root const & root_a)
{ {
nano::lock_guard<std::mutex> guard (mutex); nano::lock_guard<nano::mutex> lock (mutex);
return roots.get<tag_root> ().find (root_a) != roots.get<tag_root> ().end (); return roots.get<tag_root> ().find (root_a) != roots.get<tag_root> ().end ();
} }
bool nano::active_transactions::active (nano::block const & block_a) bool nano::active_transactions::active (nano::block const & block_a)
{ {
nano::lock_guard<std::mutex> guard (mutex); nano::lock_guard<nano::mutex> guard (mutex);
return roots.get<tag_root> ().find (block_a.qualified_root ()) != roots.get<tag_root> ().end () && blocks.find (block_a.hash ()) != blocks.end (); return roots.get<tag_root> ().find (block_a.qualified_root ()) != roots.get<tag_root> ().end () && blocks.find (block_a.hash ()) != blocks.end ();
} }
std::shared_ptr<nano::election> nano::active_transactions::election (nano::qualified_root const & root_a) const std::shared_ptr<nano::election> nano::active_transactions::election (nano::qualified_root const & root_a) const
{ {
std::shared_ptr<nano::election> result; std::shared_ptr<nano::election> result;
nano::lock_guard<std::mutex> lock (mutex); nano::lock_guard<nano::mutex> lock (mutex);
auto existing = roots.get<tag_root> ().find (root_a); auto existing = roots.get<tag_root> ().find (root_a);
if (existing != roots.get<tag_root> ().end ()) if (existing != roots.get<tag_root> ().end ())
{ {
@ -970,7 +970,7 @@ std::shared_ptr<nano::election> nano::active_transactions::election (nano::quali
std::shared_ptr<nano::block> nano::active_transactions::winner (nano::block_hash const & hash_a) const std::shared_ptr<nano::block> nano::active_transactions::winner (nano::block_hash const & hash_a) const
{ {
std::shared_ptr<nano::block> result; std::shared_ptr<nano::block> result;
nano::unique_lock<std::mutex> lock (mutex); nano::unique_lock<nano::mutex> lock (mutex);
auto existing = blocks.find (hash_a); auto existing = blocks.find (hash_a);
if (existing != blocks.end ()) if (existing != blocks.end ())
{ {
@ -1011,7 +1011,7 @@ nano::election_insertion_result nano::active_transactions::activate (nano::accou
bool nano::active_transactions::update_difficulty (nano::block const & block_a) bool nano::active_transactions::update_difficulty (nano::block const & block_a)
{ {
nano::lock_guard<std::mutex> guard (mutex); nano::lock_guard<nano::mutex> guard (mutex);
auto existing_election (roots.get<tag_root> ().find (block_a.qualified_root ())); auto existing_election (roots.get<tag_root> ().find (block_a.qualified_root ()));
bool error = existing_election == roots.get<tag_root> ().end () || update_difficulty_impl (existing_election, block_a); bool error = existing_election == roots.get<tag_root> ().end () || update_difficulty_impl (existing_election, block_a);
return error; return error;
@ -1116,7 +1116,7 @@ double nano::active_transactions::normalized_multiplier (nano::block const & blo
return multiplier; return multiplier;
} }
void nano::active_transactions::update_active_multiplier (nano::unique_lock<std::mutex> & lock_a) void nano::active_transactions::update_active_multiplier (nano::unique_lock<nano::mutex> & lock_a)
{ {
debug_assert (!mutex.try_lock ()); debug_assert (!mutex.try_lock ());
last_prioritized_multiplier.reset (); last_prioritized_multiplier.reset ();
@ -1188,13 +1188,13 @@ double nano::active_transactions::active_multiplier ()
std::deque<nano::election_status> nano::active_transactions::list_recently_cemented () std::deque<nano::election_status> nano::active_transactions::list_recently_cemented ()
{ {
nano::lock_guard<std::mutex> lock (mutex); nano::lock_guard<nano::mutex> lock (mutex);
return recently_cemented; return recently_cemented;
} }
void nano::active_transactions::add_recently_cemented (nano::election_status const & status_a) void nano::active_transactions::add_recently_cemented (nano::election_status const & status_a)
{ {
nano::lock_guard<std::mutex> guard (mutex); nano::lock_guard<nano::mutex> guard (mutex);
recently_cemented.push_back (status_a); recently_cemented.push_back (status_a);
if (recently_cemented.size () > node.config.confirmation_history_size) if (recently_cemented.size () > node.config.confirmation_history_size)
{ {
@ -1204,7 +1204,7 @@ void nano::active_transactions::add_recently_cemented (nano::election_status con
void nano::active_transactions::add_recently_confirmed (nano::qualified_root const & root_a, nano::block_hash const & hash_a) void nano::active_transactions::add_recently_confirmed (nano::qualified_root const & root_a, nano::block_hash const & hash_a)
{ {
nano::lock_guard<std::mutex> guard (mutex); nano::lock_guard<nano::mutex> guard (mutex);
recently_confirmed.get<tag_sequence> ().emplace_back (root_a, hash_a); recently_confirmed.get<tag_sequence> ().emplace_back (root_a, hash_a);
if (recently_confirmed.size () > recently_confirmed_size) if (recently_confirmed.size () > recently_confirmed_size)
{ {
@ -1214,13 +1214,13 @@ void nano::active_transactions::add_recently_confirmed (nano::qualified_root con
void nano::active_transactions::erase_recently_confirmed (nano::block_hash const & hash_a) void nano::active_transactions::erase_recently_confirmed (nano::block_hash const & hash_a)
{ {
nano::lock_guard<std::mutex> guard (mutex); nano::lock_guard<nano::mutex> guard (mutex);
recently_confirmed.get<tag_hash> ().erase (hash_a); recently_confirmed.get<tag_hash> ().erase (hash_a);
} }
void nano::active_transactions::erase (nano::block const & block_a) void nano::active_transactions::erase (nano::block const & block_a)
{ {
nano::unique_lock<std::mutex> lock (mutex); nano::unique_lock<nano::mutex> lock (mutex);
auto root_it (roots.get<tag_root> ().find (block_a.qualified_root ())); auto root_it (roots.get<tag_root> ().find (block_a.qualified_root ()));
if (root_it != roots.get<tag_root> ().end ()) if (root_it != roots.get<tag_root> ().end ())
{ {
@ -1234,7 +1234,7 @@ void nano::active_transactions::erase (nano::block const & block_a)
void nano::active_transactions::erase (nano::qualified_root const & root_a) void nano::active_transactions::erase (nano::qualified_root const & root_a)
{ {
nano::unique_lock<std::mutex> lock (mutex); nano::unique_lock<nano::mutex> lock (mutex);
auto root_it (roots.get<tag_root> ().find (root_a)); auto root_it (roots.get<tag_root> ().find (root_a));
if (root_it != roots.get<tag_root> ().end ()) if (root_it != roots.get<tag_root> ().end ())
{ {
@ -1246,26 +1246,26 @@ void nano::active_transactions::erase (nano::qualified_root const & root_a)
void nano::active_transactions::erase_hash (nano::block_hash const & hash_a) void nano::active_transactions::erase_hash (nano::block_hash const & hash_a)
{ {
nano::unique_lock<std::mutex> lock (mutex); nano::unique_lock<nano::mutex> lock (mutex);
[[maybe_unused]] auto erased (blocks.erase (hash_a)); [[maybe_unused]] auto erased (blocks.erase (hash_a));
debug_assert (erased == 1); debug_assert (erased == 1);
} }
bool nano::active_transactions::empty () bool nano::active_transactions::empty ()
{ {
nano::lock_guard<std::mutex> lock (mutex); nano::lock_guard<nano::mutex> lock (mutex);
return roots.empty (); return roots.empty ();
} }
size_t nano::active_transactions::size () size_t nano::active_transactions::size ()
{ {
nano::lock_guard<std::mutex> lock (mutex); nano::lock_guard<nano::mutex> lock (mutex);
return roots.size (); return roots.size ();
} }
bool nano::active_transactions::publish (std::shared_ptr<nano::block> const & block_a) bool nano::active_transactions::publish (std::shared_ptr<nano::block> const & block_a)
{ {
nano::unique_lock<std::mutex> lock (mutex); nano::unique_lock<nano::mutex> lock (mutex);
auto existing (roots.get<tag_root> ().find (block_a->qualified_root ())); auto existing (roots.get<tag_root> ().find (block_a->qualified_root ()));
auto result (true); auto result (true);
if (existing != roots.get<tag_root> ().end ()) if (existing != roots.get<tag_root> ().end ())
@ -1291,13 +1291,13 @@ bool nano::active_transactions::publish (std::shared_ptr<nano::block> const & bl
boost::optional<nano::election_status_type> nano::active_transactions::confirm_block (nano::transaction const & transaction_a, std::shared_ptr<nano::block> const & block_a) boost::optional<nano::election_status_type> nano::active_transactions::confirm_block (nano::transaction const & transaction_a, std::shared_ptr<nano::block> const & block_a)
{ {
auto hash (block_a->hash ()); auto hash (block_a->hash ());
nano::unique_lock<std::mutex> lock (mutex); nano::unique_lock<nano::mutex> lock (mutex);
auto existing (blocks.find (hash)); auto existing (blocks.find (hash));
boost::optional<nano::election_status_type> status_type; boost::optional<nano::election_status_type> status_type;
if (existing != blocks.end ()) if (existing != blocks.end ())
{ {
lock.unlock (); lock.unlock ();
nano::unique_lock<std::mutex> election_lock (existing->second->mutex); nano::unique_lock<nano::mutex> election_lock (existing->second->mutex);
if (existing->second->status.winner && existing->second->status.winner->hash () == hash) if (existing->second->status.winner && existing->second->status.winner->hash () == hash)
{ {
if (!existing->second->confirmed ()) if (!existing->second->confirmed ())
@ -1308,7 +1308,7 @@ boost::optional<nano::election_status_type> nano::active_transactions::confirm_b
else else
{ {
#ifndef NDEBUG #ifndef NDEBUG
nano::unique_lock<std::mutex> election_winners_lk (election_winner_details_mutex); nano::unique_lock<nano::mutex> election_winners_lk (election_winner_details_mutex);
debug_assert (election_winner_details.find (hash) != election_winner_details.cend ()); debug_assert (election_winner_details.find (hash) != election_winner_details.cend ());
#endif #endif
status_type = nano::election_status_type::active_confirmed_quorum; status_type = nano::election_status_type::active_confirmed_quorum;
@ -1329,29 +1329,29 @@ boost::optional<nano::election_status_type> nano::active_transactions::confirm_b
size_t nano::active_transactions::priority_cementable_frontiers_size () size_t nano::active_transactions::priority_cementable_frontiers_size ()
{ {
nano::lock_guard<std::mutex> guard (mutex); nano::lock_guard<nano::mutex> guard (mutex);
return priority_cementable_frontiers.size (); return priority_cementable_frontiers.size ();
} }
size_t nano::active_transactions::priority_wallet_cementable_frontiers_size () size_t nano::active_transactions::priority_wallet_cementable_frontiers_size ()
{ {
nano::lock_guard<std::mutex> guard (mutex); nano::lock_guard<nano::mutex> guard (mutex);
return priority_wallet_cementable_frontiers.size (); return priority_wallet_cementable_frontiers.size ();
} }
boost::circular_buffer<double> nano::active_transactions::difficulty_trend () boost::circular_buffer<double> nano::active_transactions::difficulty_trend ()
{ {
nano::lock_guard<std::mutex> guard (mutex); nano::lock_guard<nano::mutex> guard (mutex);
return multipliers_cb; return multipliers_cb;
} }
size_t nano::active_transactions::inactive_votes_cache_size () size_t nano::active_transactions::inactive_votes_cache_size ()
{ {
nano::lock_guard<std::mutex> guard (mutex); nano::lock_guard<nano::mutex> guard (mutex);
return inactive_votes_cache.size (); return inactive_votes_cache.size ();
} }
void nano::active_transactions::add_inactive_votes_cache (nano::unique_lock<std::mutex> & lock_a, nano::block_hash const & hash_a, nano::account const & representative_a) void nano::active_transactions::add_inactive_votes_cache (nano::unique_lock<nano::mutex> & lock_a, nano::block_hash const & hash_a, nano::account const & representative_a)
{ {
// Check principal representative status // Check principal representative status
if (node.ledger.weight (representative_a) > node.minimum_principal_weight ()) if (node.ledger.weight (representative_a) > node.minimum_principal_weight ())
@ -1421,7 +1421,7 @@ void nano::active_transactions::add_inactive_votes_cache (nano::unique_lock<std:
void nano::active_transactions::trigger_inactive_votes_cache_election (std::shared_ptr<nano::block> const & block_a) void nano::active_transactions::trigger_inactive_votes_cache_election (std::shared_ptr<nano::block> const & block_a)
{ {
nano::unique_lock<std::mutex> lock (mutex); nano::unique_lock<nano::mutex> lock (mutex);
auto const status = find_inactive_votes_cache_impl (block_a->hash ()).status; auto const status = find_inactive_votes_cache_impl (block_a->hash ()).status;
if (status.election_started) if (status.election_started)
{ {
@ -1431,7 +1431,7 @@ void nano::active_transactions::trigger_inactive_votes_cache_election (std::shar
nano::inactive_cache_information nano::active_transactions::find_inactive_votes_cache (nano::block_hash const & hash_a) nano::inactive_cache_information nano::active_transactions::find_inactive_votes_cache (nano::block_hash const & hash_a)
{ {
nano::lock_guard<std::mutex> guard (mutex); nano::lock_guard<nano::mutex> guard (mutex);
return find_inactive_votes_cache_impl (hash_a); return find_inactive_votes_cache_impl (hash_a);
} }
@ -1454,14 +1454,14 @@ void nano::active_transactions::erase_inactive_votes_cache (nano::block_hash con
inactive_votes_cache.get<tag_hash> ().erase (hash_a); inactive_votes_cache.get<tag_hash> ().erase (hash_a);
} }
nano::inactive_cache_status nano::active_transactions::inactive_votes_bootstrap_check (nano::unique_lock<std::mutex> & lock_a, nano::account const & voter_a, nano::block_hash const & hash_a, nano::inactive_cache_status const & previously_a) nano::inactive_cache_status nano::active_transactions::inactive_votes_bootstrap_check (nano::unique_lock<nano::mutex> & lock_a, nano::account const & voter_a, nano::block_hash const & hash_a, nano::inactive_cache_status const & previously_a)
{ {
debug_assert (lock_a.owns_lock ()); debug_assert (lock_a.owns_lock ());
lock_a.unlock (); lock_a.unlock ();
return inactive_votes_bootstrap_check_impl (lock_a, node.ledger.weight (voter_a), 1, hash_a, previously_a); return inactive_votes_bootstrap_check_impl (lock_a, node.ledger.weight (voter_a), 1, hash_a, previously_a);
} }
nano::inactive_cache_status nano::active_transactions::inactive_votes_bootstrap_check (nano::unique_lock<std::mutex> & lock_a, std::vector<nano::account> const & voters_a, nano::block_hash const & hash_a, nano::inactive_cache_status const & previously_a) nano::inactive_cache_status nano::active_transactions::inactive_votes_bootstrap_check (nano::unique_lock<nano::mutex> & lock_a, std::vector<nano::account> const & voters_a, nano::block_hash const & hash_a, nano::inactive_cache_status const & previously_a)
{ {
/** Perform checks on accumulated tally from inactive votes /** Perform checks on accumulated tally from inactive votes
* These votes are generally either for unconfirmed blocks or old confirmed blocks * These votes are generally either for unconfirmed blocks or old confirmed blocks
@ -1479,7 +1479,7 @@ nano::inactive_cache_status nano::active_transactions::inactive_votes_bootstrap_
return inactive_votes_bootstrap_check_impl (lock_a, tally, voters_a.size (), hash_a, previously_a); return inactive_votes_bootstrap_check_impl (lock_a, tally, voters_a.size (), hash_a, previously_a);
} }
nano::inactive_cache_status nano::active_transactions::inactive_votes_bootstrap_check_impl (nano::unique_lock<std::mutex> & lock_a, nano::uint128_t const & tally_a, size_t voters_size_a, nano::block_hash const & hash_a, nano::inactive_cache_status const & previously_a) nano::inactive_cache_status nano::active_transactions::inactive_votes_bootstrap_check_impl (nano::unique_lock<nano::mutex> & lock_a, nano::uint128_t const & tally_a, size_t voters_size_a, nano::block_hash const & hash_a, nano::inactive_cache_status const & previously_a)
{ {
debug_assert (!lock_a.owns_lock ()); debug_assert (!lock_a.owns_lock ());
nano::inactive_cache_status status (previously_a); nano::inactive_cache_status status (previously_a);
@ -1532,7 +1532,7 @@ bool nano::purge_singleton_inactive_votes_cache_pool_memory ()
size_t nano::active_transactions::election_winner_details_size () size_t nano::active_transactions::election_winner_details_size ()
{ {
nano::lock_guard<std::mutex> guard (election_winner_details_mutex); nano::lock_guard<nano::mutex> guard (election_winner_details_mutex);
return election_winner_details.size (); return election_winner_details.size ();
} }
@ -1560,7 +1560,7 @@ std::unique_ptr<nano::container_info_component> nano::collect_container_info (ac
size_t recently_cemented_count; size_t recently_cemented_count;
{ {
nano::lock_guard<std::mutex> guard (active_transactions.mutex); nano::lock_guard<nano::mutex> guard (active_transactions.mutex);
roots_count = active_transactions.roots.size (); roots_count = active_transactions.roots.size ();
blocks_count = active_transactions.blocks.size (); blocks_count = active_transactions.blocks.size ();
recently_confirmed_count = active_transactions.recently_confirmed.size (); recently_confirmed_count = active_transactions.recently_confirmed.size ();
@ -1590,7 +1590,7 @@ stats (stats_a)
void nano::dropped_elections::add (nano::qualified_root const & root_a) void nano::dropped_elections::add (nano::qualified_root const & root_a)
{ {
stats.inc (nano::stat::type::election, nano::stat::detail::election_drop); stats.inc (nano::stat::type::election, nano::stat::detail::election_drop);
nano::lock_guard<std::mutex> guard (mutex); nano::lock_guard<nano::mutex> guard (mutex);
auto & items_by_sequence = items.get<tag_sequence> (); auto & items_by_sequence = items.get<tag_sequence> ();
items_by_sequence.emplace_back (nano::election_timepoint{ std::chrono::steady_clock::now (), root_a }); items_by_sequence.emplace_back (nano::election_timepoint{ std::chrono::steady_clock::now (), root_a });
if (items.size () > capacity) if (items.size () > capacity)
@ -1601,13 +1601,13 @@ void nano::dropped_elections::add (nano::qualified_root const & root_a)
void nano::dropped_elections::erase (nano::qualified_root const & root_a) void nano::dropped_elections::erase (nano::qualified_root const & root_a)
{ {
nano::lock_guard<std::mutex> guard (mutex); nano::lock_guard<nano::mutex> guard (mutex);
items.get<tag_root> ().erase (root_a); items.get<tag_root> ().erase (root_a);
} }
std::chrono::steady_clock::time_point nano::dropped_elections::find (nano::qualified_root const & root_a) const std::chrono::steady_clock::time_point nano::dropped_elections::find (nano::qualified_root const & root_a) const
{ {
nano::lock_guard<std::mutex> guard (mutex); nano::lock_guard<nano::mutex> guard (mutex);
auto & items_by_root = items.get<tag_root> (); auto & items_by_root = items.get<tag_root> ();
auto existing (items_by_root.find (root_a)); auto existing (items_by_root.find (root_a));
if (existing != items_by_root.end ()) if (existing != items_by_root.end ())
@ -1622,6 +1622,6 @@ std::chrono::steady_clock::time_point nano::dropped_elections::find (nano::quali
size_t nano::dropped_elections::size () const size_t nano::dropped_elections::size () const
{ {
nano::lock_guard<std::mutex> guard (mutex); nano::lock_guard<nano::mutex> guard (mutex);
return items.size (); return items.size ();
} }
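Note: the change repeated throughout the hunks above swaps std::mutex for nano::mutex constructed with mutex_identifier (mutexes::active), mutex_identifier (mutexes::dropped_elections), and so on, giving each long-lived lock a stable name that the timed-lock instrumentation can report on and that a build-time filter can single out. The stand-alone sketch below only approximates that idea; the enum values, the 100 ms threshold and the printf reporting are illustrative assumptions, and the real wrapper in nano/lib/locks.hpp differs in detail.

// Rough sketch of an identified, timing-aware mutex wrapper. Illustrative only:
// the enum values, threshold and reporting below are assumptions, not the
// actual nano/lib/locks.hpp implementation.
#include <chrono>
#include <cstdio>
#include <mutex>

namespace demo
{
enum class mutexes
{
    active,
    block_processor,
    dropped_elections
};

char const * mutex_identifier (mutexes id)
{
    switch (id)
    {
        case mutexes::active:
            return "active";
        case mutexes::block_processor:
            return "block_processor";
        case mutexes::dropped_elections:
            return "dropped_elections";
    }
    return "unknown";
}

class mutex
{
public:
    mutex () = default;
    explicit mutex (char const * name_a) :
    name (name_a)
    {
    }
    void lock ()
    {
        auto started = std::chrono::steady_clock::now ();
        impl.lock ();
        auto waited_ms = std::chrono::duration_cast<std::chrono::milliseconds> (std::chrono::steady_clock::now () - started).count ();
        if (name != nullptr && waited_ms > 100) // hypothetical 100 ms threshold
        {
            std::printf ("mutex %s blocked for %lld ms\n", name, static_cast<long long> (waited_ms));
        }
    }
    void unlock ()
    {
        impl.unlock ();
    }

private:
    std::mutex impl;
    char const * name{ nullptr };
};
}

int main ()
{
    // Mirrors the shape of the members added in this commit, e.g.
    // nano::mutex mutex{ mutex_identifier (mutexes::active) };
    demo::mutex m{ demo::mutex_identifier (demo::mutexes::active) };
    std::lock_guard<demo::mutex> guard (m); // demo::mutex satisfies BasicLockable
    return 0;
}

The point is only the shape of the identifier-carrying constructor; the locking call sites themselves simply switch from the std wrappers to nano::lock_guard and nano::unique_lock, as the hunks above show.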
View file
@ -128,7 +128,8 @@ public:
private: private:
ordered_dropped items; ordered_dropped items;
mutable std::mutex mutex; mutable nano::mutex mutex{ mutex_identifier (mutexes::dropped_elections) };
nano::stat & stats; nano::stat & stats;
}; };
@ -203,7 +204,7 @@ public:
// Returns a list of elections sorted by difficulty // Returns a list of elections sorted by difficulty
std::vector<std::shared_ptr<nano::election>> list_active (size_t = std::numeric_limits<size_t>::max ()); std::vector<std::shared_ptr<nano::election>> list_active (size_t = std::numeric_limits<size_t>::max ());
double normalized_multiplier (nano::block const &, boost::optional<roots_iterator> const & = boost::none) const; double normalized_multiplier (nano::block const &, boost::optional<roots_iterator> const & = boost::none) const;
void update_active_multiplier (nano::unique_lock<std::mutex> &); void update_active_multiplier (nano::unique_lock<nano::mutex> &);
uint64_t active_difficulty (); uint64_t active_difficulty ();
uint64_t limited_active_difficulty (nano::block const &); uint64_t limited_active_difficulty (nano::block const &);
uint64_t limited_active_difficulty (nano::work_version const, uint64_t const); uint64_t limited_active_difficulty (nano::work_version const, uint64_t const);
@ -226,14 +227,14 @@ public:
void add_recently_cemented (nano::election_status const &); void add_recently_cemented (nano::election_status const &);
void add_recently_confirmed (nano::qualified_root const &, nano::block_hash const &); void add_recently_confirmed (nano::qualified_root const &, nano::block_hash const &);
void erase_recently_confirmed (nano::block_hash const &); void erase_recently_confirmed (nano::block_hash const &);
void add_inactive_votes_cache (nano::unique_lock<std::mutex> &, nano::block_hash const &, nano::account const &); void add_inactive_votes_cache (nano::unique_lock<nano::mutex> &, nano::block_hash const &, nano::account const &);
// Inserts an election if conditions are met // Inserts an election if conditions are met
void trigger_inactive_votes_cache_election (std::shared_ptr<nano::block> const &); void trigger_inactive_votes_cache_election (std::shared_ptr<nano::block> const &);
nano::inactive_cache_information find_inactive_votes_cache (nano::block_hash const &); nano::inactive_cache_information find_inactive_votes_cache (nano::block_hash const &);
void erase_inactive_votes_cache (nano::block_hash const &); void erase_inactive_votes_cache (nano::block_hash const &);
nano::confirmation_height_processor & confirmation_height_processor; nano::confirmation_height_processor & confirmation_height_processor;
nano::node & node; nano::node & node;
mutable std::mutex mutex; mutable nano::mutex mutex{ mutex_identifier (mutexes::active) };
boost::circular_buffer<double> multipliers_cb; boost::circular_buffer<double> multipliers_cb;
std::atomic<double> trended_active_multiplier; std::atomic<double> trended_active_multiplier;
size_t priority_cementable_frontiers_size (); size_t priority_cementable_frontiers_size ();
@ -262,20 +263,21 @@ public:
// clang-format on // clang-format on
private: private:
std::mutex election_winner_details_mutex; nano::mutex election_winner_details_mutex{ mutex_identifier (mutexes::election_winner_details) };
std::unordered_map<nano::block_hash, std::shared_ptr<nano::election>> election_winner_details; std::unordered_map<nano::block_hash, std::shared_ptr<nano::election>> election_winner_details;
// Call action with confirmed block, may be different than what we started with // Call action with confirmed block, may be different than what we started with
// clang-format off // clang-format off
nano::election_insertion_result insert_impl (nano::unique_lock<std::mutex> &, std::shared_ptr<nano::block> const&, boost::optional<nano::uint128_t> const & = boost::none, nano::election_behavior = nano::election_behavior::normal, std::function<void(std::shared_ptr<nano::block>const&)> const & = nullptr); nano::election_insertion_result insert_impl (nano::unique_lock<nano::mutex> &, std::shared_ptr<nano::block> const&, boost::optional<nano::uint128_t> const & = boost::none, nano::election_behavior = nano::election_behavior::normal, std::function<void(std::shared_ptr<nano::block>const&)> const & = nullptr);
// clang-format on // clang-format on
// Returns false if the election difficulty was updated // Returns false if the election difficulty was updated
bool update_difficulty_impl (roots_iterator const &, nano::block const &); bool update_difficulty_impl (roots_iterator const &, nano::block const &);
void request_loop (); void request_loop ();
void request_confirm (nano::unique_lock<std::mutex> &); void request_confirm (nano::unique_lock<nano::mutex> &);
void erase (nano::qualified_root const &); void erase (nano::qualified_root const &);
// Erase all blocks from active and, if not confirmed, clear digests from network filters // Erase all blocks from active and, if not confirmed, clear digests from network filters
void cleanup_election (nano::unique_lock<std::mutex> &, nano::election_cleanup_info const &); void cleanup_election (nano::unique_lock<nano::mutex> &, nano::election_cleanup_info const &);
// Returns a list of elections sorted by difficulty, mutex must be locked // Returns a list of elections sorted by difficulty, mutex must be locked
std::vector<std::shared_ptr<nano::election>> list_active_impl (size_t) const; std::vector<std::shared_ptr<nano::election>> list_active_impl (size_t) const;
@ -328,7 +330,7 @@ private:
nano::frontiers_confirmation_info get_frontiers_confirmation_info (); nano::frontiers_confirmation_info get_frontiers_confirmation_info ();
void confirm_prioritized_frontiers (nano::transaction const &, uint64_t, uint64_t &); void confirm_prioritized_frontiers (nano::transaction const &, uint64_t, uint64_t &);
void confirm_expired_frontiers_pessimistically (nano::transaction const &, uint64_t, uint64_t &); void confirm_expired_frontiers_pessimistically (nano::transaction const &, uint64_t, uint64_t &);
void frontiers_confirmation (nano::unique_lock<std::mutex> &); void frontiers_confirmation (nano::unique_lock<nano::mutex> &);
bool insert_election_from_frontiers_confirmation (std::shared_ptr<nano::block> const &, nano::account const &, nano::uint128_t, nano::election_behavior); bool insert_election_from_frontiers_confirmation (std::shared_ptr<nano::block> const &, nano::account const &, nano::uint128_t, nano::election_behavior);
nano::account next_frontier_account{ 0 }; nano::account next_frontier_account{ 0 };
std::chrono::steady_clock::time_point next_frontier_check{ std::chrono::steady_clock::now () }; std::chrono::steady_clock::time_point next_frontier_check{ std::chrono::steady_clock::now () };
@ -349,9 +351,9 @@ private:
static size_t constexpr confirmed_frontiers_max_pending_size{ 10000 }; static size_t constexpr confirmed_frontiers_max_pending_size{ 10000 };
static std::chrono::minutes constexpr expired_optimistic_election_info_cutoff{ 30 }; static std::chrono::minutes constexpr expired_optimistic_election_info_cutoff{ 30 };
ordered_cache inactive_votes_cache; ordered_cache inactive_votes_cache;
nano::inactive_cache_status inactive_votes_bootstrap_check (nano::unique_lock<std::mutex> &, std::vector<nano::account> const &, nano::block_hash const &, nano::inactive_cache_status const &); nano::inactive_cache_status inactive_votes_bootstrap_check (nano::unique_lock<nano::mutex> &, std::vector<nano::account> const &, nano::block_hash const &, nano::inactive_cache_status const &);
nano::inactive_cache_status inactive_votes_bootstrap_check (nano::unique_lock<std::mutex> &, nano::account const &, nano::block_hash const &, nano::inactive_cache_status const &); nano::inactive_cache_status inactive_votes_bootstrap_check (nano::unique_lock<nano::mutex> &, nano::account const &, nano::block_hash const &, nano::inactive_cache_status const &);
nano::inactive_cache_status inactive_votes_bootstrap_check_impl (nano::unique_lock<std::mutex> &, nano::uint128_t const &, size_t, nano::block_hash const &, nano::inactive_cache_status const &); nano::inactive_cache_status inactive_votes_bootstrap_check_impl (nano::unique_lock<nano::mutex> &, nano::uint128_t const &, size_t, nano::block_hash const &, nano::inactive_cache_status const &);
nano::inactive_cache_information find_inactive_votes_cache_impl (nano::block_hash const &); nano::inactive_cache_information find_inactive_votes_cache_impl (nano::block_hash const &);
boost::thread thread; boost::thread thread;
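Note: the header changes above give each instrumented mutex a distinct identifier (mutexes::active, mutexes::election_winner_details, mutexes::dropped_elections). One plausible way a single name supplied at configure time through NANO_TIMED_LOCKS_FILTER could be matched against those identifiers is a stringified-macro comparison at report time; the sketch below is an assumption for illustration only, not the project's actual logic.

// Hypothetical filter check (illustration only). NANO_TIMED_LOCKS_FILTER is
// assumed to arrive from the build system as a bare token, e.g. active; an
// empty definition means "report every mutex".
#include <cstring>
#include <iostream>

#define DEMO_STR_IMPL(x) #x
#define DEMO_STR(x) DEMO_STR_IMPL (x)

#ifndef NANO_TIMED_LOCKS_FILTER
#define NANO_TIMED_LOCKS_FILTER
#endif

bool should_report (char const * mutex_name)
{
    static char const * filter = DEMO_STR (NANO_TIMED_LOCKS_FILTER);
    return filter[0] == '\0' || std::strcmp (filter, mutex_name) == 0;
}

int main ()
{
    std::cout << "report active? " << should_report ("active") << '\n';
    std::cout << "report network_filter? " << should_report ("network_filter") << '\n';
    return 0;
}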
View file
@ -39,7 +39,7 @@ state_block_signature_verification (node.checker, node.ledger.network_params.led
{ {
{ {
// Prevent a race with condition.wait in block_processor::flush // Prevent a race with condition.wait in block_processor::flush
nano::lock_guard<std::mutex> guard (this->mutex); nano::lock_guard<nano::mutex> guard (this->mutex);
} }
this->condition.notify_all (); this->condition.notify_all ();
} }
@ -54,7 +54,7 @@ nano::block_processor::~block_processor ()
void nano::block_processor::stop () void nano::block_processor::stop ()
{ {
{ {
nano::lock_guard<std::mutex> lock (mutex); nano::lock_guard<nano::mutex> lock (mutex);
stopped = true; stopped = true;
} }
condition.notify_all (); condition.notify_all ();
@ -65,7 +65,7 @@ void nano::block_processor::flush ()
{ {
node.checker.flush (); node.checker.flush ();
flushing = true; flushing = true;
nano::unique_lock<std::mutex> lock (mutex); nano::unique_lock<nano::mutex> lock (mutex);
while (!stopped && (have_blocks () || active || state_block_signature_verification.is_active ())) while (!stopped && (have_blocks () || active || state_block_signature_verification.is_active ()))
{ {
condition.wait (lock); condition.wait (lock);
@ -75,7 +75,7 @@ void nano::block_processor::flush ()
size_t nano::block_processor::size () size_t nano::block_processor::size ()
{ {
nano::unique_lock<std::mutex> lock (mutex); nano::unique_lock<nano::mutex> lock (mutex);
return (blocks.size () + state_block_signature_verification.size () + forced.size ()); return (blocks.size () + state_block_signature_verification.size () + forced.size ());
} }
@ -109,7 +109,7 @@ void nano::block_processor::add (nano::unchecked_info const & info_a, const bool
It's designed to help with realtime blocks traffic if block processor is not performing large task like bootstrap. It's designed to help with realtime blocks traffic if block processor is not performing large task like bootstrap.
If deque is a quarter full then push back to allow other blocks processing. */ If deque is a quarter full then push back to allow other blocks processing. */
{ {
nano::lock_guard<std::mutex> guard (mutex); nano::lock_guard<nano::mutex> guard (mutex);
blocks.emplace_front (info_a, false); blocks.emplace_front (info_a, false);
} }
condition.notify_all (); condition.notify_all ();
@ -117,7 +117,7 @@ void nano::block_processor::add (nano::unchecked_info const & info_a, const bool
else else
{ {
{ {
nano::lock_guard<std::mutex> guard (mutex); nano::lock_guard<nano::mutex> guard (mutex);
blocks.emplace_front (info_a, false); blocks.emplace_front (info_a, false);
} }
condition.notify_all (); condition.notify_all ();
@ -134,7 +134,7 @@ void nano::block_processor::add_local (nano::unchecked_info const & info_a, bool
void nano::block_processor::force (std::shared_ptr<nano::block> const & block_a) void nano::block_processor::force (std::shared_ptr<nano::block> const & block_a)
{ {
{ {
nano::lock_guard<std::mutex> lock (mutex); nano::lock_guard<nano::mutex> lock (mutex);
forced.push_back (block_a); forced.push_back (block_a);
} }
condition.notify_all (); condition.notify_all ();
@ -143,7 +143,7 @@ void nano::block_processor::force (std::shared_ptr<nano::block> const & block_a)
void nano::block_processor::update (std::shared_ptr<nano::block> const & block_a) void nano::block_processor::update (std::shared_ptr<nano::block> const & block_a)
{ {
{ {
nano::lock_guard<std::mutex> lock (mutex); nano::lock_guard<nano::mutex> lock (mutex);
updates.push_back (block_a); updates.push_back (block_a);
} }
condition.notify_all (); condition.notify_all ();
@ -151,13 +151,13 @@ void nano::block_processor::update (std::shared_ptr<nano::block> const & block_a
void nano::block_processor::wait_write () void nano::block_processor::wait_write ()
{ {
nano::lock_guard<std::mutex> lock (mutex); nano::lock_guard<nano::mutex> lock (mutex);
awaiting_write = true; awaiting_write = true;
} }
void nano::block_processor::process_blocks () void nano::block_processor::process_blocks ()
{ {
nano::unique_lock<std::mutex> lock (mutex); nano::unique_lock<nano::mutex> lock (mutex);
while (!stopped) while (!stopped)
{ {
if (have_blocks_ready ()) if (have_blocks_ready ())
@ -203,7 +203,7 @@ bool nano::block_processor::have_blocks ()
void nano::block_processor::process_verified_state_blocks (std::deque<std::pair<nano::unchecked_info, bool>> & items, std::vector<int> const & verifications, std::vector<nano::block_hash> const & hashes, std::vector<nano::signature> const & blocks_signatures) void nano::block_processor::process_verified_state_blocks (std::deque<std::pair<nano::unchecked_info, bool>> & items, std::vector<int> const & verifications, std::vector<nano::block_hash> const & hashes, std::vector<nano::signature> const & blocks_signatures)
{ {
{ {
nano::unique_lock<std::mutex> lk (mutex); nano::unique_lock<nano::mutex> lk (mutex);
for (auto i (0); i < verifications.size (); ++i) for (auto i (0); i < verifications.size (); ++i)
{ {
debug_assert (verifications[i] == 1 || verifications[i] == 0); debug_assert (verifications[i] == 1 || verifications[i] == 0);
@ -239,7 +239,7 @@ void nano::block_processor::process_verified_state_blocks (std::deque<std::pair<
condition.notify_all (); condition.notify_all ();
} }
void nano::block_processor::process_batch (nano::unique_lock<std::mutex> & lock_a) void nano::block_processor::process_batch (nano::unique_lock<nano::mutex> & lock_a)
{ {
auto scoped_write_guard = write_database_queue.wait (nano::writer::process_batch); auto scoped_write_guard = write_database_queue.wait (nano::writer::process_batch);
block_post_events post_events ([& store = node.store] { return store.tx_begin_read (); }); block_post_events post_events ([& store = node.store] { return store.tx_begin_read (); });
@ -567,7 +567,7 @@ std::unique_ptr<nano::container_info_component> nano::collect_container_info (bl
size_t forced_count; size_t forced_count;
{ {
nano::lock_guard<std::mutex> guard (block_processor.mutex); nano::lock_guard<nano::mutex> guard (block_processor.mutex);
blocks_count = block_processor.blocks.size (); blocks_count = block_processor.blocks.size ();
forced_count = block_processor.forced.size (); forced_count = block_processor.forced.size ();
} }
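Note: the "Prevent a race with condition.wait in block_processor::flush" comment above, and the empty lock_guard scope it sits in, guard against a classic missed wakeup: without synchronizing on the mutex, notify_all can fire after a waiter has evaluated its predicate but before it has entered condition.wait, leaving it blocked. A minimal stand-alone illustration of the same pattern using standard types (names are illustrative):

// Minimal illustration of the lock-then-notify pattern; names are illustrative.
#include <atomic>
#include <condition_variable>
#include <mutex>
#include <thread>

std::mutex mtx;
std::condition_variable cv;
std::atomic<bool> done{ false };

void signaller ()
{
    done = true; // the flag itself is atomic, not guarded by mtx
    {
        // Empty critical section: serializes with the waiter so the notify
        // cannot slip between its predicate check and its call to wait.
        std::lock_guard<std::mutex> guard (mtx);
    }
    cv.notify_all ();
}

void waiter ()
{
    std::unique_lock<std::mutex> lock (mtx);
    cv.wait (lock, [] { return done.load (); });
}

int main ()
{
    std::thread t (waiter);
    signaller ();
    t.join ();
    return 0;
}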
View file
@ -71,7 +71,7 @@ public:
private: private:
void queue_unchecked (nano::write_transaction const &, nano::block_hash const &); void queue_unchecked (nano::write_transaction const &, nano::block_hash const &);
void process_batch (nano::unique_lock<std::mutex> &); void process_batch (nano::unique_lock<nano::mutex> &);
void process_live (nano::transaction const &, nano::block_hash const &, std::shared_ptr<nano::block> const &, nano::process_return const &, const bool = false, nano::block_origin const = nano::block_origin::remote); void process_live (nano::transaction const &, nano::block_hash const &, std::shared_ptr<nano::block> const &, nano::process_return const &, const bool = false, nano::block_origin const = nano::block_origin::remote);
void process_old (nano::transaction const &, std::shared_ptr<nano::block> const &, nano::block_origin const); void process_old (nano::transaction const &, std::shared_ptr<nano::block> const &, nano::block_origin const);
void requeue_invalid (nano::block_hash const &, nano::unchecked_info const &); void requeue_invalid (nano::block_hash const &, nano::unchecked_info const &);
@ -86,7 +86,7 @@ private:
nano::condition_variable condition; nano::condition_variable condition;
nano::node & node; nano::node & node;
nano::write_database_queue & write_database_queue; nano::write_database_queue & write_database_queue;
std::mutex mutex; nano::mutex mutex{ mutex_identifier (mutexes::block_processor) };
nano::state_block_signature_verification state_block_signature_verification; nano::state_block_signature_verification state_block_signature_verification;
friend std::unique_ptr<container_info_component> collect_container_info (block_processor & block_processor, std::string const & name); friend std::unique_ptr<container_info_component> collect_container_info (block_processor & block_processor, std::string const & name);
View file
@ -37,7 +37,7 @@ void nano::bootstrap_initiator::bootstrap (bool force, std::string id_a)
{ {
stop_attempts (); stop_attempts ();
} }
nano::unique_lock<std::mutex> lock (mutex); nano::unique_lock<nano::mutex> lock (mutex);
if (!stopped && find_attempt (nano::bootstrap_mode::legacy) == nullptr) if (!stopped && find_attempt (nano::bootstrap_mode::legacy) == nullptr)
{ {
node.stats.inc (nano::stat::type::bootstrap, nano::stat::detail::initiate, nano::stat::dir::out); node.stats.inc (nano::stat::type::bootstrap, nano::stat::detail::initiate, nano::stat::dir::out);
@ -66,7 +66,7 @@ void nano::bootstrap_initiator::bootstrap (nano::endpoint const & endpoint_a, bo
{ {
stop_attempts (); stop_attempts ();
node.stats.inc (nano::stat::type::bootstrap, nano::stat::detail::initiate, nano::stat::dir::out); node.stats.inc (nano::stat::type::bootstrap, nano::stat::detail::initiate, nano::stat::dir::out);
nano::lock_guard<std::mutex> lock (mutex); nano::lock_guard<nano::mutex> lock (mutex);
auto legacy_attempt (std::make_shared<nano::bootstrap_attempt_legacy> (node.shared (), attempts.incremental++, id_a)); auto legacy_attempt (std::make_shared<nano::bootstrap_attempt_legacy> (node.shared (), attempts.incremental++, id_a));
attempts_list.push_back (legacy_attempt); attempts_list.push_back (legacy_attempt);
attempts.add (legacy_attempt); attempts.add (legacy_attempt);
@ -93,7 +93,7 @@ void nano::bootstrap_initiator::bootstrap_lazy (nano::hash_or_account const & ha
stop_attempts (); stop_attempts ();
} }
node.stats.inc (nano::stat::type::bootstrap, nano::stat::detail::initiate_lazy, nano::stat::dir::out); node.stats.inc (nano::stat::type::bootstrap, nano::stat::detail::initiate_lazy, nano::stat::dir::out);
nano::lock_guard<std::mutex> lock (mutex); nano::lock_guard<nano::mutex> lock (mutex);
if (!stopped && find_attempt (nano::bootstrap_mode::lazy) == nullptr) if (!stopped && find_attempt (nano::bootstrap_mode::lazy) == nullptr)
{ {
lazy_attempt = std::make_shared<nano::bootstrap_attempt_lazy> (node.shared (), attempts.incremental++, id_a.empty () ? hash_or_account_a.to_string () : id_a); lazy_attempt = std::make_shared<nano::bootstrap_attempt_lazy> (node.shared (), attempts.incremental++, id_a.empty () ? hash_or_account_a.to_string () : id_a);
@ -116,7 +116,7 @@ void nano::bootstrap_initiator::bootstrap_wallet (std::deque<nano::account> & ac
node.stats.inc (nano::stat::type::bootstrap, nano::stat::detail::initiate_wallet_lazy, nano::stat::dir::out); node.stats.inc (nano::stat::type::bootstrap, nano::stat::detail::initiate_wallet_lazy, nano::stat::dir::out);
if (wallet_attempt == nullptr) if (wallet_attempt == nullptr)
{ {
nano::lock_guard<std::mutex> lock (mutex); nano::lock_guard<nano::mutex> lock (mutex);
std::string id (!accounts_a.empty () ? accounts_a[0].to_account () : ""); std::string id (!accounts_a.empty () ? accounts_a[0].to_account () : "");
wallet_attempt = std::make_shared<nano::bootstrap_attempt_wallet> (node.shared (), attempts.incremental++, id); wallet_attempt = std::make_shared<nano::bootstrap_attempt_wallet> (node.shared (), attempts.incremental++, id);
attempts_list.push_back (wallet_attempt); attempts_list.push_back (wallet_attempt);
@ -132,7 +132,7 @@ void nano::bootstrap_initiator::bootstrap_wallet (std::deque<nano::account> & ac
void nano::bootstrap_initiator::run_bootstrap () void nano::bootstrap_initiator::run_bootstrap ()
{ {
nano::unique_lock<std::mutex> lock (mutex); nano::unique_lock<nano::mutex> lock (mutex);
while (!stopped) while (!stopped)
{ {
if (has_new_attempts ()) if (has_new_attempts ())
@ -164,13 +164,13 @@ void nano::bootstrap_initiator::lazy_requeue (nano::block_hash const & hash_a, n
void nano::bootstrap_initiator::add_observer (std::function<void(bool)> const & observer_a) void nano::bootstrap_initiator::add_observer (std::function<void(bool)> const & observer_a)
{ {
nano::lock_guard<std::mutex> lock (observers_mutex); nano::lock_guard<nano::mutex> lock (observers_mutex);
observers.push_back (observer_a); observers.push_back (observer_a);
} }
bool nano::bootstrap_initiator::in_progress () bool nano::bootstrap_initiator::in_progress ()
{ {
nano::lock_guard<std::mutex> lock (mutex); nano::lock_guard<nano::mutex> lock (mutex);
return !attempts_list.empty (); return !attempts_list.empty ();
} }
@ -188,7 +188,7 @@ std::shared_ptr<nano::bootstrap_attempt> nano::bootstrap_initiator::find_attempt
void nano::bootstrap_initiator::remove_attempt (std::shared_ptr<nano::bootstrap_attempt> attempt_a) void nano::bootstrap_initiator::remove_attempt (std::shared_ptr<nano::bootstrap_attempt> attempt_a)
{ {
nano::unique_lock<std::mutex> lock (mutex); nano::unique_lock<nano::mutex> lock (mutex);
auto attempt (std::find (attempts_list.begin (), attempts_list.end (), attempt_a)); auto attempt (std::find (attempts_list.begin (), attempts_list.end (), attempt_a));
if (attempt != attempts_list.end ()) if (attempt != attempts_list.end ())
{ {
@ -232,25 +232,25 @@ bool nano::bootstrap_initiator::has_new_attempts ()
std::shared_ptr<nano::bootstrap_attempt> nano::bootstrap_initiator::current_attempt () std::shared_ptr<nano::bootstrap_attempt> nano::bootstrap_initiator::current_attempt ()
{ {
nano::lock_guard<std::mutex> lock (mutex); nano::lock_guard<nano::mutex> lock (mutex);
return find_attempt (nano::bootstrap_mode::legacy); return find_attempt (nano::bootstrap_mode::legacy);
} }
std::shared_ptr<nano::bootstrap_attempt> nano::bootstrap_initiator::current_lazy_attempt () std::shared_ptr<nano::bootstrap_attempt> nano::bootstrap_initiator::current_lazy_attempt ()
{ {
nano::lock_guard<std::mutex> lock (mutex); nano::lock_guard<nano::mutex> lock (mutex);
return find_attempt (nano::bootstrap_mode::lazy); return find_attempt (nano::bootstrap_mode::lazy);
} }
std::shared_ptr<nano::bootstrap_attempt> nano::bootstrap_initiator::current_wallet_attempt () std::shared_ptr<nano::bootstrap_attempt> nano::bootstrap_initiator::current_wallet_attempt ()
{ {
nano::lock_guard<std::mutex> lock (mutex); nano::lock_guard<nano::mutex> lock (mutex);
return find_attempt (nano::bootstrap_mode::wallet_lazy); return find_attempt (nano::bootstrap_mode::wallet_lazy);
} }
void nano::bootstrap_initiator::stop_attempts () void nano::bootstrap_initiator::stop_attempts ()
{ {
nano::unique_lock<std::mutex> lock (mutex); nano::unique_lock<nano::mutex> lock (mutex);
std::vector<std::shared_ptr<nano::bootstrap_attempt>> copy_attempts; std::vector<std::shared_ptr<nano::bootstrap_attempt>> copy_attempts;
copy_attempts.swap (attempts_list); copy_attempts.swap (attempts_list);
attempts.clear (); attempts.clear ();
@ -281,7 +281,7 @@ void nano::bootstrap_initiator::stop ()
void nano::bootstrap_initiator::notify_listeners (bool in_progress_a) void nano::bootstrap_initiator::notify_listeners (bool in_progress_a)
{ {
nano::lock_guard<std::mutex> lock (observers_mutex); nano::lock_guard<nano::mutex> lock (observers_mutex);
for (auto & i : observers) for (auto & i : observers)
{ {
i (in_progress_a); i (in_progress_a);
@ -293,11 +293,11 @@ std::unique_ptr<nano::container_info_component> nano::collect_container_info (bo
size_t count; size_t count;
size_t cache_count; size_t cache_count;
{ {
nano::lock_guard<std::mutex> guard (bootstrap_initiator.observers_mutex); nano::lock_guard<nano::mutex> guard (bootstrap_initiator.observers_mutex);
count = bootstrap_initiator.observers.size (); count = bootstrap_initiator.observers.size ();
} }
{ {
nano::lock_guard<std::mutex> guard (bootstrap_initiator.cache.pulls_cache_mutex); nano::lock_guard<nano::mutex> guard (bootstrap_initiator.cache.pulls_cache_mutex);
cache_count = bootstrap_initiator.cache.cache.size (); cache_count = bootstrap_initiator.cache.cache.size ();
} }
@ -313,7 +313,7 @@ void nano::pulls_cache::add (nano::pull_info const & pull_a)
{ {
if (pull_a.processed > 500) if (pull_a.processed > 500)
{ {
nano::lock_guard<std::mutex> guard (pulls_cache_mutex); nano::lock_guard<nano::mutex> guard (pulls_cache_mutex);
// Clean old pull // Clean old pull
if (cache.size () > cache_size_max) if (cache.size () > cache_size_max)
{ {
@ -342,7 +342,7 @@ void nano::pulls_cache::add (nano::pull_info const & pull_a)
void nano::pulls_cache::update_pull (nano::pull_info & pull_a) void nano::pulls_cache::update_pull (nano::pull_info & pull_a)
{ {
nano::lock_guard<std::mutex> guard (pulls_cache_mutex); nano::lock_guard<nano::mutex> guard (pulls_cache_mutex);
nano::uint512_union head_512 (pull_a.account_or_head, pull_a.head_original); nano::uint512_union head_512 (pull_a.account_or_head, pull_a.head_original);
auto existing (cache.get<account_head_tag> ().find (head_512)); auto existing (cache.get<account_head_tag> ().find (head_512));
if (existing != cache.get<account_head_tag> ().end ()) if (existing != cache.get<account_head_tag> ().end ())
@ -353,32 +353,32 @@ void nano::pulls_cache::update_pull (nano::pull_info & pull_a)
void nano::pulls_cache::remove (nano::pull_info const & pull_a) void nano::pulls_cache::remove (nano::pull_info const & pull_a)
{ {
nano::lock_guard<std::mutex> guard (pulls_cache_mutex); nano::lock_guard<nano::mutex> guard (pulls_cache_mutex);
nano::uint512_union head_512 (pull_a.account_or_head, pull_a.head_original); nano::uint512_union head_512 (pull_a.account_or_head, pull_a.head_original);
cache.get<account_head_tag> ().erase (head_512); cache.get<account_head_tag> ().erase (head_512);
} }
void nano::bootstrap_attempts::add (std::shared_ptr<nano::bootstrap_attempt> attempt_a) void nano::bootstrap_attempts::add (std::shared_ptr<nano::bootstrap_attempt> attempt_a)
{ {
nano::lock_guard<std::mutex> lock (bootstrap_attempts_mutex); nano::lock_guard<nano::mutex> lock (bootstrap_attempts_mutex);
attempts.emplace (attempt_a->incremental_id, attempt_a); attempts.emplace (attempt_a->incremental_id, attempt_a);
} }
void nano::bootstrap_attempts::remove (uint64_t incremental_id_a) void nano::bootstrap_attempts::remove (uint64_t incremental_id_a)
{ {
nano::lock_guard<std::mutex> lock (bootstrap_attempts_mutex); nano::lock_guard<nano::mutex> lock (bootstrap_attempts_mutex);
attempts.erase (incremental_id_a); attempts.erase (incremental_id_a);
} }
void nano::bootstrap_attempts::clear () void nano::bootstrap_attempts::clear ()
{ {
nano::lock_guard<std::mutex> lock (bootstrap_attempts_mutex); nano::lock_guard<nano::mutex> lock (bootstrap_attempts_mutex);
attempts.clear (); attempts.clear ();
} }
std::shared_ptr<nano::bootstrap_attempt> nano::bootstrap_attempts::find (uint64_t incremental_id_a) std::shared_ptr<nano::bootstrap_attempt> nano::bootstrap_attempts::find (uint64_t incremental_id_a)
{ {
nano::lock_guard<std::mutex> lock (bootstrap_attempts_mutex); nano::lock_guard<nano::mutex> lock (bootstrap_attempts_mutex);
auto find_attempt (attempts.find (incremental_id_a)); auto find_attempt (attempts.find (incremental_id_a));
if (find_attempt != attempts.end ()) if (find_attempt != attempts.end ())
{ {
@ -392,6 +392,6 @@ std::shared_ptr<nano::bootstrap_attempt> nano::bootstrap_attempts::find (uint64_
size_t nano::bootstrap_attempts::size () size_t nano::bootstrap_attempts::size ()
{ {
nano::lock_guard<std::mutex> lock (bootstrap_attempts_mutex); nano::lock_guard<nano::mutex> lock (bootstrap_attempts_mutex);
return attempts.size (); return attempts.size ();
} }
View file
@ -48,7 +48,7 @@ public:
void add (nano::pull_info const &); void add (nano::pull_info const &);
void update_pull (nano::pull_info &); void update_pull (nano::pull_info &);
void remove (nano::pull_info const &); void remove (nano::pull_info const &);
std::mutex pulls_cache_mutex; nano::mutex pulls_cache_mutex;
class account_head_tag class account_head_tag
{ {
}; };
@ -72,7 +72,7 @@ public:
std::shared_ptr<nano::bootstrap_attempt> find (uint64_t); std::shared_ptr<nano::bootstrap_attempt> find (uint64_t);
size_t size (); size_t size ();
std::atomic<uint64_t> incremental{ 0 }; std::atomic<uint64_t> incremental{ 0 };
std::mutex bootstrap_attempts_mutex; nano::mutex bootstrap_attempts_mutex;
std::map<uint64_t, std::shared_ptr<nano::bootstrap_attempt>> attempts; std::map<uint64_t, std::shared_ptr<nano::bootstrap_attempt>> attempts;
}; };
@ -107,9 +107,9 @@ private:
void stop_attempts (); void stop_attempts ();
std::vector<std::shared_ptr<nano::bootstrap_attempt>> attempts_list; std::vector<std::shared_ptr<nano::bootstrap_attempt>> attempts_list;
std::atomic<bool> stopped{ false }; std::atomic<bool> stopped{ false };
std::mutex mutex; nano::mutex mutex;
nano::condition_variable condition; nano::condition_variable condition;
std::mutex observers_mutex; nano::mutex observers_mutex;
std::vector<std::function<void(bool)>> observers; std::vector<std::function<void(bool)>> observers;
std::vector<boost::thread> bootstrap_initiator_threads; std::vector<boost::thread> bootstrap_initiator_threads;
View file
@ -51,7 +51,7 @@ nano::bootstrap_attempt::~bootstrap_attempt ()
bool nano::bootstrap_attempt::should_log () bool nano::bootstrap_attempt::should_log ()
{ {
nano::lock_guard<std::mutex> guard (next_log_mutex); nano::lock_guard<nano::mutex> guard (next_log_mutex);
auto result (false); auto result (false);
auto now (std::chrono::steady_clock::now ()); auto now (std::chrono::steady_clock::now ());
if (next_log < now) if (next_log < now)
@ -73,7 +73,7 @@ bool nano::bootstrap_attempt::still_pulling ()
void nano::bootstrap_attempt::pull_started () void nano::bootstrap_attempt::pull_started ()
{ {
{ {
nano::lock_guard<std::mutex> guard (mutex); nano::lock_guard<nano::mutex> guard (mutex);
++pulling; ++pulling;
} }
condition.notify_all (); condition.notify_all ();
@ -82,7 +82,7 @@ void nano::bootstrap_attempt::pull_started ()
void nano::bootstrap_attempt::pull_finished () void nano::bootstrap_attempt::pull_finished ()
{ {
{ {
nano::lock_guard<std::mutex> guard (mutex); nano::lock_guard<nano::mutex> guard (mutex);
--pulling; --pulling;
} }
condition.notify_all (); condition.notify_all ();
@ -91,7 +91,7 @@ void nano::bootstrap_attempt::pull_finished ()
void nano::bootstrap_attempt::stop () void nano::bootstrap_attempt::stop ()
{ {
{ {
nano::lock_guard<std::mutex> lock (mutex); nano::lock_guard<nano::mutex> lock (mutex);
stopped = true; stopped = true;
} }
condition.notify_all (); condition.notify_all ();
View file
@ -39,7 +39,7 @@ public:
virtual void wallet_start (std::deque<nano::account> &); virtual void wallet_start (std::deque<nano::account> &);
virtual size_t wallet_size (); virtual size_t wallet_size ();
virtual void get_information (boost::property_tree::ptree &) = 0; virtual void get_information (boost::property_tree::ptree &) = 0;
std::mutex next_log_mutex; nano::mutex next_log_mutex;
std::chrono::steady_clock::time_point next_log{ std::chrono::steady_clock::now () }; std::chrono::steady_clock::time_point next_log{ std::chrono::steady_clock::now () };
std::atomic<unsigned> pulling{ 0 }; std::atomic<unsigned> pulling{ 0 };
std::shared_ptr<nano::node> node; std::shared_ptr<nano::node> node;
@ -53,7 +53,7 @@ public:
std::atomic<bool> frontiers_received{ false }; std::atomic<bool> frontiers_received{ false };
std::atomic<bool> frontiers_confirmed{ false }; std::atomic<bool> frontiers_confirmed{ false };
nano::bootstrap_mode mode; nano::bootstrap_mode mode;
std::mutex mutex; nano::mutex mutex;
nano::condition_variable condition; nano::condition_variable condition;
}; };
} }
View file
@ -40,13 +40,13 @@ double nano::bootstrap_client::sample_block_rate ()
void nano::bootstrap_client::set_start_time (std::chrono::steady_clock::time_point start_time_a) void nano::bootstrap_client::set_start_time (std::chrono::steady_clock::time_point start_time_a)
{ {
nano::lock_guard<std::mutex> guard (start_time_mutex); nano::lock_guard<nano::mutex> guard (start_time_mutex);
start_time_m = start_time_a; start_time_m = start_time_a;
} }
double nano::bootstrap_client::elapsed_seconds () const double nano::bootstrap_client::elapsed_seconds () const
{ {
nano::lock_guard<std::mutex> guard (start_time_mutex); nano::lock_guard<nano::mutex> guard (start_time_mutex);
return std::chrono::duration_cast<std::chrono::duration<double>> (std::chrono::steady_clock::now () - start_time_m).count (); return std::chrono::duration_cast<std::chrono::duration<double>> (std::chrono::steady_clock::now () - start_time_m).count ();
} }
@ -66,7 +66,7 @@ node (node_a)
std::shared_ptr<nano::bootstrap_client> nano::bootstrap_connections::connection (std::shared_ptr<nano::bootstrap_attempt> const & attempt_a, bool use_front_connection) std::shared_ptr<nano::bootstrap_client> nano::bootstrap_connections::connection (std::shared_ptr<nano::bootstrap_attempt> const & attempt_a, bool use_front_connection)
{ {
nano::unique_lock<std::mutex> lock (mutex); nano::unique_lock<nano::mutex> lock (mutex);
condition.wait (lock, [& stopped = stopped, &idle = idle, &new_connections_empty = new_connections_empty] { return stopped || !idle.empty () || new_connections_empty; }); condition.wait (lock, [& stopped = stopped, &idle = idle, &new_connections_empty = new_connections_empty] { return stopped || !idle.empty () || new_connections_empty; });
std::shared_ptr<nano::bootstrap_client> result; std::shared_ptr<nano::bootstrap_client> result;
if (!stopped && !idle.empty ()) if (!stopped && !idle.empty ())
@ -93,7 +93,7 @@ std::shared_ptr<nano::bootstrap_client> nano::bootstrap_connections::connection
void nano::bootstrap_connections::pool_connection (std::shared_ptr<nano::bootstrap_client> const & client_a, bool new_client, bool push_front) void nano::bootstrap_connections::pool_connection (std::shared_ptr<nano::bootstrap_client> const & client_a, bool new_client, bool push_front)
{ {
nano::unique_lock<std::mutex> lock (mutex); nano::unique_lock<nano::mutex> lock (mutex);
auto const & socket_l = client_a->socket; auto const & socket_l = client_a->socket;
if (!stopped && !client_a->pending_stop && !node.network.excluded_peers.check (client_a->channel->get_tcp_endpoint ())) if (!stopped && !client_a->pending_stop && !node.network.excluded_peers.check (client_a->channel->get_tcp_endpoint ()))
{ {
@ -127,7 +127,7 @@ void nano::bootstrap_connections::add_connection (nano::endpoint const & endpoin
std::shared_ptr<nano::bootstrap_client> nano::bootstrap_connections::find_connection (nano::tcp_endpoint const & endpoint_a) std::shared_ptr<nano::bootstrap_client> nano::bootstrap_connections::find_connection (nano::tcp_endpoint const & endpoint_a)
{ {
nano::lock_guard<std::mutex> lock (mutex); nano::lock_guard<nano::mutex> lock (mutex);
std::shared_ptr<nano::bootstrap_client> result; std::shared_ptr<nano::bootstrap_client> result;
for (auto i (idle.begin ()), end (idle.end ()); i != end && !stopped; ++i) for (auto i (idle.begin ()), end (idle.end ()); i != end && !stopped; ++i)
{ {
@ -209,7 +209,7 @@ void nano::bootstrap_connections::populate_connections (bool repeat)
std::priority_queue<std::shared_ptr<nano::bootstrap_client>, std::vector<std::shared_ptr<nano::bootstrap_client>>, block_rate_cmp> sorted_connections; std::priority_queue<std::shared_ptr<nano::bootstrap_client>, std::vector<std::shared_ptr<nano::bootstrap_client>>, block_rate_cmp> sorted_connections;
std::unordered_set<nano::tcp_endpoint> endpoints; std::unordered_set<nano::tcp_endpoint> endpoints;
{ {
nano::unique_lock<std::mutex> lock (mutex); nano::unique_lock<nano::mutex> lock (mutex);
num_pulls = pulls.size (); num_pulls = pulls.size ();
std::deque<std::weak_ptr<nano::bootstrap_client>> new_clients; std::deque<std::weak_ptr<nano::bootstrap_client>> new_clients;
for (auto & c : clients) for (auto & c : clients)
@ -288,13 +288,13 @@ void nano::bootstrap_connections::populate_connections (bool repeat)
{ {
connect_client (endpoint); connect_client (endpoint);
endpoints.insert (endpoint); endpoints.insert (endpoint);
nano::lock_guard<std::mutex> lock (mutex); nano::lock_guard<nano::mutex> lock (mutex);
new_connections_empty = false; new_connections_empty = false;
} }
else if (connections_count == 0) else if (connections_count == 0)
{ {
{ {
nano::lock_guard<std::mutex> lock (mutex); nano::lock_guard<nano::mutex> lock (mutex);
new_connections_empty = true; new_connections_empty = true;
} }
condition.notify_all (); condition.notify_all ();
@ -326,13 +326,13 @@ void nano::bootstrap_connections::add_pull (nano::pull_info const & pull_a)
nano::pull_info pull (pull_a); nano::pull_info pull (pull_a);
node.bootstrap_initiator.cache.update_pull (pull); node.bootstrap_initiator.cache.update_pull (pull);
{ {
nano::lock_guard<std::mutex> lock (mutex); nano::lock_guard<nano::mutex> lock (mutex);
pulls.push_back (pull); pulls.push_back (pull);
} }
condition.notify_all (); condition.notify_all ();
} }
void nano::bootstrap_connections::request_pull (nano::unique_lock<std::mutex> & lock_a) void nano::bootstrap_connections::request_pull (nano::unique_lock<nano::mutex> & lock_a)
{ {
lock_a.unlock (); lock_a.unlock ();
auto connection_l (connection ()); auto connection_l (connection ());
@ -399,7 +399,7 @@ void nano::bootstrap_connections::requeue_pull (nano::pull_info const & pull_a,
if (attempt_l->mode == nano::bootstrap_mode::legacy && (pull.attempts < pull.retry_limit + (pull.processed / nano::bootstrap_limits::requeued_pulls_processed_blocks_factor))) if (attempt_l->mode == nano::bootstrap_mode::legacy && (pull.attempts < pull.retry_limit + (pull.processed / nano::bootstrap_limits::requeued_pulls_processed_blocks_factor)))
{ {
{ {
nano::lock_guard<std::mutex> lock (mutex); nano::lock_guard<nano::mutex> lock (mutex);
pulls.push_front (pull); pulls.push_front (pull);
} }
attempt_l->pull_started (); attempt_l->pull_started ();
@ -411,7 +411,7 @@ void nano::bootstrap_connections::requeue_pull (nano::pull_info const & pull_a,
if (!attempt_l->lazy_processed_or_exists (pull.account_or_head.as_block_hash ())) if (!attempt_l->lazy_processed_or_exists (pull.account_or_head.as_block_hash ()))
{ {
{ {
nano::lock_guard<std::mutex> lock (mutex); nano::lock_guard<nano::mutex> lock (mutex);
pulls.push_back (pull); pulls.push_back (pull);
} }
attempt_l->pull_started (); attempt_l->pull_started ();
@ -441,7 +441,7 @@ void nano::bootstrap_connections::requeue_pull (nano::pull_info const & pull_a,
void nano::bootstrap_connections::clear_pulls (uint64_t bootstrap_id_a) void nano::bootstrap_connections::clear_pulls (uint64_t bootstrap_id_a)
{ {
{ {
nano::lock_guard<std::mutex> lock (mutex); nano::lock_guard<nano::mutex> lock (mutex);
auto i (pulls.begin ()); auto i (pulls.begin ());
while (i != pulls.end ()) while (i != pulls.end ())
{ {
@ -461,7 +461,7 @@ void nano::bootstrap_connections::clear_pulls (uint64_t bootstrap_id_a)
void nano::bootstrap_connections::run () void nano::bootstrap_connections::run ()
{ {
start_populate_connections (); start_populate_connections ();
nano::unique_lock<std::mutex> lock (mutex); nano::unique_lock<nano::mutex> lock (mutex);
while (!stopped) while (!stopped)
{ {
if (!pulls.empty ()) if (!pulls.empty ())
@ -480,7 +480,7 @@ void nano::bootstrap_connections::run ()
void nano::bootstrap_connections::stop () void nano::bootstrap_connections::stop ()
{ {
nano::unique_lock<std::mutex> lock (mutex); nano::unique_lock<nano::mutex> lock (mutex);
stopped = true; stopped = true;
lock.unlock (); lock.unlock ();
condition.notify_all (); condition.notify_all ();
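Note: several helpers in these hunks, request_pull here and the inactive_votes_bootstrap_check overloads earlier, take the caller's nano::unique_lock by reference and release it before doing slower work, so the shared mutex is not held across network or ledger activity. A stand-alone sketch of that idiom with standard types (all names illustrative):

// Sketch of the "take the caller's unique_lock, drop it before slow work" idiom.
// Standard types stand in for the nano:: wrappers; all names are illustrative.
#include <cassert>
#include <mutex>
#include <vector>

std::mutex mtx;
std::vector<int> queue;

void slow_network_call (int)
{
    // stands in for work that must not run while holding mtx
}

// The caller holds lock_a on entry; it is released before the slow call, and
// the caller must treat it as unlocked on return.
void drain_one (std::unique_lock<std::mutex> & lock_a)
{
    assert (lock_a.owns_lock ());
    bool have_item (false);
    int item (0);
    if (!queue.empty ())
    {
        item = queue.back ();
        queue.pop_back ();
        have_item = true;
    }
    lock_a.unlock (); // don't hold the container mutex across I/O
    if (have_item)
    {
        slow_network_call (item);
    }
}

int main ()
{
    queue.push_back (42);
    std::unique_lock<std::mutex> lock (mtx);
    drain_one (lock);
    return 0;
}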
View file
@ -39,7 +39,7 @@ public:
std::atomic<bool> hard_stop{ false }; std::atomic<bool> hard_stop{ false };
private: private:
mutable std::mutex start_time_mutex; mutable nano::mutex start_time_mutex;
std::chrono::steady_clock::time_point start_time_m; std::chrono::steady_clock::time_point start_time_m;
}; };
@ -57,7 +57,7 @@ public:
void populate_connections (bool repeat = true); void populate_connections (bool repeat = true);
void start_populate_connections (); void start_populate_connections ();
void add_pull (nano::pull_info const & pull_a); void add_pull (nano::pull_info const & pull_a);
void request_pull (nano::unique_lock<std::mutex> & lock_a); void request_pull (nano::unique_lock<nano::mutex> & lock_a);
void requeue_pull (nano::pull_info const & pull_a, bool network_error = false); void requeue_pull (nano::pull_info const & pull_a, bool network_error = false);
void clear_pulls (uint64_t); void clear_pulls (uint64_t);
void run (); void run ();
@ -70,7 +70,7 @@ public:
std::atomic<bool> populate_connections_started{ false }; std::atomic<bool> populate_connections_started{ false };
std::atomic<bool> new_connections_empty{ false }; std::atomic<bool> new_connections_empty{ false };
std::atomic<bool> stopped{ false }; std::atomic<bool> stopped{ false };
std::mutex mutex; nano::mutex mutex;
nano::condition_variable condition; nano::condition_variable condition;
}; };
} }
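Note: mutexes that are locked from const member functions, such as start_time_mutex in bootstrap_client::elapsed_seconds () const above, keep their mutable qualifier after the rename so the const accessor can still acquire them. A minimal stand-alone illustration with standard types:

// Minimal illustration of why a mutex locked from const accessors is mutable.
// Standard types stand in for the nano:: wrappers.
#include <chrono>
#include <mutex>

class sampler
{
public:
    void set_start (std::chrono::steady_clock::time_point t)
    {
        std::lock_guard<std::mutex> guard (m);
        start = t;
    }
    double elapsed_seconds () const
    {
        std::lock_guard<std::mutex> guard (m); // allowed because m is mutable
        return std::chrono::duration_cast<std::chrono::duration<double>> (std::chrono::steady_clock::now () - start).count ();
    }

private:
    mutable std::mutex m;
    std::chrono::steady_clock::time_point start{ std::chrono::steady_clock::now () };
};

int main ()
{
    sampler s;
    s.set_start (std::chrono::steady_clock::now ());
    (void)s.elapsed_seconds ();
    return 0;
}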
View file
@ -28,7 +28,7 @@ nano::bootstrap_attempt_lazy::~bootstrap_attempt_lazy ()
void nano::bootstrap_attempt_lazy::lazy_start (nano::hash_or_account const & hash_or_account_a, bool confirmed) void nano::bootstrap_attempt_lazy::lazy_start (nano::hash_or_account const & hash_or_account_a, bool confirmed)
{ {
nano::unique_lock<std::mutex> lock (mutex); nano::unique_lock<nano::mutex> lock (mutex);
// Add start blocks, limit 1024 (4k with disabled legacy bootstrap) // Add start blocks, limit 1024 (4k with disabled legacy bootstrap)
size_t max_keys (node->flags.disable_legacy_bootstrap ? 4 * 1024 : 1024); size_t max_keys (node->flags.disable_legacy_bootstrap ? 4 * 1024 : 1024);
if (lazy_keys.size () < max_keys && lazy_keys.find (hash_or_account_a.as_block_hash ()) == lazy_keys.end () && !lazy_blocks_processed (hash_or_account_a.as_block_hash ())) if (lazy_keys.size () < max_keys && lazy_keys.find (hash_or_account_a.as_block_hash ()) == lazy_keys.end () && !lazy_blocks_processed (hash_or_account_a.as_block_hash ()))
@ -53,13 +53,13 @@ void nano::bootstrap_attempt_lazy::lazy_add (nano::hash_or_account const & hash_
void nano::bootstrap_attempt_lazy::lazy_add (nano::pull_info const & pull_a) void nano::bootstrap_attempt_lazy::lazy_add (nano::pull_info const & pull_a)
{ {
debug_assert (pull_a.account_or_head == pull_a.head); debug_assert (pull_a.account_or_head == pull_a.head);
nano::lock_guard<std::mutex> lock (mutex); nano::lock_guard<nano::mutex> lock (mutex);
lazy_add (pull_a.account_or_head, pull_a.retry_limit); lazy_add (pull_a.account_or_head, pull_a.retry_limit);
} }
void nano::bootstrap_attempt_lazy::lazy_requeue (nano::block_hash const & hash_a, nano::block_hash const & previous_a, bool confirmed_a) void nano::bootstrap_attempt_lazy::lazy_requeue (nano::block_hash const & hash_a, nano::block_hash const & previous_a, bool confirmed_a)
{ {
nano::unique_lock<std::mutex> lock (mutex); nano::unique_lock<nano::mutex> lock (mutex);
// Add only known blocks // Add only known blocks
if (lazy_blocks_processed (hash_a)) if (lazy_blocks_processed (hash_a))
{ {
@ -88,7 +88,7 @@ uint32_t nano::bootstrap_attempt_lazy::lazy_batch_size ()
return result; return result;
} }
void nano::bootstrap_attempt_lazy::lazy_pull_flush (nano::unique_lock<std::mutex> & lock_a) void nano::bootstrap_attempt_lazy::lazy_pull_flush (nano::unique_lock<nano::mutex> & lock_a)
{ {
static size_t const max_pulls (static_cast<size_t> (nano::bootstrap_limits::bootstrap_connection_scale_target_blocks) * 3); static size_t const max_pulls (static_cast<size_t> (nano::bootstrap_limits::bootstrap_connection_scale_target_blocks) * 3);
if (pulling < max_pulls) if (pulling < max_pulls)
@ -187,7 +187,7 @@ void nano::bootstrap_attempt_lazy::run ()
debug_assert (!node->flags.disable_lazy_bootstrap); debug_assert (!node->flags.disable_lazy_bootstrap);
node->bootstrap_initiator.connections->populate_connections (false); node->bootstrap_initiator.connections->populate_connections (false);
lazy_start_time = std::chrono::steady_clock::now (); lazy_start_time = std::chrono::steady_clock::now ();
nano::unique_lock<std::mutex> lock (mutex); nano::unique_lock<nano::mutex> lock (mutex);
while ((still_pulling () || !lazy_finished ()) && !lazy_has_expired ()) while ((still_pulling () || !lazy_finished ()) && !lazy_has_expired ())
{ {
unsigned iterations (0); unsigned iterations (0);
@ -248,7 +248,7 @@ bool nano::bootstrap_attempt_lazy::process_block_lazy (std::shared_ptr<nano::blo
{ {
bool stop_pull (false); bool stop_pull (false);
auto hash (block_a->hash ()); auto hash (block_a->hash ());
nano::unique_lock<std::mutex> lock (mutex); nano::unique_lock<nano::mutex> lock (mutex);
// Processing new blocks // Processing new blocks
if (!lazy_blocks_processed (hash)) if (!lazy_blocks_processed (hash))
{ {
@ -488,7 +488,7 @@ bool nano::bootstrap_attempt_lazy::lazy_blocks_processed (nano::block_hash const
bool nano::bootstrap_attempt_lazy::lazy_processed_or_exists (nano::block_hash const & hash_a) bool nano::bootstrap_attempt_lazy::lazy_processed_or_exists (nano::block_hash const & hash_a)
{ {
bool result (false); bool result (false);
nano::unique_lock<std::mutex> lock (mutex); nano::unique_lock<nano::mutex> lock (mutex);
if (lazy_blocks_processed (hash_a)) if (lazy_blocks_processed (hash_a))
{ {
result = true; result = true;
@ -518,7 +518,7 @@ unsigned nano::bootstrap_attempt_lazy::lazy_retry_limit_confirmed ()
void nano::bootstrap_attempt_lazy::get_information (boost::property_tree::ptree & tree_a) void nano::bootstrap_attempt_lazy::get_information (boost::property_tree::ptree & tree_a)
{ {
nano::lock_guard<std::mutex> lock (mutex); nano::lock_guard<nano::mutex> lock (mutex);
tree_a.put ("lazy_blocks", std::to_string (lazy_blocks.size ())); tree_a.put ("lazy_blocks", std::to_string (lazy_blocks.size ()));
tree_a.put ("lazy_state_backlog", std::to_string (lazy_state_backlog.size ())); tree_a.put ("lazy_state_backlog", std::to_string (lazy_state_backlog.size ()));
tree_a.put ("lazy_balances", std::to_string (lazy_balances.size ())); tree_a.put ("lazy_balances", std::to_string (lazy_balances.size ()));
@ -543,7 +543,7 @@ nano::bootstrap_attempt_wallet::~bootstrap_attempt_wallet ()
node->bootstrap_initiator.notify_listeners (false); node->bootstrap_initiator.notify_listeners (false);
} }
void nano::bootstrap_attempt_wallet::request_pending (nano::unique_lock<std::mutex> & lock_a) void nano::bootstrap_attempt_wallet::request_pending (nano::unique_lock<nano::mutex> & lock_a)
{ {
lock_a.unlock (); lock_a.unlock ();
auto connection_l (node->bootstrap_initiator.connections->connection (shared_from_this ())); auto connection_l (node->bootstrap_initiator.connections->connection (shared_from_this ()));
@ -567,7 +567,7 @@ void nano::bootstrap_attempt_wallet::requeue_pending (nano::account const & acco
{ {
auto account (account_a); auto account (account_a);
{ {
nano::lock_guard<std::mutex> lock (mutex); nano::lock_guard<nano::mutex> lock (mutex);
wallet_accounts.push_front (account); wallet_accounts.push_front (account);
} }
condition.notify_all (); condition.notify_all ();
@ -576,7 +576,7 @@ void nano::bootstrap_attempt_wallet::requeue_pending (nano::account const & acco
void nano::bootstrap_attempt_wallet::wallet_start (std::deque<nano::account> & accounts_a) void nano::bootstrap_attempt_wallet::wallet_start (std::deque<nano::account> & accounts_a)
{ {
{ {
nano::lock_guard<std::mutex> lock (mutex); nano::lock_guard<nano::mutex> lock (mutex);
wallet_accounts.swap (accounts_a); wallet_accounts.swap (accounts_a);
} }
condition.notify_all (); condition.notify_all ();
@ -598,7 +598,7 @@ void nano::bootstrap_attempt_wallet::run ()
node->bootstrap_initiator.connections->populate_connections (false); node->bootstrap_initiator.connections->populate_connections (false);
auto start_time (std::chrono::steady_clock::now ()); auto start_time (std::chrono::steady_clock::now ());
auto max_time (std::chrono::minutes (10)); auto max_time (std::chrono::minutes (10));
nano::unique_lock<std::mutex> lock (mutex); nano::unique_lock<nano::mutex> lock (mutex);
while (wallet_finished () && std::chrono::steady_clock::now () - start_time < max_time) while (wallet_finished () && std::chrono::steady_clock::now () - start_time < max_time)
{ {
if (!wallet_accounts.empty ()) if (!wallet_accounts.empty ())
@ -621,12 +621,12 @@ void nano::bootstrap_attempt_wallet::run ()
size_t nano::bootstrap_attempt_wallet::wallet_size () size_t nano::bootstrap_attempt_wallet::wallet_size ()
{ {
nano::lock_guard<std::mutex> lock (mutex); nano::lock_guard<nano::mutex> lock (mutex);
return wallet_accounts.size (); return wallet_accounts.size ();
} }
void nano::bootstrap_attempt_wallet::get_information (boost::property_tree::ptree & tree_a) void nano::bootstrap_attempt_wallet::get_information (boost::property_tree::ptree & tree_a)
{ {
nano::lock_guard<std::mutex> lock (mutex); nano::lock_guard<nano::mutex> lock (mutex);
tree_a.put ("wallet_accounts", std::to_string (wallet_accounts.size ())); tree_a.put ("wallet_accounts", std::to_string (wallet_accounts.size ()));
} }

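One convention in the lazy and wallet attempts above deserves a note: helpers such as lazy_pull_flush and request_pending receive the caller's nano::unique_lock<nano::mutex> by reference so they can drop the attempt mutex around blocking work (and, depending on the helper, re-acquire it before returning). A rough sketch of a helper with that kind of contract — the attempt class and the blocking call are placeholders, not code from this commit:

#include <nano/lib/locks.hpp>

#include <cassert>

class attempt final
{
public:
	// Caller holds lock_a (locked on this->mutex) on entry and gets it back locked.
	void flush (nano::unique_lock<nano::mutex> & lock_a)
	{
		assert (lock_a.owns_lock ());
		lock_a.unlock (); // never hold the attempt mutex across blocking I/O
		blocking_round_trip (); // placeholder for the network request
		lock_a.lock ();
	}

private:
	void blocking_round_trip ()
	{
		// illustrative no-op
	}
	nano::mutex mutex;
};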

@ -43,7 +43,7 @@ public:
bool lazy_finished (); bool lazy_finished ();
bool lazy_has_expired () const override; bool lazy_has_expired () const override;
uint32_t lazy_batch_size () override; uint32_t lazy_batch_size () override;
void lazy_pull_flush (nano::unique_lock<std::mutex> & lock_a); void lazy_pull_flush (nano::unique_lock<nano::mutex> & lock_a);
bool process_block_lazy (std::shared_ptr<nano::block> const &, nano::account const &, uint64_t, nano::bulk_pull::count_t, unsigned); bool process_block_lazy (std::shared_ptr<nano::block> const &, nano::account const &, uint64_t, nano::bulk_pull::count_t, unsigned);
void lazy_block_state (std::shared_ptr<nano::block> const &, unsigned); void lazy_block_state (std::shared_ptr<nano::block> const &, unsigned);
void lazy_block_state_backlog_check (std::shared_ptr<nano::block> const &, nano::block_hash const &); void lazy_block_state_backlog_check (std::shared_ptr<nano::block> const &, nano::block_hash const &);
@ -90,7 +90,7 @@ class bootstrap_attempt_wallet final : public bootstrap_attempt
public: public:
explicit bootstrap_attempt_wallet (std::shared_ptr<nano::node> const & node_a, uint64_t incremental_id_a, std::string id_a = ""); explicit bootstrap_attempt_wallet (std::shared_ptr<nano::node> const & node_a, uint64_t incremental_id_a, std::string id_a = "");
~bootstrap_attempt_wallet (); ~bootstrap_attempt_wallet ();
void request_pending (nano::unique_lock<std::mutex> &); void request_pending (nano::unique_lock<nano::mutex> &);
void requeue_pending (nano::account const &) override; void requeue_pending (nano::account const &) override;
void run () override; void run () override;
void wallet_start (std::deque<nano::account> &) override; void wallet_start (std::deque<nano::account> &) override;


@ -27,7 +27,7 @@ bool nano::bootstrap_attempt_legacy::consume_future (std::future<bool> & future_
void nano::bootstrap_attempt_legacy::stop () void nano::bootstrap_attempt_legacy::stop ()
{ {
nano::unique_lock<std::mutex> lock (mutex); nano::unique_lock<nano::mutex> lock (mutex);
stopped = true; stopped = true;
lock.unlock (); lock.unlock ();
condition.notify_all (); condition.notify_all ();
@ -56,7 +56,7 @@ void nano::bootstrap_attempt_legacy::stop ()
node->bootstrap_initiator.connections->clear_pulls (incremental_id); node->bootstrap_initiator.connections->clear_pulls (incremental_id);
} }
void nano::bootstrap_attempt_legacy::request_push (nano::unique_lock<std::mutex> & lock_a) void nano::bootstrap_attempt_legacy::request_push (nano::unique_lock<nano::mutex> & lock_a)
{ {
bool error (false); bool error (false);
lock_a.unlock (); lock_a.unlock ();
@ -89,19 +89,19 @@ void nano::bootstrap_attempt_legacy::request_push (nano::unique_lock<std::mutex>
void nano::bootstrap_attempt_legacy::add_frontier (nano::pull_info const & pull_a) void nano::bootstrap_attempt_legacy::add_frontier (nano::pull_info const & pull_a)
{ {
nano::pull_info pull (pull_a); nano::pull_info pull (pull_a);
nano::lock_guard<std::mutex> lock (mutex); nano::lock_guard<nano::mutex> lock (mutex);
frontier_pulls.push_back (pull); frontier_pulls.push_back (pull);
} }
void nano::bootstrap_attempt_legacy::add_bulk_push_target (nano::block_hash const & head, nano::block_hash const & end) void nano::bootstrap_attempt_legacy::add_bulk_push_target (nano::block_hash const & head, nano::block_hash const & end)
{ {
nano::lock_guard<std::mutex> lock (mutex); nano::lock_guard<nano::mutex> lock (mutex);
bulk_push_targets.emplace_back (head, end); bulk_push_targets.emplace_back (head, end);
} }
bool nano::bootstrap_attempt_legacy::request_bulk_push_target (std::pair<nano::block_hash, nano::block_hash> & current_target_a) bool nano::bootstrap_attempt_legacy::request_bulk_push_target (std::pair<nano::block_hash, nano::block_hash> & current_target_a)
{ {
nano::lock_guard<std::mutex> lock (mutex); nano::lock_guard<nano::mutex> lock (mutex);
auto empty (bulk_push_targets.empty ()); auto empty (bulk_push_targets.empty ());
if (!empty) if (!empty)
{ {
@ -113,7 +113,7 @@ bool nano::bootstrap_attempt_legacy::request_bulk_push_target (std::pair<nano::b
void nano::bootstrap_attempt_legacy::add_recent_pull (nano::block_hash const & head_a) void nano::bootstrap_attempt_legacy::add_recent_pull (nano::block_hash const & head_a)
{ {
nano::lock_guard<std::mutex> lock (mutex); nano::lock_guard<nano::mutex> lock (mutex);
recent_pulls_head.push_back (head_a); recent_pulls_head.push_back (head_a);
if (recent_pulls_head.size () > nano::bootstrap_limits::bootstrap_max_confirm_frontiers) if (recent_pulls_head.size () > nano::bootstrap_limits::bootstrap_max_confirm_frontiers)
{ {
@ -133,7 +133,7 @@ void nano::bootstrap_attempt_legacy::restart_condition ()
} }
} }
void nano::bootstrap_attempt_legacy::attempt_restart_check (nano::unique_lock<std::mutex> & lock_a) void nano::bootstrap_attempt_legacy::attempt_restart_check (nano::unique_lock<nano::mutex> & lock_a)
{ {
if (frontiers_confirmation_pending) if (frontiers_confirmation_pending)
{ {
@ -175,7 +175,7 @@ void nano::bootstrap_attempt_legacy::attempt_restart_check (nano::unique_lock<st
} }
} }
bool nano::bootstrap_attempt_legacy::confirm_frontiers (nano::unique_lock<std::mutex> & lock_a) bool nano::bootstrap_attempt_legacy::confirm_frontiers (nano::unique_lock<nano::mutex> & lock_a)
{ {
bool confirmed (false); bool confirmed (false);
debug_assert (!frontiers_confirmed); debug_assert (!frontiers_confirmed);
@ -183,7 +183,7 @@ bool nano::bootstrap_attempt_legacy::confirm_frontiers (nano::unique_lock<std::m
auto this_l (shared_from_this ()); auto this_l (shared_from_this ());
std::vector<nano::block_hash> frontiers; std::vector<nano::block_hash> frontiers;
lock_a.unlock (); lock_a.unlock ();
nano::unique_lock<std::mutex> pulls_lock (node->bootstrap_initiator.connections->mutex); nano::unique_lock<nano::mutex> pulls_lock (node->bootstrap_initiator.connections->mutex);
for (auto i (node->bootstrap_initiator.connections->pulls.begin ()), end (node->bootstrap_initiator.connections->pulls.end ()); i != end && frontiers.size () != nano::bootstrap_limits::bootstrap_max_confirm_frontiers; ++i) for (auto i (node->bootstrap_initiator.connections->pulls.begin ()), end (node->bootstrap_initiator.connections->pulls.end ()); i != end && frontiers.size () != nano::bootstrap_limits::bootstrap_max_confirm_frontiers; ++i)
{ {
if (!i->head.is_zero () && i->bootstrap_id == incremental_id && std::find (frontiers.begin (), frontiers.end (), i->head) == frontiers.end ()) if (!i->head.is_zero () && i->bootstrap_id == incremental_id && std::find (frontiers.begin (), frontiers.end (), i->head) == frontiers.end ())
@ -307,7 +307,7 @@ bool nano::bootstrap_attempt_legacy::confirm_frontiers (nano::unique_lock<std::m
return confirmed; return confirmed;
} }
bool nano::bootstrap_attempt_legacy::request_frontier (nano::unique_lock<std::mutex> & lock_a, bool first_attempt) bool nano::bootstrap_attempt_legacy::request_frontier (nano::unique_lock<nano::mutex> & lock_a, bool first_attempt)
{ {
auto result (true); auto result (true);
lock_a.unlock (); lock_a.unlock ();
@ -370,7 +370,7 @@ bool nano::bootstrap_attempt_legacy::request_frontier (nano::unique_lock<std::mu
return result; return result;
} }
void nano::bootstrap_attempt_legacy::run_start (nano::unique_lock<std::mutex> & lock_a) void nano::bootstrap_attempt_legacy::run_start (nano::unique_lock<nano::mutex> & lock_a)
{ {
frontiers_received = false; frontiers_received = false;
frontiers_confirmed = false; frontiers_confirmed = false;
@ -392,7 +392,7 @@ void nano::bootstrap_attempt_legacy::run ()
debug_assert (started); debug_assert (started);
debug_assert (!node->flags.disable_legacy_bootstrap); debug_assert (!node->flags.disable_legacy_bootstrap);
node->bootstrap_initiator.connections->populate_connections (false); node->bootstrap_initiator.connections->populate_connections (false);
nano::unique_lock<std::mutex> lock (mutex); nano::unique_lock<nano::mutex> lock (mutex);
run_start (lock); run_start (lock);
while (still_pulling ()) while (still_pulling ())
{ {
@ -429,7 +429,7 @@ void nano::bootstrap_attempt_legacy::run ()
void nano::bootstrap_attempt_legacy::get_information (boost::property_tree::ptree & tree_a) void nano::bootstrap_attempt_legacy::get_information (boost::property_tree::ptree & tree_a)
{ {
nano::lock_guard<std::mutex> lock (mutex); nano::lock_guard<nano::mutex> lock (mutex);
tree_a.put ("frontier_pulls", std::to_string (frontier_pulls.size ())); tree_a.put ("frontier_pulls", std::to_string (frontier_pulls.size ()));
tree_a.put ("frontiers_received", static_cast<bool> (frontiers_received)); tree_a.put ("frontiers_received", static_cast<bool> (frontiers_received));
tree_a.put ("frontiers_confirmed", static_cast<bool> (frontiers_confirmed)); tree_a.put ("frontiers_confirmed", static_cast<bool> (frontiers_confirmed));


@ -20,16 +20,16 @@ public:
void run () override; void run () override;
bool consume_future (std::future<bool> &); bool consume_future (std::future<bool> &);
void stop () override; void stop () override;
bool request_frontier (nano::unique_lock<std::mutex> &, bool = false); bool request_frontier (nano::unique_lock<nano::mutex> &, bool = false);
void request_push (nano::unique_lock<std::mutex> &); void request_push (nano::unique_lock<nano::mutex> &);
void add_frontier (nano::pull_info const &) override; void add_frontier (nano::pull_info const &) override;
void add_bulk_push_target (nano::block_hash const &, nano::block_hash const &) override; void add_bulk_push_target (nano::block_hash const &, nano::block_hash const &) override;
bool request_bulk_push_target (std::pair<nano::block_hash, nano::block_hash> &) override; bool request_bulk_push_target (std::pair<nano::block_hash, nano::block_hash> &) override;
void add_recent_pull (nano::block_hash const &) override; void add_recent_pull (nano::block_hash const &) override;
void run_start (nano::unique_lock<std::mutex> &); void run_start (nano::unique_lock<nano::mutex> &);
void restart_condition () override; void restart_condition () override;
void attempt_restart_check (nano::unique_lock<std::mutex> &); void attempt_restart_check (nano::unique_lock<nano::mutex> &);
bool confirm_frontiers (nano::unique_lock<std::mutex> &); bool confirm_frontiers (nano::unique_lock<nano::mutex> &);
void get_information (boost::property_tree::ptree &) override; void get_information (boost::property_tree::ptree &) override;
nano::tcp_endpoint endpoint_frontier_request; nano::tcp_endpoint endpoint_frontier_request;
std::weak_ptr<nano::frontier_req_client> frontiers; std::weak_ptr<nano::frontier_req_client> frontiers;


@ -15,7 +15,7 @@ port (port_a)
void nano::bootstrap_listener::start () void nano::bootstrap_listener::start ()
{ {
nano::lock_guard<std::mutex> lock (mutex); nano::lock_guard<nano::mutex> lock (mutex);
on = true; on = true;
listening_socket = std::make_shared<nano::server_socket> (node, boost::asio::ip::tcp::endpoint (boost::asio::ip::address_v6::any (), port), node.config.tcp_incoming_connections_max); listening_socket = std::make_shared<nano::server_socket> (node, boost::asio::ip::tcp::endpoint (boost::asio::ip::address_v6::any (), port), node.config.tcp_incoming_connections_max);
boost::system::error_code ec; boost::system::error_code ec;
@ -45,13 +45,13 @@ void nano::bootstrap_listener::stop ()
{ {
decltype (connections) connections_l; decltype (connections) connections_l;
{ {
nano::lock_guard<std::mutex> lock (mutex); nano::lock_guard<nano::mutex> lock (mutex);
on = false; on = false;
connections_l.swap (connections); connections_l.swap (connections);
} }
if (listening_socket) if (listening_socket)
{ {
nano::lock_guard<std::mutex> lock (mutex); nano::lock_guard<nano::mutex> lock (mutex);
listening_socket->close (); listening_socket->close ();
listening_socket = nullptr; listening_socket = nullptr;
} }
@ -59,7 +59,7 @@ void nano::bootstrap_listener::stop ()
size_t nano::bootstrap_listener::connection_count () size_t nano::bootstrap_listener::connection_count ()
{ {
nano::lock_guard<std::mutex> lock (mutex); nano::lock_guard<nano::mutex> lock (mutex);
return connections.size (); return connections.size ();
} }
@ -68,7 +68,7 @@ void nano::bootstrap_listener::accept_action (boost::system::error_code const &
if (!node.network.excluded_peers.check (socket_a->remote_endpoint ())) if (!node.network.excluded_peers.check (socket_a->remote_endpoint ()))
{ {
auto connection (std::make_shared<nano::bootstrap_server> (socket_a, node.shared ())); auto connection (std::make_shared<nano::bootstrap_server> (socket_a, node.shared ()));
nano::lock_guard<std::mutex> lock (mutex); nano::lock_guard<nano::mutex> lock (mutex);
connections[connection.get ()] = connection; connections[connection.get ()] = connection;
connection->receive (); connection->receive ();
} }
@ -84,7 +84,7 @@ void nano::bootstrap_listener::accept_action (boost::system::error_code const &
boost::asio::ip::tcp::endpoint nano::bootstrap_listener::endpoint () boost::asio::ip::tcp::endpoint nano::bootstrap_listener::endpoint ()
{ {
nano::lock_guard<std::mutex> lock (mutex); nano::lock_guard<nano::mutex> lock (mutex);
if (on && listening_socket) if (on && listening_socket)
{ {
return boost::asio::ip::tcp::endpoint (boost::asio::ip::address_v6::loopback (), listening_socket->listening_port ()); return boost::asio::ip::tcp::endpoint (boost::asio::ip::address_v6::loopback (), listening_socket->listening_port ());
@ -133,7 +133,7 @@ nano::bootstrap_server::~bootstrap_server ()
} }
} }
stop (); stop ();
nano::lock_guard<std::mutex> lock (node->bootstrap.mutex); nano::lock_guard<nano::mutex> lock (node->bootstrap.mutex);
node->bootstrap.connections.erase (this); node->bootstrap.connections.erase (this);
} }
@ -547,7 +547,7 @@ void nano::bootstrap_server::receive_node_id_handshake_action (boost::system::er
void nano::bootstrap_server::add_request (std::unique_ptr<nano::message> message_a) void nano::bootstrap_server::add_request (std::unique_ptr<nano::message> message_a)
{ {
debug_assert (message_a != nullptr); debug_assert (message_a != nullptr);
nano::unique_lock<std::mutex> lock (mutex); nano::unique_lock<nano::mutex> lock (mutex);
auto start (requests.empty ()); auto start (requests.empty ());
requests.push (std::move (message_a)); requests.push (std::move (message_a));
if (start) if (start)
@ -558,7 +558,7 @@ void nano::bootstrap_server::add_request (std::unique_ptr<nano::message> message
void nano::bootstrap_server::finish_request () void nano::bootstrap_server::finish_request ()
{ {
nano::unique_lock<std::mutex> lock (mutex); nano::unique_lock<nano::mutex> lock (mutex);
requests.pop (); requests.pop ();
if (!requests.empty ()) if (!requests.empty ())
{ {
@ -598,7 +598,7 @@ void nano::bootstrap_server::timeout ()
node->logger.try_log ("Closing incoming tcp / bootstrap server by timeout"); node->logger.try_log ("Closing incoming tcp / bootstrap server by timeout");
} }
{ {
nano::lock_guard<std::mutex> lock (node->bootstrap.mutex); nano::lock_guard<nano::mutex> lock (node->bootstrap.mutex);
node->bootstrap.connections.erase (this); node->bootstrap.connections.erase (this);
} }
socket->close (); socket->close ();
@ -606,7 +606,7 @@ void nano::bootstrap_server::timeout ()
} }
else else
{ {
nano::lock_guard<std::mutex> lock (node->bootstrap.mutex); nano::lock_guard<nano::mutex> lock (node->bootstrap.mutex);
node->bootstrap.connections.erase (this); node->bootstrap.connections.erase (this);
} }
} }
@ -726,7 +726,7 @@ public:
}; };
} }
void nano::bootstrap_server::run_next (nano::unique_lock<std::mutex> & lock_a) void nano::bootstrap_server::run_next (nano::unique_lock<nano::mutex> & lock_a)
{ {
debug_assert (!requests.empty ()); debug_assert (!requests.empty ());
request_response_visitor visitor (shared_from_this ()); request_response_visitor visitor (shared_from_this ());

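bootstrap_listener::stop above also shows a small idiom worth spelling out: the connection map is swapped into a local variable under a nano::lock_guard, and the entries are only dropped after the guard is gone, keeping per-connection cleanup out of the critical section. A generic sketch of that swap-under-lock idiom (the container's key and value types and the on flag are placeholders):

#include <nano/lib/locks.hpp>

#include <memory>
#include <unordered_map>

class listener final
{
public:
	void stop ()
	{
		std::unordered_map<int, std::weak_ptr<int>> connections_l;
		{
			nano::lock_guard<nano::mutex> lock (mutex);
			on = false;
			connections_l.swap (connections); // steal the entries while the lock is held
		}
		// connections_l is destroyed here, outside the critical section, so any
		// cleanup triggered by dropping the entries runs without the listener mutex.
	}

private:
	bool on{ false };
	std::unordered_map<int, std::weak_ptr<int>> connections;
	nano::mutex mutex;
};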

@ -18,7 +18,7 @@ public:
void accept_action (boost::system::error_code const &, std::shared_ptr<nano::socket> const &); void accept_action (boost::system::error_code const &, std::shared_ptr<nano::socket> const &);
size_t connection_count (); size_t connection_count ();
std::mutex mutex; nano::mutex mutex;
std::unordered_map<nano::bootstrap_server *, std::weak_ptr<nano::bootstrap_server>> connections; std::unordered_map<nano::bootstrap_server *, std::weak_ptr<nano::bootstrap_server>> connections;
nano::tcp_endpoint endpoint (); nano::tcp_endpoint endpoint ();
nano::node & node; nano::node & node;
@ -62,13 +62,13 @@ public:
void finish_request (); void finish_request ();
void finish_request_async (); void finish_request_async ();
void timeout (); void timeout ();
void run_next (nano::unique_lock<std::mutex> & lock_a); void run_next (nano::unique_lock<nano::mutex> & lock_a);
bool is_bootstrap_connection (); bool is_bootstrap_connection ();
bool is_realtime_connection (); bool is_realtime_connection ();
std::shared_ptr<std::vector<uint8_t>> receive_buffer; std::shared_ptr<std::vector<uint8_t>> receive_buffer;
std::shared_ptr<nano::socket> socket; std::shared_ptr<nano::socket> socket;
std::shared_ptr<nano::node> node; std::shared_ptr<nano::node> node;
std::mutex mutex; nano::mutex mutex;
std::queue<std::unique_ptr<nano::message>> requests; std::queue<std::unique_ptr<nano::message>> requests;
std::atomic<bool> stopped{ false }; std::atomic<bool> stopped{ false };
std::atomic<nano::bootstrap_server_type> type{ nano::bootstrap_server_type::undefined }; std::atomic<nano::bootstrap_server_type> type{ nano::bootstrap_server_type::undefined };


@ -1048,7 +1048,7 @@ std::error_code nano::handle_node_options (boost::program_options::variables_map
{ {
bool error (true); bool error (true);
{ {
nano::lock_guard<std::mutex> lock (node->wallets.mutex); nano::lock_guard<nano::mutex> lock (node->wallets.mutex);
auto transaction (node->wallets.tx_begin_write ()); auto transaction (node->wallets.tx_begin_write ());
nano::wallet wallet (error, transaction, node->wallets, wallet_id.to_string (), contents.str ()); nano::wallet wallet (error, transaction, node->wallets, wallet_id.to_string (), contents.str ());
} }
@ -1060,7 +1060,7 @@ std::error_code nano::handle_node_options (boost::program_options::variables_map
else else
{ {
node->wallets.reload (); node->wallets.reload ();
nano::lock_guard<std::mutex> lock (node->wallets.mutex); nano::lock_guard<nano::mutex> lock (node->wallets.mutex);
release_assert (node->wallets.items.find (wallet_id) != node->wallets.items.end ()); release_assert (node->wallets.items.find (wallet_id) != node->wallets.items.end ());
std::cout << "Import completed\n"; std::cout << "Import completed\n";
} }


@ -35,7 +35,7 @@ nano::confirmation_height_processor::~confirmation_height_processor ()
void nano::confirmation_height_processor::stop () void nano::confirmation_height_processor::stop ()
{ {
{ {
nano::lock_guard<std::mutex> guard (mutex); nano::lock_guard<nano::mutex> guard (mutex);
stopped = true; stopped = true;
} }
condition.notify_one (); condition.notify_one ();
@ -47,7 +47,7 @@ void nano::confirmation_height_processor::stop ()
void nano::confirmation_height_processor::run (confirmation_height_mode mode_a) void nano::confirmation_height_processor::run (confirmation_height_mode mode_a)
{ {
nano::unique_lock<std::mutex> lk (mutex); nano::unique_lock<nano::mutex> lk (mutex);
while (!stopped) while (!stopped)
{ {
if (!paused && !awaiting_processing.empty ()) if (!paused && !awaiting_processing.empty ())
@ -139,14 +139,14 @@ void nano::confirmation_height_processor::run (confirmation_height_mode mode_a)
// Pausing only affects processing new blocks, not the current one being processed. Currently only used in tests // Pausing only affects processing new blocks, not the current one being processed. Currently only used in tests
void nano::confirmation_height_processor::pause () void nano::confirmation_height_processor::pause ()
{ {
nano::lock_guard<std::mutex> lk (mutex); nano::lock_guard<nano::mutex> lk (mutex);
paused = true; paused = true;
} }
void nano::confirmation_height_processor::unpause () void nano::confirmation_height_processor::unpause ()
{ {
{ {
nano::lock_guard<std::mutex> lk (mutex); nano::lock_guard<nano::mutex> lk (mutex);
paused = false; paused = false;
} }
condition.notify_one (); condition.notify_one ();
@ -155,7 +155,7 @@ void nano::confirmation_height_processor::unpause ()
void nano::confirmation_height_processor::add (std::shared_ptr<nano::block> const & block_a) void nano::confirmation_height_processor::add (std::shared_ptr<nano::block> const & block_a)
{ {
{ {
nano::lock_guard<std::mutex> lk (mutex); nano::lock_guard<nano::mutex> lk (mutex);
awaiting_processing.get<tag_sequence> ().emplace_back (block_a); awaiting_processing.get<tag_sequence> ().emplace_back (block_a);
} }
condition.notify_one (); condition.notify_one ();
@ -163,7 +163,7 @@ void nano::confirmation_height_processor::add (std::shared_ptr<nano::block> cons
void nano::confirmation_height_processor::set_next_hash () void nano::confirmation_height_processor::set_next_hash ()
{ {
nano::lock_guard<std::mutex> guard (mutex); nano::lock_guard<nano::mutex> guard (mutex);
debug_assert (!awaiting_processing.empty ()); debug_assert (!awaiting_processing.empty ());
original_block = awaiting_processing.get<tag_sequence> ().front ().block; original_block = awaiting_processing.get<tag_sequence> ().front ().block;
original_hashes_pending.insert (original_block->hash ()); original_hashes_pending.insert (original_block->hash ());
@ -217,13 +217,13 @@ std::unique_ptr<nano::container_info_component> nano::collect_container_info (co
size_t nano::confirmation_height_processor::awaiting_processing_size () const size_t nano::confirmation_height_processor::awaiting_processing_size () const
{ {
nano::lock_guard<std::mutex> guard (mutex); nano::lock_guard<nano::mutex> guard (mutex);
return awaiting_processing.size (); return awaiting_processing.size ();
} }
bool nano::confirmation_height_processor::is_processing_added_block (nano::block_hash const & hash_a) const bool nano::confirmation_height_processor::is_processing_added_block (nano::block_hash const & hash_a) const
{ {
nano::lock_guard<std::mutex> guard (mutex); nano::lock_guard<nano::mutex> guard (mutex);
return original_hashes_pending.count (hash_a) > 0 || awaiting_processing.get<tag_hash> ().count (hash_a) > 0; return original_hashes_pending.count (hash_a) > 0 || awaiting_processing.get<tag_hash> ().count (hash_a) > 0;
} }
@ -234,6 +234,6 @@ bool nano::confirmation_height_processor::is_processing_block (nano::block_hash
nano::block_hash nano::confirmation_height_processor::current () const nano::block_hash nano::confirmation_height_processor::current () const
{ {
nano::lock_guard<std::mutex> lk (mutex); nano::lock_guard<nano::mutex> lk (mutex);
return original_block ? original_block->hash () : 0; return original_block ? original_block->hash () : 0;
} }


@ -48,7 +48,7 @@ public:
void add_block_already_cemented_observer (std::function<void(nano::block_hash const &)> const &); void add_block_already_cemented_observer (std::function<void(nano::block_hash const &)> const &);
private: private:
mutable std::mutex mutex; mutable nano::mutex mutex{ mutex_identifier (mutexes::confirmation_height_processor) };
// Hashes which have been added to the confirmation height processor, but not yet processed // Hashes which have been added to the confirmation height processor, but not yet processed
// clang-format off // clang-format off
struct block_wrapper struct block_wrapper

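The mutex declaration above is where this component opts into the new filtering: constructing the member as nano::mutex{ mutex_identifier (mutexes::confirmation_height_processor) } tags it with a name that the NANO_TIMED_LOCKS_FILTER build option can match when deciding which mutex should have stacktraces collected. A compressed sketch of a component using such an identified mutex — the class and its container are invented, while nano::mutex, nano::lock_guard, mutex_identifier and the mutexes enum come from nano/lib/locks.hpp as used in this commit:

#include <nano/lib/locks.hpp>

#include <unordered_set>

class gap_tracker final
{
public:
	void add (int hash_a)
	{
		// With NANO_TIMED_LOCKS enabled, holding this lock past the configured threshold
		// is reported; with NANO_TIMED_LOCKS_FILTER set to gap_cache, only this mutex
		// would have stacktraces generated for it.
		nano::lock_guard<nano::mutex> lock (mutex);
		hashes.insert (hash_a);
	}
	size_t size ()
	{
		nano::lock_guard<nano::mutex> lock (mutex);
		return hashes.size ();
	}

private:
	std::unordered_set<int> hashes;
	nano::mutex mutex{ mutex_identifier (mutexes::gap_cache) };
};

gap_cache and confirmation_height_processor are the identifiers visible in this part of the diff; most other members stay a plain default-constructed nano::mutex.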

@ -65,7 +65,7 @@ void nano::confirmation_height_unbounded::process ()
debug_assert (current == original_block->hash ()); debug_assert (current == original_block->hash ());
// This is the original block passed so can use it directly // This is the original block passed so can use it directly
block = original_block; block = original_block;
nano::lock_guard<std::mutex> guard (block_cache_mutex); nano::lock_guard<nano::mutex> guard (block_cache_mutex);
block_cache[original_block->hash ()] = original_block; block_cache[original_block->hash ()] = original_block;
} }
else else
@ -207,7 +207,7 @@ void nano::confirmation_height_unbounded::collect_unconfirmed_receive_and_source
{ {
debug_assert (hash == hash_a); debug_assert (hash == hash_a);
block = block_a; block = block_a;
nano::lock_guard<std::mutex> guard (block_cache_mutex); nano::lock_guard<nano::mutex> guard (block_cache_mutex);
block_cache[hash] = block_a; block_cache[hash] = block_a;
} }
else else
@ -412,7 +412,7 @@ void nano::confirmation_height_unbounded::cement_blocks (nano::write_guard & sco
// Reverse it so that the callbacks start from the lowest newly cemented block and move upwards // Reverse it so that the callbacks start from the lowest newly cemented block and move upwards
std::reverse (pending.block_callback_data.begin (), pending.block_callback_data.end ()); std::reverse (pending.block_callback_data.begin (), pending.block_callback_data.end ());
nano::lock_guard<std::mutex> guard (block_cache_mutex); nano::lock_guard<nano::mutex> guard (block_cache_mutex);
std::transform (pending.block_callback_data.begin (), pending.block_callback_data.end (), std::back_inserter (cemented_blocks), [& block_cache = block_cache](auto const & hash_a) { std::transform (pending.block_callback_data.begin (), pending.block_callback_data.end (), std::back_inserter (cemented_blocks), [& block_cache = block_cache](auto const & hash_a) {
debug_assert (block_cache.count (hash_a) == 1); debug_assert (block_cache.count (hash_a) == 1);
return block_cache.at (hash_a); return block_cache.at (hash_a);
@ -440,7 +440,7 @@ void nano::confirmation_height_unbounded::cement_blocks (nano::write_guard & sco
std::shared_ptr<nano::block> nano::confirmation_height_unbounded::get_block_and_sideband (nano::block_hash const & hash_a, nano::transaction const & transaction_a) std::shared_ptr<nano::block> nano::confirmation_height_unbounded::get_block_and_sideband (nano::block_hash const & hash_a, nano::transaction const & transaction_a)
{ {
nano::lock_guard<std::mutex> guard (block_cache_mutex); nano::lock_guard<nano::mutex> guard (block_cache_mutex);
auto block_cache_it = block_cache.find (hash_a); auto block_cache_it = block_cache.find (hash_a);
if (block_cache_it != block_cache.cend ()) if (block_cache_it != block_cache.cend ())
{ {
@ -468,20 +468,20 @@ void nano::confirmation_height_unbounded::clear_process_vars ()
implicit_receive_cemented_mapping.clear (); implicit_receive_cemented_mapping.clear ();
implicit_receive_cemented_mapping_size = 0; implicit_receive_cemented_mapping_size = 0;
{ {
nano::lock_guard<std::mutex> guard (block_cache_mutex); nano::lock_guard<nano::mutex> guard (block_cache_mutex);
block_cache.clear (); block_cache.clear ();
} }
} }
bool nano::confirmation_height_unbounded::has_iterated_over_block (nano::block_hash const & hash_a) const bool nano::confirmation_height_unbounded::has_iterated_over_block (nano::block_hash const & hash_a) const
{ {
nano::lock_guard<std::mutex> guard (block_cache_mutex); nano::lock_guard<nano::mutex> guard (block_cache_mutex);
return block_cache.count (hash_a) == 1; return block_cache.count (hash_a) == 1;
} }
uint64_t nano::confirmation_height_unbounded::block_cache_size () const uint64_t nano::confirmation_height_unbounded::block_cache_size () const
{ {
nano::lock_guard<std::mutex> guard (block_cache_mutex); nano::lock_guard<nano::mutex> guard (block_cache_mutex);
return block_cache.size (); return block_cache.size ();
} }


@ -71,7 +71,7 @@ private:
std::unordered_map<nano::block_hash, std::weak_ptr<conf_height_details>> implicit_receive_cemented_mapping; std::unordered_map<nano::block_hash, std::weak_ptr<conf_height_details>> implicit_receive_cemented_mapping;
nano::relaxed_atomic_integral<uint64_t> implicit_receive_cemented_mapping_size{ 0 }; nano::relaxed_atomic_integral<uint64_t> implicit_receive_cemented_mapping_size{ 0 };
mutable std::mutex block_cache_mutex; mutable nano::mutex block_cache_mutex;
std::unordered_map<nano::block_hash, std::shared_ptr<nano::block>> block_cache; std::unordered_map<nano::block_hash, std::shared_ptr<nano::block>> block_cache;
uint64_t block_cache_size () const; uint64_t block_cache_size () const;

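block_cache_mutex above is declared mutable so that const accessors such as has_iterated_over_block and block_cache_size can still take the lock after the change to nano::mutex. In miniature (the cache class here is hypothetical):

#include <nano/lib/locks.hpp>

#include <unordered_map>

class cache final
{
public:
	size_t size () const
	{
		// a const member function may lock a mutable nano::mutex
		nano::lock_guard<nano::mutex> guard (cache_mutex);
		return entries.size ();
	}

private:
	mutable nano::mutex cache_mutex;
	std::unordered_map<int, int> entries;
};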

@ -129,7 +129,7 @@ void nano::distributed_work::do_request (nano::tcp_endpoint const & endpoint_a)
auto this_l (shared_from_this ()); auto this_l (shared_from_this ());
auto connection (std::make_shared<peer_request> (node.io_ctx, endpoint_a)); auto connection (std::make_shared<peer_request> (node.io_ctx, endpoint_a));
{ {
nano::lock_guard<std::mutex> lock (mutex); nano::lock_guard<nano::mutex> lock (mutex);
connections.emplace_back (connection); connections.emplace_back (connection);
} }
connection->socket.async_connect (connection->endpoint, connection->socket.async_connect (connection->endpoint,
@ -271,7 +271,7 @@ void nano::distributed_work::stop_once (bool const local_stop_a)
{ {
if (!stopped.exchange (true)) if (!stopped.exchange (true))
{ {
nano::lock_guard<std::mutex> guard (mutex); nano::lock_guard<nano::mutex> guard (mutex);
if (local_stop_a && node.local_work_generation_enabled ()) if (local_stop_a && node.local_work_generation_enabled ())
{ {
node.work.cancel (request.root); node.work.cancel (request.root);
@ -389,6 +389,6 @@ void nano::distributed_work::handle_failure ()
void nano::distributed_work::add_bad_peer (nano::tcp_endpoint const & endpoint_a) void nano::distributed_work::add_bad_peer (nano::tcp_endpoint const & endpoint_a)
{ {
nano::lock_guard<std::mutex> guard (mutex); nano::lock_guard<nano::mutex> guard (mutex);
bad_peers.emplace_back (boost::str (boost::format ("%1%:%2%") % endpoint_a.address () % endpoint_a.port ())); bad_peers.emplace_back (boost::str (boost::format ("%1%:%2%") % endpoint_a.address () % endpoint_a.port ()));
} }


@ -104,7 +104,7 @@ private:
std::vector<std::string> bad_peers; // websocket std::vector<std::string> bad_peers; // websocket
std::string winner; // websocket std::string winner; // websocket
std::mutex mutex; nano::mutex mutex;
std::atomic<unsigned> resolved_extra{ 0 }; std::atomic<unsigned> resolved_extra{ 0 };
std::atomic<unsigned> failures{ 0 }; std::atomic<unsigned> failures{ 0 };
std::atomic<bool> finished{ false }; std::atomic<bool> finished{ false };


@ -27,7 +27,7 @@ bool nano::distributed_work_factory::make (std::chrono::seconds const & backoff_
{ {
auto distributed (std::make_shared<nano::distributed_work> (node, request_a, backoff_a)); auto distributed (std::make_shared<nano::distributed_work> (node, request_a, backoff_a));
{ {
nano::lock_guard<std::mutex> guard (mutex); nano::lock_guard<nano::mutex> guard (mutex);
items.emplace (request_a.root, distributed); items.emplace (request_a.root, distributed);
} }
distributed->start (); distributed->start ();
@ -39,7 +39,7 @@ bool nano::distributed_work_factory::make (std::chrono::seconds const & backoff_
void nano::distributed_work_factory::cancel (nano::root const & root_a) void nano::distributed_work_factory::cancel (nano::root const & root_a)
{ {
nano::lock_guard<std::mutex> guard_l (mutex); nano::lock_guard<nano::mutex> guard_l (mutex);
auto root_items_l = items.equal_range (root_a); auto root_items_l = items.equal_range (root_a);
std::for_each (root_items_l.first, root_items_l.second, [](auto item_l) { std::for_each (root_items_l.first, root_items_l.second, [](auto item_l) {
if (auto distributed_l = item_l.second.lock ()) if (auto distributed_l = item_l.second.lock ())
@ -53,7 +53,7 @@ void nano::distributed_work_factory::cancel (nano::root const & root_a)
void nano::distributed_work_factory::cleanup_finished () void nano::distributed_work_factory::cleanup_finished ()
{ {
nano::lock_guard<std::mutex> guard (mutex); nano::lock_guard<nano::mutex> guard (mutex);
// std::erase_if in c++20 // std::erase_if in c++20
auto erase_if = [](decltype (items) & container, auto pred) { auto erase_if = [](decltype (items) & container, auto pred) {
for (auto it = container.begin (), end = container.end (); it != end;) for (auto it = container.begin (), end = container.end (); it != end;)
@ -76,7 +76,7 @@ void nano::distributed_work_factory::stop ()
if (!stopped.exchange (true)) if (!stopped.exchange (true))
{ {
// Cancel any ongoing work // Cancel any ongoing work
nano::lock_guard<std::mutex> guard (mutex); nano::lock_guard<nano::mutex> guard (mutex);
for (auto & item_l : items) for (auto & item_l : items)
{ {
if (auto distributed_l = item_l.second.lock ()) if (auto distributed_l = item_l.second.lock ())
@ -90,7 +90,7 @@ void nano::distributed_work_factory::stop ()
size_t nano::distributed_work_factory::size () const size_t nano::distributed_work_factory::size () const
{ {
nano::lock_guard<std::mutex> guard_l (mutex); nano::lock_guard<nano::mutex> guard_l (mutex);
return items.size (); return items.size ();
} }

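cleanup_finished above notes that it is hand-rolling std::erase_if because that helper only arrives in C++20; for an unordered_multimap of weak_ptrs the loop usually looks like the following, taken under the factory mutex (the item type and the expiry predicate are stand-ins for the real ones):

#include <nano/lib/locks.hpp>

#include <memory>
#include <unordered_map>

struct work_item
{
};

class factory final
{
public:
	void cleanup_finished ()
	{
		nano::lock_guard<nano::mutex> guard (mutex);
		// erase entries whose work object has already been destroyed
		for (auto it = items.begin (), end = items.end (); it != end;)
		{
			if (it->second.expired ())
			{
				it = items.erase (it); // erase returns the next valid iterator
			}
			else
			{
				++it;
			}
		}
	}

private:
	std::unordered_multimap<int, std::weak_ptr<work_item>> items;
	nano::mutex mutex;
};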

@ -32,7 +32,7 @@ private:
std::unordered_multimap<nano::root, std::weak_ptr<nano::distributed_work>> items; std::unordered_multimap<nano::root, std::weak_ptr<nano::distributed_work>> items;
nano::node & node; nano::node & node;
mutable std::mutex mutex; mutable nano::mutex mutex;
std::atomic<bool> stopped{ false }; std::atomic<bool> stopped{ false };
friend std::unique_ptr<container_info_component> collect_container_info (distributed_work_factory &, const std::string &); friend std::unique_ptr<container_info_component> collect_container_info (distributed_work_factory &, const std::string &);


@ -33,11 +33,11 @@ qualified_root (block_a->qualified_root ())
last_blocks.emplace (block_a->hash (), block_a); last_blocks.emplace (block_a->hash (), block_a);
} }
void nano::election::confirm_once (nano::unique_lock<std::mutex> & lock_a, nano::election_status_type type_a) void nano::election::confirm_once (nano::unique_lock<nano::mutex> & lock_a, nano::election_status_type type_a)
{ {
debug_assert (lock_a.owns_lock ()); debug_assert (lock_a.owns_lock ());
// This must be kept above the setting of election state, as dependent confirmed elections require up to date changes to election_winner_details // This must be kept above the setting of election state, as dependent confirmed elections require up to date changes to election_winner_details
nano::unique_lock<std::mutex> election_winners_lk (node.active.election_winner_details_mutex); nano::unique_lock<nano::mutex> election_winners_lk (node.active.election_winner_details_mutex);
if (state_m.exchange (nano::election::state_t::confirmed) != nano::election::state_t::confirmed && (node.active.election_winner_details.count (status.winner->hash ()) == 0)) if (state_m.exchange (nano::election::state_t::confirmed) != nano::election::state_t::confirmed && (node.active.election_winner_details.count (status.winner->hash ()) == 0))
{ {
node.active.election_winner_details.emplace (status.winner->hash (), shared_from_this ()); node.active.election_winner_details.emplace (status.winner->hash (), shared_from_this ());
@ -140,7 +140,7 @@ void nano::election::send_confirm_req (nano::confirmation_solicitor & solicitor_
{ {
if ((base_latency () * (optimistic () ? 10 : 5)) < (std::chrono::steady_clock::now () - last_req)) if ((base_latency () * (optimistic () ? 10 : 5)) < (std::chrono::steady_clock::now () - last_req))
{ {
nano::lock_guard<std::mutex> guard (mutex); nano::lock_guard<nano::mutex> guard (mutex);
if (!solicitor_a.add (*this)) if (!solicitor_a.add (*this))
{ {
last_req = std::chrono::steady_clock::now (); last_req = std::chrono::steady_clock::now ();
@ -168,7 +168,7 @@ void nano::election::broadcast_block (nano::confirmation_solicitor & solicitor_a
{ {
if (base_latency () * 15 < std::chrono::steady_clock::now () - last_block) if (base_latency () * 15 < std::chrono::steady_clock::now () - last_block)
{ {
nano::lock_guard<std::mutex> guard (mutex); nano::lock_guard<nano::mutex> guard (mutex);
if (!solicitor_a.broadcast (*this)) if (!solicitor_a.broadcast (*this))
{ {
last_block = std::chrono::steady_clock::now (); last_block = std::chrono::steady_clock::now ();
@ -214,7 +214,7 @@ bool nano::election::transition_time (nano::confirmation_solicitor & solicitor_a
auto const expire_time = std::chrono::milliseconds (optimistic () ? optimistic_expiration_time : 5 * 60 * 1000); auto const expire_time = std::chrono::milliseconds (optimistic () ? optimistic_expiration_time : 5 * 60 * 1000);
if (!confirmed () && expire_time < std::chrono::steady_clock::now () - election_start) if (!confirmed () && expire_time < std::chrono::steady_clock::now () - election_start)
{ {
nano::lock_guard<std::mutex> guard (mutex); nano::lock_guard<nano::mutex> guard (mutex);
// It is possible the election confirmed while acquiring the mutex // It is possible the election confirmed while acquiring the mutex
// state_change returning true would indicate it // state_change returning true would indicate it
if (!state_change (state_m.load (), nano::election::state_t::expired_unconfirmed)) if (!state_change (state_m.load (), nano::election::state_t::expired_unconfirmed))
@ -243,7 +243,7 @@ bool nano::election::have_quorum (nano::tally_t const & tally_a) const
nano::tally_t nano::election::tally () const nano::tally_t nano::election::tally () const
{ {
nano::lock_guard<std::mutex> guard (mutex); nano::lock_guard<nano::mutex> guard (mutex);
return tally_impl (); return tally_impl ();
} }
@ -267,7 +267,7 @@ nano::tally_t nano::election::tally_impl () const
return result; return result;
} }
void nano::election::confirm_if_quorum (nano::unique_lock<std::mutex> & lock_a) void nano::election::confirm_if_quorum (nano::unique_lock<nano::mutex> & lock_a)
{ {
debug_assert (lock_a.owns_lock ()); debug_assert (lock_a.owns_lock ());
auto tally_l (tally_impl ()); auto tally_l (tally_impl ());
@ -320,7 +320,7 @@ void nano::election::log_votes (nano::tally_t const & tally_a, std::string const
std::shared_ptr<nano::block> nano::election::find (nano::block_hash const & hash_a) const std::shared_ptr<nano::block> nano::election::find (nano::block_hash const & hash_a) const
{ {
std::shared_ptr<nano::block> result; std::shared_ptr<nano::block> result;
nano::lock_guard<std::mutex> guard (mutex); nano::lock_guard<nano::mutex> guard (mutex);
if (auto existing = last_blocks.find (hash_a); existing != last_blocks.end ()) if (auto existing = last_blocks.find (hash_a); existing != last_blocks.end ())
{ {
result = existing->second; result = existing->second;
@ -350,7 +350,7 @@ nano::election_vote_result nano::election::vote (nano::account const & rep, uint
cooldown = 1; cooldown = 1;
} }
nano::unique_lock<std::mutex> lock (mutex); nano::unique_lock<nano::mutex> lock (mutex);
auto last_vote_it (last_votes.find (rep)); auto last_vote_it (last_votes.find (rep));
if (last_vote_it == last_votes.end ()) if (last_vote_it == last_votes.end ())
@ -385,7 +385,7 @@ nano::election_vote_result nano::election::vote (nano::account const & rep, uint
bool nano::election::publish (std::shared_ptr<nano::block> const & block_a) bool nano::election::publish (std::shared_ptr<nano::block> const & block_a)
{ {
nano::unique_lock<std::mutex> lock (mutex); nano::unique_lock<nano::mutex> lock (mutex);
// Do not insert new blocks if already confirmed // Do not insert new blocks if already confirmed
auto result (confirmed ()); auto result (confirmed ());
@ -426,7 +426,7 @@ bool nano::election::publish (std::shared_ptr<nano::block> const & block_a)
nano::election_cleanup_info nano::election::cleanup_info () const nano::election_cleanup_info nano::election::cleanup_info () const
{ {
nano::lock_guard<std::mutex> guard (mutex); nano::lock_guard<nano::mutex> guard (mutex);
return cleanup_info_impl (); return cleanup_info_impl ();
} }
@ -442,7 +442,7 @@ nano::election_cleanup_info nano::election::cleanup_info_impl () const
size_t nano::election::insert_inactive_votes_cache (nano::inactive_cache_information const & cache_a) size_t nano::election::insert_inactive_votes_cache (nano::inactive_cache_information const & cache_a)
{ {
nano::unique_lock<std::mutex> lock (mutex); nano::unique_lock<nano::mutex> lock (mutex);
for (auto const & rep : cache_a.voters) for (auto const & rep : cache_a.voters)
{ {
auto inserted (last_votes.emplace (rep, nano::vote_info{ std::chrono::steady_clock::time_point::min (), 0, cache_a.hash })); auto inserted (last_votes.emplace (rep, nano::vote_info{ std::chrono::steady_clock::time_point::min (), 0, cache_a.hash }));
@ -492,7 +492,7 @@ void nano::election::prioritize (nano::vote_generator_session & generator_sessio
nano::election_extended_status nano::election::current_status () const nano::election_extended_status nano::election::current_status () const
{ {
nano::lock_guard<std::mutex> guard (mutex); nano::lock_guard<nano::mutex> guard (mutex);
nano::election_status status_l = status; nano::election_status status_l = status;
status_l.confirmation_request_count = confirmation_request_count; status_l.confirmation_request_count = confirmation_request_count;
status_l.block_count = nano::narrow_cast<decltype (status_l.block_count)> (last_blocks.size ()); status_l.block_count = nano::narrow_cast<decltype (status_l.block_count)> (last_blocks.size ());
@ -502,7 +502,7 @@ nano::election_extended_status nano::election::current_status () const
std::shared_ptr<nano::block> nano::election::winner () const std::shared_ptr<nano::block> nano::election::winner () const
{ {
nano::lock_guard<std::mutex> guard (mutex); nano::lock_guard<nano::mutex> guard (mutex);
return status.winner; return status.winner;
} }
@ -554,7 +554,7 @@ void nano::election::remove_block (nano::block_hash const & hash_a)
} }
} }
bool nano::election::replace_by_weight (nano::unique_lock<std::mutex> & lock_a, nano::block_hash const & hash_a) bool nano::election::replace_by_weight (nano::unique_lock<nano::mutex> & lock_a, nano::block_hash const & hash_a)
{ {
debug_assert (lock_a.owns_lock ()); debug_assert (lock_a.owns_lock ());
nano::block_hash replaced_block (0); nano::block_hash replaced_block (0);
@ -612,20 +612,20 @@ bool nano::election::replace_by_weight (nano::unique_lock<std::mutex> & lock_a,
void nano::election::force_confirm (nano::election_status_type type_a) void nano::election::force_confirm (nano::election_status_type type_a)
{ {
release_assert (node.network_params.network.is_dev_network ()); release_assert (node.network_params.network.is_dev_network ());
nano::unique_lock<std::mutex> lock (mutex); nano::unique_lock<nano::mutex> lock (mutex);
confirm_once (lock, type_a); confirm_once (lock, type_a);
} }
std::unordered_map<nano::block_hash, std::shared_ptr<nano::block>> nano::election::blocks () const std::unordered_map<nano::block_hash, std::shared_ptr<nano::block>> nano::election::blocks () const
{ {
debug_assert (node.network_params.network.is_dev_network ()); debug_assert (node.network_params.network.is_dev_network ());
nano::lock_guard<std::mutex> guard (mutex); nano::lock_guard<nano::mutex> guard (mutex);
return last_blocks; return last_blocks;
} }
std::unordered_map<nano::account, nano::vote_info> nano::election::votes () const std::unordered_map<nano::account, nano::vote_info> nano::election::votes () const
{ {
debug_assert (node.network_params.network.is_dev_network ()); debug_assert (node.network_params.network.is_dev_network ());
nano::lock_guard<std::mutex> guard (mutex); nano::lock_guard<nano::mutex> guard (mutex);
return last_votes; return last_votes;
} }


@ -109,7 +109,7 @@ public: // Interface
bool publish (std::shared_ptr<nano::block> const & block_a); bool publish (std::shared_ptr<nano::block> const & block_a);
size_t insert_inactive_votes_cache (nano::inactive_cache_information const &); size_t insert_inactive_votes_cache (nano::inactive_cache_information const &);
// Confirm this block if quorum is met // Confirm this block if quorum is met
void confirm_if_quorum (nano::unique_lock<std::mutex> &); void confirm_if_quorum (nano::unique_lock<nano::mutex> &);
void prioritize (nano::vote_generator_session &); void prioritize (nano::vote_generator_session &);
nano::election_cleanup_info cleanup_info () const; nano::election_cleanup_info cleanup_info () const;
@ -121,14 +121,14 @@ public: // Information
private: private:
nano::tally_t tally_impl () const; nano::tally_t tally_impl () const;
// lock_a does not own the mutex on return // lock_a does not own the mutex on return
void confirm_once (nano::unique_lock<std::mutex> & lock_a, nano::election_status_type = nano::election_status_type::active_confirmed_quorum); void confirm_once (nano::unique_lock<nano::mutex> & lock_a, nano::election_status_type = nano::election_status_type::active_confirmed_quorum);
void broadcast_block (nano::confirmation_solicitor &); void broadcast_block (nano::confirmation_solicitor &);
void send_confirm_req (nano::confirmation_solicitor &); void send_confirm_req (nano::confirmation_solicitor &);
// Calculate votes for local representatives // Calculate votes for local representatives
void generate_votes () const; void generate_votes () const;
void remove_votes (nano::block_hash const &); void remove_votes (nano::block_hash const &);
void remove_block (nano::block_hash const &); void remove_block (nano::block_hash const &);
bool replace_by_weight (nano::unique_lock<std::mutex> & lock_a, nano::block_hash const &); bool replace_by_weight (nano::unique_lock<nano::mutex> & lock_a, nano::block_hash const &);
nano::election_cleanup_info cleanup_info_impl () const; nano::election_cleanup_info cleanup_info_impl () const;
private: private:
@ -140,7 +140,7 @@ private:
std::chrono::steady_clock::time_point const election_start = { std::chrono::steady_clock::now () }; std::chrono::steady_clock::time_point const election_start = { std::chrono::steady_clock::now () };
nano::node & node; nano::node & node;
mutable std::mutex mutex; mutable nano::mutex mutex;
static std::chrono::seconds constexpr late_blocks_delay{ 5 }; static std::chrono::seconds constexpr late_blocks_delay{ 5 };
static size_t constexpr max_blocks{ 10 }; static size_t constexpr max_blocks{ 10 };

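The comment above ("lock_a does not own the mutex on return") describes a contract several of these nano::unique_lock<nano::mutex> & parameters carry: the callee finishes its protected work and then releases the caller's lock, presumably so follow-up notifications can run without the election mutex held. A stripped-down sketch of a method with that contract (the class, flag and callback are placeholders, not the real election code):

#include <nano/lib/locks.hpp>

#include <functional>

class election_like final
{
public:
	std::function<void ()> on_confirmed;

	// lock_a does not own the mutex on return
	void confirm_once (nano::unique_lock<nano::mutex> & lock_a)
	{
		confirmed = true; // protected state changes happen while the caller's lock is held
		lock_a.unlock ();
		if (on_confirmed)
		{
			on_confirmed (); // callbacks run outside the mutex and may safely re-enter this object
		}
	}

private:
	bool confirmed{ false };
	nano::mutex mutex;
};

The caller is expected to have built the lock on this same mutex, e.g. nano::unique_lock<nano::mutex> lock (mutex); confirm_once (lock);, and must not touch protected members afterwards without re-locking.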

@ -11,7 +11,7 @@ node (node_a)
void nano::gap_cache::add (nano::block_hash const & hash_a, std::chrono::steady_clock::time_point time_point_a) void nano::gap_cache::add (nano::block_hash const & hash_a, std::chrono::steady_clock::time_point time_point_a)
{ {
nano::lock_guard<std::mutex> lock (mutex); nano::lock_guard<nano::mutex> lock (mutex);
auto existing (blocks.get<tag_hash> ().find (hash_a)); auto existing (blocks.get<tag_hash> ().find (hash_a));
if (existing != blocks.get<tag_hash> ().end ()) if (existing != blocks.get<tag_hash> ().end ())
{ {
@ -31,13 +31,13 @@ void nano::gap_cache::add (nano::block_hash const & hash_a, std::chrono::steady_
void nano::gap_cache::erase (nano::block_hash const & hash_a) void nano::gap_cache::erase (nano::block_hash const & hash_a)
{ {
nano::lock_guard<std::mutex> lock (mutex); nano::lock_guard<nano::mutex> lock (mutex);
blocks.get<tag_hash> ().erase (hash_a); blocks.get<tag_hash> ().erase (hash_a);
} }
void nano::gap_cache::vote (std::shared_ptr<nano::vote> const & vote_a) void nano::gap_cache::vote (std::shared_ptr<nano::vote> const & vote_a)
{ {
nano::lock_guard<std::mutex> lock (mutex); nano::lock_guard<nano::mutex> lock (mutex);
for (auto hash : *vote_a) for (auto hash : *vote_a)
{ {
auto & gap_blocks_by_hash (blocks.get<tag_hash> ()); auto & gap_blocks_by_hash (blocks.get<tag_hash> ());
@ -123,7 +123,7 @@ nano::uint128_t nano::gap_cache::bootstrap_threshold ()
size_t nano::gap_cache::size () size_t nano::gap_cache::size ()
{ {
nano::lock_guard<std::mutex> lock (mutex); nano::lock_guard<nano::mutex> lock (mutex);
return blocks.size (); return blocks.size ();
} }


@ -54,7 +54,7 @@ public:
ordered_gaps blocks; ordered_gaps blocks;
// clang-format on // clang-format on
size_t const max = 256; size_t const max = 256;
std::mutex mutex; nano::mutex mutex{ mutex_identifier (mutexes::gap_cache) };
nano::node & node; nano::node & node;
}; };


@ -92,7 +92,7 @@ void nano::ipc::access::clear ()
nano::error nano::ipc::access::deserialize_toml (nano::tomlconfig & toml) nano::error nano::ipc::access::deserialize_toml (nano::tomlconfig & toml)
{ {
nano::unique_lock<std::mutex> lock (mutex); nano::unique_lock<nano::mutex> lock (mutex);
clear (); clear ();
nano::error error; nano::error error;
@ -208,7 +208,7 @@ nano::error nano::ipc::access::deserialize_toml (nano::tomlconfig & toml)
bool nano::ipc::access::has_access (std::string const & credentials_a, nano::ipc::access_permission permssion_a) const bool nano::ipc::access::has_access (std::string const & credentials_a, nano::ipc::access_permission permssion_a) const
{ {
nano::unique_lock<std::mutex> lock (mutex); nano::unique_lock<nano::mutex> lock (mutex);
bool permitted = false; bool permitted = false;
auto user = users.find (credentials_a); auto user = users.find (credentials_a);
if (user != users.end ()) if (user != users.end ())
@ -224,7 +224,7 @@ bool nano::ipc::access::has_access (std::string const & credentials_a, nano::ipc
bool nano::ipc::access::has_access_to_all (std::string const & credentials_a, std::initializer_list<nano::ipc::access_permission> permissions_a) const bool nano::ipc::access::has_access_to_all (std::string const & credentials_a, std::initializer_list<nano::ipc::access_permission> permissions_a) const
{ {
nano::unique_lock<std::mutex> lock (mutex); nano::unique_lock<nano::mutex> lock (mutex);
bool permitted = false; bool permitted = false;
auto user = users.find (credentials_a); auto user = users.find (credentials_a);
if (user != users.end ()) if (user != users.end ())
@ -243,7 +243,7 @@ bool nano::ipc::access::has_access_to_all (std::string const & credentials_a, st
bool nano::ipc::access::has_access_to_oneof (std::string const & credentials_a, std::initializer_list<nano::ipc::access_permission> permissions_a) const bool nano::ipc::access::has_access_to_oneof (std::string const & credentials_a, std::initializer_list<nano::ipc::access_permission> permissions_a) const
{ {
nano::unique_lock<std::mutex> lock (mutex); nano::unique_lock<nano::mutex> lock (mutex);
bool permitted = false; bool permitted = false;
auto user = users.find (credentials_a); auto user = users.find (credentials_a);
if (user != users.end ()) if (user != users.end ())


@ -2,8 +2,8 @@
#include <nano/lib/config.hpp> #include <nano/lib/config.hpp>
#include <nano/lib/errors.hpp> #include <nano/lib/errors.hpp>
#include <nano/lib/locks.hpp>
#include <mutex>
#include <string> #include <string>
#include <thread> #include <thread>
#include <unordered_map> #include <unordered_map>
@ -125,7 +125,7 @@ namespace ipc
*/ */
access_user default_user; access_user default_user;
/** The config can be externally reloaded and concurrently accessed */ /** The config can be externally reloaded and concurrently accessed */
mutable std::mutex mutex; mutable nano::mutex mutex;
}; };
nano::error read_access_config_toml (boost::filesystem::path const & data_path_a, nano::ipc::access & config_a); nano::error read_access_config_toml (boost::filesystem::path const & data_path_a, nano::ipc::access & config_a);

View file

@ -128,8 +128,8 @@ public:
std::weak_ptr<session> session_m; std::weak_ptr<session> session_m;
}; };
static std::mutex subscriber_mutex; static nano::mutex subscriber_mutex;
nano::unique_lock<std::mutex> lock (subscriber_mutex); nano::unique_lock<nano::mutex> lock (subscriber_mutex);
if (!subscriber) if (!subscriber)
{ {

View file

@ -1766,7 +1766,7 @@ void nano::json_handler::bootstrap_status ()
response_l.put ("total_attempts_count", std::to_string (node.bootstrap_initiator.attempts.incremental)); response_l.put ("total_attempts_count", std::to_string (node.bootstrap_initiator.attempts.incremental));
boost::property_tree::ptree connections; boost::property_tree::ptree connections;
{ {
nano::lock_guard<std::mutex> connections_lock (node.bootstrap_initiator.connections->mutex); nano::lock_guard<nano::mutex> connections_lock (node.bootstrap_initiator.connections->mutex);
connections.put ("clients", std::to_string (node.bootstrap_initiator.connections->clients.size ())); connections.put ("clients", std::to_string (node.bootstrap_initiator.connections->clients.size ()));
connections.put ("connections", std::to_string (node.bootstrap_initiator.connections->connections_count)); connections.put ("connections", std::to_string (node.bootstrap_initiator.connections->connections_count));
connections.put ("idle", std::to_string (node.bootstrap_initiator.connections->idle.size ())); connections.put ("idle", std::to_string (node.bootstrap_initiator.connections->idle.size ()));
@ -1776,7 +1776,7 @@ void nano::json_handler::bootstrap_status ()
response_l.add_child ("connections", connections); response_l.add_child ("connections", connections);
boost::property_tree::ptree attempts; boost::property_tree::ptree attempts;
{ {
nano::lock_guard<std::mutex> attempts_lock (node.bootstrap_initiator.attempts.bootstrap_attempts_mutex); nano::lock_guard<nano::mutex> attempts_lock (node.bootstrap_initiator.attempts.bootstrap_attempts_mutex);
for (auto i : node.bootstrap_initiator.attempts.attempts) for (auto i : node.bootstrap_initiator.attempts.attempts)
{ {
boost::property_tree::ptree entry; boost::property_tree::ptree entry;

View file

@ -138,7 +138,7 @@ void nano::mdb_txn_tracker::serialize_json (boost::property_tree::ptree & json,
std::vector<mdb_txn_stats> copy_stats; std::vector<mdb_txn_stats> copy_stats;
std::vector<bool> are_writes; std::vector<bool> are_writes;
{ {
nano::lock_guard<std::mutex> guard (mutex); nano::lock_guard<nano::mutex> guard (mutex);
copy_stats = stats; copy_stats = stats;
are_writes.reserve (stats.size ()); are_writes.reserve (stats.size ());
std::transform (stats.cbegin (), stats.cend (), std::back_inserter (are_writes), [](auto & mdb_txn_stat) { std::transform (stats.cbegin (), stats.cend (), std::back_inserter (are_writes), [](auto & mdb_txn_stat) {
@ -209,7 +209,7 @@ void nano::mdb_txn_tracker::log_if_held_long_enough (nano::mdb_txn_stats const &
void nano::mdb_txn_tracker::add (const nano::transaction_impl * transaction_impl) void nano::mdb_txn_tracker::add (const nano::transaction_impl * transaction_impl)
{ {
nano::lock_guard<std::mutex> guard (mutex); nano::lock_guard<nano::mutex> guard (mutex);
debug_assert (std::find_if (stats.cbegin (), stats.cend (), matches_txn (transaction_impl)) == stats.cend ()); debug_assert (std::find_if (stats.cbegin (), stats.cend (), matches_txn (transaction_impl)) == stats.cend ());
stats.emplace_back (transaction_impl); stats.emplace_back (transaction_impl);
} }
@ -217,7 +217,7 @@ void nano::mdb_txn_tracker::add (const nano::transaction_impl * transaction_impl
/** Can be called without error if transaction does not exist */ /** Can be called without error if transaction does not exist */
void nano::mdb_txn_tracker::erase (const nano::transaction_impl * transaction_impl) void nano::mdb_txn_tracker::erase (const nano::transaction_impl * transaction_impl)
{ {
nano::unique_lock<std::mutex> lk (mutex); nano::unique_lock<nano::mutex> lk (mutex);
auto it = std::find_if (stats.begin (), stats.end (), matches_txn (transaction_impl)); auto it = std::find_if (stats.begin (), stats.end (), matches_txn (transaction_impl));
if (it != stats.end ()) if (it != stats.end ())
{ {

View file

@ -73,7 +73,7 @@ public:
void erase (const nano::transaction_impl * transaction_impl); void erase (const nano::transaction_impl * transaction_impl);
private: private:
std::mutex mutex; nano::mutex mutex;
std::vector<mdb_txn_stats> stats; std::vector<mdb_txn_stats> stats;
nano::logger_mt & logger; nano::logger_mt & logger;
nano::txn_tracking_config txn_tracking_config; nano::txn_tracking_config txn_tracking_config;

View file

@ -816,7 +816,7 @@ stopped (false)
nano::message_buffer * nano::message_buffer_manager::allocate () nano::message_buffer * nano::message_buffer_manager::allocate ()
{ {
nano::unique_lock<std::mutex> lock (mutex); nano::unique_lock<nano::mutex> lock (mutex);
if (!stopped && free.empty () && full.empty ()) if (!stopped && free.empty () && full.empty ())
{ {
stats.inc (nano::stat::type::udp, nano::stat::detail::blocking, nano::stat::dir::in); stats.inc (nano::stat::type::udp, nano::stat::detail::blocking, nano::stat::dir::in);
@ -842,7 +842,7 @@ void nano::message_buffer_manager::enqueue (nano::message_buffer * data_a)
{ {
debug_assert (data_a != nullptr); debug_assert (data_a != nullptr);
{ {
nano::lock_guard<std::mutex> lock (mutex); nano::lock_guard<nano::mutex> lock (mutex);
full.push_back (data_a); full.push_back (data_a);
} }
condition.notify_all (); condition.notify_all ();
@ -850,7 +850,7 @@ void nano::message_buffer_manager::enqueue (nano::message_buffer * data_a)
nano::message_buffer * nano::message_buffer_manager::dequeue () nano::message_buffer * nano::message_buffer_manager::dequeue ()
{ {
nano::unique_lock<std::mutex> lock (mutex); nano::unique_lock<nano::mutex> lock (mutex);
while (!stopped && full.empty ()) while (!stopped && full.empty ())
{ {
condition.wait (lock); condition.wait (lock);
@ -868,7 +868,7 @@ void nano::message_buffer_manager::release (nano::message_buffer * data_a)
{ {
debug_assert (data_a != nullptr); debug_assert (data_a != nullptr);
{ {
nano::lock_guard<std::mutex> lock (mutex); nano::lock_guard<nano::mutex> lock (mutex);
free.push_back (data_a); free.push_back (data_a);
} }
condition.notify_all (); condition.notify_all ();
@ -877,7 +877,7 @@ void nano::message_buffer_manager::release (nano::message_buffer * data_a)
void nano::message_buffer_manager::stop () void nano::message_buffer_manager::stop ()
{ {
{ {
nano::lock_guard<std::mutex> lock (mutex); nano::lock_guard<nano::mutex> lock (mutex);
stopped = true; stopped = true;
} }
condition.notify_all (); condition.notify_all ();
@ -892,7 +892,7 @@ max_entries (incoming_connections_max_a * nano::tcp_message_manager::max_entries
void nano::tcp_message_manager::put_message (nano::tcp_message_item const & item_a) void nano::tcp_message_manager::put_message (nano::tcp_message_item const & item_a)
{ {
{ {
nano::unique_lock<std::mutex> lock (mutex); nano::unique_lock<nano::mutex> lock (mutex);
while (entries.size () >= max_entries && !stopped) while (entries.size () >= max_entries && !stopped)
{ {
producer_condition.wait (lock); producer_condition.wait (lock);
@ -905,7 +905,7 @@ void nano::tcp_message_manager::put_message (nano::tcp_message_item const & item
nano::tcp_message_item nano::tcp_message_manager::get_message () nano::tcp_message_item nano::tcp_message_manager::get_message ()
{ {
nano::tcp_message_item result; nano::tcp_message_item result;
nano::unique_lock<std::mutex> lock (mutex); nano::unique_lock<nano::mutex> lock (mutex);
while (entries.empty () && !stopped) while (entries.empty () && !stopped)
{ {
consumer_condition.wait (lock); consumer_condition.wait (lock);
@ -927,7 +927,7 @@ nano::tcp_message_item nano::tcp_message_manager::get_message ()
void nano::tcp_message_manager::stop () void nano::tcp_message_manager::stop ()
{ {
{ {
nano::lock_guard<std::mutex> lock (mutex); nano::lock_guard<nano::mutex> lock (mutex);
stopped = true; stopped = true;
} }
consumer_condition.notify_all (); consumer_condition.notify_all ();
@ -943,7 +943,7 @@ boost::optional<nano::uint256_union> nano::syn_cookies::assign (nano::endpoint c
{ {
auto ip_addr (endpoint_a.address ()); auto ip_addr (endpoint_a.address ());
debug_assert (ip_addr.is_v6 ()); debug_assert (ip_addr.is_v6 ());
nano::lock_guard<std::mutex> lock (syn_cookie_mutex); nano::lock_guard<nano::mutex> lock (syn_cookie_mutex);
unsigned & ip_cookies = cookies_per_ip[ip_addr]; unsigned & ip_cookies = cookies_per_ip[ip_addr];
boost::optional<nano::uint256_union> result; boost::optional<nano::uint256_union> result;
if (ip_cookies < max_cookies_per_ip) if (ip_cookies < max_cookies_per_ip)
@ -965,7 +965,7 @@ bool nano::syn_cookies::validate (nano::endpoint const & endpoint_a, nano::accou
{ {
auto ip_addr (endpoint_a.address ()); auto ip_addr (endpoint_a.address ());
debug_assert (ip_addr.is_v6 ()); debug_assert (ip_addr.is_v6 ());
nano::lock_guard<std::mutex> lock (syn_cookie_mutex); nano::lock_guard<nano::mutex> lock (syn_cookie_mutex);
auto result (true); auto result (true);
auto cookie_it (cookies.find (endpoint_a)); auto cookie_it (cookies.find (endpoint_a));
if (cookie_it != cookies.end () && !nano::validate_message (node_id, cookie_it->second.cookie, sig)) if (cookie_it != cookies.end () && !nano::validate_message (node_id, cookie_it->second.cookie, sig))
@ -987,7 +987,7 @@ bool nano::syn_cookies::validate (nano::endpoint const & endpoint_a, nano::accou
void nano::syn_cookies::purge (std::chrono::steady_clock::time_point const & cutoff_a) void nano::syn_cookies::purge (std::chrono::steady_clock::time_point const & cutoff_a)
{ {
nano::lock_guard<std::mutex> lock (syn_cookie_mutex); nano::lock_guard<nano::mutex> lock (syn_cookie_mutex);
auto it (cookies.begin ()); auto it (cookies.begin ());
while (it != cookies.end ()) while (it != cookies.end ())
{ {
@ -1014,7 +1014,7 @@ void nano::syn_cookies::purge (std::chrono::steady_clock::time_point const & cut
size_t nano::syn_cookies::cookies_size () size_t nano::syn_cookies::cookies_size ()
{ {
nano::lock_guard<std::mutex> lock (syn_cookie_mutex); nano::lock_guard<nano::mutex> lock (syn_cookie_mutex);
return cookies.size (); return cookies.size ();
} }
@ -1033,7 +1033,7 @@ std::unique_ptr<nano::container_info_component> nano::syn_cookies::collect_conta
size_t syn_cookies_count; size_t syn_cookies_count;
size_t syn_cookies_per_ip_count; size_t syn_cookies_per_ip_count;
{ {
nano::lock_guard<std::mutex> syn_cookie_guard (syn_cookie_mutex); nano::lock_guard<nano::mutex> syn_cookie_guard (syn_cookie_mutex);
syn_cookies_count = cookies.size (); syn_cookies_count = cookies.size ();
syn_cookies_per_ip_count = cookies_per_ip.size (); syn_cookies_per_ip_count = cookies_per_ip.size ();
} }
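The network.cpp hunks above are the producer/consumer pair behind message_buffer_manager and tcp_message_manager: put blocks when the container is full, get blocks when it is empty, and stop wakes both sides. Below is a compact stand-alone sketch of that shape, using standard primitives and invented names rather than the node's classes, purely to keep the example self-contained.

#include <condition_variable>
#include <cstddef>
#include <deque>
#include <iostream>
#include <mutex>
#include <thread>

class bounded_queue
{
public:
    explicit bounded_queue (std::size_t max_entries_a) :
    max_entries (max_entries_a)
    {
    }
    void put (int item)
    {
        {
            std::unique_lock<std::mutex> lock (mutex);
            // Block the producer while the queue is at capacity, unless we are stopping.
            producer_condition.wait (lock, [this] { return entries.size () < max_entries || stopped; });
            if (!stopped)
            {
                entries.push_back (item);
            }
        }
        consumer_condition.notify_one ();
    }
    bool get (int & out)
    {
        std::unique_lock<std::mutex> lock (mutex);
        // Block the consumer until an item arrives or stop () is called.
        consumer_condition.wait (lock, [this] { return !entries.empty () || stopped; });
        if (entries.empty ())
        {
            return false; // stopped and drained
        }
        out = entries.front ();
        entries.pop_front ();
        producer_condition.notify_one ();
        return true;
    }
    void stop ()
    {
        {
            std::lock_guard<std::mutex> lock (mutex);
            stopped = true;
        }
        producer_condition.notify_all ();
        consumer_condition.notify_all ();
    }

private:
    std::mutex mutex;
    std::condition_variable producer_condition;
    std::condition_variable consumer_condition;
    std::deque<int> entries;
    std::size_t max_entries;
    bool stopped{ false };
};

int main ()
{
    bounded_queue queue (4);
    std::thread consumer ([&] {
        int item;
        while (queue.get (item))
        {
            std::cout << item << '\n';
        }
    });
    for (int i = 0; i < 8; ++i)
    {
        queue.put (i);
    }
    queue.stop ();
    consumer.join ();
}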

View file

@ -58,7 +58,7 @@ public:
private: private:
nano::stat & stats; nano::stat & stats;
std::mutex mutex; nano::mutex mutex;
nano::condition_variable condition; nano::condition_variable condition;
boost::circular_buffer<nano::message_buffer *> free; boost::circular_buffer<nano::message_buffer *> free;
boost::circular_buffer<nano::message_buffer *> full; boost::circular_buffer<nano::message_buffer *> full;
@ -76,7 +76,7 @@ public:
void stop (); void stop ();
private: private:
std::mutex mutex; nano::mutex mutex;
nano::condition_variable producer_condition; nano::condition_variable producer_condition;
nano::condition_variable consumer_condition; nano::condition_variable consumer_condition;
std::deque<nano::tcp_message_item> entries; std::deque<nano::tcp_message_item> entries;
@ -110,7 +110,7 @@ private:
nano::uint256_union cookie; nano::uint256_union cookie;
std::chrono::steady_clock::time_point created_at; std::chrono::steady_clock::time_point created_at;
}; };
mutable std::mutex syn_cookie_mutex; mutable nano::mutex syn_cookie_mutex;
std::unordered_map<nano::endpoint, syn_cookie_info> cookies; std::unordered_map<nano::endpoint, syn_cookie_info> cookies;
std::unordered_map<boost::asio::ip::address, unsigned> cookies_per_ip; std::unordered_map<boost::asio::ip::address, unsigned> cookies_per_ip;
size_t max_cookies_per_ip; size_t max_cookies_per_ip;

View file

@ -69,7 +69,7 @@ std::unique_ptr<nano::container_info_component> nano::collect_container_info (re
{ {
size_t count; size_t count;
{ {
nano::lock_guard<std::mutex> guard (rep_crawler.active_mutex); nano::lock_guard<nano::mutex> guard (rep_crawler.active_mutex);
count = rep_crawler.active.size (); count = rep_crawler.active.size ();
} }
@ -908,7 +908,7 @@ void nano::node::bootstrap_wallet ()
{ {
std::deque<nano::account> accounts; std::deque<nano::account> accounts;
{ {
nano::lock_guard<std::mutex> lock (wallets.mutex); nano::lock_guard<nano::mutex> lock (wallets.mutex);
auto transaction (wallets.tx_begin_read ()); auto transaction (wallets.tx_begin_read ());
for (auto i (wallets.items.begin ()), n (wallets.items.end ()); i != n && accounts.size () < 128; ++i) for (auto i (wallets.items.begin ()), n (wallets.items.end ()); i != n && accounts.size () < 128; ++i)
{ {
@ -1280,7 +1280,7 @@ void nano::node::ongoing_online_weight_calculation ()
void nano::node::receive_confirmed (nano::transaction const & block_transaction_a, nano::block_hash const & hash_a, nano::account const & destination_a) void nano::node::receive_confirmed (nano::transaction const & block_transaction_a, nano::block_hash const & hash_a, nano::account const & destination_a)
{ {
nano::unique_lock<std::mutex> lk (wallets.mutex); nano::unique_lock<nano::mutex> lk (wallets.mutex);
auto wallets_l = wallets.get_wallets (); auto wallets_l = wallets.get_wallets ();
auto wallet_transaction = wallets.tx_begin_read (); auto wallet_transaction = wallets.tx_begin_read ();
lk.unlock (); lk.unlock ();
@ -1383,7 +1383,7 @@ void nano::node::process_confirmed (nano::election_status const & status_a, uint
bool nano::block_arrival::add (nano::block_hash const & hash_a) bool nano::block_arrival::add (nano::block_hash const & hash_a)
{ {
nano::lock_guard<std::mutex> lock (mutex); nano::lock_guard<nano::mutex> lock (mutex);
auto now (std::chrono::steady_clock::now ()); auto now (std::chrono::steady_clock::now ());
auto inserted (arrival.get<tag_sequence> ().emplace_back (nano::block_arrival_info{ now, hash_a })); auto inserted (arrival.get<tag_sequence> ().emplace_back (nano::block_arrival_info{ now, hash_a }));
auto result (!inserted.second); auto result (!inserted.second);
@ -1392,7 +1392,7 @@ bool nano::block_arrival::add (nano::block_hash const & hash_a)
bool nano::block_arrival::recent (nano::block_hash const & hash_a) bool nano::block_arrival::recent (nano::block_hash const & hash_a)
{ {
nano::lock_guard<std::mutex> lock (mutex); nano::lock_guard<nano::mutex> lock (mutex);
auto now (std::chrono::steady_clock::now ()); auto now (std::chrono::steady_clock::now ());
while (arrival.size () > arrival_size_min && arrival.get<tag_sequence> ().front ().arrival + arrival_time_min < now) while (arrival.size () > arrival_size_min && arrival.get<tag_sequence> ().front ().arrival + arrival_time_min < now)
{ {
@ -1405,7 +1405,7 @@ std::unique_ptr<nano::container_info_component> nano::collect_container_info (bl
{ {
size_t count = 0; size_t count = 0;
{ {
nano::lock_guard<std::mutex> guard (block_arrival.mutex); nano::lock_guard<nano::mutex> guard (block_arrival.mutex);
count = block_arrival.arrival.size (); count = block_arrival.arrival.size ();
} }
@ -1477,7 +1477,7 @@ void nano::node::epoch_upgrader_impl (nano::private_key const & prv_a, nano::epo
auto signer (nano::pub_key (prv_a)); auto signer (nano::pub_key (prv_a));
debug_assert (signer == ledger.epoch_signer (link)); debug_assert (signer == ledger.epoch_signer (link));
std::mutex upgrader_mutex; nano::mutex upgrader_mutex;
nano::condition_variable upgrader_condition; nano::condition_variable upgrader_condition;
class account_upgrade_item final class account_upgrade_item final
@ -1553,7 +1553,7 @@ void nano::node::epoch_upgrader_impl (nano::private_key const & prv_a, nano::epo
if (threads != 0) if (threads != 0)
{ {
{ {
nano::unique_lock<std::mutex> lock (upgrader_mutex); nano::unique_lock<nano::mutex> lock (upgrader_mutex);
++workers; ++workers;
while (workers > threads) while (workers > threads)
{ {
@ -1563,7 +1563,7 @@ void nano::node::epoch_upgrader_impl (nano::private_key const & prv_a, nano::epo
this->workers.push_task ([node_l = shared_from_this (), &upgrader_process, &upgrader_mutex, &upgrader_condition, &upgraded_accounts, &workers, epoch, difficulty, signer, root, account]() { this->workers.push_task ([node_l = shared_from_this (), &upgrader_process, &upgrader_mutex, &upgrader_condition, &upgraded_accounts, &workers, epoch, difficulty, signer, root, account]() {
upgrader_process (*node_l, upgraded_accounts, epoch, difficulty, signer, root, account); upgrader_process (*node_l, upgraded_accounts, epoch, difficulty, signer, root, account);
{ {
nano::lock_guard<std::mutex> lock (upgrader_mutex); nano::lock_guard<nano::mutex> lock (upgrader_mutex);
--workers; --workers;
} }
upgrader_condition.notify_all (); upgrader_condition.notify_all ();
@ -1576,7 +1576,7 @@ void nano::node::epoch_upgrader_impl (nano::private_key const & prv_a, nano::epo
} }
} }
{ {
nano::unique_lock<std::mutex> lock (upgrader_mutex); nano::unique_lock<nano::mutex> lock (upgrader_mutex);
while (workers > 0) while (workers > 0)
{ {
upgrader_condition.wait (lock); upgrader_condition.wait (lock);
@ -1632,7 +1632,7 @@ void nano::node::epoch_upgrader_impl (nano::private_key const & prv_a, nano::epo
if (threads != 0) if (threads != 0)
{ {
{ {
nano::unique_lock<std::mutex> lock (upgrader_mutex); nano::unique_lock<nano::mutex> lock (upgrader_mutex);
++workers; ++workers;
while (workers > threads) while (workers > threads)
{ {
@ -1642,7 +1642,7 @@ void nano::node::epoch_upgrader_impl (nano::private_key const & prv_a, nano::epo
this->workers.push_task ([node_l = shared_from_this (), &upgrader_process, &upgrader_mutex, &upgrader_condition, &upgraded_pending, &workers, epoch, difficulty, signer, root, account]() { this->workers.push_task ([node_l = shared_from_this (), &upgrader_process, &upgrader_mutex, &upgrader_condition, &upgraded_pending, &workers, epoch, difficulty, signer, root, account]() {
upgrader_process (*node_l, upgraded_pending, epoch, difficulty, signer, root, account); upgrader_process (*node_l, upgraded_pending, epoch, difficulty, signer, root, account);
{ {
nano::lock_guard<std::mutex> lock (upgrader_mutex); nano::lock_guard<nano::mutex> lock (upgrader_mutex);
--workers; --workers;
} }
upgrader_condition.notify_all (); upgrader_condition.notify_all ();
@ -1677,7 +1677,7 @@ void nano::node::epoch_upgrader_impl (nano::private_key const & prv_a, nano::epo
} }
} }
{ {
nano::unique_lock<std::mutex> lock (upgrader_mutex); nano::unique_lock<nano::mutex> lock (upgrader_mutex);
while (workers > 0) while (workers > 0)
{ {
upgrader_condition.wait (lock); upgrader_condition.wait (lock);
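The epoch upgrader hunks above keep their upgrader_mutex/upgrader_condition throttling logic unchanged apart from the type swap. As a reference for the pattern itself, here is a minimal, self-contained sketch of the same throttle using standard primitives and a placeholder task body: submissions block while the in-flight counter exceeds the thread limit, each worker decrements and notifies on completion, and joining the pool plays the role of the final `while (workers > 0)` drain loop.

#include <condition_variable>
#include <iostream>
#include <mutex>
#include <thread>
#include <vector>

int main ()
{
    std::mutex upgrader_mutex;
    std::condition_variable upgrader_condition;
    unsigned workers = 0;
    unsigned const threads = 4;

    std::vector<std::thread> pool;
    for (int task = 0; task < 16; ++task)
    {
        {
            // Block task submission while the concurrency limit is reached.
            std::unique_lock<std::mutex> lock (upgrader_mutex);
            ++workers;
            while (workers > threads)
            {
                upgrader_condition.wait (lock);
            }
        }
        pool.emplace_back ([&]() {
            // ... per-account upgrade work would happen here ...
            {
                std::lock_guard<std::mutex> lock (upgrader_mutex);
                --workers;
            }
            upgrader_condition.notify_all ();
        });
    }
    // Joining every worker is equivalent to draining the counter back to zero.
    for (auto & t : pool)
    {
        t.join ();
    }
    std::cout << "remaining workers: " << workers << '\n'; // 0
}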

View file

@ -73,7 +73,7 @@ public:
boost::multi_index::member<nano::block_arrival_info, nano::block_hash, &nano::block_arrival_info::hash>>>> boost::multi_index::member<nano::block_arrival_info, nano::block_hash, &nano::block_arrival_info::hash>>>>
arrival; arrival;
// clang-format on // clang-format on
std::mutex mutex; nano::mutex mutex{ mutex_identifier (mutexes::block_arrival) };
static size_t constexpr arrival_size_min = 8 * 1024; static size_t constexpr arrival_size_min = 8 * 1024;
static std::chrono::seconds constexpr arrival_time_min = std::chrono::seconds (300); static std::chrono::seconds constexpr arrival_time_min = std::chrono::seconds (300);
}; };

View file

@ -18,7 +18,7 @@ void nano::online_reps::observe (nano::account const & rep_a)
{ {
if (ledger.weight (rep_a) > 0) if (ledger.weight (rep_a) > 0)
{ {
nano::lock_guard<std::mutex> lock (mutex); nano::lock_guard<nano::mutex> lock (mutex);
auto now = std::chrono::steady_clock::now (); auto now = std::chrono::steady_clock::now ();
auto new_insert = reps.get<tag_account> ().erase (rep_a) == 0; auto new_insert = reps.get<tag_account> ().erase (rep_a) == 0;
reps.insert ({ now, rep_a }); reps.insert ({ now, rep_a });
@ -34,7 +34,7 @@ void nano::online_reps::observe (nano::account const & rep_a)
void nano::online_reps::sample () void nano::online_reps::sample ()
{ {
nano::unique_lock<std::mutex> lock (mutex); nano::unique_lock<nano::mutex> lock (mutex);
nano::uint128_t online_l = online_m; nano::uint128_t online_l = online_m;
lock.unlock (); lock.unlock ();
nano::uint128_t trend_l; nano::uint128_t trend_l;
@ -83,19 +83,19 @@ nano::uint128_t nano::online_reps::calculate_trend (nano::transaction & transact
nano::uint128_t nano::online_reps::trended () const nano::uint128_t nano::online_reps::trended () const
{ {
nano::lock_guard<std::mutex> lock (mutex); nano::lock_guard<nano::mutex> lock (mutex);
return trended_m; return trended_m;
} }
nano::uint128_t nano::online_reps::online () const nano::uint128_t nano::online_reps::online () const
{ {
nano::lock_guard<std::mutex> lock (mutex); nano::lock_guard<nano::mutex> lock (mutex);
return online_m; return online_m;
} }
nano::uint128_t nano::online_reps::delta () const nano::uint128_t nano::online_reps::delta () const
{ {
nano::lock_guard<std::mutex> lock (mutex); nano::lock_guard<nano::mutex> lock (mutex);
// Using a larger container to ensure maximum precision // Using a larger container to ensure maximum precision
auto weight = static_cast<nano::uint256_t> (std::max ({ online_m, trended_m, config.online_weight_minimum.number () })); auto weight = static_cast<nano::uint256_t> (std::max ({ online_m, trended_m, config.online_weight_minimum.number () }));
return ((weight * online_weight_quorum) / 100).convert_to<nano::uint128_t> (); return ((weight * online_weight_quorum) / 100).convert_to<nano::uint128_t> ();
@ -104,14 +104,14 @@ nano::uint128_t nano::online_reps::delta () const
std::vector<nano::account> nano::online_reps::list () std::vector<nano::account> nano::online_reps::list ()
{ {
std::vector<nano::account> result; std::vector<nano::account> result;
nano::lock_guard<std::mutex> lock (mutex); nano::lock_guard<nano::mutex> lock (mutex);
std::for_each (reps.begin (), reps.end (), [&result](rep_info const & info_a) { result.push_back (info_a.account); }); std::for_each (reps.begin (), reps.end (), [&result](rep_info const & info_a) { result.push_back (info_a.account); });
return result; return result;
} }
void nano::online_reps::clear () void nano::online_reps::clear ()
{ {
nano::lock_guard<std::mutex> lock (mutex); nano::lock_guard<nano::mutex> lock (mutex);
reps.clear (); reps.clear ();
online_m = 0; online_m = 0;
} }
@ -120,7 +120,7 @@ std::unique_ptr<nano::container_info_component> nano::collect_container_info (on
{ {
size_t count; size_t count;
{ {
nano::lock_guard<std::mutex> guard (online_reps.mutex); nano::lock_guard<nano::mutex> guard (online_reps.mutex);
count = online_reps.reps.size (); count = online_reps.reps.size ();
} }

View file

@ -54,7 +54,7 @@ private:
}; };
nano::uint128_t calculate_trend (nano::transaction &) const; nano::uint128_t calculate_trend (nano::transaction &) const;
nano::uint128_t calculate_online () const; nano::uint128_t calculate_online () const;
mutable std::mutex mutex; mutable nano::mutex mutex;
nano::ledger & ledger; nano::ledger & ledger;
nano::node_config const & config; nano::node_config const & config;
boost::multi_index_container<rep_info, boost::multi_index_container<rep_info,

View file

@ -449,7 +449,7 @@ boost::optional<uint64_t> nano::opencl_work::generate_work (nano::work_version c
boost::optional<uint64_t> nano::opencl_work::generate_work (nano::work_version const version_a, nano::root const & root_a, uint64_t const difficulty_a, std::atomic<int> & ticket_a) boost::optional<uint64_t> nano::opencl_work::generate_work (nano::work_version const version_a, nano::root const & root_a, uint64_t const difficulty_a, std::atomic<int> & ticket_a)
{ {
nano::lock_guard<std::mutex> lock (mutex); nano::lock_guard<nano::mutex> lock (mutex);
bool error (false); bool error (false);
int ticket_l (ticket_a); int ticket_l (ticket_a);
uint64_t result (0); uint64_t result (0);

View file

@ -46,7 +46,7 @@ public:
boost::optional<uint64_t> generate_work (nano::work_version const, nano::root const &, uint64_t const, std::atomic<int> &); boost::optional<uint64_t> generate_work (nano::work_version const, nano::root const &, uint64_t const, std::atomic<int> &);
static std::unique_ptr<opencl_work> create (bool, nano::opencl_config const &, nano::logger_mt &); static std::unique_ptr<opencl_work> create (bool, nano::opencl_config const &, nano::logger_mt &);
nano::opencl_config const & config; nano::opencl_config const & config;
std::mutex mutex; nano::mutex mutex;
cl_context context; cl_context context;
cl_mem attempt_buffer; cl_mem attempt_buffer;
cl_mem result_buffer; cl_mem result_buffer;

View file

@ -8,7 +8,7 @@ constexpr double nano::peer_exclusion::peers_percentage_limit;
uint64_t nano::peer_exclusion::add (nano::tcp_endpoint const & endpoint_a, size_t const network_peers_count_a) uint64_t nano::peer_exclusion::add (nano::tcp_endpoint const & endpoint_a, size_t const network_peers_count_a)
{ {
uint64_t result (0); uint64_t result (0);
nano::lock_guard<std::mutex> guard (mutex); nano::lock_guard<nano::mutex> guard (mutex);
// Clean old excluded peers // Clean old excluded peers
auto limited = limited_size (network_peers_count_a); auto limited = limited_size (network_peers_count_a);
while (peers.size () > 1 && peers.size () > limited) while (peers.size () > 1 && peers.size () > limited)
@ -49,7 +49,7 @@ uint64_t nano::peer_exclusion::add (nano::tcp_endpoint const & endpoint_a, size_
bool nano::peer_exclusion::check (nano::tcp_endpoint const & endpoint_a) bool nano::peer_exclusion::check (nano::tcp_endpoint const & endpoint_a)
{ {
bool excluded (false); bool excluded (false);
nano::lock_guard<std::mutex> guard (mutex); nano::lock_guard<nano::mutex> guard (mutex);
auto & peers_by_endpoint (peers.get<tag_endpoint> ()); auto & peers_by_endpoint (peers.get<tag_endpoint> ());
auto existing (peers_by_endpoint.find (endpoint_a.address ())); auto existing (peers_by_endpoint.find (endpoint_a.address ()));
if (existing != peers_by_endpoint.end () && existing->score >= score_limit) if (existing != peers_by_endpoint.end () && existing->score >= score_limit)
@ -68,7 +68,7 @@ bool nano::peer_exclusion::check (nano::tcp_endpoint const & endpoint_a)
void nano::peer_exclusion::remove (nano::tcp_endpoint const & endpoint_a) void nano::peer_exclusion::remove (nano::tcp_endpoint const & endpoint_a)
{ {
nano::lock_guard<std::mutex> guard (mutex); nano::lock_guard<nano::mutex> guard (mutex);
peers.get<tag_endpoint> ().erase (endpoint_a.address ()); peers.get<tag_endpoint> ().erase (endpoint_a.address ());
} }
@ -79,7 +79,7 @@ size_t nano::peer_exclusion::limited_size (size_t const network_peers_count_a) c
size_t nano::peer_exclusion::size () const size_t nano::peer_exclusion::size () const
{ {
nano::lock_guard<std::mutex> guard (mutex); nano::lock_guard<nano::mutex> guard (mutex);
return peers.size (); return peers.size ();
} }

View file

@ -37,7 +37,7 @@ public:
private: private:
ordered_endpoints peers; ordered_endpoints peers;
mutable std::mutex mutex; mutable nano::mutex mutex;
public: public:
constexpr static size_t size_max = 5000; constexpr static size_t size_max = 5000;

View file

@ -48,7 +48,7 @@ void nano::port_mapping::refresh_devices ()
} }
} }
// Update port mapping // Update port mapping
nano::lock_guard<std::mutex> guard_l (mutex); nano::lock_guard<nano::mutex> guard_l (mutex);
upnp = std::move (upnp_l); upnp = std::move (upnp_l);
if (igd_error_l == 1 || igd_error_l == 2) if (igd_error_l == 1 || igd_error_l == 2)
{ {
@ -61,7 +61,7 @@ void nano::port_mapping::refresh_devices ()
nano::endpoint nano::port_mapping::external_address () nano::endpoint nano::port_mapping::external_address ()
{ {
nano::endpoint result_l (boost::asio::ip::address_v6{}, 0); nano::endpoint result_l (boost::asio::ip::address_v6{}, 0);
nano::lock_guard<std::mutex> guard_l (mutex); nano::lock_guard<nano::mutex> guard_l (mutex);
for (auto & protocol : protocols | boost::adaptors::filtered ([](auto const & p) { return p.enabled; })) for (auto & protocol : protocols | boost::adaptors::filtered ([](auto const & p) { return p.enabled; }))
{ {
if (protocol.external_port != 0) if (protocol.external_port != 0)
@ -77,7 +77,7 @@ void nano::port_mapping::refresh_mapping ()
debug_assert (!network_params.network.is_dev_network ()); debug_assert (!network_params.network.is_dev_network ());
if (on) if (on)
{ {
nano::lock_guard<std::mutex> guard_l (mutex); nano::lock_guard<nano::mutex> guard_l (mutex);
auto node_port_l (std::to_string (node.network.endpoint ().port ())); auto node_port_l (std::to_string (node.network.endpoint ().port ()));
auto config_port_l (get_config_port (node_port_l)); auto config_port_l (get_config_port (node_port_l));
@ -114,7 +114,7 @@ bool nano::port_mapping::check_mapping ()
// Long discovery time and fast setup/teardown make this impractical for testing // Long discovery time and fast setup/teardown make this impractical for testing
debug_assert (!network_params.network.is_dev_network ()); debug_assert (!network_params.network.is_dev_network ());
bool result_l (true); bool result_l (true);
nano::lock_guard<std::mutex> guard_l (mutex); nano::lock_guard<nano::mutex> guard_l (mutex);
auto node_port_l (std::to_string (node.network.endpoint ().port ())); auto node_port_l (std::to_string (node.network.endpoint ().port ()));
auto config_port_l (get_config_port (node_port_l)); auto config_port_l (get_config_port (node_port_l));
for (auto & protocol : protocols | boost::adaptors::filtered ([](auto const & p) { return p.enabled; })) for (auto & protocol : protocols | boost::adaptors::filtered ([](auto const & p) { return p.enabled; }))
@ -187,7 +187,7 @@ void nano::port_mapping::check_mapping_loop ()
void nano::port_mapping::stop () void nano::port_mapping::stop ()
{ {
on = false; on = false;
nano::lock_guard<std::mutex> guard_l (mutex); nano::lock_guard<nano::mutex> guard_l (mutex);
for (auto & protocol : protocols | boost::adaptors::filtered ([](auto const & p) { return p.enabled; })) for (auto & protocol : protocols | boost::adaptors::filtered ([](auto const & p) { return p.enabled; }))
{ {
if (protocol.external_port != 0) if (protocol.external_port != 0)

View file

@ -60,6 +60,6 @@ private:
std::array<mapping_protocol, 2> protocols; std::array<mapping_protocol, 2> protocols;
uint64_t check_count{ 0 }; uint64_t check_count{ 0 };
std::atomic<bool> on{ false }; std::atomic<bool> on{ false };
std::mutex mutex; nano::mutex mutex;
}; };
} }

View file

@ -16,7 +16,7 @@ node (node_a)
void nano::rep_crawler::remove (nano::block_hash const & hash_a) void nano::rep_crawler::remove (nano::block_hash const & hash_a)
{ {
nano::lock_guard<std::mutex> lock (active_mutex); nano::lock_guard<nano::mutex> lock (active_mutex);
active.erase (hash_a); active.erase (hash_a);
} }
@ -29,7 +29,7 @@ void nano::rep_crawler::validate ()
{ {
decltype (responses) responses_l; decltype (responses) responses_l;
{ {
nano::lock_guard<std::mutex> lock (active_mutex); nano::lock_guard<nano::mutex> lock (active_mutex);
responses_l.swap (responses); responses_l.swap (responses);
} }
auto minimum = node.minimum_principal_weight (); auto minimum = node.minimum_principal_weight ();
@ -44,7 +44,7 @@ void nano::rep_crawler::validate ()
if (rep_weight > minimum) if (rep_weight > minimum)
{ {
auto updated_or_inserted = false; auto updated_or_inserted = false;
nano::unique_lock<std::mutex> lock (probable_reps_mutex); nano::unique_lock<nano::mutex> lock (probable_reps_mutex);
auto existing (probable_reps.find (vote->account)); auto existing (probable_reps.find (vote->account));
if (existing != probable_reps.end ()) if (existing != probable_reps.end ())
{ {
@ -128,7 +128,7 @@ void nano::rep_crawler::query (std::vector<std::shared_ptr<nano::transport::chan
auto transaction (node.store.tx_begin_read ()); auto transaction (node.store.tx_begin_read ());
auto hash_root (node.ledger.hash_root_random (transaction)); auto hash_root (node.ledger.hash_root_random (transaction));
{ {
nano::lock_guard<std::mutex> lock (active_mutex); nano::lock_guard<nano::mutex> lock (active_mutex);
// Don't send same block multiple times in tests // Don't send same block multiple times in tests
if (node.network_params.network.is_dev_network ()) if (node.network_params.network.is_dev_network ())
{ {
@ -169,7 +169,7 @@ void nano::rep_crawler::query (std::shared_ptr<nano::transport::channel> const &
bool nano::rep_crawler::is_pr (nano::transport::channel const & channel_a) const bool nano::rep_crawler::is_pr (nano::transport::channel const & channel_a) const
{ {
nano::lock_guard<std::mutex> lock (probable_reps_mutex); nano::lock_guard<nano::mutex> lock (probable_reps_mutex);
auto existing = probable_reps.get<tag_channel_ref> ().find (channel_a); auto existing = probable_reps.get<tag_channel_ref> ().find (channel_a);
bool result = false; bool result = false;
if (existing != probable_reps.get<tag_channel_ref> ().end ()) if (existing != probable_reps.get<tag_channel_ref> ().end ())
@ -182,7 +182,7 @@ bool nano::rep_crawler::is_pr (nano::transport::channel const & channel_a) const
bool nano::rep_crawler::response (std::shared_ptr<nano::transport::channel> const & channel_a, std::shared_ptr<nano::vote> const & vote_a) bool nano::rep_crawler::response (std::shared_ptr<nano::transport::channel> const & channel_a, std::shared_ptr<nano::vote> const & vote_a)
{ {
bool error = true; bool error = true;
nano::lock_guard<std::mutex> lock (active_mutex); nano::lock_guard<nano::mutex> lock (active_mutex);
for (auto i = vote_a->begin (), n = vote_a->end (); i != n; ++i) for (auto i = vote_a->begin (), n = vote_a->end (); i != n; ++i)
{ {
if (active.count (*i) != 0) if (active.count (*i) != 0)
@ -197,7 +197,7 @@ bool nano::rep_crawler::response (std::shared_ptr<nano::transport::channel> cons
nano::uint128_t nano::rep_crawler::total_weight () const nano::uint128_t nano::rep_crawler::total_weight () const
{ {
nano::lock_guard<std::mutex> lock (probable_reps_mutex); nano::lock_guard<nano::mutex> lock (probable_reps_mutex);
nano::uint128_t result (0); nano::uint128_t result (0);
for (auto i (probable_reps.get<tag_weight> ().begin ()), n (probable_reps.get<tag_weight> ().end ()); i != n; ++i) for (auto i (probable_reps.get<tag_weight> ().begin ()), n (probable_reps.get<tag_weight> ().end ()); i != n; ++i)
{ {
@ -216,7 +216,7 @@ nano::uint128_t nano::rep_crawler::total_weight () const
void nano::rep_crawler::on_rep_request (std::shared_ptr<nano::transport::channel> const & channel_a) void nano::rep_crawler::on_rep_request (std::shared_ptr<nano::transport::channel> const & channel_a)
{ {
nano::lock_guard<std::mutex> lock (probable_reps_mutex); nano::lock_guard<nano::mutex> lock (probable_reps_mutex);
if (channel_a->get_tcp_endpoint ().address () != boost::asio::ip::address_v6::any ()) if (channel_a->get_tcp_endpoint ().address () != boost::asio::ip::address_v6::any ())
{ {
probably_rep_t::index<tag_channel_ref>::type & channel_ref_index = probable_reps.get<tag_channel_ref> (); probably_rep_t::index<tag_channel_ref>::type & channel_ref_index = probable_reps.get<tag_channel_ref> ();
@ -237,7 +237,7 @@ void nano::rep_crawler::cleanup_reps ()
std::vector<std::shared_ptr<nano::transport::channel>> channels; std::vector<std::shared_ptr<nano::transport::channel>> channels;
{ {
// Check known rep channels // Check known rep channels
nano::lock_guard<std::mutex> lock (probable_reps_mutex); nano::lock_guard<nano::mutex> lock (probable_reps_mutex);
auto iterator (probable_reps.get<tag_last_request> ().begin ()); auto iterator (probable_reps.get<tag_last_request> ().begin ());
while (iterator != probable_reps.get<tag_last_request> ().end ()) while (iterator != probable_reps.get<tag_last_request> ().end ())
{ {
@ -275,7 +275,7 @@ void nano::rep_crawler::cleanup_reps ()
} }
if (!equal) if (!equal)
{ {
nano::lock_guard<std::mutex> lock (probable_reps_mutex); nano::lock_guard<nano::mutex> lock (probable_reps_mutex);
probable_reps.get<tag_channel_ref> ().erase (*i); probable_reps.get<tag_channel_ref> ().erase (*i);
} }
} }
@ -283,7 +283,7 @@ void nano::rep_crawler::cleanup_reps ()
void nano::rep_crawler::update_weights () void nano::rep_crawler::update_weights ()
{ {
nano::lock_guard<std::mutex> lock (probable_reps_mutex); nano::lock_guard<nano::mutex> lock (probable_reps_mutex);
for (auto i (probable_reps.get<tag_last_request> ().begin ()), n (probable_reps.get<tag_last_request> ().end ()); i != n;) for (auto i (probable_reps.get<tag_last_request> ().begin ()), n (probable_reps.get<tag_last_request> ().end ()); i != n;)
{ {
auto weight (node.ledger.weight (i->account)); auto weight (node.ledger.weight (i->account));
@ -309,7 +309,7 @@ std::vector<nano::representative> nano::rep_crawler::representatives (size_t cou
{ {
auto version_min (opt_version_min_a.value_or (node.network_params.protocol.protocol_version_min ())); auto version_min (opt_version_min_a.value_or (node.network_params.protocol.protocol_version_min ()));
std::vector<representative> result; std::vector<representative> result;
nano::lock_guard<std::mutex> lock (probable_reps_mutex); nano::lock_guard<nano::mutex> lock (probable_reps_mutex);
for (auto i (probable_reps.get<tag_weight> ().begin ()), n (probable_reps.get<tag_weight> ().end ()); i != n && result.size () < count_a; ++i) for (auto i (probable_reps.get<tag_weight> ().begin ()), n (probable_reps.get<tag_weight> ().end ()); i != n && result.size () < count_a; ++i)
{ {
if (i->weight > weight_a && i->channel->get_network_version () >= version_min) if (i->weight > weight_a && i->channel->get_network_version () >= version_min)
@ -339,6 +339,6 @@ std::vector<std::shared_ptr<nano::transport::channel>> nano::rep_crawler::repres
/** Total number of representatives */ /** Total number of representatives */
size_t nano::rep_crawler::representative_count () size_t nano::rep_crawler::representative_count ()
{ {
nano::lock_guard<std::mutex> lock (probable_reps_mutex); nano::lock_guard<nano::mutex> lock (probable_reps_mutex);
return probable_reps.size (); return probable_reps.size ();
} }

View file

@ -118,7 +118,7 @@ private:
nano::node & node; nano::node & node;
/** Protects the active-hash container */ /** Protects the active-hash container */
std::mutex active_mutex; nano::mutex active_mutex;
/** We have solicited votes for these random blocks */ /** We have solicited votes for these random blocks */
std::unordered_set<nano::block_hash> active; std::unordered_set<nano::block_hash> active;
@ -142,7 +142,7 @@ private:
void update_weights (); void update_weights ();
/** Protects the probable_reps container */ /** Protects the probable_reps container */
mutable std::mutex probable_reps_mutex; mutable nano::mutex probable_reps_mutex;
/** Probable representatives */ /** Probable representatives */
probably_rep_t probable_reps; probably_rep_t probable_reps;
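Several of the headers above (peer_exclusion, online_reps, rep_crawler, ipc access) declare the new nano::mutex as mutable. That is what lets const accessors such as total_weight () or size () still take the lock. A minimal illustration with standard primitives and invented names:

#include <iostream>
#include <mutex>
#include <unordered_map>

class rep_weights
{
public:
    void set (int rep, unsigned long long weight)
    {
        std::lock_guard<std::mutex> lock (mutex);
        weights[rep] = weight;
    }
    unsigned long long total_weight () const
    {
        std::lock_guard<std::mutex> lock (mutex); // allowed because the mutex is mutable
        unsigned long long result (0);
        for (auto const & entry : weights)
        {
            result += entry.second;
        }
        return result;
    }

private:
    mutable std::mutex mutex;
    std::unordered_map<int, unsigned long long> weights;
};

int main ()
{
    rep_weights reps;
    reps.set (1, 100);
    reps.set (2, 250);
    std::cout << reps.total_weight () << '\n'; // 350
}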

View file

@ -26,7 +26,7 @@ thread ([this]() { run (); })
generator.set_reply_action ([this](std::shared_ptr<nano::vote> const & vote_a, std::shared_ptr<nano::transport::channel> const & channel_a) { generator.set_reply_action ([this](std::shared_ptr<nano::vote> const & vote_a, std::shared_ptr<nano::transport::channel> const & channel_a) {
this->reply_action (vote_a, channel_a); this->reply_action (vote_a, channel_a);
}); });
nano::unique_lock<std::mutex> lock (mutex); nano::unique_lock<nano::mutex> lock (mutex);
condition.wait (lock, [& started = started] { return started; }); condition.wait (lock, [& started = started] { return started; });
} }
@ -35,7 +35,7 @@ void nano::request_aggregator::add (std::shared_ptr<nano::transport::channel> co
debug_assert (wallets.reps ().voting > 0); debug_assert (wallets.reps ().voting > 0);
bool error = true; bool error = true;
auto const endpoint (nano::transport::map_endpoint_to_v6 (channel_a->get_endpoint ())); auto const endpoint (nano::transport::map_endpoint_to_v6 (channel_a->get_endpoint ()));
nano::unique_lock<std::mutex> lock (mutex); nano::unique_lock<nano::mutex> lock (mutex);
// Protecting from ever-increasing memory usage when requests are consumed slower than generated // Protecting from ever-increasing memory usage when requests are consumed slower than generated
// Reject request if the oldest request has not yet been processed after its deadline + a modest margin // Reject request if the oldest request has not yet been processed after its deadline + a modest margin
if (requests.empty () || (requests.get<tag_deadline> ().begin ()->deadline + 2 * this->max_delay > std::chrono::steady_clock::now ())) if (requests.empty () || (requests.get<tag_deadline> ().begin ()->deadline + 2 * this->max_delay > std::chrono::steady_clock::now ()))
@ -69,7 +69,7 @@ void nano::request_aggregator::add (std::shared_ptr<nano::transport::channel> co
void nano::request_aggregator::run () void nano::request_aggregator::run ()
{ {
nano::thread_role::set (nano::thread_role::name::request_aggregator); nano::thread_role::set (nano::thread_role::name::request_aggregator);
nano::unique_lock<std::mutex> lock (mutex); nano::unique_lock<nano::mutex> lock (mutex);
started = true; started = true;
lock.unlock (); lock.unlock ();
condition.notify_all (); condition.notify_all ();
@ -117,7 +117,7 @@ void nano::request_aggregator::run ()
void nano::request_aggregator::stop () void nano::request_aggregator::stop ()
{ {
{ {
nano::lock_guard<std::mutex> guard (mutex); nano::lock_guard<nano::mutex> guard (mutex);
stopped = true; stopped = true;
} }
condition.notify_all (); condition.notify_all ();
@ -129,7 +129,7 @@ void nano::request_aggregator::stop ()
std::size_t nano::request_aggregator::size () std::size_t nano::request_aggregator::size ()
{ {
nano::unique_lock<std::mutex> lock (mutex); nano::unique_lock<nano::mutex> lock (mutex);
return requests.size (); return requests.size ();
} }

View file

@ -100,7 +100,7 @@ private:
bool stopped{ false }; bool stopped{ false };
bool started{ false }; bool started{ false };
nano::condition_variable condition; nano::condition_variable condition;
std::mutex mutex; nano::mutex mutex{ mutex_identifier (mutexes::request_aggregator) };
std::thread thread; std::thread thread;
friend std::unique_ptr<container_info_component> collect_container_info (request_aggregator &, const std::string &); friend std::unique_ptr<container_info_component> collect_container_info (request_aggregator &, const std::string &);
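The request_aggregator mutex above also carries an identifier, which is what the new NANO_TIMED_LOCKS_FILTER define can be matched against. The standalone sketch below shows one plausible way such a gate could work; the hard-coded macro values, the NANO_STRINGIFY helpers and the should_report function are assumptions for illustration rather than the node's actual implementation, and NANO_TIMED_LOCKS is treated here as a wait threshold in milliseconds.

#include <cstring>
#include <iostream>

// Standalone approximation of the CMake-provided defines; in a real build they would arrive
// as -DNANO_TIMED_LOCKS=<ms> and -DNANO_TIMED_LOCKS_FILTER=<name> (empty = report on all).
#define NANO_TIMED_LOCKS 50
#define NANO_TIMED_LOCKS_FILTER request_aggregator

#define NANO_STRINGIFY_IMPL(x) #x
#define NANO_STRINGIFY(x) NANO_STRINGIFY_IMPL (x)

// Hypothetical gate: report only when the wait exceeded the threshold and the mutex is the
// one named by the filter (or the filter is empty).
bool should_report (char const * mutex_name, long waited_ms)
{
    if (waited_ms < NANO_TIMED_LOCKS)
    {
        return false;
    }
    char const * filter = NANO_STRINGIFY (NANO_TIMED_LOCKS_FILTER);
    return *filter == '\0' || std::strcmp (filter, mutex_name) == 0;
}

int main ()
{
    std::cout << should_report ("request_aggregator", 120) << '\n'; // 1: filtered mutex, slow lock
    std::cout << should_report ("telemetry", 120) << '\n'; // 0: different mutex
    std::cout << should_report ("request_aggregator", 10) << '\n'; // 0: under the threshold
}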

View file

@ -72,7 +72,7 @@ private:
std::unique_ptr<rocksdb::DB> db; std::unique_ptr<rocksdb::DB> db;
std::vector<std::unique_ptr<rocksdb::ColumnFamilyHandle>> handles; std::vector<std::unique_ptr<rocksdb::ColumnFamilyHandle>> handles;
std::shared_ptr<rocksdb::TableFactory> small_table_factory; std::shared_ptr<rocksdb::TableFactory> small_table_factory;
std::unordered_map<nano::tables, std::mutex> write_lock_mutexes; std::unordered_map<nano::tables, nano::mutex> write_lock_mutexes;
nano::rocksdb_config rocksdb_config; nano::rocksdb_config rocksdb_config;
unsigned const max_block_write_batch_num_m; unsigned const max_block_write_batch_num_m;

View file

@ -32,7 +32,7 @@ void * nano::read_rocksdb_txn::get_handle () const
return (void *)&options; return (void *)&options;
} }
nano::write_rocksdb_txn::write_rocksdb_txn (rocksdb::OptimisticTransactionDB * db_a, std::vector<nano::tables> const & tables_requiring_locks_a, std::vector<nano::tables> const & tables_no_locks_a, std::unordered_map<nano::tables, std::mutex> & mutexes_a) : nano::write_rocksdb_txn::write_rocksdb_txn (rocksdb::OptimisticTransactionDB * db_a, std::vector<nano::tables> const & tables_requiring_locks_a, std::vector<nano::tables> const & tables_no_locks_a, std::unordered_map<nano::tables, nano::mutex> & mutexes_a) :
db (db_a), db (db_a),
tables_requiring_locks (tables_requiring_locks_a), tables_requiring_locks (tables_requiring_locks_a),
tables_no_locks (tables_no_locks_a), tables_no_locks (tables_no_locks_a),

View file

@ -28,7 +28,7 @@ private:
class write_rocksdb_txn final : public write_transaction_impl class write_rocksdb_txn final : public write_transaction_impl
{ {
public: public:
write_rocksdb_txn (rocksdb::OptimisticTransactionDB * db_a, std::vector<nano::tables> const & tables_requiring_locks_a, std::vector<nano::tables> const & tables_no_locks_a, std::unordered_map<nano::tables, std::mutex> & mutexes_a); write_rocksdb_txn (rocksdb::OptimisticTransactionDB * db_a, std::vector<nano::tables> const & tables_requiring_locks_a, std::vector<nano::tables> const & tables_no_locks_a, std::unordered_map<nano::tables, nano::mutex> & mutexes_a);
~write_rocksdb_txn (); ~write_rocksdb_txn ();
void commit () override; void commit () override;
void renew () override; void renew () override;
@ -40,7 +40,7 @@ private:
rocksdb::OptimisticTransactionDB * db; rocksdb::OptimisticTransactionDB * db;
std::vector<nano::tables> tables_requiring_locks; std::vector<nano::tables> tables_requiring_locks;
std::vector<nano::tables> tables_no_locks; std::vector<nano::tables> tables_no_locks;
std::unordered_map<nano::tables, std::mutex> & mutexes; std::unordered_map<nano::tables, nano::mutex> & mutexes;
bool active{ true }; bool active{ true };
void lock (); void lock ();
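The rocksdb changes above retype the per-table lock map to std::unordered_map<nano::tables, nano::mutex>. The sketch below illustrates the general idea of such a map with hypothetical table names; acquiring the mutexes in a fixed order is shown as one plausible deadlock-avoidance discipline, not necessarily what write_rocksdb_txn::lock () does.

#include <algorithm>
#include <iostream>
#include <mutex>
#include <unordered_map>
#include <vector>

// Hypothetical table ids; the node has its own nano::tables enum.
enum class tables
{
    accounts,
    blocks,
    pending
};

int main ()
{
    // One mutex per table that write transactions must hold exclusively.
    std::unordered_map<tables, std::mutex> write_lock_mutexes;
    write_lock_mutexes[tables::accounts];
    write_lock_mutexes[tables::blocks];
    write_lock_mutexes[tables::pending];

    // Tables this particular write transaction touches.
    std::vector<tables> tables_requiring_locks{ tables::blocks, tables::accounts };
    // Acquire in a fixed (enum) order so two writers cannot deadlock on each other.
    std::sort (tables_requiring_locks.begin (), tables_requiring_locks.end ());
    for (auto table : tables_requiring_locks)
    {
        write_lock_mutexes.at (table).lock ();
    }
    std::cout << "write transaction holds " << tables_requiring_locks.size () << " table locks\n";
    // Release in reverse order when the transaction commits or is destroyed.
    for (auto it = tables_requiring_locks.rbegin (); it != tables_requiring_locks.rend (); ++it)
    {
        write_lock_mutexes.at (*it).unlock ();
    }
}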

View file

@ -29,7 +29,7 @@ nano::state_block_signature_verification::~state_block_signature_verification ()
void nano::state_block_signature_verification::stop () void nano::state_block_signature_verification::stop ()
{ {
{ {
nano::lock_guard<std::mutex> guard (mutex); nano::lock_guard<nano::mutex> guard (mutex);
stopped = true; stopped = true;
} }
@ -42,7 +42,7 @@ void nano::state_block_signature_verification::stop ()
void nano::state_block_signature_verification::run (uint64_t state_block_signature_verification_size) void nano::state_block_signature_verification::run (uint64_t state_block_signature_verification_size)
{ {
nano::unique_lock<std::mutex> lk (mutex); nano::unique_lock<nano::mutex> lk (mutex);
while (!stopped) while (!stopped)
{ {
if (!state_blocks.empty ()) if (!state_blocks.empty ())
@ -70,14 +70,14 @@ void nano::state_block_signature_verification::run (uint64_t state_block_signatu
bool nano::state_block_signature_verification::is_active () bool nano::state_block_signature_verification::is_active ()
{ {
nano::lock_guard<std::mutex> guard (mutex); nano::lock_guard<nano::mutex> guard (mutex);
return active; return active;
} }
void nano::state_block_signature_verification::add (nano::unchecked_info const & info_a, bool watch_work_a) void nano::state_block_signature_verification::add (nano::unchecked_info const & info_a, bool watch_work_a)
{ {
{ {
nano::lock_guard<std::mutex> guard (mutex); nano::lock_guard<nano::mutex> guard (mutex);
state_blocks.emplace_back (info_a, watch_work_a); state_blocks.emplace_back (info_a, watch_work_a);
} }
condition.notify_one (); condition.notify_one ();
@ -85,7 +85,7 @@ void nano::state_block_signature_verification::add (nano::unchecked_info const &
size_t nano::state_block_signature_verification::size () size_t nano::state_block_signature_verification::size ()
{ {
nano::lock_guard<std::mutex> guard (mutex); nano::lock_guard<nano::mutex> guard (mutex);
return state_blocks.size (); return state_blocks.size ();
} }

View file

@ -33,7 +33,7 @@ private:
nano::node_config & node_config; nano::node_config & node_config;
nano::logger_mt & logger; nano::logger_mt & logger;
std::mutex mutex; nano::mutex mutex{ mutex_identifier (mutexes::state_block_signature_verification) };
bool stopped{ false }; bool stopped{ false };
bool active{ false }; bool active{ false };
std::deque<std::pair<nano::unchecked_info, bool>> state_blocks; std::deque<std::pair<nano::unchecked_info, bool>> state_blocks;

View file

@ -46,7 +46,7 @@ void nano::telemetry::set (nano::telemetry_ack const & message_a, nano::transpor
{ {
if (!stopped) if (!stopped)
{ {
nano::unique_lock<std::mutex> lk (mutex); nano::unique_lock<nano::mutex> lk (mutex);
nano::endpoint endpoint = channel_a.get_endpoint (); nano::endpoint endpoint = channel_a.get_endpoint ();
auto it = recent_or_initial_request_telemetry_data.find (endpoint); auto it = recent_or_initial_request_telemetry_data.find (endpoint);
if (it == recent_or_initial_request_telemetry_data.cend () || !it->undergoing_request) if (it == recent_or_initial_request_telemetry_data.cend () || !it->undergoing_request)
@ -181,7 +181,7 @@ void nano::telemetry::ongoing_req_all_peers (std::chrono::milliseconds next_requ
{ {
// Cleanup any stale saved telemetry data for non-existent peers // Cleanup any stale saved telemetry data for non-existent peers
nano::lock_guard<std::mutex> guard (this_l->mutex); nano::lock_guard<nano::mutex> guard (this_l->mutex);
for (auto it = this_l->recent_or_initial_request_telemetry_data.begin (); it != this_l->recent_or_initial_request_telemetry_data.end ();) for (auto it = this_l->recent_or_initial_request_telemetry_data.begin (); it != this_l->recent_or_initial_request_telemetry_data.end ();)
{ {
if (!it->undergoing_request && !this_l->within_cache_cutoff (*it) && peers.count (it->endpoint) == 0) if (!it->undergoing_request && !this_l->within_cache_cutoff (*it) && peers.count (it->endpoint) == 0)
@ -218,7 +218,7 @@ void nano::telemetry::ongoing_req_all_peers (std::chrono::milliseconds next_requ
} }
// Schedule the next request; Use the default request time unless a telemetry request cache expires sooner // Schedule the next request; Use the default request time unless a telemetry request cache expires sooner
nano::lock_guard<std::mutex> guard (this_l->mutex); nano::lock_guard<nano::mutex> guard (this_l->mutex);
long long next_round = std::chrono::duration_cast<std::chrono::milliseconds> (this_l->cache_cutoff + this_l->response_time_cutoff).count (); long long next_round = std::chrono::duration_cast<std::chrono::milliseconds> (this_l->cache_cutoff + this_l->response_time_cutoff).count ();
if (!this_l->recent_or_initial_request_telemetry_data.empty ()) if (!this_l->recent_or_initial_request_telemetry_data.empty ())
{ {
@ -249,7 +249,7 @@ std::unordered_map<nano::endpoint, nano::telemetry_data> nano::telemetry::get_me
{ {
std::unordered_map<nano::endpoint, nano::telemetry_data> telemetry_data; std::unordered_map<nano::endpoint, nano::telemetry_data> telemetry_data;
nano::lock_guard<std::mutex> guard (mutex); nano::lock_guard<nano::mutex> guard (mutex);
auto range = boost::make_iterator_range (recent_or_initial_request_telemetry_data); auto range = boost::make_iterator_range (recent_or_initial_request_telemetry_data);
// clang-format off // clang-format off
nano::transform_if (range.begin (), range.end (), std::inserter (telemetry_data, telemetry_data.end ()), nano::transform_if (range.begin (), range.end (), std::inserter (telemetry_data, telemetry_data.end ()),
@ -286,7 +286,7 @@ void nano::telemetry::get_metrics_single_peer_async (std::shared_ptr<nano::trans
}; };
// Check if this is within the cache // Check if this is within the cache
nano::lock_guard<std::mutex> guard (mutex); nano::lock_guard<nano::mutex> guard (mutex);
auto it = recent_or_initial_request_telemetry_data.find (channel_a->get_endpoint ()); auto it = recent_or_initial_request_telemetry_data.find (channel_a->get_endpoint ());
if (it != recent_or_initial_request_telemetry_data.cend () && within_cache_cutoff (*it)) if (it != recent_or_initial_request_telemetry_data.cend () && within_cache_cutoff (*it))
{ {
@ -361,7 +361,7 @@ void nano::telemetry::fire_request_message (std::shared_ptr<nano::transport::cha
{ {
// Error sending the telemetry_req message // Error sending the telemetry_req message
this_l->stats.inc (nano::stat::type::telemetry, nano::stat::detail::failed_send_telemetry_req); this_l->stats.inc (nano::stat::type::telemetry, nano::stat::detail::failed_send_telemetry_req);
nano::lock_guard<std::mutex> guard (this_l->mutex); nano::lock_guard<nano::mutex> guard (this_l->mutex);
this_l->channel_processed (endpoint, true); this_l->channel_processed (endpoint, true);
} }
else else
@ -370,7 +370,7 @@ void nano::telemetry::fire_request_message (std::shared_ptr<nano::transport::cha
this_l->workers.add_timed_task (std::chrono::steady_clock::now () + this_l->response_time_cutoff, [round_l, this_w, endpoint]() { this_l->workers.add_timed_task (std::chrono::steady_clock::now () + this_l->response_time_cutoff, [round_l, this_w, endpoint]() {
if (auto this_l = this_w.lock ()) if (auto this_l = this_w.lock ())
{ {
nano::lock_guard<std::mutex> guard (this_l->mutex); nano::lock_guard<nano::mutex> guard (this_l->mutex);
auto it = this_l->recent_or_initial_request_telemetry_data.find (endpoint); auto it = this_l->recent_or_initial_request_telemetry_data.find (endpoint);
if (it != this_l->recent_or_initial_request_telemetry_data.cend () && it->undergoing_request && round_l == it->round) if (it != this_l->recent_or_initial_request_telemetry_data.cend () && it->undergoing_request && round_l == it->round)
{ {
@ -412,7 +412,7 @@ void nano::telemetry::flush_callbacks_async (nano::endpoint const & endpoint_a,
workers.push_task ([endpoint_a, error_a, this_w = std::weak_ptr<nano::telemetry> (shared_from_this ())]() { workers.push_task ([endpoint_a, error_a, this_w = std::weak_ptr<nano::telemetry> (shared_from_this ())]() {
if (auto this_l = this_w.lock ()) if (auto this_l = this_w.lock ())
{ {
nano::unique_lock<std::mutex> lk (this_l->mutex); nano::unique_lock<nano::mutex> lk (this_l->mutex);
while (!this_l->callbacks[endpoint_a].empty ()) while (!this_l->callbacks[endpoint_a].empty ())
{ {
lk.unlock (); lk.unlock ();
@ -429,7 +429,7 @@ void nano::telemetry::invoke_callbacks (nano::endpoint const & endpoint_a, bool
telemetry_data_response response_data{ nano::telemetry_data (), endpoint_a, error_a }; telemetry_data_response response_data{ nano::telemetry_data (), endpoint_a, error_a };
{ {
// Copy data so that it can be used outside of holding the lock // Copy data so that it can be used outside of holding the lock
nano::lock_guard<std::mutex> guard (mutex); nano::lock_guard<nano::mutex> guard (mutex);
callbacks_l = callbacks[endpoint_a]; callbacks_l = callbacks[endpoint_a];
auto it = recent_or_initial_request_telemetry_data.find (endpoint_a); auto it = recent_or_initial_request_telemetry_data.find (endpoint_a);
@ -449,7 +449,7 @@ void nano::telemetry::invoke_callbacks (nano::endpoint const & endpoint_a, bool
size_t nano::telemetry::telemetry_data_size () size_t nano::telemetry::telemetry_data_size ()
{ {
nano::lock_guard<std::mutex> guard (mutex); nano::lock_guard<nano::mutex> guard (mutex);
return recent_or_initial_request_telemetry_data.size (); return recent_or_initial_request_telemetry_data.size ();
} }
@ -471,7 +471,7 @@ std::unique_ptr<nano::container_info_component> nano::collect_container_info (te
auto composite = std::make_unique<container_info_composite> (name); auto composite = std::make_unique<container_info_composite> (name);
size_t callbacks_count; size_t callbacks_count;
{ {
nano::lock_guard<std::mutex> guard (telemetry.mutex); nano::lock_guard<nano::mutex> guard (telemetry.mutex);
std::unordered_map<nano::endpoint, std::vector<std::function<void(telemetry_data_response const &)>>> callbacks; std::unordered_map<nano::endpoint, std::vector<std::function<void(telemetry_data_response const &)>>> callbacks;
callbacks_count = std::accumulate (callbacks.begin (), callbacks.end (), static_cast<size_t> (0), [](auto total, auto const & callback_a) { callbacks_count = std::accumulate (callbacks.begin (), callbacks.end (), static_cast<size_t> (0), [](auto total, auto const & callback_a) {
return total += callback_a.second.size (); return total += callback_a.second.size ();

View file

@@ -113,7 +113,7 @@ private:
std::atomic<bool> stopped{ false };
-	std::mutex mutex;
+	nano::mutex mutex{ mutex_identifier (mutexes::telemetry) };
// clang-format off
// This holds the last telemetry data received from peers or can be a placeholder awaiting the first response (check with awaiting_first_response ())
boost::multi_index_container<nano::telemetry_info,
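
The replaced line above is where the telemetry mutex gains its identifier: constructing it as nano::mutex mutex{ mutex_identifier (mutexes::telemetry) } names the mutex, which is what lets a build configured with NANO_TIMED_LOCKS_FILTER restrict long-hold reporting to a single mutex. The standalone sketch below only illustrates that idea; it is not the real nano/lib/locks.hpp implementation, and the class name, the 100 ms threshold and the hard-coded filter string are assumptions made for the example.

// Illustrative sketch only -- not nano code. Shows how a per-mutex name plus a
// build-time filter can limit "blocked too long" reports to one mutex.
#include <chrono>
#include <iostream>
#include <mutex>
#include <string>

class timed_mutex_sketch
{
public:
	explicit timed_mutex_sketch (std::string const & name_a) :
	name (name_a)
	{
	}

	void lock ()
	{
		auto const start = std::chrono::steady_clock::now ();
		inner.lock ();
		auto const waited = std::chrono::duration_cast<std::chrono::milliseconds> (std::chrono::steady_clock::now () - start);
		// Report only for the mutex selected by the filter; an empty filter means report on all
		if ((filter.empty () || filter == name) && waited.count () >= threshold_ms)
		{
			std::cerr << "Mutex " << name << " was blocked for " << waited.count () << " ms\n";
		}
	}

	void unlock ()
	{
		inner.unlock ();
	}

private:
	std::mutex inner;
	std::string name;
	// In a real build these would come from the NANO_TIMED_LOCKS* compile definitions; hard-coded here.
	static constexpr int threshold_ms = 100;
	inline static const std::string filter{ "telemetry" };
};

int main ()
{
	timed_mutex_sketch telemetry_mutex{ "telemetry" };
	std::lock_guard<timed_mutex_sketch> guard (telemetry_mutex); // works with standard lock guards
}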

View file

@@ -12,7 +12,7 @@ socket (socket_a)
nano::transport::channel_tcp::~channel_tcp ()
{
-	nano::lock_guard<std::mutex> lk (channel_mutex);
+	nano::lock_guard<nano::mutex> lk (channel_mutex);
// Close socket. Exception: socket is used by bootstrap_server
if (auto socket_l = socket.lock ())
{
@@ -101,7 +101,7 @@ std::string nano::transport::channel_tcp::to_string () const
void nano::transport::channel_tcp::set_endpoint ()
{
-	nano::lock_guard<std::mutex> lk (channel_mutex);
+	nano::lock_guard<nano::mutex> lk (channel_mutex);
debug_assert (endpoint == nano::tcp_endpoint (boost::asio::ip::address_v6::any (), 0)); // Not initialized endpoint value
// Calculate TCP socket endpoint
if (auto socket_l = socket.lock ())
@@ -123,7 +123,7 @@ bool nano::transport::tcp_channels::insert (std::shared_ptr<nano::transport::cha
bool error (true);
if (!node.network.not_a_peer (udp_endpoint, node.config.allow_local_peers) && !stopped)
{
-	nano::unique_lock<std::mutex> lock (mutex);
+	nano::unique_lock<nano::mutex> lock (mutex);
auto existing (channels.get<endpoint_tag> ().find (endpoint));
if (existing == channels.get<endpoint_tag> ().end ())
{
@@ -148,19 +148,19 @@ bool nano::transport::tcp_channels::insert (std::shared_ptr<nano::transport::cha
void nano::transport::tcp_channels::erase (nano::tcp_endpoint const & endpoint_a)
{
-	nano::lock_guard<std::mutex> lock (mutex);
+	nano::lock_guard<nano::mutex> lock (mutex);
channels.get<endpoint_tag> ().erase (endpoint_a);
}
size_t nano::transport::tcp_channels::size () const
{
-	nano::lock_guard<std::mutex> lock (mutex);
+	nano::lock_guard<nano::mutex> lock (mutex);
return channels.size ();
}
std::shared_ptr<nano::transport::channel_tcp> nano::transport::tcp_channels::find_channel (nano::tcp_endpoint const & endpoint_a) const
{
-	nano::lock_guard<std::mutex> lock (mutex);
+	nano::lock_guard<nano::mutex> lock (mutex);
std::shared_ptr<nano::transport::channel_tcp> result;
auto existing (channels.get<endpoint_tag> ().find (endpoint_a));
if (existing != channels.get<endpoint_tag> ().end ())
@@ -174,7 +174,7 @@ std::unordered_set<std::shared_ptr<nano::transport::channel>> nano::transport::t
{
std::unordered_set<std::shared_ptr<nano::transport::channel>> result;
result.reserve (count_a);
-	nano::lock_guard<std::mutex> lock (mutex);
+	nano::lock_guard<nano::mutex> lock (mutex);
// Stop trying to fill result with random samples after this many attempts
auto random_cutoff (count_a * 2);
auto peers_size (channels.size ());
@@ -218,7 +218,7 @@ bool nano::transport::tcp_channels::store_all (bool clear_peers)
// we collect endpoints to be saved and then relase the lock.
std::vector<nano::endpoint> endpoints;
{
-	nano::lock_guard<std::mutex> lock (mutex);
+	nano::lock_guard<nano::mutex> lock (mutex);
endpoints.reserve (channels.size ());
std::transform (channels.begin (), channels.end (),
std::back_inserter (endpoints), [](const auto & channel) { return nano::transport::map_tcp_to_endpoint (channel.endpoint ()); });
@@ -245,7 +245,7 @@ bool nano::transport::tcp_channels::store_all (bool clear_peers)
std::shared_ptr<nano::transport::channel_tcp> nano::transport::tcp_channels::find_node_id (nano::account const & node_id_a)
{
std::shared_ptr<nano::transport::channel_tcp> result;
-	nano::lock_guard<std::mutex> lock (mutex);
+	nano::lock_guard<nano::mutex> lock (mutex);
auto existing (channels.get<node_id_tag> ().find (node_id_a));
if (existing != channels.get<node_id_tag> ().end ())
{
@@ -257,7 +257,7 @@ std::shared_ptr<nano::transport::channel_tcp> nano::transport::tcp_channels::fin
nano::tcp_endpoint nano::transport::tcp_channels::bootstrap_peer (uint8_t connection_protocol_version_min)
{
nano::tcp_endpoint result (boost::asio::ip::address_v6::any (), 0);
-	nano::lock_guard<std::mutex> lock (mutex);
+	nano::lock_guard<nano::mutex> lock (mutex);
for (auto i (channels.get<last_bootstrap_attempt_tag> ().begin ()), n (channels.get<last_bootstrap_attempt_tag> ().end ()); i != n;)
{
if (i->channel->get_network_version () >= connection_protocol_version_min)
@@ -347,7 +347,7 @@ void nano::transport::tcp_channels::start ()
void nano::transport::tcp_channels::stop ()
{
stopped = true;
-	nano::unique_lock<std::mutex> lock (mutex);
+	nano::unique_lock<nano::mutex> lock (mutex);
// Close all TCP sockets
for (auto i (channels.begin ()), j (channels.end ()); i != j; ++i)
{
@@ -370,7 +370,7 @@ bool nano::transport::tcp_channels::max_ip_connections (nano::tcp_endpoint const
bool result (false);
if (!node.flags.disable_max_peers_per_ip)
{
-	nano::unique_lock<std::mutex> lock (mutex);
+	nano::unique_lock<nano::mutex> lock (mutex);
result = channels.get<ip_address_tag> ().count (endpoint_a.address ()) >= node.network_params.node.max_peers_per_ip;
if (!result)
{
@@ -389,7 +389,7 @@ bool nano::transport::tcp_channels::reachout (nano::endpoint const & endpoint_a)
{
// Don't keepalive to nodes that already sent us something
error |= find_channel (tcp_endpoint) != nullptr;
-	nano::lock_guard<std::mutex> lock (mutex);
+	nano::lock_guard<nano::mutex> lock (mutex);
auto inserted (attempts.emplace (tcp_endpoint));
error |= !inserted.second;
}
@@ -402,7 +402,7 @@ std::unique_ptr<nano::container_info_component> nano::transport::tcp_channels::c
size_t attemps_count;
size_t node_id_handshake_sockets_count;
{
-	nano::lock_guard<std::mutex> guard (mutex);
+	nano::lock_guard<nano::mutex> guard (mutex);
channels_count = channels.size ();
attemps_count = attempts.size ();
node_id_handshake_sockets_count = node_id_handshake_sockets.size ();
@@ -418,7 +418,7 @@ std::unique_ptr<nano::container_info_component> nano::transport::tcp_channels::c
void nano::transport::tcp_channels::purge (std::chrono::steady_clock::time_point const & cutoff_a)
{
-	nano::lock_guard<std::mutex> lock (mutex);
+	nano::lock_guard<nano::mutex> lock (mutex);
auto disconnect_cutoff (channels.get<last_packet_sent_tag> ().lower_bound (cutoff_a));
channels.get<last_packet_sent_tag> ().erase (channels.get<last_packet_sent_tag> ().begin (), disconnect_cutoff);
// Remove keepalive attempt tracking for attempts older than cutoff
@@ -440,7 +440,7 @@ void nano::transport::tcp_channels::ongoing_keepalive ()
{
nano::keepalive message;
node.network.random_fill (message.peers);
-	nano::unique_lock<std::mutex> lock (mutex);
+	nano::unique_lock<nano::mutex> lock (mutex);
// Wake up channels
std::vector<std::shared_ptr<nano::transport::channel_tcp>> send_list;
auto keepalive_sent_cutoff (channels.get<last_packet_sent_tag> ().lower_bound (std::chrono::steady_clock::now () - node.network_params.node.period));
@@ -481,7 +481,7 @@ void nano::transport::tcp_channels::ongoing_keepalive ()
void nano::transport::tcp_channels::list_below_version (std::vector<std::shared_ptr<nano::transport::channel>> & channels_a, uint8_t cutoff_version_a)
{
-	nano::lock_guard<std::mutex> lock (mutex);
+	nano::lock_guard<nano::mutex> lock (mutex);
// clang-format off
nano::transform_if (channels.get<random_access_tag> ().begin (), channels.get<random_access_tag> ().end (), std::back_inserter (channels_a),
[cutoff_version_a](auto & channel_a) { return channel_a.channel->get_network_version () < cutoff_version_a; },
@@ -491,7 +491,7 @@ void nano::transport::tcp_channels::list_below_version (std::vector<std::shared_
void nano::transport::tcp_channels::list (std::deque<std::shared_ptr<nano::transport::channel>> & deque_a, uint8_t minimum_version_a, bool include_temporary_channels_a)
{
-	nano::lock_guard<std::mutex> lock (mutex);
+	nano::lock_guard<nano::mutex> lock (mutex);
// clang-format off
nano::transform_if (channels.get<random_access_tag> ().begin (), channels.get<random_access_tag> ().end (), std::back_inserter (deque_a),
[include_temporary_channels_a, minimum_version_a](auto & channel_a) { return channel_a.channel->get_network_version () >= minimum_version_a && (include_temporary_channels_a || !channel_a.channel->temporary); },
@@ -501,7 +501,7 @@ void nano::transport::tcp_channels::list (std::deque<std::shared_ptr<nano::trans
void nano::transport::tcp_channels::modify (std::shared_ptr<nano::transport::channel_tcp> const & channel_a, std::function<void(std::shared_ptr<nano::transport::channel_tcp> const &)> modify_callback_a)
{
-	nano::lock_guard<std::mutex> lock (mutex);
+	nano::lock_guard<nano::mutex> lock (mutex);
auto existing (channels.get<endpoint_tag> ().find (channel_a->get_tcp_endpoint ()));
if (existing != channels.get<endpoint_tag> ().end ())
{
@@ -513,7 +513,7 @@ void nano::transport::tcp_channels::update (nano::tcp_endpoint const & endpoint_
void nano::transport::tcp_channels::update (nano::tcp_endpoint const & endpoint_a)
{
-	nano::lock_guard<std::mutex> lock (mutex);
+	nano::lock_guard<nano::mutex> lock (mutex);
auto existing (channels.get<endpoint_tag> ().find (endpoint_a));
if (existing != channels.get<endpoint_tag> ().end ())
{
@@ -525,13 +525,13 @@ void nano::transport::tcp_channels::update (nano::tcp_endpoint const & endpoint_
bool nano::transport::tcp_channels::node_id_handhake_sockets_empty () const
{
-	nano::lock_guard<std::mutex> guard (mutex);
+	nano::lock_guard<nano::mutex> guard (mutex);
return node_id_handshake_sockets.empty ();
}
void nano::transport::tcp_channels::push_node_id_handshake_socket (std::shared_ptr<nano::socket> const & socket_a)
{
-	nano::lock_guard<std::mutex> guard (mutex);
+	nano::lock_guard<nano::mutex> guard (mutex);
node_id_handshake_sockets.push_back (socket_a);
}
@@ -540,7 +540,7 @@ void nano::transport::tcp_channels::remove_node_id_handshake_socket (std::shared
std::weak_ptr<nano::node> node_w (node.shared ());
if (auto node_l = node_w.lock ())
{
-	nano::lock_guard<std::mutex> guard (mutex);
+	nano::lock_guard<nano::mutex> guard (mutex);
node_id_handshake_sockets.erase (std::remove (node_id_handshake_sockets.begin (), node_id_handshake_sockets.end (), socket_a), node_id_handshake_sockets.end ());
}
}
@@ -720,7 +720,7 @@ void nano::transport::tcp_channels::start_tcp_receive_node_id (std::shared_ptr<n
// Version of channel is not high enough, just abort. Don't fallback to udp, instead cleanup attempt
cleanup_node_id_handshake_socket (endpoint_a, callback_a);
{
-	nano::lock_guard<std::mutex> lock (node_l->network.tcp_channels.mutex);
+	nano::lock_guard<nano::mutex> lock (node_l->network.tcp_channels.mutex);
node_l->network.tcp_channels.attempts.get<endpoint_tag> ().erase (nano::transport::map_endpoint_to_tcp (endpoint_a));
}
}
@@ -746,7 +746,7 @@ void nano::transport::tcp_channels::start_tcp_receive_node_id (std::shared_ptr<n
void nano::transport::tcp_channels::udp_fallback (nano::endpoint const & endpoint_a, std::function<void(std::shared_ptr<nano::transport::channel> const &)> const & callback_a)
{
{
-	nano::lock_guard<std::mutex> lock (mutex);
+	nano::lock_guard<nano::mutex> lock (mutex);
attempts.get<endpoint_tag> ().erase (nano::transport::map_endpoint_to_tcp (endpoint_a));
}
if (callback_a && !node.flags.disable_udp)

View file

@@ -60,7 +60,7 @@ namespace transport
nano::tcp_endpoint get_tcp_endpoint () const override
{
-	nano::lock_guard<std::mutex> lk (channel_mutex);
+	nano::lock_guard<nano::mutex> lk (channel_mutex);
return endpoint;
}
@@ -189,7 +189,7 @@ namespace transport
{
}
};
-	mutable std::mutex mutex;
+	mutable nano::mutex mutex;
// clang-format off
boost::multi_index_container<channel_tcp_wrapper,
mi::indexed_by<
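
Note that the tcp_channels mutex above stays mutable after the swap to nano::mutex: const accessors such as size (), find_channel () and node_id_handhake_sockets_empty () still need to acquire it. The snippet below is a generic, standalone illustration of that pattern only; it uses plain std:: types and an invented channel_container_sketch class rather than anything from the nano codebase.

// Minimal illustration (not nano code) of a mutable mutex guarding a container
// so that const member functions can still lock it.
#include <cstddef>
#include <mutex>
#include <vector>

class channel_container_sketch
{
public:
	void add (int channel_a)
	{
		std::lock_guard<std::mutex> lock (mutex);
		channels.push_back (channel_a);
	}

	std::size_t size () const
	{
		// Locking mutates the mutex, so it must be declared mutable for use in a const member function
		std::lock_guard<std::mutex> lock (mutex);
		return channels.size ();
	}

private:
	mutable std::mutex mutex;
	std::vector<int> channels;
};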

Some files were not shown because too many files have changed in this diff.