Track length of time mutexes are held for (#2267)

* Track length of time locks are held for
* Fix Windows build
* Fix broken Linux build
* Fix MSVC warning

parent 60b13fad2c
commit 732de66de2
78 changed files with 970 additions and 459 deletions
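In short, the change introduces drop-in wrappers nano::lock_guard, nano::unique_lock and nano::condition_variable (new files nano/lib/locks.hpp and nano/lib/locks.cpp further down) and switches call sites over to them. When the build is configured with a positive NANO_TIMED_LOCKS value, the wrappers time how long each std::mutex is blocked on and held and print a stack trace to stdout when the threshold is exceeded; when it is 0 (the default) they are plain aliases for the std:: types. A minimal sketch of the call-site change (the mutex shown is illustrative):

	// before: plain standard guard
	std::lock_guard<std::mutex> guard (node.active.mutex);

	// after: same locking semantics, but reports to stdout if acquiring or
	// holding the mutex takes longer than NANO_TIMED_LOCKS milliseconds
	nano::lock_guard<std::mutex> guard (node.active.mutex);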
@@ -31,6 +31,11 @@ set (NANO_TEST OFF CACHE BOOL "")
set (NANO_SECURE_RPC OFF CACHE BOOL "")
set (NANO_ROCKSDB OFF CACHE BOOL "")
set (NANO_WARN_TO_ERR OFF CACHE BOOL "")
+set (NANO_TIMED_LOCKS 0 CACHE INTEGER "")
+
+if (${NANO_TIMED_LOCKS} GREATER 0)
+	add_definitions (-DNANO_TIMED_LOCKS=${NANO_TIMED_LOCKS})
+endif ()

if (NANO_ROCKSDB)
	add_definitions (-DNANO_ROCKSDB=1)

@@ -182,7 +187,12 @@ set(Boost_USE_MULTITHREADED ON)
list(APPEND CMAKE_MODULE_PATH "${CMAKE_SOURCE_DIR}/cmake/Modules")

-find_package (Boost 1.67.0 REQUIRED COMPONENTS filesystem log log_setup thread program_options system)
+if (${NANO_TIMED_LOCKS} GREATER 0)
+	set (timed_locks_boost_libs context fiber)
+endif ()
+
+find_package (Boost 1.67.0 REQUIRED COMPONENTS filesystem log log_setup thread program_options system ${timed_locks_boost_libs})

if (NANO_ROCKSDB)
	find_package (RocksDB REQUIRED)
	find_package (ZLIB REQUIRED)
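The NANO_TIMED_LOCKS cache variable is the threshold in milliseconds before a mutex report is printed; 0 (the default) disables the feature. Configuring with, say, -DNANO_TIMED_LOCKS=250 (an illustrative value) defines the preprocessor symbol that gates the timed implementation and adds the Boost context/fiber components to the Boost find_package call. The compile-time switch the rest of the diff relies on looks like this:

	#if NANO_TIMED_LOCKS > 0
	// timed nano::lock_guard / nano::unique_lock / nano::condition_variable are compiled in
	#else
	// nano::lock_guard and friends are plain aliases for the std:: equivalents
	#endif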
@@ -14,6 +14,12 @@ if [[ $(grep -rl --exclude="*asio.hpp" "asio::async_write" ./nano) ]]; then
	exit 1
fi

+# prevent unsolicited use of std::lock_guard & std::unique_lock outside of allowed areas
+if [[ $(grep -rl --exclude={"*random_pool.cpp","*random_pool.hpp","*locks.hpp"} "std::unique_lock\|std::lock_guard" ./nano) ]]; then
+	echo "using std::unique_lock or std::lock_guard is not permitted (except in nano/lib/locks.hpp and non-nano dependent libraries). Use the nano::* versions instead"
+	exit 1
+fi
+
mkdir build
pushd build
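The new CI check fails the build if std::lock_guard or std::unique_lock is used directly anywhere under ./nano except the whitelisted files (random_pool.cpp, random_pool.hpp, locks.hpp). For example (illustrative snippet, not taken from the diff):

	std::unique_lock<std::mutex> lock (mutex);   // rejected by the grep check
	nano::unique_lock<std::mutex> lock (mutex);  // accepted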
@@ -11,6 +11,7 @@ add_executable (core_test
	gap_cache.cpp
	ipc.cpp
	ledger.cpp
+	locks.cpp
	logger.cpp
	network.cpp
	node.cpp
@@ -60,7 +60,7 @@ TEST (active_transactions, adjusted_difficulty_priority)

// Check adjusted difficulty
{
-std::lock_guard<std::mutex> active_guard (node1.active.mutex);
+nano::lock_guard<std::mutex> active_guard (node1.active.mutex);
ASSERT_EQ (node1.active.roots.get<1> ().begin ()->election->status.winner->hash (), send1->hash ());
ASSERT_LT (node1.active.roots.find (send2->qualified_root ())->adjusted_difficulty, node1.active.roots.find (send1->qualified_root ())->adjusted_difficulty);
ASSERT_LT (node1.active.roots.find (open1->qualified_root ())->adjusted_difficulty, node1.active.roots.find (send1->qualified_root ())->adjusted_difficulty);

@@ -70,7 +70,7 @@ TEST (active_transactions, adjusted_difficulty_priority)
// Confirm elections
while (node1.active.size () != 0)
{
-std::lock_guard<std::mutex> active_guard (node1.active.mutex);
+nano::lock_guard<std::mutex> active_guard (node1.active.mutex);
auto it (node1.active.roots.begin ());
while (!node1.active.roots.empty () && it != node1.active.roots.end ())
{

@@ -81,7 +81,7 @@ TEST (active_transactions, adjusted_difficulty_priority)
}
{
system.deadline_set (10s);
-std::unique_lock<std::mutex> active_lock (node1.active.mutex);
+nano::unique_lock<std::mutex> active_lock (node1.active.mutex);
while (node1.active.confirmed.size () != 4)
{
active_lock.unlock ();

@@ -113,7 +113,7 @@ TEST (active_transactions, adjusted_difficulty_priority)
}

// Check adjusted difficulty
-std::lock_guard<std::mutex> lock (node1.active.mutex);
+nano::lock_guard<std::mutex> lock (node1.active.mutex);
uint64_t last_adjusted (0);
for (auto i (node1.active.roots.get<1> ().begin ()), n (node1.active.roots.get<1> ().end ()); i != n; ++i)
{

@@ -154,7 +154,7 @@ TEST (active_transactions, adjusted_difficulty_overflow_max)
}

{
-std::lock_guard<std::mutex> active_guard (node1.active.mutex);
+nano::lock_guard<std::mutex> active_guard (node1.active.mutex);
// Update difficulty to maximum
auto send1_root (node1.active.roots.find (send1->qualified_root ()));
auto send2_root (node1.active.roots.find (send2->qualified_root ()));

@@ -207,7 +207,7 @@ TEST (active_transactions, adjusted_difficulty_overflow_min)
}

{
-std::lock_guard<std::mutex> active_guard (node1.active.mutex);
+nano::lock_guard<std::mutex> active_guard (node1.active.mutex);
// Update difficulty to minimum
auto send1_root (node1.active.roots.find (send1->qualified_root ()));
auto send2_root (node1.active.roots.find (send2->qualified_root ()));

@@ -266,7 +266,7 @@ TEST (active_transactions, keep_local)
}
while (node1.active.size () != 0)
{
-std::lock_guard<std::mutex> active_guard (node1.active.mutex);
+nano::lock_guard<std::mutex> active_guard (node1.active.mutex);
auto it (node1.active.roots.begin ());
while (!node1.active.roots.empty () && it != node1.active.roots.end ())
{

@@ -329,7 +329,7 @@ TEST (active_transactions, prioritize_chains)
}
while (node1.active.size () != 0)
{
-std::lock_guard<std::mutex> active_guard (node1.active.mutex);
+nano::lock_guard<std::mutex> active_guard (node1.active.mutex);
auto it (node1.active.roots.get<1> ().begin ());
while (!node1.active.roots.empty () && it != node1.active.roots.get<1> ().end ())
{

@@ -355,7 +355,7 @@ TEST (active_transactions, prioritize_chains)
while (!done)
{
{
-std::lock_guard<std::mutex> guard (node1.active.mutex);
+nano::lock_guard<std::mutex> guard (node1.active.mutex);
done = node1.active.long_unconfirmed_size == 4;
}
ASSERT_NO_ERROR (system.poll ());

@@ -373,7 +373,7 @@ TEST (active_transactions, prioritize_chains)
while (!done)
{
{
-std::lock_guard<std::mutex> guard (node1.active.mutex);
+nano::lock_guard<std::mutex> guard (node1.active.mutex);
done = node1.active.long_unconfirmed_size == 4;
}
ASSERT_NO_ERROR (system.poll ());

@@ -18,7 +18,7 @@ TEST (conflicts, start_stop)
node1.active.start (send1);
ASSERT_EQ (1, node1.active.size ());
{
-std::lock_guard<std::mutex> guard (node1.active.mutex);
+nano::lock_guard<std::mutex> guard (node1.active.mutex);
auto existing1 (node1.active.roots.find (send1->qualified_root ()));
ASSERT_NE (node1.active.roots.end (), existing1);
auto votes1 (existing1->election);

@@ -45,7 +45,7 @@ TEST (conflicts, add_existing)
node1.active.vote (vote1);
ASSERT_EQ (1, node1.active.size ());
{
-std::lock_guard<std::mutex> guard (node1.active.mutex);
+nano::lock_guard<std::mutex> guard (node1.active.mutex);
auto votes1 (node1.active.roots.find (send2->qualified_root ())->election);
ASSERT_NE (nullptr, votes1);
ASSERT_EQ (2, votes1->last_votes.size ());

@@ -172,7 +172,7 @@ TEST (conflicts, reprioritize)
node1.process_active (send1);
node1.block_processor.flush ();
{
-std::lock_guard<std::mutex> guard (node1.active.mutex);
+nano::lock_guard<std::mutex> guard (node1.active.mutex);
auto existing1 (node1.active.roots.find (send1->qualified_root ()));
ASSERT_NE (node1.active.roots.end (), existing1);
ASSERT_EQ (difficulty1, existing1->difficulty);

@@ -183,7 +183,7 @@ TEST (conflicts, reprioritize)
node1.process_active (std::make_shared<nano::send_block> (send1_copy));
node1.block_processor.flush ();
{
-std::lock_guard<std::mutex> guard (node1.active.mutex);
+nano::lock_guard<std::mutex> guard (node1.active.mutex);
auto existing2 (node1.active.roots.find (send1->qualified_root ()));
ASSERT_NE (node1.active.roots.end (), existing2);
ASSERT_EQ (difficulty2, existing2->difficulty);

@@ -210,7 +210,7 @@ TEST (conflicts, dependency)
ASSERT_EQ (2, node1->active.size ());
// Check dependency for send block
{
-std::lock_guard<std::mutex> guard (node1->active.mutex);
+nano::lock_guard<std::mutex> guard (node1->active.mutex);
auto existing1 (node1->active.roots.find (send1->qualified_root ()));
ASSERT_NE (node1->active.roots.end (), existing1);
auto election1 (existing1->election);

@@ -258,7 +258,7 @@ TEST (conflicts, adjusted_difficulty)
}
std::unordered_map<nano::block_hash, uint64_t> adjusted_difficulties;
{
-std::lock_guard<std::mutex> guard (node1.active.mutex);
+nano::lock_guard<std::mutex> guard (node1.active.mutex);
ASSERT_EQ (node1.active.roots.get<1> ().begin ()->election->status.winner->hash (), send1->hash ());
for (auto i (node1.active.roots.get<1> ().begin ()), n (node1.active.roots.get<1> ().end ()); i != n; ++i)
{

@@ -292,7 +292,7 @@ TEST (conflicts, adjusted_difficulty)
ASSERT_NO_ERROR (system.poll ());
}
{
-std::lock_guard<std::mutex> guard (node1.active.mutex);
+nano::lock_guard<std::mutex> guard (node1.active.mutex);
ASSERT_EQ (node1.active.roots.get<1> ().begin ()->election->status.winner->hash (), open_epoch2->hash ());
}
}

@@ -21,7 +21,7 @@ TEST (gap_cache, add_existing)
auto block1 (std::make_shared<nano::send_block> (0, 1, 2, nano::keypair ().prv, 4, 5));
auto transaction (system.nodes[0]->store.tx_begin_write ());
cache.add (transaction, block1->hash ());
-std::unique_lock<std::mutex> lock (cache.mutex);
+nano::unique_lock<std::mutex> lock (cache.mutex);
auto existing1 (cache.blocks.get<1> ().find (block1->hash ()));
ASSERT_NE (cache.blocks.get<1> ().end (), existing1);
auto arrival (existing1->arrival);

@@ -46,7 +46,7 @@ TEST (gap_cache, comparison)
auto block1 (std::make_shared<nano::send_block> (1, 0, 2, nano::keypair ().prv, 4, 5));
auto transaction (system.nodes[0]->store.tx_begin_write ());
cache.add (transaction, block1->hash ());
-std::unique_lock<std::mutex> lock (cache.mutex);
+nano::unique_lock<std::mutex> lock (cache.mutex);
auto existing1 (cache.blocks.get<1> ().find (block1->hash ()));
ASSERT_NE (cache.blocks.get<1> ().end (), existing1);
auto arrival (existing1->arrival);

@@ -734,7 +734,7 @@ TEST (votes, check_signature)
ASSERT_EQ (nano::process_result::progress, node1.ledger.process (transaction, *send1).code);
}
node1.active.start (send1);
-std::lock_guard<std::mutex> lock (node1.active.mutex);
+nano::lock_guard<std::mutex> lock (node1.active.mutex);
auto votes1 (node1.active.roots.find (send1->qualified_root ())->election);
ASSERT_EQ (1, votes1->last_votes.size ());
auto vote1 (std::make_shared<nano::vote> (nano::test_genesis_key.pub, nano::test_genesis_key.prv, 1, send1));

@@ -757,7 +757,7 @@ TEST (votes, add_one)
auto transaction (node1.store.tx_begin_write ());
ASSERT_EQ (nano::process_result::progress, node1.ledger.process (transaction, *send1).code);
node1.active.start (send1);
-std::unique_lock<std::mutex> lock (node1.active.mutex);
+nano::unique_lock<std::mutex> lock (node1.active.mutex);
auto votes1 (node1.active.roots.find (send1->qualified_root ())->election);
ASSERT_EQ (1, votes1->last_votes.size ());
lock.unlock ();

@@ -786,7 +786,7 @@ TEST (votes, add_two)
auto transaction (node1.store.tx_begin_write ());
ASSERT_EQ (nano::process_result::progress, node1.ledger.process (transaction, *send1).code);
node1.active.start (send1);
-std::unique_lock<std::mutex> lock (node1.active.mutex);
+nano::unique_lock<std::mutex> lock (node1.active.mutex);
auto votes1 (node1.active.roots.find (send1->qualified_root ())->election);
lock.unlock ();
nano::keypair key2;

@@ -826,7 +826,7 @@ TEST (votes, add_existing)
ASSERT_FALSE (node1.active.vote (vote1));
// Block is already processed from vote
ASSERT_TRUE (node1.active.publish (send1));
-std::unique_lock<std::mutex> lock (node1.active.mutex);
+nano::unique_lock<std::mutex> lock (node1.active.mutex);
auto votes1 (node1.active.roots.find (send1->qualified_root ())->election);
ASSERT_EQ (1, votes1->last_votes[nano::test_genesis_key.pub].sequence);
nano::keypair key2;

@@ -869,7 +869,7 @@ TEST (votes, add_old)
ASSERT_EQ (nano::process_result::progress, node1.ledger.process (transaction, *send1).code);
node1.active.start (send1);
auto vote1 (std::make_shared<nano::vote> (nano::test_genesis_key.pub, nano::test_genesis_key.prv, 2, send1));
-std::lock_guard<std::mutex> lock (node1.active.mutex);
+nano::lock_guard<std::mutex> lock (node1.active.mutex);
auto votes1 (node1.active.roots.find (send1->qualified_root ())->election);
auto channel (std::make_shared<nano::transport::channel_udp> (node1.network.udp_channels, node1.network.endpoint (), node1.network_params.protocol.protocol_version));
node1.vote_processor.vote_blocking (transaction, vote1, channel);

@@ -902,7 +902,7 @@ TEST (votes, add_old_different_account)
ASSERT_EQ (nano::process_result::progress, node1.ledger.process (transaction, *send2).code);
node1.active.start (send1);
node1.active.start (send2);
-std::unique_lock<std::mutex> lock (node1.active.mutex);
+nano::unique_lock<std::mutex> lock (node1.active.mutex);
auto votes1 (node1.active.roots.find (send1->qualified_root ())->election);
auto votes2 (node1.active.roots.find (send2->qualified_root ())->election);
ASSERT_EQ (1, votes1->last_votes.size ());

@@ -940,7 +940,7 @@ TEST (votes, add_cooldown)
auto transaction (node1.store.tx_begin_write ());
ASSERT_EQ (nano::process_result::progress, node1.ledger.process (transaction, *send1).code);
node1.active.start (send1);
-std::unique_lock<std::mutex> lock (node1.active.mutex);
+nano::unique_lock<std::mutex> lock (node1.active.mutex);
auto votes1 (node1.active.roots.find (send1->qualified_root ())->election);
auto vote1 (std::make_shared<nano::vote> (nano::test_genesis_key.pub, nano::test_genesis_key.prv, 1, send1));
auto channel (std::make_shared<nano::transport::channel_udp> (node1.network.udp_channels, node1.network.endpoint (), node1.network_params.protocol.protocol_version));
nano/core_test/locks.cpp (new file, 126 lines)
@@ -0,0 +1,126 @@
#include <nano/core_test/testutil.hpp>
#include <nano/lib/locks.hpp>

#include <gtest/gtest.h>

#include <regex>

#if NANO_TIMED_LOCKS > 0
namespace
{
unsigned num_matches (std::string const & str)
{
	std::regex regexpr (R"(( \d+)ms)"); // matches things like " 12312ms"
	std::smatch matches;

	auto count = 0u;
	std::string::const_iterator search_start (str.cbegin ());
	while (std::regex_search (search_start, str.cend (), matches, regexpr))
	{
		++count;
		search_start = matches.suffix ().first;
	}
	return count;
}
}

TEST (locks, no_conflicts)
{
	std::stringstream ss;
	nano::cout_redirect (ss.rdbuf ());

	std::mutex guard_mutex;
	nano::lock_guard<std::mutex> guard (guard_mutex);

	std::mutex lk_mutex;
	nano::unique_lock<std::mutex> lk (lk_mutex);

	// This could fail if NANO_TIMED_LOCKS is such a low value that the above mutexes are held longer than that before reaching this statement
	ASSERT_EQ (ss.str (), "");
}

TEST (locks, lock_guard)
{
	std::stringstream ss;
	nano::cout_redirect redirect (ss.rdbuf ());

	std::mutex mutex;

	// Depending on timing the mutex could be reached first in
	std::promise<void> promise;
	std::thread t;
	{
		t = std::thread ([&mutex, &promise] {
			nano::lock_guard<std::mutex> guard (mutex);
			promise.set_value ();
			// Tries to make sure that the other guard to held for a minimum of NANO_TIMED_LOCKS, may need to increase this for low NANO_TIMED_LOCKS values
			std::this_thread::sleep_for (std::chrono::milliseconds (NANO_TIMED_LOCKS * 2));
		});
	}

	// Wait until the lock_guard has been reached in the other thread
	promise.get_future ().wait ();
	{
		nano::lock_guard<std::mutex> guard (mutex);
		t.join ();
	}

	// 2 mutexes held and 1 blocked
	ASSERT_EQ (num_matches (ss.str ()), 3);
}

TEST (locks, unique_lock)
{
	std::stringstream ss;
	nano::cout_redirect redirect (ss.rdbuf ());

	std::mutex mutex;

	// Depending on timing the mutex could be reached first in
	std::promise<void> promise;
	std::thread t ([&mutex, &promise] {
		nano::unique_lock<std::mutex> lk (mutex);
		std::this_thread::sleep_for (std::chrono::milliseconds (NANO_TIMED_LOCKS));
		lk.unlock ();
		lk.lock ();

		promise.set_value ();
		// Tries to make sure that the other guard to held for a minimum of NANO_TIMED_LOCKS, may need to increase this for low NANO_TIMED_LOCKS values
		std::this_thread::sleep_for (std::chrono::milliseconds (NANO_TIMED_LOCKS * 2));
	});

	// Wait until the lock_guard has been reached in the other thread
	promise.get_future ().wait ();
	{
		nano::unique_lock<std::mutex> lk (mutex);
		t.join ();
	}

	// 3 mutexes held and 1 blocked
	ASSERT_EQ (num_matches (ss.str ()), 4);
}

TEST (locks, condition_variable)
{
	nano::condition_variable cv;
	std::mutex mutex;
	std::promise<void> promise;
	std::atomic<bool> finished{ false };
	std::atomic<bool> notified{ false };
	std::thread t ([&cv, &notified, &finished] {
		while (!finished)
		{
			notified = true;
			cv.notify_one ();
		}
	});

	nano::unique_lock<std::mutex> lk (mutex);
	cv.wait (lk, [&notified] {
		return notified.load ();
	});

	finished = true;
	t.join ();
}
#endif
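The num_matches helper above counts occurrences of strings such as " 42ms" in the captured stdout, so each "held"/"blocked" report produced by the timed locks counts exactly once. A report has the shape printed by the output helper in nano/lib/locks.cpp further down in this diff (address and duration illustrative):

	0x7f9c1c005a60 Mutex held for: 127ms
	<stack trace>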
@@ -2096,7 +2096,7 @@ TEST (confirmation_height, conflict_rollback_cemented)
node1.block_processor.flush ();
node2.network.process_message (publish1, channel2);
node2.block_processor.flush ();
-std::unique_lock<std::mutex> lock (node2.active.mutex);
+nano::unique_lock<std::mutex> lock (node2.active.mutex);
auto conflict (node2.active.roots.find (nano::qualified_root (genesis.hash (), genesis.hash ())));
ASSERT_NE (node2.active.roots.end (), conflict);
auto votes1 (conflict->election);

@@ -2233,7 +2233,7 @@ TEST (confirmation_height, pending_observer_callbacks)
// Can have timing issues.
node->confirmation_height_processor.add (send.hash ());
{
-std::unique_lock<std::mutex> lk (node->pending_confirmation_height.mutex);
+nano::unique_lock<std::mutex> lk (node->pending_confirmation_height.mutex);
while (!node->pending_confirmation_height.current_hash.is_zero ())
{
lk.unlock ();

@@ -2269,7 +2269,7 @@ TEST (bootstrap, tcp_listener_timeout_empty)
while (!disconnected)
{
{
-std::lock_guard<std::mutex> guard (node0->bootstrap.mutex);
+nano::lock_guard<std::mutex> guard (node0->bootstrap.mutex);
disconnected = node0->bootstrap.connections.empty ();
}
ASSERT_NO_ERROR (system.poll ());

@@ -2297,7 +2297,7 @@ TEST (bootstrap, tcp_listener_timeout_node_id_handshake)
ASSERT_NO_ERROR (system.poll ());
}
{
-std::lock_guard<std::mutex> guard (node0->bootstrap.mutex);
+nano::lock_guard<std::mutex> guard (node0->bootstrap.mutex);
ASSERT_EQ (node0->bootstrap.connections.size (), 1);
}
bool disconnected (false);

@@ -2305,7 +2305,7 @@ TEST (bootstrap, tcp_listener_timeout_node_id_handshake)
while (!disconnected)
{
{
-std::lock_guard<std::mutex> guard (node0->bootstrap.mutex);
+nano::lock_guard<std::mutex> guard (node0->bootstrap.mutex);
disconnected = node0->bootstrap.connections.empty ();
}
ASSERT_NO_ERROR (system.poll ());

@@ -202,7 +202,7 @@ TEST (node, node_receive_quorum)
while (!done)
{
{
-std::lock_guard<std::mutex> guard (system.nodes[0]->active.mutex);
+nano::lock_guard<std::mutex> guard (system.nodes[0]->active.mutex);
auto info (system.nodes[0]->active.roots.find (nano::qualified_root (previous, previous)));
ASSERT_NE (system.nodes[0]->active.roots.end (), info);
done = info->election->confirmation_request_count > nano::active_transactions::minimum_confirmation_request_count;

@@ -395,7 +395,7 @@ TEST (node, search_pending_confirmed)
system.wallet (0)->insert_adhoc (key2.prv);
ASSERT_FALSE (system.wallet (0)->search_pending ());
{
-std::lock_guard<std::mutex> guard (node->active.mutex);
+nano::lock_guard<std::mutex> guard (node->active.mutex);
auto existing1 (node->active.blocks.find (send1->hash ()));
ASSERT_EQ (node->active.blocks.end (), existing1);
auto existing2 (node->active.blocks.find (send2->hash ()));

@@ -430,7 +430,7 @@ TEST (node, unlock_search)
}
system.wallet (0)->insert_adhoc (key2.prv);
{
-std::lock_guard<std::recursive_mutex> lock (system.wallet (0)->store.mutex);
+nano::lock_guard<std::recursive_mutex> lock (system.wallet (0)->store.mutex);
system.wallet (0)->store.password.value_set (nano::keypair ().prv);
}
auto node (system.nodes[0]);

@@ -1126,7 +1126,7 @@ TEST (node, fork_publish)
node1.process_active (send1);
node1.block_processor.flush ();
ASSERT_EQ (1, node1.active.size ());
-std::unique_lock<std::mutex> lock (node1.active.mutex);
+nano::unique_lock<std::mutex> lock (node1.active.mutex);
auto existing (node1.active.roots.find (send1->qualified_root ()));
ASSERT_NE (node1.active.roots.end (), existing);
auto election (existing->election);

@@ -1176,7 +1176,7 @@ TEST (node, fork_keep)
node1.block_processor.flush ();
node2.process_active (send2);
node2.block_processor.flush ();
-std::unique_lock<std::mutex> lock (node2.active.mutex);
+nano::unique_lock<std::mutex> lock (node2.active.mutex);
auto conflict (node2.active.roots.find (nano::qualified_root (genesis.hash (), genesis.hash ())));
ASSERT_NE (node2.active.roots.end (), conflict);
auto votes1 (conflict->election);

@@ -1232,7 +1232,7 @@ TEST (node, fork_flip)
node1.block_processor.flush ();
node2.network.process_message (publish1, channel2);
node2.block_processor.flush ();
-std::unique_lock<std::mutex> lock (node2.active.mutex);
+nano::unique_lock<std::mutex> lock (node2.active.mutex);
auto conflict (node2.active.roots.find (nano::qualified_root (genesis.hash (), genesis.hash ())));
ASSERT_NE (node2.active.roots.end (), conflict);
auto votes1 (conflict->election);

@@ -1293,7 +1293,7 @@ TEST (node, fork_multi_flip)
node1.block_processor.flush ();
node2.network.process_message (publish1, node2.network.udp_channels.create (node2.network.endpoint ()));
node2.block_processor.flush ();
-std::unique_lock<std::mutex> lock (node2.active.mutex);
+nano::unique_lock<std::mutex> lock (node2.active.mutex);
auto conflict (node2.active.roots.find (nano::qualified_root (genesis.hash (), genesis.hash ())));
ASSERT_NE (node2.active.roots.end (), conflict);
auto votes1 (conflict->election);

@@ -1426,7 +1426,7 @@ TEST (node, fork_open_flip)
node1.block_processor.flush ();
node2.process_active (open1);
node2.block_processor.flush ();
-std::unique_lock<std::mutex> lock (node2.active.mutex);
+nano::unique_lock<std::mutex> lock (node2.active.mutex);
auto conflict (node2.active.roots.find (open1->qualified_root ()));
ASSERT_NE (node2.active.roots.end (), conflict);
auto votes1 (conflict->election);

@@ -1724,7 +1724,7 @@ TEST (node, rep_self_vote)
ASSERT_EQ (nano::process_result::progress, node0->process (*block0).code);
auto & active (node0->active);
active.start (block0);
-std::unique_lock<std::mutex> lock (active.mutex);
+nano::unique_lock<std::mutex> lock (active.mutex);
auto existing (active.roots.find (block0->qualified_root ()));
ASSERT_NE (active.roots.end (), existing);
auto election (existing->election);

@@ -1876,7 +1876,7 @@ TEST (node, bootstrap_confirm_frontiers)
ASSERT_NO_ERROR (system1.poll ());
}
{
-std::lock_guard<std::mutex> guard (node1->active.mutex);
+nano::lock_guard<std::mutex> guard (node1->active.mutex);
auto existing1 (node1->active.blocks.find (send0.hash ()));
ASSERT_NE (node1->active.blocks.end (), existing1);
}

@@ -2099,7 +2099,7 @@ TEST (node, vote_replay)
}
{
auto transaction (system.nodes[0]->store.tx_begin_read ());
-std::lock_guard<std::mutex> lock (system.nodes[0]->store.get_cache_mutex ());
+nano::lock_guard<std::mutex> lock (system.nodes[0]->store.get_cache_mutex ());
auto vote (system.nodes[0]->store.vote_current (transaction, nano::test_genesis_key.pub));
ASSERT_EQ (nullptr, vote);
}

@@ -2112,7 +2112,7 @@ TEST (node, vote_replay)
{
auto ec = system.poll ();
auto transaction (system.nodes[0]->store.tx_begin_read ());
-std::lock_guard<std::mutex> lock (system.nodes[0]->store.get_cache_mutex ());
+nano::lock_guard<std::mutex> lock (system.nodes[0]->store.get_cache_mutex ());
auto vote (system.nodes[0]->store.vote_current (transaction, nano::test_genesis_key.pub));
done = vote && (vote->sequence >= 10000);
ASSERT_NO_ERROR (ec);

@@ -2309,7 +2309,7 @@ TEST (node, confirm_quorum)
{
ASSERT_FALSE (system.nodes[0]->active.empty ());
{
-std::lock_guard<std::mutex> guard (system.nodes[0]->active.mutex);
+nano::lock_guard<std::mutex> guard (system.nodes[0]->active.mutex);
auto info (system.nodes[0]->active.roots.find (nano::qualified_root (send1->hash (), send1->hash ())));
ASSERT_NE (system.nodes[0]->active.roots.end (), info);
done = info->election->confirmation_request_count > nano::active_transactions::minimum_confirmation_request_count;

@@ -2342,7 +2342,7 @@ TEST (node, local_votes_cache)
node.network.process_message (message2, channel);
}
{
-std::lock_guard<std::mutex> lock (node.store.get_cache_mutex ());
+nano::lock_guard<std::mutex> lock (node.store.get_cache_mutex ());
auto transaction (node.store.tx_begin_read ());
auto current_vote (node.store.vote_current (transaction, nano::test_genesis_key.pub));
ASSERT_EQ (current_vote->sequence, 2);

@@ -2358,7 +2358,7 @@ TEST (node, local_votes_cache)
node.network.process_message (message3, channel);
}
{
-std::lock_guard<std::mutex> lock (node.store.get_cache_mutex ());
+nano::lock_guard<std::mutex> lock (node.store.get_cache_mutex ());
auto transaction (node.store.tx_begin_read ());
auto current_vote (node.store.vote_current (transaction, nano::test_genesis_key.pub));
ASSERT_EQ (current_vote->sequence, 3);

@@ -2392,7 +2392,7 @@ TEST (node, local_votes_cache_generate_new_vote)
ASSERT_EQ (1, votes1[0]->blocks.size ());
ASSERT_EQ (send1->hash (), boost::get<nano::block_hash> (votes1[0]->blocks[0]));
{
-std::lock_guard<std::mutex> lock (node.store.get_cache_mutex ());
+nano::lock_guard<std::mutex> lock (node.store.get_cache_mutex ());
auto transaction (node.store.tx_begin_read ());
auto current_vote (node.store.vote_current (transaction, nano::test_genesis_key.pub));
ASSERT_EQ (current_vote->sequence, 1);

@@ -2413,7 +2413,7 @@ TEST (node, local_votes_cache_generate_new_vote)
ASSERT_EQ (1, votes2.size ());
ASSERT_EQ (2, votes2[0]->blocks.size ());
{
-std::lock_guard<std::mutex> lock (node.store.get_cache_mutex ());
+nano::lock_guard<std::mutex> lock (node.store.get_cache_mutex ());
auto transaction (node.store.tx_begin_read ());
auto current_vote (node.store.vote_current (transaction, nano::test_genesis_key.pub));
ASSERT_EQ (current_vote->sequence, 2);

@@ -2615,7 +2615,7 @@ TEST (node, epoch_conflict_confirm)
ASSERT_NO_ERROR (system.poll ());
}
{
-std::lock_guard<std::mutex> lock (node0->active.mutex);
+nano::lock_guard<std::mutex> lock (node0->active.mutex);
ASSERT_TRUE (node0->active.blocks.find (change->hash ()) != node0->active.blocks.end ());
ASSERT_TRUE (node0->active.blocks.find (epoch->hash ()) != node0->active.blocks.end ());
}

@@ -2686,7 +2686,7 @@ TEST (node, fork_invalid_block_signature_vote_by_hash)
auto vote (std::make_shared<nano::vote> (nano::test_genesis_key.pub, nano::test_genesis_key.prv, 0, vote_blocks));
{
auto transaction (system.nodes[0]->store.tx_begin_read ());
-std::unique_lock<std::mutex> lock (system.nodes[0]->active.mutex);
+nano::unique_lock<std::mutex> lock (system.nodes[0]->active.mutex);
system.nodes[0]->vote_processor.vote_blocking (transaction, vote, std::make_shared<nano::transport::channel_udp> (system.nodes[0]->network.udp_channels, system.nodes[0]->network.endpoint (), system.nodes[0]->network_params.protocol.protocol_version));
}
while (system.nodes[0]->block (send1->hash ()))

@@ -2882,7 +2882,7 @@ TEST (node, confirm_back)
auto vote (std::make_shared<nano::vote> (nano::test_genesis_key.pub, nano::test_genesis_key.prv, 0, vote_blocks));
{
auto transaction (node.store.tx_begin_read ());
-std::unique_lock<std::mutex> lock (node.active.mutex);
+nano::unique_lock<std::mutex> lock (node.active.mutex);
node.vote_processor.vote_blocking (transaction, vote, std::make_shared<nano::transport::channel_udp> (node.network.udp_channels, node.network.endpoint (), node.network_params.protocol.protocol_version));
}
system.deadline_set (10s);

@@ -3363,7 +3363,7 @@ TEST (active_difficulty, recalculate_work)
}
auto sum (std::accumulate (node1.active.multipliers_cb.begin (), node1.active.multipliers_cb.end (), double(0)));
ASSERT_EQ (node1.active.active_difficulty (), nano::difficulty::from_multiplier (sum / node1.active.multipliers_cb.size (), node1.network_params.network.publish_threshold));
-std::unique_lock<std::mutex> lock (node1.active.mutex);
+nano::unique_lock<std::mutex> lock (node1.active.mutex);
// Fake history records to force work recalculation
for (auto i (0); i < node1.active.multipliers_cb.size (); i++)
{

@@ -56,17 +56,17 @@ TEST (alarm, one)
nano::alarm alarm (io_ctx);
std::atomic<bool> done (false);
std::mutex mutex;
-std::condition_variable condition;
+nano::condition_variable condition;
alarm.add (std::chrono::steady_clock::now (), [&]() {
{
-std::lock_guard<std::mutex> lock (mutex);
+nano::lock_guard<std::mutex> lock (mutex);
done = true;
}
condition.notify_one ();
});
boost::asio::io_context::work work (io_ctx);
boost::thread thread ([&io_ctx]() { io_ctx.run (); });
-std::unique_lock<std::mutex> unique (mutex);
+nano::unique_lock<std::mutex> unique (mutex);
condition.wait (unique, [&]() { return !!done; });
io_ctx.stop ();
thread.join ();

@@ -78,12 +78,12 @@ TEST (alarm, many)
nano::alarm alarm (io_ctx);
std::atomic<int> count (0);
std::mutex mutex;
-std::condition_variable condition;
+nano::condition_variable condition;
for (auto i (0); i < 50; ++i)
{
alarm.add (std::chrono::steady_clock::now (), [&]() {
{
-std::lock_guard<std::mutex> lock (mutex);
+nano::lock_guard<std::mutex> lock (mutex);
count += 1;
}
condition.notify_one ();

@@ -95,7 +95,7 @@ TEST (alarm, many)
{
threads.push_back (boost::thread ([&io_ctx]() { io_ctx.run (); }));
}
-std::unique_lock<std::mutex> unique (mutex);
+nano::unique_lock<std::mutex> unique (mutex);
condition.wait (unique, [&]() { return count == 50; });
io_ctx.stop ();
for (auto i (threads.begin ()), j (threads.end ()); i != j; ++i)

@@ -113,12 +113,12 @@ TEST (alarm, top_execution)
std::mutex mutex;
std::promise<bool> promise;
alarm.add (std::chrono::steady_clock::now (), [&]() {
-std::lock_guard<std::mutex> lock (mutex);
+nano::lock_guard<std::mutex> lock (mutex);
value1 = 1;
value2 = 1;
});
alarm.add (std::chrono::steady_clock::now () + std::chrono::milliseconds (1), [&]() {
-std::lock_guard<std::mutex> lock (mutex);
+nano::lock_guard<std::mutex> lock (mutex);
value2 = 2;
promise.set_value (false);
});

@@ -127,7 +127,7 @@ TEST (alarm, top_execution)
io_ctx.run ();
});
promise.get_future ().get ();
-std::lock_guard<std::mutex> lock (mutex);
+nano::lock_guard<std::mutex> lock (mutex);
ASSERT_EQ (1, value1);
ASSERT_EQ (2, value2);
io_ctx.stop ();
@@ -1,6 +1,7 @@
#pragma once

+#include <nano/lib/timer.hpp>
#include <nano/lib/utility.hpp>

#include <boost/iostreams/concepts.hpp>
#include <boost/log/sources/logger.hpp>

@@ -52,20 +53,20 @@ public:
stringstream_mt_sink () = default;
stringstream_mt_sink (const stringstream_mt_sink & sink)
{
-std::lock_guard<std::mutex> guard (mutex);
+nano::lock_guard<std::mutex> guard (mutex);
ss << sink.ss.str ();
}

std::streamsize write (const char * string_to_write, std::streamsize size)
{
-std::lock_guard<std::mutex> guard (mutex);
+nano::lock_guard<std::mutex> guard (mutex);
ss << std::string (string_to_write, size);
return size;
}

std::string str ()
{
-std::lock_guard<std::mutex> guard (mutex);
+nano::lock_guard<std::mutex> guard (mutex);
return ss.str ();
}

@@ -94,6 +95,23 @@ private:
boost::shared_ptr<boost::log::sinks::synchronous_sink<boost::log::sinks::text_ostream_backend>> console_sink;
};

+class cout_redirect
+{
+public:
+	cout_redirect (std::streambuf * new_buffer)
+	{
+		std::cout.rdbuf (new_buffer);
+	}
+
+	~cout_redirect ()
+	{
+		std::cout.rdbuf (old);
+	}
+
+private:
+	std::streambuf * old{ std::cout.rdbuf () };
+};
+
namespace util
{
/**

@@ -115,7 +133,7 @@ namespace util
}

protected:
-std::condition_variable cv;
+nano::condition_variable cv;
std::mutex mutex;
};

@@ -149,7 +167,7 @@ namespace util
error = count < required_count;
if (error)
{
-std::unique_lock<std::mutex> lock (mutex);
+nano::unique_lock<std::mutex> lock (mutex);
cv.wait_for (lock, std::chrono::milliseconds (1));
}
}
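The new nano::cout_redirect helper is what the locks tests use to capture the timing reports: it swaps std::cout's stream buffer for the one passed in and restores the original buffer in its destructor. A small usage sketch:

	std::stringstream ss;
	{
		nano::cout_redirect redirect (ss.rdbuf ());
		std::cout << "captured";
	}
	// ss.str () == "captured"; std::cout writes to its original buffer again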
@@ -1124,7 +1124,7 @@ TEST (wallet, work_watcher_update)
auto multiplier = nano::difficulty::to_multiplier (std::max (difficulty1, difficulty2), node.network_params.network.publish_threshold);
uint64_t updated_difficulty1{ difficulty1 }, updated_difficulty2{ difficulty2 };
{
-std::unique_lock<std::mutex> lock (node.active.mutex);
+nano::unique_lock<std::mutex> lock (node.active.mutex);
// Prevent active difficulty repopulating multipliers
node.network_params.network.request_interval_ms = 10000;
//fill multipliers_cb and update active difficulty;

@@ -1138,7 +1138,7 @@ TEST (wallet, work_watcher_update)
while (updated_difficulty1 == difficulty1 || updated_difficulty2 == difficulty2)
{
{
-std::lock_guard<std::mutex> guard (node.active.mutex);
+nano::lock_guard<std::mutex> guard (node.active.mutex);
{
auto const existing (node.active.roots.find (block1->qualified_root ()));
//if existing is junk the block has been confirmed already

@@ -1195,7 +1195,7 @@ TEST (wallet, work_watcher_cancel)
uint64_t difficulty1 (0);
nano::work_validate (*block1, &difficulty1);
{
-std::unique_lock<std::mutex> lock (node.active.mutex);
+nano::unique_lock<std::mutex> lock (node.active.mutex);
// Prevent active difficulty repopulating multipliers
node.network_params.network.request_interval_ms = 10000;
// Fill multipliers_cb and update active difficulty;

@@ -1216,7 +1216,7 @@ TEST (wallet, work_watcher_cancel)
node.work.cancel (block1->root ());
ASSERT_EQ (0, node.work.size ());
{
-std::unique_lock<std::mutex> lock (wallet.wallets.watcher->mutex);
+nano::unique_lock<std::mutex> lock (wallet.wallets.watcher->mutex);
auto existing (wallet.wallets.watcher->watched.find (block1->qualified_root ()));
ASSERT_NE (wallet.wallets.watcher->watched.end (), existing);
auto block2 (existing->second);

@@ -151,7 +151,7 @@ TEST (wallets, reload)
ASSERT_FALSE (error);
ASSERT_EQ (1, system.nodes[0]->wallets.items.size ());
{
-std::lock_guard<std::mutex> lock_wallet (system.nodes[0]->wallets.mutex);
+nano::lock_guard<std::mutex> lock_wallet (system.nodes[0]->wallets.mutex);
nano::inactive_node node (system.nodes[0]->application_path, 24001);
auto wallet (node.node->wallets.create (one));
ASSERT_NE (wallet, nullptr);

@@ -189,7 +189,7 @@ TEST (websocket, active_difficulty)

// Fake history records to force trended_active_difficulty change
{
-std::unique_lock<std::mutex> lock (node1->active.mutex);
+nano::unique_lock<std::mutex> lock (node1->active.mutex);
node1->active.multipliers_cb.push_front (10.);
}

@@ -5,18 +5,18 @@ CryptoPP::AutoSeededRandomPool nano::random_pool::pool;

void nano::random_pool::generate_block (unsigned char * output, size_t size)
{
-std::lock_guard<std::mutex> lk (mutex);
+std::lock_guard<std::mutex> guard (mutex);
pool.GenerateBlock (output, size);
}

unsigned nano::random_pool::generate_word32 (unsigned min, unsigned max)
{
-std::lock_guard<std::mutex> lk (mutex);
+std::lock_guard<std::mutex> guard (mutex);
return pool.GenerateWord32 (min, max);
}

unsigned char nano::random_pool::generate_byte ()
{
-std::lock_guard<std::mutex> lk (mutex);
+std::lock_guard<std::mutex> guard (mutex);
return pool.GenerateByte ();
}

@@ -17,7 +17,7 @@ public:
template <class Iter>
static void shuffle (Iter begin, Iter end)
{
-std::lock_guard<std::mutex> lk (mutex);
+std::lock_guard<std::mutex> guard (mutex);
pool.Shuffle (begin, end);
}
@@ -10,6 +10,10 @@ else ()
	error ("Unknown platform: ${CMAKE_SYSTEM_NAME}")
endif ()

+if (${NANO_TIMED_LOCKS} GREATER 0)
+	set (timed_locks_boost_libs Boost::context Boost::fiber)
+endif ()
+
add_library (nano_lib
	${platform_sources}
	alarm.hpp

@@ -33,6 +37,8 @@ add_library (nano_lib
	ipc_client.cpp
	json_error_response.hpp
	jsonconfig.hpp
+	locks.hpp
+	locks.cpp
	logger_mt.hpp
	memory.hpp
	memory.cpp

@@ -62,6 +68,7 @@ target_link_libraries (nano_lib
	blake2
	${CRYPTOPP_LIBRARY}
	${CMAKE_DL_LIBS}
+	${timed_locks_boost_libs}
	Boost::boost)

target_compile_definitions(nano_lib

@@ -23,7 +23,7 @@ nano::alarm::~alarm ()

void nano::alarm::run ()
{
-std::unique_lock<std::mutex> lock (mutex);
+nano::unique_lock<std::mutex> lock (mutex);
auto done (false);
while (!done)
{

@@ -58,7 +58,7 @@ void nano::alarm::run ()
void nano::alarm::add (std::chrono::steady_clock::time_point const & wakeup_a, std::function<void()> const & operation)
{
{
-std::lock_guard<std::mutex> lock (mutex);
+nano::lock_guard<std::mutex> guard (mutex);
operations.push (nano::operation ({ wakeup_a, operation }));
}
condition.notify_all ();

@@ -71,7 +71,7 @@ std::unique_ptr<seq_con_info_component> collect_seq_con_info (alarm & alarm, con
auto composite = std::make_unique<seq_con_info_composite> (name);
size_t count = 0;
{
-std::lock_guard<std::mutex> guard (alarm.mutex);
+nano::lock_guard<std::mutex> guard (alarm.mutex);
count = alarm.operations.size ();
}
auto sizeof_element = sizeof (decltype (alarm.operations)::value_type);

@@ -1,10 +1,11 @@
#pragma once

#include <nano/lib/utility.hpp>

#include <boost/asio.hpp>
#include <boost/thread.hpp>

#include <chrono>
#include <condition_variable>
#include <functional>
#include <mutex>
#include <queue>

@@ -30,7 +31,7 @@ public:
void run ();
boost::asio::io_context & io_ctx;
std::mutex mutex;
-std::condition_variable condition;
+nano::condition_variable condition;
std::priority_queue<operation, std::vector<operation>, std::greater<operation>> operations;
boost::thread thread;
};
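Because nano::alarm now exposes a nano::condition_variable, waits and notifications on the alarm mutex go through the timed wrappers as well. Usage is unchanged from the callers' point of view, as in the alarm tests above (lambda body illustrative):

	nano::alarm alarm (io_ctx);
	alarm.add (std::chrono::steady_clock::now (), [&] () {
		// runs on the alarm thread; alarm::add takes a nano::lock_guard on
		// alarm.mutex, queues the operation and then calls condition.notify_all ()
	});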
@@ -1567,7 +1567,7 @@ std::shared_ptr<nano::block> nano::block_uniquer::unique (std::shared_ptr<nano::
if (result != nullptr)
{
nano::uint256_union key (block_a->full_hash ());
-std::lock_guard<std::mutex> lock (mutex);
+nano::lock_guard<std::mutex> lock (mutex);
auto & existing (blocks[key]);
if (auto block_l = existing.lock ())
{

@@ -1604,7 +1604,7 @@ std::shared_ptr<nano::block> nano::block_uniquer::unique (std::shared_ptr<nano::

size_t nano::block_uniquer::size ()
{
-std::lock_guard<std::mutex> lock (mutex);
+nano::lock_guard<std::mutex> lock (mutex);
return blocks.size ();
}
nano/lib/locks.cpp (new file, 195 lines)
@@ -0,0 +1,195 @@
#include <nano/lib/locks.hpp>
#include <nano/lib/utility.hpp>

#if NANO_TIMED_LOCKS > 0
namespace
{
template <typename Mutex>
void output (const char * str, std::chrono::milliseconds time, Mutex & mutex)
{
	auto stacktrace = nano::generate_stacktrace ();
	std::cout << std::addressof (mutex) << " Mutex " << str << " for: " << time.count () << "ms\n"
	          << stacktrace << std::endl;
}

template <typename Mutex>
void output_if_held_long_enough (nano::timer<std::chrono::milliseconds> & timer, Mutex & mutex)
{
	auto time_held = timer.since_start ();
	if (time_held >= std::chrono::milliseconds (NANO_TIMED_LOCKS))
	{
		output ("held", time_held, mutex);
	}
	timer.stop ();
}

template <typename Mutex>
void output_if_blocked_long_enough (nano::timer<std::chrono::milliseconds> & timer, Mutex & mutex)
{
	auto time_blocked = timer.since_start ();
	if (time_blocked >= std::chrono::milliseconds (NANO_TIMED_LOCKS))
	{
		output ("blocked", time_blocked, mutex);
	}
}
}

namespace nano
{
lock_guard<std::mutex>::lock_guard (std::mutex & mutex) :
mut (mutex)
{
	timer.start ();

	mut.lock ();
	output_if_blocked_long_enough (timer, mut);
}

lock_guard<std::mutex>::~lock_guard () noexcept
{
	mut.unlock ();
	output_if_held_long_enough (timer, mut);
}

// Explicit instantiations for allowed types
template class lock_guard<std::mutex>;

template <typename Mutex, typename U>
unique_lock<Mutex, U>::unique_lock (Mutex & mutex) :
mut (std::addressof (mutex))
{
	lock_impl ();
}

template <typename Mutex, typename U>
void unique_lock<Mutex, U>::lock_impl ()
{
	timer.start ();

	mut->lock ();
	owns = true;

	output_if_blocked_long_enough (timer, *mut);
}

template <typename Mutex, typename U>
unique_lock<Mutex, U> & unique_lock<Mutex, U>::operator= (unique_lock<Mutex, U> && other) noexcept
{
	if (this != std::addressof (other))
	{
		if (owns)
		{
			mut->unlock ();
			owns = false;

			output_if_held_long_enough (timer, *mut);
		}

		mut = other.mut;
		owns = other.owns;
		timer = other.timer;

		other.mut = nullptr;
		other.owns = false;
	}
	return *this;
}

template <typename Mutex, typename U>
unique_lock<Mutex, U>::~unique_lock () noexcept
{
	if (owns)
	{
		mut->unlock ();
		owns = false;

		output_if_held_long_enough (timer, *mut);
	}
}

template <typename Mutex, typename U>
void unique_lock<Mutex, U>::lock ()
{
	validate ();
	lock_impl ();
}

template <typename Mutex, typename U>
bool unique_lock<Mutex, U>::try_lock ()
{
	validate ();
	owns = mut->try_lock ();

	if (owns)
	{
		timer.start ();
	}

	return owns;
}

template <typename Mutex, typename U>
void unique_lock<Mutex, U>::unlock ()
{
	if (!mut || !owns)
	{
		throw (std::system_error (std::make_error_code (std::errc::operation_not_permitted)));
	}

	mut->unlock ();
	owns = false;

	output_if_held_long_enough (timer, *mut);
}

template <typename Mutex, typename U>
bool unique_lock<Mutex, U>::owns_lock () const noexcept
{
	return owns;
}

template <typename Mutex, typename U>
unique_lock<Mutex, U>::operator bool () const noexcept
{
	return owns;
}

template <typename Mutex, typename U>
Mutex * unique_lock<Mutex, U>::mutex () const noexcept
{
	return mut;
}

template <typename Mutex, typename U>
void unique_lock<Mutex, U>::validate () const
{
	if (!mut)
	{
		throw (std::system_error (std::make_error_code (std::errc::operation_not_permitted)));
	}

	if (owns)
	{
		throw (std::system_error (std::make_error_code (std::errc::resource_deadlock_would_occur)));
	}
}

// Explicit instantiations for allowed types
template class unique_lock<std::mutex>;

void condition_variable::notify_one () noexcept
{
	cnd.notify_one ();
}

void condition_variable::notify_all () noexcept
{
	cnd.notify_all ();
}

void condition_variable::wait (nano::unique_lock<std::mutex> & lk)
{
	cnd.wait (lk);
}
}
#endif
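For reference, a minimal sketch of the reporting behaviour implemented above (assuming a build configured with a small threshold such as NANO_TIMED_LOCKS=20; the address and duration in the printed report are illustrative):

	std::mutex m;
	{
		nano::lock_guard<std::mutex> guard (m); // timer starts, m.lock () returns immediately
		std::this_thread::sleep_for (std::chrono::milliseconds (50));
	}
	// on destruction the guard unlocks m and, since 50ms >= NANO_TIMED_LOCKS,
	// prints "<mutex address> Mutex held for: 50ms" followed by a stack trace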
nano/lib/locks.hpp (new file, 132 lines)
@@ -0,0 +1,132 @@
#pragma once

#include <nano/lib/timer.hpp>
#if NANO_TIMED_LOCKS > 0
#ifndef NOMINMAX
#define NOMINMAX
#endif
#include <boost/fiber/condition_variable.hpp>
#endif
#include <condition_variable>
#include <mutex>
#include <unordered_map>

namespace nano
{
#if NANO_TIMED_LOCKS > 0
template <typename Mutex>
class lock_guard final
{
public:
	explicit lock_guard (Mutex & mutex_a) :
	guard (mutex_a)
	{
	}

	lock_guard (const lock_guard &) = delete;
	lock_guard & operator= (const lock_guard &) = delete;

private:
	std::lock_guard<Mutex> guard;
};

template <>
class lock_guard<std::mutex> final
{
public:
	explicit lock_guard (std::mutex & mutex_a);
	~lock_guard () noexcept;

	lock_guard (const lock_guard &) = delete;
	lock_guard & operator= (const lock_guard &) = delete;

private:
	std::mutex & mut;
	nano::timer<std::chrono::milliseconds> timer;
};

template <typename Mutex, typename = std::enable_if_t<std::is_same<Mutex, std::mutex>::value>>
class unique_lock final
{
public:
	unique_lock () = default;
	explicit unique_lock (Mutex & mutex_a);
	unique_lock (unique_lock && other) = delete;
	unique_lock & operator= (unique_lock && other) noexcept;
	~unique_lock () noexcept;
	unique_lock (const unique_lock &) = delete;
	unique_lock & operator= (const unique_lock &) = delete;

	void lock ();
	bool try_lock ();
	void unlock ();
	bool owns_lock () const noexcept;
	explicit operator bool () const noexcept;
	Mutex * mutex () const noexcept;

private:
	Mutex * mut{ nullptr };
	bool owns{ false };

	nano::timer<std::chrono::milliseconds> timer;

	void validate () const;
	void lock_impl ();
};

class condition_variable final
{
private:
	boost::fibers::condition_variable_any cnd;

public:
	condition_variable () = default;
	condition_variable (condition_variable const &) = delete;
	condition_variable & operator= (condition_variable const &) = delete;

	void notify_one () noexcept;
	void notify_all () noexcept;
	void wait (nano::unique_lock<std::mutex> & lt);

	template <typename Pred>
	void wait (nano::unique_lock<std::mutex> & lt, Pred pred)
	{
		cnd.wait (lt, pred);
	}

	template <typename Clock, typename Duration>
	void wait_until (nano::unique_lock<std::mutex> & lk, std::chrono::time_point<Clock, Duration> const & timeout_time)
	{
		cnd.wait_until (lk, timeout_time);
	}

	template <typename Clock, typename Duration, typename Pred>
	bool wait_until (nano::unique_lock<std::mutex> & lk, std::chrono::time_point<Clock, Duration> const & timeout_time, Pred pred)
	{
		return cnd.wait_until (lk, timeout_time, pred);
	}

	template <typename Rep, typename Period>
	void wait_for (nano::unique_lock<std::mutex> & lk,
	std::chrono::duration<Rep, Period> const & timeout_duration)
	{
		cnd.wait_for (lk, timeout_duration);
	}

	template <typename Rep, typename Period, typename Pred>
	bool wait_for (nano::unique_lock<std::mutex> & lk, std::chrono::duration<Rep, Period> const & timeout_duration, Pred pred)
	{
		return cnd.wait_for (lk, timeout_duration, pred);
	}
};

#else
template <typename Mutex>
using lock_guard = std::lock_guard<Mutex>;

template <typename Mutex>
using unique_lock = std::unique_lock<Mutex>;

using condition_variable = std::condition_variable;
#endif
}
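nano::condition_variable wraps boost::fibers::condition_variable_any rather than std::condition_variable because nano::unique_lock is not a std::unique_lock, and condition_variable_any can wait on any lock type. Call sites keep the familiar shape, as in the locks tests (the ready flag and the notifying thread are illustrative):

	std::mutex m;
	nano::condition_variable cv;
	bool ready (false);
	// ... some other thread sets ready under the mutex and calls cv.notify_one () ...
	nano::unique_lock<std::mutex> lk (m);
	cv.wait (lk, [&ready] { return ready; });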
@@ -1,5 +1,7 @@
#pragma once

+#include <nano/lib/locks.hpp>
+
#include <boost/log/sources/severity_logger.hpp>
#include <boost/log/trivial.hpp>
#include <boost/log/utility/manipulators/to_log.hpp>

@@ -105,7 +107,7 @@ public:
{
auto error (true);
auto time_now = std::chrono::steady_clock::now ();
-std::unique_lock<std::mutex> lk (last_log_time_mutex);
+nano::unique_lock<std::mutex> lk (last_log_time_mutex);
if (((time_now - last_log_time) > min_log_delta) || last_log_time == std::chrono::steady_clock::time_point{})
{
last_log_time = time_now;
@@ -3,27 +3,27 @@

void nano::rep_weights::representation_add (nano::account const & source_rep, nano::uint128_t const & amount_a)
{
-std::lock_guard<std::mutex> guard (mutex);
+nano::lock_guard<std::mutex> guard (mutex);
auto source_previous (get (source_rep));
put (source_rep, source_previous + amount_a);
}

void nano::rep_weights::representation_put (nano::account const & account_a, nano::uint128_union const & representation_a)
{
-std::lock_guard<std::mutex> guard (mutex);
+nano::lock_guard<std::mutex> guard (mutex);
put (account_a, representation_a);
}

nano::uint128_t nano::rep_weights::representation_get (nano::account const & account_a)
{
-std::lock_guard<std::mutex> lk (mutex);
+nano::lock_guard<std::mutex> lk (mutex);
return get (account_a);
}

/** Makes a copy */
std::unordered_map<nano::account, nano::uint128_t> nano::rep_weights::get_rep_amounts ()
{
-std::lock_guard<std::mutex> guard (mutex);
+nano::lock_guard<std::mutex> guard (mutex);
return rep_amounts;
}

@@ -59,7 +59,7 @@ std::unique_ptr<nano::seq_con_info_component> nano::collect_seq_con_info (nano::
size_t rep_amounts_count = 0;

{
-std::lock_guard<std::mutex> guard (rep_weights.mutex);
+nano::lock_guard<std::mutex> guard (rep_weights.mutex);
rep_amounts_count = rep_weights.rep_amounts.size ();
}
auto sizeof_element = sizeof (decltype (rep_weights.rep_amounts)::value_type);
@@ -204,7 +204,7 @@ std::shared_ptr<nano::stat_entry> nano::stat::get_entry (uint32_t key)

std::shared_ptr<nano::stat_entry> nano::stat::get_entry (uint32_t key, size_t interval, size_t capacity)
{
-std::unique_lock<std::mutex> lock (stat_mutex);
+nano::unique_lock<std::mutex> lock (stat_mutex);
return get_entry_impl (key, interval, capacity);
}

@@ -231,7 +231,7 @@ std::unique_ptr<nano::stat_log_sink> nano::stat::log_sink_json () const

void nano::stat::log_counters (stat_log_sink & sink)
{
-std::unique_lock<std::mutex> lock (stat_mutex);
+nano::unique_lock<std::mutex> lock (stat_mutex);
log_counters_impl (sink);
}

@@ -266,7 +266,7 @@ void nano::stat::log_counters_impl (stat_log_sink & sink)

void nano::stat::log_samples (stat_log_sink & sink)
{
-std::unique_lock<std::mutex> lock (stat_mutex);
+nano::unique_lock<std::mutex> lock (stat_mutex);
log_samples_impl (sink);
}

@@ -309,7 +309,7 @@ void nano::stat::update (uint32_t key_a, uint64_t value)

auto now (std::chrono::steady_clock::now ());

-std::unique_lock<std::mutex> lock (stat_mutex);
+nano::unique_lock<std::mutex> lock (stat_mutex);
if (!stopped)
{
auto entry (get_entry_impl (key_a, config.interval, config.capacity));

@@ -361,20 +361,20 @@ void nano::stat::update (uint32_t key_a, uint64_t value)

std::chrono::seconds nano::stat::last_reset ()
{
-std::unique_lock<std::mutex> lock (stat_mutex);
+nano::unique_lock<std::mutex> lock (stat_mutex);
auto now (std::chrono::steady_clock::now ());
return std::chrono::duration_cast<std::chrono::seconds> (now - timestamp);
}

void nano::stat::stop ()
{
-std::lock_guard<std::mutex> guard (stat_mutex);
+nano::lock_guard<std::mutex> guard (stat_mutex);
stopped = true;
}

void nano::stat::clear ()
{
-std::unique_lock<std::mutex> lock (stat_mutex);
+nano::unique_lock<std::mutex> lock (stat_mutex);
entries.clear ();
timestamp = std::chrono::steady_clock::now ();
}
@ -67,13 +67,13 @@ public:
|
|||
stat_datapoint () = default;
|
||||
stat_datapoint (stat_datapoint const & other_a)
|
||||
{
|
||||
std::lock_guard<std::mutex> lock (other_a.datapoint_mutex);
|
||||
nano::lock_guard<std::mutex> lock (other_a.datapoint_mutex);
|
||||
value = other_a.value;
|
||||
timestamp = other_a.timestamp;
|
||||
}
|
||||
stat_datapoint & operator= (stat_datapoint const & other_a)
|
||||
{
|
||||
std::lock_guard<std::mutex> lock (other_a.datapoint_mutex);
|
||||
nano::lock_guard<std::mutex> lock (other_a.datapoint_mutex);
|
||||
value = other_a.value;
|
||||
timestamp = other_a.timestamp;
|
||||
return *this;
|
||||
|
@ -81,28 +81,28 @@ public:
|
|||
|
||||
uint64_t get_value ()
|
||||
{
|
||||
std::lock_guard<std::mutex> lock (datapoint_mutex);
|
||||
nano::lock_guard<std::mutex> lock (datapoint_mutex);
|
||||
return value;
|
||||
}
|
||||
void set_value (uint64_t value_a)
|
||||
{
|
||||
std::lock_guard<std::mutex> lock (datapoint_mutex);
|
||||
nano::lock_guard<std::mutex> lock (datapoint_mutex);
|
||||
value = value_a;
|
||||
}
|
||||
std::chrono::system_clock::time_point get_timestamp ()
|
||||
{
|
||||
std::lock_guard<std::mutex> lock (datapoint_mutex);
|
||||
nano::lock_guard<std::mutex> lock (datapoint_mutex);
|
||||
return timestamp;
|
||||
}
|
||||
void set_timestamp (std::chrono::system_clock::time_point timestamp_a)
|
||||
{
|
||||
std::lock_guard<std::mutex> lock (datapoint_mutex);
|
||||
nano::lock_guard<std::mutex> lock (datapoint_mutex);
|
||||
timestamp = timestamp_a;
|
||||
}
|
||||
/** Add \addend to the current value and optionally update the timestamp */
|
||||
void add (uint64_t addend, bool update_timestamp = true)
|
||||
{
|
||||
std::lock_guard<std::mutex> lock (datapoint_mutex);
|
||||
nano::lock_guard<std::mutex> lock (datapoint_mutex);
|
||||
value += addend;
|
||||
if (update_timestamp)
|
||||
{
|
||||
|
|
|
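The stat_datapoint hunks above lock the source object's mutex while copying, which only compiles if that mutex can be locked through a const reference — the usual reason for declaring it mutable. A generic illustration of the pattern (names here are placeholders, not the real class):

```cpp
#include <cstdint>
#include <mutex>

// Generic illustration: a copyable value whose copy operations lock the
// source's mutex; the mutex is mutable so it can be locked through const &.
class guarded_value
{
public:
	guarded_value () = default;
	guarded_value (guarded_value const & other_a)
	{
		std::lock_guard<std::mutex> lock (other_a.value_mutex);
		value = other_a.value;
	}
	guarded_value & operator= (guarded_value const & other_a)
	{
		std::lock_guard<std::mutex> lock (other_a.value_mutex);
		value = other_a.value;
		return *this;
	}
	uint64_t get () const
	{
		std::lock_guard<std::mutex> lock (value_mutex);
		return value;
	}

private:
	mutable std::mutex value_mutex;
	uint64_t value{ 0 };
};
```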
@@ -67,6 +67,14 @@ void dump_crash_stacktrace ()
boost::stacktrace::safe_dump_to ("nano_node_backtrace.dump");
}

std::string generate_stacktrace ()
{
auto stacktrace = boost::stacktrace::stacktrace ();
std::stringstream ss;
ss << stacktrace;
return ss.str ();
}

namespace thread_role
{
/*

@@ -240,7 +248,7 @@ void nano::worker::run ()
{
while (!stopped)
{
std::unique_lock<std::mutex> lk (mutex);
nano::unique_lock<std::mutex> lk (mutex);
if (!queue.empty ())
{
auto func = queue.front ();

@@ -266,7 +274,7 @@ nano::worker::~worker ()
void nano::worker::push_task (std::function<void()> func_a)
{
{
std::lock_guard<std::mutex> guard (mutex);
nano::lock_guard<std::mutex> guard (mutex);
queue.emplace_back (func_a);
}

@@ -289,7 +297,7 @@ std::unique_ptr<nano::seq_con_info_component> nano::collect_seq_con_info (nano::

size_t count = 0;
{
std::lock_guard<std::mutex> guard (worker.mutex);
nano::lock_guard<std::mutex> guard (worker.mutex);
count = worker.queue.size ();
}
auto sizeof_element = sizeof (decltype (worker.queue)::value_type);

@@ -334,10 +342,7 @@ void release_assert_internal (bool check, const char * check_expr, const char *
std::cerr << "Assertion (" << check_expr << ") failed " << file << ":" << line << "\n\n";

// Output stack trace to cerr
auto stacktrace = boost::stacktrace::stacktrace ();
std::stringstream ss;
ss << stacktrace;
auto backtrace_str = ss.str ();
auto backtrace_str = nano::generate_stacktrace ();
std::cerr << backtrace_str << std::endl;

// "abort" at the end of this function will go into any signal handlers (the daemon ones will generate a stack trace and load memory address files on non-Windows systems).

@@ -1,6 +1,7 @@
#pragma once

#include <nano/boost/asio.hpp>
#include <nano/lib/locks.hpp>

#include <boost/filesystem.hpp>
#include <boost/system/error_code.hpp>

@@ -88,6 +89,11 @@ void create_load_memory_address_files ();
*/
void dump_crash_stacktrace ();

/*
* Generates the current stacktrace
*/
std::string generate_stacktrace ();

/*
* Functions for understanding the role of the current thread
*/

@@ -163,7 +169,7 @@ public:
void stop ();

private:
std::condition_variable cv;
nano::condition_variable cv;
std::deque<std::function<void()>> queue;
std::mutex mutex;
std::atomic<bool> stopped{ false };
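Replacing std::condition_variable with nano::condition_variable in the worker above is more than a rename: std::condition_variable::wait only accepts std::unique_lock<std::mutex>, so once the lock type becomes a wrapper the condition variable has to accept other lockables as well. A hedged sketch of one way the alias pair could be arranged (the macro name and the choice of std::condition_variable_any are assumptions for illustration, not the actual locks.hpp):

```cpp
#include <condition_variable>
#include <mutex>

namespace nano_sketch
{
#ifdef TIMED_LOCKS_ILLUSTRATION
// When hold times are measured, unique_lock is a wrapper type, so the
// condition variable must accept arbitrary lockables.
template <typename Mutex>
using unique_lock = /* timed wrapper would go here */ std::unique_lock<Mutex>;
using condition_variable = std::condition_variable_any;
#else
// With the feature disabled everything collapses to the standard types.
template <typename Mutex>
using unique_lock = std::unique_lock<Mutex>;
using condition_variable = std::condition_variable;
#endif
}
```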
@@ -188,12 +194,12 @@ class observer_set final
public:
void add (std::function<void(T...)> const & observer_a)
{
std::lock_guard<std::mutex> lock (mutex);
nano::lock_guard<std::mutex> lock (mutex);
observers.push_back (observer_a);
}
void notify (T... args)
{
std::lock_guard<std::mutex> lock (mutex);
nano::lock_guard<std::mutex> lock (mutex);
for (auto & i : observers)
{
i (args...);

@@ -204,11 +210,11 @@ public:
};

template <typename... T>
inline std::unique_ptr<seq_con_info_component> collect_seq_con_info (observer_set<T...> & observer_set, const std::string & name)
std::unique_ptr<seq_con_info_component> collect_seq_con_info (observer_set<T...> & observer_set, const std::string & name)
{
size_t count = 0;
{
std::lock_guard<std::mutex> lock (observer_set.mutex);
nano::lock_guard<std::mutex> lock (observer_set.mutex);
count = observer_set.observers.size ();
}

@@ -76,7 +76,7 @@ void nano::work_pool::loop (uint64_t thread)
uint64_t output;
blake2b_state hash;
blake2b_init (&hash, sizeof (output));
std::unique_lock<std::mutex> lock (mutex);
nano::unique_lock<std::mutex> lock (mutex);
auto pow_sleep = pow_rate_limiter;
while (!done)
{

@@ -156,7 +156,7 @@ void nano::work_pool::loop (uint64_t thread)

void nano::work_pool::cancel (nano::uint256_union const & root_a)
{
std::lock_guard<std::mutex> lock (mutex);
nano::lock_guard<std::mutex> lock (mutex);
if (!done)
{
if (!pending.empty ())

@@ -188,7 +188,7 @@ void nano::work_pool::cancel (nano::uint256_union const & root_a)
void nano::work_pool::stop ()
{
{
std::lock_guard<std::mutex> lock (mutex);
nano::lock_guard<std::mutex> lock (mutex);
done = true;
++ticket;
}

@@ -205,7 +205,7 @@ void nano::work_pool::generate (nano::uint256_union const & hash_a, std::functio
assert (!hash_a.is_zero ());
boost::optional<uint64_t> result;
{
std::lock_guard<std::mutex> lock (mutex);
nano::lock_guard<std::mutex> lock (mutex);
pending.push_back ({ hash_a, callback_a, difficulty_a });
}
producer_condition.notify_all ();

@@ -232,7 +232,7 @@ uint64_t nano::work_pool::generate (nano::uint256_union const & hash_a, uint64_t

size_t nano::work_pool::size ()
{
std::lock_guard<std::mutex> lock (mutex);
nano::lock_guard<std::mutex> lock (mutex);
return pending.size ();
}

@@ -244,7 +244,7 @@ std::unique_ptr<seq_con_info_component> collect_seq_con_info (work_pool & work_p

size_t count = 0;
{
std::lock_guard<std::mutex> guard (work_pool.mutex);
nano::lock_guard<std::mutex> guard (work_pool.mutex);
count = work_pool.pending.size ();
}
auto sizeof_element = sizeof (decltype (work_pool.pending)::value_type);

@@ -45,7 +45,7 @@ public:
std::vector<boost::thread> threads;
std::list<nano::work_item> pending;
std::mutex mutex;
std::condition_variable producer_condition;
nano::condition_variable producer_condition;
std::chrono::nanoseconds pow_rate_limiter;
std::function<boost::optional<uint64_t> (nano::uint256_union const &, uint64_t, std::atomic<int> &)> opencl;
nano::observer_set<bool> work_observers;

@@ -19,7 +19,7 @@ thread ([this]() {
request_loop ();
})
{
std::unique_lock<std::mutex> lock (mutex);
nano::unique_lock<std::mutex> lock (mutex);
condition.wait (lock, [& started = started] { return started; });
}

@@ -41,7 +41,7 @@ void nano::active_transactions::confirm_frontiers (nano::transaction const & tra
int test_network_factor = is_test_network ? 1000 : 1;
auto roots_size = size ();
auto max_elections = (max_broadcast_queue / 4);
std::unique_lock<std::mutex> lk (mutex);
nano::unique_lock<std::mutex> lk (mutex);
auto check_time_exceeded = std::chrono::steady_clock::now () >= next_frontier_check;
lk.unlock ();
auto low_active_elections = roots_size < max_elections;

@@ -106,7 +106,7 @@ void nano::active_transactions::confirm_frontiers (nano::transaction const & tra
}
}

void nano::active_transactions::request_confirm (std::unique_lock<std::mutex> & lock_a)
void nano::active_transactions::request_confirm (nano::unique_lock<std::mutex> & lock_a)
{
std::unordered_set<nano::qualified_root> inactive;
auto transaction (node.store.tx_begin_read ());

@@ -315,7 +315,7 @@ void nano::active_transactions::request_confirm (std::unique_lock<std::mutex> &

void nano::active_transactions::request_loop ()
{
std::unique_lock<std::mutex> lock (mutex);
nano::unique_lock<std::mutex> lock (mutex);
started = true;
lock.unlock ();
condition.notify_all ();

@@ -349,7 +349,7 @@ void nano::active_transactions::prioritize_account_for_confirmation (nano::activ
if (info_a.block_count > confirmation_height && !node.pending_confirmation_height.is_processing_block (info_a.head))
{
auto num_uncemented = info_a.block_count - confirmation_height;
std::lock_guard<std::mutex> guard (mutex);
nano::lock_guard<std::mutex> guard (mutex);
auto it = cementable_frontiers_a.find (account_a);
if (it != cementable_frontiers_a.end ())
{

@@ -393,7 +393,7 @@ void nano::active_transactions::prioritize_frontiers_for_confirmation (nano::tra
size_t priority_cementable_frontiers_size;
size_t priority_wallet_cementable_frontiers_size;
{
std::lock_guard<std::mutex> guard (mutex);
nano::lock_guard<std::mutex> guard (mutex);
priority_cementable_frontiers_size = priority_cementable_frontiers.size ();
priority_wallet_cementable_frontiers_size = priority_wallet_cementable_frontiers.size ();
}

@@ -404,7 +404,7 @@ void nano::active_transactions::prioritize_frontiers_for_confirmation (nano::tra
{
// Prioritize wallet accounts first
{
std::lock_guard<std::mutex> lock (node.wallets.mutex);
nano::lock_guard<std::mutex> lock (node.wallets.mutex);
auto wallet_transaction (node.wallets.tx_begin_read ());
auto const & items = node.wallets.items;
if (items.empty ())

@@ -421,7 +421,7 @@ void nano::active_transactions::prioritize_frontiers_for_confirmation (nano::tra

nano::account_info info;
auto & wallet (item_it->second);
std::lock_guard<std::recursive_mutex> wallet_lock (wallet->store.mutex);
nano::lock_guard<std::recursive_mutex> wallet_lock (wallet->store.mutex);

auto & next_wallet_frontier_account = next_wallet_frontier_accounts.emplace (item_it->first, wallet_store::special_count).first->second;

@@ -437,7 +437,7 @@ void nano::active_transactions::prioritize_frontiers_for_confirmation (nano::tra
auto it = priority_cementable_frontiers.find (account);
if (it != priority_cementable_frontiers.end ())
{
std::lock_guard<std::mutex> guard (mutex);
nano::lock_guard<std::mutex> guard (mutex);
priority_cementable_frontiers.erase (it);
priority_cementable_frontiers_size = priority_cementable_frontiers.size ();
}

@@ -503,7 +503,7 @@ void nano::active_transactions::prioritize_frontiers_for_confirmation (nano::tra

void nano::active_transactions::stop ()
{
std::unique_lock<std::mutex> lock (mutex);
nano::unique_lock<std::mutex> lock (mutex);
if (!started)
{
condition.wait (lock, [& started = started] { return started; });

@@ -521,7 +521,7 @@ void nano::active_transactions::stop ()

bool nano::active_transactions::start (std::shared_ptr<nano::block> block_a, std::function<void(std::shared_ptr<nano::block>)> const & confirmation_action_a)
{
std::lock_guard<std::mutex> lock (mutex);
nano::lock_guard<std::mutex> lock (mutex);
return add (block_a, confirmation_action_a);
}

@@ -558,10 +558,10 @@ bool nano::active_transactions::vote (std::shared_ptr<nano::vote> vote_a, bool s
bool replay (false);
bool processed (false);
{
std::unique_lock<std::mutex> lock;
nano::unique_lock<std::mutex> lock;
if (!single_lock)
{
lock = std::unique_lock<std::mutex> (mutex);
lock = nano::unique_lock<std::mutex> (mutex);
}
for (auto vote_block : vote_a->blocks)
{
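The vote hunk above depends on unique_lock being default-constructible and move-assignable, so the mutex is acquired only when single_lock is false; whatever nano::unique_lock is, it has to keep those semantics. A reduced illustration of the conditional-locking pattern (the flag and mutex are stand-ins):

```cpp
#include <mutex>

// Reduced illustration: acquire the mutex only when the caller does not
// already hold it, by assigning into a default-constructed (unlocked) lock.
void visit (std::mutex & mutex_a, bool single_lock)
{
	std::unique_lock<std::mutex> lock; // owns nothing yet
	if (!single_lock)
	{
		lock = std::unique_lock<std::mutex> (mutex_a); // now owns the mutex
	}
	// ... iterate shared state; when single_lock is true the caller holds the mutex
} // unlocks here only if this function took the lock
```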
@@ -597,7 +597,7 @@ bool nano::active_transactions::vote (std::shared_ptr<nano::vote> vote_a, bool s

bool nano::active_transactions::active (nano::qualified_root const & root_a)
{
std::lock_guard<std::mutex> lock (mutex);
nano::lock_guard<std::mutex> lock (mutex);
return roots.find (root_a) != roots.end ();
}

@@ -608,7 +608,7 @@ bool nano::active_transactions::active (nano::block const & block_a)

void nano::active_transactions::update_difficulty (nano::block const & block_a)
{
std::lock_guard<std::mutex> lock (mutex);
nano::lock_guard<std::mutex> lock (mutex);
auto existing (roots.find (block_a.qualified_root ()));
if (existing != roots.end ())
{

@@ -695,7 +695,7 @@ void nano::active_transactions::adjust_difficulty (nano::block_hash const & hash
uint64_t average = nano::difficulty::from_multiplier (multiplier, node.network_params.network.publish_threshold);
// Prevent overflow
int64_t limiter (0);
if (std::numeric_limits<std::uint64_t>::max () - average < highest_level)
if (std::numeric_limits<std::uint64_t>::max () - average < static_cast<uint64_t> (highest_level))
{
// Highest adjusted difficulty value should be std::numeric_limits<std::uint64_t>::max ()
limiter = std::numeric_limits<std::uint64_t>::max () - average + highest_level;
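The static_cast above is the one behavioural-looking change in this hunk: highest_level is a signed value (it sits alongside the int64_t limiter), and comparing it directly with the unsigned difference max () - average draws a signed/unsigned mismatch warning on MSVC — presumably the "Fix MSVC warning" part of this commit. A minimal stand-alone reproduction of the guarded comparison, with hypothetical values:

```cpp
#include <cstdint>
#include <limits>

bool would_overflow (uint64_t average, int64_t highest_level)
{
	// The explicit cast documents the conversion and silences the
	// signed/unsigned mismatch warning; highest_level is assumed >= 0 here.
	return std::numeric_limits<uint64_t>::max () - average < static_cast<uint64_t> (highest_level);
}
```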
@@ -720,7 +720,7 @@ void nano::active_transactions::adjust_difficulty (nano::block_hash const & hash
}
}

void nano::active_transactions::update_active_difficulty (std::unique_lock<std::mutex> & lock_a)
void nano::active_transactions::update_active_difficulty (nano::unique_lock<std::mutex> & lock_a)
{
assert (lock_a.mutex () == &mutex && lock_a.owns_lock ());
double multiplier (1.);

@@ -754,7 +754,7 @@ void nano::active_transactions::update_active_difficulty (std::unique_lock<std::

uint64_t nano::active_transactions::active_difficulty ()
{
std::lock_guard<std::mutex> lock (mutex);
nano::lock_guard<std::mutex> lock (mutex);
return trended_active_difficulty;
}

@@ -767,10 +767,10 @@ uint64_t nano::active_transactions::limited_active_difficulty ()
std::deque<std::shared_ptr<nano::block>> nano::active_transactions::list_blocks (bool single_lock)
{
std::deque<std::shared_ptr<nano::block>> result;
std::unique_lock<std::mutex> lock;
nano::unique_lock<std::mutex> lock;
if (!single_lock)
{
lock = std::unique_lock<std::mutex> (mutex);
lock = nano::unique_lock<std::mutex> (mutex);
}
for (auto i (roots.begin ()), n (roots.end ()); i != n; ++i)
{

@@ -781,7 +781,7 @@ std::deque<std::shared_ptr<nano::block>> nano::active_transactions::list_blocks

std::deque<nano::election_status> nano::active_transactions::list_confirmed ()
{
std::lock_guard<std::mutex> lock (mutex);
nano::lock_guard<std::mutex> lock (mutex);
return confirmed;
}

@@ -801,7 +801,7 @@ void nano::active_transactions::add_confirmed (nano::election_status const & sta

void nano::active_transactions::erase (nano::block const & block_a)
{
std::lock_guard<std::mutex> lock (mutex);
nano::lock_guard<std::mutex> lock (mutex);
auto root_it (roots.find (block_a.qualified_root ()));
if (root_it != roots.end ())
{

@@ -845,19 +845,19 @@ void nano::active_transactions::flush_lowest ()

bool nano::active_transactions::empty ()
{
std::lock_guard<std::mutex> lock (mutex);
nano::lock_guard<std::mutex> lock (mutex);
return roots.empty ();
}

size_t nano::active_transactions::size ()
{
std::lock_guard<std::mutex> lock (mutex);
nano::lock_guard<std::mutex> lock (mutex);
return roots.size ();
}

bool nano::active_transactions::publish (std::shared_ptr<nano::block> block_a)
{
std::lock_guard<std::mutex> lock (mutex);
nano::lock_guard<std::mutex> lock (mutex);
auto existing (roots.find (block_a->qualified_root ()));
auto result (true);
if (existing != roots.end ())

@@ -875,7 +875,7 @@ bool nano::active_transactions::publish (std::shared_ptr<nano::block> block_a)
void nano::active_transactions::confirm_block (nano::transaction const & transaction_a, std::shared_ptr<nano::block> block_a, nano::block_sideband const & sideband_a)
{
auto hash (block_a->hash ());
std::unique_lock<std::mutex> lock (mutex);
nano::unique_lock<std::mutex> lock (mutex);
auto existing (blocks.find (hash));
if (existing != blocks.end ())
{

@@ -898,19 +898,19 @@ void nano::active_transactions::confirm_block (nano::transaction const & transac

size_t nano::active_transactions::priority_cementable_frontiers_size ()
{
std::lock_guard<std::mutex> guard (mutex);
nano::lock_guard<std::mutex> guard (mutex);
return priority_cementable_frontiers.size ();
}

size_t nano::active_transactions::priority_wallet_cementable_frontiers_size ()
{
std::lock_guard<std::mutex> guard (mutex);
nano::lock_guard<std::mutex> guard (mutex);
return priority_wallet_cementable_frontiers.size ();
}

boost::circular_buffer<double> nano::active_transactions::difficulty_trend ()
{
std::lock_guard<std::mutex> guard (mutex);
nano::lock_guard<std::mutex> guard (mutex);
return multipliers_cb;
}

@@ -928,7 +928,7 @@ std::unique_ptr<seq_con_info_component> collect_seq_con_info (active_transaction
size_t confirmed_count = 0;

{
std::lock_guard<std::mutex> guard (active_transactions.mutex);
nano::lock_guard<std::mutex> guard (active_transactions.mutex);
roots_count = active_transactions.roots.size ();
blocks_count = active_transactions.blocks.size ();
confirmed_count = active_transactions.confirmed.size ();

@@ -94,7 +94,7 @@ public:
bool active (nano::qualified_root const &);
void update_difficulty (nano::block const &);
void adjust_difficulty (nano::block_hash const &);
void update_active_difficulty (std::unique_lock<std::mutex> &);
void update_active_difficulty (nano::unique_lock<std::mutex> &);
uint64_t active_difficulty ();
uint64_t limited_active_difficulty ();
std::deque<std::shared_ptr<nano::block>> list_blocks (bool = false);

@@ -139,11 +139,11 @@ private:
bool add (std::shared_ptr<nano::block>, std::function<void(std::shared_ptr<nano::block>)> const & = [](std::shared_ptr<nano::block>) {});
// clang-format on
void request_loop ();
void request_confirm (std::unique_lock<std::mutex> &);
void request_confirm (nano::unique_lock<std::mutex> &);
void confirm_frontiers (nano::transaction const &);
nano::account next_frontier_account{ 0 };
std::chrono::steady_clock::time_point next_frontier_check{ std::chrono::steady_clock::now () };
std::condition_variable condition;
nano::condition_variable condition;
bool started{ false };
std::atomic<bool> stopped{ false };
boost::multi_index_container<

@@ -26,7 +26,7 @@ void nano::block_processor::stop ()
{
generator.stop ();
{
std::lock_guard<std::mutex> lock (mutex);
nano::lock_guard<std::mutex> lock (mutex);
stopped = true;
}
condition.notify_all ();

@@ -35,7 +35,7 @@ void nano::block_processor::stop ()
void nano::block_processor::flush ()
{
node.checker.flush ();
std::unique_lock<std::mutex> lock (mutex);
nano::unique_lock<std::mutex> lock (mutex);
while (!stopped && (have_blocks () || active))
{
condition.wait (lock);

@@ -44,7 +44,7 @@ void nano::block_processor::flush ()

size_t nano::block_processor::size ()
{
std::unique_lock<std::mutex> lock (mutex);
nano::unique_lock<std::mutex> lock (mutex);
return (blocks.size () + state_blocks.size () + forced.size ());
}

@@ -70,7 +70,7 @@ void nano::block_processor::add (nano::unchecked_info const & info_a)
{
{
auto hash (info_a.block->hash ());
std::lock_guard<std::mutex> lock (mutex);
nano::lock_guard<std::mutex> lock (mutex);
if (blocks_hashes.find (hash) == blocks_hashes.end () && rolled_back.get<1> ().find (hash) == rolled_back.get<1> ().end ())
{
if (info_a.verified == nano::signature_verification::unknown && (info_a.block->type () == nano::block_type::state || info_a.block->type () == nano::block_type::open || !info_a.account.is_zero ()))

@@ -96,7 +96,7 @@ void nano::block_processor::add (nano::unchecked_info const & info_a)
void nano::block_processor::force (std::shared_ptr<nano::block> block_a)
{
{
std::lock_guard<std::mutex> lock (mutex);
nano::lock_guard<std::mutex> lock (mutex);
forced.push_back (block_a);
}
condition.notify_all ();

@@ -104,13 +104,13 @@ void nano::block_processor::force (std::shared_ptr<nano::block> block_a)

void nano::block_processor::wait_write ()
{
std::lock_guard<std::mutex> lock (mutex);
nano::lock_guard<std::mutex> lock (mutex);
awaiting_write = true;
}

void nano::block_processor::process_blocks ()
{
std::unique_lock<std::mutex> lock (mutex);
nano::unique_lock<std::mutex> lock (mutex);
while (!stopped)
{
if (have_blocks ())

@@ -147,7 +147,7 @@ bool nano::block_processor::have_blocks ()
return !blocks.empty () || !forced.empty () || !state_blocks.empty ();
}

void nano::block_processor::verify_state_blocks (nano::transaction const & transaction_a, std::unique_lock<std::mutex> & lock_a, size_t max_count)
void nano::block_processor::verify_state_blocks (nano::transaction const & transaction_a, nano::unique_lock<std::mutex> & lock_a, size_t max_count)
{
assert (!mutex.try_lock ());
nano::timer<std::chrono::milliseconds> timer_l (nano::timer_state::started);

@@ -242,7 +242,7 @@ void nano::block_processor::verify_state_blocks (nano::transaction const & trans
}
}

void nano::block_processor::process_batch (std::unique_lock<std::mutex> & lock_a)
void nano::block_processor::process_batch (nano::unique_lock<std::mutex> & lock_a)
{
nano::timer<std::chrono::milliseconds> timer_l;
lock_a.lock ();

@@ -390,7 +390,7 @@ void nano::block_processor::process_live (nano::block_hash const & hash_a, std::
// Check if votes were already requested
bool send_request (false);
{
std::lock_guard<std::mutex> lock (node_l->active.mutex);
nano::lock_guard<std::mutex> lock (node_l->active.mutex);
auto existing (node_l->active.blocks.find (block_a->hash ()));
if (existing != node_l->active.blocks.end () && !existing->second->confirmed && !existing->second->stopped && existing->second->confirmation_request_count == 0)
{

@@ -56,8 +56,8 @@ public:

private:
void queue_unchecked (nano::write_transaction const &, nano::block_hash const &);
void verify_state_blocks (nano::transaction const & transaction_a, std::unique_lock<std::mutex> &, size_t = std::numeric_limits<size_t>::max ());
void process_batch (std::unique_lock<std::mutex> &);
void verify_state_blocks (nano::transaction const & transaction_a, nano::unique_lock<std::mutex> &, size_t = std::numeric_limits<size_t>::max ());
void process_batch (nano::unique_lock<std::mutex> &);
void process_live (nano::block_hash const &, std::shared_ptr<nano::block>, const bool = false);
bool stopped;
bool active;
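verify_state_blocks and process_batch above (and several bootstrap functions later in the diff) take the caller's nano::unique_lock<std::mutex> & rather than the mutex itself, so the callee can release and reacquire the lock around expensive work while the caller keeps ownership of the lock object. A compact sketch of that idiom using the standard type:

```cpp
#include <mutex>
#include <vector>

// Sketch: the callee temporarily releases the caller's lock around slow work.
void process_batch_sketch (std::unique_lock<std::mutex> & lock_a, std::vector<int> & queue)
{
	// Expected to be called with lock_a associated with the queue's mutex.
	if (!lock_a.owns_lock ())
	{
		lock_a.lock ();
	}
	while (!queue.empty ())
	{
		auto item = queue.back ();
		queue.pop_back ();
		lock_a.unlock (); // do not hold the mutex during expensive processing
		(void)item;       // ... process item here ...
		lock_a.lock ();
	}
}
```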
@@ -74,7 +74,7 @@ private:
boost::multi_index::hashed_unique<boost::multi_index::member<nano::rolled_hash, nano::block_hash, &nano::rolled_hash::hash>>>>
rolled_back;
static size_t const rolled_back_max = 1024;
std::condition_variable condition;
nano::condition_variable condition;
nano::node & node;
nano::write_database_queue & write_database_queue;
std::mutex mutex;

@@ -285,7 +285,7 @@ pull (pull_a),
pull_blocks (0),
unexpected_count (0)
{
std::lock_guard<std::mutex> mutex (connection->attempt->mutex);
nano::lock_guard<std::mutex> mutex (connection->attempt->mutex);
connection->attempt->condition.notify_all ();
}

@@ -311,7 +311,7 @@ nano::bulk_pull_client::~bulk_pull_client ()
connection->node->bootstrap_initiator.cache.remove (pull);
}
{
std::lock_guard<std::mutex> mutex (connection->attempt->mutex);
nano::lock_guard<std::mutex> mutex (connection->attempt->mutex);
--connection->attempt->pulling;
}
connection->attempt->condition.notify_all ();

@@ -328,12 +328,12 @@ void nano::bulk_pull_client::request ()

if (connection->node->config.logging.bulk_pull_logging ())
{
std::unique_lock<std::mutex> lock (connection->attempt->mutex);
nano::unique_lock<std::mutex> lock (connection->attempt->mutex);
connection->node->logger.try_log (boost::str (boost::format ("Requesting account %1% from %2%. %3% accounts in queue") % pull.account.to_account () % connection->channel->to_string () % connection->attempt->pulls.size ()));
}
else if (connection->node->config.logging.network_logging () && connection->attempt->should_log ())
{
std::unique_lock<std::mutex> lock (connection->attempt->mutex);
nano::unique_lock<std::mutex> lock (connection->attempt->mutex);
connection->node->logger.always_log (boost::str (boost::format ("%1% accounts in pull queue") % connection->attempt->pulls.size ()));
}
auto this_l (shared_from_this ());

@@ -568,7 +568,7 @@ void nano::bulk_push_client::push (nano::transaction const & transaction_a)
{
if (current_target.first.is_zero () || current_target.first == current_target.second)
{
std::lock_guard<std::mutex> guard (connection->attempt->mutex);
nano::lock_guard<std::mutex> guard (connection->attempt->mutex);
if (!connection->attempt->bulk_push_targets.empty ())
{
current_target = connection->attempt->bulk_push_targets.back ();

@@ -656,7 +656,7 @@ pull_blocks (0)
nano::bulk_pull_account_client::~bulk_pull_account_client ()
{
{
std::lock_guard<std::mutex> mutex (connection->attempt->mutex);
nano::lock_guard<std::mutex> mutex (connection->attempt->mutex);
--connection->attempt->pulling;
}
connection->attempt->condition.notify_all ();

@@ -670,12 +670,12 @@ void nano::bulk_pull_account_client::request ()
req.flags = nano::bulk_pull_account_flags::pending_hash_and_amount;
if (connection->node->config.logging.bulk_pull_logging ())
{
std::unique_lock<std::mutex> lock (connection->attempt->mutex);
nano::unique_lock<std::mutex> lock (connection->attempt->mutex);
connection->node->logger.try_log (boost::str (boost::format ("Requesting pending for account %1% from %2%. %3% accounts in queue") % req.account.to_account () % connection->channel->to_string () % connection->attempt->wallet_accounts.size ()));
}
else if (connection->node->config.logging.network_logging () && connection->attempt->should_log ())
{
std::unique_lock<std::mutex> lock (connection->attempt->mutex);
nano::unique_lock<std::mutex> lock (connection->attempt->mutex);
connection->node->logger.always_log (boost::str (boost::format ("%1% accounts in pull queue") % connection->attempt->wallet_accounts.size ()));
}
auto this_l (shared_from_this ());

@@ -799,7 +799,7 @@ nano::bootstrap_attempt::~bootstrap_attempt ()

bool nano::bootstrap_attempt::should_log ()
{
std::lock_guard<std::mutex> lock (mutex);
nano::lock_guard<std::mutex> lock (mutex);
auto result (false);
auto now (std::chrono::steady_clock::now ());
if (next_log < now)

@@ -810,7 +810,7 @@ bool nano::bootstrap_attempt::should_log ()
return result;
}

bool nano::bootstrap_attempt::request_frontier (std::unique_lock<std::mutex> & lock_a)
bool nano::bootstrap_attempt::request_frontier (nano::unique_lock<std::mutex> & lock_a)
{
auto result (true);
auto connection_l (connection (lock_a));

@@ -846,7 +846,7 @@ bool nano::bootstrap_attempt::request_frontier (std::unique_lock<std::mutex> & l
return result;
}

void nano::bootstrap_attempt::request_pull (std::unique_lock<std::mutex> & lock_a)
void nano::bootstrap_attempt::request_pull (nano::unique_lock<std::mutex> & lock_a)
{
auto connection_l (connection (lock_a));
if (connection_l)

@@ -856,7 +856,7 @@ void nano::bootstrap_attempt::request_pull (std::unique_lock<std::mutex> & lock_
if (mode != nano::bootstrap_mode::legacy)
{
// Check if pull is obsolete (head was processed)
std::unique_lock<std::mutex> lock (lazy_mutex);
nano::unique_lock<std::mutex> lock (lazy_mutex);
auto transaction (node->store.tx_begin_read ());
while (!pulls.empty () && !pull.head.is_zero () && (lazy_blocks.find (pull.head) != lazy_blocks.end () || node->store.block_exists (transaction, pull.head)))
{

@@ -874,7 +874,7 @@ void nano::bootstrap_attempt::request_pull (std::unique_lock<std::mutex> & lock_
}
}

void nano::bootstrap_attempt::request_push (std::unique_lock<std::mutex> & lock_a)
void nano::bootstrap_attempt::request_push (nano::unique_lock<std::mutex> & lock_a)
{
bool error (false);
if (auto connection_shared = connection_frontier_request.lock ())

@@ -913,7 +913,7 @@ void nano::bootstrap_attempt::run ()
{
assert (!node->flags.disable_legacy_bootstrap);
populate_connections ();
std::unique_lock<std::mutex> lock (mutex);
nano::unique_lock<std::mutex> lock (mutex);
auto frontier_failure (true);
while (!stopped && frontier_failure)
{

@@ -980,7 +980,7 @@ void nano::bootstrap_attempt::run ()
idle.clear ();
}

std::shared_ptr<nano::bootstrap_client> nano::bootstrap_attempt::connection (std::unique_lock<std::mutex> & lock_a)
std::shared_ptr<nano::bootstrap_client> nano::bootstrap_attempt::connection (nano::unique_lock<std::mutex> & lock_a)
{
// clang-format off
condition.wait (lock_a, [& stopped = stopped, &idle = idle] { return stopped || !idle.empty (); });

@@ -1036,7 +1036,7 @@ void nano::bootstrap_attempt::populate_connections ()
std::priority_queue<std::shared_ptr<nano::bootstrap_client>, std::vector<std::shared_ptr<nano::bootstrap_client>>, block_rate_cmp> sorted_connections;
std::unordered_set<nano::tcp_endpoint> endpoints;
{
std::unique_lock<std::mutex> lock (mutex);
nano::unique_lock<std::mutex> lock (mutex);
num_pulls = pulls.size ();
std::deque<std::weak_ptr<nano::bootstrap_client>> new_clients;
for (auto & c : clients)

@@ -1099,7 +1099,7 @@ void nano::bootstrap_attempt::populate_connections ()

if (node->config.logging.bulk_pull_logging ())
{
std::unique_lock<std::mutex> lock (mutex);
nano::unique_lock<std::mutex> lock (mutex);
node->logger.try_log (boost::str (boost::format ("Bulk pull connections: %1%, rate: %2% blocks/sec, remaining account pulls: %3%, total blocks: %4%") % connections.load () % (int)rate_sum % pulls.size () % (int)total_blocks.load ()));
}

@@ -1114,7 +1114,7 @@ void nano::bootstrap_attempt::populate_connections ()
if (endpoint != nano::tcp_endpoint (boost::asio::ip::address_v6::any (), 0) && endpoints.find (endpoint) == endpoints.end ())
{
connect_client (endpoint);
std::lock_guard<std::mutex> lock (mutex);
nano::lock_guard<std::mutex> lock (mutex);
endpoints.insert (endpoint);
}
else if (connections == 0)

@@ -1182,7 +1182,7 @@ void nano::bootstrap_attempt::connect_client (nano::tcp_endpoint const & endpoin

void nano::bootstrap_attempt::pool_connection (std::shared_ptr<nano::bootstrap_client> client_a)
{
std::lock_guard<std::mutex> lock (mutex);
nano::lock_guard<std::mutex> lock (mutex);
if (!stopped && !client_a->pending_stop)
{
// Idle bootstrap client socket

@@ -1195,7 +1195,7 @@ void nano::bootstrap_attempt::pool_connection (std::shared_ptr<nano::bootstrap_c

void nano::bootstrap_attempt::stop ()
{
std::lock_guard<std::mutex> lock (mutex);
nano::lock_guard<std::mutex> lock (mutex);
stopped = true;
condition.notify_all ();
for (auto i : clients)

@@ -1232,7 +1232,7 @@ void nano::bootstrap_attempt::add_pull (nano::pull_info const & pull_a)
nano::pull_info pull (pull_a);
node->bootstrap_initiator.cache.update_pull (pull);
{
std::lock_guard<std::mutex> lock (mutex);
nano::lock_guard<std::mutex> lock (mutex);
pulls.push_back (pull);
}
condition.notify_all ();

@@ -1243,7 +1243,7 @@ void nano::bootstrap_attempt::requeue_pull (nano::pull_info const & pull_a)
auto pull (pull_a);
if (++pull.attempts < (bootstrap_frontier_retry_limit + (pull.processed / 10000)))
{
std::lock_guard<std::mutex> lock (mutex);
nano::lock_guard<std::mutex> lock (mutex);
pulls.push_front (pull);
condition.notify_all ();
}

@@ -1251,7 +1251,7 @@ void nano::bootstrap_attempt::requeue_pull (nano::pull_info const & pull_a)
{
{
// Retry for lazy pulls (not weak state block link assumptions)
std::lock_guard<std::mutex> lock (mutex);
nano::lock_guard<std::mutex> lock (mutex);
pull.attempts++;
pulls.push_back (pull);
}

@@ -1271,13 +1271,13 @@ void nano::bootstrap_attempt::requeue_pull (nano::pull_info const & pull_a)

void nano::bootstrap_attempt::add_bulk_push_target (nano::block_hash const & head, nano::block_hash const & end)
{
std::lock_guard<std::mutex> lock (mutex);
nano::lock_guard<std::mutex> lock (mutex);
bulk_push_targets.push_back (std::make_pair (head, end));
}

void nano::bootstrap_attempt::lazy_start (nano::block_hash const & hash_a)
{
std::unique_lock<std::mutex> lock (lazy_mutex);
nano::unique_lock<std::mutex> lock (lazy_mutex);
// Add start blocks, limit 1024 (32k with disabled legacy bootstrap)
size_t max_keys (node->flags.disable_legacy_bootstrap ? 32 * 1024 : 1024);
if (lazy_keys.size () < max_keys && lazy_keys.find (hash_a) == lazy_keys.end () && lazy_blocks.find (hash_a) == lazy_blocks.end ())

@@ -1300,7 +1300,7 @@ void nano::bootstrap_attempt::lazy_add (nano::block_hash const & hash_a)
void nano::bootstrap_attempt::lazy_pull_flush ()
{
assert (!mutex.try_lock ());
std::unique_lock<std::mutex> lazy_lock (lazy_mutex);
nano::unique_lock<std::mutex> lazy_lock (lazy_mutex);
auto transaction (node->store.tx_begin_read ());
for (auto & pull_start : lazy_pulls)
{

@@ -1318,7 +1318,7 @@ bool nano::bootstrap_attempt::lazy_finished ()
{
bool result (true);
auto transaction (node->store.tx_begin_read ());
std::unique_lock<std::mutex> lock (lazy_mutex);
nano::unique_lock<std::mutex> lock (lazy_mutex);
for (auto it (lazy_keys.begin ()), end (lazy_keys.end ()); it != end && !stopped;)
{
if (node->store.block_exists (transaction, *it))

@@ -1357,7 +1357,7 @@ void nano::bootstrap_attempt::lazy_run ()
populate_connections ();
auto start_time (std::chrono::steady_clock::now ());
auto max_time (std::chrono::minutes (node->flags.disable_legacy_bootstrap ? 48 * 60 : 30));
std::unique_lock<std::mutex> lock (mutex);
nano::unique_lock<std::mutex> lock (mutex);
while ((still_pulling () || !lazy_finished ()) && lazy_stopped < lazy_max_stopped && std::chrono::steady_clock::now () - start_time < max_time)
{
unsigned iterations (0);

@@ -1388,7 +1388,7 @@ void nano::bootstrap_attempt::lazy_run ()
if (!stopped)
{
node->logger.try_log ("Completed lazy pulls");
std::unique_lock<std::mutex> lazy_lock (lazy_mutex);
nano::unique_lock<std::mutex> lazy_lock (lazy_mutex);
runs_count++;
// Start wallet lazy bootstrap if required
if (!wallet_accounts.empty () && !node->flags.disable_wallet_bootstrap)

@@ -1424,7 +1424,7 @@ bool nano::bootstrap_attempt::process_block (std::shared_ptr<nano::block> block_
if (mode != nano::bootstrap_mode::legacy && block_expected)
{
auto hash (block_a->hash ());
std::unique_lock<std::mutex> lock (lazy_mutex);
nano::unique_lock<std::mutex> lock (lazy_mutex);
// Processing new blocks
if (lazy_blocks.find (hash) == lazy_blocks.end ())
{

@@ -1574,7 +1574,7 @@ bool nano::bootstrap_attempt::process_block (std::shared_ptr<nano::block> block_
return stop_pull;
}

void nano::bootstrap_attempt::request_pending (std::unique_lock<std::mutex> & lock_a)
void nano::bootstrap_attempt::request_pending (nano::unique_lock<std::mutex> & lock_a)
{
auto connection_l (connection (lock_a));
if (connection_l)

@@ -1595,7 +1595,7 @@ void nano::bootstrap_attempt::requeue_pending (nano::account const & account_a)
{
auto account (account_a);
{
std::lock_guard<std::mutex> lock (mutex);
nano::lock_guard<std::mutex> lock (mutex);
wallet_accounts.push_front (account);
condition.notify_all ();
}

@@ -1603,7 +1603,7 @@ void nano::bootstrap_attempt::requeue_pending (nano::account const & account_a)

void nano::bootstrap_attempt::wallet_start (std::deque<nano::account> & accounts_a)
{
std::lock_guard<std::mutex> lock (mutex);
nano::lock_guard<std::mutex> lock (mutex);
wallet_accounts.swap (accounts_a);
}

@@ -1622,7 +1622,7 @@ void nano::bootstrap_attempt::wallet_run ()
populate_connections ();
auto start_time (std::chrono::steady_clock::now ());
auto max_time (std::chrono::minutes (10));
std::unique_lock<std::mutex> lock (mutex);
nano::unique_lock<std::mutex> lock (mutex);
while (wallet_finished () && std::chrono::steady_clock::now () - start_time < max_time)
{
if (!wallet_accounts.empty ())

@@ -1668,7 +1668,7 @@ nano::bootstrap_initiator::~bootstrap_initiator ()

void nano::bootstrap_initiator::bootstrap ()
{
std::unique_lock<std::mutex> lock (mutex);
nano::unique_lock<std::mutex> lock (mutex);
if (!stopped && attempt == nullptr)
{
node.stats.inc (nano::stat::type::bootstrap, nano::stat::detail::initiate, nano::stat::dir::out);

@@ -1683,7 +1683,7 @@ void nano::bootstrap_initiator::bootstrap (nano::endpoint const & endpoint_a, bo
{
node.network.udp_channels.insert (nano::transport::map_endpoint_to_v6 (endpoint_a), node.network_params.protocol.protocol_version);
}
std::unique_lock<std::mutex> lock (mutex);
nano::unique_lock<std::mutex> lock (mutex);
if (!stopped)
{
if (attempt != nullptr)

@@ -1703,7 +1703,7 @@ void nano::bootstrap_initiator::bootstrap (nano::endpoint const & endpoint_a, bo
void nano::bootstrap_initiator::bootstrap_lazy (nano::block_hash const & hash_a, bool force)
{
{
std::unique_lock<std::mutex> lock (mutex);
nano::unique_lock<std::mutex> lock (mutex);
if (force)
{
if (attempt != nullptr)

@@ -1727,7 +1727,7 @@ void nano::bootstrap_initiator::bootstrap_lazy (nano::block_hash const & hash_a,
void nano::bootstrap_initiator::bootstrap_wallet (std::deque<nano::account> & accounts_a)
{
{
std::unique_lock<std::mutex> lock (mutex);
nano::unique_lock<std::mutex> lock (mutex);
node.stats.inc (nano::stat::type::bootstrap, nano::stat::detail::initiate_wallet_lazy, nano::stat::dir::out);
if (attempt == nullptr)
{

@@ -1740,7 +1740,7 @@ void nano::bootstrap_initiator::bootstrap_wallet (std::deque<nano::account> & ac

void nano::bootstrap_initiator::run_bootstrap ()
{
std::unique_lock<std::mutex> lock (mutex);
nano::unique_lock<std::mutex> lock (mutex);
while (!stopped)
{
if (attempt != nullptr)

@@ -1771,7 +1771,7 @@ void nano::bootstrap_initiator::run_bootstrap ()

void nano::bootstrap_initiator::add_observer (std::function<void(bool)> const & observer_a)
{
std::lock_guard<std::mutex> lock (observers_mutex);
nano::lock_guard<std::mutex> lock (observers_mutex);
observers.push_back (observer_a);
}

@@ -1782,7 +1782,7 @@ bool nano::bootstrap_initiator::in_progress ()

std::shared_ptr<nano::bootstrap_attempt> nano::bootstrap_initiator::current_attempt ()
{
std::lock_guard<std::mutex> lock (mutex);
nano::lock_guard<std::mutex> lock (mutex);
return attempt;
}

@@ -1791,7 +1791,7 @@ void nano::bootstrap_initiator::stop ()
if (!stopped.exchange (true))
{
{
std::lock_guard<std::mutex> guard (mutex);
nano::lock_guard<std::mutex> guard (mutex);
if (attempt != nullptr)
{
attempt->stop ();

@@ -1808,7 +1808,7 @@ void nano::bootstrap_initiator::stop ()

void nano::bootstrap_initiator::notify_listeners (bool in_progress_a)
{
std::lock_guard<std::mutex> lock (observers_mutex);
nano::lock_guard<std::mutex> lock (observers_mutex);
for (auto & i : observers)
{
i (in_progress_a);

@@ -1822,11 +1822,11 @@ std::unique_ptr<seq_con_info_component> collect_seq_con_info (bootstrap_initiato
size_t count = 0;
size_t cache_count = 0;
{
std::lock_guard<std::mutex> guard (bootstrap_initiator.observers_mutex);
nano::lock_guard<std::mutex> guard (bootstrap_initiator.observers_mutex);
count = bootstrap_initiator.observers.size ();
}
{
std::lock_guard<std::mutex> guard (bootstrap_initiator.cache.pulls_cache_mutex);
nano::lock_guard<std::mutex> guard (bootstrap_initiator.cache.pulls_cache_mutex);
cache_count = bootstrap_initiator.cache.cache.size ();
}

@@ -1874,7 +1874,7 @@ void nano::bootstrap_listener::stop ()
{
decltype (connections) connections_l;
{
std::lock_guard<std::mutex> lock (mutex);
nano::lock_guard<std::mutex> lock (mutex);
on = false;
connections_l.swap (connections);
}

@@ -1887,7 +1887,7 @@ void nano::bootstrap_listener::stop ()

size_t nano::bootstrap_listener::connection_count ()
{
std::lock_guard<std::mutex> lock (mutex);
nano::lock_guard<std::mutex> lock (mutex);
return connections.size ();
}

@@ -1895,7 +1895,7 @@ void nano::bootstrap_listener::accept_action (boost::system::error_code const &
{
auto connection (std::make_shared<nano::bootstrap_server> (socket_a, node.shared ()));
{
std::lock_guard<std::mutex> lock (mutex);
nano::lock_guard<std::mutex> lock (mutex);
connections[connection.get ()] = connection;
connection->receive ();
}

@@ -1940,7 +1940,7 @@ nano::bootstrap_server::~bootstrap_server ()
}
}
stop ();
std::lock_guard<std::mutex> lock (node->bootstrap.mutex);
nano::lock_guard<std::mutex> lock (node->bootstrap.mutex);
node->bootstrap.connections.erase (this);
}

@@ -2282,7 +2282,7 @@ void nano::bootstrap_server::receive_node_id_handshake_action (boost::system::er
void nano::bootstrap_server::add_request (std::unique_ptr<nano::message> message_a)
{
assert (message_a != nullptr);
std::lock_guard<std::mutex> lock (mutex);
nano::lock_guard<std::mutex> lock (mutex);
auto start (requests.empty ());
requests.push (std::move (message_a));
if (start)

@@ -2293,7 +2293,7 @@ void nano::bootstrap_server::add_request (std::unique_ptr<nano::message> message

void nano::bootstrap_server::finish_request ()
{
std::lock_guard<std::mutex> lock (mutex);
nano::lock_guard<std::mutex> lock (mutex);
requests.pop ();
if (!requests.empty ())
{

@@ -2333,7 +2333,7 @@ void nano::bootstrap_server::timeout ()
node->logger.try_log ("Closing incoming tcp / bootstrap server by timeout");
}
{
std::lock_guard<std::mutex> lock (node->bootstrap.mutex);
nano::lock_guard<std::mutex> lock (node->bootstrap.mutex);
node->bootstrap.connections.erase (this);
}
socket->close ();

@@ -2341,7 +2341,7 @@ void nano::bootstrap_server::timeout ()
}
else
{
std::lock_guard<std::mutex> lock (node->bootstrap.mutex);
nano::lock_guard<std::mutex> lock (node->bootstrap.mutex);
node->bootstrap.connections.erase (this);
}
}

@@ -3304,7 +3304,7 @@ void nano::pulls_cache::add (nano::pull_info const & pull_a)
{
if (pull_a.processed > 500)
{
std::lock_guard<std::mutex> guard (pulls_cache_mutex);
nano::lock_guard<std::mutex> guard (pulls_cache_mutex);
// Clean old pull
if (cache.size () > cache_size_max)
{

@@ -3333,7 +3333,7 @@ void nano::pulls_cache::add (nano::pull_info const & pull_a)

void nano::pulls_cache::update_pull (nano::pull_info & pull_a)
{
std::lock_guard<std::mutex> guard (pulls_cache_mutex);
nano::lock_guard<std::mutex> guard (pulls_cache_mutex);
nano::uint512_union head_512 (pull_a.account, pull_a.head_original);
auto existing (cache.get<account_head_tag> ().find (head_512));
if (existing != cache.get<account_head_tag> ().end ())

@@ -3344,7 +3344,7 @@ void nano::pulls_cache::update_pull (nano::pull_info & pull_a)

void nano::pulls_cache::remove (nano::pull_info const & pull_a)
{
std::lock_guard<std::mutex> guard (pulls_cache_mutex);
nano::lock_guard<std::mutex> guard (pulls_cache_mutex);
nano::uint512_union head_512 (pull_a.account, pull_a.head_original);
cache.get<account_head_tag> ().erase (head_512);
}

@@ -3356,7 +3356,7 @@ std::unique_ptr<seq_con_info_component> collect_seq_con_info (pulls_cache & pull
size_t cache_count = 0;

{
std::lock_guard<std::mutex> guard (pulls_cache.pulls_cache_mutex);
nano::lock_guard<std::mutex> guard (pulls_cache.pulls_cache_mutex);
cache_count = pulls_cache.cache.size ();
}
auto sizeof_element = sizeof (decltype (pulls_cache.cache)::value_type);
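A pattern repeated across these collect_seq_con_info hunks: the mutex is held only long enough to snapshot the container's size (), and the per-element size is computed outside the critical section. In sketch form, with placeholder names and a std::deque standing in for the real containers:

```cpp
#include <cstddef>
#include <deque>
#include <mutex>

struct queue_info
{
	size_t count;
	size_t sizeof_element;
};

// Sketch: hold the mutex only for the cheap size () call, not for the rest.
queue_info collect_info (std::mutex & mutex_a, std::deque<int> const & queue_a)
{
	size_t count = 0;
	{
		std::lock_guard<std::mutex> guard (mutex_a);
		count = queue_a.size ();
	}
	auto sizeof_element = sizeof (std::deque<int>::value_type);
	return { count, sizeof_element };
}
```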
@ -65,12 +65,12 @@ public:
|
|||
explicit bootstrap_attempt (std::shared_ptr<nano::node> node_a, nano::bootstrap_mode mode_a = nano::bootstrap_mode::legacy);
|
||||
~bootstrap_attempt ();
|
||||
void run ();
|
||||
std::shared_ptr<nano::bootstrap_client> connection (std::unique_lock<std::mutex> &);
|
||||
std::shared_ptr<nano::bootstrap_client> connection (nano::unique_lock<std::mutex> &);
|
||||
bool consume_future (std::future<bool> &);
|
||||
void populate_connections ();
|
||||
bool request_frontier (std::unique_lock<std::mutex> &);
|
||||
void request_pull (std::unique_lock<std::mutex> &);
|
||||
void request_push (std::unique_lock<std::mutex> &);
|
||||
bool request_frontier (nano::unique_lock<std::mutex> &);
|
||||
void request_pull (nano::unique_lock<std::mutex> &);
|
||||
void request_push (nano::unique_lock<std::mutex> &);
|
||||
void add_connection (nano::endpoint const &);
|
||||
void connect_client (nano::tcp_endpoint const &);
|
||||
void pool_connection (std::shared_ptr<nano::bootstrap_client>);
|
||||
|
@ -88,7 +88,7 @@ public:
|
|||
bool lazy_finished ();
|
||||
void lazy_pull_flush ();
|
||||
void lazy_clear ();
|
||||
void request_pending (std::unique_lock<std::mutex> &);
|
||||
void request_pending (nano::unique_lock<std::mutex> &);
|
||||
void requeue_pending (nano::account const &);
|
||||
void wallet_run ();
|
||||
void wallet_start (std::deque<nano::account> &);
|
||||
|
@ -110,7 +110,7 @@ public:
|
|||
std::atomic<bool> stopped;
|
||||
nano::bootstrap_mode mode;
|
||||
std::mutex mutex;
|
||||
std::condition_variable condition;
|
||||
nano::condition_variable condition;
|
||||
// Lazy bootstrap
|
||||
std::unordered_set<nano::block_hash> lazy_blocks;
|
||||
std::unordered_map<nano::block_hash, std::pair<nano::block_hash, nano::uint128_t>> lazy_state_unknown;
|
||||
|
@ -254,7 +254,7 @@ private:
|
|||
std::shared_ptr<nano::bootstrap_attempt> attempt;
|
||||
std::atomic<bool> stopped;
|
||||
std::mutex mutex;
|
||||
std::condition_variable condition;
|
||||
nano::condition_variable condition;
|
||||
std::mutex observers_mutex;
|
||||
std::vector<std::function<void(bool)>> observers;
|
||||
boost::thread thread;
|
||||
|
|
|
@ -825,7 +825,7 @@ std::error_code nano::handle_node_options (boost::program_options::variables_map
|
|||
{
|
||||
bool error (true);
|
||||
{
|
||||
std::lock_guard<std::mutex> lock (node.node->wallets.mutex);
|
||||
nano::lock_guard<std::mutex> lock (node.node->wallets.mutex);
|
||||
auto transaction (node.node->wallets.tx_begin_write ());
|
||||
nano::wallet wallet (error, transaction, node.node->wallets, wallet_id.to_string (), contents.str ());
|
||||
}
|
||||
|
@ -837,7 +837,7 @@ std::error_code nano::handle_node_options (boost::program_options::variables_map
|
|||
else
|
||||
{
|
||||
node.node->wallets.reload ();
|
||||
std::lock_guard<std::mutex> lock (node.node->wallets.mutex);
|
||||
nano::lock_guard<std::mutex> lock (node.node->wallets.mutex);
|
||||
release_assert (node.node->wallets.items.find (wallet_id) != node.node->wallets.items.end ());
|
||||
std::cout << "Import completed\n";
|
||||
}
|
||||
|
|
|
@ -47,7 +47,7 @@ void nano::confirmation_height_processor::stop ()
|
|||
|
||||
void nano::confirmation_height_processor::run ()
|
||||
{
|
||||
std::unique_lock<std::mutex> lk (pending_confirmations.mutex);
|
||||
nano::unique_lock<std::mutex> lk (pending_confirmations.mutex);
|
||||
while (!stopped)
|
||||
{
|
||||
if (!pending_confirmations.pending.empty ())
|
||||
|
@ -88,7 +88,7 @@ void nano::confirmation_height_processor::run ()
|
|||
void nano::confirmation_height_processor::add (nano::block_hash const & hash_a)
|
||||
{
|
||||
{
|
||||
std::lock_guard<std::mutex> lk (pending_confirmations.mutex);
|
||||
nano::lock_guard<std::mutex> lk (pending_confirmations.mutex);
|
||||
pending_confirmations.pending.insert (hash_a);
|
||||
}
|
||||
condition.notify_one ();
|
||||
|
@ -412,14 +412,14 @@ std::unique_ptr<seq_con_info_component> collect_seq_con_info (confirmation_heigh
|
|||
|
||||
size_t nano::pending_confirmation_height::size ()
|
||||
{
|
||||
std::lock_guard<std::mutex> lk (mutex);
|
||||
nano::lock_guard<std::mutex> lk (mutex);
|
||||
return pending.size ();
|
||||
}
|
||||
|
||||
bool nano::pending_confirmation_height::is_processing_block (nano::block_hash const & hash_a)
|
||||
{
|
||||
// First check the hash currently being processed
|
||||
std::lock_guard<std::mutex> lk (mutex);
|
||||
nano::lock_guard<std::mutex> lk (mutex);
|
||||
if (!current_hash.is_zero () && current_hash == hash_a)
|
||||
{
|
||||
return true;
|
||||
|
@ -431,7 +431,7 @@ bool nano::pending_confirmation_height::is_processing_block (nano::block_hash co
|
|||
|
||||
nano::block_hash nano::pending_confirmation_height::current ()
|
||||
{
|
||||
std::lock_guard<std::mutex> lk (mutex);
|
||||
nano::lock_guard<std::mutex> lk (mutex);
|
||||
return current_hash;
|
||||
}
|
||||
|
||||
|
|
|
@ -78,7 +78,7 @@ private:
|
|||
uint64_t iterated_height;
|
||||
};
|
||||
|
||||
std::condition_variable condition;
|
||||
nano::condition_variable condition;
|
||||
nano::pending_confirmation_height & pending_confirmations;
|
||||
std::atomic<bool> stopped{ false };
|
||||
nano::block_store & store;
|
||||
|
|
|
@@ -86,7 +86,7 @@ void nano::distributed_work::start_work ()
if (!outstanding.empty ())
{
std::lock_guard<std::mutex> guard (mutex);
nano::lock_guard<std::mutex> guard (mutex);
for (auto const & i : outstanding)
{
auto host (i.first);
@@ -184,7 +184,7 @@ void nano::distributed_work::stop (bool const local_stop_a)
{
if (!stopped.exchange (true))
{
std::lock_guard<std::mutex> lock (mutex);
nano::lock_guard<std::mutex> guard (mutex);
if (local_stop_a && (node.config.work_threads != 0 || node.work.opencl))
{
node.work.cancel (root);
@@ -303,7 +303,7 @@ void nano::distributed_work::handle_failure (bool const last)
bool nano::distributed_work::remove (boost::asio::ip::address const & address)
{
std::lock_guard<std::mutex> guard (mutex);
nano::lock_guard<std::mutex> guard (mutex);
outstanding.erase (address);
return outstanding.empty ();
}
@@ -323,7 +323,7 @@ void nano::distributed_work_factory::make (unsigned int backoff_a, nano::block_h
cleanup_finished ();
auto distributed (std::make_shared<nano::distributed_work> (backoff_a, node, root_a, callback_a, difficulty_a));
{
std::lock_guard<std::mutex> guard (mutex);
nano::lock_guard<std::mutex> guard (mutex);
work[root_a].emplace_back (distributed);
}
distributed->start ();
@@ -332,7 +332,7 @@ void nano::distributed_work_factory::make (unsigned int backoff_a, nano::block_h
void nano::distributed_work_factory::cancel (nano::block_hash const & root_a, bool const local_stop)
{
{
std::lock_guard<std::mutex> guard (mutex);
nano::lock_guard<std::mutex> guard (mutex);
auto existing_l (work.find (root_a));
if (existing_l != work.end ())
{
@@ -352,7 +352,7 @@ void nano::distributed_work_factory::cancel (nano::block_hash const & root_a, bo
void nano::distributed_work_factory::cleanup_finished ()
{
std::lock_guard<std::mutex> guard (mutex);
nano::lock_guard<std::mutex> guard (mutex);
for (auto it (work.begin ()), end (work.end ()); it != end;)
{
it->second.erase (std::remove_if (it->second.begin (), it->second.end (), [](auto distributed_a) {
@@ -237,7 +237,7 @@ bool nano::election::publish (std::shared_ptr<nano::block> block_a)
size_t nano::election::last_votes_size ()
{
std::lock_guard<std::mutex> lock (node.active.mutex);
nano::lock_guard<std::mutex> lock (node.active.mutex);
return last_votes.size ();
}
@@ -9,7 +9,7 @@ node (node_a)
void nano::gap_cache::add (nano::transaction const & transaction_a, nano::block_hash const & hash_a, std::chrono::steady_clock::time_point time_point_a)
{
std::lock_guard<std::mutex> lock (mutex);
nano::lock_guard<std::mutex> lock (mutex);
auto existing (blocks.get<1> ().find (hash_a));
if (existing != blocks.get<1> ().end ())
{
@@ -29,13 +29,13 @@ void nano::gap_cache::add (nano::transaction const & transaction_a, nano::block_
void nano::gap_cache::erase (nano::block_hash const & hash_a)
{
std::lock_guard<std::mutex> lock (mutex);
nano::lock_guard<std::mutex> lock (mutex);
blocks.get<1> ().erase (hash_a);
}

void nano::gap_cache::vote (std::shared_ptr<nano::vote> vote_a)
{
std::lock_guard<std::mutex> lock (mutex);
nano::lock_guard<std::mutex> lock (mutex);
auto transaction (node.store.tx_begin_read ());
for (auto hash : *vote_a)
{
@@ -107,7 +107,7 @@ nano::uint128_t nano::gap_cache::bootstrap_threshold (nano::transaction const &
size_t nano::gap_cache::size ()
{
std::lock_guard<std::mutex> lock (mutex);
nano::lock_guard<std::mutex> lock (mutex);
return blocks.size ();
}
@@ -1006,7 +1006,7 @@ void nano::json_handler::block_confirm ()
// Add record in confirmation history for confirmed block
nano::election_status status{ block_l, 0, std::chrono::duration_cast<std::chrono::milliseconds> (std::chrono::system_clock::now ().time_since_epoch ()), std::chrono::duration_values<std::chrono::milliseconds>::zero (), nano::election_status_type::active_confirmation_height };
{
std::lock_guard<std::mutex> lock (node.active.mutex);
nano::lock_guard<std::mutex> lock (node.active.mutex);
node.active.confirmed.push_back (status);
if (node.active.confirmed.size () > node.config.confirmation_history_size)
{
@@ -1677,7 +1677,7 @@ void nano::json_handler::confirmation_active ()
}
boost::property_tree::ptree elections;
{
std::lock_guard<std::mutex> lock (node.active.mutex);
nano::lock_guard<std::mutex> lock (node.active.mutex);
for (auto i (node.active.roots.begin ()), n (node.active.roots.end ()); i != n; ++i)
{
if (i->election->confirmation_request_count >= announcements && !i->election->confirmed && !i->election->stopped)
@@ -1753,7 +1753,7 @@ void nano::json_handler::confirmation_info ()
nano::qualified_root root;
if (!root.decode_hex (root_text))
{
std::lock_guard<std::mutex> lock (node.active.mutex);
nano::lock_guard<std::mutex> lock (node.active.mutex);
auto conflict_info (node.active.roots.find (root));
if (conflict_info != node.active.roots.end ())
{
@@ -28,7 +28,7 @@ public:
void observe ();
void complete (nano::payment_status);
std::mutex mutex;
std::condition_variable condition;
nano::condition_variable condition;
nano::node & node;
nano::account account;
nano::amount amount;

@@ -128,7 +128,7 @@ void nano::mdb_txn_tracker::serialize_json (boost::property_tree::ptree & json,
// Copying is cheap compared to generating the stack trace strings, so reduce time holding the mutex
std::vector<mdb_txn_stats> copy_stats;
{
std::lock_guard<std::mutex> guard (mutex);
nano::lock_guard<std::mutex> guard (mutex);
copy_stats = stats;
}
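The serialize_json hunk above illustrates the pattern the timed locks are meant to encourage: hold the mutex only long enough to copy the shared state, then do the slow work (here, generating the stack-trace strings) on the copy. A generic sketch of the same idea follows; the names stats_mutex, shared_stats and serialize_stats are hypothetical and not part of the codebase.

// Sketch only: copy shared data under a short-lived lock, then process it unlocked.
#include <mutex>
#include <string>
#include <vector>

std::mutex stats_mutex;            // protects shared_stats
std::vector<std::string> shared_stats;

std::string serialize_stats ()
{
	std::vector<std::string> copy;
	{
		std::lock_guard<std::mutex> guard (stats_mutex); // held only for the copy
		copy = shared_stats;
	}
	// Expensive formatting happens after the mutex has been released.
	std::string result;
	for (auto const & entry : copy)
	{
		result += entry;
		result += '\n';
	}
	return result;
}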
@@ -197,7 +197,7 @@ void nano::mdb_txn_tracker::output_finished (nano::mdb_txn_stats const & mdb_txn
void nano::mdb_txn_tracker::add (const nano::transaction_impl * transaction_impl)
{
std::lock_guard<std::mutex> guard (mutex);
nano::lock_guard<std::mutex> guard (mutex);
// clang-format off
assert (std::find_if (stats.cbegin (), stats.cend (), matches_txn (transaction_impl)) == stats.cend ());
// clang-format on
@@ -207,7 +207,7 @@ void nano::mdb_txn_tracker::add (const nano::transaction_impl * transaction_impl
/** Can be called without error if transaction does not exist */
void nano::mdb_txn_tracker::erase (const nano::transaction_impl * transaction_impl)
{
std::lock_guard<std::mutex> guard (mutex);
nano::lock_guard<std::mutex> guard (mutex);
// clang-format off
auto it = std::find_if (stats.begin (), stats.end (), matches_txn (transaction_impl));
// clang-format on
@@ -841,7 +841,7 @@ stopped (false)
nano::message_buffer * nano::message_buffer_manager::allocate ()
{
std::unique_lock<std::mutex> lock (mutex);
nano::unique_lock<std::mutex> lock (mutex);
if (!stopped && free.empty () && full.empty ())
{
stats.inc (nano::stat::type::udp, nano::stat::detail::blocking, nano::stat::dir::in);
@@ -869,7 +869,7 @@ void nano::message_buffer_manager::enqueue (nano::message_buffer * data_a)
{
assert (data_a != nullptr);
{
std::lock_guard<std::mutex> lock (mutex);
nano::lock_guard<std::mutex> lock (mutex);
full.push_back (data_a);
}
condition.notify_all ();
@@ -877,7 +877,7 @@ void nano::message_buffer_manager::enqueue (nano::message_buffer * data_a)
nano::message_buffer * nano::message_buffer_manager::dequeue ()
{
std::unique_lock<std::mutex> lock (mutex);
nano::unique_lock<std::mutex> lock (mutex);
while (!stopped && full.empty ())
{
condition.wait (lock);
@@ -895,7 +895,7 @@ void nano::message_buffer_manager::release (nano::message_buffer * data_a)
{
assert (data_a != nullptr);
{
std::lock_guard<std::mutex> lock (mutex);
nano::lock_guard<std::mutex> lock (mutex);
free.push_back (data_a);
}
condition.notify_all ();
@@ -904,7 +904,7 @@ void nano::message_buffer_manager::release (nano::message_buffer * data_a)
void nano::message_buffer_manager::stop ()
{
{
std::lock_guard<std::mutex> lock (mutex);
nano::lock_guard<std::mutex> lock (mutex);
stopped = true;
}
condition.notify_all ();
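The enqueue, release and stop hunks above keep the existing shape in which the lock guard lives in its own block and condition.notify_all () runs only after that block closes, so waiters are signalled once the mutex is already free. A small generic sketch of that shape, with hypothetical names (queue_mutex, queue_condition, stop_queue), is given below.

// Sketch of the notify-outside-the-lock pattern used above (hypothetical names).
#include <condition_variable>
#include <mutex>

std::mutex queue_mutex;
std::condition_variable queue_condition;
bool queue_stopped = false;

void stop_queue ()
{
	{
		std::lock_guard<std::mutex> lock (queue_mutex); // update shared state under the lock
		queue_stopped = true;
	}
	// Notify after the lock is released so a woken thread can acquire it immediately.
	queue_condition.notify_all ();
}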
@ -912,14 +912,14 @@ void nano::message_buffer_manager::stop ()
|
|||
|
||||
void nano::response_channels::add (nano::tcp_endpoint const & endpoint_a, std::vector<nano::tcp_endpoint> insert_channels)
|
||||
{
|
||||
std::lock_guard<std::mutex> lock (response_channels_mutex);
|
||||
nano::lock_guard<std::mutex> lock (response_channels_mutex);
|
||||
channels.emplace (endpoint_a, insert_channels);
|
||||
}
|
||||
|
||||
std::vector<nano::tcp_endpoint> nano::response_channels::search (nano::tcp_endpoint const & endpoint_a)
|
||||
{
|
||||
std::vector<nano::tcp_endpoint> result;
|
||||
std::lock_guard<std::mutex> lock (response_channels_mutex);
|
||||
nano::lock_guard<std::mutex> lock (response_channels_mutex);
|
||||
auto existing (channels.find (endpoint_a));
|
||||
if (existing != channels.end ())
|
||||
{
|
||||
|
@ -930,13 +930,13 @@ std::vector<nano::tcp_endpoint> nano::response_channels::search (nano::tcp_endpo
|
|||
|
||||
void nano::response_channels::remove (nano::tcp_endpoint const & endpoint_a)
|
||||
{
|
||||
std::lock_guard<std::mutex> lock (response_channels_mutex);
|
||||
nano::lock_guard<std::mutex> lock (response_channels_mutex);
|
||||
channels.erase (endpoint_a);
|
||||
}
|
||||
|
||||
size_t nano::response_channels::size ()
|
||||
{
|
||||
std::lock_guard<std::mutex> lock (response_channels_mutex);
|
||||
nano::lock_guard<std::mutex> lock (response_channels_mutex);
|
||||
return channels.size ();
|
||||
}
|
||||
|
||||
|
@ -944,7 +944,7 @@ std::unique_ptr<nano::seq_con_info_component> nano::response_channels::collect_s
|
|||
{
|
||||
size_t channels_count = 0;
|
||||
{
|
||||
std::lock_guard<std::mutex> response_channels_guard (response_channels_mutex);
|
||||
nano::lock_guard<std::mutex> response_channels_guard (response_channels_mutex);
|
||||
channels_count = channels.size ();
|
||||
}
|
||||
auto composite = std::make_unique<seq_con_info_composite> (name);
|
||||
|
@ -956,7 +956,7 @@ boost::optional<nano::uint256_union> nano::syn_cookies::assign (nano::endpoint c
|
|||
{
|
||||
auto ip_addr (endpoint_a.address ());
|
||||
assert (ip_addr.is_v6 ());
|
||||
std::lock_guard<std::mutex> lock (syn_cookie_mutex);
|
||||
nano::lock_guard<std::mutex> lock (syn_cookie_mutex);
|
||||
unsigned & ip_cookies = cookies_per_ip[ip_addr];
|
||||
boost::optional<nano::uint256_union> result;
|
||||
if (ip_cookies < nano::transport::max_peers_per_ip)
|
||||
|
@ -978,7 +978,7 @@ bool nano::syn_cookies::validate (nano::endpoint const & endpoint_a, nano::accou
|
|||
{
|
||||
auto ip_addr (endpoint_a.address ());
|
||||
assert (ip_addr.is_v6 ());
|
||||
std::lock_guard<std::mutex> lock (syn_cookie_mutex);
|
||||
nano::lock_guard<std::mutex> lock (syn_cookie_mutex);
|
||||
auto result (true);
|
||||
auto cookie_it (cookies.find (endpoint_a));
|
||||
if (cookie_it != cookies.end () && !nano::validate_message (node_id, cookie_it->second.cookie, sig))
|
||||
|
@ -1000,7 +1000,7 @@ bool nano::syn_cookies::validate (nano::endpoint const & endpoint_a, nano::accou
|
|||
|
||||
void nano::syn_cookies::purge (std::chrono::steady_clock::time_point const & cutoff_a)
|
||||
{
|
||||
std::lock_guard<std::mutex> lock (syn_cookie_mutex);
|
||||
nano::lock_guard<std::mutex> lock (syn_cookie_mutex);
|
||||
auto it (cookies.begin ());
|
||||
while (it != cookies.end ())
|
||||
{
|
||||
|
@ -1030,7 +1030,7 @@ std::unique_ptr<nano::seq_con_info_component> nano::syn_cookies::collect_seq_con
|
|||
size_t syn_cookies_count = 0;
|
||||
size_t syn_cookies_per_ip_count = 0;
|
||||
{
|
||||
std::lock_guard<std::mutex> syn_cookie_guard (syn_cookie_mutex);
|
||||
nano::lock_guard<std::mutex> syn_cookie_guard (syn_cookie_mutex);
|
||||
syn_cookies_count = cookies.size ();
|
||||
syn_cookies_per_ip_count = cookies_per_ip.size ();
|
||||
}
|
||||
|
|
|
@ -58,7 +58,7 @@ public:
|
|||
private:
|
||||
nano::stat & stats;
|
||||
std::mutex mutex;
|
||||
std::condition_variable condition;
|
||||
nano::condition_variable condition;
|
||||
boost::circular_buffer<nano::message_buffer *> free;
|
||||
boost::circular_buffer<nano::message_buffer *> full;
|
||||
std::vector<uint8_t> slab;
|
||||
|
|
|
@ -70,7 +70,7 @@ std::unique_ptr<seq_con_info_component> collect_seq_con_info (rep_crawler & rep_
|
|||
{
|
||||
size_t count = 0;
|
||||
{
|
||||
std::lock_guard<std::mutex> guard (rep_crawler.active_mutex);
|
||||
nano::lock_guard<std::mutex> guard (rep_crawler.active_mutex);
|
||||
count = rep_crawler.active.size ();
|
||||
}
|
||||
|
||||
|
@ -89,7 +89,7 @@ std::unique_ptr<seq_con_info_component> collect_seq_con_info (block_processor &
|
|||
size_t rolled_back_count = 0;
|
||||
|
||||
{
|
||||
std::lock_guard<std::mutex> guard (block_processor.mutex);
|
||||
nano::lock_guard<std::mutex> guard (block_processor.mutex);
|
||||
state_blocks_count = block_processor.state_blocks.size ();
|
||||
blocks_count = block_processor.blocks.size ();
|
||||
blocks_hashes_count = block_processor.blocks_hashes.size ();
|
||||
|
@ -881,12 +881,12 @@ void nano::node::bootstrap_wallet ()
|
|||
{
|
||||
std::deque<nano::account> accounts;
|
||||
{
|
||||
std::lock_guard<std::mutex> lock (wallets.mutex);
|
||||
nano::lock_guard<std::mutex> lock (wallets.mutex);
|
||||
auto transaction (wallets.tx_begin_read ());
|
||||
for (auto i (wallets.items.begin ()), n (wallets.items.end ()); i != n && accounts.size () < 128; ++i)
|
||||
{
|
||||
auto & wallet (*i->second);
|
||||
std::lock_guard<std::recursive_mutex> wallet_lock (wallet.store.mutex);
|
||||
nano::lock_guard<std::recursive_mutex> wallet_lock (wallet.store.mutex);
|
||||
for (auto j (wallet.store.begin (transaction)), m (wallet.store.end ()); j != m && accounts.size () < 128; ++j)
|
||||
{
|
||||
nano::account account (j->first);
|
||||
|
@ -1215,7 +1215,7 @@ void nano::node::process_confirmed (nano::election_status const & status_a, uint
|
|||
|
||||
bool nano::block_arrival::add (nano::block_hash const & hash_a)
|
||||
{
|
||||
std::lock_guard<std::mutex> lock (mutex);
|
||||
nano::lock_guard<std::mutex> lock (mutex);
|
||||
auto now (std::chrono::steady_clock::now ());
|
||||
auto inserted (arrival.insert (nano::block_arrival_info{ now, hash_a }));
|
||||
auto result (!inserted.second);
|
||||
|
@ -1224,7 +1224,7 @@ bool nano::block_arrival::add (nano::block_hash const & hash_a)
|
|||
|
||||
bool nano::block_arrival::recent (nano::block_hash const & hash_a)
|
||||
{
|
||||
std::lock_guard<std::mutex> lock (mutex);
|
||||
nano::lock_guard<std::mutex> lock (mutex);
|
||||
auto now (std::chrono::steady_clock::now ());
|
||||
while (arrival.size () > arrival_size_min && arrival.begin ()->arrival + arrival_time_min < now)
|
||||
{
|
||||
|
@ -1239,7 +1239,7 @@ std::unique_ptr<seq_con_info_component> collect_seq_con_info (block_arrival & bl
|
|||
{
|
||||
size_t count = 0;
|
||||
{
|
||||
std::lock_guard<std::mutex> guard (block_arrival.mutex);
|
||||
nano::lock_guard<std::mutex> guard (block_arrival.mutex);
|
||||
count = block_arrival.arrival.size ();
|
||||
}
|
||||
|
||||
|
|
|
@ -19,7 +19,7 @@ void nano::online_reps::observe (nano::account const & rep_a)
|
|||
auto transaction (node.ledger.store.tx_begin_read ());
|
||||
if (node.ledger.weight (transaction, rep_a) > 0)
|
||||
{
|
||||
std::lock_guard<std::mutex> lock (mutex);
|
||||
nano::lock_guard<std::mutex> lock (mutex);
|
||||
reps.insert (rep_a);
|
||||
}
|
||||
}
|
||||
|
@ -38,7 +38,7 @@ void nano::online_reps::sample ()
|
|||
nano::uint128_t current;
|
||||
std::unordered_set<nano::account> reps_copy;
|
||||
{
|
||||
std::lock_guard<std::mutex> lock (mutex);
|
||||
nano::lock_guard<std::mutex> lock (mutex);
|
||||
reps_copy.swap (reps);
|
||||
}
|
||||
for (auto & i : reps_copy)
|
||||
|
@ -47,7 +47,7 @@ void nano::online_reps::sample ()
|
|||
}
|
||||
node.ledger.store.online_weight_put (transaction, std::chrono::system_clock::now ().time_since_epoch ().count (), current);
|
||||
auto trend_l (trend (transaction));
|
||||
std::lock_guard<std::mutex> lock (mutex);
|
||||
nano::lock_guard<std::mutex> lock (mutex);
|
||||
online = trend_l;
|
||||
}
|
||||
|
||||
|
@ -69,14 +69,14 @@ nano::uint128_t nano::online_reps::trend (nano::transaction & transaction_a)
|
|||
|
||||
nano::uint128_t nano::online_reps::online_stake () const
|
||||
{
|
||||
std::lock_guard<std::mutex> lock (mutex);
|
||||
nano::lock_guard<std::mutex> lock (mutex);
|
||||
return std::max (online, minimum);
|
||||
}
|
||||
|
||||
std::vector<nano::account> nano::online_reps::list ()
|
||||
{
|
||||
std::vector<nano::account> result;
|
||||
std::lock_guard<std::mutex> lock (mutex);
|
||||
nano::lock_guard<std::mutex> lock (mutex);
|
||||
for (auto & i : reps)
|
||||
{
|
||||
result.push_back (i);
|
||||
|
@ -90,7 +90,7 @@ std::unique_ptr<seq_con_info_component> collect_seq_con_info (online_reps & onli
|
|||
{
|
||||
size_t count = 0;
|
||||
{
|
||||
std::lock_guard<std::mutex> guard (online_reps.mutex);
|
||||
nano::lock_guard<std::mutex> guard (online_reps.mutex);
|
||||
count = online_reps.reps.size ();
|
||||
}
|
||||
|
||||
|
|
|
@ -695,7 +695,7 @@ boost::optional<uint64_t> nano::opencl_work::generate_work (nano::uint256_union
|
|||
|
||||
boost::optional<uint64_t> nano::opencl_work::generate_work (nano::uint256_union const & root_a, uint64_t const difficulty_a, std::atomic<int> & ticket_a)
|
||||
{
|
||||
std::lock_guard<std::mutex> lock (mutex);
|
||||
nano::lock_guard<std::mutex> lock (mutex);
|
||||
bool error (false);
|
||||
int ticket_l (ticket_a);
|
||||
uint64_t result (0);
|
||||
|
|
|
@ -12,7 +12,7 @@ void nano::payment_observer_processor::observer_action (nano::account const & ac
|
|||
{
|
||||
std::shared_ptr<nano::json_payment_observer> observer;
|
||||
{
|
||||
std::lock_guard<std::mutex> lock (mutex);
|
||||
nano::lock_guard<std::mutex> lock (mutex);
|
||||
auto existing (payment_observers.find (account_a));
|
||||
if (existing != payment_observers.end ())
|
||||
{
|
||||
|
@ -27,14 +27,14 @@ void nano::payment_observer_processor::observer_action (nano::account const & ac
|
|||
|
||||
void nano::payment_observer_processor::add (nano::account const & account_a, std::shared_ptr<nano::json_payment_observer> payment_observer_a)
|
||||
{
|
||||
std::lock_guard<std::mutex> lock (mutex);
|
||||
nano::lock_guard<std::mutex> lock (mutex);
|
||||
assert (payment_observers.find (account_a) == payment_observers.end ());
|
||||
payment_observers[account_a] = payment_observer_a;
|
||||
}
|
||||
|
||||
void nano::payment_observer_processor::erase (nano::account & account_a)
|
||||
{
|
||||
std::lock_guard<std::mutex> lock (mutex);
|
||||
nano::lock_guard<std::mutex> lock (mutex);
|
||||
assert (payment_observers.find (account_a) != payment_observers.end ());
|
||||
payment_observers.erase (account_a);
|
||||
}
|
||||
|
|
|
@ -23,7 +23,7 @@ void nano::port_mapping::refresh_devices ()
|
|||
{
|
||||
if (!network_params.network.is_test_network ())
|
||||
{
|
||||
std::lock_guard<std::mutex> lock (mutex);
|
||||
nano::lock_guard<std::mutex> lock (mutex);
|
||||
int discover_error = 0;
|
||||
freeUPNPDevlist (devices);
|
||||
devices = upnpDiscover (2000, nullptr, nullptr, UPNP_LOCAL_PORT_ANY, false, 2, &discover_error);
|
||||
|
@ -52,7 +52,7 @@ void nano::port_mapping::refresh_devices ()
|
|||
nano::endpoint nano::port_mapping::external_address ()
|
||||
{
|
||||
nano::endpoint result (boost::asio::ip::address_v6{}, 0);
|
||||
std::lock_guard<std::mutex> lock (mutex);
|
||||
nano::lock_guard<std::mutex> lock (mutex);
|
||||
for (auto & protocol : protocols)
|
||||
{
|
||||
if (protocol.external_port != 0)
|
||||
|
@ -67,7 +67,7 @@ void nano::port_mapping::refresh_mapping ()
|
|||
{
|
||||
if (!network_params.network.is_test_network ())
|
||||
{
|
||||
std::lock_guard<std::mutex> lock (mutex);
|
||||
nano::lock_guard<std::mutex> lock (mutex);
|
||||
auto node_port (std::to_string (node.network.endpoint ().port ()));
|
||||
|
||||
// We don't map the RPC port because, unless RPC authentication was added, this would almost always be a security risk
|
||||
|
@ -98,7 +98,7 @@ int nano::port_mapping::check_mapping ()
|
|||
if (!network_params.network.is_test_network ())
|
||||
{
|
||||
// Long discovery time and fast setup/teardown make this impractical for testing
|
||||
std::lock_guard<std::mutex> lock (mutex);
|
||||
nano::lock_guard<std::mutex> lock (mutex);
|
||||
auto node_port (std::to_string (node.network.endpoint ().port ()));
|
||||
for (auto & protocol : protocols)
|
||||
{
|
||||
|
@ -171,7 +171,7 @@ void nano::port_mapping::check_mapping_loop ()
|
|||
void nano::port_mapping::stop ()
|
||||
{
|
||||
on = false;
|
||||
std::lock_guard<std::mutex> lock (mutex);
|
||||
nano::lock_guard<std::mutex> lock (mutex);
|
||||
for (auto & protocol : protocols)
|
||||
{
|
||||
if (protocol.external_port != 0)
|
||||
|
|
|
@ -11,19 +11,19 @@ node (node_a)
|
|||
|
||||
void nano::rep_crawler::add (nano::block_hash const & hash_a)
|
||||
{
|
||||
std::lock_guard<std::mutex> lock (active_mutex);
|
||||
nano::lock_guard<std::mutex> lock (active_mutex);
|
||||
active.insert (hash_a);
|
||||
}
|
||||
|
||||
void nano::rep_crawler::remove (nano::block_hash const & hash_a)
|
||||
{
|
||||
std::lock_guard<std::mutex> lock (active_mutex);
|
||||
nano::lock_guard<std::mutex> lock (active_mutex);
|
||||
active.erase (hash_a);
|
||||
}
|
||||
|
||||
bool nano::rep_crawler::exists (nano::block_hash const & hash_a)
|
||||
{
|
||||
std::lock_guard<std::mutex> lock (active_mutex);
|
||||
nano::lock_guard<std::mutex> lock (active_mutex);
|
||||
return active.count (hash_a) != 0;
|
||||
}
|
||||
|
||||
|
@ -118,7 +118,7 @@ void nano::rep_crawler::query (std::shared_ptr<nano::transport::channel> channel
|
|||
bool nano::rep_crawler::response (std::shared_ptr<nano::transport::channel> channel_a, nano::account const & rep_account_a, nano::amount const & weight_a)
|
||||
{
|
||||
auto updated (false);
|
||||
std::lock_guard<std::mutex> lock (probable_reps_mutex);
|
||||
nano::lock_guard<std::mutex> lock (probable_reps_mutex);
|
||||
auto existing (probable_reps.find (rep_account_a));
|
||||
if (existing != probable_reps.end ())
|
||||
{
|
||||
|
@ -143,7 +143,7 @@ bool nano::rep_crawler::response (std::shared_ptr<nano::transport::channel> chan
|
|||
|
||||
nano::uint128_t nano::rep_crawler::total_weight () const
|
||||
{
|
||||
std::lock_guard<std::mutex> lock (probable_reps_mutex);
|
||||
nano::lock_guard<std::mutex> lock (probable_reps_mutex);
|
||||
nano::uint128_t result (0);
|
||||
for (auto i (probable_reps.get<tag_weight> ().begin ()), n (probable_reps.get<tag_weight> ().end ()); i != n; ++i)
|
||||
{
|
||||
|
@ -163,7 +163,7 @@ nano::uint128_t nano::rep_crawler::total_weight () const
|
|||
std::vector<nano::representative> nano::rep_crawler::representatives_by_weight ()
|
||||
{
|
||||
std::vector<nano::representative> result;
|
||||
std::lock_guard<std::mutex> lock (probable_reps_mutex);
|
||||
nano::lock_guard<std::mutex> lock (probable_reps_mutex);
|
||||
for (auto i (probable_reps.get<tag_weight> ().begin ()), n (probable_reps.get<tag_weight> ().end ()); i != n; ++i)
|
||||
{
|
||||
auto weight (i->weight.number ());
|
||||
|
@ -181,7 +181,7 @@ std::vector<nano::representative> nano::rep_crawler::representatives_by_weight (
|
|||
|
||||
void nano::rep_crawler::on_rep_request (std::shared_ptr<nano::transport::channel> channel_a)
|
||||
{
|
||||
std::lock_guard<std::mutex> lock (probable_reps_mutex);
|
||||
nano::lock_guard<std::mutex> lock (probable_reps_mutex);
|
||||
|
||||
using probable_rep_itr_t = probably_rep_t::index<tag_channel_ref>::type::iterator;
|
||||
probably_rep_t::index<tag_channel_ref>::type & channel_ref_index = probable_reps.get<tag_channel_ref> ();
|
||||
|
@ -201,7 +201,7 @@ void nano::rep_crawler::cleanup_reps ()
|
|||
std::vector<std::shared_ptr<nano::transport::channel>> channels;
|
||||
{
|
||||
// Check known rep channels
|
||||
std::lock_guard<std::mutex> lock (probable_reps_mutex);
|
||||
nano::lock_guard<std::mutex> lock (probable_reps_mutex);
|
||||
for (auto i (probable_reps.get<tag_last_request> ().begin ()), n (probable_reps.get<tag_last_request> ().end ()); i != n; ++i)
|
||||
{
|
||||
channels.push_back (i->channel);
|
||||
|
@ -229,7 +229,7 @@ void nano::rep_crawler::cleanup_reps ()
|
|||
}
|
||||
if (!equal)
|
||||
{
|
||||
std::lock_guard<std::mutex> lock (probable_reps_mutex);
|
||||
nano::lock_guard<std::mutex> lock (probable_reps_mutex);
|
||||
probable_reps.get<tag_channel_ref> ().erase (*i);
|
||||
}
|
||||
}
|
||||
|
@ -239,7 +239,7 @@ std::vector<nano::representative> nano::rep_crawler::representatives (size_t cou
|
|||
{
|
||||
std::vector<representative> result;
|
||||
result.reserve (std::min (count_a, size_t (16)));
|
||||
std::lock_guard<std::mutex> lock (probable_reps_mutex);
|
||||
nano::lock_guard<std::mutex> lock (probable_reps_mutex);
|
||||
for (auto i (probable_reps.get<tag_weight> ().begin ()), n (probable_reps.get<tag_weight> ().end ()); i != n && result.size () < count_a; ++i)
|
||||
{
|
||||
if (!i->weight.is_zero ())
|
||||
|
@ -264,6 +264,6 @@ std::vector<std::shared_ptr<nano::transport::channel>> nano::rep_crawler::repres
|
|||
/** Total number of representatives */
|
||||
size_t nano::rep_crawler::representative_count ()
|
||||
{
|
||||
std::lock_guard<std::mutex> lock (probable_reps_mutex);
|
||||
nano::lock_guard<std::mutex> lock (probable_reps_mutex);
|
||||
return probable_reps.size ();
|
||||
}
|
||||
|
|
|
@ -21,7 +21,7 @@ void nano::signature_checker::verify (nano::signature_check_set & check_a)
|
|||
{
|
||||
{
|
||||
// Don't process anything else if we have stopped
|
||||
std::lock_guard<std::mutex> guard (mutex);
|
||||
nano::lock_guard<std::mutex> guard (mutex);
|
||||
if (stopped)
|
||||
{
|
||||
return;
|
||||
|
@ -73,7 +73,7 @@ void nano::signature_checker::verify (nano::signature_check_set & check_a)
|
|||
|
||||
void nano::signature_checker::stop ()
|
||||
{
|
||||
std::lock_guard<std::mutex> guard (mutex);
|
||||
nano::lock_guard<std::mutex> guard (mutex);
|
||||
if (!stopped)
|
||||
{
|
||||
stopped = true;
|
||||
|
@ -83,7 +83,7 @@ void nano::signature_checker::stop ()
|
|||
|
||||
void nano::signature_checker::flush ()
|
||||
{
|
||||
std::lock_guard<std::mutex> guard (mutex);
|
||||
nano::lock_guard<std::mutex> guard (mutex);
|
||||
while (!stopped && tasks_remaining != 0)
|
||||
;
|
||||
}
|
||||
|
@ -128,7 +128,8 @@ void nano::signature_checker::set_thread_names (unsigned num_threads)
|
|||
{
|
||||
auto ready = false;
|
||||
auto pending = num_threads;
|
||||
std::condition_variable cv;
|
||||
nano::condition_variable cv;
|
||||
|
||||
std::vector<std::promise<void>> promises (num_threads);
|
||||
std::vector<std::future<void>> futures;
|
||||
futures.reserve (num_threads);
|
||||
|
@ -140,7 +141,7 @@ void nano::signature_checker::set_thread_names (unsigned num_threads)
|
|||
{
|
||||
// clang-format off
|
||||
boost::asio::post (thread_pool, [&cv, &ready, &pending, &mutex = mutex, &promise = promises[i]]() {
|
||||
std::unique_lock<std::mutex> lk (mutex);
|
||||
nano::unique_lock<std::mutex> lk (mutex);
|
||||
nano::thread_role::set (nano::thread_role::name::signature_checking);
|
||||
if (--pending == 0)
|
||||
{
|
||||
|
|
|
@ -10,7 +10,7 @@ socket (socket_a)
|
|||
|
||||
nano::transport::channel_tcp::~channel_tcp ()
|
||||
{
|
||||
std::lock_guard<std::mutex> lk (channel_mutex);
|
||||
nano::lock_guard<std::mutex> lk (channel_mutex);
|
||||
// Close socket. Exception: socket is used by bootstrap_server
|
||||
if (socket && !server)
|
||||
{
|
||||
|
@ -93,7 +93,7 @@ bool nano::transport::tcp_channels::insert (std::shared_ptr<nano::transport::cha
|
|||
bool error (true);
|
||||
if (!node.network.not_a_peer (udp_endpoint, node.config.allow_local_peers) && !stopped)
|
||||
{
|
||||
std::unique_lock<std::mutex> lock (mutex);
|
||||
nano::unique_lock<std::mutex> lock (mutex);
|
||||
auto existing (channels.get<endpoint_tag> ().find (endpoint));
|
||||
if (existing == channels.get<endpoint_tag> ().end ())
|
||||
{
|
||||
|
@ -117,19 +117,19 @@ bool nano::transport::tcp_channels::insert (std::shared_ptr<nano::transport::cha
|
|||
|
||||
void nano::transport::tcp_channels::erase (nano::tcp_endpoint const & endpoint_a)
|
||||
{
|
||||
std::lock_guard<std::mutex> lock (mutex);
|
||||
nano::lock_guard<std::mutex> lock (mutex);
|
||||
channels.get<endpoint_tag> ().erase (endpoint_a);
|
||||
}
|
||||
|
||||
size_t nano::transport::tcp_channels::size () const
|
||||
{
|
||||
std::lock_guard<std::mutex> lock (mutex);
|
||||
nano::lock_guard<std::mutex> lock (mutex);
|
||||
return channels.size ();
|
||||
}
|
||||
|
||||
std::shared_ptr<nano::transport::channel_tcp> nano::transport::tcp_channels::find_channel (nano::tcp_endpoint const & endpoint_a) const
|
||||
{
|
||||
std::lock_guard<std::mutex> lock (mutex);
|
||||
nano::lock_guard<std::mutex> lock (mutex);
|
||||
std::shared_ptr<nano::transport::channel_tcp> result;
|
||||
auto existing (channels.get<endpoint_tag> ().find (endpoint_a));
|
||||
if (existing != channels.get<endpoint_tag> ().end ())
|
||||
|
@ -143,7 +143,7 @@ std::unordered_set<std::shared_ptr<nano::transport::channel>> nano::transport::t
|
|||
{
|
||||
std::unordered_set<std::shared_ptr<nano::transport::channel>> result;
|
||||
result.reserve (count_a);
|
||||
std::lock_guard<std::mutex> lock (mutex);
|
||||
nano::lock_guard<std::mutex> lock (mutex);
|
||||
// Stop trying to fill result with random samples after this many attempts
|
||||
auto random_cutoff (count_a * 2);
|
||||
auto peers_size (channels.size ());
|
||||
|
@ -182,7 +182,7 @@ bool nano::transport::tcp_channels::store_all (bool clear_peers)
|
|||
// we collect endpoints to be saved and then release the lock.
|
||||
std::vector<nano::endpoint> endpoints;
|
||||
{
|
||||
std::lock_guard<std::mutex> lock (mutex);
|
||||
nano::lock_guard<std::mutex> lock (mutex);
|
||||
endpoints.reserve (channels.size ());
|
||||
std::transform (channels.begin (), channels.end (),
|
||||
std::back_inserter (endpoints), [](const auto & channel) { return nano::transport::map_tcp_to_endpoint (channel.endpoint ()); });
|
||||
|
@ -209,7 +209,7 @@ bool nano::transport::tcp_channels::store_all (bool clear_peers)
|
|||
std::shared_ptr<nano::transport::channel_tcp> nano::transport::tcp_channels::find_node_id (nano::account const & node_id_a)
|
||||
{
|
||||
std::shared_ptr<nano::transport::channel_tcp> result;
|
||||
std::lock_guard<std::mutex> lock (mutex);
|
||||
nano::lock_guard<std::mutex> lock (mutex);
|
||||
auto existing (channels.get<node_id_tag> ().find (node_id_a));
|
||||
if (existing != channels.get<node_id_tag> ().end ())
|
||||
{
|
||||
|
@ -221,7 +221,7 @@ std::shared_ptr<nano::transport::channel_tcp> nano::transport::tcp_channels::fin
|
|||
nano::tcp_endpoint nano::transport::tcp_channels::bootstrap_peer ()
|
||||
{
|
||||
nano::tcp_endpoint result (boost::asio::ip::address_v6::any (), 0);
|
||||
std::lock_guard<std::mutex> lock (mutex);
|
||||
nano::lock_guard<std::mutex> lock (mutex);
|
||||
for (auto i (channels.get<last_bootstrap_attempt_tag> ().begin ()), n (channels.get<last_bootstrap_attempt_tag> ().end ()); i != n;)
|
||||
{
|
||||
if (i->channel->get_network_version () >= node.network_params.protocol.protocol_version_bootstrap_min)
|
||||
|
@ -325,7 +325,7 @@ void nano::transport::tcp_channels::start ()
|
|||
void nano::transport::tcp_channels::stop ()
|
||||
{
|
||||
stopped = true;
|
||||
std::unique_lock<std::mutex> lock (mutex);
|
||||
nano::unique_lock<std::mutex> lock (mutex);
|
||||
// Close all TCP sockets
|
||||
for (auto i (channels.begin ()), j (channels.end ()); i != j; ++i)
|
||||
{
|
||||
|
@ -345,7 +345,7 @@ void nano::transport::tcp_channels::stop ()
|
|||
|
||||
bool nano::transport::tcp_channels::max_ip_connections (nano::tcp_endpoint const & endpoint_a)
|
||||
{
|
||||
std::unique_lock<std::mutex> lock (mutex);
|
||||
nano::unique_lock<std::mutex> lock (mutex);
|
||||
bool result (channels.get<ip_address_tag> ().count (endpoint_a.address ()) >= nano::transport::max_peers_per_ip);
|
||||
return result;
|
||||
}
|
||||
|
@ -359,7 +359,7 @@ bool nano::transport::tcp_channels::reachout (nano::endpoint const & endpoint_a)
|
|||
{
|
||||
// Don't keepalive to nodes that already sent us something
|
||||
error |= find_channel (tcp_endpoint) != nullptr;
|
||||
std::lock_guard<std::mutex> lock (mutex);
|
||||
nano::lock_guard<std::mutex> lock (mutex);
|
||||
auto existing (attempts.find (tcp_endpoint));
|
||||
error |= existing != attempts.end ();
|
||||
attempts.insert ({ tcp_endpoint, std::chrono::steady_clock::now () });
|
||||
|
@ -372,7 +372,7 @@ std::unique_ptr<nano::seq_con_info_component> nano::transport::tcp_channels::col
|
|||
size_t channels_count = 0;
|
||||
size_t attemps_count = 0;
|
||||
{
|
||||
std::lock_guard<std::mutex> guard (mutex);
|
||||
nano::lock_guard<std::mutex> guard (mutex);
|
||||
channels_count = channels.size ();
|
||||
attemps_count = attempts.size ();
|
||||
}
|
||||
|
@ -386,7 +386,7 @@ std::unique_ptr<nano::seq_con_info_component> nano::transport::tcp_channels::col
|
|||
|
||||
void nano::transport::tcp_channels::purge (std::chrono::steady_clock::time_point const & cutoff_a)
|
||||
{
|
||||
std::lock_guard<std::mutex> lock (mutex);
|
||||
nano::lock_guard<std::mutex> lock (mutex);
|
||||
auto disconnect_cutoff (channels.get<last_packet_sent_tag> ().lower_bound (cutoff_a));
|
||||
channels.get<last_packet_sent_tag> ().erase (channels.get<last_packet_sent_tag> ().begin (), disconnect_cutoff);
|
||||
// Remove keepalive attempt tracking for attempts older than cutoff
|
||||
|
@ -398,7 +398,7 @@ void nano::transport::tcp_channels::ongoing_keepalive ()
|
|||
{
|
||||
nano::keepalive message;
|
||||
node.network.random_fill (message.peers);
|
||||
std::unique_lock<std::mutex> lock (mutex);
|
||||
nano::unique_lock<std::mutex> lock (mutex);
|
||||
// Wake up channels
|
||||
std::vector<std::shared_ptr<nano::transport::channel_tcp>> send_list;
|
||||
auto keepalive_sent_cutoff (channels.get<last_packet_sent_tag> ().lower_bound (std::chrono::steady_clock::now () - node.network_params.node.period));
|
||||
|
@ -439,7 +439,7 @@ void nano::transport::tcp_channels::ongoing_keepalive ()
|
|||
|
||||
void nano::transport::tcp_channels::list (std::deque<std::shared_ptr<nano::transport::channel>> & deque_a)
|
||||
{
|
||||
std::lock_guard<std::mutex> lock (mutex);
|
||||
nano::lock_guard<std::mutex> lock (mutex);
|
||||
for (auto i (channels.begin ()), j (channels.end ()); i != j; ++i)
|
||||
{
|
||||
deque_a.push_back (i->channel);
|
||||
|
@ -448,7 +448,7 @@ void nano::transport::tcp_channels::list (std::deque<std::shared_ptr<nano::trans
|
|||
|
||||
void nano::transport::tcp_channels::modify (std::shared_ptr<nano::transport::channel_tcp> channel_a, std::function<void(std::shared_ptr<nano::transport::channel_tcp>)> modify_callback_a)
|
||||
{
|
||||
std::lock_guard<std::mutex> lock (mutex);
|
||||
nano::lock_guard<std::mutex> lock (mutex);
|
||||
auto existing (channels.get<endpoint_tag> ().find (channel_a->get_tcp_endpoint ()));
|
||||
if (existing != channels.get<endpoint_tag> ().end ())
|
||||
{
|
||||
|
@ -460,7 +460,7 @@ void nano::transport::tcp_channels::modify (std::shared_ptr<nano::transport::cha
|
|||
|
||||
void nano::transport::tcp_channels::update (nano::tcp_endpoint const & endpoint_a)
|
||||
{
|
||||
std::lock_guard<std::mutex> lock (mutex);
|
||||
nano::lock_guard<std::mutex> lock (mutex);
|
||||
auto existing (channels.get<endpoint_tag> ().find (endpoint_a));
|
||||
if (existing != channels.get<endpoint_tag> ().end ())
|
||||
{
|
||||
|
|
|
@ -41,7 +41,7 @@ namespace transport
|
|||
|
||||
nano::endpoint get_endpoint () const override
|
||||
{
|
||||
std::lock_guard<std::mutex> lk (channel_mutex);
|
||||
nano::lock_guard<std::mutex> lk (channel_mutex);
|
||||
if (socket)
|
||||
{
|
||||
return nano::transport::map_tcp_to_endpoint (socket->remote_endpoint ());
|
||||
|
@ -54,7 +54,7 @@ namespace transport
|
|||
|
||||
nano::tcp_endpoint get_tcp_endpoint () const override
|
||||
{
|
||||
std::lock_guard<std::mutex> lk (channel_mutex);
|
||||
nano::lock_guard<std::mutex> lk (channel_mutex);
|
||||
if (socket)
|
||||
{
|
||||
return socket->remote_endpoint ();
|
||||
|
|
|
@ -222,7 +222,7 @@ bool nano::bandwidth_limiter::should_drop (const size_t & message_size)
|
|||
{
|
||||
return result;
|
||||
}
|
||||
std::lock_guard<std::mutex> lock (mutex);
|
||||
nano::lock_guard<std::mutex> lock (mutex);
|
||||
|
||||
if (message_size > limit / rate_buffer.size () || rate + message_size > limit)
|
||||
{
|
||||
|
@ -244,6 +244,6 @@ bool nano::bandwidth_limiter::should_drop (const size_t & message_size)
|
|||
|
||||
size_t nano::bandwidth_limiter::get_rate ()
|
||||
{
|
||||
std::lock_guard<std::mutex> lock (mutex);
|
||||
nano::lock_guard<std::mutex> lock (mutex);
|
||||
return trended_rate;
|
||||
}
|
||||
|
|
|
@ -63,49 +63,49 @@ namespace transport
|
|||
|
||||
std::chrono::steady_clock::time_point get_last_bootstrap_attempt () const
|
||||
{
|
||||
std::lock_guard<std::mutex> lk (channel_mutex);
|
||||
nano::lock_guard<std::mutex> lk (channel_mutex);
|
||||
return last_bootstrap_attempt;
|
||||
}
|
||||
|
||||
void set_last_bootstrap_attempt (std::chrono::steady_clock::time_point const time_a)
|
||||
{
|
||||
std::lock_guard<std::mutex> lk (channel_mutex);
|
||||
nano::lock_guard<std::mutex> lk (channel_mutex);
|
||||
last_bootstrap_attempt = time_a;
|
||||
}
|
||||
|
||||
std::chrono::steady_clock::time_point get_last_packet_received () const
|
||||
{
|
||||
std::lock_guard<std::mutex> lk (channel_mutex);
|
||||
nano::lock_guard<std::mutex> lk (channel_mutex);
|
||||
return last_packet_received;
|
||||
}
|
||||
|
||||
void set_last_packet_received (std::chrono::steady_clock::time_point const time_a)
|
||||
{
|
||||
std::lock_guard<std::mutex> lk (channel_mutex);
|
||||
nano::lock_guard<std::mutex> lk (channel_mutex);
|
||||
last_packet_received = time_a;
|
||||
}
|
||||
|
||||
std::chrono::steady_clock::time_point get_last_packet_sent () const
|
||||
{
|
||||
std::lock_guard<std::mutex> lk (channel_mutex);
|
||||
nano::lock_guard<std::mutex> lk (channel_mutex);
|
||||
return last_packet_sent;
|
||||
}
|
||||
|
||||
void set_last_packet_sent (std::chrono::steady_clock::time_point const time_a)
|
||||
{
|
||||
std::lock_guard<std::mutex> lk (channel_mutex);
|
||||
nano::lock_guard<std::mutex> lk (channel_mutex);
|
||||
last_packet_sent = time_a;
|
||||
}
|
||||
|
||||
boost::optional<nano::account> get_node_id_optional () const
|
||||
{
|
||||
std::lock_guard<std::mutex> lk (channel_mutex);
|
||||
nano::lock_guard<std::mutex> lk (channel_mutex);
|
||||
return node_id;
|
||||
}
|
||||
|
||||
nano::account get_node_id () const
|
||||
{
|
||||
std::lock_guard<std::mutex> lk (channel_mutex);
|
||||
nano::lock_guard<std::mutex> lk (channel_mutex);
|
||||
if (node_id.is_initialized ())
|
||||
{
|
||||
return node_id.get ();
|
||||
|
@ -118,7 +118,7 @@ namespace transport
|
|||
|
||||
void set_node_id (nano::account node_id_a)
|
||||
{
|
||||
std::lock_guard<std::mutex> lk (channel_mutex);
|
||||
nano::lock_guard<std::mutex> lk (channel_mutex);
|
||||
node_id = node_id_a;
|
||||
}
|
||||
|
||||
|
|
|
@ -95,7 +95,7 @@ std::shared_ptr<nano::transport::channel_udp> nano::transport::udp_channels::ins
|
|||
std::shared_ptr<nano::transport::channel_udp> result;
|
||||
if (!node.network.not_a_peer (endpoint_a, node.config.allow_local_peers) && (node.network_params.network.is_test_network () || !max_ip_connections (endpoint_a)))
|
||||
{
|
||||
std::unique_lock<std::mutex> lock (mutex);
|
||||
nano::unique_lock<std::mutex> lock (mutex);
|
||||
auto existing (channels.get<endpoint_tag> ().find (endpoint_a));
|
||||
if (existing != channels.get<endpoint_tag> ().end ())
|
||||
{
|
||||
|
@ -114,19 +114,19 @@ std::shared_ptr<nano::transport::channel_udp> nano::transport::udp_channels::ins
|
|||
|
||||
void nano::transport::udp_channels::erase (nano::endpoint const & endpoint_a)
|
||||
{
|
||||
std::lock_guard<std::mutex> lock (mutex);
|
||||
nano::lock_guard<std::mutex> lock (mutex);
|
||||
channels.get<endpoint_tag> ().erase (endpoint_a);
|
||||
}
|
||||
|
||||
size_t nano::transport::udp_channels::size () const
|
||||
{
|
||||
std::lock_guard<std::mutex> lock (mutex);
|
||||
nano::lock_guard<std::mutex> lock (mutex);
|
||||
return channels.size ();
|
||||
}
|
||||
|
||||
std::shared_ptr<nano::transport::channel_udp> nano::transport::udp_channels::channel (nano::endpoint const & endpoint_a) const
|
||||
{
|
||||
std::lock_guard<std::mutex> lock (mutex);
|
||||
nano::lock_guard<std::mutex> lock (mutex);
|
||||
std::shared_ptr<nano::transport::channel_udp> result;
|
||||
auto existing (channels.get<endpoint_tag> ().find (endpoint_a));
|
||||
if (existing != channels.get<endpoint_tag> ().end ())
|
||||
|
@ -140,7 +140,7 @@ std::unordered_set<std::shared_ptr<nano::transport::channel>> nano::transport::u
|
|||
{
|
||||
std::unordered_set<std::shared_ptr<nano::transport::channel>> result;
|
||||
result.reserve (count_a);
|
||||
std::lock_guard<std::mutex> lock (mutex);
|
||||
nano::lock_guard<std::mutex> lock (mutex);
|
||||
// Stop trying to fill result with random samples after this many attempts
|
||||
auto random_cutoff (count_a * 2);
|
||||
auto peers_size (channels.size ());
|
||||
|
@ -179,7 +179,7 @@ bool nano::transport::udp_channels::store_all (bool clear_peers)
|
|||
// we collect endpoints to be saved and then release the lock.
|
||||
std::vector<nano::endpoint> endpoints;
|
||||
{
|
||||
std::lock_guard<std::mutex> lock (mutex);
|
||||
nano::lock_guard<std::mutex> lock (mutex);
|
||||
endpoints.reserve (channels.size ());
|
||||
std::transform (channels.begin (), channels.end (),
|
||||
std::back_inserter (endpoints), [](const auto & channel) { return channel.endpoint (); });
|
||||
|
@ -206,7 +206,7 @@ bool nano::transport::udp_channels::store_all (bool clear_peers)
|
|||
std::shared_ptr<nano::transport::channel_udp> nano::transport::udp_channels::find_node_id (nano::account const & node_id_a)
|
||||
{
|
||||
std::shared_ptr<nano::transport::channel_udp> result;
|
||||
std::lock_guard<std::mutex> lock (mutex);
|
||||
nano::lock_guard<std::mutex> lock (mutex);
|
||||
auto existing (channels.get<node_id_tag> ().find (node_id_a));
|
||||
if (existing != channels.get<node_id_tag> ().end ())
|
||||
{
|
||||
|
@ -217,13 +217,13 @@ std::shared_ptr<nano::transport::channel_udp> nano::transport::udp_channels::fin
|
|||
|
||||
void nano::transport::udp_channels::clean_node_id (nano::account const & node_id_a)
|
||||
{
|
||||
std::lock_guard<std::mutex> lock (mutex);
|
||||
nano::lock_guard<std::mutex> lock (mutex);
|
||||
channels.get<node_id_tag> ().erase (node_id_a);
|
||||
}
|
||||
|
||||
void nano::transport::udp_channels::clean_node_id (nano::endpoint const & endpoint_a, nano::account const & node_id_a)
|
||||
{
|
||||
std::lock_guard<std::mutex> lock (mutex);
|
||||
nano::lock_guard<std::mutex> lock (mutex);
|
||||
auto existing (channels.get<node_id_tag> ().equal_range (node_id_a));
|
||||
for (auto & record : boost::make_iterator_range (existing))
|
||||
{
|
||||
|
@ -239,7 +239,7 @@ void nano::transport::udp_channels::clean_node_id (nano::endpoint const & endpoi
|
|||
nano::tcp_endpoint nano::transport::udp_channels::bootstrap_peer (uint8_t connection_protocol_version_min)
|
||||
{
|
||||
nano::tcp_endpoint result (boost::asio::ip::address_v6::any (), 0);
|
||||
std::lock_guard<std::mutex> lock (mutex);
|
||||
nano::lock_guard<std::mutex> lock (mutex);
|
||||
for (auto i (channels.get<last_bootstrap_attempt_tag> ().begin ()), n (channels.get<last_bootstrap_attempt_tag> ().end ()); i != n;)
|
||||
{
|
||||
if (i->channel->get_network_version () >= connection_protocol_version_min)
|
||||
|
@ -309,7 +309,7 @@ void nano::transport::udp_channels::stop ()
|
|||
{
|
||||
// Stop and invalidate local endpoint
|
||||
stopped = true;
|
||||
std::lock_guard<std::mutex> lock (mutex);
|
||||
nano::lock_guard<std::mutex> lock (mutex);
|
||||
local_endpoint = nano::endpoint (boost::asio::ip::address_v6::loopback (), 0);
|
||||
|
||||
// On test-net, close directly to avoid address-reuse issues. On livenet, close
|
||||
|
@ -337,7 +337,7 @@ void nano::transport::udp_channels::close_socket ()
|
|||
|
||||
nano::endpoint nano::transport::udp_channels::get_local_endpoint () const
|
||||
{
|
||||
std::lock_guard<std::mutex> lock (mutex);
|
||||
nano::lock_guard<std::mutex> lock (mutex);
|
||||
return local_endpoint;
|
||||
}
|
||||
|
||||
|
@ -579,7 +579,7 @@ std::shared_ptr<nano::transport::channel> nano::transport::udp_channels::create
|
|||
|
||||
bool nano::transport::udp_channels::max_ip_connections (nano::endpoint const & endpoint_a)
|
||||
{
|
||||
std::unique_lock<std::mutex> lock (mutex);
|
||||
nano::unique_lock<std::mutex> lock (mutex);
|
||||
bool result (channels.get<ip_address_tag> ().count (endpoint_a.address ()) >= nano::transport::max_peers_per_ip);
|
||||
return result;
|
||||
}
|
||||
|
@ -593,7 +593,7 @@ bool nano::transport::udp_channels::reachout (nano::endpoint const & endpoint_a)
|
|||
auto endpoint_l (nano::transport::map_endpoint_to_v6 (endpoint_a));
|
||||
// Don't keepalive to nodes that already sent us something
|
||||
error |= channel (endpoint_l) != nullptr;
|
||||
std::lock_guard<std::mutex> lock (mutex);
|
||||
nano::lock_guard<std::mutex> lock (mutex);
|
||||
auto existing (attempts.find (endpoint_l));
|
||||
error |= existing != attempts.end ();
|
||||
attempts.insert ({ endpoint_l, std::chrono::steady_clock::now () });
|
||||
|
@ -606,7 +606,7 @@ std::unique_ptr<nano::seq_con_info_component> nano::transport::udp_channels::col
|
|||
size_t channels_count = 0;
|
||||
size_t attemps_count = 0;
|
||||
{
|
||||
std::lock_guard<std::mutex> guard (mutex);
|
||||
nano::lock_guard<std::mutex> guard (mutex);
|
||||
channels_count = channels.size ();
|
||||
attemps_count = attempts.size ();
|
||||
}
|
||||
|
@ -620,7 +620,7 @@ std::unique_ptr<nano::seq_con_info_component> nano::transport::udp_channels::col
|
|||
|
||||
void nano::transport::udp_channels::purge (std::chrono::steady_clock::time_point const & cutoff_a)
|
||||
{
|
||||
std::lock_guard<std::mutex> lock (mutex);
|
||||
nano::lock_guard<std::mutex> lock (mutex);
|
||||
auto disconnect_cutoff (channels.get<last_packet_received_tag> ().lower_bound (cutoff_a));
|
||||
channels.get<last_packet_received_tag> ().erase (channels.get<last_packet_received_tag> ().begin (), disconnect_cutoff);
|
||||
// Remove keepalive attempt tracking for attempts older than cutoff
|
||||
|
@ -633,7 +633,7 @@ void nano::transport::udp_channels::ongoing_keepalive ()
|
|||
nano::keepalive message;
|
||||
node.network.random_fill (message.peers);
|
||||
std::vector<std::shared_ptr<nano::transport::channel_udp>> send_list;
|
||||
std::unique_lock<std::mutex> lock (mutex);
|
||||
nano::unique_lock<std::mutex> lock (mutex);
|
||||
auto keepalive_cutoff (channels.get<last_packet_received_tag> ().lower_bound (std::chrono::steady_clock::now () - node.network_params.node.period));
|
||||
for (auto i (channels.get<last_packet_received_tag> ().begin ()); i != keepalive_cutoff; ++i)
|
||||
{
|
||||
|
@ -655,7 +655,7 @@ void nano::transport::udp_channels::ongoing_keepalive ()
|
|||
|
||||
void nano::transport::udp_channels::list (std::deque<std::shared_ptr<nano::transport::channel>> & deque_a)
|
||||
{
|
||||
std::lock_guard<std::mutex> lock (mutex);
|
||||
nano::lock_guard<std::mutex> lock (mutex);
|
||||
for (auto i (channels.begin ()), j (channels.end ()); i != j; ++i)
|
||||
{
|
||||
deque_a.push_back (i->channel);
|
||||
|
@ -664,7 +664,7 @@ void nano::transport::udp_channels::list (std::deque<std::shared_ptr<nano::trans
|
|||
|
||||
void nano::transport::udp_channels::modify (std::shared_ptr<nano::transport::channel_udp> channel_a, std::function<void(std::shared_ptr<nano::transport::channel_udp>)> modify_callback_a)
|
||||
{
|
||||
std::lock_guard<std::mutex> lock (mutex);
|
||||
nano::lock_guard<std::mutex> lock (mutex);
|
||||
auto existing (channels.get<endpoint_tag> ().find (channel_a->endpoint));
|
||||
if (existing != channels.get<endpoint_tag> ().end ())
|
||||
{
|
||||
|
|
|
@ -37,13 +37,13 @@ namespace transport
|
|||
|
||||
nano::endpoint get_endpoint () const override
|
||||
{
|
||||
std::lock_guard<std::mutex> lk (channel_mutex);
|
||||
nano::lock_guard<std::mutex> lk (channel_mutex);
|
||||
return endpoint;
|
||||
}
|
||||
|
||||
nano::tcp_endpoint get_tcp_endpoint () const override
|
||||
{
|
||||
std::lock_guard<std::mutex> lk (channel_mutex);
|
||||
nano::lock_guard<std::mutex> lk (channel_mutex);
|
||||
return nano::transport::map_endpoint_to_tcp (endpoint);
|
||||
}
|
||||
|
||||
|
|
|
@ -12,7 +12,7 @@ thread ([this]() {
|
|||
process_loop ();
|
||||
})
|
||||
{
|
||||
std::unique_lock<std::mutex> lock (mutex);
|
||||
nano::unique_lock<std::mutex> lock (mutex);
|
||||
condition.wait (lock, [& started = started] { return started; });
|
||||
}
|
||||
|
||||
|
@ -21,7 +21,7 @@ void nano::vote_processor::process_loop ()
|
|||
nano::timer<std::chrono::milliseconds> elapsed;
|
||||
bool log_this_iteration;
|
||||
|
||||
std::unique_lock<std::mutex> lock (mutex);
|
||||
nano::unique_lock<std::mutex> lock (mutex);
|
||||
started = true;
|
||||
|
||||
lock.unlock ();
|
||||
|
@ -49,7 +49,7 @@ void nano::vote_processor::process_loop ()
|
|||
lock.unlock ();
|
||||
verify_votes (votes_l);
|
||||
{
|
||||
std::unique_lock<std::mutex> active_single_lock (node.active.mutex);
|
||||
nano::unique_lock<std::mutex> active_single_lock (node.active.mutex);
|
||||
auto transaction (node.store.tx_begin_read ());
|
||||
uint64_t count (1);
|
||||
for (auto & i : votes_l)
|
||||
|
@ -86,7 +86,7 @@ void nano::vote_processor::process_loop ()
|
|||
|
||||
void nano::vote_processor::vote (std::shared_ptr<nano::vote> vote_a, std::shared_ptr<nano::transport::channel> channel_a)
|
||||
{
|
||||
std::unique_lock<std::mutex> lock (mutex);
|
||||
nano::unique_lock<std::mutex> lock (mutex);
|
||||
if (!stopped)
|
||||
{
|
||||
bool process (false);
|
||||
|
@ -231,7 +231,7 @@ nano::vote_code nano::vote_processor::vote_blocking (nano::transaction const & t
|
|||
void nano::vote_processor::stop ()
|
||||
{
|
||||
{
|
||||
std::lock_guard<std::mutex> lock (mutex);
|
||||
nano::lock_guard<std::mutex> lock (mutex);
|
||||
stopped = true;
|
||||
}
|
||||
condition.notify_all ();
|
||||
|
@ -243,7 +243,7 @@ void nano::vote_processor::stop ()
|
|||
|
||||
void nano::vote_processor::flush ()
|
||||
{
|
||||
std::unique_lock<std::mutex> lock (mutex);
|
||||
nano::unique_lock<std::mutex> lock (mutex);
|
||||
while (active || !votes.empty ())
|
||||
{
|
||||
condition.wait (lock);
|
||||
|
@ -252,7 +252,7 @@ void nano::vote_processor::flush ()
|
|||
|
||||
void nano::vote_processor::calculate_weights ()
|
||||
{
|
||||
std::unique_lock<std::mutex> lock (mutex);
|
||||
nano::unique_lock<std::mutex> lock (mutex);
|
||||
if (!stopped)
|
||||
{
|
||||
representatives_1.clear ();
|
||||
|
@ -291,7 +291,7 @@ std::unique_ptr<seq_con_info_component> collect_seq_con_info (vote_processor & v
|
|||
size_t representatives_3_count = 0;
|
||||
|
||||
{
|
||||
std::lock_guard<std::mutex> guard (vote_processor.mutex);
|
||||
nano::lock_guard<std::mutex> guard (vote_processor.mutex);
|
||||
votes_count = vote_processor.votes.size ();
|
||||
representatives_1_count = vote_processor.representatives_1.size ();
|
||||
representatives_2_count = vote_processor.representatives_2.size ();
|
||||
|
|
|
@ -40,7 +40,7 @@ private:
|
|||
std::unordered_set<nano::account> representatives_1;
|
||||
std::unordered_set<nano::account> representatives_2;
|
||||
std::unordered_set<nano::account> representatives_3;
|
||||
std::condition_variable condition;
|
||||
nano::condition_variable condition;
|
||||
std::mutex mutex;
|
||||
bool started;
|
||||
bool stopped;
|
||||
|
|
|
@ -7,13 +7,13 @@ nano::vote_generator::vote_generator (nano::node & node_a) :
|
|||
node (node_a),
|
||||
thread ([this]() { run (); })
|
||||
{
|
||||
std::unique_lock<std::mutex> lock (mutex);
|
||||
nano::unique_lock<std::mutex> lock (mutex);
|
||||
condition.wait (lock, [& started = started] { return started; });
|
||||
}
|
||||
|
||||
void nano::vote_generator::add (nano::block_hash const & hash_a)
|
||||
{
|
||||
std::unique_lock<std::mutex> lock (mutex);
|
||||
nano::unique_lock<std::mutex> lock (mutex);
|
||||
hashes.push_back (hash_a);
|
||||
if (hashes.size () >= 12)
|
||||
{
|
||||
|
@ -24,7 +24,7 @@ void nano::vote_generator::add (nano::block_hash const & hash_a)
|
|||
|
||||
void nano::vote_generator::stop ()
|
||||
{
|
||||
std::unique_lock<std::mutex> lock (mutex);
|
||||
nano::unique_lock<std::mutex> lock (mutex);
|
||||
stopped = true;
|
||||
|
||||
lock.unlock ();
|
||||
|
@ -36,7 +36,7 @@ void nano::vote_generator::stop ()
|
|||
}
|
||||
}
|
||||
|
||||
void nano::vote_generator::send (std::unique_lock<std::mutex> & lock_a)
|
||||
void nano::vote_generator::send (nano::unique_lock<std::mutex> & lock_a)
|
||||
{
|
||||
std::vector<nano::block_hash> hashes_l;
|
||||
hashes_l.reserve (12);
|
||||
|
@ -60,7 +60,7 @@ void nano::vote_generator::send (std::unique_lock<std::mutex> & lock_a)
|
|||
void nano::vote_generator::run ()
|
||||
{
|
||||
nano::thread_role::set (nano::thread_role::name::voting);
|
||||
std::unique_lock<std::mutex> lock (mutex);
|
||||
nano::unique_lock<std::mutex> lock (mutex);
|
||||
started = true;
|
||||
lock.unlock ();
|
||||
condition.notify_all ();
|
||||
|
@ -88,7 +88,7 @@ void nano::vote_generator::run ()
|
|||
|
||||
void nano::votes_cache::add (std::shared_ptr<nano::vote> const & vote_a)
|
||||
{
|
||||
std::lock_guard<std::mutex> lock (cache_mutex);
|
||||
nano::lock_guard<std::mutex> lock (cache_mutex);
|
||||
for (auto & block : vote_a->blocks)
|
||||
{
|
||||
auto hash (boost::get<nano::block_hash> (block));
|
||||
|
@ -132,7 +132,7 @@ void nano::votes_cache::add (std::shared_ptr<nano::vote> const & vote_a)
|
|||
std::vector<std::shared_ptr<nano::vote>> nano::votes_cache::find (nano::block_hash const & hash_a)
|
||||
{
|
||||
std::vector<std::shared_ptr<nano::vote>> result;
|
||||
std::lock_guard<std::mutex> lock (cache_mutex);
|
||||
nano::lock_guard<std::mutex> lock (cache_mutex);
|
||||
auto existing (cache.get<1> ().find (hash_a));
|
||||
if (existing != cache.get<1> ().end ())
|
||||
{
|
||||
|
@ -143,7 +143,7 @@ std::vector<std::shared_ptr<nano::vote>> nano::votes_cache::find (nano::block_ha
|
|||
|
||||
void nano::votes_cache::remove (nano::block_hash const & hash_a)
|
||||
{
|
||||
std::lock_guard<std::mutex> lock (cache_mutex);
|
||||
nano::lock_guard<std::mutex> lock (cache_mutex);
|
||||
cache.get<1> ().erase (hash_a);
|
||||
}
|
||||
|
||||
|
@ -154,7 +154,7 @@ std::unique_ptr<seq_con_info_component> collect_seq_con_info (vote_generator & v
|
|||
size_t hashes_count = 0;
|
||||
|
||||
{
|
||||
std::lock_guard<std::mutex> guard (vote_generator.mutex);
|
||||
nano::lock_guard<std::mutex> guard (vote_generator.mutex);
|
||||
hashes_count = vote_generator.hashes.size ();
|
||||
}
|
||||
auto sizeof_element = sizeof (decltype (vote_generator.hashes)::value_type);
|
||||
|
@ -168,7 +168,7 @@ std::unique_ptr<seq_con_info_component> collect_seq_con_info (votes_cache & vote
|
|||
size_t cache_count = 0;
|
||||
|
||||
{
|
||||
std::lock_guard<std::mutex> guard (votes_cache.cache_mutex);
|
||||
nano::lock_guard<std::mutex> guard (votes_cache.cache_mutex);
|
||||
cache_count = votes_cache.cache.size ();
|
||||
}
|
||||
auto sizeof_element = sizeof (decltype (votes_cache.cache)::value_type);
|
||||
|
|
|
@ -28,10 +28,10 @@ public:
|
|||
|
||||
private:
|
||||
void run ();
|
||||
void send (std::unique_lock<std::mutex> &);
|
||||
void send (nano::unique_lock<std::mutex> &);
|
||||
nano::node & node;
|
||||
std::mutex mutex;
|
||||
std::condition_variable condition;
|
||||
nano::condition_variable condition;
|
||||
std::deque<nano::block_hash> hashes;
|
||||
nano::network_params network_params;
|
||||
bool stopped{ false };
|
||||
|
|
|
@ -26,7 +26,7 @@ nano::uint256_union nano::wallet_store::salt (nano::transaction const & transact
void nano::wallet_store::wallet_key (nano::raw_key & prv_a, nano::transaction const & transaction_a)
{
std::lock_guard<std::recursive_mutex> lock (mutex);
nano::lock_guard<std::recursive_mutex> lock (mutex);
nano::raw_key wallet_l;
wallet_key_mem.value (wallet_l);
nano::raw_key password_l;

@ -146,7 +146,7 @@ bool nano::wallet_store::attempt_password (nano::transaction const & transaction
{
bool result = false;
{
std::lock_guard<std::recursive_mutex> lock (mutex);
nano::lock_guard<std::recursive_mutex> lock (mutex);
nano::raw_key password_l;
derive_key (password_l, transaction_a, password_a);
password.value_set (password_l);

@ -173,7 +173,7 @@ bool nano::wallet_store::attempt_password (nano::transaction const & transaction
bool nano::wallet_store::rekey (nano::transaction const & transaction_a, std::string const & password_a)
{
std::lock_guard<std::recursive_mutex> lock (mutex);
nano::lock_guard<std::recursive_mutex> lock (mutex);
bool result (false);
if (valid_password (transaction_a))
{

@ -219,7 +219,7 @@ nano::fan::fan (nano::uint256_union const & key, size_t count_a)
void nano::fan::value (nano::raw_key & prv_a)
{
std::lock_guard<std::mutex> lock (mutex);
nano::lock_guard<std::mutex> lock (mutex);
value_get (prv_a);
}

@ -235,7 +235,7 @@ void nano::fan::value_get (nano::raw_key & prv_a)
void nano::fan::value_set (nano::raw_key const & value_a)
{
std::lock_guard<std::mutex> lock (mutex);
nano::lock_guard<std::mutex> lock (mutex);
nano::raw_key value_l;
value_get (value_l);
*(values[0]) ^= value_l.data;

@ -737,7 +737,7 @@ void nano::wallet_store::upgrade_v3_v4 (nano::transaction const & transaction_a)
void nano::kdf::phs (nano::raw_key & result_a, std::string const & password_a, nano::uint256_union const & salt_a)
{
static nano::network_params network_params;
std::lock_guard<std::mutex> lock (mutex);
nano::lock_guard<std::mutex> lock (mutex);
auto success (argon2_hash (1, network_params.kdf_work, 1, password_a.data (), password_a.size (), salt_a.bytes.data (), salt_a.bytes.size (), result_a.data.bytes.data (), result_a.data.bytes.size (), NULL, 0, Argon2_d, 0x10));
assert (success == 0);
(void)success;

@ -761,7 +761,7 @@ void nano::wallet::enter_initial_password ()
{
nano::raw_key password_l;
{
std::lock_guard<std::recursive_mutex> lock (store.mutex);
nano::lock_guard<std::recursive_mutex> lock (store.mutex);
store.password.value (password_l);
}
if (password_l.data.is_zero ())

@ -812,7 +812,7 @@ nano::public_key nano::wallet::deterministic_insert (nano::transaction const & t
auto block_transaction (wallets.node.store.tx_begin_read ());
if (wallets.check_rep (block_transaction, key, half_principal_weight))
{
std::lock_guard<std::mutex> lock (representatives_mutex);
nano::lock_guard<std::mutex> lock (representatives_mutex);
representatives.insert (key);
}
}

@ -855,7 +855,7 @@ nano::public_key nano::wallet::insert_adhoc (nano::transaction const & transacti
auto half_principal_weight (wallets.node.minimum_principal_weight () / 2);
if (wallets.check_rep (block_transaction, key, half_principal_weight))
{
std::lock_guard<std::mutex> lock (representatives_mutex);
nano::lock_guard<std::mutex> lock (representatives_mutex);
representatives.insert (key);
}
}

@ -1408,7 +1408,7 @@ nano::work_watcher::~work_watcher ()
void nano::work_watcher::stop ()
{
std::unique_lock<std::mutex> lock (mutex);
nano::unique_lock<std::mutex> lock (mutex);
watched.clear ();
stopped = true;
}

@ -1419,7 +1419,7 @@ void nano::work_watcher::add (std::shared_ptr<nano::block> block_a)
if (!stopped && block_l != nullptr)
{
auto root_l (block_l->qualified_root ());
std::unique_lock<std::mutex> lock (mutex);
nano::unique_lock<std::mutex> lock (mutex);
watched[root_l] = block_l;
lock.unlock ();
watching (root_l, block_l);

@ -1428,7 +1428,7 @@ void nano::work_watcher::add (std::shared_ptr<nano::block> block_a)
void nano::work_watcher::update (nano::qualified_root const & root_a, std::shared_ptr<nano::state_block> block_a)
{
std::lock_guard<std::mutex> guard (mutex);
nano::lock_guard<std::mutex> guard (mutex);
watched[root_a] = block_a;
}

@ -1439,7 +1439,7 @@ void nano::work_watcher::watching (nano::qualified_root const & root_a, std::sha
auto watcher_l = watcher_w.lock ();
if (watcher_l && !watcher_l->stopped && block_a != nullptr)
{
std::unique_lock<std::mutex> lock (watcher_l->mutex);
nano::unique_lock<std::mutex> lock (watcher_l->mutex);
if (watcher_l->watched.find (root_a) != watcher_l->watched.end ()) // not yet confirmed or cancelled
{
lock.unlock ();

@ -1464,7 +1464,7 @@ void nano::work_watcher::watching (nano::qualified_root const & root_a, std::sha
{
{
auto hash (block_a->hash ());
std::lock_guard<std::mutex> active_guard (watcher_l->node.active.mutex);
nano::lock_guard<std::mutex> active_guard (watcher_l->node.active.mutex);
auto existing (watcher_l->node.active.roots.find (root_a));
if (existing != watcher_l->node.active.roots.end ())
{

@ -1505,7 +1505,7 @@ void nano::work_watcher::watching (nano::qualified_root const & root_a, std::sha
void nano::work_watcher::remove (std::shared_ptr<nano::block> block_a)
{
auto root_l (block_a->qualified_root ());
std::lock_guard<std::mutex> lock (mutex);
nano::lock_guard<std::mutex> lock (mutex);
auto existing (watched.find (root_l));
if (existing != watched.end () && existing->second->hash () == block_a->hash ())
{

@ -1516,20 +1516,20 @@ void nano::work_watcher::remove (std::shared_ptr<nano::block> block_a)
bool nano::work_watcher::is_watched (nano::qualified_root const & root_a)
{
std::lock_guard<std::mutex> guard (mutex);
nano::lock_guard<std::mutex> guard (mutex);
auto exists (watched.find (root_a));
return exists != watched.end ();
}

size_t nano::work_watcher::size ()
{
std::lock_guard<std::mutex> guard (mutex);
nano::lock_guard<std::mutex> guard (mutex);
return watched.size ();
}

void nano::wallets::do_wallet_actions ()
{
std::unique_lock<std::mutex> action_lock (action_mutex);
nano::unique_lock<std::mutex> action_lock (action_mutex);
while (!stopped)
{
if (!actions.empty ())

@ -1565,7 +1565,7 @@ thread ([this]() {
do_wallet_actions ();
})
{
std::unique_lock<std::mutex> lock (mutex);
nano::unique_lock<std::mutex> lock (mutex);
if (!error_a)
{
auto transaction (tx_begin_write ());

@ -1634,7 +1634,7 @@ nano::wallets::~wallets ()
std::shared_ptr<nano::wallet> nano::wallets::open (nano::uint256_union const & id_a)
{
std::lock_guard<std::mutex> lock (mutex);
nano::lock_guard<std::mutex> lock (mutex);
std::shared_ptr<nano::wallet> result;
auto existing (items.find (id_a));
if (existing != items.end ())

@ -1646,7 +1646,7 @@ std::shared_ptr<nano::wallet> nano::wallets::open (nano::uint256_union const & i
std::shared_ptr<nano::wallet> nano::wallets::create (nano::uint256_union const & id_a)
{
std::lock_guard<std::mutex> lock (mutex);
nano::lock_guard<std::mutex> lock (mutex);
assert (items.find (id_a) == items.end ());
std::shared_ptr<nano::wallet> result;
bool error;

@ -1664,7 +1664,7 @@ std::shared_ptr<nano::wallet> nano::wallets::create (nano::uint256_union const &
bool nano::wallets::search_pending (nano::uint256_union const & wallet_a)
{
std::lock_guard<std::mutex> lock (mutex);
nano::lock_guard<std::mutex> lock (mutex);
auto result (false);
auto existing (items.find (wallet_a));
result = existing == items.end ();

@ -1678,7 +1678,7 @@ bool nano::wallets::search_pending (nano::uint256_union const & wallet_a)
void nano::wallets::search_pending_all ()
{
std::lock_guard<std::mutex> lock (mutex);
nano::lock_guard<std::mutex> lock (mutex);
for (auto i : items)
{
i.second->search_pending ();

@ -1687,10 +1687,10 @@ void nano::wallets::search_pending_all ()
void nano::wallets::destroy (nano::uint256_union const & id_a)
{
std::lock_guard<std::mutex> lock (mutex);
nano::lock_guard<std::mutex> lock (mutex);
auto transaction (tx_begin_write ());
// action_mutex should be after transactions to prevent deadlocks in deterministic_insert () & insert_adhoc ()
std::lock_guard<std::mutex> action_lock (action_mutex);
nano::lock_guard<std::mutex> action_lock (action_mutex);
auto existing (items.find (id_a));
assert (existing != items.end ());
auto wallet (existing->second);

@ -1700,7 +1700,7 @@ void nano::wallets::destroy (nano::uint256_union const & id_a)
void nano::wallets::reload ()
{
std::lock_guard<std::mutex> lock (mutex);
nano::lock_guard<std::mutex> lock (mutex);
auto transaction (tx_begin_write ());
std::unordered_set<nano::uint256_union> stored_items;
std::string beginning (nano::uint256_union (0).to_string ());

@ -1744,7 +1744,7 @@ void nano::wallets::reload ()
void nano::wallets::queue_wallet_action (nano::uint128_t const & amount_a, std::shared_ptr<nano::wallet> wallet_a, std::function<void(nano::wallet &)> const & action_a)
{
{
std::lock_guard<std::mutex> action_lock (action_mutex);
nano::lock_guard<std::mutex> action_lock (action_mutex);
actions.insert (std::make_pair (amount_a, std::make_pair (wallet_a, std::move (action_a))));
}
condition.notify_all ();

@ -1754,13 +1754,13 @@ void nano::wallets::foreach_representative (nano::transaction const & transactio
{
if (node.config.enable_voting)
{
std::lock_guard<std::mutex> lock (mutex);
nano::lock_guard<std::mutex> lock (mutex);
auto transaction_l (tx_begin_read ());
for (auto i (items.begin ()), n (items.end ()); i != n; ++i)
{
auto & wallet (*i->second);
std::lock_guard<std::recursive_mutex> store_lock (wallet.store.mutex);
std::lock_guard<std::mutex> representatives_lock (wallet.representatives_mutex);
nano::lock_guard<std::recursive_mutex> store_lock (wallet.store.mutex);
nano::lock_guard<std::mutex> representatives_lock (wallet.representatives_mutex);
for (auto ii (wallet.representatives.begin ()), nn (wallet.representatives.end ()); ii != nn; ++ii)
{
nano::account account (*ii);

@ -1794,7 +1794,7 @@ void nano::wallets::foreach_representative (nano::transaction const & transactio
bool nano::wallets::exists (nano::transaction const & transaction_a, nano::public_key const & account_a)
{
std::lock_guard<std::mutex> lock (mutex);
nano::lock_guard<std::mutex> lock (mutex);
auto result (false);
for (auto i (items.begin ()), n (items.end ()); !result && i != n; ++i)
{

@ -1806,7 +1806,7 @@ bool nano::wallets::exists (nano::transaction const & transaction_a, nano::publi
void nano::wallets::stop ()
{
{
std::lock_guard<std::mutex> action_lock (action_mutex);
nano::lock_guard<std::mutex> action_lock (action_mutex);
stopped = true;
actions.clear ();
}

@ -1853,7 +1853,7 @@ bool nano::wallets::check_rep (nano::transaction const & transaction_a, nano::ac
void nano::wallets::compute_reps ()
{
std::lock_guard<std::mutex> lock (mutex);
nano::lock_guard<std::mutex> lock (mutex);
reps_count = 0;
half_principal_reps_count = 0;
auto half_principal_weight (node.minimum_principal_weight () / 2);

@ -1871,7 +1871,7 @@ void nano::wallets::compute_reps ()
representatives_l.insert (account);
}
}
std::lock_guard<std::mutex> representatives_lock (wallet.representatives_mutex);
nano::lock_guard<std::mutex> representatives_lock (wallet.representatives_mutex);
wallet.representatives.swap (representatives_l);
}
}

@ -2025,7 +2025,7 @@ std::unique_ptr<seq_con_info_component> collect_seq_con_info (wallets & wallets,
size_t items_count = 0;
size_t actions_count = 0;
{
std::lock_guard<std::mutex> guard (wallets.mutex);
nano::lock_guard<std::mutex> guard (wallets.mutex);
items_count = wallets.items.size ();
actions_count = wallets.actions.size ();
}
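Several call sites above release the lock early (lock.unlock ()) or pass it to a condition variable, which is why they use nano::unique_lock rather than a plain scope guard. Below is a sketch of a unique_lock-style replacement that times each period the mutex is actually held; it is an assumption for illustration only, and the 50 ms threshold is invented rather than taken from the build configuration.

#include <chrono>
#include <iostream>
#include <mutex>

namespace nano
{
template <typename Mutex>
class unique_lock final
{
public:
	explicit unique_lock (Mutex & mutex_a) :
	mutex (&mutex_a)
	{
		lock ();
	}
	~unique_lock ()
	{
		if (owns)
		{
			unlock ();
		}
	}
	unique_lock (unique_lock const &) = delete;
	unique_lock & operator= (unique_lock const &) = delete;
	// lock () and unlock () keep the standard names so callers can drop the lock early
	// and so a condition variable can release/reacquire it while waiting
	void lock ()
	{
		mutex->lock ();
		owns = true;
		acquired = std::chrono::steady_clock::now ();
	}
	void unlock ()
	{
		auto held (std::chrono::duration_cast<std::chrono::milliseconds> (std::chrono::steady_clock::now () - acquired));
		owns = false;
		mutex->unlock ();
		if (held.count () >= 50) // illustrative threshold only
		{
			std::cerr << "Mutex held for " << held.count () << "ms\n";
		}
	}
	bool owns_lock () const
	{
		return owns;
	}

private:
	Mutex * mutex;
	bool owns{ false };
	std::chrono::steady_clock::time_point acquired;
};
}

Because the interface matches the standard one, code such as nano::unique_lock<std::mutex> lock (mutex); followed by lock.unlock (); compiles unchanged, and a condition variable that accepts generic lock types can wait on it.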
@ -210,7 +210,7 @@ public:
std::multimap<nano::uint128_t, std::pair<std::shared_ptr<nano::wallet>, std::function<void(nano::wallet &)>>, std::greater<nano::uint128_t>> actions;
std::mutex mutex;
std::mutex action_mutex;
std::condition_variable condition;
nano::condition_variable condition;
nano::kdf kdf;
MDB_dbi handle;
MDB_dbi send_action_ids;
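The member change from std::condition_variable to nano::condition_variable follows from the lock change: std::condition_variable::wait accepts only std::unique_lock<std::mutex>, so code that waits while holding a project-specific unique_lock needs std::condition_variable_any or an alias over it. A minimal sketch of how that alias could be defined, again an assumption rather than the project's actual definition:

#include <condition_variable>

namespace nano
{
#ifdef NANO_TIMED_LOCKS
// condition_variable_any works with any BasicLockable type, including a custom unique_lock
using condition_variable = std::condition_variable_any;
#else
using condition_variable = std::condition_variable;
#endif
}

Note that the mutex members themselves (std::mutex mutex; and std::mutex action_mutex; above) keep their standard type; only the RAII wrappers and the condition variable alias change.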
@ -179,7 +179,7 @@ ws_listener (listener_a), ws (std::move (socket_a)), strand (ws.get_executor ())
nano::websocket::session::~session ()
{
{
std::unique_lock<std::mutex> lk (subscriptions_mutex);
nano::unique_lock<std::mutex> lk (subscriptions_mutex);
for (auto & subscription : subscriptions)
{
ws_listener.decrease_subscriber_count (subscription.first);

@ -223,7 +223,7 @@ void nano::websocket::session::close ()
void nano::websocket::session::write (nano::websocket::message message_a)
{
// clang-format off
std::unique_lock<std::mutex> lk (subscriptions_mutex);
nano::unique_lock<std::mutex> lk (subscriptions_mutex);
auto subscription (subscriptions.find (message_a.topic));
if (message_a.topic == nano::websocket::topic::ack || (subscription != subscriptions.end () && !subscription->second->should_filter (message_a)))
{

@ -382,7 +382,7 @@ void nano::websocket::session::handle_message (boost::property_tree::ptree const
if (action == "subscribe" && topic_l != nano::websocket::topic::invalid)
{
auto options_text_l (message_a.get_child_optional ("options"));
std::lock_guard<std::mutex> lk (subscriptions_mutex);
nano::lock_guard<std::mutex> lk (subscriptions_mutex);
std::unique_ptr<nano::websocket::options> options_l{ nullptr };
if (options_text_l && topic_l == nano::websocket::topic::confirmation)
{

@ -412,7 +412,7 @@ void nano::websocket::session::handle_message (boost::property_tree::ptree const
}
else if (action == "unsubscribe" && topic_l != nano::websocket::topic::invalid)
{
std::lock_guard<std::mutex> lk (subscriptions_mutex);
nano::lock_guard<std::mutex> lk (subscriptions_mutex);
if (subscriptions.erase (topic_l))
{
ws_listener.get_node ().logger.always_log ("Websocket: removed subscription to topic: ", from_topic (topic_l));

@ -431,7 +431,7 @@ void nano::websocket::listener::stop ()
stopped = true;
acceptor.close ();

std::lock_guard<std::mutex> lk (sessions_mutex);
nano::lock_guard<std::mutex> lk (sessions_mutex);
for (auto & weak_session : sessions)
{
auto session_ptr (weak_session.lock ());

@ -506,7 +506,7 @@ void nano::websocket::listener::broadcast_confirmation (std::shared_ptr<nano::bl
{
nano::websocket::message_builder builder;

std::lock_guard<std::mutex> lk (sessions_mutex);
nano::lock_guard<std::mutex> lk (sessions_mutex);
boost::optional<nano::websocket::message> msg_with_block;
boost::optional<nano::websocket::message> msg_without_block;
for (auto & weak_session : sessions)

@ -546,7 +546,7 @@ void nano::websocket::listener::broadcast_confirmation (std::shared_ptr<nano::bl
void nano::websocket::listener::broadcast (nano::websocket::message message_a)
{
std::lock_guard<std::mutex> lk (sessions_mutex);
nano::lock_guard<std::mutex> lk (sessions_mutex);
for (auto & weak_session : sessions)
{
auto session_ptr (weak_session.lock ());
@ -1,8 +1,9 @@
#include <nano/lib/utility.hpp>
#include <nano/node/write_database_queue.hpp>

#include <algorithm>

nano::write_guard::write_guard (std::condition_variable & cv_a, std::function<void()> guard_finish_callback_a) :
nano::write_guard::write_guard (nano::condition_variable & cv_a, std::function<void()> guard_finish_callback_a) :
cv (cv_a),
guard_finish_callback (guard_finish_callback_a)
{

@ -17,7 +18,7 @@ nano::write_guard::~write_guard ()
nano::write_database_queue::write_database_queue () :
// clang-format off
guard_finish_callback ([&queue = queue, &mutex = mutex]() {
std::lock_guard<std::mutex> guard (mutex);
nano::lock_guard<std::mutex> guard (mutex);
queue.pop_front ();
})
// clang-format on

@ -26,7 +27,7 @@ guard_finish_callback ([&queue = queue, &mutex = mutex]() {
nano::write_guard nano::write_database_queue::wait (nano::writer writer)
{
std::unique_lock<std::mutex> lk (mutex);
nano::unique_lock<std::mutex> lk (mutex);
// Add writer to the end of the queue if it's not already waiting
auto exists = std::find (queue.cbegin (), queue.cend (), writer) != queue.cend ();
if (!exists)

@ -44,7 +45,7 @@ nano::write_guard nano::write_database_queue::wait (nano::writer writer)
bool nano::write_database_queue::contains (nano::writer writer)
{
std::lock_guard<std::mutex> guard (mutex);
nano::lock_guard<std::mutex> guard (mutex);
return std::find (queue.cbegin (), queue.cend (), writer) != queue.cend ();
}

@ -52,7 +53,7 @@ bool nano::write_database_queue::process (nano::writer writer)
{
auto result = false;
{
std::lock_guard<std::mutex> guard (mutex);
nano::lock_guard<std::mutex> guard (mutex);
// Add writer to the end of the queue if it's not already waiting
auto exists = std::find (queue.cbegin (), queue.cend (), writer) != queue.cend ();
if (!exists)
@ -18,11 +18,11 @@ enum class writer
class write_guard final
{
public:
write_guard (std::condition_variable & cv_a, std::function<void()> guard_finish_callback_a);
write_guard (nano::condition_variable & cv_a, std::function<void()> guard_finish_callback_a);
~write_guard ();

private:
std::condition_variable & cv;
nano::condition_variable & cv;
std::function<void()> guard_finish_callback;
};

@ -48,7 +48,7 @@ public:
private:
std::deque<nano::writer> queue;
std::mutex mutex;
std::condition_variable cv;
nano::condition_variable cv;
std::function<void()> guard_finish_callback;
std::atomic<bool> stopped{ false };
};
@ -10,7 +10,7 @@ thread ([this]() {
this->run ();
})
{
std::lock_guard<std::mutex> lk (this->request_mutex);
nano::lock_guard<std::mutex> lk (this->request_mutex);
this->connections.reserve (rpc_config.rpc_process.num_ipc_connections);
for (auto i = 0u; i < rpc_config.rpc_process.num_ipc_connections; ++i)
{

@ -19,7 +19,7 @@ thread ([this]() {
// clang-format off
connection->client.async_connect (ipc_address, ipc_port, [ connection, &connections_mutex = this->connections_mutex ](nano::error err) {
// Even if there is an error this needs to be set so that another attempt can be made to connect with the ipc connection
std::lock_guard<std::mutex> lk (connections_mutex);
nano::lock_guard<std::mutex> lk (connections_mutex);
connection->is_available = true;
});
// clang-format on

@ -34,7 +34,7 @@ nano::rpc_request_processor::~rpc_request_processor ()
void nano::rpc_request_processor::stop ()
{
{
std::lock_guard<std::mutex> lock (request_mutex);
nano::lock_guard<std::mutex> lock (request_mutex);
stopped = true;
}
condition.notify_one ();

@ -47,7 +47,7 @@ void nano::rpc_request_processor::stop ()
void nano::rpc_request_processor::add (std::shared_ptr<rpc_request> request)
{
{
std::lock_guard<std::mutex> lk (request_mutex);
nano::lock_guard<std::mutex> lk (request_mutex);
requests.push_back (request);
}
condition.notify_one ();

@ -79,7 +79,7 @@ void nano::rpc_request_processor::read_payload (std::shared_ptr<nano::ipc_connec
void nano::rpc_request_processor::make_available (nano::ipc_connection & connection)
{
std::lock_guard<std::mutex> lk (connections_mutex);
nano::lock_guard<std::mutex> lk (connections_mutex);
connection.is_available = true; // Allow people to use it now
}

@ -123,13 +123,13 @@ void nano::rpc_request_processor::try_reconnect_and_execute_request (std::shared
void nano::rpc_request_processor::run ()
{
// This should be a conditioned wait
std::unique_lock<std::mutex> lk (request_mutex);
nano::unique_lock<std::mutex> lk (request_mutex);
while (!stopped)
{
if (!requests.empty ())
{
lk.unlock ();
std::unique_lock<std::mutex> conditions_lk (connections_mutex);
nano::unique_lock<std::mutex> conditions_lk (connections_mutex);
// Find the first free ipc_client
auto it = std::find_if (connections.begin (), connections.end (), [](auto connection) -> bool {
return connection->is_available;

@ -56,7 +56,7 @@ private:
std::mutex connections_mutex;
bool stopped{ false };
std::deque<std::shared_ptr<nano::rpc_request>> requests;
std::condition_variable condition;
nano::condition_variable condition;
const std::string ipc_address;
const uint16_t ipc_port;
std::thread thread;
@ -1713,7 +1713,7 @@ TEST (rpc, process_block_with_work_watcher)
uint64_t updated_difficulty;
while (!updated)
{
std::unique_lock<std::mutex> lock (node1.active.mutex);
nano::unique_lock<std::mutex> lock (node1.active.mutex);
//fill multipliers_cb and update active difficulty;
for (auto i (0); i < node1.active.multipliers_cb.size (); i++)
{

@ -6808,7 +6808,7 @@ TEST (rpc, active_difficulty)
rpc.start ();
boost::property_tree::ptree request;
request.put ("action", "active_difficulty");
std::unique_lock<std::mutex> lock (node->active.mutex);
nano::unique_lock<std::mutex> lock (node->active.mutex);
node->active.multipliers_cb.push_front (1.5);
node->active.multipliers_cb.push_front (4.2);
// Also pushes 1.0 to the front of multipliers_cb
@ -273,7 +273,7 @@ nano::uint128_t nano::summation_visitor::compute_internal (nano::summation_visit
{
if (current->amount_hash == network_params.ledger.genesis_account)
{
sum_set (std::numeric_limits<nano::uint128_t>::max ());
sum_set ((std::numeric_limits<nano::uint128_t>::max) ());
current->amount_hash = 0;
}
else
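The extra parentheses around std::numeric_limits<nano::uint128_t>::max are a common portability idiom: on Windows, <windows.h> defines min and max as function-like macros unless NOMINMAX is set, and parenthesising the name stops the preprocessor from expanding it. A tiny stand-alone illustration (not project code):

#include <limits>

int main ()
{
	// With the parentheses, ::max always names the function even if a max macro exists
	auto const largest = (std::numeric_limits<unsigned long long>::max) ();
	return largest > 0 ? 0 : 1;
}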
@ -279,7 +279,7 @@ public:
std::shared_ptr<nano::vote> vote_generate (nano::transaction const & transaction_a, nano::account const & account_a, nano::raw_key const & key_a, std::shared_ptr<nano::block> block_a) override
{
std::lock_guard<std::mutex> lock (cache_mutex);
nano::lock_guard<std::mutex> lock (cache_mutex);
auto result (vote_current (transaction_a, account_a));
uint64_t sequence ((result ? result->sequence : 0) + 1);
result = std::make_shared<nano::vote> (account_a, key_a, sequence, block_a);

@ -289,7 +289,7 @@ public:
std::shared_ptr<nano::vote> vote_generate (nano::transaction const & transaction_a, nano::account const & account_a, nano::raw_key const & key_a, std::vector<nano::block_hash> blocks_a) override
{
std::lock_guard<std::mutex> lock (cache_mutex);
nano::lock_guard<std::mutex> lock (cache_mutex);
auto result (vote_current (transaction_a, account_a));
uint64_t sequence ((result ? result->sequence : 0) + 1);
result = std::make_shared<nano::vote> (account_a, key_a, sequence, blocks_a);

@ -299,7 +299,7 @@ public:
std::shared_ptr<nano::vote> vote_max (nano::transaction const & transaction_a, std::shared_ptr<nano::vote> vote_a) override
{
std::lock_guard<std::mutex> lock (cache_mutex);
nano::lock_guard<std::mutex> lock (cache_mutex);
auto current (vote_current (transaction_a, vote_a->account));
auto result (vote_a);
if (current != nullptr && current->sequence > result->sequence)

@ -540,7 +540,7 @@ public:
void flush (nano::write_transaction const & transaction_a) override
{
{
std::lock_guard<std::mutex> lock (cache_mutex);
nano::lock_guard<std::mutex> lock (cache_mutex);
vote_cache_l1.swap (vote_cache_l2);
vote_cache_l1.clear ();
}

@ -683,7 +683,7 @@ std::shared_ptr<nano::vote> nano::vote_uniquer::unique (std::shared_ptr<nano::vo
result->blocks.front () = uniquer.unique (boost::get<std::shared_ptr<nano::block>> (result->blocks.front ()));
}
nano::uint256_union key (vote_a->full_hash ());
std::lock_guard<std::mutex> lock (mutex);
nano::lock_guard<std::mutex> lock (mutex);
auto & existing (votes[key]);
if (auto block_l = existing.lock ())
{

@ -722,7 +722,7 @@ std::shared_ptr<nano::vote> nano::vote_uniquer::unique (std::shared_ptr<nano::vo
size_t nano::vote_uniquer::size ()
{
std::lock_guard<std::mutex> lock (mutex);
nano::lock_guard<std::mutex> lock (mutex);
return votes.size ();
}
@ -224,7 +224,7 @@ TEST (node, fork_storm)
}
else
{
std::lock_guard<std::mutex> lock (node_a->active.mutex);
nano::lock_guard<std::mutex> lock (node_a->active.mutex);
if (node_a->active.roots.begin ()->election->last_votes_size () == 1)
{
++single;

@ -466,7 +466,7 @@ TEST (confirmation_height, many_accounts_single_confirmation)
// As this test can take a while extend the next frontier check
{
std::lock_guard<std::mutex> guard (node->active.mutex);
nano::lock_guard<std::mutex> guard (node->active.mutex);
node->active.next_frontier_check = std::chrono::steady_clock::now () + 7200s;
}

@ -537,7 +537,7 @@ TEST (confirmation_height, many_accounts_many_confirmations)
// As this test can take a while extend the next frontier check
{
std::lock_guard<std::mutex> guard (node->active.mutex);
nano::lock_guard<std::mutex> guard (node->active.mutex);
node->active.next_frontier_check = std::chrono::steady_clock::now () + 7200s;
}

@ -586,7 +586,7 @@ TEST (confirmation_height, long_chains)
// As this test can take a while extend the next frontier check
{
std::lock_guard<std::mutex> guard (node->active.mutex);
nano::lock_guard<std::mutex> guard (node->active.mutex);
node->active.next_frontier_check = std::chrono::steady_clock::now () + 7200s;
}

@ -675,7 +675,7 @@ TEST (confirmation_height, prioritize_frontiers_overwrite)
// As this test can take a while extend the next frontier check
{
std::lock_guard<std::mutex> guard (node->active.mutex);
nano::lock_guard<std::mutex> guard (node->active.mutex);
node->active.next_frontier_check = std::chrono::steady_clock::now () + 7200s;
}
@ -26,7 +26,7 @@ while getopts 'hmcCkpvB:' OPT; do
exit 0
;;
m)
bootstrapArgs+=('--with-libraries=system,thread,log,filesystem,program_options')
bootstrapArgs+=('--with-libraries=system,thread,log,filesystem,program_options,context,fiber')
;;
c)
useClang='true'