Lock before stopping when it is necessary to notify other threads (#2608)

This prevents a missed wake-up where all threads end up waiting and the system freezes. It is mostly important for tests, especially under sanitizers, but it could also happen when stopping the node normally.
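
The failure mode is the classic lost wake-up: a worker checks its exit flag under the mutex, sees it clear, and is about to block on the condition variable; if stop () sets the flag and notifies without holding that same mutex, the notification can fire inside that window and the worker then sleeps forever. A minimal standalone sketch of the broken and fixed patterns (names are illustrative, not taken from the node code):

#include <condition_variable>
#include <mutex>

std::mutex mutex;
std::condition_variable condition;
bool stopped{ false };

// Worker: blocks until stopped is set.
void worker ()
{
    std::unique_lock<std::mutex> lock (mutex);
    while (!stopped)
    {
        condition.wait (lock); // the race window is just before this call blocks
    }
}

// Broken: the flag is written and the notify fires without the mutex. The write
// is a data race (what the sanitizers report), and the notification can be lost
// if it lands between the worker's predicate check and its wait ().
void stop_broken ()
{
    stopped = true;
    condition.notify_all ();
}

// Fixed, the pattern this commit applies: set the flag while holding the mutex
// the worker waits with, then notify. The worker is either still before its
// predicate check (and will see stopped == true) or already blocked in wait ()
// (and will receive the notification).
void stop_fixed ()
{
    {
        std::lock_guard<std::mutex> guard (mutex);
        stopped = true;
    }
    condition.notify_all ();
}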

Port mapping requires an atomic bool (the on flag), while the write database queue no longer needs one (stopped) after this change.
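
The rule of thumb behind those two changes: a flag that is ever read or written without the mutex held (as the port mapping on flag is) has to be std::atomic<bool>, while a flag that is only ever touched with the mutex held, including inside the wait predicate (as the write queue's stopped becomes here), can stay a plain bool. A hedged sketch of the two cases (illustrative classes, not the node's own):

#include <atomic>
#include <condition_variable>
#include <mutex>

// Flag read outside the lock, for example from a status query on another
// thread: it must be atomic, otherwise the unsynchronized read is a data race.
class mapping_like
{
public:
    bool is_on () const
    {
        return on; // no lock taken here, so 'on' must be atomic
    }

private:
    std::atomic<bool> on{ false };
    std::mutex mutex;
};

// Flag only accessed with the mutex held, including inside the wait predicate:
// a plain bool is enough because the mutex already orders every access.
class queue_like
{
public:
    void stop ()
    {
        {
            std::lock_guard<std::mutex> guard (mutex);
            stopped = true;
        }
        cv.notify_all ();
    }

    void wait_until_stopped ()
    {
        std::unique_lock<std::mutex> lock (mutex);
        cv.wait (lock, [this] { return stopped; });
    }

private:
    std::mutex mutex;
    std::condition_variable cv;
    bool stopped{ false };
};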

Other small related bootstrap changes are included in this PR per Serg's suggestion.
Guilherme Lawless 2020-02-28 09:31:45 +00:00 committed by GitHub
commit 78d12342ba
7 changed files with 33 additions and 20 deletions

View file

@@ -202,9 +202,11 @@ bool nano::bootstrap_attempt_legacy::consume_future (std::future<bool> & future_
 void nano::bootstrap_attempt_legacy::stop ()
 {
-    stopped = true;
-    condition.notify_all ();
+    nano::unique_lock<std::mutex> lock (mutex);
+    stopped = true;
+    lock.unlock ();
+    condition.notify_all ();
+    lock.lock ();
     if (auto i = frontiers.lock ())
     {
         try
@@ -478,7 +480,7 @@ bool nano::bootstrap_attempt_legacy::request_frontier (nano::unique_lock<std::mu
     lock_a.unlock ();
     auto connection_l (node->bootstrap_initiator.connections->connection (shared_from_this (), first_attempt));
     lock_a.lock ();
-    if (connection_l)
+    if (connection_l && !stopped)
     {
         endpoint_frontier_request = connection_l->channel->get_tcp_endpoint ();
         std::future<bool> future;

View file

@@ -76,7 +76,8 @@ std::shared_ptr<nano::bootstrap_client> nano::bootstrap_connections::connection
     if (result == nullptr && connections_count == 0 && new_connections_empty && attempt_a != nullptr)
     {
         node.logger.try_log (boost::str (boost::format ("Bootstrap attempt stopped because there are no peers")));
-        attempt_a->stopped = true;
+        lock.unlock ();
+        attempt_a->stop ();
     }
     return result;
 }
@@ -440,19 +441,22 @@ void nano::bootstrap_connections::requeue_pull (nano::pull_info const & pull_a,
 void nano::bootstrap_connections::clear_pulls (uint64_t bootstrap_id_a)
 {
-    nano::lock_guard<std::mutex> lock (mutex);
-    auto i (pulls.begin ());
-    while (i != pulls.end ())
-    {
-        if (i->bootstrap_id == bootstrap_id_a)
+    {
+        nano::lock_guard<std::mutex> lock (mutex);
+        auto i (pulls.begin ());
+        while (i != pulls.end ())
         {
-            i = pulls.erase (i);
-        }
-        else
-        {
-            ++i;
+            if (i->bootstrap_id == bootstrap_id_a)
+            {
+                i = pulls.erase (i);
+            }
+            else
+            {
+                ++i;
+            }
         }
     }
+    condition.notify_all ();
 }

 void nano::bootstrap_connections::run ()
@@ -477,9 +481,11 @@ void nano::bootstrap_connections::run ()
 void nano::bootstrap_connections::stop ()
 {
+    nano::unique_lock<std::mutex> lock (mutex);
     stopped = true;
+    lock.unlock ();
     condition.notify_all ();
-    nano::lock_guard<std::mutex> lock (mutex);
+    lock.lock ();
     for (auto i : clients)
     {
         if (auto client = i.lock ())

View file

@@ -519,7 +519,7 @@ void nano::bootstrap_attempt_wallet::request_pending (nano::unique_lock<std::mut
     lock_a.unlock ();
     auto connection_l (node->bootstrap_initiator.connections->connection (shared_from_this ()));
     lock_a.lock ();
-    if (connection_l)
+    if (connection_l && !stopped)
     {
         auto account (wallet_accounts.front ());
         wallet_accounts.pop_front ();

View file

@@ -34,7 +34,10 @@ nano::confirmation_height_processor::~confirmation_height_processor ()
 void nano::confirmation_height_processor::stop ()
 {
-    stopped = true;
+    {
+        nano::lock_guard<std::mutex> guard (mutex);
+        stopped = true;
+    }
     condition.notify_one ();
     if (thread.joinable ())
     {

View file

@@ -59,7 +59,7 @@ private:
     boost::asio::ip::address_v4 address;
     std::array<mapping_protocol, 2> protocols;
     uint64_t check_count{ 0 };
-    bool on{ false };
+    std::atomic<bool> on{ false };
     std::mutex mutex;
 };
 }

View file

@@ -77,6 +77,9 @@ nano::write_guard nano::write_database_queue::pop ()
 void nano::write_database_queue::stop ()
 {
-    stopped = true;
+    {
+        nano::lock_guard<std::mutex> guard (mutex);
+        stopped = true;
+    }
     cv.notify_all ();
 }

View file

@@ -2,7 +2,6 @@
 #include <nano/lib/locks.hpp>
-#include <atomic>
 #include <condition_variable>
 #include <deque>
 #include <functional>
@@ -53,6 +52,6 @@ private:
     std::mutex mutex;
     nano::condition_variable cv;
     std::function<void()> guard_finish_callback;
-    std::atomic<bool> stopped{ false };
+    bool stopped{ false };
 };
 }