Moving responsibility for trimming the active_transactions container into the container itself rather than the election scheduler. (#4235)

clemahieu 2023-05-18 19:57:38 +01:00 committed by GitHub
commit d570e645fa
4 changed files with 20 additions and 20 deletions
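
For context, a minimal sketch of the ownership change this commit makes (illustrative only: the struct, its members, and the 5000 limit are assumptions for the example, not the real nano::active_transactions API). Previously the election scheduler's run loop policed the container's size and erased the oldest elections when it was overfilled; after this change, the container trims itself as part of insertion.

```cpp
// Illustrative sketch only; nothing here is the real nano interface.
#include <cstddef>
#include <cstdio>
#include <deque>

struct active_container_sketch
{
	std::size_t capacity{ 5000 };	// assumed AEC limit
	std::deque<int> elections;	// oldest election at the front

	void insert (int root)
	{
		elections.push_back (root);
		trim (); // the container enforces its own bound on every insert
	}

	void trim ()
	{
		// Tolerate up to 125% of capacity before erasing the oldest elections
		while (elections.size () > capacity + capacity / 4)
		{
			elections.pop_front (); // stands in for erase_oldest ()
		}
	}
};

int main ()
{
	active_container_sketch aec;
	for (int root = 0; root < 7000; ++root)
	{
		aec.insert (root);
	}
	std::printf ("%zu elections retained (limit %zu)\n", aec.elections.size (), aec.capacity);
}
```

Keeping the bound next to the data it protects means every insertion path (priority, hinted, manual) gets the same enforcement without each caller re-implementing the check.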

@@ -372,6 +372,20 @@ nano::election_insertion_result nano::active_transactions::insert (const std::sh
 	return result;
 }
 
+void nano::active_transactions::trim ()
+{
+	/*
+	 * Both normal and hinted election schedulers are well-behaved, meaning they first check for AEC vacancy before inserting new elections.
+	 * However, it is possible that AEC will be temporarily overfilled in case it's running at full capacity and election hinting or manual queue kicks in.
+	 * That case will lead to unwanted churning of elections, so this allows for AEC to be overfilled to 125% until erasing of elections happens.
+	 */
+	while (vacancy () < -(limit () / 4))
+	{
+		node.stats.inc (nano::stat::type::active, nano::stat::detail::erase_oldest);
+		erase_oldest ();
+	}
+}
+
 nano::election_insertion_result nano::active_transactions::insert_impl (nano::unique_lock<nano::mutex> & lock_a, std::shared_ptr<nano::block> const & block_a, nano::election_behavior election_behavior_a, std::function<void (std::shared_ptr<nano::block> const &)> const & confirmation_action_a)
 {
 	debug_assert (!mutex.try_lock ());
@@ -425,6 +439,7 @@ nano::election_insertion_result nano::active_transactions::insert_impl (nano::un
 		{
 			result.election->broadcast_vote ();
 		}
+		trim ();
 	}
 	return result;
 }
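
As a quick sanity check on the new condition in trim (): the numbers below are assumed (the real limit comes from node configuration), and the lambda only mimics the sign convention of vacancy (), which goes negative once the container is over its limit. With a limit of 5000, limit () / 4 is 1250, so trimming starts once the container holds more than 6250 elections, i.e. 125% of capacity.

```cpp
// Standalone arithmetic check, not code from this commit.
#include <cstdio>

int main ()
{
	long const limit = 5000;	// assumed capacity
	long size = 6400;	// assume hinted/manual insertions overfilled the container
	auto vacancy = [&] { return limit - size; };	// negative when overfilled

	while (vacancy () < -(limit / 4))	// same shape as the loop in trim ()
	{
		--size; // stands in for erase_oldest ()
	}
	std::printf ("trimming stops at %ld elections, i.e. 125%% of the limit\n", size);
}
```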

@@ -182,6 +182,8 @@ public:
 	void remove_election_winner_details (nano::block_hash const &);
 
 private:
+	// Erase elections if we're over capacity
+	void trim ();
 	// Call action with confirmed block, may be different than what we started with
 	nano::election_insertion_result insert_impl (nano::unique_lock<nano::mutex> &, std::shared_ptr<nano::block> const &, nano::election_behavior = nano::election_behavior::normal, std::function<void (std::shared_ptr<nano::block> const &)> const & = nullptr);
 	void request_loop ();

@@ -114,36 +114,20 @@ bool nano::election_scheduler::manual_queue_predicate () const
 	return !manual_queue.empty ();
 }
 
-bool nano::election_scheduler::overfill_predicate () const
-{
-	/*
-	 * Both normal and hinted election schedulers are well-behaved, meaning they first check for AEC vacancy before inserting new elections.
-	 * However, it is possible that AEC will be temporarily overfilled in case it's running at full capacity and election hinting or manual queue kicks in.
-	 * That case will lead to unwanted churning of elections, so this allows for AEC to be overfilled to 125% until erasing of elections happens.
-	 */
-	return node.active.vacancy () < -(node.active.limit () / 4);
-}
-
 void nano::election_scheduler::run ()
 {
 	nano::unique_lock<nano::mutex> lock{ mutex };
 	while (!stopped)
 	{
 		condition.wait (lock, [this] () {
-			return stopped || priority_queue_predicate () || manual_queue_predicate () || overfill_predicate ();
+			return stopped || priority_queue_predicate () || manual_queue_predicate ();
 		});
 		debug_assert ((std::this_thread::yield (), true)); // Introduce some random delay in debug builds
 		if (!stopped)
 		{
 			stats.inc (nano::stat::type::election_scheduler, nano::stat::detail::loop);
 
-			if (overfill_predicate ())
-			{
-				lock.unlock ();
-				stats.inc (nano::stat::type::election_scheduler, nano::stat::detail::erase_oldest);
-				node.active.erase_oldest ();
-			}
-			else if (manual_queue_predicate ())
+			if (manual_queue_predicate ())
 			{
 				auto const [block, previous_balance, election_behavior] = manual_queue.front ();
 				manual_queue.pop_front ();
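
With the overfill branch gone, run () is back to a plain predicate-driven wait loop. For readers less familiar with the pattern, here is a self-contained sketch using only standard library types; none of the names below are the real election_scheduler members.

```cpp
// Generic condition-variable worker loop; placeholder names, not nano classes.
#include <condition_variable>
#include <deque>
#include <iostream>
#include <mutex>
#include <thread>

int main ()
{
	std::mutex mutex;
	std::condition_variable condition;
	std::deque<int> manual_queue;
	bool stopped = false;

	std::thread worker{ [&] {
		std::unique_lock<std::mutex> lock{ mutex };
		while (!stopped)
		{
			// Sleep until there is queued work or a stop request, mirroring the
			// simplified predicate in election_scheduler::run ()
			condition.wait (lock, [&] { return stopped || !manual_queue.empty (); });
			if (!stopped && !manual_queue.empty ())
			{
				auto block = manual_queue.front ();
				manual_queue.pop_front ();
				std::cout << "activating " << block << '\n';
			}
		}
	} };

	{
		std::lock_guard<std::mutex> guard{ mutex };
		manual_queue.push_back (42);
	}
	condition.notify_all ();

	{
		std::lock_guard<std::mutex> guard{ mutex };
		stopped = true;
	}
	condition.notify_all ();
	worker.join ();
}
```

The scheduler thread now wakes only for real work; capacity enforcement happens synchronously on the insertion path instead.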

@@ -50,7 +50,6 @@ private:
 	bool empty_locked () const;
 	bool priority_queue_predicate () const;
 	bool manual_queue_predicate () const;
-	bool overfill_predicate () const;
 
 	nano::prioritization priority;
@@ -60,4 +59,4 @@ private:
 	mutable nano::mutex mutex;
 	std::thread thread;
 };
-}
+}