Block processor cleanup (#4165)

* Removing unused function.
* Removing duplicate functionality in block_processor::add_local.
* Removing usages of unchecked_info in block_processor.
clemahieu 2023-03-02 11:33:28 +00:00 committed by GitHub
commit 97abf643c8
6 changed files with 28 additions and 75 deletions
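
The change these diffs make is mechanical: block_processor::add now takes the std::shared_ptr<nano::block> directly, call sites no longer wrap the block in a nano::unchecked_info first, and callers of add_local switch to add. Below is a minimal, hypothetical sketch of that calling pattern (simplified stand-in types, not the real nano API):

```cpp
// Hypothetical sketch only; `block` and `block_processor` are simplified stand-ins.
#include <deque>
#include <memory>

struct block
{
	// stand-in for nano::block
};

class block_processor
{
public:
	// After this PR a single add () overload takes the block directly;
	// the unchecked_info overload and add_local () are removed.
	void add (std::shared_ptr<block> const & block_a)
	{
		blocks.emplace_back (block_a);
	}

private:
	std::deque<std::shared_ptr<block>> blocks; // was std::deque<nano::unchecked_info>
};

int main ()
{
	block_processor processor;
	auto b = std::make_shared<block> ();
	// Before: nano::unchecked_info info (b); processor.add (info);
	processor.add (b); // now: pass the shared_ptr directly
	return 0;
}
```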

@@ -1793,7 +1793,7 @@ int main (int argc, char * const * argv)
auto begin (std::chrono::high_resolution_clock::now ());
uint64_t block_count (0);
size_t count (0);
std::deque<nano::unchecked_info> epoch_open_blocks;
std::deque<std::shared_ptr<nano::block>> epoch_open_blocks;
{
auto node_flags = nano::inactive_node_flag_defaults ();
nano::update_flags (node_flags, vm);
@@ -1819,12 +1819,11 @@ int main (int argc, char * const * argv)
{
std::cout << boost::str (boost::format ("%1% blocks retrieved") % count) << std::endl;
}
nano::unchecked_info unchecked_info (block);
node.node->block_processor.add (unchecked_info);
node.node->block_processor.add (block);
if (block->type () == nano::block_type::state && block->previous ().is_zero () && source_node->ledger.is_epoch_link (block->link ()))
{
// Epoch open blocks can be rejected without processed pending blocks to account, push it later again
epoch_open_blocks.push_back (unchecked_info);
epoch_open_blocks.push_back (block);
}
// Retrieving previous block hash
hash = block->previous ();
@@ -1839,9 +1838,9 @@ int main (int argc, char * const * argv)
// Add epoch open blocks again if required
if (node.node->block_processor.size () == 0)
{
for (auto & unchecked_info : epoch_open_blocks)
for (auto & block : epoch_open_blocks)
{
node.node->block_processor.add (unchecked_info);
node.node->block_processor.add (block);
}
}
// Message each 60 seconds

@@ -89,27 +89,18 @@ bool nano::block_processor::half_full ()
return size () >= node.flags.block_processor_full_size / 2;
}
void nano::block_processor::add (std::shared_ptr<nano::block> const & block_a)
{
nano::unchecked_info info (block_a);
add (info);
}
void nano::block_processor::add (nano::unchecked_info const & info_a)
void nano::block_processor::add (std::shared_ptr<nano::block> const & block)
{
if (full ())
{
node.stats.inc (nano::stat::type::blockprocessor, nano::stat::detail::overfill);
return;
}
if (node.network_params.work.validate_entry (*info_a.block)) // true => error
if (node.network_params.work.validate_entry (*block)) // true => error
{
node.stats.inc (nano::stat::type::blockprocessor, nano::stat::detail::insufficient_work);
return;
}
auto const & block = info_a.block;
if (block->type () == nano::block_type::state || block->type () == nano::block_type::open)
{
state_block_signature_verification.add ({ block });
@@ -118,28 +109,12 @@ void nano::block_processor::add (nano::unchecked_info const & info_a)
{
{
nano::lock_guard<nano::mutex> guard{ mutex };
blocks.emplace_back (info_a);
blocks.emplace_back (block);
}
condition.notify_all ();
}
}
void nano::block_processor::add_local (nano::unchecked_info const & info_a)
{
if (full ())
{
node.stats.inc (nano::stat::type::blockprocessor, nano::stat::detail::overfill);
return;
}
if (node.network_params.work.validate_entry (*info_a.block)) // true => error
{
node.stats.inc (nano::stat::type::blockprocessor, nano::stat::detail::insufficient_work);
return;
}
state_block_signature_verification.add ({ info_a.block });
}
void nano::block_processor::force (std::shared_ptr<nano::block> const & block_a)
{
{
@@ -252,27 +227,27 @@ void nano::block_processor::process_batch (nano::unique_lock<nano::mutex> & lock
{
node.logger.always_log (boost::str (boost::format ("%1% blocks (+ %2% state blocks) (+ %3% forced) in processing queue") % blocks.size () % state_block_signature_verification.size () % forced.size ()));
}
nano::unchecked_info info;
std::shared_ptr<nano::block> block;
nano::block_hash hash (0);
bool force (false);
if (forced.empty ())
{
info = blocks.front ();
block = blocks.front ();
blocks.pop_front ();
hash = info.block->hash ();
hash = block->hash ();
}
else
{
info = nano::unchecked_info (forced.front ());
block = forced.front ();
forced.pop_front ();
hash = info.block->hash ();
hash = block->hash ();
force = true;
number_of_forced_processed++;
}
lock_a.unlock ();
if (force)
{
auto successor (node.ledger.successor (transaction, info.block->qualified_root ()));
auto successor = node.ledger.successor (transaction, block->qualified_root ());
if (successor != nullptr && successor->hash () != hash)
{
// Replace our block with the winner and roll back any dependent blocks
@@ -303,7 +278,7 @@ void nano::block_processor::process_batch (nano::unique_lock<nano::mutex> & lock
}
}
number_of_blocks_processed++;
process_one (transaction, post_events, info, force);
process_one (transaction, post_events, block, force);
lock_a.lock ();
}
awaiting_write = false;
@@ -343,13 +318,12 @@ void nano::block_processor::process_live (nano::transaction const & transaction_
}
}
nano::process_return nano::block_processor::process_one (nano::write_transaction const & transaction_a, block_post_events & events_a, nano::unchecked_info info_a, bool const forced_a, nano::block_origin const origin_a)
nano::process_return nano::block_processor::process_one (nano::write_transaction const & transaction_a, block_post_events & events_a, std::shared_ptr<nano::block> block, bool const forced_a, nano::block_origin const origin_a)
{
nano::process_return result;
auto block (info_a.block);
auto hash (block->hash ());
result = node.ledger.process (transaction_a, *block);
events_a.events.emplace_back ([this, result, block = info_a.block] (nano::transaction const & tx) {
events_a.events.emplace_back ([this, result, block] (nano::transaction const & tx) {
processed.notify (tx, result, *block);
});
switch (result.code)
@@ -362,7 +336,7 @@ nano::process_return nano::block_processor::process_one (nano::write_transaction
block->serialize_json (block_string, node.config.logging.single_line_record ());
node.logger.try_log (boost::str (boost::format ("Processing block %1%: %2%") % hash.to_string () % block_string));
}
events_a.events.emplace_back ([this, hash, block = info_a.block, result, origin_a] (nano::transaction const & post_event_transaction_a) {
events_a.events.emplace_back ([this, hash, block, result, origin_a] (nano::transaction const & post_event_transaction_a) {
process_live (post_event_transaction_a, hash, block, result, origin_a);
});
queue_unchecked (transaction_a, hash);
@@ -383,10 +357,7 @@ nano::process_return nano::block_processor::process_one (nano::write_transaction
{
node.logger.try_log (boost::str (boost::format ("Gap previous for: %1%") % hash.to_string ()));
}
debug_assert (info_a.modified () != 0);
node.unchecked.put (block->previous (), info_a);
node.unchecked.put (block->previous (), block);
events_a.events.emplace_back ([this, hash] (nano::transaction const & /* unused */) { this->node.gap_cache.add (hash); });
node.stats.inc (nano::stat::type::ledger, nano::stat::detail::gap_previous);
break;
@@ -397,10 +368,7 @@ nano::process_return nano::block_processor::process_one (nano::write_transaction
{
node.logger.try_log (boost::str (boost::format ("Gap source for: %1%") % hash.to_string ()));
}
debug_assert (info_a.modified () != 0);
node.unchecked.put (node.ledger.block_source (transaction_a, *(block)), info_a);
node.unchecked.put (node.ledger.block_source (transaction_a, *block), block);
events_a.events.emplace_back ([this, hash] (nano::transaction const & /* unused */) { this->node.gap_cache.add (hash); });
node.stats.inc (nano::stat::type::ledger, nano::stat::detail::gap_source);
break;
@@ -411,10 +379,7 @@ nano::process_return nano::block_processor::process_one (nano::write_transaction
{
node.logger.try_log (boost::str (boost::format ("Gap pending entries for epoch open: %1%") % hash.to_string ()));
}
debug_assert (info_a.modified () != 0);
node.unchecked.put (block->account (), info_a); // Specific unchecked key starting with epoch open block account public key
node.unchecked.put (block->account (), block); // Specific unchecked key starting with epoch open block account public key
node.stats.inc (nano::stat::type::ledger, nano::stat::detail::gap_source);
break;
}
@@ -508,13 +473,6 @@ nano::process_return nano::block_processor::process_one (nano::write_transaction
return result;
}
nano::process_return nano::block_processor::process_one (nano::write_transaction const & transaction_a, block_post_events & events_a, std::shared_ptr<nano::block> const & block_a)
{
nano::unchecked_info info (block_a);
auto result (process_one (transaction_a, events_a, info));
return result;
}
void nano::block_processor::queue_unchecked (nano::write_transaction const & transaction_a, nano::hash_or_account const & hash_or_account_a)
{
node.unchecked.trigger (hash_or_account_a);

@@ -46,8 +46,6 @@ public:
std::size_t size ();
bool full ();
bool half_full ();
void add_local (nano::unchecked_info const & info_a);
void add (nano::unchecked_info const &);
void add (std::shared_ptr<nano::block> const &);
void force (std::shared_ptr<nano::block> const &);
void wait_write ();
@@ -55,8 +53,8 @@ public:
bool have_blocks_ready ();
bool have_blocks ();
void process_blocks ();
nano::process_return process_one (nano::write_transaction const &, block_post_events &, nano::unchecked_info, bool const = false, nano::block_origin const = nano::block_origin::remote);
nano::process_return process_one (nano::write_transaction const &, block_post_events &, std::shared_ptr<nano::block> const &);
nano::process_return process_one (nano::write_transaction const &, block_post_events &, std::shared_ptr<nano::block> block, bool const = false, nano::block_origin const = nano::block_origin::remote);
std::atomic<bool> flushing{ false };
// Delay required for average network propagation before requesting confirmation
static std::chrono::milliseconds constexpr confirmation_request_delay{ 1500 };
@@ -71,7 +69,7 @@ private:
bool active{ false };
bool awaiting_write{ false };
std::chrono::steady_clock::time_point next_log;
std::deque<nano::unchecked_info> blocks;
std::deque<std::shared_ptr<nano::block>> blocks;
std::deque<std::shared_ptr<nano::block>> forced;
nano::condition_variable condition;
nano::node & node;

@@ -113,8 +113,7 @@ bool nano::bootstrap_attempt::process_block (std::shared_ptr<nano::block> const
}
else
{
nano::unchecked_info info (block_a);
node->block_processor.add (info);
node->block_processor.add (block_a);
}
return stop_pull;
}

@@ -266,8 +266,7 @@ bool nano::bootstrap_attempt_lazy::process_block_lazy (std::shared_ptr<nano::blo
}
lazy_block_state_backlog_check (block_a, hash);
lock.unlock ();
nano::unchecked_info info (block_a);
node->block_processor.add (info);
node->block_processor.add (block_a);
}
// Force drop lazy bootstrap connection for long bulk_pull
if (pull_blocks_processed > max_blocks)

@@ -208,7 +208,7 @@ nano::node::node (boost::asio::io_context & io_ctx_a, boost::filesystem::path co
{
unchecked.use_memory = [this] () { return ledger.bootstrap_weight_reached (); };
unchecked.satisfied = [this] (nano::unchecked_info const & info) {
this->block_processor.add (info);
this->block_processor.add (info.block);
};
inactive_vote_cache.rep_weight_query = [this] (nano::account const & rep) {
@@ -614,7 +614,7 @@ void nano::node::process_local_async (std::shared_ptr<nano::block> const & block
// Add block hash as recently arrived to trigger automatic rebroadcast and election
block_arrival.add (block_a->hash ());
// Set current time to trigger automatic rebroadcast and election
block_processor.add_local (block_a);
block_processor.add (block_a);
}
void nano::node::start ()