Clean up unused broadcast_confirm_req variants (#4440)

This commit is contained in:
Piotr Wójcik 2024-02-19 17:48:36 +01:00 committed by GitHub
commit cd770d7a3a
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
2 changed files with 0 additions and 125 deletions

View file

@ -226,127 +226,6 @@ void nano::network::send_confirm_req (std::shared_ptr<nano::transport::channel>
channel_a->send (req);
}
// Request confirmation votes for a single block.
// Targets the known representative endpoints; when none are known, or their
// combined weight is below the online-representatives delta, falls back to a
// bounded random sample of all connected peers. The final target list is
// shuffled and capped at 32 channels per invocation — the caller is
// responsible for calling again if votes do not arrive in time.
void nano::network::broadcast_confirm_req (std::shared_ptr<nano::block> const & block_a)
{
	auto targets = std::make_shared<std::vector<std::shared_ptr<nano::transport::channel>>> (node.rep_crawler.representative_endpoints (std::numeric_limits<std::size_t>::max ()));
	bool const insufficient_reps = targets->empty () || node.rep_crawler.total_weight () < node.online_reps.delta ();
	if (insufficient_reps)
	{
		// Not enough representative weight online: broadcast the request to a
		// random peer sample instead (2 * sqrt (peer count), at most 100).
		auto sample = node.network.list (std::min<std::size_t> (100, node.network.fanout (2.0)));
		targets->assign (sample.begin (), sample.end ());
	}
	// In either case, limit each instance to a single random up-to-32
	// selection; the invoker retries if the block's votes are late.
	std::size_t constexpr max_endpoints = 32;
	nano::random_pool_shuffle (targets->begin (), targets->end ());
	if (targets->size () > max_endpoints)
	{
		targets->resize (max_endpoints);
	}
	broadcast_confirm_req_base (block_a, targets, 0);
}
// Send a confirm_req for block_a to up to 10 channels, consuming them from
// the back of endpoints_a. If channels remain afterwards, reschedules itself
// on the worker pool after delay_a plus a random jitter, with
// resumption = true on the continuation.
void nano::network::broadcast_confirm_req_base (std::shared_ptr<nano::block> const & block_a, std::shared_ptr<std::vector<std::shared_ptr<nano::transport::channel>>> const & endpoints_a, unsigned delay_a, bool resumption)
{
	std::size_t const max_reps = 10;
	for (std::size_t sent = 0; sent < max_reps && !endpoints_a->empty (); ++sent)
	{
		auto channel = endpoints_a->back ();
		endpoints_a->pop_back ();
		send_confirm_req (channel, std::make_pair (block_a->hash (), block_a->root ().as_block_hash ()));
	}
	if (!endpoints_a->empty ())
	{
		// Random jitter spreads retransmissions out over time.
		delay_a += std::rand () % broadcast_interval_ms;
		std::weak_ptr<nano::node> node_w = node.shared ();
		auto resume = [node_w, block_a, endpoints_a, delay_a] () {
			// Weak capture: drop the continuation if the node is gone.
			if (auto node_l = node_w.lock ())
			{
				node_l->network.broadcast_confirm_req_base (block_a, endpoints_a, delay_a, true);
			}
		};
		node.workers.add_timed_task (std::chrono::steady_clock::now () + std::chrono::milliseconds (delay_a), resume);
	}
}
// Send batched confirm_req messages: for each channel in request_bundle_a,
// drain up to confirm_req_hashes_max hash/root pairs into one request and
// send it. Channels with work remaining are kept in the bundle and the whole
// function is rescheduled after delay_a ms; once the bundle is empty,
// callback_a (if any) is invoked.
void nano::network::broadcast_confirm_req_batched_many (std::unordered_map<std::shared_ptr<nano::transport::channel>, std::deque<std::pair<nano::block_hash, nano::root>>> request_bundle_a, std::function<void ()> callback_a, unsigned delay_a, bool resumption_a)
{
	for (auto current = request_bundle_a.begin (); current != request_bundle_a.end ();)
	{
		// Limit each request to confirm_req_hashes_max (hash, root) pairs,
		// consumed from the front — the deque is ordered by priority, descending.
		std::vector<std::pair<nano::block_hash, nano::root>> batch;
		while (batch.size () < confirm_req_hashes_max && !current->second.empty ())
		{
			batch.push_back (current->second.front ());
			current->second.pop_front ();
		}
		nano::confirm_req req{ node.network_params.network, batch };
		current->first->send (req);
		// Fully drained channels drop out of the bundle; unordered_map::erase
		// invalidates only the erased iterator, so iteration stays valid.
		current = current->second.empty () ? request_bundle_a.erase (current) : std::next (current);
	}
	if (!request_bundle_a.empty ())
	{
		// Work remains: resume with the reduced bundle after delay_a ms.
		std::weak_ptr<nano::node> node_w = node.shared ();
		node.workers.add_timed_task (std::chrono::steady_clock::now () + std::chrono::milliseconds (delay_a), [node_w, request_bundle_a, callback_a, delay_a] () {
			if (auto node_l = node_w.lock ())
			{
				node_l->network.broadcast_confirm_req_batched_many (request_bundle_a, callback_a, delay_a, true);
			}
		});
	}
	else if (callback_a)
	{
		callback_a ();
	}
}
// Broadcast confirm_req for a queue of (block, target-channels) pairs, one
// pair per invocation. Remaining pairs are rescheduled with a random delay
// between delay_a and 2 * delay_a; when the queue is exhausted, callback_a
// (if any) is invoked.
void nano::network::broadcast_confirm_req_many (std::deque<std::pair<std::shared_ptr<nano::block>, std::shared_ptr<std::vector<std::shared_ptr<nano::transport::channel>>>>> requests_a, std::function<void ()> callback_a, unsigned delay_a)
{
	// Guard: the original code called front () on requests_a unconditionally,
	// which is undefined behaviour for an empty deque. Treat an empty queue as
	// "all work done" and complete immediately.
	if (requests_a.empty ())
	{
		if (callback_a)
		{
			callback_a ();
		}
		return;
	}
	auto pair_l (requests_a.front ());
	requests_a.pop_front ();
	auto block_l (pair_l.first);
	// confirm_req to representatives
	auto endpoints (pair_l.second);
	if (!endpoints->empty ())
	{
		broadcast_confirm_req_base (block_l, endpoints, delay_a);
	}
	/* Continue while blocks remain
	Broadcast with random delay between delay_a & 2*delay_a */
	if (!requests_a.empty ())
	{
		// Guard: std::rand () % 0 is undefined behaviour; skip the jitter when
		// delay_a is zero (callers normally pass broadcast_interval_ms).
		unsigned const jitter = delay_a > 0 ? static_cast<unsigned> (std::rand ()) % delay_a : 0;
		std::weak_ptr<nano::node> node_w (node.shared ());
		node.workers.add_timed_task (std::chrono::steady_clock::now () + std::chrono::milliseconds (delay_a + jitter), [node_w, requests_a, callback_a, delay_a] () {
			if (auto node_l = node_w.lock ())
			{
				node_l->network.broadcast_confirm_req_many (requests_a, callback_a, delay_a);
			}
		});
	}
	else if (callback_a)
	{
		callback_a ();
	}
}
namespace
{
class network_message_visitor : public nano::message_visitor

View file

@ -96,10 +96,6 @@ public:
void send_keepalive_self (std::shared_ptr<nano::transport::channel> const &);
void send_node_id_handshake (std::shared_ptr<nano::transport::channel> const &, std::optional<nano::uint256_union> const & cookie, std::optional<nano::uint256_union> const & respond_to);
void send_confirm_req (std::shared_ptr<nano::transport::channel> const & channel_a, std::pair<nano::block_hash, nano::block_hash> const & hash_root_a);
void broadcast_confirm_req (std::shared_ptr<nano::block> const &);
void broadcast_confirm_req_base (std::shared_ptr<nano::block> const &, std::shared_ptr<std::vector<std::shared_ptr<nano::transport::channel>>> const &, unsigned, bool = false);
void broadcast_confirm_req_batched_many (std::unordered_map<std::shared_ptr<nano::transport::channel>, std::deque<std::pair<nano::block_hash, nano::root>>>, std::function<void ()> = nullptr, unsigned = broadcast_interval_ms, bool = false);
void broadcast_confirm_req_many (std::deque<std::pair<std::shared_ptr<nano::block>, std::shared_ptr<std::vector<std::shared_ptr<nano::transport::channel>>>>>, std::function<void ()> = nullptr, unsigned = broadcast_interval_ms);
std::shared_ptr<nano::transport::channel> find_node_id (nano::account const &);
std::shared_ptr<nano::transport::channel> find_channel (nano::endpoint const &);
bool not_a_peer (nano::endpoint const &, bool);