From d41789d5770719f64c1854367f66b6eae9c449ac Mon Sep 17 00:00:00 2001 From: Thiago Silva <82097354+thsfs@users.noreply.github.com> Date: Wed, 3 Nov 2021 20:48:38 -0300 Subject: [PATCH] Separate the logic for sub-network limiting on outgoing connections (#90) * Makes the disabling flags for max ip/subnetwork coherent --- nano/core_test/network.cpp | 10 +++++--- nano/lib/stats.cpp | 6 +++++ nano/lib/stats.hpp | 2 ++ nano/node/transport/tcp.cpp | 48 ++++++++++++++++++++++++++++--------- nano/node/transport/tcp.hpp | 4 +++- nano/node/transport/udp.cpp | 42 +++++++++++++++++++++++++------- nano/node/transport/udp.hpp | 4 +++- 7 files changed, 91 insertions(+), 25 deletions(-) diff --git a/nano/core_test/network.cpp b/nano/core_test/network.cpp index 8dda64b8..37e35b89 100644 --- a/nano/core_test/network.cpp +++ b/nano/core_test/network.cpp @@ -1,3 +1,4 @@ +#include #include #include #include @@ -918,7 +919,10 @@ namespace transport { TEST (network, peer_max_tcp_attempts_subnetwork) { - nano::system system (1); + nano::node_flags node_flags; + node_flags.disable_max_peers_per_ip = true; + nano::system system; + system.add_node (node_flags); auto node (system.nodes[0]); for (auto i (0); i < node->network_params.network.max_peers_per_subnetwork; ++i) { @@ -927,9 +931,9 @@ namespace transport ASSERT_FALSE (node->network.tcp_channels.reachout (endpoint)); } ASSERT_EQ (0, node->network.size ()); - ASSERT_EQ (0, node->stats.count (nano::stat::type::tcp, nano::stat::detail::tcp_max_per_ip, nano::stat::dir::out)); + ASSERT_EQ (0, node->stats.count (nano::stat::type::tcp, nano::stat::detail::tcp_max_per_subnetwork, nano::stat::dir::out)); ASSERT_TRUE (node->network.tcp_channels.reachout (nano::endpoint (boost::asio::ip::make_address_v6 ("::ffff:127.0.0.1"), nano::get_available_port ()))); - ASSERT_EQ (1, node->stats.count (nano::stat::type::tcp, nano::stat::detail::tcp_max_per_ip, nano::stat::dir::out)); + ASSERT_EQ (1, node->stats.count (nano::stat::type::tcp, 
nano::stat::detail::tcp_max_per_subnetwork, nano::stat::dir::out)); } } } diff --git a/nano/lib/stats.cpp b/nano/lib/stats.cpp index 74fe6358..3325ba0d 100644 --- a/nano/lib/stats.cpp +++ b/nano/lib/stats.cpp @@ -817,6 +817,12 @@ std::string nano::stat::detail_to_string (uint32_t key) case nano::stat::detail::outdated_version: res = "outdated_version"; break; + case nano::stat::detail::udp_max_per_ip: + res = "udp_max_per_ip"; + break; + case nano::stat::detail::udp_max_per_subnetwork: + res = "udp_max_per_subnetwork"; + break; case nano::stat::detail::blocks_confirmed: res = "blocks_confirmed"; break; diff --git a/nano/lib/stats.hpp b/nano/lib/stats.hpp index 6002aa52..3eed7d40 100644 --- a/nano/lib/stats.hpp +++ b/nano/lib/stats.hpp @@ -337,6 +337,8 @@ public: invalid_telemetry_req_message, invalid_telemetry_ack_message, outdated_version, + udp_max_per_ip, + udp_max_per_subnetwork, // tcp tcp_accept_success, diff --git a/nano/node/transport/tcp.cpp b/nano/node/transport/tcp.cpp index 75ba49de..d5bfffef 100644 --- a/nano/node/transport/tcp.cpp +++ b/nano/node/transport/tcp.cpp @@ -362,17 +362,17 @@ void nano::transport::tcp_channels::stop () bool nano::transport::tcp_channels::max_ip_connections (nano::tcp_endpoint const & endpoint_a) { - bool result (false); - if (!node.flags.disable_max_peers_per_ip) + if (node.flags.disable_max_peers_per_ip) { - auto const address (nano::transport::ipv4_address_or_ipv6_subnet (endpoint_a.address ())); - auto const subnet (nano::transport::map_address_to_subnetwork (endpoint_a.address ())); - nano::unique_lock lock (mutex); - result = channels.get ().count (address) >= node.network_params.network.max_peers_per_ip || channels.get ().count (subnet) >= node.network_params.network.max_peers_per_subnetwork; - if (!result) - { - result = attempts.get ().count (address) >= node.network_params.network.max_peers_per_ip || attempts.get ().count (subnet) >= node.network_params.network.max_peers_per_subnetwork; - } + return false; + } + 
bool result{ false }; + auto const address (nano::transport::ipv4_address_or_ipv6_subnet (endpoint_a.address ())); + nano::unique_lock lock (mutex); + result = channels.get ().count (address) >= node.network_params.network.max_peers_per_ip; + if (!result) + { + result = attempts.get ().count (address) >= node.network_params.network.max_peers_per_ip; } if (result) { @@ -381,11 +381,37 @@ bool nano::transport::tcp_channels::max_ip_connections (nano::tcp_endpoint const return result; } +bool nano::transport::tcp_channels::max_subnetwork_connections (nano::tcp_endpoint const & endpoint_a) +{ + if (node.flags.disable_max_peers_per_subnetwork) + { + return false; + } + bool result{ false }; + auto const subnet (nano::transport::map_address_to_subnetwork (endpoint_a.address ())); + nano::unique_lock lock (mutex); + result = channels.get ().count (subnet) >= node.network_params.network.max_peers_per_subnetwork; + if (!result) + { + result = attempts.get ().count (subnet) >= node.network_params.network.max_peers_per_subnetwork; + } + if (result) + { + node.stats.inc (nano::stat::type::tcp, nano::stat::detail::tcp_max_per_subnetwork, nano::stat::dir::out); + } + return result; +} + +bool nano::transport::tcp_channels::max_ip_or_subnetwork_connections (nano::tcp_endpoint const & endpoint_a) +{ + return max_ip_connections (endpoint_a) || max_subnetwork_connections (endpoint_a); +} + bool nano::transport::tcp_channels::reachout (nano::endpoint const & endpoint_a) { auto tcp_endpoint (nano::transport::map_endpoint_to_tcp (endpoint_a)); // Don't overload single IP - bool error = node.network.excluded_peers.check (tcp_endpoint) || max_ip_connections (tcp_endpoint); + bool error = node.network.excluded_peers.check (tcp_endpoint) || max_ip_or_subnetwork_connections (tcp_endpoint); if (!error && !node.flags.disable_tcp_realtime) { // Don't keepalive to nodes that already sent us something diff --git a/nano/node/transport/tcp.hpp b/nano/node/transport/tcp.hpp index 611791b5..353f35de 
100644 --- a/nano/node/transport/tcp.hpp +++ b/nano/node/transport/tcp.hpp @@ -91,7 +91,9 @@ namespace transport void stop (); void process_messages (); void process_message (nano::message const &, nano::tcp_endpoint const &, nano::account const &, std::shared_ptr const &); - bool max_ip_connections (nano::tcp_endpoint const &); + bool max_ip_connections (nano::tcp_endpoint const & endpoint_a); + bool max_subnetwork_connections (nano::tcp_endpoint const & endpoint_a); + bool max_ip_or_subnetwork_connections (nano::tcp_endpoint const & endpoint_a); // Should we reach out to this endpoint with a keepalive message bool reachout (nano::endpoint const &); std::unique_ptr collect_container_info (std::string const &); diff --git a/nano/node/transport/udp.cpp b/nano/node/transport/udp.cpp index 9b4885ba..8013e1a8 100644 --- a/nano/node/transport/udp.cpp +++ b/nano/node/transport/udp.cpp @@ -100,7 +100,7 @@ std::shared_ptr nano::transport::udp_channels::ins { debug_assert (endpoint_a.address ().is_v6 ()); std::shared_ptr result; - if (!node.network.not_a_peer (endpoint_a, node.config.allow_local_peers) && (node.network_params.network.is_dev_network () || !max_ip_connections (endpoint_a))) + if (!node.network.not_a_peer (endpoint_a, node.config.allow_local_peers) && (node.network_params.network.is_dev_network () || !max_ip_or_subnetwork_connections (endpoint_a))) { nano::unique_lock lock (mutex); auto existing (channels.get ().find (endpoint_a)); @@ -373,7 +373,7 @@ public: } void keepalive (nano::keepalive const & message_a) override { - if (!node.network.udp_channels.max_ip_connections (endpoint)) + if (!node.network.udp_channels.max_ip_or_subnetwork_connections (endpoint)) { auto cookie (node.network.syn_cookies.assign (endpoint)); if (cookie) @@ -630,21 +630,45 @@ std::shared_ptr nano::transport::udp_channels::create bool nano::transport::udp_channels::max_ip_connections (nano::endpoint const & endpoint_a) { - bool result (false); - if 
(!node.flags.disable_max_peers_per_ip) + if (node.flags.disable_max_peers_per_ip) { - auto const address (nano::transport::ipv4_address_or_ipv6_subnet (endpoint_a.address ())); - auto const subnet (nano::transport::map_address_to_subnetwork (endpoint_a.address ())); - nano::unique_lock lock (mutex); - result = channels.get ().count (address) >= node.network_params.network.max_peers_per_ip || channels.get ().count (subnet) >= node.network_params.network.max_peers_per_subnetwork; + return false; + } + auto const address (nano::transport::ipv4_address_or_ipv6_subnet (endpoint_a.address ())); + nano::unique_lock lock (mutex); + auto const result = channels.get ().count (address) >= node.network_params.network.max_peers_per_ip; + if (result) + { + node.stats.inc (nano::stat::type::udp, nano::stat::detail::udp_max_per_ip, nano::stat::dir::out); + } return result; } +bool nano::transport::udp_channels::max_subnetwork_connections (nano::endpoint const & endpoint_a) +{ + if (node.flags.disable_max_peers_per_subnetwork) + { + return false; + } + auto const subnet (nano::transport::map_address_to_subnetwork (endpoint_a.address ())); + nano::unique_lock lock (mutex); + auto const result = channels.get ().count (subnet) >= node.network_params.network.max_peers_per_subnetwork; + if (result) + { + node.stats.inc (nano::stat::type::udp, nano::stat::detail::udp_max_per_subnetwork, nano::stat::dir::out); + } + return result; +} + +bool nano::transport::udp_channels::max_ip_or_subnetwork_connections (nano::endpoint const & endpoint_a) +{ + return max_ip_connections (endpoint_a) || max_subnetwork_connections (endpoint_a); +} + bool nano::transport::udp_channels::reachout (nano::endpoint const & endpoint_a) { // Don't overload single IP - bool error = max_ip_connections (endpoint_a); + bool error = max_ip_or_subnetwork_connections (endpoint_a); if (!error && !node.flags.disable_udp) { auto endpoint_l (nano::transport::map_endpoint_to_v6 (endpoint_a)); diff --git 
a/nano/node/transport/udp.hpp b/nano/node/transport/udp.hpp index 6eb0e9bc..25e88e56 100644 --- a/nano/node/transport/udp.hpp +++ b/nano/node/transport/udp.hpp @@ -96,7 +96,9 @@ namespace transport void receive_action (nano::message_buffer *); void process_packets (); std::shared_ptr create (nano::endpoint const &); - bool max_ip_connections (nano::endpoint const &); + bool max_ip_connections (nano::endpoint const & endpoint_a); + bool max_subnetwork_connections (nano::endpoint const & endpoint_a); + bool max_ip_or_subnetwork_connections (nano::endpoint const & endpoint_a); // Should we reach out to this endpoint with a keepalive message bool reachout (nano::endpoint const &); std::unique_ptr collect_container_info (std::string const &);