Separate the logic for sub-network limiting on outgoing connections (#90)

* Makes the disabling flags for max IP / max subnetwork peers coherent

This commit is contained in:
parent 82a7fcf45b
commit d41789d577

7 changed files with 91 additions and 25 deletions
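Background for the diff: previously a single max_ip_connections check covered both the per-IP and the per-subnetwork limits on outgoing connections, but it was gated only on the disable_max_peers_per_ip flag, so disable_max_peers_per_subnetwork had no effect on that path. This commit splits the logic into max_ip_connections and max_subnetwork_connections, each honouring its own disable flag and reporting through its own out-direction stat counter, and combines them in max_ip_or_subnetwork_connections at the call sites (reachout, insert, keepalive). Below is a minimal standalone sketch of the resulting control flow; the flags_t/limits_t structs and the raw counts are hypothetical stand-ins for node.flags and the node's multi-indexed channel containers, not the actual classes:

#include <cstddef>
#include <iostream>

// Hypothetical stand-ins for node.flags and network_params.network; the real
// code counts entries in multi-indexed channel/attempt containers under a mutex.
struct flags_t
{
	bool disable_max_peers_per_ip{ false };
	bool disable_max_peers_per_subnetwork{ false };
};
struct limits_t
{
	std::size_t max_peers_per_ip{ 5 };
	std::size_t max_peers_per_subnetwork{ 16 };
};

// Returns true when the per-IP limit blocks a new outgoing connection;
// its own flag, and only its own flag, bypasses it.
bool max_ip_connections (flags_t const & flags, limits_t const & limits, std::size_t peers_on_ip)
{
	if (flags.disable_max_peers_per_ip)
	{
		return false;
	}
	return peers_on_ip >= limits.max_peers_per_ip;
}

// Same shape for the subnetwork limit, gated by its own flag.
bool max_subnetwork_connections (flags_t const & flags, limits_t const & limits, std::size_t peers_on_subnet)
{
	if (flags.disable_max_peers_per_subnetwork)
	{
		return false;
	}
	return peers_on_subnet >= limits.max_peers_per_subnetwork;
}

// The combinator the call sites now use.
bool max_ip_or_subnetwork_connections (flags_t const & flags, limits_t const & limits, std::size_t peers_on_ip, std::size_t peers_on_subnet)
{
	return max_ip_connections (flags, limits, peers_on_ip) || max_subnetwork_connections (flags, limits, peers_on_subnet);
}

int main ()
{
	flags_t flags;
	flags.disable_max_peers_per_ip = true; // the setup the updated test uses
	limits_t limits;
	// Per-IP count over its limit, but the per-IP check is disabled: allowed.
	std::cout << max_ip_or_subnetwork_connections (flags, limits, 10, 3) << '\n'; // 0
	// Subnetwork count at its limit: still blocked.
	std::cout << max_ip_or_subnetwork_connections (flags, limits, 10, 16) << '\n'; // 1
}

Keeping each flag inside its own predicate means a disabled check can never mask or trigger the other limit, which is the coherence the commit message refers to.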
nano/core_test/network.cpp

@@ -1,3 +1,4 @@
+#include <nano/node/nodeconfig.hpp>
 #include <nano/node/transport/udp.hpp>
 #include <nano/test_common/network.hpp>
 #include <nano/test_common/system.hpp>
@@ -918,7 +919,10 @@ namespace transport
 {
 	TEST (network, peer_max_tcp_attempts_subnetwork)
 	{
-		nano::system system (1);
+		nano::node_flags node_flags;
+		node_flags.disable_max_peers_per_ip = true;
+		nano::system system;
+		system.add_node (node_flags);
 		auto node (system.nodes[0]);
 		for (auto i (0); i < node->network_params.network.max_peers_per_subnetwork; ++i)
 		{
@@ -927,9 +931,9 @@ namespace transport
 			ASSERT_FALSE (node->network.tcp_channels.reachout (endpoint));
 		}
 		ASSERT_EQ (0, node->network.size ());
-		ASSERT_EQ (0, node->stats.count (nano::stat::type::tcp, nano::stat::detail::tcp_max_per_ip, nano::stat::dir::out));
+		ASSERT_EQ (0, node->stats.count (nano::stat::type::tcp, nano::stat::detail::tcp_max_per_subnetwork, nano::stat::dir::out));
 		ASSERT_TRUE (node->network.tcp_channels.reachout (nano::endpoint (boost::asio::ip::make_address_v6 ("::ffff:127.0.0.1"), nano::get_available_port ())));
-		ASSERT_EQ (1, node->stats.count (nano::stat::type::tcp, nano::stat::detail::tcp_max_per_ip, nano::stat::dir::out));
+		ASSERT_EQ (1, node->stats.count (nano::stat::type::tcp, nano::stat::detail::tcp_max_per_subnetwork, nano::stat::dir::out));
 	}
 }
 }
nano/lib/stats.cpp

@@ -817,6 +817,12 @@ std::string nano::stat::detail_to_string (uint32_t key)
 		case nano::stat::detail::outdated_version:
 			res = "outdated_version";
 			break;
+		case nano::stat::detail::udp_max_per_ip:
+			res = "udp_max_per_ip";
+			break;
+		case nano::stat::detail::udp_max_per_subnetwork:
+			res = "udp_max_per_subnetwork";
+			break;
 		case nano::stat::detail::blocks_confirmed:
 			res = "blocks_confirmed";
 			break;
nano/lib/stats.hpp

@@ -337,6 +337,8 @@ public:
 		invalid_telemetry_req_message,
 		invalid_telemetry_ack_message,
 		outdated_version,
+		udp_max_per_ip,
+		udp_max_per_subnetwork,
 
 		// tcp
 		tcp_accept_success,
nano/node/transport/tcp.cpp

@@ -362,17 +362,17 @@ void nano::transport::tcp_channels::stop ()
 
 bool nano::transport::tcp_channels::max_ip_connections (nano::tcp_endpoint const & endpoint_a)
 {
-	bool result (false);
-	if (!node.flags.disable_max_peers_per_ip)
+	if (node.flags.disable_max_peers_per_ip)
 	{
-		auto const address (nano::transport::ipv4_address_or_ipv6_subnet (endpoint_a.address ()));
-		auto const subnet (nano::transport::map_address_to_subnetwork (endpoint_a.address ()));
-		nano::unique_lock<nano::mutex> lock (mutex);
-		result = channels.get<ip_address_tag> ().count (address) >= node.network_params.network.max_peers_per_ip || channels.get<subnetwork_tag> ().count (subnet) >= node.network_params.network.max_peers_per_subnetwork;
-		if (!result)
-		{
-			result = attempts.get<ip_address_tag> ().count (address) >= node.network_params.network.max_peers_per_ip || attempts.get<subnetwork_tag> ().count (subnet) >= node.network_params.network.max_peers_per_subnetwork;
-		}
+		return false;
 	}
+	bool result{ false };
+	auto const address (nano::transport::ipv4_address_or_ipv6_subnet (endpoint_a.address ()));
+	nano::unique_lock<nano::mutex> lock (mutex);
+	result = channels.get<ip_address_tag> ().count (address) >= node.network_params.network.max_peers_per_ip;
+	if (!result)
+	{
+		result = attempts.get<ip_address_tag> ().count (address) >= node.network_params.network.max_peers_per_ip;
+	}
 	if (result)
 	{
@@ -381,11 +381,37 @@ bool nano::transport::tcp_channels::max_ip_connections (nano::tcp_endpoint const
 	return result;
 }
 
+bool nano::transport::tcp_channels::max_subnetwork_connections (nano::tcp_endpoint const & endpoint_a)
+{
+	if (node.flags.disable_max_peers_per_subnetwork)
+	{
+		return false;
+	}
+	bool result{ false };
+	auto const subnet (nano::transport::map_address_to_subnetwork (endpoint_a.address ()));
+	nano::unique_lock<nano::mutex> lock (mutex);
+	result = channels.get<subnetwork_tag> ().count (subnet) >= node.network_params.network.max_peers_per_subnetwork;
+	if (!result)
+	{
+		result = attempts.get<subnetwork_tag> ().count (subnet) >= node.network_params.network.max_peers_per_subnetwork;
+	}
+	if (result)
+	{
+		node.stats.inc (nano::stat::type::tcp, nano::stat::detail::tcp_max_per_subnetwork, nano::stat::dir::out);
+	}
+	return result;
+}
+
+bool nano::transport::tcp_channels::max_ip_or_subnetwork_connections (nano::tcp_endpoint const & endpoint_a)
+{
+	return max_ip_connections (endpoint_a) || max_subnetwork_connections (endpoint_a);
+}
+
 bool nano::transport::tcp_channels::reachout (nano::endpoint const & endpoint_a)
 {
 	auto tcp_endpoint (nano::transport::map_endpoint_to_tcp (endpoint_a));
 	// Don't overload single IP
-	bool error = node.network.excluded_peers.check (tcp_endpoint) || max_ip_connections (tcp_endpoint);
+	bool error = node.network.excluded_peers.check (tcp_endpoint) || max_ip_or_subnetwork_connections (tcp_endpoint);
 	if (!error && !node.flags.disable_tcp_realtime)
 	{
 		// Don't keepalive to nodes that already sent us something
nano/node/transport/tcp.hpp

@@ -91,7 +91,9 @@
 	void stop ();
 	void process_messages ();
 	void process_message (nano::message const &, nano::tcp_endpoint const &, nano::account const &, std::shared_ptr<nano::socket> const &);
-	bool max_ip_connections (nano::tcp_endpoint const &);
+	bool max_ip_connections (nano::tcp_endpoint const & endpoint_a);
+	bool max_subnetwork_connections (nano::tcp_endpoint const & endpoint_a);
+	bool max_ip_or_subnetwork_connections (nano::tcp_endpoint const & endpoint_a);
 	// Should we reach out to this endpoint with a keepalive message
 	bool reachout (nano::endpoint const &);
 	std::unique_ptr<container_info_component> collect_container_info (std::string const &);
nano/node/transport/udp.cpp

@@ -100,7 +100,7 @@ std::shared_ptr<nano::transport::channel_udp> nano::transport::udp_channels::ins
 {
 	debug_assert (endpoint_a.address ().is_v6 ());
 	std::shared_ptr<nano::transport::channel_udp> result;
-	if (!node.network.not_a_peer (endpoint_a, node.config.allow_local_peers) && (node.network_params.network.is_dev_network () || !max_ip_connections (endpoint_a)))
+	if (!node.network.not_a_peer (endpoint_a, node.config.allow_local_peers) && (node.network_params.network.is_dev_network () || !max_ip_or_subnetwork_connections (endpoint_a)))
 	{
 		nano::unique_lock<nano::mutex> lock (mutex);
 		auto existing (channels.get<endpoint_tag> ().find (endpoint_a));
@@ -373,7 +373,7 @@
 	}
 	void keepalive (nano::keepalive const & message_a) override
 	{
-		if (!node.network.udp_channels.max_ip_connections (endpoint))
+		if (!node.network.udp_channels.max_ip_or_subnetwork_connections (endpoint))
 		{
 			auto cookie (node.network.syn_cookies.assign (endpoint));
 			if (cookie)
@@ -630,21 +630,45 @@ std::shared_ptr<nano::transport::channel> nano::transport::udp_channels::create
 
 bool nano::transport::udp_channels::max_ip_connections (nano::endpoint const & endpoint_a)
 {
-	bool result (false);
-	if (!node.flags.disable_max_peers_per_ip)
+	if (node.flags.disable_max_peers_per_ip)
 	{
-		auto const address (nano::transport::ipv4_address_or_ipv6_subnet (endpoint_a.address ()));
-		auto const subnet (nano::transport::map_address_to_subnetwork (endpoint_a.address ()));
-		nano::unique_lock<nano::mutex> lock (mutex);
-		result = channels.get<ip_address_tag> ().count (address) >= node.network_params.network.max_peers_per_ip || channels.get<subnetwork_tag> ().count (subnet) >= node.network_params.network.max_peers_per_subnetwork;
+		return false;
 	}
+	auto const address (nano::transport::ipv4_address_or_ipv6_subnet (endpoint_a.address ()));
+	nano::unique_lock<nano::mutex> lock (mutex);
+	auto const result = channels.get<ip_address_tag> ().count (address) >= node.network_params.network.max_peers_per_ip;
+	if (result)
+	{
+		node.stats.inc (nano::stat::type::udp, nano::stat::detail::udp_max_per_ip, nano::stat::dir::out);
+	}
 	return result;
 }
 
+bool nano::transport::udp_channels::max_subnetwork_connections (nano::endpoint const & endpoint_a)
+{
+	if (node.flags.disable_max_peers_per_subnetwork)
+	{
+		return false;
+	}
+	auto const subnet (nano::transport::map_address_to_subnetwork (endpoint_a.address ()));
+	nano::unique_lock<nano::mutex> lock (mutex);
+	auto const result = channels.get<subnetwork_tag> ().count (subnet) >= node.network_params.network.max_peers_per_subnetwork;
+	if (result)
+	{
+		node.stats.inc (nano::stat::type::udp, nano::stat::detail::udp_max_per_subnetwork, nano::stat::dir::out);
+	}
+	return result;
+}
+
+bool nano::transport::udp_channels::max_ip_or_subnetwork_connections (nano::endpoint const & endpoint_a)
+{
+	return max_ip_connections (endpoint_a) || max_subnetwork_connections (endpoint_a);
+}
+
 bool nano::transport::udp_channels::reachout (nano::endpoint const & endpoint_a)
 {
 	// Don't overload single IP
-	bool error = max_ip_connections (endpoint_a);
+	bool error = max_ip_or_subnetwork_connections (endpoint_a);
 	if (!error && !node.flags.disable_udp)
 	{
 		auto endpoint_l (nano::transport::map_endpoint_to_v6 (endpoint_a));
nano/node/transport/udp.hpp

@@ -96,7 +96,9 @@
 	void receive_action (nano::message_buffer *);
 	void process_packets ();
 	std::shared_ptr<nano::transport::channel> create (nano::endpoint const &);
-	bool max_ip_connections (nano::endpoint const &);
+	bool max_ip_connections (nano::endpoint const & endpoint_a);
+	bool max_subnetwork_connections (nano::endpoint const & endpoint_a);
+	bool max_ip_or_subnetwork_connections (nano::endpoint const & endpoint_a);
 	// Should we reach out to this endpoint with a keepalive message
 	bool reachout (nano::endpoint const &);
 	std::unique_ptr<container_info_component> collect_container_info (std::string const &);
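Since the split also gives each limit its own out-direction counter (tcp_max_per_ip / tcp_max_per_subnetwork, plus the new udp_max_per_ip / udp_max_per_subnetwork stats above), tests can assert exactly which limit fired. A hypothetical companion sketch in the same gtest style, reusing only helpers that appear in the diff (nano::system, add_node, node_flags, stats.count); it checks only the flag wiring, not the limit behaviour itself:

#include <nano/node/nodeconfig.hpp>
#include <nano/test_common/system.hpp>

#include <gtest/gtest.h>

// Hypothetical test name; mirrors the setup of peer_max_tcp_attempts_subnetwork.
TEST (network, max_peer_flags_are_independent)
{
	nano::node_flags node_flags;
	node_flags.disable_max_peers_per_ip = true; // bypasses max_ip_connections only
	nano::system system;
	auto node = system.add_node (node_flags);
	// The subnetwork limit stays enforced and keeps its own counter.
	ASSERT_FALSE (node->flags.disable_max_peers_per_subnetwork);
	ASSERT_EQ (0, node->stats.count (nano::stat::type::tcp, nano::stat::detail::tcp_max_per_subnetwork, nano::stat::dir::out));
}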