Remove live updating bandwidth limits (#4219)

* Rewrite and split the bandwidth limiter tests so they no longer rely on set_bandwidth_params, which TSAN flagged as problematic.

* Remove live updating of bandwidth parameters, which TSAN reports as a data race. Live config updating was never fully implemented, so it is removed until a complete solution exists (an illustrative sketch of the flagged pattern follows the commit summary below).
clemahieu 2023-04-24 21:02:01 +01:00 committed by GitHub
commit a8d8d2899e
4 changed files with 29 additions and 40 deletions
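For context, the following is a minimal, hypothetical sketch of the pattern this commit removes, not nano's actual code: one thread rewrites the bandwidth parameters while another thread reads them with no synchronization. The node_sketch and node_config_sketch types and the within_limit helper are invented for illustration only; built with -fsanitize=thread, a program like this produces a ThreadSanitizer data-race report.

// Illustrative sketch only -- not nano's actual classes. It mirrors the shape
// of the removed code: set_bandwidth_params rewrites config fields while
// another thread reads them, with no synchronization, which is the kind of
// access pattern ThreadSanitizer reports as a data race.
#include <cstddef>
#include <thread>

struct node_config_sketch
{
    std::size_t bandwidth_limit{ 10 * 1024 * 1024 };
    double bandwidth_limit_burst_ratio{ 3.0 };
};

struct node_sketch
{
    node_config_sketch config;

    // Analogue of the removed nano::node::set_bandwidth_params: plain writes.
    void set_bandwidth_params (std::size_t limit, double ratio)
    {
        config.bandwidth_limit = limit; // unsynchronized write
        config.bandwidth_limit_burst_ratio = ratio; // unsynchronized write
    }

    // Analogue of a sender checking the outbound budget (invented helper).
    bool within_limit (std::size_t message_size) const
    {
        return message_size <= config.bandwidth_limit; // unsynchronized read
    }
};

int main ()
{
    node_sketch node;

    // "Live update" thread, e.g. what the removed SIGHUP handler triggered.
    std::thread updater ([&node] {
        for (int i = 0; i < 1000; ++i)
        {
            node.set_bandwidth_params (1024 * (i + 1), 1.1);
        }
    });

    // Network thread reading the limit concurrently.
    std::thread sender ([&node] {
        for (int i = 0; i < 1000; ++i)
        {
            (void)node.within_limit (512);
        }
    });

    updater.join ();
    sender.join ();
}

The commit avoids the race by treating the limits as construction-time configuration: the split tests below set node_config.bandwidth_limit and node_config.bandwidth_limit_burst_ratio before the node is created, rather than calling set_bandwidth_params on a running node.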


@@ -897,7 +897,7 @@ TEST (network, duplicate_revert_publish)
}
// The test must be completed in less than 1 second
TEST (network, bandwidth_limiter)
TEST (network, bandwidth_limiter_4_messages)
{
nano::test::system system;
nano::publish message{ nano::dev::network_params.network, nano::dev::genesis };
@@ -925,24 +925,46 @@ TEST (network, bandwidth_limiter)
// Send non-droppable message, i.e. drop stats should not increase
channel2.send (message, nullptr, nano::transport::buffer_drop_policy::no_limiter_drop);
ASSERT_TIMELY (1s, 1 == node.stats.count (nano::stat::type::drop, nano::stat::detail::publish, nano::stat::dir::out));
}
TEST (network, bandwidth_limiter_2_messages)
{
nano::test::system system;
nano::publish message{ nano::dev::network_params.network, nano::dev::genesis };
auto message_size = message.to_bytes ()->size ();
auto message_limit = 2; // must be multiple of the number of channels
nano::node_config node_config (nano::test::get_available_port (), system.logging);
node_config.bandwidth_limit = message_limit * message_size;
node_config.bandwidth_limit_burst_ratio = 1.0;
auto & node = *system.add_node (node_config);
nano::transport::inproc::channel channel1{ node, node };
nano::transport::inproc::channel channel2{ node, node };
// change the bandwidth settings, 2 packets will be dropped
node.set_bandwidth_params (message_size * 2, 1.1);
channel1.send (message);
channel2.send (message);
channel1.send (message);
channel2.send (message);
ASSERT_TIMELY (1s, 3 == node.stats.count (nano::stat::type::drop, nano::stat::detail::publish, nano::stat::dir::out));
ASSERT_TIMELY (1s, 2 == node.stats.count (nano::stat::type::drop, nano::stat::detail::publish, nano::stat::dir::out));
}
TEST (network, bandwidth_limiter_with_burst)
{
nano::test::system system;
nano::publish message{ nano::dev::network_params.network, nano::dev::genesis };
auto message_size = message.to_bytes ()->size ();
auto message_limit = 2; // must be multiple of the number of channels
nano::node_config node_config (nano::test::get_available_port (), system.logging);
node_config.bandwidth_limit = message_limit * message_size;
node_config.bandwidth_limit_burst_ratio = 4.0; // High burst
auto & node = *system.add_node (node_config);
nano::transport::inproc::channel channel1{ node, node };
nano::transport::inproc::channel channel2{ node, node };
// change the bandwidth settings, no packet will be dropped
node.set_bandwidth_params (message_size, 4);
channel1.send (message);
channel2.send (message);
channel1.send (message);
channel2.send (message);
ASSERT_TIMELY (1s, 3 == node.stats.count (nano::stat::type::drop, nano::stat::detail::publish, nano::stat::dir::out));
node.stop ();
ASSERT_TIMELY (1s, 0 == node.stats.count (nano::stat::type::drop, nano::stat::detail::publish, nano::stat::dir::out));
}
namespace nano


@@ -57,21 +57,6 @@ volatile sig_atomic_t sig_int_or_term = 0;
constexpr std::size_t OPEN_FILE_DESCRIPTORS_LIMIT = 16384;
}
static void load_and_set_bandwidth_params (std::shared_ptr<nano::node> const & node, boost::filesystem::path const & data_path, nano::node_flags const & flags)
{
nano::daemon_config config{ data_path, node->network_params };
auto error = nano::read_node_config_toml (data_path, config, flags.config_overrides);
if (!error)
{
error = nano::flags_config_conflicts (flags, config.node);
if (!error)
{
node->set_bandwidth_params (config.node.bandwidth_limit, config.node.bandwidth_limit_burst_ratio);
}
}
}
void nano_daemon::daemon::run (boost::filesystem::path const & data_path, nano::node_flags const & flags)
{
install_abort_signal_handler ();
@@ -212,15 +197,6 @@ void nano_daemon::daemon::run (boost::filesystem::path const & data_path, nano::node_flags const & flags)
// sigterm is less likely to come in bunches so only trap it once
sigman.register_signal_handler (SIGTERM, &nano::signal_handler, false);
#ifndef _WIN32
// on sighup we should reload the bandwidth parameters
std::function<void (int)> sighup_signal_handler ([&node, &data_path, &flags] (int signum) {
debug_assert (signum == SIGHUP);
load_and_set_bandwidth_params (node, data_path, flags);
});
sigman.register_signal_handler (SIGHUP, sighup_signal_handler, true);
#endif
runner = std::make_unique<nano::thread_runner> (io_ctx, node->config.io_threads);
runner->join ();


@@ -1438,14 +1438,6 @@ bool nano::node::init_error () const
return store.init_error () || wallets_store.init_error ();
}
void nano::node::set_bandwidth_params (std::size_t limit, double ratio)
{
config.bandwidth_limit_burst_ratio = ratio;
config.bandwidth_limit = limit;
outbound_limiter.reset (limit, ratio);
logger.always_log (boost::str (boost::format ("set_bandwidth_params(%1%, %2%)") % limit % ratio));
}
std::pair<uint64_t, decltype (nano::ledger::bootstrap_weights)> nano::node::get_bootstrap_weights () const
{
std::unordered_map<nano::account, nano::uint128_t> weights;


@@ -132,7 +132,6 @@ public:
void ongoing_online_weight_calculation_queue ();
bool online () const;
bool init_error () const;
void set_bandwidth_params (std::size_t limit, double ratio);
std::pair<uint64_t, decltype (nano::ledger::bootstrap_weights)> get_bootstrap_weights () const;
uint64_t get_confirmation_height (nano::transaction const &, nano::account &);
/*