Directly configurable rate limit

This commit is contained in:
Piotr Wójcik 2024-11-26 19:37:03 +01:00
commit 59b053287d
3 changed files with 11 additions and 10 deletions

View file

@@ -199,7 +199,7 @@ TEST (toml, daemon_config_deserialize_defaults)
 ASSERT_EQ (conf.node.max_unchecked_blocks, defaults.node.max_unchecked_blocks);
 ASSERT_EQ (conf.node.backlog_scan.enable, defaults.node.backlog_scan.enable);
 ASSERT_EQ (conf.node.backlog_scan.batch_size, defaults.node.backlog_scan.batch_size);
-ASSERT_EQ (conf.node.backlog_scan.frequency, defaults.node.backlog_scan.frequency);
+ASSERT_EQ (conf.node.backlog_scan.rate_limit, defaults.node.backlog_scan.rate_limit);
 ASSERT_EQ (conf.node.enable_upnp, defaults.node.enable_upnp);
 ASSERT_EQ (conf.node.websocket_config.enabled, defaults.node.websocket_config.enabled);
@@ -468,7 +468,7 @@ TEST (toml, daemon_config_deserialize_no_defaults)
 [node.backlog_scan]
 enable = false
 batch_size = 999
-frequency = 999
+rate_limit = 999

 [node.block_processor]
 max_peer_queue = 999
@@ -706,7 +706,7 @@ TEST (toml, daemon_config_deserialize_no_defaults)
 ASSERT_NE (conf.node.request_aggregator_threads, defaults.node.request_aggregator_threads);
 ASSERT_NE (conf.node.backlog_scan.enable, defaults.node.backlog_scan.enable);
 ASSERT_NE (conf.node.backlog_scan.batch_size, defaults.node.backlog_scan.batch_size);
-ASSERT_NE (conf.node.backlog_scan.frequency, defaults.node.backlog_scan.frequency);
+ASSERT_NE (conf.node.backlog_scan.rate_limit, defaults.node.backlog_scan.rate_limit);
 ASSERT_NE (conf.node.enable_upnp, defaults.node.enable_upnp);
 ASSERT_NE (conf.node.websocket_config.enabled, defaults.node.websocket_config.enabled);

View file

@@ -12,7 +12,7 @@ nano::backlog_scan::backlog_scan (backlog_scan_config const & config_a, nano::le
 	config{ config_a },
 	ledger{ ledger_a },
 	stats{ stats_a },
-	limiter{ config.batch_size * config.frequency }
+	limiter{ config.rate_limit }
 {
 }
@@ -93,7 +93,8 @@ void nano::backlog_scan::populate_backlog (nano::unique_lock<nano::mutex> & lock
 	// Wait for the rate limiter
 	while (!limiter.should_pass (config.batch_size))
 	{
-		condition.wait_for (lock, std::chrono::milliseconds{ 1000 / config.frequency / 2 });
+		std::chrono::milliseconds const wait_time{ 1000 / std::max ((config.rate_limit / config.batch_size), size_t{ 1 }) / 2 };
+		condition.wait_for (lock, std::max (wait_time, 10ms));
 		if (stopped)
 		{
 			return;
@@ -158,8 +159,8 @@ nano::container_info nano::backlog_scan::container_info () const
 nano::error nano::backlog_scan_config::serialize (nano::tomlconfig & toml) const
 {
 	toml.put ("enable", enable, "Control if ongoing backlog population is enabled. If not, backlog population can still be triggered by RPC \ntype:bool");
-	toml.put ("batch_size", batch_size, "Number of accounts per second to process when doing backlog population scan. Increasing this value will help unconfirmed frontiers get into election prioritization queue faster, however it will also increase resource usage. \ntype:uint");
-	toml.put ("frequency", frequency, "Number of batches to process per second. Higher frequency and smaller batch size helps to utilize resources more uniformly, however it also introduces more overhead. Use 0 to process as fast as possible, but be aware that it may consume a lot of resources. \ntype:uint");
+	toml.put ("batch_size", batch_size, "Size of a single batch. Larger batches reduce overhead, but may put more pressure on other node components. \ntype:uint");
+	toml.put ("rate_limit", rate_limit, "Number of accounts per second to process when doing backlog population scan. Increasing this value will help unconfirmed frontiers get into election prioritization queue faster. Use 0 to process as fast as possible, but be aware that it may consume a lot of resources. \ntype:uint");
 	return toml.get_error ();
 }
@@ -168,7 +169,7 @@ nano::error nano::backlog_scan_config::deserialize (nano::tomlconfig & toml)
 {
 	toml.get ("enable", enable);
 	toml.get ("batch_size", batch_size);
-	toml.get ("frequency", frequency);
+	toml.get ("rate_limit", rate_limit);
 	return toml.get_error ();
 }

View file

@@ -23,10 +23,10 @@ public:
 public:
 	/** Control if ongoing backlog population is enabled. If not, backlog population can still be triggered by RPC */
 	bool enable{ true };
+	/** Number of accounts to scan per second. */
+	size_t rate_limit{ 10000 };
 	/** Number of accounts per second to process. */
 	size_t batch_size{ 1000 };
-	/** Number of batches to run per second. */
-	size_t frequency{ 10 };
 };

class backlog_scan final