Multithreaded interval utility

Piotr Wójcik 2025-01-04 14:26:51 +01:00
commit c833b02989
9 changed files with 30 additions and 9 deletions

View file

@@ -1,13 +1,14 @@
 #pragma once
 
 #include <chrono>
+#include <mutex>
 
 namespace nano
 {
 class interval
 {
 public:
-	bool elapsed (auto target)
+	bool elapse (auto target)
 	{
 		auto const now = std::chrono::steady_clock::now ();
 		if (now - last >= target)
@@ -21,4 +22,24 @@ public:
 private:
 	std::chrono::steady_clock::time_point last{ std::chrono::steady_clock::now () };
 };
+
+class interval_mt
+{
+public:
+	bool elapse (auto target)
+	{
+		std::lock_guard guard{ mutex };
+		auto const now = std::chrono::steady_clock::now ();
+		if (now - last >= target)
+		{
+			last = now;
+			return true;
+		}
+		return false;
+	}
+
+private:
+	std::mutex mutex;
+	std::chrono::steady_clock::time_point last{ std::chrono::steady_clock::now () };
+};
 }
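
Not part of the diff: a minimal usage sketch of the new utility. nano::interval rate-limits work on a single thread, while nano::interval_mt guards its timestamp with a std::mutex so one instance can be shared by several threads, which is what the http_callbacks change further down relies on. The include path is an assumption; everything else mirrors the API shown above.

#include <nano/lib/interval.hpp> // assumed location of the header changed above

#include <thread>
#include <vector>

using namespace std::chrono_literals;

int main ()
{
	// Single-threaded rate limiting: elapse () returns true at most once per period.
	nano::interval log_interval;
	if (log_interval.elapse (15s))
	{
		// emit a periodic log line
	}

	// Thread-safe variant: several workers can share one instance, and at most
	// one of them observes true per 15s window.
	nano::interval_mt warning_interval;
	std::vector<std::thread> workers;
	for (int i = 0; i < 4; ++i)
	{
		workers.emplace_back ([&warning_interval] {
			if (warning_interval.elapse (15s))
			{
				// emit a rate-limited warning
			}
		});
	}
	for (auto & worker : workers)
	{
		worker.join ();
	}
}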

View file

@@ -27,7 +27,7 @@ public:
 		nano::lock_guard<nano::mutex> guard{ mutex };
-		if (cleanup_interval.elapsed (cleanup_cutoff))
+		if (cleanup_interval.elapse (cleanup_cutoff))
 		{
 			cleanup ();
 		}

View file

@@ -223,7 +223,7 @@ void nano::block_processor::run ()
 			}
 		}
-		if (log_interval.elapsed (15s))
+		if (log_interval.elapse (15s))
 		{
 			logger.info (nano::log::type::block_processor, "{} blocks (+ {} forced) in processing queue",
 			queue.size (),

View file

@@ -727,7 +727,7 @@ void nano::bootstrap_service::cleanup_and_sync ()
 		tags_by_order.pop_front ();
 	}
-	if (sync_dependencies_interval.elapsed (60s))
+	if (sync_dependencies_interval.elapse (60s))
 	{
 		stats.inc (nano::stat::type::bootstrap, nano::stat::detail::sync_dependencies);
 		accounts.sync_dependencies ();

View file

@@ -127,7 +127,7 @@ void nano::local_block_broadcaster::run ()
 	{
 		stats.inc (nano::stat::type::local_block_broadcaster, nano::stat::detail::loop);
-		if (cleanup_interval.elapsed (config.cleanup_interval))
+		if (cleanup_interval.elapse (config.cleanup_interval))
 		{
 			cleanup (lock);
 			debug_assert (lock.owns_lock ());

View file

@@ -53,9 +53,8 @@ void nano::http_callbacks::setup_callbacks ()
 		stats.inc (nano::stat::type::http_callbacks_notified, nano::stat::detail::block_confirmed);
 		constexpr size_t warning_threshold = 10000;
-		static nano::interval warning_interval;
-		if (workers.queued_tasks () > warning_threshold && warning_interval.elapsed (15s))
+		if (workers.queued_tasks () > warning_threshold && warning_interval.elapse (15s))
 		{
 			stats.inc (nano::stat::type::http_callbacks, nano::stat::detail::large_backlog);
 			logger.warn (nano::log::type::http_callbacks, "Backlog of {} http callback notifications to process", workers.queued_tasks ());

View file

@@ -28,5 +28,6 @@ private:
 	void do_rpc_callback (boost::asio::ip::tcp::resolver::iterator i_a, std::string const &, uint16_t, std::shared_ptr<std::string> const &, std::shared_ptr<std::string> const &, std::shared_ptr<boost::asio::ip::tcp::resolver> const &);
 	nano::thread_pool workers;
+	nano::interval_mt warning_interval;
 };
 }

View file

@@ -362,7 +362,7 @@ asio::awaitable<void> nano::transport::tcp_listener::wait_available_slots () const
 	nano::interval log_interval;
 	while (connection_count () >= config.max_inbound_connections && !stopped)
 	{
-		if (log_interval.elapsed (node.network_params.network.is_dev_network () ? 1s : 15s))
+		if (log_interval.elapse (node.network_params.network.is_dev_network () ? 1s : 15s))
 		{
 			logger.warn (nano::log::type::tcp_listener, "Waiting for available slots to accept new connections (current: {} / max: {})",
 			connection_count (), config.max_inbound_connections);

View file

@@ -238,7 +238,7 @@ std::deque<nano::vote_cache::top_entry> nano::vote_cache::top (const nano::uint1
 {
 	nano::lock_guard<nano::mutex> lock{ mutex };
-	if (cleanup_interval.elapsed (config.age_cutoff / 2))
+	if (cleanup_interval.elapse (config.age_cutoff / 2))
 	{
 		cleanup ();
 	}