Optimize stats (#4951)

commit 5a17165b2c
Piotr Wójcik, 2025-09-24 13:15:10 +02:00, committed by GitHub
3 changed files with 118 additions and 92 deletions

File 1/3: stats unit test

@@ -11,6 +11,11 @@ TEST (stats, counters)
 	nano::test::system system;
 	auto & node = *system.add_node ();
 
+	// Initial state
+	ASSERT_EQ (0, node.stats.count (nano::stat::type::ledger, nano::stat::dir::in));
+	ASSERT_EQ (0, node.stats.count (nano::stat::type::ledger, nano::stat::detail::test, nano::stat::dir::in));
+	ASSERT_EQ (0, node.stats.count (nano::stat::type::ledger, nano::stat::detail::send, nano::stat::dir::out));
+
 	node.stats.add (nano::stat::type::ledger, nano::stat::detail::test, nano::stat::dir::in, 1);
 	node.stats.add (nano::stat::type::ledger, nano::stat::detail::test, nano::stat::dir::in, 5);
 	node.stats.inc (nano::stat::type::ledger, nano::stat::detail::test, nano::stat::dir::in);
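The rest of the test is cut off by this hunk; a hypothetical continuation (assuming inc () increments by 1, its usual default) would assert the accumulated totals:

	// Hypothetical follow-up assertions: add (1) + add (5) + inc () accumulate
	// into the same (ledger, test, in) counter
	ASSERT_EQ (7, node.stats.count (nano::stat::type::ledger, nano::stat::detail::test, nano::stat::dir::in));
	// The (type, dir) overload sums across all details for the type and direction
	ASSERT_EQ (7, node.stats.count (nano::stat::type::ledger, nano::stat::dir::in));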

File 2/3: stats.cpp

@@ -15,8 +15,19 @@
 #include <fstream>
 #include <sstream>
 
+#include <magic_enum.hpp>
+
 using namespace std::chrono_literals;
 
+// Static assertions to ensure our predefined array sizes are sufficient for the enums
+// We check the _last value's integer value, which should be the highest valid index we need
+static_assert (magic_enum::enum_integer (nano::stat::type::_last) <= nano::stats::types_count,
+"stat::type enum has grown beyond the predefined array size. Increase types_count in stats.hpp");
+static_assert (magic_enum::enum_integer (nano::stat::detail::_last) <= nano::stats::details_count,
+"stat::detail enum has grown beyond the predefined array size. Increase details_count in stats.hpp");
+static_assert (magic_enum::enum_integer (nano::stat::dir::_last) <= nano::stats::dirs_count,
+"stat::dir enum has grown beyond the predefined array size. Increase dirs_count in stats.hpp");
+
 /*
  * stat_log_sink
  */
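The technique behind these asserts is worth a standalone look: magic_enum::enum_integer converts an enumerator to its underlying integer at compile time, so a trailing sentinel can be checked against a hand-picked array dimension. A minimal sketch with a toy enum (color and colors_count are hypothetical, not part of this codebase):

	#include <magic_enum.hpp>

	#include <cstddef>

	enum class color
	{
		red,
		green,
		blue,
		_last // Sentinel: its integer value equals the number of real enumerators
	};

	constexpr std::size_t colors_count = 4; // Hand-picked array dimension, as in stats.hpp

	// Compilation fails as soon as the enum outgrows the reserved dimension
	static_assert (magic_enum::enum_integer (color::_last) <= colors_count,
	"color enum has grown beyond the predefined array size");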
@@ -31,6 +42,8 @@ std::string nano::stat_log_sink::tm_to_string (tm & tm)
  */
 
 nano::stats::stats (nano::logger & logger_a, nano::stats_config config_a) :
+	counters_impl{ std::make_unique<counters_array_t> () },
+	counters{ *counters_impl },
 	config{ std::move (config_a) },
 	logger{ logger_a },
 	enable_logging{ is_stat_logging_enabled () }
@@ -71,21 +84,50 @@ void nano::stats::stop ()
 
 void nano::stats::clear ()
 {
-	std::lock_guard guard{ mutex };
-	counters.clear ();
-	samplers.clear ();
-	timestamp = std::chrono::steady_clock::now ();
+	// Clear all counters
+	for (auto & counter : counters)
+	{
+		counter.store (0, std::memory_order_relaxed);
+	}
+
+	// Clear samplers (still needs mutex)
+	{
+		std::lock_guard guard{ mutex };
+		samplers.clear ();
+		timestamp = std::chrono::steady_clock::now ();
+	}
+}
+
+size_t nano::stats::idx (stat::type type, stat::detail detail, stat::dir dir)
+{
+	// Dir is the slowest changing dimension, so it goes first for better cache locality
+	auto type_idx = magic_enum::enum_integer (type);
+	auto detail_idx = magic_enum::enum_integer (detail);
+	auto dir_idx = magic_enum::enum_integer (dir);
+	return (dir_idx * types_count * details_count) + (type_idx * details_count) + detail_idx;
+}
+
+std::atomic<nano::stats::counter_value_t> & nano::stats::counter_ref (stat::type type, stat::detail detail, stat::dir dir)
+{
+	auto index = idx (type, detail, dir);
+	debug_assert (index < counters.size ());
+	return counters[index];
+}
+
+std::atomic<nano::stats::counter_value_t> const & nano::stats::counter_ref (stat::type type, stat::detail detail, stat::dir dir) const
+{
+	auto index = idx (type, detail, dir);
+	debug_assert (index < counters.size ());
+	return counters[index];
 }
 
 void nano::stats::add (stat::type type, stat::detail detail, stat::dir dir, counter_value_t value, bool aggregate_all)
 {
 	debug_assert (type != stat::type::_invalid);
+	debug_assert (type != stat::type::_last);
 	debug_assert (detail != stat::detail::_invalid);
+	debug_assert (detail != stat::detail::_last);
+	debug_assert (dir != stat::dir::_last);
 
-	if (value == 0)
-	{
-		return;
-	}
-
 	if (enable_logging)
 	{
@@ -96,71 +138,30 @@ void nano::stats::add (stat::type type, stat::detail detail, stat::dir dir, counter_value_t value, bool aggregate_all)
 		value);
 	}
 
-	// Updates need to happen while holding the mutex
-	auto update_counter = [this, aggregate_all] (nano::stats::counter_key key, auto && updater) {
-		counter_key all_key{ key.type, stat::detail::all, key.dir };
-
-		// This is a two-step process to avoid exclusively locking the mutex in the common case
-		{
-			std::shared_lock lock{ mutex };
-
-			if (auto it = counters.find (key); it != counters.end ())
-			{
-				updater (*it->second);
-
-				if (aggregate_all && key != all_key)
-				{
-					auto it_all = counters.find (all_key);
-					release_assert (it_all != counters.end ()); // The `all` counter should always be created together
-					updater (*it_all->second); // Also update the `all` counter
-				}
-
-				return;
-			}
-		}
-
-		// Not found, create a new entry
-		{
-			std::unique_lock lock{ mutex };
-
-			// Insertions will be ignored if the key already exists
-			auto [it, inserted] = counters.emplace (key, std::make_unique<counter_entry> ());
-			updater (*it->second);
-
-			if (aggregate_all && key != all_key)
-			{
-				auto [it_all, inserted_all] = counters.emplace (all_key, std::make_unique<counter_entry> ());
-				updater (*it_all->second); // Also update the `all` counter
-			}
-		}
-	};
-
-	update_counter (counter_key{ type, detail, dir }, [value] (counter_entry & counter) {
-		counter.value += value;
-	});
+	counter_ref (type, detail, dir).fetch_add (value, std::memory_order_relaxed);
+
+	if (aggregate_all && detail != stat::detail::all)
+	{
+		counter_ref (type, stat::detail::all, dir).fetch_add (value, std::memory_order_relaxed);
+	}
 }
 
 nano::stats::counter_value_t nano::stats::count (stat::type type, stat::detail detail, stat::dir dir) const
 {
-	std::shared_lock lock{ mutex };
-	if (auto it = counters.find (counter_key{ type, detail, dir }); it != counters.end ())
-	{
-		return it->second->value;
-	}
-	return 0;
+	return counter_ref (type, detail, dir).load (std::memory_order_relaxed);
 }
 
 nano::stats::counter_value_t nano::stats::count (stat::type type, stat::dir dir) const
 {
-	std::shared_lock lock{ mutex };
 	counter_value_t result = 0;
-	auto it = counters.lower_bound (counter_key{ type, stat::detail::all, dir });
-	while (it != counters.end () && it->first.type == type)
+	// Sum all detail counters for this type and direction (except the 'all' detail)
+	for (auto detail : magic_enum::enum_values<stat::detail> ())
 	{
-		if (it->first.dir == dir && it->first.detail != stat::detail::all)
+		if (detail != stat::detail::all && detail != stat::detail::_invalid && detail != stat::detail::_last)
 		{
-			result += it->second->value;
+			result += counter_ref (type, detail, dir).load (std::memory_order_relaxed);
 		}
-		++it;
 	}
 
 	return result;
 }
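Taken together, idx (), counter_ref () and the relaxed fetch_add replace a mutex-guarded std::map lookup with one multiply-add and one lock-free atomic instruction. A self-contained sketch of the pattern with toy dimensions (all names below are illustrative, not the real API):

	#include <array>
	#include <atomic>
	#include <cstddef>
	#include <cstdint>
	#include <cstdio>

	enum class dir { in, out };
	enum class detail { all, test, send }; // Toy stand-ins for the real enums

	constexpr std::size_t details_count = 3;
	constexpr std::size_t dirs_count = 2;

	// dir is the slowest-changing dimension, mirroring the idx () layout above
	constexpr std::size_t idx (detail d, dir dr)
	{
		return static_cast<std::size_t> (dr) * details_count + static_cast<std::size_t> (d);
	}

	std::array<std::atomic<std::uint64_t>, details_count * dirs_count> counters{};

	int main ()
	{
		// Lock-free updates; relaxed ordering suffices for independent counters
		counters[idx (detail::test, dir::in)].fetch_add (5, std::memory_order_relaxed);
		counters[idx (detail::test, dir::in)].fetch_add (1, std::memory_order_relaxed);

		auto value = counters[idx (detail::test, dir::in)].load (std::memory_order_relaxed);
		std::printf ("%llu\n", static_cast<unsigned long long> (value)); // Prints 6
	}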
@@ -225,6 +226,7 @@ void nano::stats::log_counters (stat_log_sink & sink)
 void nano::stats::log_counters_impl (stat_log_sink & sink, tm & tm)
 {
 	sink.begin ();
+
 	if (sink.entries () >= config.log_rotation_count)
 	{
 		sink.rotate ();
@@ -236,14 +238,35 @@ void nano::stats::log_counters_impl (stat_log_sink & sink, tm & tm)
 		sink.write_header ("counters", walltime);
 	}
 
-	for (auto const & [key, entry] : counters)
+	for (auto dir : magic_enum::enum_values<stat::dir> ())
 	{
-		std::string type{ to_string (key.type) };
-		std::string detail{ to_string (key.detail) };
-		std::string dir{ to_string (key.dir) };
+		if (dir == stat::dir::_last)
+			continue;
 
-		sink.write_counter_entry (tm, type, detail, dir, entry->value);
+		for (auto type : magic_enum::enum_values<stat::type> ())
+		{
+			if (type == stat::type::_invalid || type == stat::type::_last)
+				continue;
+
+			for (auto detail : magic_enum::enum_values<stat::detail> ())
+			{
+				if (detail == stat::detail::_invalid || detail == stat::detail::_last)
+					continue;
+
+				auto value = counter_ref (type, detail, dir).load (std::memory_order_relaxed);
+				if (value > 0) // Only log non-zero counters
+				{
+					std::string type_str{ to_string (type) };
+					std::string detail_str{ to_string (detail) };
+					std::string dir_str{ to_string (dir) };
+					sink.write_counter_entry (tm, type_str, detail_str, dir_str, value);
+				}
+			}
+		}
 	}
 
 	sink.entries ()++;
 	sink.finalize ();
 }
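The nested loops rely on magic_enum::enum_values, which returns a constexpr std::array of every enumerator, so the full counter space can be walked without hand-maintained lists. A small sketch (the toy dir enum is hypothetical):

	#include <magic_enum.hpp>

	#include <cstdio>

	enum class dir
	{
		in,
		out,
		_last // Sentinel
	};

	int main ()
	{
		for (auto d : magic_enum::enum_values<dir> ()) // Yields { in, out, _last }
		{
			if (d == dir::_last) // Skip sentinels, as log_counters_impl does
				continue;
			auto name = magic_enum::enum_name (d);
			std::printf ("%.*s\n", static_cast<int> (name.size ()), name.data ());
		}
	}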
@@ -301,7 +324,9 @@ void nano::stats::run ()
 	std::unique_lock lock{ mutex };
 	while (!stopped)
 	{
-		condition.wait_for (lock, 1s);
+		condition.wait_for (lock, 1s, [this] {
+			return stopped;
+		});
 		if (!stopped)
 		{
 			run_one (lock);
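The predicate overload of wait_for matters here: it returns immediately if stopped was already set before the wait began, and it re-checks the flag after spurious wakeups instead of falling through. A minimal sketch of the pattern (the worker below is illustrative):

	#include <chrono>
	#include <condition_variable>
	#include <mutex>
	#include <thread>

	using namespace std::chrono_literals;

	std::mutex m;
	std::condition_variable cv;
	bool stopped = false;

	void worker ()
	{
		std::unique_lock lock{ m };
		while (!stopped)
		{
			// Wakes on notify, after 1s, or immediately if stopped is already true
			cv.wait_for (lock, 1s, [] { return stopped; });
			if (!stopped)
			{
				// ... periodic work would run here ...
			}
		}
	}

	int main ()
	{
		std::thread t{ worker };
		{
			std::lock_guard guard{ m };
			stopped = true;
		}
		cv.notify_all ();
		t.join ();
	}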
@@ -446,4 +471,4 @@ nano::error nano::stats_config::deserialize_toml (nano::tomlconfig & toml)
 	}
 
 	return toml.get_error ();
 }

File 3/3: stats.hpp

@@ -7,6 +7,8 @@
 #include <boost/circular_buffer.hpp>
 
+#include <array>
+#include <atomic>
 #include <chrono>
 #include <initializer_list>
 #include <map>
@@ -71,6 +73,11 @@ public:
 	using counter_value_t = uint64_t;
 	using sampler_value_t = int64_t;
 
+public: // Array dimensions - must match the enum sizes in stats_enums.hpp
+	static constexpr size_t types_count = 256; // Enough for stat::type enum range
+	static constexpr size_t details_count = 1024; // Enough for stat::detail enum range
+	static constexpr size_t dirs_count = 2; // Enough for stat::dir enum range
+
 public:
 	explicit stats (nano::logger &, nano::stats_config = {});
 	~stats ();
@@ -132,15 +139,6 @@ public:
 	std::string dump (category category = category::counters);
 
 private:
-	struct counter_key
-	{
-		stat::type type;
-		stat::detail detail;
-		stat::dir dir;
-
-		auto operator<=> (const counter_key &) const = default;
-	};
-
 	struct sampler_key
 	{
 		stat::sample sample;
@ -149,18 +147,6 @@ private:
}; };
private: private:
class counter_entry
{
public:
// Prevent copying
counter_entry () = default;
counter_entry (counter_entry const &) = delete;
counter_entry & operator= (counter_entry const &) = delete;
public:
std::atomic<counter_value_t> value{ 0 };
};
class sampler_entry class sampler_entry
{ {
public: public:
@@ -183,9 +169,13 @@ private:
 		mutable nano::mutex mutex;
 	};
 
-	// Wrap in unique_ptrs because mutex/atomic members are not movable
-	// TODO: Compare performance of map vs unordered_map
-	std::map<counter_key, std::unique_ptr<counter_entry>> counters;
+	// Flat array for direct-indexed counter access
+	// Allocate on the heap to avoid stack overflows when instantiating the stats class in tests
+	using counters_array_t = std::array<std::atomic<counter_value_t>, types_count * details_count * dirs_count>;
+	std::unique_ptr<counters_array_t> counters_impl;
+	counters_array_t & counters;
+
+	// Keep samplers as map since they have different behavior and lower frequency
 	std::map<sampler_key, std::unique_ptr<sampler_entry>> samplers;
 
 private:
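The heap allocation is not optional: with 8-byte atomics the array occupies 256 × 1024 × 2 × 8 bytes = 4 MiB, comparable to or larger than a typical default thread stack. A sketch of the dimension arithmetic and the unique_ptr-plus-reference pattern (stats_sketch is a hypothetical stand-in for the real class):

	#include <array>
	#include <atomic>
	#include <cstddef>
	#include <cstdint>
	#include <memory>

	struct stats_sketch
	{
		static constexpr std::size_t types_count = 256;
		static constexpr std::size_t details_count = 1024;
		static constexpr std::size_t dirs_count = 2;

		using counters_array_t = std::array<std::atomic<std::uint64_t>, types_count * details_count * dirs_count>;

		// 524288 entries of at least 8 bytes each: 4 MiB or more
		static_assert (sizeof (counters_array_t) >= 4u * 1024 * 1024);

		// make_unique value-initializes the array, zeroing every counter (C++20 atomics)
		std::unique_ptr<counters_array_t> counters_impl{ std::make_unique<counters_array_t> () };
		counters_array_t & counters{ *counters_impl };
	};

	int main ()
	{
		stats_sketch stats; // The 4 MiB buffer lives on the heap, not in this frame
		stats.counters[0].fetch_add (1, std::memory_order_relaxed);
	}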
@@ -201,6 +191,12 @@ private:
 	static bool is_stat_logging_enabled ();
 
+	std::atomic<counter_value_t> & counter_ref (stat::type type, stat::detail detail, stat::dir dir);
+	std::atomic<counter_value_t> const & counter_ref (stat::type type, stat::detail detail, stat::dir dir) const;
+
+	// Helper function to calculate flat array index
+	static size_t idx (stat::type type, stat::detail detail, stat::dir dir);
+
 private:
 	nano::stats_config const config;
 	nano::logger & logger;