Merge branch 'develop' into frontier-scan-5

commit 6e68af5b2e

15 changed files with 242 additions and 103 deletions
@@ -37,6 +37,7 @@ add_executable(
 	node.cpp
 	numbers.cpp
 	object_stream.cpp
+	observer_set.cpp
 	optimistic_scheduler.cpp
 	processing_queue.cpp
 	processor_service.cpp
nano/core_test/observer_set.cpp (new file, +129 lines)

@@ -0,0 +1,129 @@
#include <nano/lib/observer_set.hpp>
#include <nano/lib/timer.hpp>

#include <gtest/gtest.h>

#include <atomic>
#include <thread>

using namespace std::chrono_literals;

TEST (observer_set, notify_one)
{
	nano::observer_set<int> set;
	int value{ 0 };
	set.add ([&value] (int v) {
		value = v;
	});
	set.notify (1);
	ASSERT_EQ (1, value);
}

TEST (observer_set, notify_multiple)
{
	nano::observer_set<int> set;
	int value{ 0 };
	set.add ([&value] (int v) {
		value = v;
	});
	set.add ([&value] (int v) {
		value += v;
	});
	set.notify (1);
	ASSERT_EQ (2, value);
}

TEST (observer_set, notify_empty)
{
	nano::observer_set<int> set;
	set.notify (1);
}

TEST (observer_set, notify_multiple_types)
{
	nano::observer_set<int, std::string> set;
	int value{ 0 };
	std::string str;
	set.add ([&value, &str] (int v, std::string s) {
		value = v;
		str = s;
	});
	set.notify (1, "test");
	ASSERT_EQ (1, value);
	ASSERT_EQ ("test", str);
}

TEST (observer_set, empty_params)
{
	nano::observer_set<> set;
	set.notify ();
}

// Make sure there are no TSAN warnings
TEST (observer_set, parallel_notify)
{
	nano::observer_set<int> set;
	std::atomic<int> value{ 0 };
	set.add ([&value] (int v) {
		std::this_thread::sleep_for (100ms);
		value = v;
	});
	nano::timer timer{ nano::timer_state::started };
	std::vector<std::thread> threads;
	for (int i = 0; i < 10; ++i)
	{
		threads.emplace_back ([&set] {
			set.notify (1);
		});
	}
	for (auto & thread : threads)
	{
		thread.join ();
	}
	ASSERT_EQ (1, value);
	// Notification should be done in parallel
	ASSERT_LT (timer.since_start (), 300ms);
}

namespace
{
struct move_only
{
	move_only () = default;
	move_only (move_only &&) = default;
	move_only & operator= (move_only &&) = default;
	move_only (move_only const &) = delete;
	move_only & operator= (move_only const &) = delete;
};

struct copy_throw
{
	copy_throw () = default;
	copy_throw (copy_throw &&) = default;
	copy_throw & operator= (copy_throw &&) = default;
	copy_throw (copy_throw const &)
	{
		throw std::runtime_error ("copy_throw");
	}
	copy_throw & operator= (copy_throw const &) = delete;
};
}

// Ensure that parameters are not unnecessarily copied, this should compile
TEST (observer_set, move_only)
{
	nano::observer_set<move_only> set;
	set.add ([] (move_only const &) {
	});
	move_only value;
	set.notify (value);
}

TEST (observer_set, copy_throw)
{
	nano::observer_set<copy_throw> set;
	set.add ([] (copy_throw const &) {
	});
	copy_throw value;
	ASSERT_NO_THROW (set.notify (value));
}
@@ -12,21 +12,25 @@ template <typename... T>
 class observer_set final
 {
 public:
-	void add (std::function<void (T...)> const & observer_a)
+	using observer_type = std::function<void (T const &...)>;
+
+public:
+	void add (observer_type observer)
 	{
 		nano::lock_guard<nano::mutex> lock{ mutex };
-		observers.push_back (observer_a);
+		observers.push_back (observer);
 	}
 
-	void notify (T... args) const
+	void notify (T const &... args) const
 	{
 		// Make observers copy to allow parallel notifications
 		nano::unique_lock<nano::mutex> lock{ mutex };
 		auto observers_copy = observers;
 		lock.unlock ();
 
-		for (auto & i : observers_copy)
+		for (auto const & observer : observers_copy)
 		{
-			i (args...);
+			observer (args...);
 		}
 	}
 

@@ -53,7 +57,7 @@ public:
 
 private:
	mutable nano::mutex mutex{ mutex_identifier (mutexes::observer_set) };
-	std::vector<std::function<void (T...)>> observers;
+	std::vector<observer_type> observers;
 };
 
 }
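The two observer_set changes above work together: notify () now takes its arguments by const reference, so a notification no longer copies the arguments once per observer and move-only types can be passed through (which the new move_only and copy_throw tests compile-check), and the observer list is copied while holding the mutex but invoked after the lock is released, so concurrent notify () calls can run their handlers in parallel (what parallel_notify exercises under TSAN). A minimal standalone sketch of the same copy-under-lock idea, using std::mutex in place of nano's lock wrappers (an assumption for illustration only):

#include <functional>
#include <iostream>
#include <mutex>
#include <vector>

// Simplified stand-in for nano::observer_set: callbacks are stored under a
// mutex, but invoked on a copy taken outside the lock so notifications from
// multiple threads do not serialize on each other's handlers.
template <typename... T>
class observer_set_sketch
{
public:
	using observer_type = std::function<void (T const &...)>;

	void add (observer_type observer)
	{
		std::lock_guard<std::mutex> lock{ mutex };
		observers.push_back (std::move (observer));
	}

	void notify (T const &... args) const
	{
		std::vector<observer_type> copy;
		{
			std::lock_guard<std::mutex> lock{ mutex };
			copy = observers; // copy under the lock, invoke outside it
		}
		for (auto const & observer : copy)
		{
			observer (args...);
		}
	}

private:
	mutable std::mutex mutex;
	std::vector<observer_type> observers;
};

int main ()
{
	observer_set_sketch<int> set;
	set.add ([] (int v) { std::cout << "observed " << v << '\n'; });
	set.notify (42);
}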
@@ -37,6 +37,9 @@ std::string nano::thread_role::get_string (nano::thread_role::name role)
 		case nano::thread_role::name::block_processing:
 			thread_role_name_string = "Blck processing";
 			break;
+		case nano::thread_role::name::block_processing_notifications:
+			thread_role_name_string = "Blck proc notif";
+			break;
 		case nano::thread_role::name::request_loop:
 			thread_role_name_string = "Request loop";
 			break;
@@ -17,6 +17,7 @@ enum class name
 	vote_processing,
 	vote_cache_processing,
 	block_processing,
+	block_processing_notifications,
 	request_loop,
 	wallet_actions,
 	bootstrap_initiator,
@@ -12,36 +12,14 @@
 
 #include <utility>
 
-/*
- * block_processor::context
- */
-
-nano::block_processor::context::context (std::shared_ptr<nano::block> block, nano::block_source source_a, callback_t callback_a) :
-	block{ std::move (block) },
-	source{ source_a },
-	callback{ std::move (callback_a) }
-{
-	debug_assert (source != nano::block_source::unknown);
-}
-
-auto nano::block_processor::context::get_future () -> std::future<result_t>
-{
-	return promise.get_future ();
-}
-
-void nano::block_processor::context::set_result (result_t const & result)
-{
-	promise.set_value (result);
-}
-
 /*
  * block_processor
  */
 
 nano::block_processor::block_processor (nano::node & node_a) :
 	config{ node_a.config.block_processor },
-	node (node_a),
-	next_log (std::chrono::steady_clock::now ())
+	node{ node_a },
+	workers{ 1, nano::thread_role::name::block_processing_notifications }
 {
 	batch_processed.add ([this] (auto const & items) {
 		// For every batch item: notify the 'processed' observer.
@@ -84,12 +62,15 @@ nano::block_processor::~block_processor ()
 {
 	// Thread must be stopped before destruction
 	debug_assert (!thread.joinable ());
+	debug_assert (!workers.alive ());
 }
 
 void nano::block_processor::start ()
 {
 	debug_assert (!thread.joinable ());
 
+	workers.start ();
+
 	thread = std::thread ([this] () {
 		nano::thread_role::set (nano::thread_role::name::block_processing);
 		run ();
@@ -107,6 +88,7 @@ void nano::block_processor::stop ()
 	{
 		thread.join ();
 	}
+	workers.stop ();
 }
 
 // TODO: Remove and replace all checks with calls to size (block_source)
@@ -229,13 +211,24 @@ void nano::block_processor::rollback_competitor (secure::write_transaction const
 
 void nano::block_processor::run ()
 {
+	nano::interval log_interval;
 	nano::unique_lock<nano::mutex> lock{ mutex };
 	while (!stopped)
 	{
 		if (!queue.empty ())
 		{
-			// TODO: Cleaner periodical logging
-			if (should_log ())
+			// It's possible that ledger processing happens faster than the notifications can be processed by other components, cooldown here
+			while (workers.queued_tasks () >= config.max_queued_notifications)
+			{
+				node.stats.inc (nano::stat::type::blockprocessor, nano::stat::detail::cooldown);
+				condition.wait_for (lock, 100ms, [this] { return stopped; });
+				if (stopped)
+				{
+					return;
+				}
+			}
+
+			if (log_interval.elapsed (15s))
 			{
 				node.logger.info (nano::log::type::blockprocessor, "{} blocks (+ {} forced) in processing queue",
 					queue.size (),
@@ -244,7 +237,11 @@ void nano::block_processor::run ()
 
 			auto processed = process_batch (lock);
 			debug_assert (!lock.owns_lock ());
+			lock.lock ();
 
+			// Queue notifications to be dispatched in the background
+			workers.post ([this, processed = std::move (processed)] () mutable {
+				node.stats.inc (nano::stat::type::blockprocessor, nano::stat::detail::notify);
 				// Set results for futures when not holding the lock
 				for (auto & [result, context] : processed)
 				{
@@ -254,31 +251,18 @@ void nano::block_processor::run ()
 				}
 				context.set_result (result);
 			}
 
 			batch_processed.notify (processed);
-
-			lock.lock ();
+		});
 		}
 		else
 		{
-			condition.notify_one ();
-			condition.wait (lock);
+			condition.wait (lock, [this] {
+				return stopped || !queue.empty ();
+			});
 		}
 	}
 }
 
-bool nano::block_processor::should_log ()
-{
-	auto result (false);
-	auto now (std::chrono::steady_clock::now ());
-	if (next_log < now)
-	{
-		next_log = now + std::chrono::seconds (15);
-		result = true;
-	}
-	return result;
-}
-
 auto nano::block_processor::next () -> context
 {
 	debug_assert (!mutex.try_lock ());
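Taken together, the run () changes above stop ledger processing from blocking on observers: results and batch_processed notifications are posted to the single-thread workers pool (block_processing_notifications role), and the processing loop cools down whenever workers.queued_tasks () reaches config.max_queued_notifications. A standalone sketch of that producer/worker backpressure handshake using only the standard library; notification_worker and max_queued are illustrative names, not nano APIs, and remaining tasks are simply dropped on shutdown in this simplified version:

#include <chrono>
#include <condition_variable>
#include <cstddef>
#include <deque>
#include <functional>
#include <iostream>
#include <mutex>
#include <thread>

using namespace std::chrono_literals;

// Single background thread draining a task queue; stands in for nano::thread_pool.
class notification_worker
{
public:
	notification_worker () :
		thread{ [this] { run (); } }
	{
	}

	~notification_worker ()
	{
		{
			std::lock_guard<std::mutex> lock{ mutex };
			stopped = true;
		}
		condition.notify_all ();
		thread.join ();
	}

	void post (std::function<void ()> task)
	{
		{
			std::lock_guard<std::mutex> lock{ mutex };
			tasks.push_back (std::move (task));
		}
		condition.notify_all ();
	}

	std::size_t queued_tasks () const
	{
		std::lock_guard<std::mutex> lock{ mutex };
		return tasks.size ();
	}

private:
	void run ()
	{
		std::unique_lock<std::mutex> lock{ mutex };
		while (!stopped)
		{
			if (!tasks.empty ())
			{
				auto task = std::move (tasks.front ());
				tasks.pop_front ();
				lock.unlock ();
				task (); // run the notification outside the lock
				lock.lock ();
			}
			else
			{
				condition.wait (lock, [this] { return stopped || !tasks.empty (); });
			}
		}
	}

	mutable std::mutex mutex;
	std::condition_variable condition;
	std::deque<std::function<void ()>> tasks;
	bool stopped{ false };
	std::thread thread; // declared last so the other members are ready when run () starts
};

int main ()
{
	notification_worker worker;
	std::size_t const max_queued = 8; // plays the role of max_queued_notifications

	for (int batch = 0; batch < 32; ++batch)
	{
		// Cooldown: the producer pauses while the worker is too far behind,
		// mirroring the while (workers.queued_tasks () >= ...) loop above.
		while (worker.queued_tasks () >= max_queued)
		{
			std::this_thread::sleep_for (10ms);
		}
		worker.post ([batch] {
			std::this_thread::sleep_for (1ms); // pretend to notify observers
			std::cout << "notified batch " << batch << '\n';
		});
	}
}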
@@ -315,7 +299,7 @@ auto nano::block_processor::process_batch (nano::unique_lock<nano::mutex> & lock
 	debug_assert (!mutex.try_lock ());
 	debug_assert (!queue.empty ());
 
-	auto batch = next_batch (256);
+	auto batch = next_batch (config.batch_size);
 
 	lock.unlock ();
 
@@ -466,9 +450,32 @@ nano::container_info nano::block_processor::container_info () const
 	info.put ("blocks", queue.size ());
 	info.put ("forced", queue.size ({ nano::block_source::forced }));
 	info.add ("queue", queue.container_info ());
+	info.add ("workers", workers.container_info ());
 	return info;
 }
 
+/*
+ * block_processor::context
+ */
+
+nano::block_processor::context::context (std::shared_ptr<nano::block> block, nano::block_source source_a, callback_t callback_a) :
+	block{ std::move (block) },
+	source{ source_a },
+	callback{ std::move (callback_a) }
+{
+	debug_assert (source != nano::block_source::unknown);
+}
+
+auto nano::block_processor::context::get_future () -> std::future<result_t>
+{
+	return promise.get_future ();
+}
+
+void nano::block_processor::context::set_result (result_t const & result)
+{
+	promise.set_value (result);
+}
+
 /*
  * block_processor_config
  */
 
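The context block re-added above is the same code that was removed near the top of the file; it only moved below container_info (). Each queued block carries a std::promise whose future is presumably what add_blocking waits on while the worker fulfils the promise via set_result (). A reduced sketch of that promise/future handshake, with hypothetical names (work_context, payload):

#include <future>
#include <iostream>
#include <string>

// Hypothetical result/context pair, reduced from block_processor::context:
// the producer keeps the future, the worker fulfils the promise once the
// item has been processed.
struct work_context
{
	std::string payload;
	std::promise<bool> promise;

	std::future<bool> get_future ()
	{
		return promise.get_future ();
	}
	void set_result (bool result)
	{
		promise.set_value (result);
	}
};

int main ()
{
	work_context context{ "block data" };
	auto future = context.get_future ();

	// Normally another thread would do this after processing the item.
	context.set_result (true);

	std::cout << "processed: " << std::boolalpha << future.get () << '\n';
}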
@@ -1,6 +1,7 @@
 #pragma once
 
 #include <nano/lib/logging.hpp>
+#include <nano/lib/thread_pool.hpp>
 #include <nano/node/fair_queue.hpp>
 #include <nano/node/fwd.hpp>
 #include <nano/secure/common.hpp>
@@ -46,6 +47,9 @@ public:
 	size_t priority_live{ 1 };
 	size_t priority_bootstrap{ 8 };
 	size_t priority_local{ 16 };
+
+	size_t batch_size{ 256 };
+	size_t max_queued_notifications{ 8 };
 };
 
 /**
@@ -89,7 +93,6 @@ public:
 	bool add (std::shared_ptr<nano::block> const &, nano::block_source = nano::block_source::live, std::shared_ptr<nano::transport::channel> const & channel = nullptr, std::function<void (nano::block_status)> callback = {});
 	std::optional<nano::block_status> add_blocking (std::shared_ptr<nano::block> const & block, nano::block_source);
 	void force (std::shared_ptr<nano::block> const &);
-	bool should_log ();
 
 	nano::container_info container_info () const;
 
@@ -122,11 +125,11 @@ private: // Dependencies
 private:
 	nano::fair_queue<context, nano::block_source> queue;
 
-	std::chrono::steady_clock::time_point next_log;
-
 	bool stopped{ false };
 	nano::condition_variable condition;
 	mutable nano::mutex mutex{ mutex_identifier (mutexes::block_processor) };
 	std::thread thread;
+
+	nano::thread_pool workers;
 };
 }
@@ -37,7 +37,6 @@ nano::bootstrap_ascending::service::service (nano::node_config const & node_conf
 	frontiers_limiter{ config.frontier_rate_limit },
 	workers{ 1, nano::thread_role::name::ascending_bootstrap_worker }
 {
-	// TODO: This is called from a very congested blockprocessor thread. Offload this work to a dedicated processing thread
 	block_processor.batch_processed.add ([this] (auto const & batch) {
 		{
 			nano::lock_guard<nano::mutex> lock{ mutex };
@@ -266,13 +265,16 @@ void nano::bootstrap_ascending::service::inspect (secure::transaction const & tx
 		{
 			if (source == nano::block_source::bootstrap)
 			{
-				const auto account = block.previous ().is_zero () ? block.account_field ().value () : ledger.any.block_account (tx, block.previous ()).value ();
+				const auto account = block.previous ().is_zero () ? block.account_field ().value () : ledger.any.block_account (tx, block.previous ()).value_or (0);
 				const auto source_hash = block.source_field ().value_or (block.link_field ().value_or (0).as_block_hash ());
 
+				if (!account.is_zero () && !source_hash.is_zero ())
+				{
 					// Mark account as blocked because it is missing the source block
 					accounts.block (account, source_hash);
+				}
 			}
 		}
 		break;
 		case nano::block_status::gap_previous:
 		{
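The inspect () change above swaps value () for value_or (0) plus an explicit is_zero () guard, so a block whose account cannot be resolved is skipped instead of throwing std::bad_optional_access. A tiny illustration of the difference; lookup_account is a made-up stand-in, not a nano API:

#include <iostream>
#include <optional>

// value () would throw std::bad_optional_access when the lookup fails,
// while value_or (0) yields a sentinel that can be checked explicitly.
std::optional<unsigned> lookup_account (bool found)
{
	if (found)
	{
		return 42;
	}
	return std::nullopt;
}

int main ()
{
	auto account = lookup_account (false).value_or (0);
	if (account == 0)
	{
		std::cout << "account unknown, skip blocking\n";
	}
	else
	{
		std::cout << "block account " << account << '\n';
	}
}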
@@ -12,7 +12,7 @@ nano::confirming_set::confirming_set (confirming_set_config const & config_a, na
 	ledger{ ledger_a },
 	stats{ stats_a },
 	logger{ logger_a },
-	notification_workers{ 1, nano::thread_role::name::confirmation_height_notifications }
+	workers{ 1, nano::thread_role::name::confirmation_height_notifications }
 {
 	batch_cemented.add ([this] (auto const & cemented) {
 		for (auto const & context : cemented)
@@ -55,7 +55,7 @@ void nano::confirming_set::start ()
 		return;
 	}
 
-	notification_workers.start ();
+	workers.start ();
 
 	thread = std::thread{ [this] () {
 		nano::thread_role::set (nano::thread_role::name::confirmation_height);
@@ -74,7 +74,7 @@ void nano::confirming_set::stop ()
 	{
 		thread.join ();
 	}
-	notification_workers.stop ();
+	workers.stop ();
 }
 
 bool nano::confirming_set::contains (nano::block_hash const & hash) const
@@ -150,7 +150,7 @@ void nano::confirming_set::run_batch (std::unique_lock<std::mutex> & lock)
 	std::unique_lock lock{ mutex };
 
 	// It's possible that ledger cementing happens faster than the notifications can be processed by other components, cooldown here
-	while (notification_workers.queued_tasks () >= config.max_queued_notifications)
+	while (workers.queued_tasks () >= config.max_queued_notifications)
 	{
 		stats.inc (nano::stat::type::confirming_set, nano::stat::detail::cooldown);
 		condition.wait_for (lock, 100ms, [this] { return stopped.load (); });
@@ -160,7 +160,7 @@ void nano::confirming_set::run_batch (std::unique_lock<std::mutex> & lock)
 		}
 	}
 
-	notification_workers.post ([this, batch = std::move (batch)] () {
+	workers.post ([this, batch = std::move (batch)] () {
 		stats.inc (nano::stat::type::confirming_set, nano::stat::detail::notify);
 		batch_cemented.notify (batch);
 	});
@@ -255,6 +255,7 @@ nano::container_info nano::confirming_set::container_info () const
 
 	nano::container_info info;
 	info.put ("set", set);
-	info.add ("notification_workers", notification_workers.container_info ());
+	info.put ("notifications", workers.queued_tasks ());
+	info.add ("workers", workers.container_info ());
 	return info;
 }
@@ -105,11 +105,11 @@ private:
 	ordered_entries set;
 	std::unordered_set<nano::block_hash> current;
 
-	nano::thread_pool notification_workers;
-
 	std::atomic<bool> stopped{ false };
 	mutable std::mutex mutex;
 	std::condition_variable condition;
 	std::thread thread;
+
+	nano::thread_pool workers;
 };
 }
@@ -21,7 +21,7 @@ void iterator::update (int status)
 	if (status == MDB_SUCCESS)
 	{
 		value_type init;
-		auto status = mdb_cursor_get (cursor, &init.first, &init.second, MDB_GET_CURRENT);
+		auto status = mdb_cursor_get (cursor.get (), &init.first, &init.second, MDB_GET_CURRENT);
 		release_assert (status == MDB_SUCCESS);
 		current = init;
 	}
@@ -33,8 +33,10 @@ void iterator::update (int status)
 
 iterator::iterator (MDB_txn * tx, MDB_dbi dbi) noexcept
 {
+	MDB_cursor * cursor;
 	auto open_status = mdb_cursor_open (tx, dbi, &cursor);
 	release_assert (open_status == MDB_SUCCESS);
+	this->cursor.reset (cursor);
 	this->current = std::monostate{};
 }
 
|
|||
auto iterator::lower_bound (MDB_txn * tx, MDB_dbi dbi, MDB_val const & lower_bound) -> iterator
|
||||
{
|
||||
iterator result{ tx, dbi };
|
||||
auto status = mdb_cursor_get (result.cursor, const_cast<MDB_val *> (&lower_bound), nullptr, MDB_SET_RANGE);
|
||||
auto status = mdb_cursor_get (result.cursor.get (), const_cast<MDB_val *> (&lower_bound), nullptr, MDB_SET_RANGE);
|
||||
result.update (status);
|
||||
return std::move (result);
|
||||
}
|
||||
|
|
@@ -63,18 +65,9 @@ iterator::iterator (iterator && other) noexcept
 	*this = std::move (other);
 }
 
-iterator::~iterator ()
-{
-	if (cursor)
-	{
-		mdb_cursor_close (cursor);
-	}
-}
-
 auto iterator::operator= (iterator && other) noexcept -> iterator &
 {
-	cursor = other.cursor;
-	other.cursor = nullptr;
+	cursor = std::move (other.cursor);
 	current = other.current;
 	other.current = std::monostate{};
 	return *this;
@@ -83,7 +76,7 @@ auto iterator::operator= (iterator && other) noexcept -> iterator &
 auto iterator::operator++ () -> iterator &
 {
 	auto operation = is_end () ? MDB_FIRST : MDB_NEXT;
-	auto status = mdb_cursor_get (cursor, nullptr, nullptr, operation);
+	auto status = mdb_cursor_get (cursor.get (), nullptr, nullptr, operation);
 	release_assert (status == MDB_SUCCESS || status == MDB_NOTFOUND);
 	update (status);
 	return *this;
@@ -92,7 +85,7 @@ auto iterator::operator++ () -> iterator &
 auto iterator::operator-- () -> iterator &
 {
 	auto operation = is_end () ? MDB_LAST : MDB_PREV;
-	auto status = mdb_cursor_get (cursor, nullptr, nullptr, operation);
+	auto status = mdb_cursor_get (cursor.get (), nullptr, nullptr, operation);
 	release_assert (status == MDB_SUCCESS || status == MDB_NOTFOUND);
 	update (status);
 	return *this;
@@ -22,7 +22,7 @@ namespace nano::store::lmdb
  */
 class iterator
 {
-	MDB_cursor * cursor{ nullptr };
+	std::unique_ptr<MDB_cursor, decltype (&mdb_cursor_close)> cursor{ nullptr, mdb_cursor_close };
 	std::variant<std::monostate, std::pair<MDB_val, MDB_val>> current;
 	void update (int status);
 	iterator (MDB_txn * tx, MDB_dbi dbi) noexcept;
@@ -39,8 +39,6 @@ public:
 	static auto end (MDB_txn * tx, MDB_dbi dbi) -> iterator;
 	static auto lower_bound (MDB_txn * tx, MDB_dbi dbi, MDB_val const & lower_bound) -> iterator;
 
-	~iterator ();
-
 	iterator (iterator const &) = delete;
 	auto operator= (iterator const &) -> iterator & = delete;
 
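With the cursor held in a std::unique_ptr whose deleter is mdb_cursor_close, the hand-written ~iterator () goes away and move assignment reduces to moving the smart pointer. A standalone sketch of the same custom-deleter pattern, using a hypothetical C-style handle API rather than LMDB itself:

#include <cstdio>
#include <cstdlib>
#include <memory>

// Hypothetical C-style handle API standing in for mdb_cursor_open / mdb_cursor_close.
struct handle;
handle * handle_open ()
{
	return static_cast<handle *> (std::malloc (1));
}
void handle_close (handle * h)
{
	std::puts ("handle closed");
	std::free (h);
}

int main ()
{
	// The deleter is stored in the unique_ptr, so the handle is released
	// exactly once and no hand-written destructor is needed.
	std::unique_ptr<handle, decltype (&handle_close)> cursor{ handle_open (), handle_close };

	// C APIs that want the raw pointer keep working via get ().
	std::printf ("raw pointer: %p\n", static_cast<void *> (cursor.get ()));

	// Moving transfers ownership and leaves the source null, which is what
	// makes the explicit move assignment in iterator so much simpler.
	auto moved = std::move (cursor);
	std::printf ("source is now %s\n", cursor ? "set" : "null");
}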
@@ -121,9 +121,7 @@ bool nano::store::lmdb::component::vacuum_after_upgrade (std::filesystem::path c
 	if (vacuum_success)
 	{
 		// Need to close the database to release the file handle
-		mdb_env_sync (env.environment, true);
-		mdb_env_close (env.environment);
-		env.environment = nullptr;
+		mdb_env_sync (env, true);
 
 		// Replace the ledger file with the vacuumed one
 		std::filesystem::rename (vacuum_path, path_a);
@@ -155,7 +153,7 @@ void nano::store::lmdb::component::serialize_mdb_tracker (boost::property_tree::
 void nano::store::lmdb::component::serialize_memory_stats (boost::property_tree::ptree & json)
 {
 	MDB_stat stats;
-	auto status (mdb_env_stat (env.environment, &stats));
+	auto status (mdb_env_stat (env, &stats));
 	release_assert (status == 0);
 	json.put ("branch_pages", stats.ms_branch_pages);
 	json.put ("depth", stats.ms_depth);
@@ -448,7 +446,7 @@ std::string nano::store::lmdb::component::error_string (int status) const
 
 bool nano::store::lmdb::component::copy_db (std::filesystem::path const & destination_file)
 {
-	return !mdb_env_copy2 (env.environment, destination_file.string ().c_str (), MDB_CP_COMPACT);
+	return !mdb_env_copy2 (env, destination_file.string ().c_str (), MDB_CP_COMPACT);
 }
 
 void nano::store::lmdb::component::rebuild_db (store::write_transaction const & transaction_a)
@@ -19,8 +19,10 @@ void nano::store::lmdb::env::init (bool & error_a, std::filesystem::path const &
 	nano::set_secure_perm_directory (path_a.parent_path (), error_chmod);
 	if (!error_mkdir)
 	{
+		MDB_env * environment;
 		auto status1 (mdb_env_create (&environment));
 		release_assert (status1 == 0);
+		this->environment.reset (environment);
 		auto status2 (mdb_env_set_maxdbs (environment, options_a.config.max_databases));
 		release_assert (status2 == 0);
 		auto map_size = options_a.config.map_size;
@@ -66,13 +68,11 @@ void nano::store::lmdb::env::init (bool & error_a, std::filesystem::path const &
 		else
 		{
 			error_a = true;
-			environment = nullptr;
 		}
 	}
 	else
 	{
 		error_a = true;
-		environment = nullptr;
 	}
 }
 
@@ -81,14 +81,13 @@ nano::store::lmdb::env::~env ()
 	if (environment != nullptr)
 	{
 		// Make sure the commits are flushed. This is a no-op unless MDB_NOSYNC is used.
-		mdb_env_sync (environment, true);
-		mdb_env_close (environment);
+		mdb_env_sync (environment.get (), true);
 	}
 }
 
 nano::store::lmdb::env::operator MDB_env * () const
 {
-	return environment;
+	return environment.get ();
 }
 
 nano::store::read_transaction nano::store::lmdb::env::tx_begin_read (store::lmdb::txn_callbacks mdb_txn_callbacks) const
@@ -62,7 +62,7 @@ public:
 	store::read_transaction tx_begin_read (txn_callbacks callbacks = txn_callbacks{}) const;
 	store::write_transaction tx_begin_write (txn_callbacks callbacks = txn_callbacks{}) const;
 	MDB_txn * tx (store::transaction const & transaction_a) const;
-	MDB_env * environment;
+	std::unique_ptr<MDB_env, decltype (&mdb_env_close)> environment{ nullptr, mdb_env_close };
 	nano::id_t const store_id{ nano::next_id () };
 };
 } // namespace nano::store::lmdb
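The same treatment is applied to MDB_env: storage moves into a unique_ptr with mdb_env_close as its deleter, while the existing operator MDB_env * () keeps call sites such as mdb_env_stat (env, &stats) compiling by handing out environment.get (). A reduced sketch of that conversion-operator idiom, again with a hypothetical handle type rather than LMDB:

#include <cstdio>
#include <cstdlib>
#include <memory>

// Hypothetical C handle, as in the previous sketch.
struct handle;
void handle_close (handle * h)
{
	std::free (h);
}

// C-style function that expects the raw pointer.
void handle_use (handle * h)
{
	std::printf ("using %p\n", static_cast<void *> (h));
}

class env_like
{
public:
	env_like () :
		environment{ static_cast<handle *> (std::malloc (1)), handle_close }
	{
	}

	// Implicit conversion keeps existing call sites like handle_use (env)
	// working even though ownership now lives in a unique_ptr.
	operator handle * () const
	{
		return environment.get ();
	}

private:
	std::unique_ptr<handle, decltype (&handle_close)> environment;
};

int main ()
{
	env_like env;
	handle_use (env); // converts implicitly to handle *
}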