#include <nano/crypto/blake2/blake2.h>
#include <nano/crypto_lib/random_pool.hpp>
#include <nano/lib/blocks.hpp>
#include <nano/lib/epoch.hpp>
#include <nano/lib/thread_roles.hpp>
#include <nano/lib/threading.hpp>
#include <nano/lib/work.hpp>
#include <nano/node/xorshift.hpp>
#include <future>
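
// Human-readable name for a work version; unrecognized values map to "invalid"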
std::string nano::to_string (nano::work_version const version_a)
{
std::string result ("invalid");
switch (version_a)
{
case nano::work_version::work_1:
result = "work_1";
break;
case nano::work_version::unspecified:
result = "unspecified";
break;
}
return result;
}
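
// Start one PoW worker thread per hardware thread (capped at max_threads_a, and at a single thread on the dev network), plus one extra thread reserved for OpenCL when an OpenCL work function is provided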
nano::work_pool::work_pool (nano::network_constants & network_constants, unsigned max_threads_a, std::chrono::nanoseconds pow_rate_limiter_a, nano::opencl_work_func_t opencl_a) :
network_constants{ network_constants },
ticket (0),
done (false),
pow_rate_limiter (pow_rate_limiter_a),
opencl (opencl_a)
{
static_assert (ATOMIC_INT_LOCK_FREE == 2, "Atomic int needed");
auto count (network_constants.is_dev_network () ? std::min (max_threads_a, 1u) : std::min (max_threads_a, std::max (1u, nano::hardware_concurrency ())));
if (opencl)
{
// One thread to handle OpenCL
++count;
}
for (auto i (0u); i < count; ++i)
{
threads.emplace_back (nano::thread_attributes::get_default (), [this, i] () {
nano::thread_role::set (nano::thread_role::name::work);
nano::work_thread_reprioritize ();
loop (i);
});
}
}
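
// Signal all worker threads to stop and wait for them to exit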
nano::work_pool::~work_pool ()
{
stop ();
for (auto & i : threads)
{
i.join ();
}
}
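
// Worker thread main loop: take the request at the front of the queue and search for a nonce whose blake2b hash over (nonce, root) meets the requested difficulty.
// The shared `ticket` counter provides cooperative cancellation: the thread that finds a solution (or a cancellation) increments it, and the other threads abandon the current attempt once they observe the change.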
void nano::work_pool::loop (uint64_t thread)
{
// Quick RNG for work attempts.
xorshift1024star rng;
nano::random_pool::generate_block (reinterpret_cast<uint8_t *> (rng.s.data ()), rng.s.size () * sizeof (decltype (rng.s)::value_type));
uint64_t work;
uint64_t output;
blake2b_state hash;
blake2b_init (&hash, sizeof (output));
nano::unique_lock<nano::mutex> lock{ mutex };
auto pow_sleep = pow_rate_limiter;
while (!done)
{
auto empty (pending.empty ());
if (thread == 0)
{
// Only work thread 0 notifies work observers
work_observers.notify (!empty);
}
if (!empty)
{
auto current_l (pending.front ());
int ticket_l (ticket);
lock.unlock ();
output = 0;
boost::optional<uint64_t> opt_work;
if (thread == 0 && opencl)
{
opt_work = opencl (current_l.version, current_l.item, current_l.difficulty, ticket);
}
if (opt_work.is_initialized ())
{
work = *opt_work;
output = network_constants.work.value (current_l.item, work);
}
else
{
// ticket != ticket_l indicates a different thread found a solution and we should stop
while (ticket == ticket_l && output < current_l.difficulty)
{
// Don't query main memory every iteration in order to reduce memory bus traffic
// All operations here operate on stack memory
// Count iterations down to zero since comparing to zero is easier than comparing to another number
unsigned iteration (256);
while (iteration && output < current_l.difficulty)
{
work = rng.next ();
blake2b_update (&hash, reinterpret_cast<uint8_t *> (&work), sizeof (work));
blake2b_update (&hash, current_l.item.bytes.data (), current_l.item.bytes.size ());
blake2b_final (&hash, reinterpret_cast<uint8_t *> (&output), sizeof (output));
blake2b_init (&hash, sizeof (output));
iteration -= 1;
}
// Apply the optional rate limiter to the PoW calculation so that CPUs are not forced to run at full throttle
if (pow_sleep != std::chrono::nanoseconds (0))
{
std::this_thread::sleep_for (pow_sleep);
}
}
}
lock.lock ();
if (ticket == ticket_l)
{
// If the ticket matches what we started with, we're the ones that found the solution
debug_assert (output >= current_l.difficulty);
debug_assert (current_l.difficulty == 0 || network_constants.work.value (current_l.item, work) == output);
// Signal other threads to stop their work next time they check ticket
++ticket;
pending.pop_front ();
lock.unlock ();
current_l.callback (work);
lock.lock ();
}
else
{
// A different thread found a solution
}
}
else
{
// Wait for a work request
producer_condition.wait (lock);
}
}
}
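
// Cancel pending requests for the given root. If that root is at the front of the queue it may already be in progress, so the ticket is bumped to stop in-flight attempts; callbacks of removed requests are invoked with boost::none.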
void nano::work_pool::cancel (nano::root const & root_a)
{
nano::lock_guard<nano::mutex> lock{ mutex };
if (!done)
{
if (!pending.empty ())
{
if (pending.front ().item == root_a)
{
++ticket;
}
}
pending.remove_if ([&root_a] (decltype (pending)::value_type const & item_a) {
bool result{ false };
if (item_a.item == root_a)
{
if (item_a.callback)
{
item_a.callback (boost::none);
}
result = true;
}
return result;
});
}
}
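
// Mark the pool as done, bump the ticket to interrupt any in-progress attempt, and wake all worker threads so they can exit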
void nano::work_pool::stop ()
{
{
nano::lock_guard<nano::mutex> lock{ mutex };
done = true;
++ticket;
}
producer_condition.notify_all ();
}
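
// Asynchronous work generation: queue the request and invoke the callback with the resulting work value once a worker finds it. If the pool has no worker threads the callback is invoked immediately with boost::none.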
void nano::work_pool::generate (nano::work_version const version_a, nano::root const & root_a, uint64_t difficulty_a, std::function<void (boost::optional<uint64_t> const &)> callback_a)
{
debug_assert (!root_a.is_zero ());
if (!threads.empty ())
{
{
nano::lock_guard<nano::mutex> lock{ mutex };
pending.emplace_back (version_a, root_a, difficulty_a, callback_a);
}
producer_condition.notify_all ();
}
else if (callback_a)
{
callback_a (boost::none);
}
}
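
// Blocking convenience overload for dev-network tests, using the base dev work threshold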
boost::optional<uint64_t> nano::work_pool::generate (nano::root const & root_a)
{
debug_assert (network_constants.is_dev_network ());
return generate (nano::work_version::work_1, root_a, network_constants.work.base);
}
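
// Blocking convenience overload for dev-network tests with an explicit difficulty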
boost::optional<uint64_t> nano::work_pool::generate (nano::root const & root_a, uint64_t difficulty_a)
{
debug_assert (network_constants.is_dev_network ());
return generate (nano::work_version::work_1, root_a, difficulty_a);
}
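
// Blocking work generation: wraps the asynchronous overload in a promise/future pair and waits for a worker to produce the result. Returns an empty optional if the pool has no worker threads.
// Illustrative usage (a sketch, not part of this file):
//   if (auto work = pool.generate (nano::work_version::work_1, root, difficulty))
//   {
//       // *work satisfies `difficulty` for `root`
//   }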
boost::optional<uint64_t> nano::work_pool::generate (nano::work_version const version_a, nano::root const & root_a, uint64_t difficulty_a)
{
boost::optional<uint64_t> result;
if (!threads.empty ())
{
std::promise<boost::optional<uint64_t>> work;
std::future<boost::optional<uint64_t>> future = work.get_future ();
generate (version_a, root_a, difficulty_a, [&work] (boost::optional<uint64_t> work_a) {
work.set_value (work_a);
});
result = future.get ().value ();
}
return result;
}
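
// Number of work requests currently queued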
size_t nano::work_pool::size ()
{
nano::lock_guard<nano::mutex> lock{ mutex };
return pending.size ();
}
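
// Collect diagnostic container sizes: the pending request queue and the work observers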
nano::container_info nano::work_pool::container_info () const
{
nano::lock_guard<nano::mutex> guard{ mutex };
nano::container_info info;
info.put ("pending", pending);
info.add ("work_observers", work_observers.container_info ());
return info;
}