Fixed spelling in comments (#4726)

RickiNano 2024-09-10 14:55:09 +02:00 committed by GitHub
commit 628a597de5
16 changed files with 20 additions and 20 deletions

View file

@@ -149,7 +149,7 @@ void assert_internal (char const * check_expr, char const * func, char const * f
 // As there is no async-signal-safe way to generate stacktraces on Windows it must be done before aborting
 #ifdef _WIN32
 {
-// Try construct the stacktrace dump in the same folder as the the running executable, otherwise use the current directory.
+// Try construct the stacktrace dump in the same folder as the running executable, otherwise use the current directory.
 boost::system::error_code err;
 auto running_executable_filepath = boost::dll::program_location (err);
 std::string filename = is_release_assert ? "nano_node_backtrace_release_assert.txt" : "nano_node_backtrace_assert.txt";
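For context on the pattern this hunk touches: the node writes a stacktrace dump to a file next to the running executable (falling back to the current directory) before aborting. A minimal sketch of that pattern, not the node's actual implementation, assuming Boost.Stacktrace, Boost.DLL and Boost.Filesystem; the function name is hypothetical:

#include <boost/dll/runtime_symbol_info.hpp>
#include <boost/filesystem.hpp>
#include <boost/stacktrace.hpp>
#include <boost/system/error_code.hpp>
#include <cstdlib>

// Sketch only: dump a backtrace next to the executable, falling back to the current directory.
void dump_backtrace_and_abort ()
{
	boost::system::error_code err;
	auto running_executable_filepath = boost::dll::program_location (err);
	auto folder = err ? boost::filesystem::current_path () : running_executable_filepath.parent_path ();
	auto dump_path = folder / "nano_node_backtrace_assert.txt";
	boost::stacktrace::safe_dump_to (dump_path.string ().c_str ()); // write frames before aborting
	std::abort ();
}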

View file

@@ -218,7 +218,7 @@ int main (int argc, char * const * argv)
 return std::accumulate (reps.begin (), reps.end (), nano::uint128_t{ 0 }, [] (auto sum, auto const & rep) { return sum + rep.second; });
 };
-// Hardcoded weights are filtered to a cummulative weight of 99%, need to do the same for ledger weights
+// Hardcoded weights are filtered to a cumulative weight of 99%, need to do the same for ledger weights
 std::remove_const_t<decltype (ledger_unfiltered)> ledger;
 {
 std::vector<std::pair<nano::account, nano::uint128_t>> sorted;
@@ -1721,7 +1721,7 @@ int main (int argc, char * const * argv)
 {
 std::cout << boost::str (boost::format ("%1% pending blocks validated\n") % count);
 }
-// Check block existance
+// Check block existence
 auto block = node->ledger.any.block_get (transaction, key.hash);
 bool pruned (false);
 if (block == nullptr)
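As an aside on the first hunk above: the 99% cut-off means both hardcoded and ledger weights are trimmed to the accounts whose running total covers 99% of the overall voting weight. A rough, self-contained sketch of that filtering (the types are simplified stand-ins for nano::account and nano::uint128_t, not the node's code):

#include <algorithm>
#include <cstdint>
#include <numeric>
#include <utility>
#include <vector>

using weight_t = unsigned long long; // stand-in for nano::uint128_t in this sketch
using rep_entry = std::pair<uint64_t, weight_t>; // (account id stand-in, weight)

// Keep the heaviest representatives until they cover 99% of the total weight.
std::vector<rep_entry> filter_to_99_percent (std::vector<rep_entry> reps)
{
	std::sort (reps.begin (), reps.end (), [] (auto const & a, auto const & b) { return a.second > b.second; });
	weight_t total = std::accumulate (reps.begin (), reps.end (), weight_t{ 0 }, [] (auto sum, auto const & rep) { return sum + rep.second; });
	weight_t cutoff = total / 100 * 99;
	std::vector<rep_entry> filtered;
	weight_t running{ 0 };
	for (auto const & rep : reps)
	{
		if (running >= cutoff)
		{
			break; // remaining accounts hold less than the last 1% of weight
		}
		filtered.push_back (rep);
		running += rep.second;
	}
	return filtered;
}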

View file

@@ -16,7 +16,7 @@ namespace bootstrap
 {
 /**
  * Class to read a block-type byte followed by a serialised block from a stream.
- * It is typically used to used to read a series of block-types and blocks terminated by a not-a-block type.
+ * It is typically used to read a series of block-types and blocks terminated by a not-a-block type.
  */
 class block_deserializer : public std::enable_shared_from_this<nano::bootstrap::block_deserializer>
 {
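The framing the class comment describes is a type byte followed by the block's serialised bytes, repeated until a not-a-block type terminates the series. A hypothetical, self-contained sketch of such a read loop (the type codes and per-type payload sizes below are made up for illustration and are not the real wire format):

#include <cstddef>
#include <cstdint>
#include <istream>
#include <string>
#include <vector>

// Placeholder type codes; the real enumeration lives elsewhere in the codebase.
enum class block_type : uint8_t { not_a_block = 1, send = 2, state = 6 };

// Hypothetical fixed payload sizes per type, just to keep the sketch runnable.
std::size_t payload_size (block_type type)
{
	return type == block_type::state ? 216 : 152;
}

// Read a type byte, then that many payload bytes, until the not-a-block terminator.
std::vector<std::string> read_blocks (std::istream & input)
{
	std::vector<std::string> blocks;
	char type_byte;
	while (input.get (type_byte))
	{
		auto type = static_cast<block_type> (static_cast<uint8_t> (type_byte));
		if (type == block_type::not_a_block)
		{
			break; // terminator: the series of blocks has ended
		}
		std::string payload (payload_size (type), '\0');
		if (!input.read (&payload[0], payload.size ()))
		{
			break; // truncated stream
		}
		blocks.push_back (payload);
	}
	return blocks;
}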

View file

@@ -345,9 +345,9 @@ void nano::bulk_pull_account_client::receive_pending ()
  * The account is supplied as the "start" member, and the final block to
  * send is the "end" member. The "start" member may also be a block
  * hash, in which case the that hash is used as the start of a chain
- * to send. To determine if "start" is interpretted as an account or
+ * to send. To determine if "start" is interpreted as an account or
  * hash, the ledger is checked to see if the block specified exists,
- * if not then it is interpretted as an account.
+ * if not then it is interpreted as an account.
  *
  * Additionally, if "start" is specified as a block hash the range
  * is inclusive of that block hash, that is the range will be:
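The decision described above (treat "start" as a block hash if such a block exists in the ledger, otherwise as an account) can be sketched as follows; ledger_stub, account_heads and resolve_start are illustrative stand-ins, not the node's API:

#include <optional>
#include <string>
#include <unordered_map>
#include <unordered_set>

// Simplified stand-in for the ledger lookups the server performs.
struct ledger_stub
{
	std::unordered_set<std::string> blocks; // hashes of blocks known to the ledger
	std::unordered_map<std::string, std::string> account_heads; // account -> head block hash
};

// Resolve the "start" member to the first block hash to send, mirroring the comment above.
std::optional<std::string> resolve_start (ledger_stub const & ledger, std::string const & start)
{
	if (ledger.blocks.count (start) > 0)
	{
		return start; // a block with this hash exists, so "start" is a hash and the range includes it
	}
	auto existing = ledger.account_heads.find (start);
	if (existing != ledger.account_heads.end ())
	{
		return existing->second; // otherwise "start" is interpreted as an account
	}
	return std::nullopt; // unknown account and unknown hash
}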
@@ -776,7 +776,7 @@ std::pair<std::unique_ptr<nano::pending_key>, std::unique_ptr<nano::pending_info
 /*
  * If the pending_address_only flag is set, de-duplicate the
  * responses. The responses are the address of the sender,
- * so they are are part of the pending table's information
+ * so they are part of the pending table's information
  * and not key, so we have to de-duplicate them manually.
  */
 if (pending_address_only)

View file

@@ -67,7 +67,7 @@ nano::error nano::bootstrap_ascending_config::serialize (nano::tomlconfig & toml
 toml.put ("request_timeout", request_timeout.count (), "Timeout in milliseconds for incoming ascending bootstrap messages to be processed.\ntype:milliseconds");
 toml.put ("throttle_coefficient", throttle_coefficient, "Scales the number of samples to track for bootstrap throttling.\ntype:uint64");
 toml.put ("throttle_wait", throttle_wait.count (), "Length of time to wait between requests when throttled.\ntype:milliseconds");
-toml.put ("block_processor_threshold", block_processor_threshold, "Asending bootstrap will wait while block processor has more than this many blocks queued.\ntype:uint64");
+toml.put ("block_processor_threshold", block_processor_threshold, "Ascending bootstrap will wait while block processor has more than this many blocks queued.\ntype:uint64");
 toml.put ("max_requests", max_requests, "Maximum total number of in flight requests.\ntype:uint64");
 nano::tomlconfig account_sets_l;

View file

@@ -120,7 +120,7 @@ bool nano::bootstrap_attempt_legacy::request_bulk_push_target (std::pair<nano::b
 void nano::bootstrap_attempt_legacy::set_start_account (nano::account const & start_account_a)
 {
-// Add last account fron frontier request
+// Add last account from frontier request
 nano::lock_guard<nano::mutex> lock{ mutex };
 start_account = start_account_a;
 }

View file

@@ -680,7 +680,7 @@ void nano::bootstrap_ascending::service::process (const nano::asc_pull_ack::bloc
 {
 if (block == blocks.back ())
 {
-// It's the last block submitted for this account chanin, reset timestamp to allow more requests
+// It's the last block submitted for this account chain, reset timestamp to allow more requests
 block_processor.add (block, nano::block_source::bootstrap, nullptr, [this, account = tag.account] (auto result) {
 stats.inc (nano::stat::type::bootstrap_ascending, nano::stat::detail::timestamp_reset);
 {

View file

@@ -23,7 +23,7 @@ std::string nano::error_cli_messages::message (int ev) const
 case nano::error_cli::generic:
 return "Unknown error";
 case nano::error_cli::parse_error:
-return "Coud not parse command line";
+return "Could not parse command line";
 case nano::error_cli::invalid_arguments:
 return "Invalid arguments";
 case nano::error_cli::unknown_command:
@@ -342,7 +342,7 @@ std::error_code nano::handle_node_options (boost::program_options::variables_map
 }
 else
 {
-std::cerr << "account comand requires one <key> option\n";
+std::cerr << "account command requires one <key> option\n";
 ec = nano::error_cli::invalid_arguments;
 }
 }

View file

@@ -263,7 +263,7 @@ void nano::distributed_work::success (std::string const & body_a, nano::tcp_endp
 }
 else
 {
-node.logger.error (nano::log::type::distributed_work, "Incorrect work response from {}:{} for root {} with diffuculty {}: {}",
+node.logger.error (nano::log::type::distributed_work, "Incorrect work response from {}:{} for root {} with difficulty {}: {}",
 nano::util::to_str (endpoint_a.address ()),
 endpoint_a.port (),
 request.root.to_string (),

View file

@@ -156,7 +156,7 @@ public:
 /**
  * Write to underlying socket. Writes goes through a queue protected by the strand. Thus, this function
  * can be called concurrently with other writes.
- * @note This function explicitely doesn't use nano::shared_const_buffer, as buffers usually originate from Flatbuffers
+ * @note This function explicitly doesn't use nano::shared_const_buffer, as buffers usually originate from Flatbuffers
  * and copying into the shared_const_buffer vector would impose a significant overhead for large requests and responses.
  */
 void queued_write (boost::asio::const_buffer const & buffer_a, std::function<void (boost::system::error_code const &, std::size_t)> callback_a)
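The strand-protected queue the comment describes can be illustrated with a small Boost.Asio sketch; this is the general pattern, not the node's socket class, and (as the original @note implies) the caller must keep the buffer's memory alive until its callback fires:

#include <boost/asio.hpp>
#include <deque>
#include <functional>
#include <memory>
#include <utility>

namespace asio = boost::asio;

// Sketch of a strand-protected write queue, safe to call from any thread.
class writer : public std::enable_shared_from_this<writer>
{
public:
	explicit writer (asio::ip::tcp::socket socket) :
		socket_{ std::move (socket) },
		strand_{ socket_.get_executor () }
	{
	}

	// The queue is only touched on the strand, so concurrent callers never race.
	void queued_write (asio::const_buffer buffer, std::function<void (boost::system::error_code const &, std::size_t)> callback)
	{
		asio::post (strand_, [this_l = shared_from_this (), buffer, callback = std::move (callback)] () {
			bool idle = this_l->queue_.empty ();
			this_l->queue_.push_back ({ buffer, std::move (callback) });
			if (idle)
			{
				this_l->write_next (); // nothing in flight, start draining the queue
			}
		});
	}

private:
	void write_next ()
	{
		auto & front = queue_.front ();
		asio::async_write (socket_, front.buffer,
		asio::bind_executor (strand_, [this_l = shared_from_this ()] (boost::system::error_code const & ec, std::size_t size) {
			auto entry = std::move (this_l->queue_.front ());
			this_l->queue_.pop_front ();
			if (entry.callback)
			{
				entry.callback (ec, size);
			}
			if (!ec && !this_l->queue_.empty ())
			{
				this_l->write_next (); // keep writing while more buffers are queued
			}
		}));
	}

	struct queued
	{
		asio::const_buffer buffer;
		std::function<void (boost::system::error_code const &, std::size_t)> callback;
	};

	asio::ip::tcp::socket socket_;
	asio::strand<asio::ip::tcp::socket::executor_type> strand_;
	std::deque<queued> queue_;
};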

View file

@@ -184,7 +184,7 @@ public:
 void publish (nano::publish const & message) override
 {
-// Put blocks that are being initally broadcasted in a separate queue, so that they won't have to compete with rebroadcasted blocks
+// Put blocks that are being initially broadcasted in a separate queue, so that they won't have to compete with rebroadcasted blocks
 // Both queues have the same priority and size, so the potential for exploiting this is limited
 bool added = node.block_processor.add (message.block, message.is_originator () ? nano::block_source::live_originator : nano::block_source::live, channel);
 if (!added)

View file

@@ -37,7 +37,7 @@ public:
 void start ();
 void stop ();
-// Manualy start an election for a block
+// Manually start an election for a block
 // Call action with confirmed block, may be different than what we started with
 void push (std::shared_ptr<nano::block> const &, boost::optional<nano::uint128_t> const & = boost::none);

View file

@@ -389,7 +389,7 @@ auto nano::transport::tcp_listener::accept_one (asio::ip::tcp::socket raw_socket
 try
 {
-// Best effor attempt to gracefully close the socket, shutdown before closing to avoid zombie sockets
+// Best effort attempt to gracefully close the socket, shutdown before closing to avoid zombie sockets
 raw_socket.shutdown (asio::ip::tcp::socket::shutdown_both);
 raw_socket.close ();
 }
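The shutdown-before-close ordering mentioned in the comment is the usual Boost.Asio way to avoid half-open ("zombie") sockets; a minimal sketch of the same idea, using the error_code overloads instead of the try/catch shown above:

#include <boost/asio.hpp>

// Best-effort graceful close: signal both directions first, then release the descriptor.
void close_gracefully (boost::asio::ip::tcp::socket & socket)
{
	boost::system::error_code ec;
	socket.shutdown (boost::asio::ip::tcp::socket::shutdown_both, ec); // peer sees an orderly EOF
	socket.close (ec); // errors ignored; the peer may already have disconnected
}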

View file

@@ -36,7 +36,7 @@ public:
 nano::mutex mutex;
 std::atomic<bool> stopped{ false };
 std::atomic<bool> handshake_received{ false };
-// Remote enpoint used to remove response channel even after socket closing
+// Remote endpoint used to remove response channel even after socket closing
 nano::tcp_endpoint remote_endpoint{ boost::asio::ip::address_v6::any (), 0 };
 std::chrono::steady_clock::time_point last_telemetry_req{};

View file

@@ -40,7 +40,7 @@ nano::stat::detail to_stat_detail (vote_source);
 // This class routes votes to their associated election
 // This class holds a weak_ptr as this container does not own the elections
-// Routing entries are removed perodically if the weak_ptr has expired
+// Routing entries are removed periodically if the weak_ptr has expired
 class vote_router final
 {
 public:
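The ownership model the comments describe (routing entries hold weak_ptr references and are swept periodically once the election dies) looks roughly like the sketch below; the class and member names are placeholders, not the vote_router API:

#include <iterator>
#include <memory>
#include <string>
#include <unordered_map>

struct election_stub
{
	// stand-in for nano::election
};

// Routes votes to elections without owning them; expired entries are swept periodically.
class routing_table
{
public:
	void connect (std::string const & hash, std::shared_ptr<election_stub> const & election)
	{
		entries[hash] = election; // stored as a weak_ptr, so the election can be destroyed independently
	}

	std::shared_ptr<election_stub> lookup (std::string const & hash) const
	{
		auto existing = entries.find (hash);
		if (existing == entries.end ())
		{
			return nullptr;
		}
		return existing->second.lock (); // nullptr if the election has already expired
	}

	// Called on a timer: drop routing entries whose election no longer exists.
	void cleanup ()
	{
		for (auto it = entries.begin (); it != entries.end ();)
		{
			it = it->second.expired () ? entries.erase (it) : std::next (it);
		}
	}

private:
	std::unordered_map<std::string, std::weak_ptr<election_stub>> entries;
};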

View file

@@ -255,7 +255,7 @@ namespace websocket
 /** Close the websocket and end the session */
 void close ();
-/** Read the next message. This implicitely handles incoming websocket pings. */
+/** Read the next message. This implicitly handles incoming websocket pings. */
 void read ();
 /** Enqueue \p message_a for writing to the websockets */