doc: fix typos

Co-authored-by: Ragnar <rodiondenmark@gmail.com>
Co-authored-by: VolodymyrBg <aqdrgg19@gmail.com>
fanquake
2025-05-29 15:05:30 +01:00
parent c797e50dda
commit e50312eab0
20 changed files with 24 additions and 26 deletions


@@ -54,7 +54,7 @@ def run():
out_sdktgz_path = pathlib.Path("./{}.tar.gz".format(out_name))
def tarfp_add_with_base_change(tarfp, dir_to_add, alt_base_dir):
"""Add all files in dir_to_add to tarfp, but prepent alt_base_dir to the files'
"""Add all files in dir_to_add to tarfp, but prepend alt_base_dir to the files'
names
e.g. if the only file under /root/bazdir is /root/bazdir/qux, invoking:


@@ -152,7 +152,7 @@ if res:
elif res.status_code == 404:
print('The specified faucet URL does not exist. Please check for any server issues/typo.')
elif res.status_code == 429:
-print('The script does not allow for repeated transactions as the global faucet is rate-limitied to 1 request/IP/day. You can access the faucet website to get more coins manually')
+print('The script does not allow for repeated transactions as the global faucet is rate-limited to 1 request/IP/day. You can access the faucet website to get more coins manually')
else:
print(f'Returned Error Code {res.status_code}\n{res.text}\n')
print('Please check the provided arguments for their validity and/or any possible typo.')


@@ -194,7 +194,7 @@ You can do:
git range-diff master previously-reviewed-head new-head
```
-Note that `git range-diff` also work for rebases:
+Note that `git range-diff` also works for rebases:
```
P1--P2--P3--P4--P5 <-- previously-reviewed-head


@@ -1,4 +1,4 @@
-- Logs now include which peer sent us a header. Additionaly there are fewer
+- Logs now include which peer sent us a header. Additionally there are fewer
redundant header log messages. A side-effect of this change is that for
some untypical cases new headers aren't logged anymore, e.g. a direct
`BLOCK` message with a previously unknown header and `submitheader` RPC. (#27826)


@@ -127,7 +127,7 @@ serialized transaction.
Notifies about all transactions, both when they are added to mempool or when a new block
arrives. This means a transaction could be published multiple times: first when it enters
-mempool and then again in each block that includes it. The body part of the mesage is the
+mempool and then again in each block that includes it. The body part of the message is the
32-byte transaction hash in reversed byte order.
#### rawblock


@@ -309,7 +309,7 @@ std::vector<CNetAddr> GetLocalAddresses()
if (status != NO_ERROR) {
// This includes ERROR_NO_DATA if there are no addresses and thus there's not even one PIP_ADAPTER_ADDRESSES
// record in the returned structure.
-LogPrintLevel(BCLog::NET, BCLog::Level::Error, "Could not get local adapter addreses: %s\n", NetworkErrorString(status));
+LogPrintLevel(BCLog::NET, BCLog::Level::Error, "Could not get local adapter addresses: %s\n", NetworkErrorString(status));
return addresses;
}


@@ -198,7 +198,7 @@ FUZZ_TARGET(muhash)
muhash3.Finalize(out2);
},
[&] {
-// Test that dividing a MuHash by itself brings it back to it's initial state
+// Test that dividing a MuHash by itself brings it back to its initial state
// See note about clang + self-assignment in test/uint256_tests.cpp
#if defined(__clang__)
@@ -216,7 +216,7 @@ FUZZ_TARGET(muhash)
out2 = initial_state_hash;
},
[&] {
-// Test that removing all added elements brings the object back to it's initial state
+// Test that removing all added elements brings the object back to its initial state
muhash.Remove(data);
muhash.Remove(data2);
muhash.Finalize(out);


@@ -32,9 +32,9 @@ public:
PeerManager::Options peerman_opts;
node::ApplyArgsManOptions(*m_node.args, peerman_opts);
peerman_opts.max_headers_result = FUZZ_MAX_HEADERS_RESULTS;
-// The peerman's rng is a global that is re-used, so it will be re-used
+// The peerman's rng is a global that is reused, so it will be reused
// and may cause non-determinism between runs. This may even influence
-// the global RNG, because seeding may be done from the gloabl one. For
+// the global RNG, because seeding may be done from the global one. For
// now, avoid it influencing the global RNG, and initialize it with a
// constant instead.
peerman_opts.deterministic_rng = true;


@@ -27,7 +27,7 @@ FUZZ_TARGET(policy_estimator_io, .init = initialize_policy_estimator_io)
FuzzedDataProvider fuzzed_data_provider(buffer.data(), buffer.size());
FuzzedFileProvider fuzzed_file_provider{fuzzed_data_provider};
AutoFile fuzzed_auto_file{fuzzed_file_provider.open()};
-// Re-using block_policy_estimator across runs to avoid costly creation of CBlockPolicyEstimator object.
+// Reusing block_policy_estimator across runs to avoid costly creation of CBlockPolicyEstimator object.
static CBlockPolicyEstimator block_policy_estimator{FeeestPath(*g_setup->m_node.args), DEFAULT_ACCEPT_STALE_FEE_ESTIMATES};
if (block_policy_estimator.Read(fuzzed_auto_file)) {
block_policy_estimator.Write(fuzzed_auto_file);


@@ -104,7 +104,7 @@ void utxo_snapshot_fuzz(FuzzBufferType buffer)
FuzzedDataProvider fuzzed_data_provider(buffer.data(), buffer.size());
SetMockTime(ConsumeTime(fuzzed_data_provider, /*min=*/1296688602)); // regtest genesis block timestamp
auto& setup{*g_setup};
-bool dirty_chainman{false}; // Re-use the global chainman, but reset it when it is dirty
+bool dirty_chainman{false}; // Reuse the global chainman, but reset it when it is dirty
auto& chainman{*setup.m_node.chainman};
const auto snapshot_path = gArgs.GetDataDirNet() / "fuzzed_snapshot.dat";


@@ -53,7 +53,7 @@ static std::string TempPath(std::string_view pattern)
//! on the object through FooInterface (defined in ipc_test.capnp).
void IpcPipeTest()
{
-// Setup: create FooImplemention object and listen for FooInterface requests
+// Setup: create FooImplementation object and listen for FooInterface requests
std::promise<std::unique_ptr<mp::ProxyClient<gen::FooInterface>>> foo_promise;
std::function<void()> disconnect_client;
std::thread thread([&]() {


@@ -563,7 +563,7 @@ BOOST_FIXTURE_TEST_CASE(calc_feerate_diagram_rbf, TestChain100Setup)
BOOST_CHECK(replace_two_chunks_single_cluster->second == expected_new_chunks);
}
-// You can have more than two direct conflicts if the there are multiple affected clusters, all of size 2 or less
+// You can have more than two direct conflicts if there are multiple affected clusters, all of size 2 or less
const auto conflict_1 = make_tx(/*inputs=*/ {m_coinbase_txns[2]}, /*output_values=*/ {10 * COIN});
AddToMempool(pool, entry.Fee(low_fee).FromTx(conflict_1));
const auto conflict_1_entry = pool.GetIter(conflict_1->GetHash()).value();


@@ -51,7 +51,7 @@ BOOST_AUTO_TEST_CASE(ForgetPeerTest)
TxReconciliationTracker tracker(TXRECONCILIATION_VERSION);
NodeId peer_id0 = 0;
-// Removing peer after pre-registring works and does not let to register the peer.
+// Removing peer after pre-registering works and does not let to register the peer.
tracker.PreRegisterPeer(peer_id0);
tracker.ForgetPeer(peer_id0);
BOOST_CHECK_EQUAL(tracker.RegisterPeer(peer_id0, true, 1, 1), ReconciliationRegisterResult::NOT_FOUND);


@@ -324,7 +324,7 @@ private:
ChunkIndex m_main_chunkindex;
/** Number of index-observing objects in existence (BlockBuilderImpls). */
size_t m_main_chunkindex_observers{0};
-/** Cache of discarded ChunkIndex node handles to re-use, avoiding additional allocation. */
+/** Cache of discarded ChunkIndex node handles to reuse, avoiding additional allocation. */
std::vector<ChunkIndex::node_type> m_main_chunkindex_discarded;
/** A Locator that describes whether, where, and in which Cluster an Entry appears.


@@ -61,7 +61,7 @@ bool DumpWallet(const ArgsManager& args, WalletDatabase& db, bilingual_str& erro
// Write out the file format
std::string format = db.Format();
-// BDB files that are opened using BerkeleyRODatabase have it's format as "bdb_ro"
+// BDB files that are opened using BerkeleyRODatabase have its format as "bdb_ro"
// We want to override that format back to "bdb"
if (format == "bdb_ro") {
format = "bdb";


@@ -1042,7 +1042,7 @@ bool DescriptorScriptPubKeyMan::TopUpWithDB(WalletBatch& batch, unsigned int siz
const CPubKey& pubkey = pk_pair.second;
if (m_map_pubkeys.count(pubkey) != 0) {
// We don't need to give an error here.
-// It doesn't matter which of many valid indexes the pubkey has, we just need an index where we can derive it and it's private key
+// It doesn't matter which of many valid indexes the pubkey has, we just need an index where we can derive it and its private key
continue;
}
m_map_pubkeys[pubkey] = i;
@@ -1447,7 +1447,7 @@ void DescriptorScriptPubKeyMan::SetCache(const DescriptorCache& cache)
const CPubKey& pubkey = pk_pair.second;
if (m_map_pubkeys.count(pubkey) != 0) {
// We don't need to give an error here.
-// It doesn't matter which of many valid indexes the pubkey has, we just need an index where we can derive it and it's private key
+// It doesn't matter which of many valid indexes the pubkey has, we just need an index where we can derive it and its private key
continue;
}
m_map_pubkeys[pubkey] = i;


@@ -1090,7 +1090,7 @@ static util::Result<CreatedTransactionResult> CreateTransactionInternal(
// Get size of spending the change output
int change_spend_size = CalculateMaximumSignedInputSize(change_prototype_txout, &wallet, /*coin_control=*/nullptr);
// If the wallet doesn't know how to sign change output, assume p2sh-p2wpkh
-// as lower-bound to allow BnB to do it's thing
+// as lower-bound to allow BnB to do its thing
if (change_spend_size == -1) {
coin_selection_params.change_spend_size = DUMMY_NESTED_P2WPKH_INPUT_SIZE;
} else {


@@ -211,7 +211,7 @@ class MempoolUpdateFromBlockTest(BitcoinTestFramework):
# Last tx fits now
self.nodes[0].sendrawtransaction(chain[-1]["hex"])
-# Finally, reorg to empty chain kick everything back into mempool
+# Finally, reorg to empty chain to kick everything back into mempool
# at normal chain limits
for block in fork_blocks:
self.nodes[0].submitblock(block.serialize().hex())


@@ -105,7 +105,7 @@ class MiningTest(BitcoinTestFramework):
wallet_sigops = MiniWallet(node, mode=MiniWalletMode.RAW_P2PK)
self.generate(wallet_sigops, 1, sync_fun=self.no_op)
-# Mature with regular coinbases to prevent inteference with other tests
+# Mature with regular coinbases to prevent interference with other tests
self.generate(self.wallet, 100, sync_fun=self.no_op)
# Generate three transactions that must be mined in sequence


@@ -1,15 +1,13 @@
afile
amountIn
asend
anc
blockin
bu
cachable
clen
crypted
debbugs
deques
fo
fpr
hashIn
hights
incomin
@@ -18,8 +16,8 @@ lief
mor
nd
nin
ot
outIn
re-use
requestor
ser
siz