From 662e8db2d3efc651951315b295952a2eebb822cd Mon Sep 17 00:00:00 2001 From: dergoegge Date: Mon, 8 Jul 2024 15:21:40 +0100 Subject: [PATCH 1/5] [net processing] Lazily initialize m_recent_rejects --- src/net_processing.cpp | 27 +++++++++++++++++++-------- 1 file changed, 19 insertions(+), 8 deletions(-) diff --git a/src/net_processing.cpp b/src/net_processing.cpp index 87fd69ba502..fef8a7da5d1 100644 --- a/src/net_processing.cpp +++ b/src/net_processing.cpp @@ -897,7 +897,18 @@ private: * * Memory used: 1.3 MB */ - CRollingBloomFilter m_recent_rejects GUARDED_BY(m_tx_download_mutex){120'000, 0.000'001}; + std::unique_ptr<CRollingBloomFilter> m_recent_rejects GUARDED_BY(m_tx_download_mutex){nullptr}; + + CRollingBloomFilter& RecentRejectsFilter() EXCLUSIVE_LOCKS_REQUIRED(m_tx_download_mutex) + { + AssertLockHeld(m_tx_download_mutex); + + if (!m_recent_rejects) { + m_recent_rejects = std::make_unique<CRollingBloomFilter>(120'000, 0.000'001); + } + + return *m_recent_rejects; + } /** * Filter for: @@ -2080,7 +2091,7 @@ void PeerManagerImpl::ActiveTipChange(const CBlockIndex& new_tip, bool is_ibd) // If the chain tip has changed, previously rejected transactions might now be valid, e.g. due // to a timelock. Reset the rejection filters to give those transactions another chance if we // see them again. - m_recent_rejects.reset(); + RecentRejectsFilter().reset(); m_recent_rejects_reconsiderable.reset(); } } @@ -2304,7 +2315,7 @@ bool PeerManagerImpl::AlreadyHaveTx(const GenTxid& gtxid, bool include_reconside if (m_recent_confirmed_transactions.contains(hash)) return true; - return m_recent_rejects.contains(hash) || m_mempool.exists(gtxid); + return RecentRejectsFilter().contains(hash) || m_mempool.exists(gtxid); } bool PeerManagerImpl::AlreadyHaveBlock(const uint256& block_hash) @@ -3197,7 +3208,7 @@ void PeerManagerImpl::ProcessInvalidTx(NodeId nodeid, const CTransactionRef& ptx // submit it as part of a package later.
m_recent_rejects_reconsiderable.insert(ptx->GetWitnessHash().ToUint256()); } else { - m_recent_rejects.insert(ptx->GetWitnessHash().ToUint256()); + RecentRejectsFilter().insert(ptx->GetWitnessHash().ToUint256()); } m_txrequest.ForgetTxHash(ptx->GetWitnessHash()); // If the transaction failed for TX_INPUTS_NOT_STANDARD, @@ -3211,7 +3222,7 @@ void PeerManagerImpl::ProcessInvalidTx(NodeId nodeid, const CTransactionRef& ptx // We only add the txid if it differs from the wtxid, to avoid wasting entries in the // rolling bloom filter. if (state.GetResult() == TxValidationResult::TX_INPUTS_NOT_STANDARD && ptx->HasWitness()) { - m_recent_rejects.insert(ptx->GetHash().ToUint256()); + RecentRejectsFilter().insert(ptx->GetHash().ToUint256()); m_txrequest.ForgetTxHash(ptx->GetHash()); } if (maybe_add_extra_compact_tx && RecursiveDynamicUsage(*ptx) < 100000) { @@ -4609,7 +4620,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, // submit 1p1c packages. However, fail immediately if any are in m_recent_rejects. std::optional<uint256> rejected_parent_reconsiderable; for (const uint256& parent_txid : unique_parents) { - if (m_recent_rejects.contains(parent_txid)) { fRejectedParents = true; break; } else if (m_recent_rejects_reconsiderable.contains(parent_txid) && !m_mempool.exists(GenTxid::Txid(parent_txid))) { @@ -4658,8 +4669,8 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, // regardless of what witness is provided, we will not accept // this, so we don't need to allow for redownload of this txid // from any of our non-wtxidrelay peers.
- m_recent_rejects.insert(tx.GetHash().ToUint256()); - m_recent_rejects.insert(tx.GetWitnessHash().ToUint256()); + RecentRejectsFilter().insert(tx.GetHash().ToUint256()); + RecentRejectsFilter().insert(tx.GetWitnessHash().ToUint256()); m_txrequest.ForgetTxHash(tx.GetHash()); m_txrequest.ForgetTxHash(tx.GetWitnessHash()); } From fa0c87f19c1eca47ee7052f3c988ff7273801ff3 Mon Sep 17 00:00:00 2001 From: dergoegge Date: Mon, 8 Jul 2024 15:27:27 +0100 Subject: [PATCH 2/5] [net processing] Lazily initialize m_recent_rejects_reconsiderable --- src/net_processing.cpp | 31 +++++++++++++++++++++---------- 1 file changed, 21 insertions(+), 10 deletions(-) diff --git a/src/net_processing.cpp b/src/net_processing.cpp index fef8a7da5d1..75465b28d3d 100644 --- a/src/net_processing.cpp +++ b/src/net_processing.cpp @@ -930,7 +930,18 @@ private: * * Parameters are picked to be the same as m_recent_rejects, with the same rationale. */ - CRollingBloomFilter m_recent_rejects_reconsiderable GUARDED_BY(m_tx_download_mutex){120'000, 0.000'001}; + std::unique_ptr<CRollingBloomFilter> m_recent_rejects_reconsiderable GUARDED_BY(m_tx_download_mutex){nullptr}; + + CRollingBloomFilter& RecentRejectsReconsiderableFilter() EXCLUSIVE_LOCKS_REQUIRED(m_tx_download_mutex) + { + AssertLockHeld(m_tx_download_mutex); + + if (!m_recent_rejects_reconsiderable) { + m_recent_rejects_reconsiderable = std::make_unique<CRollingBloomFilter>(120'000, 0.000'001); + } + + return *m_recent_rejects_reconsiderable; + } /* * Filter for transactions that have been recently confirmed. * @@ -2092,7 +2103,7 @@ void PeerManagerImpl::ActiveTipChange(const CBlockIndex& new_tip, bool is_ibd) // to a timelock. Reset the rejection filters to give those transactions another chance if we // see them again.
RecentRejectsFilter().reset(); - m_recent_rejects_reconsiderable.reset(); + RecentRejectsReconsiderableFilter().reset(); } } @@ -2311,7 +2322,7 @@ bool PeerManagerImpl::AlreadyHaveTx(const GenTxid& gtxid, bool include_reconside if (m_orphanage.HaveTx(Wtxid::FromUint256(hash))) return true; } - if (include_reconsiderable && m_recent_rejects_reconsiderable.contains(hash)) return true; + if (include_reconsiderable && RecentRejectsReconsiderableFilter().contains(hash)) return true; if (m_recent_confirmed_transactions.contains(hash)) return true; @@ -3206,7 +3217,7 @@ void PeerManagerImpl::ProcessInvalidTx(NodeId nodeid, const CTransactionRef& ptx // If the result is TX_RECONSIDERABLE, add it to m_recent_rejects_reconsiderable // because we should not download or submit this transaction by itself again, but may // submit it as part of a package later. - m_recent_rejects_reconsiderable.insert(ptx->GetWitnessHash().ToUint256()); + RecentRejectsReconsiderableFilter().insert(ptx->GetWitnessHash().ToUint256()); } else { RecentRejectsFilter().insert(ptx->GetWitnessHash().ToUint256()); } @@ -3277,7 +3288,7 @@ void PeerManagerImpl::ProcessPackageResult(const PackageToValidate& package_to_v const auto& senders = package_to_validate.m_senders; if (package_result.m_state.IsInvalid()) { - m_recent_rejects_reconsiderable.insert(GetPackageHash(package)); + RecentRejectsReconsiderableFilter().insert(GetPackageHash(package)); } // We currently only expect to process 1-parent-1-child packages. Remove if this changes. if (!Assume(package.size() == 2)) return; @@ -3331,7 +3342,7 @@ std::optional PeerManagerImpl::Find1P1CPacka const auto& parent_wtxid{ptx->GetWitnessHash()}; - Assume(m_recent_rejects_reconsiderable.contains(parent_wtxid.ToUint256())); + Assume(RecentRejectsReconsiderableFilter().contains(parent_wtxid.ToUint256())); // Prefer children from this peer. 
This helps prevent censorship attempts in which an attacker // sends lots of fake children for the parent, and we (unluckily) keep selecting the fake @@ -3343,7 +3354,7 @@ std::optional PeerManagerImpl::Find1P1CPacka // most recent) one efficiently. for (const auto& child : cpfp_candidates_same_peer) { Package maybe_cpfp_package{ptx, child}; - if (!m_recent_rejects_reconsiderable.contains(GetPackageHash(maybe_cpfp_package))) { + if (!RecentRejectsReconsiderableFilter().contains(GetPackageHash(maybe_cpfp_package))) { return PeerManagerImpl::PackageToValidate{ptx, child, nodeid, nodeid}; } } @@ -3370,7 +3381,7 @@ std::optional PeerManagerImpl::Find1P1CPacka // cached in m_recent_rejects_reconsiderable. const auto [child_tx, child_sender] = cpfp_candidates_different_peer.at(index); Package maybe_cpfp_package{ptx, child_tx}; - if (!m_recent_rejects_reconsiderable.contains(GetPackageHash(maybe_cpfp_package))) { + if (!RecentRejectsReconsiderableFilter().contains(GetPackageHash(maybe_cpfp_package))) { return PeerManagerImpl::PackageToValidate{ptx, child_tx, nodeid, child_sender}; } } @@ -4562,7 +4573,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, } } - if (m_recent_rejects_reconsiderable.contains(wtxid)) { + if (RecentRejectsReconsiderableFilter().contains(wtxid)) { // When a transaction is already in m_recent_rejects_reconsiderable, we shouldn't submit // it by itself again. However, look for a matching child in the orphanage, as it is // possible that they succeed as a package. 
@@ -4623,7 +4634,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, if (RecentRejectsFilter().contains(parent_txid)) { fRejectedParents = true; break; - } else if (m_recent_rejects_reconsiderable.contains(parent_txid) && !m_mempool.exists(GenTxid::Txid(parent_txid))) { + } else if (RecentRejectsReconsiderableFilter().contains(parent_txid) && !m_mempool.exists(GenTxid::Txid(parent_txid))) { // More than 1 parent in m_recent_rejects_reconsiderable: 1p1c will not be // sufficient to accept this package, so just give up here. if (rejected_parent_reconsiderable.has_value()) { From 82de1bc478d54bea6125b459febfb2fb23929c61 Mon Sep 17 00:00:00 2001 From: dergoegge Date: Mon, 8 Jul 2024 15:31:28 +0100 Subject: [PATCH 3/5] [net processing] Lazily initialize m_recent_confirmed_transactions --- src/net_processing.cpp | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/src/net_processing.cpp b/src/net_processing.cpp index 75465b28d3d..2adb4ed7072 100644 --- a/src/net_processing.cpp +++ b/src/net_processing.cpp @@ -958,7 +958,18 @@ private: * transaction per day that would be inadvertently ignored (which is the * same probability that we have in the reject filter). 
*/ - CRollingBloomFilter m_recent_confirmed_transactions GUARDED_BY(m_tx_download_mutex){48'000, 0.000'001}; + std::unique_ptr<CRollingBloomFilter> m_recent_confirmed_transactions GUARDED_BY(m_tx_download_mutex){nullptr}; + + CRollingBloomFilter& RecentConfirmedTransactionsFilter() EXCLUSIVE_LOCKS_REQUIRED(m_tx_download_mutex) + { + AssertLockHeld(m_tx_download_mutex); + + if (!m_recent_confirmed_transactions) { + m_recent_confirmed_transactions = std::make_unique<CRollingBloomFilter>(48'000, 0.000'001); + } + + return *m_recent_confirmed_transactions; + } /** * For sending `inv`s to inbound peers, we use a single (exponentially @@ -2141,9 +2152,9 @@ void PeerManagerImpl::BlockConnected( m_orphanage.EraseForBlock(*pblock); for (const auto& ptx : pblock->vtx) { - m_recent_confirmed_transactions.insert(ptx->GetHash().ToUint256()); + RecentConfirmedTransactionsFilter().insert(ptx->GetHash().ToUint256()); if (ptx->HasWitness()) { - m_recent_confirmed_transactions.insert(ptx->GetWitnessHash().ToUint256()); + RecentConfirmedTransactionsFilter().insert(ptx->GetWitnessHash().ToUint256()); } m_txrequest.ForgetTxHash(ptx->GetHash()); m_txrequest.ForgetTxHash(ptx->GetWitnessHash()); @@ -2161,7 +2172,7 @@ void PeerManagerImpl::BlockDisconnected(const std::shared_ptr<const CBlock> &blo // presumably the most common case of relaying a confirmed transaction // should be just after a new block containing it is found.
LOCK(m_tx_download_mutex); - m_recent_confirmed_transactions.reset(); + RecentConfirmedTransactionsFilter().reset(); } /** @@ -2324,7 +2335,7 @@ bool PeerManagerImpl::AlreadyHaveTx(const GenTxid& gtxid, bool include_reconside if (include_reconsiderable && RecentRejectsReconsiderableFilter().contains(hash)) return true; - if (m_recent_confirmed_transactions.contains(hash)) return true; + if (RecentConfirmedTransactionsFilter().contains(hash)) return true; return RecentRejectsFilter().contains(hash) || m_mempool.exists(gtxid); } From a90ab4aec9cccd44d867842d6a1161d5758fcb2c Mon Sep 17 00:00:00 2001 From: dergoegge Date: Wed, 31 Jul 2024 13:19:28 +0100 Subject: [PATCH 4/5] scripted-diff: Rename lazily initialized bloom filters -BEGIN VERIFY SCRIPT- sed -i 's/m_recent_confirmed_transactions/m_lazy_recent_confirmed_transactions/g' $(git grep -l 'm_recent_confirmed_transactions') sed -i 's/m_recent_rejects_reconsiderable/m_lazy_recent_rejects_reconsiderable/g' $(git grep -l 'm_recent_rejects_reconsiderable') sed -i 's/m_recent_rejects/m_lazy_recent_rejects/g' $(git grep -l 'm_recent_rejects') -END VERIFY SCRIPT- --- src/net_processing.cpp | 68 +++++++++++++++--------------- test/functional/mempool_reorg.py | 2 +- test/functional/p2p_permissions.py | 2 +- 3 files changed, 36 insertions(+), 36 deletions(-) diff --git a/src/net_processing.cpp b/src/net_processing.cpp index 2adb4ed7072..0b9b8fbbe28 100644 --- a/src/net_processing.cpp +++ b/src/net_processing.cpp @@ -584,7 +584,7 @@ private: * @param[in] maybe_add_extra_compact_tx Whether this tx should be added to vExtraTxnForCompact. * Set to false if the tx has already been rejected before, * e.g. is an orphan, to avoid adding duplicate entries. - * Updates m_txrequest, m_recent_rejects, m_recent_rejects_reconsiderable, m_orphanage, and vExtraTxnForCompact. */ + * Updates m_txrequest, m_lazy_recent_rejects, m_lazy_recent_rejects_reconsiderable, m_orphanage, and vExtraTxnForCompact. 
*/ void ProcessInvalidTx(NodeId nodeid, const CTransactionRef& tx, const TxValidationState& result, bool maybe_add_extra_compact_tx) EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, g_msgproc_mutex, m_tx_download_mutex); @@ -776,9 +776,9 @@ private: /** Synchronizes tx download including TxRequestTracker, rejection filters, and TxOrphanage. * Lock invariants: * - A txhash (txid or wtxid) in m_txrequest is not also in m_orphanage. - * - A txhash (txid or wtxid) in m_txrequest is not also in m_recent_rejects. - * - A txhash (txid or wtxid) in m_txrequest is not also in m_recent_rejects_reconsiderable. - * - A txhash (txid or wtxid) in m_txrequest is not also in m_recent_confirmed_transactions. + * - A txhash (txid or wtxid) in m_txrequest is not also in m_lazy_recent_rejects. + * - A txhash (txid or wtxid) in m_txrequest is not also in m_lazy_recent_rejects_reconsiderable. + * - A txhash (txid or wtxid) in m_txrequest is not also in m_lazy_recent_confirmed_transactions. * - Each data structure's limits hold (m_orphanage max size, m_txrequest per-peer limits, etc). 
*/ Mutex m_tx_download_mutex ACQUIRED_BEFORE(m_mempool.cs); @@ -856,9 +856,9 @@ private: /** Check whether we already have this gtxid in: * - mempool * - orphanage - * - m_recent_rejects - * - m_recent_rejects_reconsiderable (if include_reconsiderable = true) - * - m_recent_confirmed_transactions + * - m_lazy_recent_rejects + * - m_lazy_recent_rejects_reconsiderable (if include_reconsiderable = true) + * - m_lazy_recent_confirmed_transactions * */ bool AlreadyHaveTx(const GenTxid& gtxid, bool include_reconsiderable) EXCLUSIVE_LOCKS_REQUIRED(m_tx_download_mutex); @@ -897,17 +897,17 @@ private: * * Memory used: 1.3 MB */ - std::unique_ptr<CRollingBloomFilter> m_recent_rejects GUARDED_BY(m_tx_download_mutex){nullptr}; + std::unique_ptr<CRollingBloomFilter> m_lazy_recent_rejects GUARDED_BY(m_tx_download_mutex){nullptr}; CRollingBloomFilter& RecentRejectsFilter() EXCLUSIVE_LOCKS_REQUIRED(m_tx_download_mutex) { AssertLockHeld(m_tx_download_mutex); - if (!m_recent_rejects) { - m_recent_rejects = std::make_unique<CRollingBloomFilter>(120'000, 0.000'001); + if (!m_lazy_recent_rejects) { + m_lazy_recent_rejects = std::make_unique<CRollingBloomFilter>(120'000, 0.000'001); } - return *m_recent_rejects; + return *m_lazy_recent_rejects; } /** @@ -916,7 +916,7 @@ private: * eligible for reconsideration if submitted with other transactions. * (2) packages (see GetPackageHash) we have already rejected before and should not retry. * - * Similar to m_recent_rejects, this filter is used to save bandwidth when e.g. all of our peers + * Similar to m_lazy_recent_rejects, this filter is used to save bandwidth when e.g. all of our peers * have larger mempools and thus lower minimum feerates than us. * * When a transaction's error is TxValidationResult::TX_RECONSIDERABLE (in a package or by @@ -928,19 +928,19 @@ private: * * Reset this filter when the chain tip changes. * - * Parameters are picked to be the same as m_recent_rejects, with the same rationale. + * Parameters are picked to be the same as m_lazy_recent_rejects, with the same rationale.
*/ - std::unique_ptr<CRollingBloomFilter> m_recent_rejects_reconsiderable GUARDED_BY(m_tx_download_mutex){nullptr}; + std::unique_ptr<CRollingBloomFilter> m_lazy_recent_rejects_reconsiderable GUARDED_BY(m_tx_download_mutex){nullptr}; CRollingBloomFilter& RecentRejectsReconsiderableFilter() EXCLUSIVE_LOCKS_REQUIRED(m_tx_download_mutex) { AssertLockHeld(m_tx_download_mutex); - if (!m_recent_rejects_reconsiderable) { - m_recent_rejects_reconsiderable = std::make_unique<CRollingBloomFilter>(120'000, 0.000'001); + if (!m_lazy_recent_rejects_reconsiderable) { + m_lazy_recent_rejects_reconsiderable = std::make_unique<CRollingBloomFilter>(120'000, 0.000'001); } - return *m_recent_rejects_reconsiderable; + return *m_lazy_recent_rejects_reconsiderable; } /* @@ -958,17 +958,17 @@ private: * transaction per day that would be inadvertently ignored (which is the * same probability that we have in the reject filter). */ - std::unique_ptr<CRollingBloomFilter> m_recent_confirmed_transactions GUARDED_BY(m_tx_download_mutex){nullptr}; + std::unique_ptr<CRollingBloomFilter> m_lazy_recent_confirmed_transactions GUARDED_BY(m_tx_download_mutex){nullptr}; CRollingBloomFilter& RecentConfirmedTransactionsFilter() EXCLUSIVE_LOCKS_REQUIRED(m_tx_download_mutex) { AssertLockHeld(m_tx_download_mutex); - if (!m_recent_confirmed_transactions) { - m_recent_confirmed_transactions = std::make_unique<CRollingBloomFilter>(48'000, 0.000'001); + if (!m_lazy_recent_confirmed_transactions) { + m_lazy_recent_confirmed_transactions = std::make_unique<CRollingBloomFilter>(48'000, 0.000'001); } - return *m_recent_confirmed_transactions; + return *m_lazy_recent_confirmed_transactions; } /** @@ -3225,7 +3225,7 @@ void PeerManagerImpl::ProcessInvalidTx(NodeId nodeid, const CTransactionRef& ptx // for concerns around weakening security of unupgraded nodes // if we start doing this too early.
if (state.GetResult() == TxValidationResult::TX_RECONSIDERABLE) { - // If the result is TX_RECONSIDERABLE, add it to m_recent_rejects_reconsiderable + // If the result is TX_RECONSIDERABLE, add it to m_lazy_recent_rejects_reconsiderable // because we should not download or submit this transaction by itself again, but may // submit it as part of a package later. RecentRejectsReconsiderableFilter().insert(ptx->GetWitnessHash().ToUint256()); @@ -3389,7 +3389,7 @@ std::optional PeerManagerImpl::Find1P1CPacka for (const auto index : tx_indices) { // If we already tried a package and failed for any reason, the combined hash was - // cached in m_recent_rejects_reconsiderable. + // cached in m_lazy_recent_rejects_reconsiderable. const auto [child_tx, child_sender] = cpfp_candidates_different_peer.at(index); Package maybe_cpfp_package{ptx, child_tx}; if (!RecentRejectsReconsiderableFilter().contains(GetPackageHash(maybe_cpfp_package))) { @@ -4585,7 +4585,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, } if (RecentRejectsReconsiderableFilter().contains(wtxid)) { - // When a transaction is already in m_recent_rejects_reconsiderable, we shouldn't submit + // When a transaction is already in m_lazy_recent_rejects_reconsiderable, we shouldn't submit // it by itself again. However, look for a matching child in the orphanage, as it is // possible that they succeed as a package. LogPrint(BCLog::TXPACKAGES, "found tx %s (wtxid=%s) in reconsiderable rejects, looking for child in orphanage\n", @@ -4597,20 +4597,20 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, ProcessPackageResult(package_to_validate.value(), package_result); } } - // If a tx is detected by m_recent_rejects it is ignored. Because we haven't + // If a tx is detected by m_lazy_recent_rejects it is ignored. 
Because we haven't // submitted the tx to our mempool, we won't have computed a DoS // score for it or determined exactly why we consider it invalid. // // This means we won't penalize any peer subsequently relaying a DoSy // tx (even if we penalized the first peer who gave it to us) because - // we have to account for m_recent_rejects showing false positives. In + // we have to account for m_lazy_recent_rejects showing false positives. In // other words, we shouldn't penalize a peer if we aren't *sure* they // submitted a DoSy tx. // - // Note that m_recent_rejects doesn't just record DoSy or invalid + // Note that m_lazy_recent_rejects doesn't just record DoSy or invalid // transactions, but any tx not accepted by the mempool, which may be // due to node policy (vs. consensus). So we can't blanket penalize a - // peer simply for relaying a tx that our m_recent_rejects has caught, + // peer simply for relaying a tx that our m_lazy_recent_rejects has caught, // regardless of false positives. return; } @@ -4637,16 +4637,16 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, std::sort(unique_parents.begin(), unique_parents.end()); unique_parents.erase(std::unique(unique_parents.begin(), unique_parents.end()), unique_parents.end()); - // Distinguish between parents in m_recent_rejects and m_recent_rejects_reconsiderable. - // We can tolerate having up to 1 parent in m_recent_rejects_reconsiderable since we - // submit 1p1c packages. However, fail immediately if any are in m_recent_rejects. + // Distinguish between parents in m_lazy_recent_rejects and m_lazy_recent_rejects_reconsiderable. + // We can tolerate having up to 1 parent in m_lazy_recent_rejects_reconsiderable since we + // submit 1p1c packages. However, fail immediately if any are in m_lazy_recent_rejects. 
std::optional<uint256> rejected_parent_reconsiderable; for (const uint256& parent_txid : unique_parents) { if (RecentRejectsFilter().contains(parent_txid)) { fRejectedParents = true; break; } else if (RecentRejectsReconsiderableFilter().contains(parent_txid) && !m_mempool.exists(GenTxid::Txid(parent_txid))) { - // More than 1 parent in m_recent_rejects_reconsiderable: 1p1c will not be + // More than 1 parent in m_lazy_recent_rejects_reconsiderable: 1p1c will not be // sufficient to accept this package, so just give up here. if (rejected_parent_reconsiderable.has_value()) { fRejectedParents = true; @@ -4666,7 +4666,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, // protocol for getting all unconfirmed parents. const auto gtxid{GenTxid::Txid(parent_txid)}; AddKnownTx(*peer, parent_txid); - // Exclude m_recent_rejects_reconsiderable: the missing parent may have been + // Exclude m_lazy_recent_rejects_reconsiderable: the missing parent may have been // previously rejected for being too low feerate. This orphan might CPFP it. if (!AlreadyHaveTx(gtxid, /*include_reconsiderable=*/false)) AddTxAnnouncement(pfrom, gtxid, current_time); } @@ -6339,7 +6339,7 @@ bool PeerManagerImpl::SendMessages(CNode* pto) entry.second.GetHash().ToString(), entry.first); } for (const GenTxid& gtxid : requestable) { - // Exclude m_recent_rejects_reconsiderable: we may be requesting a missing parent + // Exclude m_lazy_recent_rejects_reconsiderable: we may be requesting a missing parent // that was previously rejected for being too low feerate. if (!AlreadyHaveTx(gtxid, /*include_reconsiderable=*/false)) { LogPrint(BCLog::NET, "Requesting %s %s peer=%d\n", gtxid.IsWtxid() ?
"wtx" : "tx", diff --git a/test/functional/mempool_reorg.py b/test/functional/mempool_reorg.py index e27942760cc..74a353ab014 100755 --- a/test/functional/mempool_reorg.py +++ b/test/functional/mempool_reorg.py @@ -162,7 +162,7 @@ class MempoolCoinbaseTest(BitcoinTestFramework): self.log.info("Generate a block") last_block = self.generate(self.nodes[0], 1) # generate() implicitly syncs blocks, so that peer 1 gets the block before timelock_tx - # Otherwise, peer 1 would put the timelock_tx in m_recent_rejects + # Otherwise, peer 1 would put the timelock_tx in m_lazy_recent_rejects self.log.info("The time-locked transaction can now be spent") timelock_tx_id = self.nodes[0].sendrawtransaction(timelock_tx) diff --git a/test/functional/p2p_permissions.py b/test/functional/p2p_permissions.py index 5ca51016132..a9b164b0781 100755 --- a/test/functional/p2p_permissions.py +++ b/test/functional/p2p_permissions.py @@ -121,7 +121,7 @@ class P2PPermissionsTests(BitcoinTestFramework): tx.vout[0].nValue += 1 txid = tx.rehash() # Send the transaction twice. The first time, it'll be rejected by ATMP because it conflicts - # with a mempool transaction. The second time, it'll be in the m_recent_rejects filter. + # with a mempool transaction. The second time, it'll be in the m_lazy_recent_rejects filter. 
p2p_rebroadcast_wallet.send_txs_and_test( [tx], self.nodes[1], From afd237bb5d85923273a69f7b45dc6aae6aa1680e Mon Sep 17 00:00:00 2001 From: dergoegge Date: Thu, 4 Jul 2024 11:49:16 +0100 Subject: [PATCH 5/5] [fuzz] Harness for version handshake --- src/Makefile.test.include | 1 + src/test/fuzz/p2p_handshake.cpp | 107 ++++++++++++++++++++++++++++++++ src/test/util/net.h | 5 ++ 3 files changed, 113 insertions(+) create mode 100644 src/test/fuzz/p2p_handshake.cpp diff --git a/src/Makefile.test.include b/src/Makefile.test.include index 3d044983699..e2d6f94b249 100644 --- a/src/Makefile.test.include +++ b/src/Makefile.test.include @@ -350,6 +350,7 @@ test_fuzz_fuzz_SOURCES = \ test/fuzz/netaddress.cpp \ test/fuzz/netbase_dns_lookup.cpp \ test/fuzz/node_eviction.cpp \ + test/fuzz/p2p_handshake.cpp \ test/fuzz/p2p_transport_serialization.cpp \ test/fuzz/package_eval.cpp \ test/fuzz/parse_hd_keypath.cpp \ diff --git a/src/test/fuzz/p2p_handshake.cpp b/src/test/fuzz/p2p_handshake.cpp new file mode 100644 index 00000000000..217655ab70d --- /dev/null +++ b/src/test/fuzz/p2p_handshake.cpp @@ -0,0 +1,107 @@ +// Copyright (c) 2020-present The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. + +#include +#include +#include +#include +#include +#include +#include