Merge bitcoin/bitcoin#21562: [net processing] Various tidying up of PeerManagerImpl ctor

fde1bf4f61 [net processing] Default initialize m_recent_confirmed_transactions (John Newbery)
37dcd12d53 scripted-diff: Rename recentRejects (John Newbery)
cd9902ac50 [net processing] Default initialize recentRejects (John Newbery)
a28bfd1d4c [net processing] Default initialize m_stale_tip_check_time (John Newbery)
9190b01d8d [net processing] Add Orphanage empty consistency check (John Newbery)

Pull request description:

  - Use default initialization of PeerManagerImpl members where possible
  - Remove unique_ptr indirection where it's not needed
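
  The pattern is sketched below with invented class and member names (only the shape of the change mirrors the real code): construction moves out of the constructor and into default member initializers, and the filter member no longer sits behind a unique_ptr.

      #include <cstdint>
      #include <memory>
      #include <string>

      // Hypothetical before/after illustration of this PR's pattern; the names
      // are invented and std::string merely stands in for CRollingBloomFilter.
      class Before {
      public:
          Before() : m_stale_tip_check_time(0)
          {
              // Member constructed in the ctor body, behind a pointer.
              m_filter = std::make_unique<std::string>(120, 'x');
          }
      private:
          int64_t m_stale_tip_check_time;
          std::unique_ptr<std::string> m_filter;
      };

      class After {
      private:
          // Default member initializers: no ctor body and no heap indirection.
          int64_t m_stale_tip_check_time{0};
          std::string m_filter = std::string(120, 'x');
      };

  Dropping the pointer also removes the need for the runtime assert(recentRejects) checks, which the hunks below delete.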

ACKs for top commit:
  MarcoFalke:
    ACK fde1bf4f61 👞
  theStack:
    re-ACK fde1bf4f61

Tree-SHA512: 7ddedcc972df8e933e1fbe5c88b8ea17df89e1e58fc769518512c5540e49dc8eddb3f47e78d1329a6fc5644d2c1d11c981f681fd633f5218bfa4b3e6a86f3d7b
Committed by MarcoFalke on 2021-07-28 16:31:35 +02:00
4 changed files with 41 additions and 40 deletions

src/net_processing.cpp

@@ -397,7 +397,8 @@ private:
/** The height of the best chain */
std::atomic<int> m_best_height{-1};
int64_t m_stale_tip_check_time; //!< Next time to check for stale tip
/** Next time to check for stale tip */
int64_t m_stale_tip_check_time{0};
/** Whether this node is running in blocks only mode */
const bool m_ignore_incoming_txs;
@@ -470,16 +471,26 @@ private:
*
* Memory used: 1.3 MB
*/
std::unique_ptr<CRollingBloomFilter> recentRejects GUARDED_BY(cs_main);
CRollingBloomFilter m_recent_rejects GUARDED_BY(::cs_main){120'000, 0.000'001};
uint256 hashRecentRejectsChainTip GUARDED_BY(cs_main);
/*
* Filter for transactions that have been recently confirmed.
* We use this to avoid requesting transactions that have already been
confirmed.
*
* Blocks don't typically have more than 4000 transactions, so this should
* be at least six blocks (~1 hr) worth of transactions that we can store,
* inserting both a txid and wtxid for every observed transaction.
* If the number of transactions appearing in a block goes up, or if we are
* seeing getdata requests more than an hour after initial announcement, we
* can increase this number.
* The false positive rate of 1/1M should come out to less than 1
* transaction per day that would be inadvertently ignored (which is the
* same probability that we have in the reject filter).
*/
Mutex m_recent_confirmed_transactions_mutex;
std::unique_ptr<CRollingBloomFilter> m_recent_confirmed_transactions GUARDED_BY(m_recent_confirmed_transactions_mutex);
CRollingBloomFilter m_recent_confirmed_transactions GUARDED_BY(m_recent_confirmed_transactions_mutex){48'000, 0.000'001};
/** Have we requested this block from a peer */
bool IsBlockRequested(const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
@@ -1195,6 +1206,7 @@ void PeerManagerImpl::FinalizeNode(const CNode& node)
assert(m_outbound_peers_with_protect_from_disconnect == 0);
assert(m_wtxid_relay_peers == 0);
assert(m_txrequest.Size() == 0);
assert(m_orphanage.Size() == 0);
}
} // cs_main
if (node.fSuccessfullyConnected && misbehavior == 0 &&
@@ -1399,23 +1411,8 @@ PeerManagerImpl::PeerManagerImpl(const CChainParams& chainparams, CConnman& conn
m_banman(banman),
m_chainman(chainman),
m_mempool(pool),
m_stale_tip_check_time(0),
m_ignore_incoming_txs(ignore_incoming_txs)
{
// Initialize global variables that cannot be constructed at startup.
recentRejects.reset(new CRollingBloomFilter(120000, 0.000001));
// Blocks don't typically have more than 4000 transactions, so this should
// be at least six blocks (~1 hr) worth of transactions that we can store,
// inserting both a txid and wtxid for every observed transaction.
// If the number of transactions appearing in a block goes up, or if we are
// seeing getdata requests more than an hour after initial announcement, we
// can increase this number.
// The false positive rate of 1/1M should come out to less than 1
// transaction per day that would be inadvertently ignored (which is the
// same probability that we have in the reject filter).
m_recent_confirmed_transactions.reset(new CRollingBloomFilter(48000, 0.000001));
// Stale tip checking and peer eviction are on two different timers, but we
// don't want them to get out of sync due to drift in the scheduler, so we
// combine them in one function and schedule at the quicker (peer-eviction)
@@ -1441,9 +1438,9 @@ void PeerManagerImpl::BlockConnected(const std::shared_ptr<const CBlock>& pblock
{
LOCK(m_recent_confirmed_transactions_mutex);
for (const auto& ptx : pblock->vtx) {
m_recent_confirmed_transactions->insert(ptx->GetHash());
m_recent_confirmed_transactions.insert(ptx->GetHash());
if (ptx->GetHash() != ptx->GetWitnessHash()) {
m_recent_confirmed_transactions->insert(ptx->GetWitnessHash());
m_recent_confirmed_transactions.insert(ptx->GetWitnessHash());
}
}
}
@@ -1467,7 +1464,7 @@ void PeerManagerImpl::BlockDisconnected(const std::shared_ptr<const CBlock> &blo
// presumably the most common case of relaying a confirmed transaction
// should be just after a new block containing it is found.
LOCK(m_recent_confirmed_transactions_mutex);
m_recent_confirmed_transactions->reset();
m_recent_confirmed_transactions.reset();
}
// All of the following cache a recent block, and are protected by cs_most_recent_block
@@ -1607,14 +1604,13 @@ void PeerManagerImpl::BlockChecked(const CBlock& block, const BlockValidationSta
bool PeerManagerImpl::AlreadyHaveTx(const GenTxid& gtxid)
{
assert(recentRejects);
if (m_chainman.ActiveChain().Tip()->GetBlockHash() != hashRecentRejectsChainTip) {
// If the chain tip has changed previously rejected transactions
// might be now valid, e.g. due to a nLockTime'd tx becoming valid,
// or a double-spend. Reset the rejects filter and give those
// txs a second chance.
hashRecentRejectsChainTip = m_chainman.ActiveChain().Tip()->GetBlockHash();
recentRejects->reset();
m_recent_rejects.reset();
}
const uint256& hash = gtxid.GetHash();
@@ -1623,10 +1619,10 @@ bool PeerManagerImpl::AlreadyHaveTx(const GenTxid& gtxid)
{
LOCK(m_recent_confirmed_transactions_mutex);
if (m_recent_confirmed_transactions->contains(hash)) return true;
if (m_recent_confirmed_transactions.contains(hash)) return true;
}
return recentRejects->contains(hash) || m_mempool.exists(gtxid);
return m_recent_rejects.contains(hash) || m_mempool.exists(gtxid);
}
bool PeerManagerImpl::AlreadyHaveBlock(const uint256& block_hash)
@@ -2245,8 +2241,7 @@ void PeerManagerImpl::ProcessOrphanTx(std::set<uint256>& orphan_work_set)
// See also comments in https://github.com/bitcoin/bitcoin/pull/18044#discussion_r443419034
// for concerns around weakening security of unupgraded nodes
// if we start doing this too early.
assert(recentRejects);
recentRejects->insert(porphanTx->GetWitnessHash());
m_recent_rejects.insert(porphanTx->GetWitnessHash());
// If the transaction failed for TX_INPUTS_NOT_STANDARD,
// then we know that the witness was irrelevant to the policy
// failure, since this check depends only on the txid
@@ -2258,7 +2253,7 @@ void PeerManagerImpl::ProcessOrphanTx(std::set<uint256>& orphan_work_set)
if (state.GetResult() == TxValidationResult::TX_INPUTS_NOT_STANDARD && porphanTx->GetWitnessHash() != porphanTx->GetHash()) {
// We only add the txid if it differs from the wtxid, to
// avoid wasting entries in the rolling bloom filter.
recentRejects->insert(porphanTx->GetHash());
m_recent_rejects.insert(porphanTx->GetHash());
}
}
m_orphanage.EraseTx(orphanHash);
@@ -3259,7 +3254,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
std::sort(unique_parents.begin(), unique_parents.end());
unique_parents.erase(std::unique(unique_parents.begin(), unique_parents.end()), unique_parents.end());
for (const uint256& parent_txid : unique_parents) {
if (recentRejects->contains(parent_txid)) {
if (m_recent_rejects.contains(parent_txid)) {
fRejectedParents = true;
break;
}
@@ -3300,8 +3295,8 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
// regardless of what witness is provided, we will not accept
// this, so we don't need to allow for redownload of this txid
// from any of our non-wtxidrelay peers.
recentRejects->insert(tx.GetHash());
recentRejects->insert(tx.GetWitnessHash());
m_recent_rejects.insert(tx.GetHash());
m_recent_rejects.insert(tx.GetWitnessHash());
m_txrequest.ForgetTxHash(tx.GetHash());
m_txrequest.ForgetTxHash(tx.GetWitnessHash());
}
@@ -3320,8 +3315,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
// See also comments in https://github.com/bitcoin/bitcoin/pull/18044#discussion_r443419034
// for concerns around weakening security of unupgraded nodes
// if we start doing this too early.
assert(recentRejects);
recentRejects->insert(tx.GetWitnessHash());
m_recent_rejects.insert(tx.GetWitnessHash());
m_txrequest.ForgetTxHash(tx.GetWitnessHash());
// If the transaction failed for TX_INPUTS_NOT_STANDARD,
// then we know that the witness was irrelevant to the policy
@@ -3332,7 +3326,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
// transactions are later received (resulting in
// parent-fetching by txid via the orphan-handling logic).
if (state.GetResult() == TxValidationResult::TX_INPUTS_NOT_STANDARD && tx.GetWitnessHash() != tx.GetHash()) {
recentRejects->insert(tx.GetHash());
m_recent_rejects.insert(tx.GetHash());
m_txrequest.ForgetTxHash(tx.GetHash());
}
if (RecursiveDynamicUsage(*ptx) < 100000) {
@@ -3341,21 +3335,21 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
}
}
// If a tx has been detected by recentRejects, we will have reached
// If a tx has been detected by m_recent_rejects, we will have reached
// this point and the tx will have been ignored. Because we haven't run
// the tx through AcceptToMemoryPool, we won't have computed a DoS
// score for it or determined exactly why we consider it invalid.
//
// This means we won't penalize any peer subsequently relaying a DoSy
// tx (even if we penalized the first peer who gave it to us) because
// we have to account for recentRejects showing false positives. In
// we have to account for m_recent_rejects showing false positives. In
// other words, we shouldn't penalize a peer if we aren't *sure* they
// submitted a DoSy tx.
//
// Note that recentRejects doesn't just record DoSy or invalid
// Note that m_recent_rejects doesn't just record DoSy or invalid
// transactions, but any tx not accepted by the mempool, which may be
// due to node policy (vs. consensus). So we can't blanket penalize a
// peer simply for relaying a tx that our recentRejects has caught,
// peer simply for relaying a tx that our m_recent_rejects has caught,
// regardless of false positives.
if (state.IsInvalid()) {
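
The 48'000-entry capacity given to m_recent_confirmed_transactions in the hunks above follows directly from the sizing comment: roughly 4,000 transactions per block, two filter entries per transaction (txid plus wtxid), and about six blocks (~1 hour) of coverage. A compile-time sanity check of that arithmetic, with illustrative constant names (the real code simply hard-codes the result):

    // Back-of-the-envelope check of the 48'000 filter capacity; the constant
    // names below are invented for this sketch.
    constexpr int MAX_TXS_PER_BLOCK = 4'000; // "Blocks don't typically have more than 4000 transactions"
    constexpr int ENTRIES_PER_TX    = 2;     // one txid and one wtxid per observed transaction
    constexpr int BLOCKS_COVERED    = 6;     // about six blocks, i.e. ~1 hour
    static_assert(MAX_TXS_PER_BLOCK * ENTRIES_PER_TX * BLOCKS_COVERED == 48'000,
                  "capacity matches the sizing comment");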

src/txorphanage.h

@@ -47,6 +47,13 @@ public:
* (ie orphans that may have found their final missing parent, and so should be reconsidered for the mempool) */
void AddChildrenToWorkSet(const CTransaction& tx, std::set<uint256>& orphan_work_set) const EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans);
/** Return how many entries exist in the orphanage */
size_t Size() LOCKS_EXCLUDED(::g_cs_orphans)
{
LOCK(::g_cs_orphans);
return m_orphans.size();
}
protected:
struct OrphanTx {
CTransactionRef tx;
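
The new Size() accessor above is a conventional lock-guarded getter; FinalizeNode() uses it for the added consistency check that the orphanage is empty once the last peer has been removed. A minimal standalone sketch of the same pattern, using plain std::mutex and std::lock_guard in place of Bitcoin Core's g_cs_orphans and the LOCK()/GUARDED_BY() annotations:

    #include <cstddef>
    #include <map>
    #include <mutex>

    // Standalone sketch of the accessor pattern behind TxOrphanage::Size();
    // the class below is invented and only mirrors the locking shape.
    class OrphanageSketch {
    public:
        std::size_t Size() const
        {
            std::lock_guard<std::mutex> lock(m_mutex); // hold the lock for the read
            return m_orphans.size();
        }
    private:
        mutable std::mutex m_mutex;   // stands in for the global g_cs_orphans
        std::map<int, int> m_orphans; // placeholder for the real orphan map
    };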

test/functional/mempool_reorg.py

@@ -80,7 +80,7 @@ class MempoolCoinbaseTest(BitcoinTestFramework):
self.log.info("Generate a block")
last_block = self.nodes[0].generate(1)
# Sync blocks, so that peer 1 gets the block before timelock_tx
# Otherwise, peer 1 would put the timelock_tx in recentRejects
# Otherwise, peer 1 would put the timelock_tx in m_recent_rejects
self.sync_all()
self.log.info("The time-locked transaction can now be spent")

test/functional/p2p_permissions.py

@@ -130,7 +130,7 @@ class P2PPermissionsTests(BitcoinTestFramework):
tx.vout[0].nValue += 1
txid = tx.rehash()
# Send the transaction twice. The first time, it'll be rejected by ATMP because it conflicts
# with a mempool transaction. The second time, it'll be in the recentRejects filter.
# with a mempool transaction. The second time, it'll be in the m_recent_rejects filter.
p2p_rebroadcast_wallet.send_txs_and_test(
[tx],
self.nodes[1],