Merge #20942: [refactor] Move some net_processing globals into PeerManagerImpl

6452190841f8da1cdaf899d064974136ab37659f net_processing: simplify MaybeSetPeerAsAnnouncingHeaderAndIDs args (Anthony Towns)
39c2a69bc28eb3e3b5fa15a3965773b459bbf7ad net_processing: move MaybeSetPeerAsAnnouncingHeaderAndIDs into PeerManagerImpl (Anthony Towns)
7b7117efd00acf7609e65d3b4fe5f76e400dda12 net_processing: simplify ProcessGetData and FindTxForGetData args (Anthony Towns)
34207b9004d2069a8fcb32758cd796143eccfb4d net_processing: move FindTxForGetData and ProcessGetData to PeerManagerImpl (Anthony Towns)
d44084883adcf00f50d3d5a9e0c88e3a0b276817 net_processing: simplify PeerManagerImpl method args (Anthony Towns)
a490f0a056456d683dd8ef6f89a5af1a13792118 net_processing: move MarkBlockAs*, TipMayBeStale, FindNextBlocksToDL to PeerManagerImpl (Anthony Towns)
052d9bc7e52aea373a316f08d42460ead4ed16c8 net_processing: simplify AlreadyHaveTx args (Anthony Towns)
eeac5062508c98fe58daaec471cdd27f3909b6ec net_processing: move AlreadyHaveTx into PeerManagerImpl (Anthony Towns)
9781c08a33569370f191b30cc7e2ce9b5317eb3e net_processing: move some globals into PeerManagerImpl (Anthony Towns)

Pull request description:

  Turns some globals into member variables, and simplifies the parameter list for some of net_processing's internal functions. Mostly just serves as a code cleanup at this point.
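
  As a rough illustration of the pattern being applied here (a minimal sketch with made-up names: ExampleManager, g_counter/m_counter and Process() are illustrative only, not code from this PR), state that used to live at file scope, and therefore had to be passed to or reached for by every helper, becomes a member of the class that owns it, and the helper becomes a member function that can drop the extra parameter.

      // Before: a file-scope global plus a free helper that has to be handed
      // every piece of state it touches.
      namespace {
      int g_counter = 0;

      void Process(int& counter, int amount) { counter += amount; }
      } // namespace

      // After: the state and the helper live inside the owning class, so the
      // global and the pass-through parameter both disappear.
      class ExampleManager
      {
      public:
          void Process(int amount) { m_counter += amount; }

      private:
          int m_counter{0};
      };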

ACKs for top commit:
  jnewbery:
    Code review ACK 6452190841f8da1cdaf899d064974136ab37659f
  ariard:
    Code Review ACK 6452190, changes are pretty straightforward.
  MarcoFalke:
    Concept ACK 6452190841 I have not reviewed this, but I left a comment 🐡

Tree-SHA512: 381361f9dbfeb851a5522ead3165ce1447a0f212ddea4b483aa38975559ee5ed03a4ba69c24fd69f36847a1eddfef05785f5cbb2fcec5fe50f8b336e8047c3b1
Commit 45ea103f8f by MarcoFalke, 2021-02-15 12:02:37 +01:00

@@ -169,6 +169,14 @@ void EraseOrphansFor(NodeId peer);
 // Internal stuff
 namespace {
+/** Blocks that are in flight, and that are in the queue to be downloaded. */
+struct QueuedBlock {
+    uint256 hash;
+    const CBlockIndex* pindex;                               //!< Optional.
+    bool fValidatedHeaders;                                  //!< Whether this block has validated headers at the time of request.
+    std::unique_ptr<PartiallyDownloadedBlock> partialBlock;  //!< Optional, used for CMPCTBLOCK downloads
+};
+
 /**
  * Data structure for an individual peer. This struct is not protected by
  * cs_main since it does not contain validation-critical data.
@@ -345,10 +353,7 @@ private:
      * their own locks.
      */
     std::map<NodeId, PeerRef> m_peer_map GUARDED_BY(m_peer_mutex);
-};
-} // namespace
-
-namespace {
 
     /** Number of nodes with fSyncStarted. */
     int nSyncStarted GUARDED_BY(cs_main) = 0;
@@ -360,6 +365,14 @@ namespace {
      */
     std::map<uint256, std::pair<NodeId, bool>> mapBlockSource GUARDED_BY(cs_main);
+    /** Number of peers with wtxid relay. */
+    int m_wtxid_relay_peers GUARDED_BY(cs_main) = 0;
+
+    /** Number of outbound peers with m_chain_sync.m_protect. */
+    int m_outbound_peers_with_protect_from_disconnect GUARDED_BY(cs_main) = 0;
+
+    bool AlreadyHaveTx(const GenTxid& gtxid) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
+
     /**
      * Filter for transactions that were recently rejected by
      * AcceptToMemoryPool. These are not rerequested until the chain tip
@@ -402,35 +415,36 @@ namespace {
      * We use this to avoid requesting transactions that have already been
      * confirnmed.
      */
-    Mutex g_cs_recent_confirmed_transactions;
-    std::unique_ptr<CRollingBloomFilter> g_recent_confirmed_transactions GUARDED_BY(g_cs_recent_confirmed_transactions);
+    Mutex m_recent_confirmed_transactions_mutex;
+    std::unique_ptr<CRollingBloomFilter> m_recent_confirmed_transactions GUARDED_BY(m_recent_confirmed_transactions_mutex);
+
+    /* Returns a bool indicating whether we requested this block.
+     * Also used if a block was /not/ received and timed out or started with another peer
+     */
+    bool MarkBlockAsReceived(const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
+
+    /* Mark a block as in flight
+     * Returns false, still setting pit, if the block was already in flight from the same peer
+     * pit will only be valid as long as the same cs_main lock is being held
+     */
+    bool MarkBlockAsInFlight(NodeId nodeid, const uint256& hash, const CBlockIndex* pindex = nullptr, std::list<QueuedBlock>::iterator** pit = nullptr) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
+
+    bool TipMayBeStale() EXCLUSIVE_LOCKS_REQUIRED(cs_main);
+
+    /** Update pindexLastCommonBlock and add not-in-flight missing successors to vBlocks, until it has
+     * at most count entries.
+     */
+    void FindNextBlocksToDownload(NodeId nodeid, unsigned int count, std::vector<const CBlockIndex*>& vBlocks, NodeId& nodeStaller) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
 
-    /** Blocks that are in flight, and that are in the queue to be downloaded. */
-    struct QueuedBlock {
-        uint256 hash;
-        const CBlockIndex* pindex;                               //!< Optional.
-        bool fValidatedHeaders;                                  //!< Whether this block has validated headers at the time of request.
-        std::unique_ptr<PartiallyDownloadedBlock> partialBlock;  //!< Optional, used for CMPCTBLOCK downloads
-    };
     std::map<uint256, std::pair<NodeId, std::list<QueuedBlock>::iterator> > mapBlocksInFlight GUARDED_BY(cs_main);
-
-    /** Stack of nodes which we have set to announce using compact blocks */
-    std::list<NodeId> lNodesAnnouncingHeaderAndIDs GUARDED_BY(cs_main);
-
-    /** Number of preferable block download peers. */
-    int nPreferredDownload GUARDED_BY(cs_main) = 0;
-
-    /** Number of peers from which we're downloading blocks. */
-    int nPeersWithValidatedDownloads GUARDED_BY(cs_main) = 0;
-
-    /** Number of peers with wtxid relay. */
-    int g_wtxid_relay_peers GUARDED_BY(cs_main) = 0;
-
-    /** Number of outbound peers with m_chain_sync.m_protect. */
-    int g_outbound_peers_with_protect_from_disconnect GUARDED_BY(cs_main) = 0;
 
     /** When our tip was last updated. */
-    std::atomic<int64_t> g_last_tip_update(0);
+    std::atomic<int64_t> m_last_tip_update{0};
+
+    /** Determine whether or not a peer can request a transaction, and return it (or nullptr if not found or not allowed). */
+    CTransactionRef FindTxForGetData(const CNode& peer, const GenTxid& gtxid, const std::chrono::seconds mempool_req, const std::chrono::seconds now) LOCKS_EXCLUDED(cs_main);
+
+    void ProcessGetData(CNode& pfrom, Peer& peer, const std::atomic<bool>& interruptMsgProc) EXCLUSIVE_LOCKS_REQUIRED(!cs_main, peer.m_getdata_requests_mutex);
 
     /** Relay map (txid or wtxid -> CTransactionRef) */
     typedef std::map<uint256, CTransactionRef> MapRelay;
@@ -438,6 +452,28 @@ namespace {
     /** Expiration-time ordered list of (expire time, relay map entry) pairs. */
     std::deque<std::pair<int64_t, MapRelay::iterator>> vRelayExpiration GUARDED_BY(cs_main);
 
+    /**
+     * When a peer sends us a valid block, instruct it to announce blocks to us
+     * using CMPCTBLOCK if possible by adding its nodeid to the end of
+     * lNodesAnnouncingHeaderAndIDs, and keeping that list under a certain size by
+     * removing the first element if necessary.
+     */
+    void MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
+
+    /** Stack of nodes which we have set to announce using compact blocks */
+    std::list<NodeId> lNodesAnnouncingHeaderAndIDs GUARDED_BY(cs_main);
+
+    /** Number of peers from which we're downloading blocks. */
+    int nPeersWithValidatedDownloads GUARDED_BY(cs_main) = 0;
+};
+} // namespace
+
+namespace {
+    /** Number of preferable block download peers. */
+    int nPreferredDownload GUARDED_BY(cs_main) = 0;
+
     struct IteratorComparator
     {
         template<typename I>
@@ -610,9 +646,8 @@ static void UpdatePreferredDownload(const CNode& node, CNodeState* state) EXCLUS
     nPreferredDownload += state->fPreferredDownload;
 }
 
-// Returns a bool indicating whether we requested this block.
-// Also used if a block was /not/ received and timed out or started with another peer
-static bool MarkBlockAsReceived(const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
+bool PeerManagerImpl::MarkBlockAsReceived(const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
+{
     std::map<uint256, std::pair<NodeId, std::list<QueuedBlock>::iterator> >::iterator itInFlight = mapBlocksInFlight.find(hash);
     if (itInFlight != mapBlocksInFlight.end()) {
         CNodeState *state = State(itInFlight->second.first);
@@ -635,9 +670,8 @@ static bool MarkBlockAsReceived(const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs
     return false;
 }
 
-// returns false, still setting pit, if the block was already in flight from the same peer
-// pit will only be valid as long as the same cs_main lock is being held
-static bool MarkBlockAsInFlight(CTxMemPool& mempool, NodeId nodeid, const uint256& hash, const CBlockIndex* pindex = nullptr, std::list<QueuedBlock>::iterator** pit = nullptr) EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
+bool PeerManagerImpl::MarkBlockAsInFlight(NodeId nodeid, const uint256& hash, const CBlockIndex* pindex, std::list<QueuedBlock>::iterator** pit) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
+{
     CNodeState *state = State(nodeid);
     assert(state != nullptr);
@@ -654,7 +688,7 @@ static bool MarkBlockAsInFlight(CTxMemPool& mempool, NodeId nodeid, const uint25
     MarkBlockAsReceived(hash);
 
     std::list<QueuedBlock>::iterator it = state->vBlocksInFlight.insert(state->vBlocksInFlight.end(),
-            {hash, pindex, pindex != nullptr, std::unique_ptr<PartiallyDownloadedBlock>(pit ? new PartiallyDownloadedBlock(&mempool) : nullptr)});
+            {hash, pindex, pindex != nullptr, std::unique_ptr<PartiallyDownloadedBlock>(pit ? new PartiallyDownloadedBlock(&m_mempool) : nullptr)});
     state->nBlocksInFlight++;
     state->nBlocksInFlightValidHeaders += it->fValidatedHeaders;
     if (state->nBlocksInFlight == 1) {
@@ -705,13 +739,7 @@ static void UpdateBlockAvailability(NodeId nodeid, const uint256 &hash) EXCLUSIV
     }
 }
 
-/**
- * When a peer sends us a valid block, instruct it to announce blocks to us
- * using CMPCTBLOCK if possible by adding its nodeid to the end of
- * lNodesAnnouncingHeaderAndIDs, and keeping that list under a certain size by
- * removing the first element if necessary.
- */
-static void MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid, CConnman& connman) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
+void PeerManagerImpl::MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
 {
     AssertLockHeld(cs_main);
     CNodeState* nodestate = State(nodeid);
@@ -727,21 +755,21 @@ static void MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid, CConnma
             return;
         }
     }
-    connman.ForNode(nodeid, [&connman](CNode* pfrom) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {
+    m_connman.ForNode(nodeid, [this](CNode* pfrom) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {
         AssertLockHeld(::cs_main);
         uint64_t nCMPCTBLOCKVersion = (pfrom->GetLocalServices() & NODE_WITNESS) ? 2 : 1;
         if (lNodesAnnouncingHeaderAndIDs.size() >= 3) {
             // As per BIP152, we only get 3 of our peers to announce
             // blocks using compact encodings.
-            connman.ForNode(lNodesAnnouncingHeaderAndIDs.front(), [&connman, nCMPCTBLOCKVersion](CNode* pnodeStop){
-                connman.PushMessage(pnodeStop, CNetMsgMaker(pnodeStop->GetCommonVersion()).Make(NetMsgType::SENDCMPCT, /*fAnnounceUsingCMPCTBLOCK=*/false, nCMPCTBLOCKVersion));
+            m_connman.ForNode(lNodesAnnouncingHeaderAndIDs.front(), [this, nCMPCTBLOCKVersion](CNode* pnodeStop){
+                m_connman.PushMessage(pnodeStop, CNetMsgMaker(pnodeStop->GetCommonVersion()).Make(NetMsgType::SENDCMPCT, /*fAnnounceUsingCMPCTBLOCK=*/false, nCMPCTBLOCKVersion));
                 // save BIP152 bandwidth state: we select peer to be low-bandwidth
                 pnodeStop->m_bip152_highbandwidth_to = false;
                 return true;
             });
             lNodesAnnouncingHeaderAndIDs.pop_front();
         }
-        connman.PushMessage(pfrom, CNetMsgMaker(pfrom->GetCommonVersion()).Make(NetMsgType::SENDCMPCT, /*fAnnounceUsingCMPCTBLOCK=*/true, nCMPCTBLOCKVersion));
+        m_connman.PushMessage(pfrom, CNetMsgMaker(pfrom->GetCommonVersion()).Make(NetMsgType::SENDCMPCT, /*fAnnounceUsingCMPCTBLOCK=*/true, nCMPCTBLOCKVersion));
         // save BIP152 bandwidth state: we select peer to be high-bandwidth
         pfrom->m_bip152_highbandwidth_to = true;
         lNodesAnnouncingHeaderAndIDs.push_back(pfrom->GetId());
@@ -750,13 +778,14 @@ static void MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid, CConnma
     }
 }
 
-static bool TipMayBeStale(const Consensus::Params &consensusParams) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
+bool PeerManagerImpl::TipMayBeStale() EXCLUSIVE_LOCKS_REQUIRED(cs_main)
 {
     AssertLockHeld(cs_main);
-    if (g_last_tip_update == 0) {
-        g_last_tip_update = GetTime();
+    const Consensus::Params& consensusParams = m_chainparams.GetConsensus();
+    if (m_last_tip_update == 0) {
+        m_last_tip_update = GetTime();
     }
-    return g_last_tip_update < GetTime() - consensusParams.nPowTargetSpacing * 3 && mapBlocksInFlight.empty();
+    return m_last_tip_update < GetTime() - consensusParams.nPowTargetSpacing * 3 && mapBlocksInFlight.empty();
 }
 
 static bool CanDirectFetch(const Consensus::Params &consensusParams) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
@@ -773,9 +802,7 @@ static bool PeerHasHeader(CNodeState *state, const CBlockIndex *pindex) EXCLUSIV
     return false;
 }
 
-/** Update pindexLastCommonBlock and add not-in-flight missing successors to vBlocks, until it has
- * at most count entries. */
-static void FindNextBlocksToDownload(NodeId nodeid, unsigned int count, std::vector<const CBlockIndex*>& vBlocks, NodeId& nodeStaller, const Consensus::Params& consensusParams) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
+void PeerManagerImpl::FindNextBlocksToDownload(NodeId nodeid, unsigned int count, std::vector<const CBlockIndex*>& vBlocks, NodeId& nodeStaller) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
 {
     if (count == 0)
         return;
@@ -804,6 +831,7 @@ static void FindNextBlocksToDownload(NodeId nodeid, unsigned int count, std::vec
     if (state->pindexLastCommonBlock == state->pindexBestKnownBlock)
         return;
 
+    const Consensus::Params& consensusParams = m_chainparams.GetConsensus();
     std::vector<const CBlockIndex*> vToFetch;
     const CBlockIndex *pindexWalk = state->pindexLastCommonBlock;
     // Never fetch further than the best block we know the peer has, or more than BLOCK_DOWNLOAD_WINDOW + 1 beyond the last
@@ -911,7 +939,7 @@ void PeerManagerImpl::AddTxAnnouncement(const CNode& node, const GenTxid& gtxid,
     auto delay = std::chrono::microseconds{0};
     const bool preferred = state->fPreferredDownload;
     if (!preferred) delay += NONPREF_PEER_TX_DELAY;
-    if (!gtxid.IsWtxid() && g_wtxid_relay_peers > 0) delay += TXID_RELAY_DELAY;
+    if (!gtxid.IsWtxid() && m_wtxid_relay_peers > 0) delay += TXID_RELAY_DELAY;
     const bool overloaded = !node.HasPermission(PF_RELAY) &&
         m_txrequest.CountInFlight(nodeid) >= MAX_PEER_TX_REQUEST_IN_FLIGHT;
     if (overloaded) delay += OVERLOADED_PEER_TX_DELAY;
@@ -1004,10 +1032,10 @@ void PeerManagerImpl::FinalizeNode(const CNode& node, bool& fUpdateConnectionTim
         nPreferredDownload -= state->fPreferredDownload;
         nPeersWithValidatedDownloads -= (state->nBlocksInFlightValidHeaders != 0);
         assert(nPeersWithValidatedDownloads >= 0);
-        g_outbound_peers_with_protect_from_disconnect -= state->m_chain_sync.m_protect;
-        assert(g_outbound_peers_with_protect_from_disconnect >= 0);
-        g_wtxid_relay_peers -= state->m_wtxid_relay;
-        assert(g_wtxid_relay_peers >= 0);
+        m_outbound_peers_with_protect_from_disconnect -= state->m_chain_sync.m_protect;
+        assert(m_outbound_peers_with_protect_from_disconnect >= 0);
+        m_wtxid_relay_peers -= state->m_wtxid_relay;
+        assert(m_wtxid_relay_peers >= 0);
 
         mapNodeState.erase(nodeid);
@@ -1016,8 +1044,8 @@ void PeerManagerImpl::FinalizeNode(const CNode& node, bool& fUpdateConnectionTim
         assert(mapBlocksInFlight.empty());
         assert(nPreferredDownload == 0);
         assert(nPeersWithValidatedDownloads == 0);
-        assert(g_outbound_peers_with_protect_from_disconnect == 0);
-        assert(g_wtxid_relay_peers == 0);
+        assert(m_outbound_peers_with_protect_from_disconnect == 0);
+        assert(m_wtxid_relay_peers == 0);
         assert(m_txrequest.Size() == 0);
     }
     LogPrint(BCLog::NET, "Cleared nodestate for peer=%d\n", nodeid);
@@ -1344,7 +1372,7 @@ PeerManagerImpl::PeerManagerImpl(const CChainParams& chainparams, CConnman& conn
     // The false positive rate of 1/1M should come out to less than 1
     // transaction per day that would be inadvertently ignored (which is the
     // same probability that we have in the reject filter).
-    g_recent_confirmed_transactions.reset(new CRollingBloomFilter(48000, 0.000001));
+    m_recent_confirmed_transactions.reset(new CRollingBloomFilter(48000, 0.000001));
 
     // Stale tip checking and peer eviction are on two different timers, but we
     // don't want them to get out of sync due to drift in the scheduler, so we
@@ -1394,14 +1422,14 @@ void PeerManagerImpl::BlockConnected(const std::shared_ptr<const CBlock>& pblock
             LogPrint(BCLog::MEMPOOL, "Erased %d orphan tx included or conflicted by block\n", nErased);
         }
 
-        g_last_tip_update = GetTime();
+        m_last_tip_update = GetTime();
     }
     {
-        LOCK(g_cs_recent_confirmed_transactions);
+        LOCK(m_recent_confirmed_transactions_mutex);
         for (const auto& ptx : pblock->vtx) {
-            g_recent_confirmed_transactions->insert(ptx->GetHash());
+            m_recent_confirmed_transactions->insert(ptx->GetHash());
             if (ptx->GetHash() != ptx->GetWitnessHash()) {
-                g_recent_confirmed_transactions->insert(ptx->GetWitnessHash());
+                m_recent_confirmed_transactions->insert(ptx->GetWitnessHash());
             }
         }
     }
@@ -1424,8 +1452,8 @@ void PeerManagerImpl::BlockDisconnected(const std::shared_ptr<const CBlock> &blo
     // block's worth of transactions in it, but that should be fine, since
     // presumably the most common case of relaying a confirmed transaction
    // should be just after a new block containing it is found.
-    LOCK(g_cs_recent_confirmed_transactions);
-    g_recent_confirmed_transactions->reset();
+    LOCK(m_recent_confirmed_transactions_mutex);
+    m_recent_confirmed_transactions->reset();
 }
 
 // All of the following cache a recent block, and are protected by cs_most_recent_block
@@ -1550,7 +1578,7 @@ void PeerManagerImpl::BlockChecked(const CBlock& block, const BlockValidationSta
             !::ChainstateActive().IsInitialBlockDownload() &&
             mapBlocksInFlight.count(hash) == mapBlocksInFlight.size()) {
             if (it != mapBlockSource.end()) {
-                MaybeSetPeerAsAnnouncingHeaderAndIDs(it->second.first, m_connman);
+                MaybeSetPeerAsAnnouncingHeaderAndIDs(it->second.first);
             }
         }
         if (it != mapBlockSource.end())
@@ -1563,7 +1591,7 @@ void PeerManagerImpl::BlockChecked(const CBlock& block, const BlockValidationSta
 //
 
-bool static AlreadyHaveTx(const GenTxid& gtxid, const CTxMemPool& mempool) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
+bool PeerManagerImpl::AlreadyHaveTx(const GenTxid& gtxid) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
 {
     assert(recentRejects);
     if (::ChainActive().Tip()->GetBlockHash() != hashRecentRejectsChainTip) {
@@ -1587,11 +1615,11 @@ bool static AlreadyHaveTx(const GenTxid& gtxid, const CTxMemPool& mempool) EXCLU
     }
 
     {
-        LOCK(g_cs_recent_confirmed_transactions);
-        if (g_recent_confirmed_transactions->contains(hash)) return true;
+        LOCK(m_recent_confirmed_transactions_mutex);
+        if (m_recent_confirmed_transactions->contains(hash)) return true;
     }
 
-    return recentRejects->contains(hash) || mempool.exists(gtxid);
+    return recentRejects->contains(hash) || m_mempool.exists(gtxid);
 }
 
 bool static AlreadyHaveBlock(const uint256& block_hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
@@ -1825,10 +1853,9 @@ void static ProcessGetBlockData(CNode& pfrom, Peer& peer, const CChainParams& ch
     }
 }
 
-//! Determine whether or not a peer can request a transaction, and return it (or nullptr if not found or not allowed).
-static CTransactionRef FindTxForGetData(const CTxMemPool& mempool, const CNode& peer, const GenTxid& gtxid, const std::chrono::seconds mempool_req, const std::chrono::seconds now) LOCKS_EXCLUDED(cs_main)
+CTransactionRef PeerManagerImpl::FindTxForGetData(const CNode& peer, const GenTxid& gtxid, const std::chrono::seconds mempool_req, const std::chrono::seconds now) LOCKS_EXCLUDED(cs_main)
 {
-    auto txinfo = mempool.info(gtxid);
+    auto txinfo = m_mempool.info(gtxid);
     if (txinfo.tx) {
         // If a TX could have been INVed in reply to a MEMPOOL request,
         // or is older than UNCONDITIONAL_RELAY_DELAY, permit the request
@@ -1853,7 +1880,7 @@ static CTransactionRef FindTxForGetData(const CTxMemPool& mempool, const CNode&
     return {};
 }
 
-void static ProcessGetData(CNode& pfrom, Peer& peer, const CChainParams& chainparams, CConnman& connman, CTxMemPool& mempool, const std::atomic<bool>& interruptMsgProc) EXCLUSIVE_LOCKS_REQUIRED(!cs_main, peer.m_getdata_requests_mutex)
+void PeerManagerImpl::ProcessGetData(CNode& pfrom, Peer& peer, const std::atomic<bool>& interruptMsgProc) EXCLUSIVE_LOCKS_REQUIRED(!cs_main, peer.m_getdata_requests_mutex)
 {
     AssertLockNotHeld(cs_main);
 
@@ -1882,17 +1909,17 @@ void static ProcessGetData(CNode& pfrom, Peer& peer, const CChainParams& chainpa
                 continue;
             }
 
-            CTransactionRef tx = FindTxForGetData(mempool, pfrom, ToGenTxid(inv), mempool_req, now);
+            CTransactionRef tx = FindTxForGetData(pfrom, ToGenTxid(inv), mempool_req, now);
             if (tx) {
                 // WTX and WITNESS_TX imply we serialize with witness
                 int nSendFlags = (inv.IsMsgTx() ? SERIALIZE_TRANSACTION_NO_WITNESS : 0);
-                connman.PushMessage(&pfrom, msgMaker.Make(nSendFlags, NetMsgType::TX, *tx));
-                mempool.RemoveUnbroadcastTx(tx->GetHash());
+                m_connman.PushMessage(&pfrom, msgMaker.Make(nSendFlags, NetMsgType::TX, *tx));
+                m_mempool.RemoveUnbroadcastTx(tx->GetHash());
                 // As we're going to send tx, make sure its unconfirmed parents are made requestable.
                 std::vector<uint256> parent_ids_to_add;
                 {
-                    LOCK(mempool.cs);
-                    auto txiter = mempool.GetIter(tx->GetHash());
+                    LOCK(m_mempool.cs);
+                    auto txiter = m_mempool.GetIter(tx->GetHash());
                     if (txiter) {
                         const CTxMemPoolEntry::Parents& parents = (*txiter)->GetMemPoolParentsConst();
                         parent_ids_to_add.reserve(parents.size());
@@ -1920,7 +1947,7 @@ void static ProcessGetData(CNode& pfrom, Peer& peer, const CChainParams& chainpa
     if (it != peer.m_getdata_requests.end() && !pfrom.fPauseSend) {
         const CInv &inv = *it++;
         if (inv.IsGenBlkMsg()) {
-            ProcessGetBlockData(pfrom, peer, chainparams, inv, connman);
+            ProcessGetBlockData(pfrom, peer, m_chainparams, inv, m_connman);
         }
         // else: If the first item on the queue is an unknown type, we erase it
        // and continue processing the queue on the next call.
@@ -1943,7 +1970,7 @@ void static ProcessGetData(CNode& pfrom, Peer& peer, const CChainParams& chainpa
         // In normal operation, we often send NOTFOUND messages for parents of
         // transactions that we relay; if a peer is missing a parent, they may
         // assume we have them and request the parents from us.
-        connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::NOTFOUND, vNotFound));
+        m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::NOTFOUND, vNotFound));
     }
 }
@@ -2102,7 +2129,7 @@ void PeerManagerImpl::ProcessHeadersMessage(CNode& pfrom, const Peer& peer,
                 }
                 uint32_t nFetchFlags = GetFetchFlags(pfrom);
                 vGetData.push_back(CInv(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash()));
-                MarkBlockAsInFlight(m_mempool, pfrom.GetId(), pindex->GetBlockHash(), pindex);
+                MarkBlockAsInFlight(pfrom.GetId(), pindex->GetBlockHash(), pindex);
                 LogPrint(BCLog::NET, "Requesting block %s from peer=%d\n",
                         pindex->GetBlockHash().ToString(), pfrom.GetId());
             }
@@ -2146,10 +2173,10 @@ void PeerManagerImpl::ProcessHeadersMessage(CNode& pfrom, const Peer& peer,
         // thus always subject to eviction under the bad/lagging chain logic.
         // See ChainSyncTimeoutState.
         if (!pfrom.fDisconnect && pfrom.IsFullOutboundConn() && nodestate->pindexBestKnownBlock != nullptr) {
-            if (g_outbound_peers_with_protect_from_disconnect < MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT && nodestate->pindexBestKnownBlock->nChainWork >= ::ChainActive().Tip()->nChainWork && !nodestate->m_chain_sync.m_protect) {
+            if (m_outbound_peers_with_protect_from_disconnect < MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT && nodestate->pindexBestKnownBlock->nChainWork >= ::ChainActive().Tip()->nChainWork && !nodestate->m_chain_sync.m_protect) {
                 LogPrint(BCLog::NET, "Protecting outbound peer=%d from eviction\n", pfrom.GetId());
                 nodestate->m_chain_sync.m_protect = true;
-                ++g_outbound_peers_with_protect_from_disconnect;
+                ++m_outbound_peers_with_protect_from_disconnect;
             }
         }
     }
@@ -2761,7 +2788,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
         LOCK(cs_main);
         if (!State(pfrom.GetId())->m_wtxid_relay) {
             State(pfrom.GetId())->m_wtxid_relay = true;
-            g_wtxid_relay_peers++;
+            m_wtxid_relay_peers++;
         } else {
             LogPrint(BCLog::NET, "ignoring duplicate wtxidrelay from peer=%d\n", pfrom.GetId());
         }
@@ -2904,7 +2931,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
             }
         } else if (inv.IsGenTxMsg()) {
             const GenTxid gtxid = ToGenTxid(inv);
-            const bool fAlreadyHave = AlreadyHaveTx(gtxid, m_mempool);
+            const bool fAlreadyHave = AlreadyHaveTx(gtxid);
             LogPrint(BCLog::NET, "got inv: %s %s peer=%d\n", inv.ToString(), fAlreadyHave ? "have" : "new", pfrom.GetId());
 
             pfrom.AddKnownTx(inv.hash);
@@ -2946,7 +2973,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
         {
             LOCK(peer->m_getdata_requests_mutex);
             peer->m_getdata_requests.insert(peer->m_getdata_requests.end(), vInv.begin(), vInv.end());
-            ProcessGetData(pfrom, *peer, m_chainparams, m_connman, m_mempool, interruptMsgProc);
+            ProcessGetData(pfrom, *peer, interruptMsgProc);
         }
 
         return;
@@ -3185,7 +3212,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
         // already; and an adversary can already relay us old transactions
         // (older than our recency filter) if trying to DoS us, without any need
         // for witness malleation.
-        if (AlreadyHaveTx(GenTxid(/* is_wtxid=*/true, wtxid), m_mempool)) {
+        if (AlreadyHaveTx(GenTxid(/* is_wtxid=*/true, wtxid))) {
            if (pfrom.HasPermission(PF_FORCERELAY)) {
                 // Always relay transactions received from peers with forcerelay
                 // permission, even if they were already in the mempool, allowing
@@ -3264,7 +3291,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
                 // protocol for getting all unconfirmed parents.
                 const GenTxid gtxid{/* is_wtxid=*/false, parent_txid};
                 pfrom.AddKnownTx(parent_txid);
-                if (!AlreadyHaveTx(gtxid, m_mempool)) AddTxAnnouncement(pfrom, gtxid, current_time);
+                if (!AlreadyHaveTx(gtxid)) AddTxAnnouncement(pfrom, gtxid, current_time);
             }
 
             AddOrphanTx(ptx, pfrom.GetId());
@@ -3454,7 +3481,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
         if ((!fAlreadyInFlight && nodestate->nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) ||
             (fAlreadyInFlight && blockInFlightIt->second.first == pfrom.GetId())) {
             std::list<QueuedBlock>::iterator* queuedBlockIt = nullptr;
-            if (!MarkBlockAsInFlight(m_mempool, pfrom.GetId(), pindex->GetBlockHash(), pindex, &queuedBlockIt)) {
+            if (!MarkBlockAsInFlight(pfrom.GetId(), pindex->GetBlockHash(), pindex, &queuedBlockIt)) {
                 if (!(*queuedBlockIt)->partialBlock)
                     (*queuedBlockIt)->partialBlock.reset(new PartiallyDownloadedBlock(&m_mempool));
                 else {
@@ -4021,7 +4048,7 @@ bool PeerManagerImpl::ProcessMessages(CNode* pfrom, std::atomic<bool>& interrupt
         {
             LOCK(peer->m_getdata_requests_mutex);
             if (!peer->m_getdata_requests.empty()) {
-                ProcessGetData(*pfrom, *peer, m_chainparams, m_connman, m_mempool, interruptMsgProc);
+                ProcessGetData(*pfrom, *peer, interruptMsgProc);
             }
         }
 
@@ -4253,8 +4280,8 @@ void PeerManagerImpl::CheckForStaleTipAndEvictPeers()
     if (time_in_seconds > m_stale_tip_check_time) {
         // Check whether our tip is stale, and if so, allow using an extra
         // outbound peer
-        if (!fImporting && !fReindex && m_connman.GetNetworkActive() && m_connman.GetUseAddrmanOutgoing() && TipMayBeStale(m_chainparams.GetConsensus())) {
-            LogPrintf("Potential stale tip detected, will try using extra outbound peer (last tip update: %d seconds ago)\n", time_in_seconds - g_last_tip_update);
+        if (!fImporting && !fReindex && m_connman.GetNetworkActive() && m_connman.GetUseAddrmanOutgoing() && TipMayBeStale()) {
+            LogPrintf("Potential stale tip detected, will try using extra outbound peer (last tip update: %d seconds ago)\n", time_in_seconds - m_last_tip_update);
             m_connman.SetTryNewOutboundPeer(true);
         } else if (m_connman.GetTryNewOutboundPeer()) {
             m_connman.SetTryNewOutboundPeer(false);
@@ -4781,11 +4808,11 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
         if (!pto->fClient && ((fFetch && !pto->m_limited_node) || !::ChainstateActive().IsInitialBlockDownload()) && state.nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
             std::vector<const CBlockIndex*> vToDownload;
             NodeId staller = -1;
-            FindNextBlocksToDownload(pto->GetId(), MAX_BLOCKS_IN_TRANSIT_PER_PEER - state.nBlocksInFlight, vToDownload, staller, consensusParams);
+            FindNextBlocksToDownload(pto->GetId(), MAX_BLOCKS_IN_TRANSIT_PER_PEER - state.nBlocksInFlight, vToDownload, staller);
             for (const CBlockIndex *pindex : vToDownload) {
                 uint32_t nFetchFlags = GetFetchFlags(*pto);
                 vGetData.push_back(CInv(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash()));
-                MarkBlockAsInFlight(m_mempool, pto->GetId(), pindex->GetBlockHash(), pindex);
+                MarkBlockAsInFlight(pto->GetId(), pindex->GetBlockHash(), pindex);
                 LogPrint(BCLog::NET, "Requesting block %s (%d) peer=%d\n", pindex->GetBlockHash().ToString(),
                     pindex->nHeight, pto->GetId());
             }
@@ -4807,7 +4834,7 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
                 entry.second.GetHash().ToString(), entry.first);
         }
         for (const GenTxid& gtxid : requestable) {
-            if (!AlreadyHaveTx(gtxid, m_mempool)) {
+            if (!AlreadyHaveTx(gtxid)) {
                 LogPrint(BCLog::NET, "Requesting %s %s peer=%d\n", gtxid.IsWtxid() ? "wtx" : "tx",
                     gtxid.GetHash().ToString(), pto->GetId());
                 vGetData.emplace_back(gtxid.IsWtxid() ? MSG_WTX : (MSG_TX | GetFetchFlags(*pto)), gtxid.GetHash());