diff --git a/src/net_processing.cpp b/src/net_processing.cpp index 9287a67a04e..5a157b2b65f 100644 --- a/src/net_processing.cpp +++ b/src/net_processing.cpp @@ -312,7 +312,7 @@ struct Peer { std::chrono::microseconds m_next_inv_send_time GUARDED_BY(m_tx_inventory_mutex){0}; /** The mempool sequence num at which we sent the last `inv` message to this peer. * Can relay txs with lower sequence numbers than this (see CTxMempool::info_for_relay). */ - uint64_t m_last_inv_sequence GUARDED_BY(NetEventsInterface::g_msgproc_mutex){1}; + uint64_t m_last_inv_sequence GUARDED_BY(m_tx_inventory_mutex){1}; /** Minimum fee rate with which to filter transaction announcements to this node. See BIP133. */ std::atomic m_fee_filter_received{0}; @@ -942,7 +942,7 @@ private: /** Determine whether or not a peer can request a transaction, and return it (or nullptr if not found or not allowed). */ CTransactionRef FindTxForGetData(const Peer::TxRelay& tx_relay, const GenTxid& gtxid) - EXCLUSIVE_LOCKS_REQUIRED(!m_most_recent_block_mutex, NetEventsInterface::g_msgproc_mutex); + EXCLUSIVE_LOCKS_REQUIRED(!m_most_recent_block_mutex, !tx_relay.m_tx_inventory_mutex); void ProcessGetData(CNode& pfrom, Peer& peer, const std::atomic& interruptMsgProc) EXCLUSIVE_LOCKS_REQUIRED(!m_most_recent_block_mutex, peer.m_getdata_requests_mutex, NetEventsInterface::g_msgproc_mutex) @@ -1728,9 +1728,13 @@ bool PeerManagerImpl::GetNodeStateStats(NodeId nodeid, CNodeStateStats& stats) c if (auto tx_relay = peer->GetTxRelay(); tx_relay != nullptr) { stats.m_relay_txs = WITH_LOCK(tx_relay->m_bloom_filter_mutex, return tx_relay->m_relay_txs); stats.m_fee_filter_received = tx_relay->m_fee_filter_received.load(); + LOCK(tx_relay->m_tx_inventory_mutex); + stats.m_last_inv_seq = tx_relay->m_last_inv_sequence; + stats.m_inv_to_send = tx_relay->m_tx_inventory_to_send.size(); } else { stats.m_relay_txs = false; stats.m_fee_filter_received = 0; + stats.m_inv_to_send = 0; } stats.m_ping_wait = ping_wait; @@ -2362,8 +2366,8 @@ 
CTransactionRef PeerManagerImpl::FindTxForGetData(const Peer::TxRelay& tx_relay, { // If a tx was in the mempool prior to the last INV for this peer, permit the request. auto txinfo{std::visit( - [&](const auto& id) EXCLUSIVE_LOCKS_REQUIRED(NetEventsInterface::g_msgproc_mutex) { - return m_mempool.info_for_relay(id, tx_relay.m_last_inv_sequence); + [&](const auto& id) { + return m_mempool.info_for_relay(id, WITH_LOCK(tx_relay.m_tx_inventory_mutex, return tx_relay.m_last_inv_sequence)); }, gtxid)}; if (txinfo.tx) { diff --git a/src/net_processing.h b/src/net_processing.h index 8c140d98ad6..6eb4a5e16a2 100644 --- a/src/net_processing.h +++ b/src/net_processing.h @@ -54,6 +54,8 @@ struct CNodeStateStats { std::chrono::microseconds m_ping_wait; std::vector vHeightInFlight; bool m_relay_txs; + int m_inv_to_send = 0; + uint64_t m_last_inv_seq{0}; CAmount m_fee_filter_received; uint64_t m_addr_processed = 0; uint64_t m_addr_rate_limited = 0; diff --git a/src/rpc/net.cpp b/src/rpc/net.cpp index fbb70d72161..ba74283d237 100644 --- a/src/rpc/net.cpp +++ b/src/rpc/net.cpp @@ -142,6 +142,8 @@ static RPCHelpMan getpeerinfo() {RPCResult::Type::STR, "SERVICE_NAME", "the service name if it is recognised"} }}, {RPCResult::Type::BOOL, "relaytxes", "Whether we relay transactions to this peer"}, + {RPCResult::Type::NUM, "last_inv_sequence", "Mempool sequence number of this peer's last INV"}, + {RPCResult::Type::NUM, "inv_to_send", "How many txs we have queued to announce to this peer"}, {RPCResult::Type::NUM_TIME, "lastsend", "The " + UNIX_EPOCH_TIME + " of the last send"}, {RPCResult::Type::NUM_TIME, "lastrecv", "The " + UNIX_EPOCH_TIME + " of the last receive"}, {RPCResult::Type::NUM_TIME, "last_transaction", "The " + UNIX_EPOCH_TIME + " of the last valid transaction received from this peer"}, @@ -238,6 +240,8 @@ static RPCHelpMan getpeerinfo() obj.pushKV("services", strprintf("%016x", services)); obj.pushKV("servicesnames", GetServicesNames(services)); obj.pushKV("relaytxes", 
statestats.m_relay_txs); + obj.pushKV("last_inv_sequence", statestats.m_last_inv_seq); + obj.pushKV("inv_to_send", statestats.m_inv_to_send); obj.pushKV("lastsend", count_seconds(stats.m_last_send)); obj.pushKV("lastrecv", count_seconds(stats.m_last_recv)); obj.pushKV("last_transaction", count_seconds(stats.m_last_tx_time)); diff --git a/test/functional/p2p_leak_tx.py b/test/functional/p2p_leak_tx.py index a1a00751d12..42e586fca3d 100755 --- a/test/functional/p2p_leak_tx.py +++ b/test/functional/p2p_leak_tx.py @@ -12,6 +12,7 @@ from test_framework.util import ( ) from test_framework.wallet import MiniWallet +import time class P2PNode(P2PDataStore): def on_inv(self, msg): @@ -36,8 +37,24 @@ class P2PLeakTxTest(BitcoinTestFramework): self.log.debug("Generate transaction and block") inbound_peer.last_message.pop("inv", None) + + self.gen_node.setmocktime(int(time.time())) # pause time-based activities wtxid = self.miniwallet.send_self_transfer(from_node=self.gen_node)["wtxid"] + rawmp = self.gen_node.getrawmempool(False, True) + pi = self.gen_node.getpeerinfo()[0] + assert_equal(rawmp["mempool_sequence"], 2) # our tx causes mempool activity + assert_equal(pi["last_inv_sequence"], 1) # that is after the last inv + assert_equal(pi["inv_to_send"], 1) # and our tx has been queued + self.gen_node.setmocktime(0) + inbound_peer.wait_until(lambda: "inv" in inbound_peer.last_message and inbound_peer.last_message.get("inv").inv[0].hash == int(wtxid, 16)) + + rawmp = self.gen_node.getrawmempool(False, True) + pi = self.gen_node.getpeerinfo()[0] + assert_equal(rawmp["mempool_sequence"], 2) # no mempool update + assert_equal(pi["last_inv_sequence"], 2) # announced the current mempool + assert_equal(pi["inv_to_send"], 0) # nothing left in the queue + want_tx = msg_getdata(inv=inbound_peer.last_message.get("inv").inv) self.generate(self.gen_node, 1) diff --git a/test/functional/rpc_net.py b/test/functional/rpc_net.py index 41ecbbed22d..aeaf20c23d5 100755 ---
a/test/functional/rpc_net.py +++ b/test/functional/rpc_net.py @@ -166,6 +166,8 @@ class NetTest(BitcoinTestFramework): "permissions": [], "presynced_headers": -1, "relaytxes": False, + "inv_to_send": 0, + "last_inv_sequence": 0, "services": "0000000000000000", "servicesnames": [], "session_id": "" if not self.options.v2transport else no_version_peer.v2_state.peer['session_id'].hex(),