Reduce MAX_PEER_TX_ANNOUNCEMENTS for non-PF_RELAY peers

Maintaining up to 100000 INVs per peer is excessive, as that is far more
than fits in a typical mempool.

Also disable the "overload" penalty for PF_RELAY peers.
Pieter Wuille
2020-09-23 17:00:46 -07:00
parent 242d16477d
commit de11b0a4ef
5 changed files with 38 additions and 6 deletions
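For orientation, the policy described in the commit message can be modelled roughly as follows. This is a minimal Python sketch under stated assumptions, not the node's actual C++ net-processing code: PeerState and schedule_request are invented names for illustration, and only the constants mirror the test constants in the diff below.

# Illustrative sketch only: the real change lives in the node's C++ net-processing code.
# PeerState and schedule_request are hypothetical names; the constants mirror the test below.
from dataclasses import dataclass

MAX_PEER_TX_ANNOUNCEMENTS = 5000  # announcement cap for peers WITHOUT relay permission
MAX_GETDATA_IN_FLIGHT = 100       # outstanding requests beyond which a peer counts as overloaded
OVERLOADED_PEER_DELAY = 2         # seconds of extra delay before requesting from an overloaded peer

@dataclass
class PeerState:
    relay_permission: bool  # PF_RELAY, e.g. granted via -whitelist=relay@127.0.0.1
    announced: int = 0      # transaction announcements currently tracked for this peer
    in_flight: int = 0      # GETDATA requests currently outstanding to this peer

def schedule_request(peer: PeerState, now: float):
    """Return the earliest time a request for a newly announced tx may be sent,
    or None if the announcement is dropped because the peer hit its cap."""
    if not peer.relay_permission and peer.announced >= MAX_PEER_TX_ANNOUNCEMENTS:
        return None  # the cap only applies to peers without the relay permission
    peer.announced += 1
    delay = 0
    if not peer.relay_permission and peer.in_flight >= MAX_GETDATA_IN_FLIGHT:
        delay += OVERLOADED_PEER_DELAY  # "overload" penalty, now skipped for PF_RELAY peers
    return now + delay

Under this model, a peer with the relay permission can announce MAX_PEER_TX_ANNOUNCEMENTS + 1 transactions and have every one requested, while an ordinary peer has the excess announcement ignored; those are exactly the two cases the new test_large_inv_batch test below exercises.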


@@ -46,6 +46,7 @@ INBOUND_PEER_TX_DELAY = 2 # seconds
TXID_RELAY_DELAY = 2 # seconds
OVERLOADED_PEER_DELAY = 2 # seconds
MAX_GETDATA_IN_FLIGHT = 100
MAX_PEER_TX_ANNOUNCEMENTS = 5000

# Python test constants
NUM_INBOUND = 10
@@ -215,6 +216,22 @@ class TxDownloadTest(BitcoinTestFramework):
        with p2p_lock:
            assert_equal(peer.tx_getdata_count, 1)

    def test_large_inv_batch(self):
        self.log.info('Test how large inv batches are handled with relay permission')
        self.restart_node(0, extra_args=['-whitelist=relay@127.0.0.1'])
        peer = self.nodes[0].add_p2p_connection(TestP2PConn())
        peer.send_message(msg_inv([CInv(t=MSG_WTX, h=wtxid) for wtxid in range(MAX_PEER_TX_ANNOUNCEMENTS + 1)]))
        peer.wait_until(lambda: peer.tx_getdata_count == MAX_PEER_TX_ANNOUNCEMENTS + 1)

        self.log.info('Test how large inv batches are handled without relay permission')
        self.restart_node(0)
        peer = self.nodes[0].add_p2p_connection(TestP2PConn())
        peer.send_message(msg_inv([CInv(t=MSG_WTX, h=wtxid) for wtxid in range(MAX_PEER_TX_ANNOUNCEMENTS + 1)]))
        peer.wait_until(lambda: peer.tx_getdata_count == MAX_PEER_TX_ANNOUNCEMENTS)
        peer.sync_with_ping()
        with p2p_lock:
            assert_equal(peer.tx_getdata_count, MAX_PEER_TX_ANNOUNCEMENTS)

    def test_spurious_notfound(self):
        self.log.info('Check that spurious notfound is ignored')
        self.nodes[0].p2ps[0].send_message(msg_notfound(vec=[CInv(MSG_TX, 1)]))
@@ -225,6 +242,7 @@ class TxDownloadTest(BitcoinTestFramework):
        self.test_disconnect_fallback()
        self.test_notfound_fallback()
        self.test_preferred_inv()
        self.test_large_inv_batch()
        self.test_spurious_notfound()

        # Run each test against new bitcoind instances, as setting mocktimes has long-term effects on when