Stop enforcing ancestor size/count limits

The cluster limits should be sufficient.

Co-Authored-By: Gregory Sanders <gsanders87@gmail.com>
Suhas Daftuar
2023-09-28 17:59:15 -04:00
parent 1f93227a84
commit 9cda64b86c
9 changed files with 22 additions and 216 deletions
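A rough way to see why the per-transaction ancestor checks become redundant: every unconfirmed ancestor of a transaction is connected to it by dependency edges, so it lives in the same cluster, and any cap on a cluster's transaction count or total size therefore also caps the ancestor set. A toy, self-contained sketch of that containment (illustrative Python only, not part of this change; the graph and helper names are made up):

# Toy dependency graph: child -> list of unconfirmed parents.
from collections import defaultdict

deps = {"B": ["A"], "C": ["A"], "D": ["B", "C"]}

def ancestors(tx):
    """All transitive in-mempool parents of tx."""
    seen, stack = set(), list(deps.get(tx, []))
    while stack:
        parent = stack.pop()
        if parent not in seen:
            seen.add(parent)
            stack.extend(deps.get(parent, []))
    return seen

def cluster(tx):
    """Connected component of tx in the undirected dependency graph."""
    adj = defaultdict(set)
    for child, parents in deps.items():
        for parent in parents:
            adj[child].add(parent)
            adj[parent].add(child)
    seen, stack = {tx}, [tx]
    while stack:
        for nxt in adj[stack.pop()]:
            if nxt not in seen:
                seen.add(nxt)
                stack.append(nxt)
    return seen

# The ancestor set is always contained in the cluster, so a limit on cluster
# count (or on total cluster size) also bounds any ancestor set.
assert ancestors("D") <= cluster("D")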


@@ -55,7 +55,7 @@ bool TestLockPointValidity(CChain& active_chain, const LockPoints& lp)
}
void CTxMemPool::UpdateForDescendants(txiter updateIt, cacheMap& cachedDescendants,
- const std::set<Txid>& setExclude, std::set<Txid>& descendants_to_remove)
+ const std::set<Txid>& setExclude)
{
CTxMemPoolEntry::Children stageEntries, descendants;
stageEntries = updateIt->GetMemPoolChildrenConst();
@@ -94,12 +94,6 @@ void CTxMemPool::UpdateForDescendants(txiter updateIt, cacheMap& cachedDescendan
mapTx.modify(mapTx.iterator_to(descendant), [=](CTxMemPoolEntry& e) {
e.UpdateAncestorState(updateIt->GetTxSize(), updateIt->GetModifiedFee(), 1, updateIt->GetSigOpCost());
});
- // Don't directly remove the transaction here -- doing so would
- // invalidate iterators in cachedDescendants. Mark it for removal
- // by inserting into descendants_to_remove.
- if (descendant.GetCountWithAncestors() > uint64_t(m_opts.limits.ancestor_count) || descendant.GetSizeWithAncestors() > m_opts.limits.ancestor_size_vbytes) {
- descendants_to_remove.insert(descendant.GetTx().GetHash());
- }
}
}
mapTx.modify(updateIt, [=](CTxMemPoolEntry& e) { e.UpdateDescendantState(modifySize, modifyFee, modifyCount); });
@@ -150,7 +144,7 @@ void CTxMemPool::UpdateTransactionsFromBlock(const std::vector<Txid>& vHashesToU
m_txgraph->AddDependency(/*parent=*/*it, /*child=*/*childIter);
}
} // release epoch guard for UpdateForDescendants
- UpdateForDescendants(it, mapMemPoolDescendantsToUpdate, setAlreadyIncluded, descendants_to_remove);
+ UpdateForDescendants(it, mapMemPoolDescendantsToUpdate, setAlreadyIncluded);
}
auto txs_to_remove = m_txgraph->Trim(); // Enforce cluster size limits.
@@ -174,7 +168,6 @@ util::Result<CTxMemPool::setEntries> CTxMemPool::CalculateAncestorsAndCheckLimit
CTxMemPoolEntry::Parents& staged_ancestors,
const Limits& limits) const
{
- int64_t totalSizeWithAncestors = entry_size;
setEntries ancestors;
while (!staged_ancestors.empty()) {
@@ -183,14 +176,11 @@ util::Result<CTxMemPool::setEntries> CTxMemPool::CalculateAncestorsAndCheckLimit
ancestors.insert(stageit);
staged_ancestors.erase(stage);
- totalSizeWithAncestors += stageit->GetTxSize();
if (stageit->GetSizeWithDescendants() + entry_size > limits.descendant_size_vbytes) {
return util::Error{Untranslated(strprintf("exceeds descendant size limit for tx %s [limit: %u]", stageit->GetTx().GetHash().ToString(), limits.descendant_size_vbytes))};
} else if (stageit->GetCountWithDescendants() + entry_count > static_cast<uint64_t>(limits.descendant_count)) {
return util::Error{Untranslated(strprintf("too many descendants for tx %s [limit: %u]", stageit->GetTx().GetHash().ToString(), limits.descendant_count))};
- } else if (totalSizeWithAncestors > limits.ancestor_size_vbytes) {
- return util::Error{Untranslated(strprintf("exceeds ancestor size limit [limit: %u]", limits.ancestor_size_vbytes))};
}
const CTxMemPoolEntry::Parents& parents = stageit->GetMemPoolParentsConst();
@@ -201,9 +191,6 @@ util::Result<CTxMemPool::setEntries> CTxMemPool::CalculateAncestorsAndCheckLimit
if (ancestors.count(parent_it) == 0) {
staged_ancestors.insert(parent);
}
- if (staged_ancestors.size() + ancestors.size() + entry_count > static_cast<uint64_t>(limits.ancestor_count)) {
- return util::Error{Untranslated(strprintf("too many unconfirmed ancestors [limit: %u]", limits.ancestor_count))};
- }
}
}
@@ -216,12 +203,8 @@ util::Result<void> CTxMemPool::CheckPackageLimits(const Package& package,
size_t pack_count = package.size();
// Package itself is busting mempool limits; should be rejected even if no staged_ancestors exist
- if (pack_count > static_cast<uint64_t>(m_opts.limits.ancestor_count)) {
- return util::Error{Untranslated(strprintf("package count %u exceeds ancestor count limit [limit: %u]", pack_count, m_opts.limits.ancestor_count))};
- } else if (pack_count > static_cast<uint64_t>(m_opts.limits.descendant_count)) {
+ if (pack_count > static_cast<uint64_t>(m_opts.limits.descendant_count)) {
return util::Error{Untranslated(strprintf("package count %u exceeds descendant count limit [limit: %u]", pack_count, m_opts.limits.descendant_count))};
- } else if (total_vsize > m_opts.limits.ancestor_size_vbytes) {
- return util::Error{Untranslated(strprintf("package size %u exceeds ancestor size limit [limit: %u]", total_vsize, m_opts.limits.ancestor_size_vbytes))};
} else if (total_vsize > m_opts.limits.descendant_size_vbytes) {
return util::Error{Untranslated(strprintf("package size %u exceeds descendant size limit [limit: %u]", total_vsize, m_opts.limits.descendant_size_vbytes))};
}
@@ -232,9 +215,6 @@ util::Result<void> CTxMemPool::CheckPackageLimits(const Package& package,
std::optional<txiter> piter = GetIter(input.prevout.hash);
if (piter) {
staged_ancestors.insert(**piter);
- if (staged_ancestors.size() + package.size() > static_cast<uint64_t>(m_opts.limits.ancestor_count)) {
- return util::Error{Untranslated(strprintf("too many unconfirmed parents [limit: %u]", m_opts.limits.ancestor_count))};
- }
}
}
}
@@ -265,9 +245,6 @@ util::Result<CTxMemPool::setEntries> CTxMemPool::CalculateMemPoolAncestors(
std::optional<txiter> piter = GetIter(tx.vin[i].prevout.hash);
if (piter) {
staged_ancestors.insert(**piter);
- if (staged_ancestors.size() + 1 > static_cast<uint64_t>(limits.ancestor_count)) {
- return util::Error{Untranslated(strprintf("too many unconfirmed parents [limit: %u]", limits.ancestor_count))};
- }
}
}
} else {


@@ -647,8 +647,6 @@ private:
* be removed for violation of ancestor limits.
* @post if updateIt has any non-excluded descendants, cachedDescendants has
* a new cache line for updateIt.
- * @post descendants_to_remove has a new entry for any descendant which exceeded
- * ancestor limits relative to updateIt.
*
* @param[in] updateIt the entry to update for its descendants
* @param[in,out] cachedDescendants a cache where each line corresponds to all
@@ -659,12 +657,9 @@ private:
* that must not be accounted for (because any descendants in setExclude
* were added to the mempool after the transaction being updated and hence
* their state is already reflected in the parent state).
- * @param[out] descendants_to_remove Populated with the txids of entries that
- * exceed ancestor limits. It's the responsibility of the caller to
- * removeRecursive them.
*/
void UpdateForDescendants(txiter updateIt, cacheMap& cachedDescendants,
- const std::set<Txid>& setExclude, std::set<Txid>& descendants_to_remove) EXCLUSIVE_LOCKS_REQUIRED(cs);
+ const std::set<Txid>& setExclude) EXCLUSIVE_LOCKS_REQUIRED(cs);
/** Update ancestors of hash to add/remove it as a descendant transaction. */
void UpdateAncestorsOf(bool add, txiter hash, setEntries &setAncestors) EXCLUSIVE_LOCKS_REQUIRED(cs);
/** Set ancestor state for an entry */


@@ -48,10 +48,6 @@ class MempoolPackageLimitsTest(BitcoinTestFramework):
self.test_chain_limits()
self.test_desc_count_limits()
self.test_desc_count_limits_2()
- self.test_anc_count_limits()
- self.test_anc_count_limits_2()
- self.test_anc_count_limits_bushy()
- self.test_anc_size_limits()
self.test_desc_size_limits()
@check_package_limits
@@ -162,144 +158,6 @@ class MempoolPackageLimitsTest(BitcoinTestFramework):
assert_equal(2, len(package_hex))
return package_hex
- @check_package_limits
- def test_anc_count_limits(self):
- """Create a 'V' shaped chain with 24 transactions in the mempool and 3 in the package:
- M1a M1b
- ^ ^
- M2a M2b
- . .
- . .
- . .
- M12a M12b
- ^ ^
- Pa Pb
- ^ ^
- Pc
- The lowest descendant, Pc, exceeds ancestor limits, but only if the in-mempool
- and in-package ancestors are all considered together.
- """
- node = self.nodes[0]
- package_hex = []
- pc_parent_utxos = []
- self.log.info("Check that in-mempool and in-package ancestors are calculated properly in packages")
- # Two chains of 13 transactions each
- for _ in range(2):
- chain_tip_utxo = self.wallet.send_self_transfer_chain(from_node=node, chain_length=12)[-1]["new_utxo"]
- # Save the 13th transaction for the package
- tx = self.wallet.create_self_transfer(utxo_to_spend=chain_tip_utxo)
- package_hex.append(tx["hex"])
- pc_parent_utxos.append(tx["new_utxo"])
- # Child Pc
- pc_hex = self.wallet.create_self_transfer_multi(utxos_to_spend=pc_parent_utxos)["hex"]
- package_hex.append(pc_hex)
- assert_equal(24, node.getmempoolinfo()["size"])
- assert_equal(3, len(package_hex))
- return package_hex
- @check_package_limits
- def test_anc_count_limits_2(self):
- """Create a 'Y' shaped chain with 24 transactions in the mempool and 2 in the package:
- M1a M1b
- ^ ^
- M2a M2b
- . .
- . .
- . .
- M12a M12b
- ^ ^
- Pc
- ^
- Pd
- The lowest descendant, Pd, exceeds ancestor limits, but only if the in-mempool
- and in-package ancestors are all considered together.
- """
- node = self.nodes[0]
- pc_parent_utxos = []
- self.log.info("Check that in-mempool and in-package ancestors are calculated properly in packages")
- # Two chains of 12 transactions each
- for _ in range(2):
- chaintip_utxo = self.wallet.send_self_transfer_chain(from_node=node, chain_length=12)[-1]["new_utxo"]
- # last 2 transactions will be the parents of Pc
- pc_parent_utxos.append(chaintip_utxo)
- # Child Pc
- pc_tx = self.wallet.create_self_transfer_multi(utxos_to_spend=pc_parent_utxos)
- # Child Pd
- pd_tx = self.wallet.create_self_transfer(utxo_to_spend=pc_tx["new_utxos"][0])
- assert_equal(24, node.getmempoolinfo()["size"])
- return [pc_tx["hex"], pd_tx["hex"]]
- @check_package_limits
- def test_anc_count_limits_bushy(self):
- """Create a tree with 20 transactions in the mempool and 6 in the package:
- M1...M4 M5...M8 M9...M12 M13...M16 M17...M20
- ^ ^ ^ ^ ^ (each with 4 parents)
- P0 P1 P2 P3 P4
- ^ ^ ^ ^ ^ (5 parents)
- PC
- Where M(4i+1)...M+(4i+4) are the parents of Pi and P0, P1, P2, P3, and P4 are the parents of PC.
- P0... P4 individually only have 4 parents each, and PC has no in-mempool parents. But
- combined, PC has 25 in-mempool and in-package parents.
- """
- node = self.nodes[0]
- package_hex = []
- pc_parent_utxos = []
- for _ in range(5): # Make package transactions P0 ... P4
- pc_grandparent_utxos = []
- for _ in range(4): # Make mempool transactions M(4i+1)...M(4i+4)
- pc_grandparent_utxos.append(self.wallet.send_self_transfer(from_node=node)["new_utxo"])
- # Package transaction Pi
- pi_tx = self.wallet.create_self_transfer_multi(utxos_to_spend=pc_grandparent_utxos)
- package_hex.append(pi_tx["hex"])
- pc_parent_utxos.append(pi_tx["new_utxos"][0])
- # Package transaction PC
- pc_hex = self.wallet.create_self_transfer_multi(utxos_to_spend=pc_parent_utxos)["hex"]
- package_hex.append(pc_hex)
- assert_equal(20, node.getmempoolinfo()["size"])
- assert_equal(6, len(package_hex))
- return package_hex
- @check_package_limits
- def test_anc_size_limits(self):
- """Test Case with 2 independent transactions in the mempool and a parent + child in the
- package, where the package parent is the child of both mempool transactions (30KvB each):
- A B
- ^ ^
- C
- ^
- D
- The lowest descendant, D, exceeds ancestor size limits, but only if the in-mempool
- and in-package ancestors are all considered together.
- """
- node = self.nodes[0]
- parent_utxos = []
- target_vsize = 30_000
- high_fee = 10 * target_vsize # 10 sats/vB
- self.log.info("Check that in-mempool and in-package ancestor size limits are calculated properly in packages")
- # Mempool transactions A and B
- for _ in range(2):
- bulked_tx = self.wallet.create_self_transfer(target_vsize=target_vsize)
- self.wallet.sendrawtransaction(from_node=node, tx_hex=bulked_tx["hex"])
- parent_utxos.append(bulked_tx["new_utxo"])
- # Package transaction C
- pc_tx = self.wallet.create_self_transfer_multi(utxos_to_spend=parent_utxos, fee_per_output=high_fee, target_vsize=target_vsize)
- # Package transaction D
- pd_tx = self.wallet.create_self_transfer(utxo_to_spend=pc_tx["new_utxos"][0], target_vsize=target_vsize)
- assert_equal(2, node.getmempoolinfo()["size"])
- return [pc_tx["hex"], pd_tx["hex"]]
@check_package_limits
def test_desc_size_limits(self):
"""Create 3 mempool transactions and 2 package transactions (21KvB each):


@@ -7,8 +7,8 @@
from decimal import Decimal
from test_framework.messages import (
- DEFAULT_ANCESTOR_LIMIT,
DEFAULT_DESCENDANT_LIMIT,
+ DEFAULT_CLUSTER_LIMIT,
)
from test_framework.p2p import P2PTxInvStore
from test_framework.test_framework import BitcoinTestFramework
@@ -19,10 +19,7 @@ from test_framework.util import (
from test_framework.wallet import MiniWallet
# custom limits for node1
- CUSTOM_ANCESTOR_LIMIT = 5
CUSTOM_DESCENDANT_LIMIT = 11
- assert CUSTOM_DESCENDANT_LIMIT >= CUSTOM_ANCESTOR_LIMIT
class MempoolPackagesTest(BitcoinTestFramework):
def set_test_params(self):
@@ -33,7 +30,6 @@ class MempoolPackagesTest(BitcoinTestFramework):
[
],
[
"-limitancestorcount={}".format(CUSTOM_ANCESTOR_LIMIT),
"-limitdescendantcount={}".format(CUSTOM_DESCENDANT_LIMIT),
],
]
@@ -44,8 +40,8 @@ class MempoolPackagesTest(BitcoinTestFramework):
peer_inv_store = self.nodes[0].add_p2p_connection(P2PTxInvStore()) # keep track of invs
- # DEFAULT_ANCESTOR_LIMIT transactions off a confirmed tx should be fine
- chain = self.wallet.create_self_transfer_chain(chain_length=DEFAULT_ANCESTOR_LIMIT)
+ # DEFAULT_DESCENDANT_LIMIT transactions off a confirmed tx should be fine
+ chain = self.wallet.create_self_transfer_chain(chain_length=DEFAULT_DESCENDANT_LIMIT)
witness_chain = [t["wtxid"] for t in chain]
ancestor_vsize = 0
ancestor_fees = Decimal(0)
@@ -59,16 +55,16 @@ class MempoolPackagesTest(BitcoinTestFramework):
# Otherwise, getrawmempool may be inconsistent with getmempoolentry if unbroadcast changes in between
peer_inv_store.wait_for_broadcast(witness_chain)
- # Check mempool has DEFAULT_ANCESTOR_LIMIT transactions in it, and descendant and ancestor
+ # Check mempool has DEFAULT_DESCENDANT_LIMIT transactions in it, and descendant and ancestor
# count and fees should look correct
mempool = self.nodes[0].getrawmempool(True)
- assert_equal(len(mempool), DEFAULT_ANCESTOR_LIMIT)
+ assert_equal(len(mempool), DEFAULT_DESCENDANT_LIMIT)
descendant_count = 1
descendant_fees = 0
descendant_vsize = 0
assert_equal(ancestor_vsize, sum([mempool[tx]['vsize'] for tx in mempool]))
- ancestor_count = DEFAULT_ANCESTOR_LIMIT
+ ancestor_count = DEFAULT_DESCENDANT_LIMIT
assert_equal(ancestor_fees, sum([mempool[tx]['fees']['base'] for tx in mempool]))
# Adding one more transaction on to the chain should fail.
@@ -193,9 +189,9 @@ class MempoolPackagesTest(BitcoinTestFramework):
# Check that node1's mempool is as expected (-> custom ancestor limit)
mempool0 = self.nodes[0].getrawmempool(False)
mempool1 = self.nodes[1].getrawmempool(False)
- assert_equal(len(mempool1), CUSTOM_ANCESTOR_LIMIT)
+ assert_equal(len(mempool1), CUSTOM_DESCENDANT_LIMIT)
assert set(mempool1).issubset(set(mempool0))
- for tx in chain[:CUSTOM_ANCESTOR_LIMIT]:
+ for tx in chain[:CUSTOM_DESCENDANT_LIMIT]:
assert tx in mempool1
entry0 = self.nodes[0].getmempoolentry(tx)
entry1 = self.nodes[1].getmempoolentry(tx)
@@ -240,7 +236,7 @@ class MempoolPackagesTest(BitcoinTestFramework):
# - parent tx for descendant test
# - txs chained off parent tx (-> custom descendant limit)
self.wait_until(lambda: len(self.nodes[1].getrawmempool()) ==
- CUSTOM_ANCESTOR_LIMIT + CUSTOM_DESCENDANT_LIMIT, timeout=10)
+ 2*CUSTOM_DESCENDANT_LIMIT, timeout=10)
mempool0 = self.nodes[0].getrawmempool(False)
mempool1 = self.nodes[1].getrawmempool(False)
assert set(mempool1).issubset(set(mempool0))
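For reference, the new 2*CUSTOM_DESCENDANT_LIMIT expectation above can be read as two groups of CUSTOM_DESCENDANT_LIMIT transactions: with the ancestor limit removed from node1's options, both the chain left over from the earlier test and the parent-plus-children group for the descendant test are now capped only by the descendant limit. A hedged restatement of that check (the helper variable is an illustration, not part of the commit):

# Each of the two relayed groups is limited by -limitdescendantcount alone.
expected_node1_size = CUSTOM_DESCENDANT_LIMIT + CUSTOM_DESCENDANT_LIMIT  # == 2*CUSTOM_DESCENDANT_LIMIT
self.wait_until(lambda: len(self.nodes[1].getrawmempool()) == expected_node1_size, timeout=10)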


@@ -171,7 +171,7 @@ class BytesPerSigOpTest(BitcoinTestFramework):
# But together, it's exceeding limits in the *package* context. If sigops adjusted vsize wasn't being checked
# here, it would get further in validation and give too-long-mempool-chain error instead.
packet_test = self.nodes[0].testmempoolaccept([tx_parent.serialize().hex(), tx_child.serialize().hex()])
expected_package_error = f"package-mempool-limits, package size {2*max_multisig_vsize} exceeds ancestor size limit [limit: 101000]"
expected_package_error = f"package-mempool-limits, package size {2*max_multisig_vsize} exceeds descendant size limit [limit: 101000]"
assert_equal([x["package-error"] for x in packet_test], [expected_package_error] * 2)
# When we actually try to submit, the parent makes it into the mempool, but the child would exceed ancestor vsize limits


@@ -228,27 +228,6 @@ class MempoolTRUC(BitcoinTestFramework):
assert_equal(node.getmempoolentry(tx_v3_parent_large1["txid"])["descendantcount"], 1)
self.generate(node, 1)
self.log.info("Test that a decreased limitancestorsize also applies to v3 parent")
self.restart_node(0, extra_args=["-limitancestorsize=10"])
tx_v3_parent_large2 = self.wallet.send_self_transfer(
from_node=node,
target_vsize=parent_target_vsize,
version=3
)
tx_v3_child_large2 = self.wallet.create_self_transfer(
utxo_to_spend=tx_v3_parent_large2["new_utxo"],
target_vsize=child_target_vsize,
version=3
)
# Parent and child are within TRUC limits
assert_greater_than_or_equal(TRUC_MAX_VSIZE, tx_v3_parent_large2["tx"].get_vsize())
assert_greater_than_or_equal(TRUC_CHILD_MAX_VSIZE, tx_v3_child_large2["tx"].get_vsize())
assert_greater_than(tx_v3_parent_large2["tx"].get_vsize() + tx_v3_child_large2["tx"].get_vsize(), 10000)
assert_raises_rpc_error(-26, "too-long-mempool-chain, exceeds ancestor size limit", node.sendrawtransaction, tx_v3_child_large2["hex"])
self.check_mempool([tx_v3_parent_large2["txid"]])
@cleanup()
def test_truc_ancestors_package(self):
self.log.info("Test that TRUC ancestor limits are checked within the package")


@@ -72,6 +72,7 @@ WITNESS_SCALE_FACTOR = 4
DEFAULT_ANCESTOR_LIMIT = 25 # default max number of in-mempool ancestors
DEFAULT_DESCENDANT_LIMIT = 25 # default max number of in-mempool descendants
+ DEFAULT_CLUSTER_LIMIT = 64 # default max number of transactions in a cluster
# Default setting for -datacarriersize.


@@ -55,9 +55,9 @@ class WalletTest(BitcoinTestFramework):
# whitelist peers to speed up tx relay / mempool sync
self.noban_tx_relay = True
self.extra_args = [
- # Limit mempool descendants as a hack to have wallet txs rejected from the mempool.
+ # Limit mempool clusters as a hack to have wallet txs rejected from the mempool.
# Set walletrejectlongchains=0 so the wallet still creates the transactions.
- ['-limitdescendantcount=3', '-walletrejectlongchains=0'],
+ ['-limitclustercount=3', '-walletrejectlongchains=0'],
[],
]


@@ -467,9 +467,9 @@ class WalletTest(BitcoinTestFramework):
self.log.info("Test -reindex")
self.stop_nodes()
# set lower ancestor limit for later
self.start_node(0, ['-reindex', "-walletrejectlongchains=0", "-limitancestorcount=" + str(chainlimit)])
self.start_node(1, ['-reindex', "-limitancestorcount=" + str(chainlimit)])
self.start_node(2, ['-reindex', "-limitancestorcount=" + str(chainlimit)])
self.start_node(0, ['-reindex', "-walletrejectlongchains=0", "-limitancestorcount=" + str(chainlimit), "-limitclustercount=" + str(chainlimit)])
self.start_node(1, ['-reindex', "-limitclustercount=" + str(chainlimit)])
self.start_node(2, ['-reindex', "-limitclustercount=" + str(chainlimit)])
# reindex will leave rpc warm up "early"; Wait for it to finish
self.wait_until(lambda: [block_count] * 3 == [self.nodes[i].getblockcount() for i in range(3)])
assert_equal(balance_nodes, [self.nodes[i].getbalance() for i in range(3)])
@@ -510,7 +510,7 @@ class WalletTest(BitcoinTestFramework):
# Try with walletrejectlongchains
# Double chain limit but require combining inputs, so we pass AttemptSelection
self.stop_node(0)
extra_args = ["-walletrejectlongchains", "-limitancestorcount=" + str(2 * chainlimit)]
extra_args = ["-walletrejectlongchains", "-limitclustercount=" + str(2 * chainlimit), "-limitancestorcount=" + str(2*chainlimit)]
self.start_node(0, extra_args=extra_args)
# wait until the wallet has submitted all transactions to the mempool
@@ -521,7 +521,7 @@ class WalletTest(BitcoinTestFramework):
node0_balance = self.nodes[0].getbalance()
# With walletrejectlongchains we will not create the tx and store it in our wallet.
assert_raises_rpc_error(-6, f"too many unconfirmed ancestors [limit: {chainlimit * 2}]", self.nodes[0].sendtoaddress, sending_addr, node0_balance - Decimal('0.01'))
assert_raises_rpc_error(-6, "too many unconfirmed transactions in cluster", self.nodes[0].sendtoaddress, sending_addr, node0_balance - Decimal('0.01'))
# Verify nothing new in wallet
assert_equal(total_txs, len(self.nodes[0].listtransactions("*", 99999)))
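To illustrate the wallet-facing change asserted above, a hypothetical minimal reproduction (not part of this commit): it assumes the wallet spends from a single confirmed UTXO so that successive sends chain off each other's change, and that the fourth chained send is the one that trips the limit; the exact count depends on wallet coin selection.

# With a small cluster count limit, walletrejectlongchains makes the wallet refuse to
# build a transaction that would extend an unconfirmed chain past the cluster limit,
# reporting the new cluster-based error instead of the old ancestor-limit one.
self.restart_node(0, extra_args=["-walletrejectlongchains", "-limitclustercount=3"])
addr = self.nodes[0].getnewaddress()
for _ in range(3):
    self.nodes[0].sendtoaddress(addr, 0.1)  # each spend chains off the previous change
assert_raises_rpc_error(-6, "too many unconfirmed transactions in cluster",
                        self.nodes[0].sendtoaddress, addr, 0.1)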