rpc: improve getmempoolcluster output

Suhas Daftuar
2025-11-12 12:46:14 -05:00
parent d2dcd37aac
commit 23d6f457c4
2 changed files with 122 additions and 14 deletions

@@ -269,12 +269,13 @@ static std::vector<RPCResult> ClusterDescription()
     return {
         RPCResult{RPCResult::Type::NUM, "weight", "total sigops-adjusted weight (as defined in BIP 141 and modified by '-bytespersigop')"},
         RPCResult{RPCResult::Type::NUM, "txcount", "number of transactions"},
-        RPCResult{RPCResult::Type::ARR, "txs", "transactions in this cluster in mining order",
-            {RPCResult{RPCResult::Type::OBJ, "txentry", "",
+        RPCResult{RPCResult::Type::ARR, "chunks", "chunks in this cluster (in mining order)",
+            {RPCResult{RPCResult::Type::OBJ, "chunk", "",
             {
-                RPCResult{RPCResult::Type::STR_HEX, "txid", "the transaction id"},
-                RPCResult{RPCResult::Type::NUM, "chunkfee", "fee of the chunk containing this tx"},
-                RPCResult{RPCResult::Type::NUM, "chunkweight", "sigops-adjusted weight of the chunk containing this transaction"}
+                RPCResult{RPCResult::Type::NUM, "chunkfee", "fees of the transactions in this chunk"},
+                RPCResult{RPCResult::Type::NUM, "chunkweight", "sigops-adjusted weight of all transactions in this chunk"},
+                RPCResult{RPCResult::Type::ARR, "txs", "transactions in this chunk in mining order",
+                    {RPCResult{RPCResult::Type::STR_HEX, "txid", "transaction id"}}},
             }
         }}
     }
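Note: the result now contains one entry per chunk rather than one per transaction, with each txid listed inside its chunk. A sketch of the new output shape, with illustrative values and placeholder txids (this is the same shape the functional test below asserts against):

    {
        'weight': 800,       # total sigops-adjusted weight of the cluster
        'txcount': 2,
        'chunks': [          # in mining order
            {
                'chunkfee': Decimal('0.00001000'),  # summed fees of the chunk's txs
                'chunkweight': 800,                 # summed sigops-adjusted weight
                'txs': ['<txid_a>', '<txid_b>'],    # txids in mining order
            },
        ],
    }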
@@ -311,6 +312,19 @@ static std::vector<RPCResult> MempoolEntryDescription()
     };
 }
 
+void AppendChunkInfo(UniValue& all_chunks, FeePerWeight chunk_feerate, std::vector<const CTxMemPoolEntry *> chunk_txs)
+{
+    UniValue chunk(UniValue::VOBJ);
+    chunk.pushKV("chunkfee", ValueFromAmount((int)chunk_feerate.fee));
+    chunk.pushKV("chunkweight", chunk_feerate.size);
+    UniValue chunk_txids(UniValue::VARR);
+    for (const auto& chunk_tx : chunk_txs) {
+        chunk_txids.push_back(chunk_tx->GetTx().GetHash().ToString());
+    }
+    chunk.pushKV("txs", std::move(chunk_txids));
+    all_chunks.push_back(std::move(chunk));
+}
+
 static void clusterToJSON(const CTxMemPool& pool, UniValue& info, std::vector<const CTxMemPoolEntry *> cluster) EXCLUSIVE_LOCKS_REQUIRED(pool.cs)
 {
     AssertLockHeld(pool.cs);
@@ -320,16 +334,29 @@ static void clusterToJSON(const CTxMemPool& pool, UniValue& info, std::vector<co
     }
     info.pushKV("weight", total_weight);
     info.pushKV("txcount", (int)cluster.size());
-    UniValue txs(UniValue::VARR);
+    // Output the cluster by chunk. This isn't handed to us by the mempool, but
+    // we can calculate it by looking at the chunk feerates of each transaction
+    // in the cluster.
+    FeePerWeight current_chunk_feerate = pool.GetMainChunkFeerate(*cluster[0]);
+    std::vector<const CTxMemPoolEntry *> current_chunk;
+    current_chunk.reserve(cluster.size());
+    UniValue all_chunks(UniValue::VARR);
     for (const auto& tx : cluster) {
-        UniValue txentry(UniValue::VOBJ);
-        auto feerate = pool.GetMainChunkFeerate(*tx);
-        txentry.pushKV("txid", tx->GetTx().GetHash().ToString());
-        txentry.pushKV("chunkfee", ValueFromAmount((int)feerate.fee));
-        txentry.pushKV("chunkweight", feerate.size);
-        txs.push_back(txentry);
+        if (current_chunk_feerate.size == 0) {
+            // We've iterated over all the transactions in the previous chunk,
+            // so append it to the output.
+            AppendChunkInfo(all_chunks, pool.GetMainChunkFeerate(*current_chunk[0]), current_chunk);
+            current_chunk.clear();
+            current_chunk_feerate = pool.GetMainChunkFeerate(*tx);
+        }
+        current_chunk.push_back(tx);
+        current_chunk_feerate.size -= tx->GetAdjustedWeight();
     }
-    info.pushKV("txs", txs);
+    AppendChunkInfo(all_chunks, pool.GetMainChunkFeerate(*current_chunk[0]), current_chunk);
+    current_chunk.clear();
+    info.pushKV("chunks", std::move(all_chunks));
 }
 
 static void entryToJSON(const CTxMemPool& pool, UniValue& info, const CTxMemPoolEntry& e) EXCLUSIVE_LOCKS_REQUIRED(pool.cs)
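Note: to restate the grouping logic added above, each transaction's main-chunk feerate carries the total weight of the chunk it belongs to, so the loop treats that size as a countdown of remaining weight and flushes a chunk whenever the countdown reaches zero. A minimal Python sketch of the same walk, where cluster, chunk_feerate_of, and adjusted_weight_of are illustrative stand-ins for the mempool accessors used above:

    def cluster_to_chunks(cluster, chunk_feerate_of, adjusted_weight_of):
        """Group a cluster (given in mining order) into its chunks."""
        chunks = []
        current_chunk = []
        remaining = chunk_feerate_of(cluster[0]).size  # weight left in the current chunk
        for tx in cluster:
            if remaining == 0:
                # The previous chunk is complete; flush it and start the next one.
                chunks.append(current_chunk)
                current_chunk = []
                remaining = chunk_feerate_of(tx).size
            current_chunk.append(tx)
            remaining -= adjusted_weight_of(tx)
        chunks.append(current_chunk)  # flush the final chunk
        return chunks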
@@ -673,7 +700,13 @@ static RPCHelpMan getmempoolcluster()
     const CTxMemPool& mempool = EnsureAnyMemPool(request.context);
     LOCK(mempool.cs);
-    auto cluster = mempool.GetCluster(Txid::FromUint256(hash));
+    auto txid = Txid::FromUint256(hash);
+    const auto entry{mempool.GetEntry(txid)};
+    if (entry == nullptr) {
+        throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Transaction not in mempool");
+    }
+    auto cluster = mempool.GetCluster(txid);
     UniValue info(UniValue::VOBJ);
     clusterToJSON(mempool, info, cluster);

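Note: with the GetEntry check above, an unknown txid now fails fast with RPC_INVALID_ADDRESS_OR_KEY (-5) instead of reaching GetCluster. A minimal sketch of exercising this from the functional-test framework (as the test below does), assuming a node handle and a well-formed but absent txid:

    # A txid that is valid hex but not in the mempool now raises -5.
    assert_raises_rpc_error(-5, "Transaction not in mempool",
                            node.getmempoolcluster, "00" * 32)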
@@ -298,12 +298,87 @@ class MempoolClusterTest(BitcoinTestFramework):
         assert tx_replacer_sponsor["txid"] in node.getrawmempool()
         assert_equal(node.getmempoolcluster(tx_replacer["txid"])['txcount'], 2)
 
+    @cleanup
+    def test_getmempoolcluster(self):
+        node = self.nodes[0]
+        self.log.info("Testing getmempoolcluster")
+        assert_equal(node.getrawmempool(), [])
+
+        # Not in-mempool
+        not_mempool_tx = self.wallet.create_self_transfer()
+        assert_raises_rpc_error(-5, "Transaction not in mempool", node.getmempoolcluster, not_mempool_tx["txid"])
+
+        # Test that chunks are recomputed properly.
+        # One chunk with one tx
+        first_chunk_tx = self.wallet.send_self_transfer(from_node=node)
+        first_chunk_info = node.getmempoolcluster(first_chunk_tx["txid"])
+        assert_equal(first_chunk_info, {'weight': first_chunk_tx["tx"].get_weight(), 'txcount': 1, 'chunks': [{'chunkfee': first_chunk_tx["fee"], 'chunkweight': first_chunk_tx["tx"].get_weight(), 'txs': [first_chunk_tx["txid"]]}]})
+
+        # Another unconnected tx; nothing should change
+        self.wallet.send_self_transfer(from_node=node)
+        first_chunk_info = node.getmempoolcluster(first_chunk_tx["txid"])
+        assert_equal(first_chunk_info, {'weight': first_chunk_tx["tx"].get_weight(), 'txcount': 1, 'chunks': [{'chunkfee': first_chunk_tx["fee"], 'chunkweight': first_chunk_tx["tx"].get_weight(), 'txs': [first_chunk_tx["txid"]]}]})
+
+        # Second connected tx, with a fee high enough that the cluster is still one chunk
+        second_chunk_tx = self.wallet.send_self_transfer(from_node=node, utxo_to_spend=first_chunk_tx["new_utxo"], fee_rate=Decimal("0.01"))
+        first_chunk_info = node.getmempoolcluster(first_chunk_tx["txid"])
+        # Output is the same for every transaction in the same cluster
+        assert_equal(first_chunk_info, node.getmempoolcluster(second_chunk_tx["txid"]))
+        chunkweight = first_chunk_tx["tx"].get_weight() + second_chunk_tx["tx"].get_weight()
+        chunkfee = first_chunk_tx["fee"] + second_chunk_tx["fee"]
+        assert_equal(first_chunk_info, {'weight': chunkweight, 'txcount': 2, 'chunks': [{'chunkfee': chunkfee, 'chunkweight': chunkweight, 'txs': [first_chunk_tx["txid"], second_chunk_tx["txid"]]}]})
+
+        # Third connected tx, with a fee high enough that the cluster is still one chunk
+        third_chunk_tx = self.wallet.send_self_transfer(from_node=node, utxo_to_spend=second_chunk_tx["new_utxo"], fee_rate=Decimal("0.1"))
+        first_chunk_info = node.getmempoolcluster(first_chunk_tx["txid"])
+        # Output is the same for every transaction in the same cluster
+        assert_equal(first_chunk_info, node.getmempoolcluster(third_chunk_tx["txid"]))
+        chunkweight = first_chunk_tx["tx"].get_weight() + second_chunk_tx["tx"].get_weight() + third_chunk_tx["tx"].get_weight()
+        chunkfee = first_chunk_tx["fee"] + second_chunk_tx["fee"] + third_chunk_tx["fee"]
+        assert_equal(first_chunk_info, {'weight': chunkweight, 'txcount': 3, 'chunks': [{'chunkfee': chunkfee, 'chunkweight': chunkweight, 'txs': [first_chunk_tx["txid"], second_chunk_tx["txid"], third_chunk_tx["txid"]]}]})
+
+        # Now test a single cluster in which each tx is its own chunk.
+        # One chunk with one tx
+        first_chunk_tx = self.wallet.send_self_transfer(from_node=node)
+        first_chunk_info = node.getmempoolcluster(first_chunk_tx["txid"])
+        assert_equal(first_chunk_info, {'weight': first_chunk_tx["tx"].get_weight(), 'txcount': 1, 'chunks': [{'chunkfee': first_chunk_tx["fee"], 'chunkweight': first_chunk_tx["tx"].get_weight(), 'txs': [first_chunk_tx["txid"]]}]})
+
+        # Second connected tx, with a lower feerate, so it forms its own chunk
+        second_chunk_tx = self.wallet.send_self_transfer(from_node=node, utxo_to_spend=first_chunk_tx["new_utxo"], fee_rate=Decimal("0.000002"))
+        first_chunk_info = node.getmempoolcluster(first_chunk_tx["txid"])
+        # Output is the same for every transaction in the same cluster
+        assert_equal(first_chunk_info, node.getmempoolcluster(second_chunk_tx["txid"]))
+        first_chunkweight = first_chunk_tx["tx"].get_weight()
+        second_chunkweight = second_chunk_tx["tx"].get_weight()
+        assert_equal(first_chunk_info, {'weight': first_chunkweight + second_chunkweight, 'txcount': 2, 'chunks': [{'chunkfee': first_chunk_tx["fee"], 'chunkweight': first_chunkweight, 'txs': [first_chunk_tx["txid"]]}, {'chunkfee': second_chunk_tx["fee"], 'chunkweight': second_chunkweight, 'txs': [second_chunk_tx["txid"]]}]})
+
+        # Third connected tx, with an even lower feerate, forming its own chunk
+        third_chunk_tx = self.wallet.send_self_transfer(from_node=node, utxo_to_spend=second_chunk_tx["new_utxo"], fee_rate=Decimal("0.000001"))
+        first_chunk_info = node.getmempoolcluster(first_chunk_tx["txid"])
+        # Output is the same for every transaction in the same cluster
+        assert_equal(first_chunk_info, node.getmempoolcluster(third_chunk_tx["txid"]))
+        first_chunkweight = first_chunk_tx["tx"].get_weight()
+        second_chunkweight = second_chunk_tx["tx"].get_weight()
+        third_chunkweight = third_chunk_tx["tx"].get_weight()
+        chunkfee = first_chunk_tx["fee"] + second_chunk_tx["fee"] + third_chunk_tx["fee"]
+        assert_equal(first_chunk_info, {'weight': first_chunkweight + second_chunkweight + third_chunkweight, 'txcount': 3, 'chunks': [{'chunkfee': first_chunk_tx["fee"], 'chunkweight': first_chunkweight, 'txs': [first_chunk_tx["txid"]]}, {'chunkfee': second_chunk_tx["fee"], 'chunkweight': second_chunkweight, 'txs': [second_chunk_tx["txid"]]}, {'chunkfee': third_chunk_tx["fee"], 'chunkweight': third_chunkweight, 'txs': [third_chunk_tx["txid"]]}]})
+
+        # If we prioritise the last transaction, it can join the second transaction's chunk.
+        node.prioritisetransaction(third_chunk_tx["txid"], 0, int(third_chunk_tx["fee"] * COIN) + 1)
+        first_chunk_info = node.getmempoolcluster(first_chunk_tx["txid"])
+        assert_equal(first_chunk_info, {'weight': first_chunkweight + second_chunkweight + third_chunkweight, 'txcount': 3, 'chunks': [{'chunkfee': first_chunk_tx["fee"], 'chunkweight': first_chunkweight, 'txs': [first_chunk_tx["txid"]]}, {'chunkfee': second_chunk_tx["fee"] + 2*third_chunk_tx["fee"] + Decimal("0.00000001"), 'chunkweight': second_chunkweight + third_chunkweight, 'txs': [second_chunk_tx["txid"], third_chunk_tx["txid"]]}]})
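+        # Why this chunk fee: prioritisetransaction raised the third tx's
+        # modified fee by int(fee * COIN) + 1 satoshis, i.e. by its own fee
+        # plus one satoshi, so the merged chunk's fee works out to
+        # second fee + 2 * third fee + 0.00000001 BTC, as asserted above.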
 
     def run_test(self):
         node = self.nodes[0]
         self.wallet = MiniWallet(node)
         self.generate(self.wallet, 400)
+        self.test_getmempoolcluster()
         self.test_cluster_limit_rbf(DEFAULT_CLUSTER_LIMIT)
         for cluster_size_limit_kvb in [10, 20, 33, 100, DEFAULT_CLUSTER_SIZE_LIMIT_KVB]: