Mirror of https://github.com/bitcoin/bitcoin.git
txgraph: use fallback order to sort chunks (feature)
This makes TxGraph also use the fallback order to decide the order of chunks from distinct clusters. The order of chunks across clusters becomes:

1. Feerate (high to low)
2. Equal-feerate chunk prefix (small to large)
3. Max txid (chunk with the lowest maximum txid first)

This makes the full TxGraph ordering fully deterministic as long as all clusters in it are optimally linearized.
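To illustrate the ordering described in the commit message, the sketch below compares two chunks from distinct clusters by (feerate, equal-feerate chunk prefix size, max txid). This is a minimal, hypothetical sketch only: ChunkKey and ChunkBefore are made-up names, not part of the TxGraph API, and the real implementation compares entries via CompareMainTransactions using a configurable m_fallback_order rather than a hard-coded txid rule.

#include <cstdint>

// Hypothetical per-chunk summary, for illustration only.
struct ChunkKey {
    int64_t fee;          // total fee of the chunk
    int32_t size;         // total size of the chunk
    int32_t prefix_size;  // equal-feerate chunk prefix size within its cluster
    uint64_t max_txid;    // highest txid in the chunk (stand-in for the fallback order)
};

// True if chunk a sorts before chunk b across clusters:
// 1. higher feerate first, 2. smaller equal-feerate prefix first, 3. lower max txid first.
bool ChunkBefore(const ChunkKey& a, const ChunkKey& b)
{
    // Compare fee/size as exact fractions: a.fee / a.size vs b.fee / b.size.
    __int128 lhs = static_cast<__int128>(a.fee) * b.size;
    __int128 rhs = static_cast<__int128>(b.fee) * a.size;
    if (lhs != rhs) return lhs > rhs;
    if (a.prefix_size != b.prefix_size) return a.prefix_size < b.prefix_size;
    return a.max_txid < b.max_txid;
}

With all three keys compared, any two chunks from different clusters get a deterministic relative order, which is what the fuzz target below checks with its (prefix size, max txid) tie-break pair.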
@@ -1114,13 +1114,13 @@ FUZZ_TARGET(txgraph)
std::map<SimTxGraph::Pos, int32_t> comp_prefix_sizes;
/** Current chunk feerate. */
FeeFrac last_chunk_feerate;
/** Largest seen equal-feerate chunk prefix size. */
int32_t max_chunk_prefix_size{0};
/** Largest seen (equal-feerate chunk prefix size, max txid). */
std::pair<int32_t, uint64_t> max_chunk_tiebreak{0, 0};
for (const auto& chunk : real_chunking) {
// If this is the first chunk with a strictly lower feerate, reset.
if (chunk.feerate << last_chunk_feerate) {
comp_prefix_sizes.clear();
max_chunk_prefix_size = 0;
max_chunk_tiebreak = {0, 0};
}
last_chunk_feerate = chunk.feerate;
// Find which sim component this chunk belongs to.
@@ -1129,10 +1129,17 @@ FUZZ_TARGET(txgraph)
auto comp_key = component.First();
auto& comp_prefix_size = comp_prefix_sizes[comp_key];
comp_prefix_size += chunk.feerate.size;
// Verify consistency: within each group of equal-feerate chunks, the equal-feerate
// chunk prefix size must be monotonically increasing.
assert(comp_prefix_size >= max_chunk_prefix_size);
max_chunk_prefix_size = comp_prefix_size;
// Determine the chunk's max txid.
uint64_t chunk_max_txid{0};
for (auto tx : chunk.transactions) {
auto txid = sims[0].GetRef(tx)->m_txid;
chunk_max_txid = std::max(txid, chunk_max_txid);
}
// Verify consistency: within each group of equal-feerate chunks, the
// (equal-feerate chunk prefix size, max txid) must be increasing.
std::pair<int32_t, uint64_t> chunk_tiebreak{comp_prefix_size, chunk_max_txid};
assert(chunk_tiebreak > max_chunk_tiebreak);
max_chunk_tiebreak = chunk_tiebreak;
}

// Verify that within each cluster, the internal ordering matches that of the
@@ -1150,6 +1157,52 @@ FUZZ_TARGET(txgraph)
assert(sim_chunk_lin == real_chunk_lin);
}
}

// Verify that a fresh TxGraph, with the same transactions and txids, but constructed
// in a different order, and with a different RNG state, recreates the exact same
// ordering, showing that for optimal graphs, the full mempool ordering is
// deterministic.
auto real_redo = MakeTxGraph(
/*max_cluster_count=*/max_cluster_count,
/*max_cluster_size=*/max_cluster_size,
/*acceptable_iters=*/acceptable_iters,
/*fallback_order=*/fallback_order);
/** Vector (indexed by SimTxGraph::Pos) of TxObjects in real_redo. */
std::vector<std::optional<SimTxObject>> txobjects_redo;
txobjects_redo.resize(sims[0].graph.PositionRange());
// Recreate the graph's transactions with same feerate and txid.
std::vector<DepGraphIndex> positions;
for (auto i : sims[0].graph.Positions()) positions.push_back(i);
std::shuffle(positions.begin(), positions.end(), rng);
for (auto i : positions) {
txobjects_redo[i].emplace(sims[0].GetRef(i)->m_txid);
real_redo->AddTransaction(*txobjects_redo[i], FeePerWeight::FromFeeFrac(sims[0].graph.FeeRate(i)));
}
// Recreate the graph's dependencies.
std::vector<std::pair<DepGraphIndex, DepGraphIndex>> deps;
for (auto i : sims[0].graph.Positions()) {
for (auto j : sims[0].graph.GetReducedParents(i)) {
deps.emplace_back(j, i);
}
}
std::shuffle(deps.begin(), deps.end(), rng);
for (auto [parent, child] : deps) {
real_redo->AddDependency(*txobjects_redo[parent], *txobjects_redo[child]);
}
// Do work to reach optimality.
if (real_redo->DoWork(300000)) {
// Start from a random permutation.
auto vec_redo = vec1;
std::shuffle(vec_redo.begin(), vec_redo.end(), rng);
if (vec_redo == vec1) std::next_permutation(vec_redo.begin(), vec_redo.end());
// Sort it according to the main graph order in real_redo.
auto cmp_redo = [&](SimTxGraph::Pos a, SimTxGraph::Pos b) noexcept {
return real_redo->CompareMainOrder(*txobjects_redo[a], *txobjects_redo[b]) < 0;
};
std::sort(vec_redo.begin(), vec_redo.end(), cmp_redo);
// Compare with the ordering we got from real.
assert(vec1 == vec_redo);
}
}

// For every transaction in the total ordering, find a random one before it and after it,
@@ -492,6 +492,7 @@ private:
/** Compare two entries (which must both exist within the main graph). */
std::strong_ordering CompareMainTransactions(GraphIndex a, GraphIndex b) const noexcept
{
if (a == b) return std::strong_ordering::equal;
Assume(a < m_entries.size() && b < m_entries.size());
const auto& entry_a = m_entries[a];
const auto& entry_b = m_entries[b];
@@ -506,12 +507,17 @@ private:
if (entry_a.m_main_equal_feerate_chunk_prefix_size != entry_b.m_main_equal_feerate_chunk_prefix_size) {
return entry_a.m_main_equal_feerate_chunk_prefix_size <=> entry_b.m_main_equal_feerate_chunk_prefix_size;
}
// Compare Cluster m_sequence as tie-break for equal chunk feerates in distinct clusters,
// when the equal-feerate-prefix size is also the same.
// Compare by maximum m_fallback_order element to order equal-feerate chunks in distinct
// clusters, when the equal-feerate-prefix size is also the same.
const auto& locator_a = entry_a.m_locator[0];
const auto& locator_b = entry_b.m_locator[0];
Assume(locator_a.IsPresent() && locator_b.IsPresent());
if (locator_a.cluster != locator_b.cluster) {
auto fallback_cmp = m_fallback_order(*m_entries[entry_a.m_main_max_chunk_fallback].m_ref,
*m_entries[entry_b.m_main_max_chunk_fallback].m_ref);
if (fallback_cmp != 0) return fallback_cmp;
// This shouldn't be reachable as m_fallback_order defines a strong ordering.
Assume(false);
return CompareClusters(locator_a.cluster, locator_b.cluster);
}
// Within a single chunk, sort by position within cluster linearization.
@@ -614,6 +620,9 @@ private:
int32_t m_main_equal_feerate_chunk_prefix_size;
/** The position this transaction has in the main linearization (if present). */
LinearizationIndex m_main_lin_index;
/** Of all transactions within this transaction's chunk in main (if present there), the
* maximal one according to m_fallback_order. */
GraphIndex m_main_max_chunk_fallback = GraphIndex(-1);
};

/** The set of all transactions (in all levels combined). GraphIndex values index into this. */
@@ -884,6 +893,9 @@ void TxGraphImpl::ClearChunkData(Entry& entry) noexcept
void TxGraphImpl::CreateChunkData(GraphIndex idx, LinearizationIndex chunk_count) noexcept
{
auto& entry = m_entries[idx];
// Make sure to not create chunk data for unlinked entries, which would make invoking
// m_fallback_order on them impossible.
Assume(entry.m_ref != nullptr);
if (!m_main_chunkindex_discarded.empty()) {
// Reuse a discarded node handle.
auto& node = m_main_chunkindex_discarded.back().value();
@@ -1096,6 +1108,17 @@ void GenericClusterImpl::Updated(TxGraphImpl& graph, int level, bool rename) noe
// ratio remains the same; it's just accounting for the size of the added chunk.
equal_feerate_chunk_feerate += chunk.feerate;
}
// Determine the m_fallback_order maximum transaction in the chunk.
auto it = chunk.transactions.begin();
GraphIndex max_element = m_mapping[*it];
++it;
while (it != chunk.transactions.end()) {
GraphIndex this_element = m_mapping[*it];
if (graph.m_fallback_order(*graph.m_entries[this_element].m_ref, *graph.m_entries[max_element].m_ref) > 0) {
max_element = this_element;
}
++it;
}
// Iterate over the transactions in the linearization, which must match those in chunk.
while (true) {
DepGraphIndex idx = m_linearization[lin_idx];
@@ -1104,6 +1127,7 @@ void GenericClusterImpl::Updated(TxGraphImpl& graph, int level, bool rename) noe
entry.m_main_lin_index = lin_idx++;
entry.m_main_chunk_feerate = FeePerWeight::FromFeeFrac(chunk.feerate);
entry.m_main_equal_feerate_chunk_prefix_size = equal_feerate_chunk_feerate.size;
entry.m_main_max_chunk_fallback = max_element;
Assume(chunk.transactions[idx]);
chunk.transactions.Reset(idx);
if (chunk.transactions.None()) {
@@ -1138,6 +1162,7 @@ void SingletonClusterImpl::Updated(TxGraphImpl& graph, int level, bool rename) n
entry.m_main_lin_index = 0;
entry.m_main_chunk_feerate = m_feerate;
entry.m_main_equal_feerate_chunk_prefix_size = m_feerate.size;
entry.m_main_max_chunk_fallback = m_graph_index;
// Always use the special LinearizationIndex(-1), indicating singleton chunk at end of
// Cluster, here.
if (!rename) graph.CreateChunkData(m_graph_index, LinearizationIndex(-1));
@@ -1792,10 +1817,8 @@ void TxGraphImpl::Compact() noexcept
m_entries.pop_back();
}

// In a future commit, chunk information will end up containing a GraphIndex of the
// max-fallback transaction in the chunk. Since GraphIndex values may have been reassigned, we
// will need to recompute the chunk information (even if not IsAcceptable), so that the index
// order and comparisons remain consistent.
// Update the affected clusters, to fixup Entry::m_main_max_chunk_fallback values which may
// have become outdated due to the compaction above.
std::sort(affected_main.begin(), affected_main.end());
affected_main.erase(std::unique(affected_main.begin(), affected_main.end()), affected_main.end());
for (Cluster* cluster : affected_main) {
@@ -2655,6 +2678,10 @@ void TxGraphImpl::CommitStaging() noexcept
// Staging must exist.
Assume(m_staging_clusterset.has_value());
Assume(m_main_chunkindex_observers == 0);
// Get rid of removed transactions in staging before moving to main, so they do not need to be
// added to the chunk index there. Doing so is impossible if they were unlinked, and thus have
// no Ref anymore to pass to the fallback comparator.
ApplyRemovals(/*up_to_level=*/1);
// Delete all conflicting Clusters in main, to make place for moving the staging ones
// there. All of these have been copied to staging in PullIn().
auto conflicts = GetConflicts();
@@ -2841,6 +2868,7 @@ void GenericClusterImpl::SanityCheck(const TxGraphImpl& graph, int level) const
if (!linchunking[chunk_num].transactions[lin_pos]) {
// First transaction of a new chunk.
++chunk_num;
assert(chunk_num < linchunking.size());
chunk_pos = 0;
if (linchunking[chunk_num].feerate << equal_feerate_prefix) {
equal_feerate_prefix = linchunking[chunk_num].feerate;

@@ -239,9 +239,9 @@ class MempoolPackagesTest(BitcoinTestFramework):
self.generate(self.nodes[0], 1)
self.trigger_reorg(fork_blocks, self.nodes[0])

# Check if the txs are returned to the mempool (though the transaction ordering may
# change as it is non-deterministic).
assert_equal(set(self.nodes[0].getrawmempool()), set(mempool0))
# Check that the txs are returned to the mempool, and that transaction ordering is
# unchanged, as it is deterministic.
assert_equal(self.nodes[0].getrawmempool(), mempool0)

# Clean-up the mempool
self.generate(self.nodes[0], 1)