diff --git a/contrib/devtools/README.md b/contrib/devtools/README.md index 366cd4a07df..3309057b6f3 100644 --- a/contrib/devtools/README.md +++ b/contrib/devtools/README.md @@ -137,7 +137,7 @@ BUILDDIR=$PWD/my-build-dir contrib/devtools/gen-manpages.py headerssync-params.py ===================== -A script to generate optimal parameters for the headerssync module (src/headerssync.cpp). It takes no command-line +A script to generate optimal parameters for the headerssync module (stored in src/kernel/chainparams.cpp). It takes no command-line options, as all its configuration is set at the top of the file. It runs many times faster inside PyPy. Invocation: ```bash diff --git a/contrib/devtools/headerssync-params.py b/contrib/devtools/headerssync-params.py index ece1a786302..82bdaadb233 100755 --- a/contrib/devtools/headerssync-params.py +++ b/contrib/devtools/headerssync-params.py @@ -5,8 +5,8 @@ """Script to find the optimal parameters for the headerssync module through simulation.""" -from math import log, exp, sqrt from datetime import datetime, timedelta +from math import log, exp, sqrt import random # Parameters: @@ -337,15 +337,15 @@ def analyze(when): attack_volume = NET_HEADER_SIZE * MINCHAINWORK_HEADERS # And report them. print() - print("Optimal configuration:") + print(f"Given current min chainwork headers of {MINCHAINWORK_HEADERS}, the optimal parameters for low") + print(f"memory usage on mainchain for release until {TIME:%Y-%m-%d} is:") print() - print("//! Store one header commitment per HEADER_COMMITMENT_PERIOD blocks.") - print(f"constexpr size_t HEADER_COMMITMENT_PERIOD{{{period}}};") - print() - print("//! Only feed headers to validation once this many headers on top have been") - print("//! 
received and validated against commitments.") - print(f"constexpr size_t REDOWNLOAD_BUFFER_SIZE{{{bufsize}}};") + print(f" // Generated by headerssync-params.py on {datetime.today():%Y-%m-%d}.") + print( " m_headers_sync_params = HeadersSyncParams{") + print(f" .commitment_period = {period},") + print(f" .redownload_buffer_size = {bufsize}," f" // {bufsize}/{period} = ~{bufsize/period:.1f} commitments") + print( " };") print() print("Properties:") print(f"- Per-peer memory for mainchain sync: {mem_mainchain / 8192:.3f} KiB") diff --git a/doc/release-process.md b/doc/release-process.md index 9159b8cc8c8..272f36eadcf 100644 --- a/doc/release-process.md +++ b/doc/release-process.md @@ -53,7 +53,7 @@ Release Process - Set `MINCHAINWORK_HEADERS` to the height used for the `nMinimumChainWork` calculation above. - Check that the other variables still look reasonable. - Run the script. It works fine in CPython, but PyPy is much faster (seconds instead of minutes): `pypy3 contrib/devtools/headerssync-params.py`. - - Paste the output defining `HEADER_COMMITMENT_PERIOD` and `REDOWNLOAD_BUFFER_SIZE` into the top of [`src/headerssync.cpp`](/src/headerssync.cpp). + - Paste the output defining the header `commitment_period` and `redownload_buffer_size` into the mainnet section of [`src/kernel/chainparams.cpp`](/src/kernel/chainparams.cpp). - Clear the release notes and move them to the wiki (see "Write the release notes" below). - Translations on Transifex: - Pull translations from Transifex into the master branch. diff --git a/src/headerssync.cpp b/src/headerssync.cpp index f966b13a922..e52a8cdc486 100644 --- a/src/headerssync.cpp +++ b/src/headerssync.cpp @@ -3,30 +3,24 @@ // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include + #include #include #include #include #include -// The two constants below are computed using the simulation script in -// contrib/devtools/headerssync-params.py. - -//! 
Store one header commitment per HEADER_COMMITMENT_PERIOD blocks. -constexpr size_t HEADER_COMMITMENT_PERIOD{632}; - -//! Only feed headers to validation once this many headers on top have been -//! received and validated against commitments. -constexpr size_t REDOWNLOAD_BUFFER_SIZE{15009}; // 15009/632 = ~23.7 commitments - -// Our memory analysis assumes 48 bytes for a CompressedHeader (so we should -// re-calculate parameters if we compress further) +// Our memory analysis in headerssync-params.py assumes this many bytes for a +// CompressedHeader (we should re-calculate parameters if we compress further). static_assert(sizeof(CompressedHeader) == 48); HeadersSyncState::HeadersSyncState(NodeId id, const Consensus::Params& consensus_params, - const CBlockIndex* chain_start, const arith_uint256& minimum_required_work) : - m_commit_offset(FastRandomContext().randrange(HEADER_COMMITMENT_PERIOD)), + const HeadersSyncParams& params, const CBlockIndex* chain_start, + const arith_uint256& minimum_required_work) : + m_commit_offset((assert(params.commitment_period > 0), // HeadersSyncParams field must be initialized to non-zero. + FastRandomContext().randrange(params.commitment_period))), m_id(id), m_consensus_params(consensus_params), + m_params(params), m_chain_start(chain_start), m_minimum_required_work(minimum_required_work), m_current_chain_work(chain_start->nChainWork), @@ -41,7 +35,9 @@ HeadersSyncState::HeadersSyncState(NodeId id, const Consensus::Params& consensus // exceeds this bound, because it's not possible for a consensus-valid // chain to be longer than this (at the current time -- in the future we // could try again, if necessary, to sync a longer chain). 
- m_max_commitments = 6*(Ticks<std::chrono::seconds>(NodeClock::now() - NodeSeconds{std::chrono::seconds{chain_start->GetMedianTimePast()}}) + MAX_FUTURE_BLOCK_TIME) / HEADER_COMMITMENT_PERIOD; + const auto max_seconds_since_start{(Ticks<std::chrono::seconds>(NodeClock::now() - NodeSeconds{std::chrono::seconds{chain_start->GetMedianTimePast()}})) + + MAX_FUTURE_BLOCK_TIME}; + m_max_commitments = 6 * max_seconds_since_start / m_params.commitment_period; LogDebug(BCLog::NET, "Initial headers sync started with peer=%d: height=%i, max_commitments=%i, min_work=%s\n", m_id, m_current_height, m_max_commitments, m_minimum_required_work.ToString()); } @@ -193,7 +189,7 @@ bool HeadersSyncState::ValidateAndProcessSingleHeader(const CBlockHeader& curren return false; } - if (next_height % HEADER_COMMITMENT_PERIOD == m_commit_offset) { + if (next_height % m_params.commitment_period == m_commit_offset) { // Add a commitment. m_header_commitments.push_back(m_hasher(current.GetHash()) & 1); if (m_header_commitments.size() > m_max_commitments) { @@ -254,7 +250,7 @@ bool HeadersSyncState::ValidateAndStoreRedownloadedHeader(const CBlockHeader& he // it's possible our peer has extended its chain between our first sync and // our second, and we don't want to return failure after we've seen our // target blockhash just because we ran out of commitments. 
- if (!m_process_all_remaining_headers && next_height % HEADER_COMMITMENT_PERIOD == m_commit_offset) { + if (!m_process_all_remaining_headers && next_height % m_params.commitment_period == m_commit_offset) { if (m_header_commitments.size() == 0) { LogDebug(BCLog::NET, "Initial headers sync aborted with peer=%d: commitment overrun at height=%i (redownload phase)\n", m_id, next_height); // Somehow our peer managed to feed us a different chain and @@ -285,7 +281,7 @@ std::vector HeadersSyncState::PopHeadersReadyForAcceptance() Assume(m_download_state == State::REDOWNLOAD); if (m_download_state != State::REDOWNLOAD) return ret; - while (m_redownloaded_headers.size() > REDOWNLOAD_BUFFER_SIZE || + while (m_redownloaded_headers.size() > m_params.redownload_buffer_size || (m_redownloaded_headers.size() > 0 && m_process_all_remaining_headers)) { ret.emplace_back(m_redownloaded_headers.front().GetFullHeader(m_redownload_buffer_first_prev_hash)); m_redownloaded_headers.pop_front(); diff --git a/src/headerssync.h b/src/headerssync.h index 12afe66097b..9e3af58d60a 100644 --- a/src/headerssync.h +++ b/src/headerssync.h @@ -136,7 +136,8 @@ public: * minimum_required_work: amount of chain work required to accept the chain */ HeadersSyncState(NodeId id, const Consensus::Params& consensus_params, - const CBlockIndex* chain_start, const arith_uint256& minimum_required_work); + const HeadersSyncParams& params, const CBlockIndex* chain_start, + const arith_uint256& minimum_required_work); /** Result data structure for ProcessNextHeaders. */ struct ProcessingResult { @@ -179,8 +180,8 @@ protected: /** The (secret) offset on the heights for which to create commitments. * * m_header_commitments entries are created at any height h for which - * (h % HEADER_COMMITMENT_PERIOD) == m_commit_offset. */ - const unsigned m_commit_offset; + * (h % m_params.commitment_period) == m_commit_offset. 
*/ + const size_t m_commit_offset; private: /** Clear out all download state that might be in progress (freeing any used @@ -214,6 +215,9 @@ private: /** We use the consensus params in our anti-DoS calculations */ const Consensus::Params& m_consensus_params; + /** Parameters that impact memory usage for a given chain, especially when attacked. */ + const HeadersSyncParams m_params; + /** Store the last block in our block index that the peer's chain builds from */ const CBlockIndex* m_chain_start{nullptr}; diff --git a/src/kernel/chainparams.cpp b/src/kernel/chainparams.cpp index 3955794f896..062f91af4fa 100644 --- a/src/kernel/chainparams.cpp +++ b/src/kernel/chainparams.cpp @@ -195,6 +195,12 @@ public: .tx_count = 1235299397, .dTxRate = 5.456290459519495, }; + + // Generated by headerssync-params.py on 2025-09-01. + m_headers_sync_params = HeadersSyncParams{ + .commitment_period = 632, + .redownload_buffer_size = 15009, // 15009/632 = ~23.7 commitments + }; } }; @@ -292,6 +298,12 @@ public: .tx_count = 508468699, .dTxRate = 7.172978845985714, }; + + // Generated by headerssync-params.py on 2025-09-03. + m_headers_sync_params = HeadersSyncParams{ + .commitment_period = 628, + .redownload_buffer_size = 13460, // 13460/628 = ~21.4 commitments + }; } }; @@ -393,6 +405,12 @@ public: .tx_count = 11414302, .dTxRate = 0.2842619757327476, }; + + // Generated by headerssync-params.py on 2025-09-03. + m_headers_sync_params = HeadersSyncParams{ + .commitment_period = 275, + .redownload_buffer_size = 7017, // 7017/275 = ~25.5 commitments + }; } }; @@ -506,6 +524,12 @@ public: fDefaultConsistencyChecks = false; m_is_mockable_chain = false; + + // Generated by headerssync-params.py on 2025-09-03. + m_headers_sync_params = HeadersSyncParams{ + .commitment_period = 390, + .redownload_buffer_size = 9584, // 9584/390 = ~24.6 commitments + }; } }; @@ -636,6 +660,12 @@ public: base58Prefixes[EXT_SECRET_KEY] = {0x04, 0x35, 0x83, 0x94}; bech32_hrp = "bcrt"; + + // Copied from Testnet4. 
+ m_headers_sync_params = HeadersSyncParams{ + .commitment_period = 275, + .redownload_buffer_size = 7017, // 7017/275 = ~25.5 commitments + }; } }; diff --git a/src/kernel/chainparams.h b/src/kernel/chainparams.h index 77991d497dd..0a1a5bdff8d 100644 --- a/src/kernel/chainparams.h +++ b/src/kernel/chainparams.h @@ -61,6 +61,15 @@ struct ChainTxData { double dTxRate; //!< estimated number of transactions per second after that timestamp }; +//! Configuration for headers sync memory usage. +struct HeadersSyncParams { + //! Distance in blocks between header commitments. + size_t commitment_period{0}; + //! Minimum number of validated headers to accumulate in the redownload + //! buffer before feeding them into the permanent block index. + size_t redownload_buffer_size{0}; +}; + /** * CChainParams defines various tweakable parameters of a given instance of the * Bitcoin system. @@ -106,6 +115,7 @@ public: const std::vector& Base58Prefix(Base58Type type) const { return base58Prefixes[type]; } const std::string& Bech32HRP() const { return bech32_hrp; } const std::vector& FixedSeeds() const { return vFixedSeeds; } + const HeadersSyncParams& HeadersSync() const { return m_headers_sync_params; } std::optional AssumeutxoForHeight(int height) const { @@ -170,6 +180,7 @@ protected: bool m_is_mockable_chain; std::vector m_assumeutxo_data; ChainTxData chainTxData; + HeadersSyncParams m_headers_sync_params; }; std::optional GetNetworkForMagic(const MessageStartChars& pchMessageStart); diff --git a/src/net_processing.cpp b/src/net_processing.cpp index 336669a8545..352aba8e684 100644 --- a/src/net_processing.cpp +++ b/src/net_processing.cpp @@ -2654,7 +2654,7 @@ bool PeerManagerImpl::TryLowWorkHeadersSync(Peer& peer, CNode& pfrom, const CBlo // advancing to the first unknown header would be a small effect. 
LOCK(peer.m_headers_sync_mutex); peer.m_headers_sync.reset(new HeadersSyncState(peer.m_id, m_chainparams.GetConsensus(), - chain_start_header, minimum_chain_work)); + m_chainparams.HeadersSync(), chain_start_header, minimum_chain_work)); // Now a HeadersSyncState object for tracking this synchronization // is created, process the headers using it as normal. Failures are diff --git a/src/test/fuzz/headerssync.cpp b/src/test/fuzz/headerssync.cpp index f42bdf9562a..5bfbed85375 100644 --- a/src/test/fuzz/headerssync.cpp +++ b/src/test/fuzz/headerssync.cpp @@ -43,10 +43,11 @@ void MakeHeadersContinuous( class FuzzedHeadersSyncState : public HeadersSyncState { public: - FuzzedHeadersSyncState(const unsigned commit_offset, const CBlockIndex* chain_start, const arith_uint256& minimum_required_work) - : HeadersSyncState(/*id=*/0, Params().GetConsensus(), chain_start, minimum_required_work) + FuzzedHeadersSyncState(const HeadersSyncParams& sync_params, const size_t commit_offset, + const CBlockIndex* chain_start, const arith_uint256& minimum_required_work) + : HeadersSyncState(/*id=*/0, Params().GetConsensus(), sync_params, chain_start, minimum_required_work) { - const_cast(m_commit_offset) = commit_offset; + const_cast(m_commit_offset) = commit_offset; } }; @@ -65,9 +66,14 @@ FUZZ_TARGET(headers_sync_state, .init = initialize_headers_sync_state_fuzz) const uint256 genesis_hash = genesis_header.GetHash(); start_index.phashBlock = &genesis_hash; + const HeadersSyncParams params{ + .commitment_period = fuzzed_data_provider.ConsumeIntegralInRange(1, Params().HeadersSync().commitment_period * 2), + .redownload_buffer_size = fuzzed_data_provider.ConsumeIntegralInRange(0, Params().HeadersSync().redownload_buffer_size * 2), + }; arith_uint256 min_work{UintToArith256(ConsumeUInt256(fuzzed_data_provider))}; FuzzedHeadersSyncState headers_sync( - /*commit_offset=*/fuzzed_data_provider.ConsumeIntegralInRange(1, 1024), + params, + 
/*commit_offset=*/fuzzed_data_provider.ConsumeIntegralInRange(0, params.commitment_period - 1), /*chain_start=*/&start_index, /*minimum_required_work=*/min_work); diff --git a/src/test/headers_sync_chainwork_tests.cpp b/src/test/headers_sync_chainwork_tests.cpp index 4496ed5fa27..c91420978ff 100644 --- a/src/test/headers_sync_chainwork_tests.cpp +++ b/src/test/headers_sync_chainwork_tests.cpp @@ -6,6 +6,7 @@ #include #include #include +#include #include #include #include @@ -40,11 +41,14 @@ using State = HeadersSyncState::State; } \ } while (false) -constexpr size_t TARGET_BLOCKS{15'012}; +constexpr size_t TARGET_BLOCKS{15'000}; constexpr arith_uint256 CHAIN_WORK{TARGET_BLOCKS * 2}; -// Copied from headerssync.cpp, will be redefined in next commit. -constexpr size_t REDOWNLOAD_BUFFER_SIZE{15'009}; +// Subtract MAX_HEADERS_RESULTS (2000 headers/message) + an arbitrary smaller +// value (123) so our redownload buffer is well below the number of blocks +// required to reach the CHAIN_WORK threshold, to behave similarly to mainnet. +constexpr size_t REDOWNLOAD_BUFFER_SIZE{TARGET_BLOCKS - (MAX_HEADERS_RESULTS + 123)}; +constexpr size_t COMMITMENT_PERIOD{600}; // Somewhat close to mainnet. struct HeadersGeneratorSetup : public RegTestingSetup { const CBlock& genesis{Params().GenesisBlock()}; @@ -78,6 +82,10 @@ struct HeadersGeneratorSetup : public RegTestingSetup { { return {/*id=*/0, Params().GetConsensus(), + HeadersSyncParams{ + .commitment_period = COMMITMENT_PERIOD, + .redownload_buffer_size = REDOWNLOAD_BUFFER_SIZE, + }, chain_start, /*minimum_required_work=*/CHAIN_WORK}; } @@ -157,6 +165,11 @@ BOOST_AUTO_TEST_CASE(sneaky_redownload) /*exp_headers_size=*/0, /*exp_pow_validated_prev=*/std::nullopt, /*exp_locator_hash=*/genesis.GetHash()); + // Below is the number of commitment bits that must randomly match between + // the two chains for this test to spuriously fail. 1 / 2^25 = + // 1 in 33'554'432 (somewhat less due to HeadersSyncState::m_commit_offset). 
+ static_assert(TARGET_BLOCKS / COMMITMENT_PERIOD == 25); + // Try to sneakily feed back the second chain during REDOWNLOAD. CHECK_RESULT(hss.ProcessNextHeaders(second_chain, true), hss, /*exp_state=*/State::FINAL,