From a4ac9915a95eb865779cf4627dd518d94c01032b Mon Sep 17 00:00:00 2001 From: Hodlinator <172445034+hodlinator@users.noreply.github.com> Date: Tue, 19 Aug 2025 10:36:25 +0200 Subject: [PATCH 1/8] refactor(headerssync): Extract test constants ahead of breakup into functions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Made arith_uint256 constexpr-constructible so it can be used for compile time constants. Co-authored-by: Lőrinc --- src/arith_uint256.h | 19 +++--- src/test/headers_sync_chainwork_tests.cpp | 80 ++++++++++++----------- 2 files changed, 53 insertions(+), 46 deletions(-) diff --git a/src/arith_uint256.h b/src/arith_uint256.h index 5cefff53ae9..69e0aff8d3f 100644 --- a/src/arith_uint256.h +++ b/src/arith_uint256.h @@ -1,5 +1,5 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto -// Copyright (c) 2009-2022 The Bitcoin Core developers +// Copyright (c) 2009-present The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. @@ -21,7 +21,7 @@ public: }; /** Template base class for unsigned big integers. */ -template +template class base_uint { protected: @@ -29,9 +29,9 @@ protected: static constexpr int WIDTH = BITS / 32; /** Big integer represented with 32-bit digits, least-significant first. */ uint32_t pn[WIDTH]; -public: - base_uint() +public: + constexpr base_uint() { for (int i = 0; i < WIDTH; i++) pn[i] = 0; @@ -40,7 +40,7 @@ public: base_uint(const base_uint& b) = default; base_uint& operator=(const base_uint& b) = default; - base_uint(uint64_t b) + constexpr base_uint(uint64_t b) { pn[0] = (unsigned int)b; pn[1] = (unsigned int)(b >> 32); @@ -227,11 +227,12 @@ public: }; /** 256-bit unsigned big integer. */ -class arith_uint256 : public base_uint<256> { +class arith_uint256 : public base_uint<256> +{ public: - arith_uint256() = default; - arith_uint256(const base_uint<256>& b) : base_uint<256>(b) {} - arith_uint256(uint64_t b) : base_uint<256>(b) {} + constexpr arith_uint256() = default; + constexpr arith_uint256(const base_uint& b) : base_uint(b) {} + constexpr arith_uint256(uint64_t b) : base_uint(b) {} /** * The "compact" format is a representation of a whole diff --git a/src/test/headers_sync_chainwork_tests.cpp b/src/test/headers_sync_chainwork_tests.cpp index b710ad1801e..db58970f8a5 100644 --- a/src/test/headers_sync_chainwork_tests.cpp +++ b/src/test/headers_sync_chainwork_tests.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2022 The Bitcoin Core developers +// Copyright (c) 2022-present The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. @@ -9,11 +9,35 @@ #include #include #include + +#include #include #include +constexpr size_t TARGET_BLOCKS{15'000}; +constexpr arith_uint256 CHAIN_WORK{TARGET_BLOCKS * 2}; + struct HeadersGeneratorSetup : public RegTestingSetup { + const CBlock& genesis{Params().GenesisBlock()}; + const CBlockIndex* chain_start{WITH_LOCK(::cs_main, return m_node.chainman->m_blockman.LookupBlockIndex(genesis.GetHash()))}; + + // Generate headers for two different chains (using differing merkle roots + // to ensure the headers are different). 
+    const std::vector<CBlockHeader>& FirstChain()
+    {
+        static const auto first_chain{GenerateHeaders(/*count=*/TARGET_BLOCKS - 1, genesis.GetHash(),
+                                                      genesis.nVersion, genesis.nTime, /*merkle_root=*/uint256::ZERO, genesis.nBits)};
+        return first_chain;
+    }
+    const std::vector<CBlockHeader>& SecondChain()
+    {
+        static const auto second_chain{GenerateHeaders(/*count=*/TARGET_BLOCKS - 2, genesis.GetHash(),
+                                                       genesis.nVersion, genesis.nTime, /*merkle_root=*/uint256::ONE, genesis.nBits)};
+        return second_chain;
+    }
+
+private:
     /** Search for a nonce to meet (regtest) proof of work */
     void FindProofOfWork(CBlockHeader& starting_header);
     /**
@@ -21,42 +45,36 @@ struct HeadersGeneratorSetup : public RegTestingSetup {
      * the given nVersion, advancing time by 1 second from the starting
      * prev_time, and with a fixed merkle root hash.
      */
-    void GenerateHeaders(std::vector<CBlockHeader>& headers, size_t count,
-                         const uint256& starting_hash, const int nVersion, int prev_time,
-                         const uint256& merkle_root, const uint32_t nBits);
+    std::vector<CBlockHeader> GenerateHeaders(size_t count,
+                                              uint256 prev_hash, int32_t nVersion, uint32_t prev_time,
+                                              const uint256& merkle_root, uint32_t nBits);
 };
 
 void HeadersGeneratorSetup::FindProofOfWork(CBlockHeader& starting_header)
 {
     while (!CheckProofOfWork(starting_header.GetHash(), starting_header.nBits, Params().GetConsensus())) {
-        ++(starting_header.nNonce);
+        ++starting_header.nNonce;
     }
 }
 
-void HeadersGeneratorSetup::GenerateHeaders(std::vector<CBlockHeader>& headers,
-    size_t count, const uint256& starting_hash, const int nVersion, int prev_time,
-    const uint256& merkle_root, const uint32_t nBits)
+std::vector<CBlockHeader> HeadersGeneratorSetup::GenerateHeaders(
+    const size_t count, uint256 prev_hash, const int32_t nVersion,
+    uint32_t prev_time, const uint256& merkle_root, const uint32_t nBits)
 {
-    uint256 prev_hash = starting_hash;
-
-    while (headers.size() < count) {
-        headers.emplace_back();
-        CBlockHeader& next_header = headers.back();;
+    std::vector<CBlockHeader> headers(count);
+    for (auto& next_header : headers) {
         next_header.nVersion = nVersion;
         next_header.hashPrevBlock = prev_hash;
         next_header.hashMerkleRoot = merkle_root;
-        next_header.nTime = prev_time+1;
+        next_header.nTime = ++prev_time;
         next_header.nBits = nBits;
         FindProofOfWork(next_header);
         prev_hash = next_header.GetHash();
-        prev_time = next_header.nTime;
    }
-    return;
+    return headers;
 }
 
-BOOST_FIXTURE_TEST_SUITE(headers_sync_chainwork_tests, HeadersGeneratorSetup)
-
 // In this test, we construct two sets of headers from genesis, one with
 // sufficient proof of work and one without.
 // 1. We deliver the first set of headers and verify that the headers sync state
@@ -65,34 +83,22 @@ BOOST_FIXTURE_TEST_SUITE(headers_sync_chainwork_tests, HeadersGeneratorSetup)
 // processing (presumably due to commitments not matching).
 // 3. Finally, we verify that repeating with the first set of headers in both
 // phases is successful.
+BOOST_FIXTURE_TEST_SUITE(headers_sync_chainwork_tests, HeadersGeneratorSetup)
+
 BOOST_AUTO_TEST_CASE(headers_sync_state)
 {
-    std::vector<CBlockHeader> first_chain;
-    std::vector<CBlockHeader> second_chain;
+    const auto& first_chain{FirstChain()};
+    const auto& second_chain{SecondChain()};
 
     std::unique_ptr<HeadersSyncState> hss;
 
-    const int target_blocks = 15000;
-    arith_uint256 chain_work = target_blocks*2;
-
-    // Generate headers for two different chains (using differing merkle roots
-    // to ensure the headers are different).
- GenerateHeaders(first_chain, target_blocks-1, Params().GenesisBlock().GetHash(), - Params().GenesisBlock().nVersion, Params().GenesisBlock().nTime, - ArithToUint256(0), Params().GenesisBlock().nBits); - - GenerateHeaders(second_chain, target_blocks-2, Params().GenesisBlock().GetHash(), - Params().GenesisBlock().nVersion, Params().GenesisBlock().nTime, - ArithToUint256(1), Params().GenesisBlock().nBits); - - const CBlockIndex* chain_start = WITH_LOCK(::cs_main, return m_node.chainman->m_blockman.LookupBlockIndex(Params().GenesisBlock().GetHash())); std::vector headers_batch; // Feed the first chain to HeadersSyncState, by delivering 1 header // initially and then the rest. headers_batch.insert(headers_batch.end(), std::next(first_chain.begin()), first_chain.end()); - hss.reset(new HeadersSyncState(0, Params().GetConsensus(), chain_start, chain_work)); + hss.reset(new HeadersSyncState(0, Params().GetConsensus(), chain_start, CHAIN_WORK)); (void)hss->ProcessNextHeaders({first_chain.front()}, true); // Pretend the first header is still "full", so we don't abort. auto result = hss->ProcessNextHeaders(headers_batch, true); @@ -109,7 +115,7 @@ BOOST_AUTO_TEST_CASE(headers_sync_state) BOOST_CHECK(hss->GetState() == HeadersSyncState::State::FINAL); // Now try again, this time feeding the first chain twice. - hss.reset(new HeadersSyncState(0, Params().GetConsensus(), chain_start, chain_work)); + hss.reset(new HeadersSyncState(0, Params().GetConsensus(), chain_start, CHAIN_WORK)); (void)hss->ProcessNextHeaders(first_chain, true); BOOST_CHECK(hss->GetState() == HeadersSyncState::State::REDOWNLOAD); @@ -123,7 +129,7 @@ BOOST_AUTO_TEST_CASE(headers_sync_state) // Finally, verify that just trying to process the second chain would not // succeed (too little work) - hss.reset(new HeadersSyncState(0, Params().GetConsensus(), chain_start, chain_work)); + hss.reset(new HeadersSyncState(0, Params().GetConsensus(), chain_start, CHAIN_WORK)); BOOST_CHECK(hss->GetState() == HeadersSyncState::State::PRESYNC); // Pretend just the first message is "full", so we don't abort. (void)hss->ProcessNextHeaders({second_chain.front()}, true); From e984618d0b9946dc11f1087adf22a4cfbf9c1a77 Mon Sep 17 00:00:00 2001 From: Hodlinator <172445034+hodlinator@users.noreply.github.com> Date: Tue, 9 Sep 2025 21:04:47 +0200 Subject: [PATCH 2/8] refactor(headerssync): Process spans of headers More lightweight than vectors which needed to be copied in tests. Also good to get rid of headers_batch-vector before breaking up test. --- src/headerssync.cpp | 8 ++++---- src/headerssync.h | 4 ++-- src/test/headers_sync_chainwork_tests.cpp | 14 ++++---------- 3 files changed, 10 insertions(+), 16 deletions(-) diff --git a/src/headerssync.cpp b/src/headerssync.cpp index ae7187f48b2..f966b13a922 100644 --- a/src/headerssync.cpp +++ b/src/headerssync.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2022 The Bitcoin Core developers +// Copyright (c) 2022-present The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. @@ -66,8 +66,8 @@ void HeadersSyncState::Finalize() /** Process the next batch of headers received from our peer. * Validate and store commitments, and compare total chainwork to our target to * see if we can switch to REDOWNLOAD mode. 
*/ -HeadersSyncState::ProcessingResult HeadersSyncState::ProcessNextHeaders(const - std::vector& received_headers, const bool full_headers_message) +HeadersSyncState::ProcessingResult HeadersSyncState::ProcessNextHeaders( + std::span received_headers, const bool full_headers_message) { ProcessingResult ret; @@ -137,7 +137,7 @@ HeadersSyncState::ProcessingResult HeadersSyncState::ProcessNextHeaders(const return ret; } -bool HeadersSyncState::ValidateAndStoreHeadersCommitments(const std::vector& headers) +bool HeadersSyncState::ValidateAndStoreHeadersCommitments(std::span headers) { // The caller should not give us an empty set of headers. Assume(headers.size() > 0); diff --git a/src/headerssync.h b/src/headerssync.h index 56380c66fe2..12afe66097b 100644 --- a/src/headerssync.h +++ b/src/headerssync.h @@ -165,7 +165,7 @@ public: * ProcessingResult.request_more: if true, the caller is suggested to call * NextHeadersRequestLocator and send a getheaders message using it. */ - ProcessingResult ProcessNextHeaders(const std::vector& + ProcessingResult ProcessNextHeaders(std::span received_headers, bool full_headers_message); /** Issue the next GETHEADERS message to our peer. @@ -195,7 +195,7 @@ private: * processed headers. * On failure, this invokes Finalize() and returns false. */ - bool ValidateAndStoreHeadersCommitments(const std::vector& headers); + bool ValidateAndStoreHeadersCommitments(std::span headers); /** In PRESYNC, process and update state for a single header */ bool ValidateAndProcessSingleHeader(const CBlockHeader& current); diff --git a/src/test/headers_sync_chainwork_tests.cpp b/src/test/headers_sync_chainwork_tests.cpp index db58970f8a5..511933284d6 100644 --- a/src/test/headers_sync_chainwork_tests.cpp +++ b/src/test/headers_sync_chainwork_tests.cpp @@ -92,16 +92,12 @@ BOOST_AUTO_TEST_CASE(headers_sync_state) std::unique_ptr hss; - std::vector headers_batch; - // Feed the first chain to HeadersSyncState, by delivering 1 header // initially and then the rest. - headers_batch.insert(headers_batch.end(), std::next(first_chain.begin()), first_chain.end()); - hss.reset(new HeadersSyncState(0, Params().GetConsensus(), chain_start, CHAIN_WORK)); - (void)hss->ProcessNextHeaders({first_chain.front()}, true); + (void)hss->ProcessNextHeaders({{first_chain.front()}}, true); // Pretend the first header is still "full", so we don't abort. - auto result = hss->ProcessNextHeaders(headers_batch, true); + auto result = hss->ProcessNextHeaders(std::span{first_chain}.subspan(1), true); // This chain should look valid, and we should have met the proof-of-work // requirement. @@ -132,15 +128,13 @@ BOOST_AUTO_TEST_CASE(headers_sync_state) hss.reset(new HeadersSyncState(0, Params().GetConsensus(), chain_start, CHAIN_WORK)); BOOST_CHECK(hss->GetState() == HeadersSyncState::State::PRESYNC); // Pretend just the first message is "full", so we don't abort. - (void)hss->ProcessNextHeaders({second_chain.front()}, true); + (void)hss->ProcessNextHeaders({{second_chain.front()}}, true); BOOST_CHECK(hss->GetState() == HeadersSyncState::State::PRESYNC); - headers_batch.clear(); - headers_batch.insert(headers_batch.end(), std::next(second_chain.begin(), 1), second_chain.end()); // Tell the sync logic that the headers message was not full, implying no // more headers can be requested. For a low-work-chain, this should causes // the sync to end with no headers for acceptance. 
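    // [Editor's sketch, not part of the patch; names are the ones already used in
    // this test.] The std::span<const CBlockHeader> parameter introduced by this
    // commit is what lets the call sites drop the copied headers_batch vector: a
    // span binds to a whole vector, to a subspan, or to a single header without
    // allocating, e.g.
    //
    //   hss->ProcessNextHeaders(first_chain, true);                        // whole std::vector<CBlockHeader>
    //   hss->ProcessNextHeaders(std::span{second_chain}.subspan(1), true); // tail of the chain, no copy
    //   hss->ProcessNextHeaders({{first_chain.front()}}, true);            // single header (one-element temporary)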
- result = hss->ProcessNextHeaders(headers_batch, false); + result = hss->ProcessNextHeaders(std::span{second_chain}.subspan(1), false); BOOST_CHECK(hss->GetState() == HeadersSyncState::State::FINAL); BOOST_CHECK(result.pow_validated_headers.empty()); BOOST_CHECK(!result.request_more); From f03686892a9c07e87e6dd12027d988fe188b1f9e Mon Sep 17 00:00:00 2001 From: Hodlinator <172445034+hodlinator@users.noreply.github.com> Date: Tue, 19 Aug 2025 10:38:12 +0200 Subject: [PATCH 3/8] refactor(test): Break up headers_sync_state Helps logically separate the scenarios being tested. Also adds missing comment for part 4. (unique_ptrs and ProcessingResults will be cleaned up in next commit). --- src/test/headers_sync_chainwork_tests.cpp | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/src/test/headers_sync_chainwork_tests.cpp b/src/test/headers_sync_chainwork_tests.cpp index 511933284d6..c83fd29c409 100644 --- a/src/test/headers_sync_chainwork_tests.cpp +++ b/src/test/headers_sync_chainwork_tests.cpp @@ -85,7 +85,7 @@ std::vector HeadersGeneratorSetup::GenerateHeaders( // phases is successful. BOOST_FIXTURE_TEST_SUITE(headers_sync_chainwork_tests, HeadersGeneratorSetup) -BOOST_AUTO_TEST_CASE(headers_sync_state) +BOOST_AUTO_TEST_CASE(sneaky_redownload) { const auto& first_chain{FirstChain()}; const auto& second_chain{SecondChain()}; @@ -109,7 +109,14 @@ BOOST_AUTO_TEST_CASE(headers_sync_state) result = hss->ProcessNextHeaders(second_chain, true); BOOST_CHECK(!result.success); // foiled! BOOST_CHECK(hss->GetState() == HeadersSyncState::State::FINAL); +} +BOOST_AUTO_TEST_CASE(happy_path) +{ + const auto& first_chain{FirstChain()}; + + std::unique_ptr hss; + HeadersSyncState::ProcessingResult result; // Now try again, this time feeding the first chain twice. 
hss.reset(new HeadersSyncState(0, Params().GetConsensus(), chain_start, CHAIN_WORK)); (void)hss->ProcessNextHeaders(first_chain, true); @@ -122,7 +129,14 @@ BOOST_AUTO_TEST_CASE(headers_sync_state) BOOST_CHECK(result.pow_validated_headers.size() == first_chain.size()); // Nothing left for the sync logic to do: BOOST_CHECK(hss->GetState() == HeadersSyncState::State::FINAL); +} +BOOST_AUTO_TEST_CASE(too_little_work) +{ + const auto& second_chain{SecondChain()}; + + std::unique_ptr hss; + HeadersSyncState::ProcessingResult result; // Finally, verify that just trying to process the second chain would not // succeed (too little work) hss.reset(new HeadersSyncState(0, Params().GetConsensus(), chain_start, CHAIN_WORK)); From fe896f8faa7883f33169fe3e6dddb91feaca23e1 Mon Sep 17 00:00:00 2001 From: Hodlinator <172445034+hodlinator@users.noreply.github.com> Date: Wed, 3 Sep 2025 23:04:04 +0200 Subject: [PATCH 4/8] refactor(test): Store HeadersSyncState on the stack --- src/test/headers_sync_chainwork_tests.cpp | 48 ++++++++++++----------- 1 file changed, 25 insertions(+), 23 deletions(-) diff --git a/src/test/headers_sync_chainwork_tests.cpp b/src/test/headers_sync_chainwork_tests.cpp index c83fd29c409..8c2f8e9a495 100644 --- a/src/test/headers_sync_chainwork_tests.cpp +++ b/src/test/headers_sync_chainwork_tests.cpp @@ -37,6 +37,14 @@ struct HeadersGeneratorSetup : public RegTestingSetup { return second_chain; } + HeadersSyncState CreateState() + { + return {/*id=*/0, + Params().GetConsensus(), + chain_start, + /*minimum_required_work=*/CHAIN_WORK}; + } + private: /** Search for a nonce to meet (regtest) proof of work */ void FindProofOfWork(CBlockHeader& starting_header); @@ -90,66 +98,60 @@ BOOST_AUTO_TEST_CASE(sneaky_redownload) const auto& first_chain{FirstChain()}; const auto& second_chain{SecondChain()}; - std::unique_ptr hss; - // Feed the first chain to HeadersSyncState, by delivering 1 header // initially and then the rest. - hss.reset(new HeadersSyncState(0, Params().GetConsensus(), chain_start, CHAIN_WORK)); - (void)hss->ProcessNextHeaders({{first_chain.front()}}, true); + HeadersSyncState hss{CreateState()}; + (void)hss.ProcessNextHeaders({{first_chain.front()}}, true); // Pretend the first header is still "full", so we don't abort. - auto result = hss->ProcessNextHeaders(std::span{first_chain}.subspan(1), true); + auto result{hss.ProcessNextHeaders(std::span{first_chain}.subspan(1), true)}; // This chain should look valid, and we should have met the proof-of-work // requirement. BOOST_CHECK(result.success); BOOST_CHECK(result.request_more); - BOOST_CHECK(hss->GetState() == HeadersSyncState::State::REDOWNLOAD); + BOOST_CHECK(hss.GetState() == HeadersSyncState::State::REDOWNLOAD); // Try to sneakily feed back the second chain. - result = hss->ProcessNextHeaders(second_chain, true); + result = hss.ProcessNextHeaders(second_chain, true); BOOST_CHECK(!result.success); // foiled! - BOOST_CHECK(hss->GetState() == HeadersSyncState::State::FINAL); + BOOST_CHECK(hss.GetState() == HeadersSyncState::State::FINAL); } BOOST_AUTO_TEST_CASE(happy_path) { const auto& first_chain{FirstChain()}; - std::unique_ptr hss; - HeadersSyncState::ProcessingResult result; // Now try again, this time feeding the first chain twice. 
- hss.reset(new HeadersSyncState(0, Params().GetConsensus(), chain_start, CHAIN_WORK)); - (void)hss->ProcessNextHeaders(first_chain, true); - BOOST_CHECK(hss->GetState() == HeadersSyncState::State::REDOWNLOAD); + HeadersSyncState hss{CreateState()}; + (void)hss.ProcessNextHeaders(first_chain, true); + BOOST_CHECK(hss.GetState() == HeadersSyncState::State::REDOWNLOAD); - result = hss->ProcessNextHeaders(first_chain, true); + const auto result{hss.ProcessNextHeaders(first_chain, true)}; BOOST_CHECK(result.success); BOOST_CHECK(!result.request_more); // All headers should be ready for acceptance: BOOST_CHECK(result.pow_validated_headers.size() == first_chain.size()); // Nothing left for the sync logic to do: - BOOST_CHECK(hss->GetState() == HeadersSyncState::State::FINAL); + BOOST_CHECK(hss.GetState() == HeadersSyncState::State::FINAL); } BOOST_AUTO_TEST_CASE(too_little_work) { const auto& second_chain{SecondChain()}; - std::unique_ptr hss; - HeadersSyncState::ProcessingResult result; // Finally, verify that just trying to process the second chain would not // succeed (too little work) - hss.reset(new HeadersSyncState(0, Params().GetConsensus(), chain_start, CHAIN_WORK)); - BOOST_CHECK(hss->GetState() == HeadersSyncState::State::PRESYNC); + HeadersSyncState hss{CreateState()}; + BOOST_CHECK(hss.GetState() == HeadersSyncState::State::PRESYNC); // Pretend just the first message is "full", so we don't abort. - (void)hss->ProcessNextHeaders({{second_chain.front()}}, true); - BOOST_CHECK(hss->GetState() == HeadersSyncState::State::PRESYNC); + (void)hss.ProcessNextHeaders({{second_chain.front()}}, true); + BOOST_CHECK(hss.GetState() == HeadersSyncState::State::PRESYNC); // Tell the sync logic that the headers message was not full, implying no // more headers can be requested. For a low-work-chain, this should causes // the sync to end with no headers for acceptance. - result = hss->ProcessNextHeaders(std::span{second_chain}.subspan(1), false); - BOOST_CHECK(hss->GetState() == HeadersSyncState::State::FINAL); + const auto result{hss.ProcessNextHeaders(std::span{second_chain}.subspan(1), false)}; + BOOST_CHECK(hss.GetState() == HeadersSyncState::State::FINAL); BOOST_CHECK(result.pow_validated_headers.empty()); BOOST_CHECK(!result.request_more); // Nevertheless, no validation errors should have been detected with the From 04eeb9578c60ce5661f285f6bde996569fafdcc3 Mon Sep 17 00:00:00 2001 From: Hodlinator <172445034+hodlinator@users.noreply.github.com> Date: Wed, 3 Sep 2025 21:21:14 +0200 Subject: [PATCH 5/8] doc(test): Improve comments + new assert helping explain why CHAIN_WORK == TARGET_BLOCKS * 2. --- src/test/headers_sync_chainwork_tests.cpp | 39 ++++++++++++++++------- 1 file changed, 28 insertions(+), 11 deletions(-) diff --git a/src/test/headers_sync_chainwork_tests.cpp b/src/test/headers_sync_chainwork_tests.cpp index 8c2f8e9a495..0802e69ad67 100644 --- a/src/test/headers_sync_chainwork_tests.cpp +++ b/src/test/headers_sync_chainwork_tests.cpp @@ -26,12 +26,21 @@ struct HeadersGeneratorSetup : public RegTestingSetup { // to ensure the headers are different). const std::vector& FirstChain() { + // Block header hash target is half of max uint256 (2**256 / 2), expressible + // roughly as the coefficient 0x7fffff with the exponent 0x20 (32 bytes). + // This implies around every 2nd hash attempt should succeed, which + // is why CHAIN_WORK == TARGET_BLOCKS * 2. 
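        // [Editor's sketch, not part of the patch.] The "every 2nd hash" claim
        // above can be checked with arith_uint256 itself, using the same formula
        // as GetBlockProof():
        //
        //   arith_uint256 target;
        //   target.SetCompact(0x207fffff); // regtest genesis difficulty
        //   const arith_uint256 work_per_header{(~target / (target + 1)) + 1};
        //   assert(work_per_header == 2);  // hence CHAIN_WORK == TARGET_BLOCKS * 2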
+ assert(genesis.nBits == 0x207fffff); + + // Subtract 1 since the genesis block also contributes work so we reach + // the CHAIN_WORK target. static const auto first_chain{GenerateHeaders(/*count=*/TARGET_BLOCKS - 1, genesis.GetHash(), genesis.nVersion, genesis.nTime, /*merkle_root=*/uint256::ZERO, genesis.nBits)}; return first_chain; } const std::vector& SecondChain() { + // Subtract 2 to keep total work below the target. static const auto second_chain{GenerateHeaders(/*count=*/TARGET_BLOCKS - 2, genesis.GetHash(), genesis.nVersion, genesis.nTime, /*merkle_root=*/uint256::ONE, genesis.nBits)}; return second_chain; @@ -87,10 +96,12 @@ std::vector HeadersGeneratorSetup::GenerateHeaders( // sufficient proof of work and one without. // 1. We deliver the first set of headers and verify that the headers sync state // updates to the REDOWNLOAD phase successfully. -// 2. Then we deliver the second set of headers and verify that they fail +// Then we deliver the second set of headers and verify that they fail // processing (presumably due to commitments not matching). -// 3. Finally, we verify that repeating with the first set of headers in both -// phases is successful. +// 2. Verify that repeating with the first set of headers in both phases is +// successful. +// 3. Repeat the second set of headers in both phases to demonstrate behavior +// when the chain a peer provides has too little work. BOOST_FIXTURE_TEST_SUITE(headers_sync_chainwork_tests, HeadersGeneratorSetup) BOOST_AUTO_TEST_CASE(sneaky_redownload) @@ -101,17 +112,20 @@ BOOST_AUTO_TEST_CASE(sneaky_redownload) // Feed the first chain to HeadersSyncState, by delivering 1 header // initially and then the rest. HeadersSyncState hss{CreateState()}; + + // Just feed one header. + // Pretend the message is still "full", so we don't abort. (void)hss.ProcessNextHeaders({{first_chain.front()}}, true); - // Pretend the first header is still "full", so we don't abort. + auto result{hss.ProcessNextHeaders(std::span{first_chain}.subspan(1), true)}; // This chain should look valid, and we should have met the proof-of-work - // requirement. + // requirement during PRESYNC and transitioned to REDOWNLOAD. BOOST_CHECK(result.success); BOOST_CHECK(result.request_more); BOOST_CHECK(hss.GetState() == HeadersSyncState::State::REDOWNLOAD); - // Try to sneakily feed back the second chain. + // Try to sneakily feed back the second chain during REDOWNLOAD. result = hss.ProcessNextHeaders(second_chain, true); BOOST_CHECK(!result.success); // foiled! BOOST_CHECK(hss.GetState() == HeadersSyncState::State::FINAL); @@ -121,8 +135,10 @@ BOOST_AUTO_TEST_CASE(happy_path) { const auto& first_chain{FirstChain()}; - // Now try again, this time feeding the first chain twice. + // This time we feed the first chain twice. HeadersSyncState hss{CreateState()}; + + // Sufficient work transitions us from PRESYNC to REDOWNLOAD: (void)hss.ProcessNextHeaders(first_chain, true); BOOST_CHECK(hss.GetState() == HeadersSyncState::State::REDOWNLOAD); @@ -139,16 +155,17 @@ BOOST_AUTO_TEST_CASE(too_little_work) { const auto& second_chain{SecondChain()}; - // Finally, verify that just trying to process the second chain would not - // succeed (too little work) + // Verify that just trying to process the second chain would not succeed + // (too little work). HeadersSyncState hss{CreateState()}; BOOST_CHECK(hss.GetState() == HeadersSyncState::State::PRESYNC); - // Pretend just the first message is "full", so we don't abort. + + // Pretend just the first message is "full", so we don't abort. 
(void)hss.ProcessNextHeaders({{second_chain.front()}}, true); BOOST_CHECK(hss.GetState() == HeadersSyncState::State::PRESYNC); // Tell the sync logic that the headers message was not full, implying no - // more headers can be requested. For a low-work-chain, this should causes + // more headers can be requested. For a low-work-chain, this should cause // the sync to end with no headers for acceptance. const auto result{hss.ProcessNextHeaders(std::span{second_chain}.subspan(1), false)}; BOOST_CHECK(hss.GetState() == HeadersSyncState::State::FINAL); From 7b00643ef5f932116ee303af9984312b27c040f1 Mon Sep 17 00:00:00 2001 From: Hodlinator <172445034+hodlinator@users.noreply.github.com> Date: Wed, 3 Sep 2025 21:36:40 +0200 Subject: [PATCH 6/8] test(headerssync): headers_sync_chainwork test improvements Introduces CHECK_RESULT for consistently validating ProcessingResult. * Verifies HeadersSyncState::State directly after ProcessNextHeaders(). * Uses BOOST_REQUIRE_EQUAL for HeadersSyncState::State - Nicer failure output and prevents continuing test in nonsensical state. * Encourages checking Locator and result.pow_validated_headers. Changes happy_path to test both full & non-full headers messages. --- src/test/headers_sync_chainwork_tests.cpp | 107 +++++++++++++++------- 1 file changed, 75 insertions(+), 32 deletions(-) diff --git a/src/test/headers_sync_chainwork_tests.cpp b/src/test/headers_sync_chainwork_tests.cpp index 0802e69ad67..40a85f04de5 100644 --- a/src/test/headers_sync_chainwork_tests.cpp +++ b/src/test/headers_sync_chainwork_tests.cpp @@ -15,6 +15,31 @@ #include +using State = HeadersSyncState::State; + +// Standard set of checks common to all scenarios. Macro keeps failure lines at the call-site. +#define CHECK_RESULT(result_expression, hss, exp_state, exp_success, exp_request_more, \ + exp_headers_size, exp_pow_validated_prev, exp_locator_hash) \ + do { \ + const auto result{result_expression}; \ + BOOST_REQUIRE_EQUAL(hss.GetState(), exp_state); \ + BOOST_CHECK_EQUAL(result.success, exp_success); \ + BOOST_CHECK_EQUAL(result.request_more, exp_request_more); \ + BOOST_CHECK_EQUAL(result.pow_validated_headers.size(), exp_headers_size); \ + const std::optional pow_validated_prev_opt{exp_pow_validated_prev}; \ + if (pow_validated_prev_opt) { \ + BOOST_CHECK_EQUAL(result.pow_validated_headers.at(0).hashPrevBlock, pow_validated_prev_opt); \ + } else { \ + BOOST_CHECK_EQUAL(exp_headers_size, 0); \ + } \ + const std::optional locator_hash_opt{exp_locator_hash}; \ + if (locator_hash_opt) { \ + BOOST_CHECK_EQUAL(hss.NextHeadersRequestLocator().vHave.at(0), locator_hash_opt); \ + } else { \ + BOOST_CHECK_EQUAL(exp_state, State::FINAL); \ + } \ + } while (false) + constexpr size_t TARGET_BLOCKS{15'000}; constexpr arith_uint256 CHAIN_WORK{TARGET_BLOCKS * 2}; @@ -113,42 +138,56 @@ BOOST_AUTO_TEST_CASE(sneaky_redownload) // initially and then the rest. HeadersSyncState hss{CreateState()}; - // Just feed one header. + // Just feed one header and check state. // Pretend the message is still "full", so we don't abort. 
- (void)hss.ProcessNextHeaders({{first_chain.front()}}, true); - - auto result{hss.ProcessNextHeaders(std::span{first_chain}.subspan(1), true)}; + CHECK_RESULT(hss.ProcessNextHeaders({{first_chain.front()}}, /*full_headers_message=*/true), + hss, /*exp_state=*/State::PRESYNC, + /*exp_success*/true, /*exp_request_more=*/true, + /*exp_headers_size=*/0, /*exp_pow_validated_prev=*/std::nullopt, + /*exp_locator_hash=*/first_chain.front().GetHash()); // This chain should look valid, and we should have met the proof-of-work // requirement during PRESYNC and transitioned to REDOWNLOAD. - BOOST_CHECK(result.success); - BOOST_CHECK(result.request_more); - BOOST_CHECK(hss.GetState() == HeadersSyncState::State::REDOWNLOAD); + CHECK_RESULT(hss.ProcessNextHeaders(std::span{first_chain}.subspan(1), true), + hss, /*exp_state=*/State::REDOWNLOAD, + /*exp_success*/true, /*exp_request_more=*/true, + /*exp_headers_size=*/0, /*exp_pow_validated_prev=*/std::nullopt, + /*exp_locator_hash=*/genesis.GetHash()); // Try to sneakily feed back the second chain during REDOWNLOAD. - result = hss.ProcessNextHeaders(second_chain, true); - BOOST_CHECK(!result.success); // foiled! - BOOST_CHECK(hss.GetState() == HeadersSyncState::State::FINAL); + CHECK_RESULT(hss.ProcessNextHeaders(second_chain, true), + hss, /*exp_state=*/State::FINAL, + /*exp_success*/false, // Foiled! We detected mismatching headers. + /*exp_request_more=*/false, + /*exp_headers_size=*/0, /*exp_pow_validated_prev=*/std::nullopt, + /*exp_locator_hash=*/std::nullopt); } BOOST_AUTO_TEST_CASE(happy_path) { const auto& first_chain{FirstChain()}; - // This time we feed the first chain twice. - HeadersSyncState hss{CreateState()}; + // Headers message that moves us to the next state doesn't need to be full. + for (const bool full_headers_message : {false, true}) { + // This time we feed the first chain twice. + HeadersSyncState hss{CreateState()}; - // Sufficient work transitions us from PRESYNC to REDOWNLOAD: - (void)hss.ProcessNextHeaders(first_chain, true); - BOOST_CHECK(hss.GetState() == HeadersSyncState::State::REDOWNLOAD); + // Sufficient work transitions us from PRESYNC to REDOWNLOAD: + const auto genesis_hash{genesis.GetHash()}; + CHECK_RESULT(hss.ProcessNextHeaders(first_chain, full_headers_message), + hss, /*exp_state=*/State::REDOWNLOAD, + /*exp_success*/true, /*exp_request_more=*/true, + /*exp_headers_size=*/0, /*exp_pow_validated_prev=*/std::nullopt, + /*exp_locator_hash=*/genesis_hash); - const auto result{hss.ProcessNextHeaders(first_chain, true)}; - BOOST_CHECK(result.success); - BOOST_CHECK(!result.request_more); - // All headers should be ready for acceptance: - BOOST_CHECK(result.pow_validated_headers.size() == first_chain.size()); - // Nothing left for the sync logic to do: - BOOST_CHECK(hss.GetState() == HeadersSyncState::State::FINAL); + CHECK_RESULT(hss.ProcessNextHeaders(first_chain, full_headers_message), + // Nothing left for the sync logic to do: + hss, /*exp_state=*/State::FINAL, + /*exp_success*/true, /*exp_request_more=*/false, + // All headers except the one already returned above: + /*exp_headers_size=*/first_chain.size(), /*exp_pow_validated_prev=*/genesis_hash, + /*exp_locator_hash=*/std::nullopt); + } } BOOST_AUTO_TEST_CASE(too_little_work) @@ -158,22 +197,26 @@ BOOST_AUTO_TEST_CASE(too_little_work) // Verify that just trying to process the second chain would not succeed // (too little work). 
HeadersSyncState hss{CreateState()}; - BOOST_CHECK(hss.GetState() == HeadersSyncState::State::PRESYNC); + BOOST_REQUIRE_EQUAL(hss.GetState(), State::PRESYNC); // Pretend just the first message is "full", so we don't abort. - (void)hss.ProcessNextHeaders({{second_chain.front()}}, true); - BOOST_CHECK(hss.GetState() == HeadersSyncState::State::PRESYNC); + CHECK_RESULT(hss.ProcessNextHeaders({{second_chain.front()}}, true), + hss, /*exp_state=*/State::PRESYNC, + /*exp_success*/true, /*exp_request_more=*/true, + /*exp_headers_size=*/0, /*exp_pow_validated_prev=*/std::nullopt, + /*exp_locator_hash=*/second_chain.front().GetHash()); // Tell the sync logic that the headers message was not full, implying no // more headers can be requested. For a low-work-chain, this should cause // the sync to end with no headers for acceptance. - const auto result{hss.ProcessNextHeaders(std::span{second_chain}.subspan(1), false)}; - BOOST_CHECK(hss.GetState() == HeadersSyncState::State::FINAL); - BOOST_CHECK(result.pow_validated_headers.empty()); - BOOST_CHECK(!result.request_more); - // Nevertheless, no validation errors should have been detected with the - // chain: - BOOST_CHECK(result.success); + CHECK_RESULT(hss.ProcessNextHeaders(std::span{second_chain}.subspan(1), false), + hss, /*exp_state=*/State::FINAL, + // Nevertheless, no validation errors should have been detected with the + // chain: + /*exp_success*/true, + /*exp_request_more=*/false, + /*exp_headers_size=*/0, /*exp_pow_validated_prev=*/std::nullopt, + /*exp_locator_hash=*/std::nullopt); } BOOST_AUTO_TEST_SUITE_END() From 8fd1c2893e6768223069d8b2fdec033b026cb2eb Mon Sep 17 00:00:00 2001 From: Hodlinator <172445034+hodlinator@users.noreply.github.com> Date: Wed, 3 Sep 2025 23:44:59 +0200 Subject: [PATCH 7/8] test(headerssync): Test returning of pow_validated_headers behavior Adding these checks necessitates increasing the length of the generated test chains so that we can properly exceed the REDOWNLOAD_BUFFER_SIZE during the test. One can check out this commit and locally revert the TARGET_BLOCKS value change to prove the need for tests being able to control the buffer size, as is done by the next commit. Beyond the current REDOWNLOAD_BUFFER_SIZE of 15'009 we need 3 extra - 15'012 TARGET_BLOCKS: * 1 for the genesis block. * 1 for the test wanting to check that we start receiving headers for permanent storage *before* the final header (first_chain.back()). * 1 to exceed REDOWNLOAD_BUFFER_SIZE in HeadersSyncState::PopHeadersReadyForAcceptance(). (The release process includes an occasional increase of the REDOWNLOAD_BUFFER_SIZE value, see release-process.md and history of headerssync.cpp). --- src/test/headers_sync_chainwork_tests.cpp | 27 +++++++++++++++++++---- 1 file changed, 23 insertions(+), 4 deletions(-) diff --git a/src/test/headers_sync_chainwork_tests.cpp b/src/test/headers_sync_chainwork_tests.cpp index 40a85f04de5..4496ed5fa27 100644 --- a/src/test/headers_sync_chainwork_tests.cpp +++ b/src/test/headers_sync_chainwork_tests.cpp @@ -40,9 +40,12 @@ using State = HeadersSyncState::State; } \ } while (false) -constexpr size_t TARGET_BLOCKS{15'000}; +constexpr size_t TARGET_BLOCKS{15'012}; constexpr arith_uint256 CHAIN_WORK{TARGET_BLOCKS * 2}; +// Copied from headerssync.cpp, will be redefined in next commit. 
+constexpr size_t REDOWNLOAD_BUFFER_SIZE{15'009}; + struct HeadersGeneratorSetup : public RegTestingSetup { const CBlock& genesis{Params().GenesisBlock()}; const CBlockIndex* chain_start{WITH_LOCK(::cs_main, return m_node.chainman->m_blockman.LookupBlockIndex(genesis.GetHash()))}; @@ -180,12 +183,28 @@ BOOST_AUTO_TEST_CASE(happy_path) /*exp_headers_size=*/0, /*exp_pow_validated_prev=*/std::nullopt, /*exp_locator_hash=*/genesis_hash); - CHECK_RESULT(hss.ProcessNextHeaders(first_chain, full_headers_message), - // Nothing left for the sync logic to do: + // Process only so that the internal threshold isn't exceeded, meaning + // validated headers shouldn't be returned yet: + CHECK_RESULT(hss.ProcessNextHeaders({first_chain.begin(), REDOWNLOAD_BUFFER_SIZE}, true), + hss, /*exp_state=*/State::REDOWNLOAD, + /*exp_success*/true, /*exp_request_more=*/true, + /*exp_headers_size=*/0, /*exp_pow_validated_prev=*/std::nullopt, + /*exp_locator_hash=*/first_chain[REDOWNLOAD_BUFFER_SIZE - 1].GetHash()); + + // We start receiving headers for permanent storage before completing: + CHECK_RESULT(hss.ProcessNextHeaders({{first_chain[REDOWNLOAD_BUFFER_SIZE]}}, true), + hss, /*exp_state=*/State::REDOWNLOAD, + /*exp_success*/true, /*exp_request_more=*/true, + /*exp_headers_size=*/1, /*exp_pow_validated_prev=*/genesis_hash, + /*exp_locator_hash=*/first_chain[REDOWNLOAD_BUFFER_SIZE].GetHash()); + + // Feed in remaining headers, meeting the work threshold again and + // completing the REDOWNLOAD phase: + CHECK_RESULT(hss.ProcessNextHeaders({first_chain.begin() + REDOWNLOAD_BUFFER_SIZE + 1, first_chain.end()}, full_headers_message), hss, /*exp_state=*/State::FINAL, /*exp_success*/true, /*exp_request_more=*/false, // All headers except the one already returned above: - /*exp_headers_size=*/first_chain.size(), /*exp_pow_validated_prev=*/genesis_hash, + /*exp_headers_size=*/first_chain.size() - 1, /*exp_pow_validated_prev=*/first_chain.front().GetHash(), /*exp_locator_hash=*/std::nullopt); } } From cc5dda1de333cf7aa10e2237ee2c9221f705dbd9 Mon Sep 17 00:00:00 2001 From: Hodlinator <172445034+hodlinator@users.noreply.github.com> Date: Thu, 4 Sep 2025 09:17:00 +0200 Subject: [PATCH 8/8] headerssync: Make HeadersSyncState more flexible and move constants Move calculated constants from the top of src/headerssync.cpp into src/kernel/chainparams.cpp. Instead of being hardcoded to mainnet parameters, HeadersSyncState can now vary depending on chain or test. (This means we can reset TARGET_BLOCKS back to the nice round number of 15'000). Signet and testnets got new HeadersSyncParams constants through temporarily altering headerssync-params.py with corresponding GENESIS_TIME and MINCHAINWORK_HEADERS (based off defaultAssumeValid block height comments, corresponding to nMinimumChainWork). Regtest doesn't have a default assume valid block height, so the values are copied from Testnet 4. Since the constants only affect memory usage, and have very low impact unless dealing with a largely malicious chain, it's not that critical to keep updating them for non-mainnet chains. 
GENESIS_TIMEs (UTC): Testnet3: 1296688602 = datetime(2011, 2, 2) Testnet4: 1714777860 = datetime(2024, 5, 3) Signet: 1598918400 = datetime(2020, 9, 1) --- contrib/devtools/README.md | 2 +- contrib/devtools/headerssync-params.py | 16 ++++++------ doc/release-process.md | 2 +- src/headerssync.cpp | 32 ++++++++++------------- src/headerssync.h | 10 ++++--- src/kernel/chainparams.cpp | 30 +++++++++++++++++++++ src/kernel/chainparams.h | 11 ++++++++ src/net_processing.cpp | 2 +- src/test/fuzz/headerssync.cpp | 14 +++++++--- src/test/headers_sync_chainwork_tests.cpp | 19 +++++++++++--- 10 files changed, 99 insertions(+), 39 deletions(-) diff --git a/contrib/devtools/README.md b/contrib/devtools/README.md index 366cd4a07df..3309057b6f3 100644 --- a/contrib/devtools/README.md +++ b/contrib/devtools/README.md @@ -137,7 +137,7 @@ BUILDDIR=$PWD/my-build-dir contrib/devtools/gen-manpages.py headerssync-params.py ===================== -A script to generate optimal parameters for the headerssync module (src/headerssync.cpp). It takes no command-line +A script to generate optimal parameters for the headerssync module (stored in src/kernel/chainparams.cpp). It takes no command-line options, as all its configuration is set at the top of the file. It runs many times faster inside PyPy. Invocation: ```bash diff --git a/contrib/devtools/headerssync-params.py b/contrib/devtools/headerssync-params.py index ece1a786302..82bdaadb233 100755 --- a/contrib/devtools/headerssync-params.py +++ b/contrib/devtools/headerssync-params.py @@ -5,8 +5,8 @@ """Script to find the optimal parameters for the headerssync module through simulation.""" -from math import log, exp, sqrt from datetime import datetime, timedelta +from math import log, exp, sqrt import random # Parameters: @@ -337,15 +337,15 @@ def analyze(when): attack_volume = NET_HEADER_SIZE * MINCHAINWORK_HEADERS # And report them. print() - print("Optimal configuration:") + print(f"Given current min chainwork headers of {MINCHAINWORK_HEADERS}, the optimal parameters for low") + print(f"memory usage on mainchain for release until {TIME:%Y-%m-%d} is:") print() - print("//! Store one header commitment per HEADER_COMMITMENT_PERIOD blocks.") - print(f"constexpr size_t HEADER_COMMITMENT_PERIOD{{{period}}};") - print() - print("//! Only feed headers to validation once this many headers on top have been") - print("//! received and validated against commitments.") - print(f"constexpr size_t REDOWNLOAD_BUFFER_SIZE{{{bufsize}}};" + print(f" // Generated by headerssync-params.py on {datetime.today():%Y-%m-%d}.") + print( " m_headers_sync_params = HeadersSyncParams{") + print(f" .commitment_period = {period},") + print(f" .redownload_buffer_size = {bufsize}," f" // {bufsize}/{period} = ~{bufsize/period:.1f} commitments") + print( " };") print() print("Properties:") print(f"- Per-peer memory for mainchain sync: {mem_mainchain / 8192:.3f} KiB") diff --git a/doc/release-process.md b/doc/release-process.md index 9159b8cc8c8..272f36eadcf 100644 --- a/doc/release-process.md +++ b/doc/release-process.md @@ -53,7 +53,7 @@ Release Process - Set `MINCHAINWORK_HEADERS` to the height used for the `nMinimumChainWork` calculation above. - Check that the other variables still look reasonable. - Run the script. It works fine in CPython, but PyPy is much faster (seconds instead of minutes): `pypy3 contrib/devtools/headerssync-params.py`. - - Paste the output defining `HEADER_COMMITMENT_PERIOD` and `REDOWNLOAD_BUFFER_SIZE` into the top of [`src/headerssync.cpp`](/src/headerssync.cpp). 
+ - Paste the output defining the header `commitment_period` and `redownload_buffer_size` into the mainnet section of [`src/kernel/chainparams.cpp`](/src/kernel/chainparams.cpp). - Clear the release notes and move them to the wiki (see "Write the release notes" below). - Translations on Transifex: - Pull translations from Transifex into the master branch. diff --git a/src/headerssync.cpp b/src/headerssync.cpp index f966b13a922..e52a8cdc486 100644 --- a/src/headerssync.cpp +++ b/src/headerssync.cpp @@ -3,30 +3,24 @@ // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include + #include #include #include #include #include -// The two constants below are computed using the simulation script in -// contrib/devtools/headerssync-params.py. - -//! Store one header commitment per HEADER_COMMITMENT_PERIOD blocks. -constexpr size_t HEADER_COMMITMENT_PERIOD{632}; - -//! Only feed headers to validation once this many headers on top have been -//! received and validated against commitments. -constexpr size_t REDOWNLOAD_BUFFER_SIZE{15009}; // 15009/632 = ~23.7 commitments - -// Our memory analysis assumes 48 bytes for a CompressedHeader (so we should -// re-calculate parameters if we compress further) +// Our memory analysis in headerssync-params.py assumes this many bytes for a +// CompressedHeader (we should re-calculate parameters if we compress further). static_assert(sizeof(CompressedHeader) == 48); HeadersSyncState::HeadersSyncState(NodeId id, const Consensus::Params& consensus_params, - const CBlockIndex* chain_start, const arith_uint256& minimum_required_work) : - m_commit_offset(FastRandomContext().randrange(HEADER_COMMITMENT_PERIOD)), + const HeadersSyncParams& params, const CBlockIndex* chain_start, + const arith_uint256& minimum_required_work) : + m_commit_offset((assert(params.commitment_period > 0), // HeadersSyncParams field must be initialized to non-zero. + FastRandomContext().randrange(params.commitment_period))), m_id(id), m_consensus_params(consensus_params), + m_params(params), m_chain_start(chain_start), m_minimum_required_work(minimum_required_work), m_current_chain_work(chain_start->nChainWork), @@ -41,7 +35,9 @@ HeadersSyncState::HeadersSyncState(NodeId id, const Consensus::Params& consensus // exceeds this bound, because it's not possible for a consensus-valid // chain to be longer than this (at the current time -- in the future we // could try again, if necessary, to sync a longer chain). - m_max_commitments = 6*(Ticks(NodeClock::now() - NodeSeconds{std::chrono::seconds{chain_start->GetMedianTimePast()}}) + MAX_FUTURE_BLOCK_TIME) / HEADER_COMMITMENT_PERIOD; + const auto max_seconds_since_start{(Ticks(NodeClock::now() - NodeSeconds{std::chrono::seconds{chain_start->GetMedianTimePast()}})) + + MAX_FUTURE_BLOCK_TIME}; + m_max_commitments = 6 * max_seconds_since_start / m_params.commitment_period; LogDebug(BCLog::NET, "Initial headers sync started with peer=%d: height=%i, max_commitments=%i, min_work=%s\n", m_id, m_current_height, m_max_commitments, m_minimum_required_work.ToString()); } @@ -193,7 +189,7 @@ bool HeadersSyncState::ValidateAndProcessSingleHeader(const CBlockHeader& curren return false; } - if (next_height % HEADER_COMMITMENT_PERIOD == m_commit_offset) { + if (next_height % m_params.commitment_period == m_commit_offset) { // Add a commitment. 
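        // [Editor's note, not part of the patch.] Each commitment is a single
        // salted bit, m_hasher(hash) & 1, stored once every
        // m_params.commitment_period heights at the secret m_commit_offset.
        // During REDOWNLOAD roughly the following runs at the same heights
        // (simplified):
        //
        //   const bool commitment{m_hasher(header.GetHash()) & 1};
        //   if (commitment != m_header_commitments.front()) { /* abort the sync */ }
        //   m_header_commitments.pop_front();
        //
        // A mismatching bit is what foils the second chain in the
        // sneaky_redownload test.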
m_header_commitments.push_back(m_hasher(current.GetHash()) & 1); if (m_header_commitments.size() > m_max_commitments) { @@ -254,7 +250,7 @@ bool HeadersSyncState::ValidateAndStoreRedownloadedHeader(const CBlockHeader& he // it's possible our peer has extended its chain between our first sync and // our second, and we don't want to return failure after we've seen our // target blockhash just because we ran out of commitments. - if (!m_process_all_remaining_headers && next_height % HEADER_COMMITMENT_PERIOD == m_commit_offset) { + if (!m_process_all_remaining_headers && next_height % m_params.commitment_period == m_commit_offset) { if (m_header_commitments.size() == 0) { LogDebug(BCLog::NET, "Initial headers sync aborted with peer=%d: commitment overrun at height=%i (redownload phase)\n", m_id, next_height); // Somehow our peer managed to feed us a different chain and @@ -285,7 +281,7 @@ std::vector HeadersSyncState::PopHeadersReadyForAcceptance() Assume(m_download_state == State::REDOWNLOAD); if (m_download_state != State::REDOWNLOAD) return ret; - while (m_redownloaded_headers.size() > REDOWNLOAD_BUFFER_SIZE || + while (m_redownloaded_headers.size() > m_params.redownload_buffer_size || (m_redownloaded_headers.size() > 0 && m_process_all_remaining_headers)) { ret.emplace_back(m_redownloaded_headers.front().GetFullHeader(m_redownload_buffer_first_prev_hash)); m_redownloaded_headers.pop_front(); diff --git a/src/headerssync.h b/src/headerssync.h index 12afe66097b..9e3af58d60a 100644 --- a/src/headerssync.h +++ b/src/headerssync.h @@ -136,7 +136,8 @@ public: * minimum_required_work: amount of chain work required to accept the chain */ HeadersSyncState(NodeId id, const Consensus::Params& consensus_params, - const CBlockIndex* chain_start, const arith_uint256& minimum_required_work); + const HeadersSyncParams& params, const CBlockIndex* chain_start, + const arith_uint256& minimum_required_work); /** Result data structure for ProcessNextHeaders. */ struct ProcessingResult { @@ -179,8 +180,8 @@ protected: /** The (secret) offset on the heights for which to create commitments. * * m_header_commitments entries are created at any height h for which - * (h % HEADER_COMMITMENT_PERIOD) == m_commit_offset. */ - const unsigned m_commit_offset; + * (h % m_params.commitment_period) == m_commit_offset. */ + const size_t m_commit_offset; private: /** Clear out all download state that might be in progress (freeing any used @@ -214,6 +215,9 @@ private: /** We use the consensus params in our anti-DoS calculations */ const Consensus::Params& m_consensus_params; + /** Parameters that impact memory usage for a given chain, especially when attacked. */ + const HeadersSyncParams m_params; + /** Store the last block in our block index that the peer's chain builds from */ const CBlockIndex* m_chain_start{nullptr}; diff --git a/src/kernel/chainparams.cpp b/src/kernel/chainparams.cpp index 3955794f896..062f91af4fa 100644 --- a/src/kernel/chainparams.cpp +++ b/src/kernel/chainparams.cpp @@ -195,6 +195,12 @@ public: .tx_count = 1235299397, .dTxRate = 5.456290459519495, }; + + // Generated by headerssync-params.py on 2025-09-01. + m_headers_sync_params = HeadersSyncParams{ + .commitment_period = 632, + .redownload_buffer_size = 15009, // 15009/632 = ~23.7 commitments + }; } }; @@ -292,6 +298,12 @@ public: .tx_count = 508468699, .dTxRate = 7.172978845985714, }; + + // Generated by headerssync-params.py on 2025-09-03. 
+ m_headers_sync_params = HeadersSyncParams{ + .commitment_period = 628, + .redownload_buffer_size = 13460, // 13460/628 = ~21.4 commitments + }; } }; @@ -393,6 +405,12 @@ public: .tx_count = 11414302, .dTxRate = 0.2842619757327476, }; + + // Generated by headerssync-params.py on 2025-09-03. + m_headers_sync_params = HeadersSyncParams{ + .commitment_period = 275, + .redownload_buffer_size = 7017, // 7017/275 = ~25.5 commitments + }; } }; @@ -506,6 +524,12 @@ public: fDefaultConsistencyChecks = false; m_is_mockable_chain = false; + + // Generated by headerssync-params.py on 2025-09-03. + m_headers_sync_params = HeadersSyncParams{ + .commitment_period = 390, + .redownload_buffer_size = 9584, // 9584/390 = ~24.6 commitments + }; } }; @@ -636,6 +660,12 @@ public: base58Prefixes[EXT_SECRET_KEY] = {0x04, 0x35, 0x83, 0x94}; bech32_hrp = "bcrt"; + + // Copied from Testnet4. + m_headers_sync_params = HeadersSyncParams{ + .commitment_period = 275, + .redownload_buffer_size = 7017, // 7017/275 = ~25.5 commitments + }; } }; diff --git a/src/kernel/chainparams.h b/src/kernel/chainparams.h index 77991d497dd..0a1a5bdff8d 100644 --- a/src/kernel/chainparams.h +++ b/src/kernel/chainparams.h @@ -61,6 +61,15 @@ struct ChainTxData { double dTxRate; //!< estimated number of transactions per second after that timestamp }; +//! Configuration for headers sync memory usage. +struct HeadersSyncParams { + //! Distance in blocks between header commitments. + size_t commitment_period{0}; + //! Minimum number of validated headers to accumulate in the redownload + //! buffer before feeding them into the permanent block index. + size_t redownload_buffer_size{0}; +}; + /** * CChainParams defines various tweakable parameters of a given instance of the * Bitcoin system. @@ -106,6 +115,7 @@ public: const std::vector& Base58Prefix(Base58Type type) const { return base58Prefixes[type]; } const std::string& Bech32HRP() const { return bech32_hrp; } const std::vector& FixedSeeds() const { return vFixedSeeds; } + const HeadersSyncParams& HeadersSync() const { return m_headers_sync_params; } std::optional AssumeutxoForHeight(int height) const { @@ -170,6 +180,7 @@ protected: bool m_is_mockable_chain; std::vector m_assumeutxo_data; ChainTxData chainTxData; + HeadersSyncParams m_headers_sync_params; }; std::optional GetNetworkForMagic(const MessageStartChars& pchMessageStart); diff --git a/src/net_processing.cpp b/src/net_processing.cpp index 336669a8545..352aba8e684 100644 --- a/src/net_processing.cpp +++ b/src/net_processing.cpp @@ -2654,7 +2654,7 @@ bool PeerManagerImpl::TryLowWorkHeadersSync(Peer& peer, CNode& pfrom, const CBlo // advancing to the first unknown header would be a small effect. LOCK(peer.m_headers_sync_mutex); peer.m_headers_sync.reset(new HeadersSyncState(peer.m_id, m_chainparams.GetConsensus(), - chain_start_header, minimum_chain_work)); + m_chainparams.HeadersSync(), chain_start_header, minimum_chain_work)); // Now a HeadersSyncState object for tracking this synchronization // is created, process the headers using it as normal. 
Failures are diff --git a/src/test/fuzz/headerssync.cpp b/src/test/fuzz/headerssync.cpp index f42bdf9562a..5bfbed85375 100644 --- a/src/test/fuzz/headerssync.cpp +++ b/src/test/fuzz/headerssync.cpp @@ -43,10 +43,11 @@ void MakeHeadersContinuous( class FuzzedHeadersSyncState : public HeadersSyncState { public: - FuzzedHeadersSyncState(const unsigned commit_offset, const CBlockIndex* chain_start, const arith_uint256& minimum_required_work) - : HeadersSyncState(/*id=*/0, Params().GetConsensus(), chain_start, minimum_required_work) + FuzzedHeadersSyncState(const HeadersSyncParams& sync_params, const size_t commit_offset, + const CBlockIndex* chain_start, const arith_uint256& minimum_required_work) + : HeadersSyncState(/*id=*/0, Params().GetConsensus(), sync_params, chain_start, minimum_required_work) { - const_cast(m_commit_offset) = commit_offset; + const_cast(m_commit_offset) = commit_offset; } }; @@ -65,9 +66,14 @@ FUZZ_TARGET(headers_sync_state, .init = initialize_headers_sync_state_fuzz) const uint256 genesis_hash = genesis_header.GetHash(); start_index.phashBlock = &genesis_hash; + const HeadersSyncParams params{ + .commitment_period = fuzzed_data_provider.ConsumeIntegralInRange(1, Params().HeadersSync().commitment_period * 2), + .redownload_buffer_size = fuzzed_data_provider.ConsumeIntegralInRange(0, Params().HeadersSync().redownload_buffer_size * 2), + }; arith_uint256 min_work{UintToArith256(ConsumeUInt256(fuzzed_data_provider))}; FuzzedHeadersSyncState headers_sync( - /*commit_offset=*/fuzzed_data_provider.ConsumeIntegralInRange(1, 1024), + params, + /*commit_offset=*/fuzzed_data_provider.ConsumeIntegralInRange(0, params.commitment_period - 1), /*chain_start=*/&start_index, /*minimum_required_work=*/min_work); diff --git a/src/test/headers_sync_chainwork_tests.cpp b/src/test/headers_sync_chainwork_tests.cpp index 4496ed5fa27..c91420978ff 100644 --- a/src/test/headers_sync_chainwork_tests.cpp +++ b/src/test/headers_sync_chainwork_tests.cpp @@ -6,6 +6,7 @@ #include #include #include +#include #include #include #include @@ -40,11 +41,14 @@ using State = HeadersSyncState::State; } \ } while (false) -constexpr size_t TARGET_BLOCKS{15'012}; +constexpr size_t TARGET_BLOCKS{15'000}; constexpr arith_uint256 CHAIN_WORK{TARGET_BLOCKS * 2}; -// Copied from headerssync.cpp, will be redefined in next commit. -constexpr size_t REDOWNLOAD_BUFFER_SIZE{15'009}; +// Subtract MAX_HEADERS_RESULTS (2000 headers/message) + an arbitrary smaller +// value (123) so our redownload buffer is well below the number of blocks +// required to reach the CHAIN_WORK threshold, to behave similarly to mainnet. +constexpr size_t REDOWNLOAD_BUFFER_SIZE{TARGET_BLOCKS - (MAX_HEADERS_RESULTS + 123)}; +constexpr size_t COMMITMENT_PERIOD{600}; // Somewhat close to mainnet. struct HeadersGeneratorSetup : public RegTestingSetup { const CBlock& genesis{Params().GenesisBlock()}; @@ -78,6 +82,10 @@ struct HeadersGeneratorSetup : public RegTestingSetup { { return {/*id=*/0, Params().GetConsensus(), + HeadersSyncParams{ + .commitment_period = COMMITMENT_PERIOD, + .redownload_buffer_size = REDOWNLOAD_BUFFER_SIZE, + }, chain_start, /*minimum_required_work=*/CHAIN_WORK}; } @@ -157,6 +165,11 @@ BOOST_AUTO_TEST_CASE(sneaky_redownload) /*exp_headers_size=*/0, /*exp_pow_validated_prev=*/std::nullopt, /*exp_locator_hash=*/genesis.GetHash()); + // Below is the number of commitment bits that must randomly match between + // the two chains for this test to spuriously fail. 
1 / 2^25 = + // 1 in 33'554'432 (somewhat less due to HeadersSyncState::m_commit_offset). + static_assert(TARGET_BLOCKS / COMMITMENT_PERIOD == 25); + // Try to sneakily feed back the second chain during REDOWNLOAD. CHECK_RESULT(hss.ProcessNextHeaders(second_chain, true), hss, /*exp_state=*/State::FINAL,