mirror of
https://github.com/bitcoin/bitcoin.git
synced 2026-01-20 07:09:15 +01:00
headerssync: Make HeadersSyncState more flexible and move constants
Move calculated constants from the top of src/headerssync.cpp into src/kernel/chainparams.cpp. Instead of being hardcoded to mainnet parameters, HeadersSyncState can now vary depending on the chain or test. (This means we can reset TARGET_BLOCKS back to the nice round number of 15'000.) Signet and the testnets got new HeadersSyncParams constants by temporarily altering headerssync-params.py with the corresponding GENESIS_TIME and MINCHAINWORK_HEADERS (based on the defaultAssumeValid block height comments, corresponding to nMinimumChainWork). Regtest doesn't have a default assume-valid block height, so its values are copied from Testnet 4. Since the constants only affect memory usage, and have very low impact unless dealing with a largely malicious chain, it is not critical to keep updating them for non-mainnet chains. GENESIS_TIMEs (UTC): Testnet3: 1296688602 = datetime(2011, 2, 2); Testnet4: 1714777860 = datetime(2024, 5, 3); Signet: 1598918400 = datetime(2020, 9, 1)
This commit is contained in:
@@ -3,30 +3,24 @@
|
||||
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
|
||||
|
||||
#include <headerssync.h>
|
||||
|
||||
#include <logging.h>
|
||||
#include <pow.h>
|
||||
#include <util/check.h>
|
||||
#include <util/time.h>
|
||||
#include <util/vector.h>
|
||||
|
||||
// The two constants below are computed using the simulation script in
|
||||
// contrib/devtools/headerssync-params.py.
|
||||
|
||||
//! Store one header commitment per HEADER_COMMITMENT_PERIOD blocks.
|
||||
constexpr size_t HEADER_COMMITMENT_PERIOD{632};
|
||||
|
||||
//! Only feed headers to validation once this many headers on top have been
|
||||
//! received and validated against commitments.
|
||||
constexpr size_t REDOWNLOAD_BUFFER_SIZE{15009}; // 15009/632 = ~23.7 commitments
|
||||
|
||||
// Our memory analysis assumes 48 bytes for a CompressedHeader (so we should
|
||||
// re-calculate parameters if we compress further)
|
||||
// Our memory analysis in headerssync-params.py assumes this many bytes for a
|
||||
// CompressedHeader (we should re-calculate parameters if we compress further).
|
||||
static_assert(sizeof(CompressedHeader) == 48);
|
||||
|
||||
HeadersSyncState::HeadersSyncState(NodeId id, const Consensus::Params& consensus_params,
|
||||
const CBlockIndex* chain_start, const arith_uint256& minimum_required_work) :
|
||||
m_commit_offset(FastRandomContext().randrange<unsigned>(HEADER_COMMITMENT_PERIOD)),
|
||||
const HeadersSyncParams& params, const CBlockIndex* chain_start,
|
||||
const arith_uint256& minimum_required_work) :
|
||||
m_commit_offset((assert(params.commitment_period > 0), // HeadersSyncParams field must be initialized to non-zero.
|
||||
FastRandomContext().randrange(params.commitment_period))),
|
||||
m_id(id), m_consensus_params(consensus_params),
|
||||
m_params(params),
|
||||
m_chain_start(chain_start),
|
||||
m_minimum_required_work(minimum_required_work),
|
||||
m_current_chain_work(chain_start->nChainWork),
|
||||
@@ -41,7 +35,9 @@ HeadersSyncState::HeadersSyncState(NodeId id, const Consensus::Params& consensus
|
||||
// exceeds this bound, because it's not possible for a consensus-valid
|
||||
// chain to be longer than this (at the current time -- in the future we
|
||||
// could try again, if necessary, to sync a longer chain).
|
||||
m_max_commitments = 6*(Ticks<std::chrono::seconds>(NodeClock::now() - NodeSeconds{std::chrono::seconds{chain_start->GetMedianTimePast()}}) + MAX_FUTURE_BLOCK_TIME) / HEADER_COMMITMENT_PERIOD;
|
||||
const auto max_seconds_since_start{(Ticks<std::chrono::seconds>(NodeClock::now() - NodeSeconds{std::chrono::seconds{chain_start->GetMedianTimePast()}}))
|
||||
+ MAX_FUTURE_BLOCK_TIME};
|
||||
m_max_commitments = 6 * max_seconds_since_start / m_params.commitment_period;
|
||||
|
||||
LogDebug(BCLog::NET, "Initial headers sync started with peer=%d: height=%i, max_commitments=%i, min_work=%s\n", m_id, m_current_height, m_max_commitments, m_minimum_required_work.ToString());
|
||||
}
|
||||
@@ -193,7 +189,7 @@ bool HeadersSyncState::ValidateAndProcessSingleHeader(const CBlockHeader& curren
|
||||
return false;
|
||||
}
|
||||
|
||||
if (next_height % HEADER_COMMITMENT_PERIOD == m_commit_offset) {
|
||||
if (next_height % m_params.commitment_period == m_commit_offset) {
|
||||
// Add a commitment.
|
||||
m_header_commitments.push_back(m_hasher(current.GetHash()) & 1);
|
||||
if (m_header_commitments.size() > m_max_commitments) {
|
||||
@@ -254,7 +250,7 @@ bool HeadersSyncState::ValidateAndStoreRedownloadedHeader(const CBlockHeader& he
|
||||
// it's possible our peer has extended its chain between our first sync and
|
||||
// our second, and we don't want to return failure after we've seen our
|
||||
// target blockhash just because we ran out of commitments.
|
||||
if (!m_process_all_remaining_headers && next_height % HEADER_COMMITMENT_PERIOD == m_commit_offset) {
|
||||
if (!m_process_all_remaining_headers && next_height % m_params.commitment_period == m_commit_offset) {
|
||||
if (m_header_commitments.size() == 0) {
|
||||
LogDebug(BCLog::NET, "Initial headers sync aborted with peer=%d: commitment overrun at height=%i (redownload phase)\n", m_id, next_height);
|
||||
// Somehow our peer managed to feed us a different chain and
|
||||
@@ -285,7 +281,7 @@ std::vector<CBlockHeader> HeadersSyncState::PopHeadersReadyForAcceptance()
|
||||
Assume(m_download_state == State::REDOWNLOAD);
|
||||
if (m_download_state != State::REDOWNLOAD) return ret;
|
||||
|
||||
while (m_redownloaded_headers.size() > REDOWNLOAD_BUFFER_SIZE ||
|
||||
while (m_redownloaded_headers.size() > m_params.redownload_buffer_size ||
|
||||
(m_redownloaded_headers.size() > 0 && m_process_all_remaining_headers)) {
|
||||
ret.emplace_back(m_redownloaded_headers.front().GetFullHeader(m_redownload_buffer_first_prev_hash));
|
||||
m_redownloaded_headers.pop_front();
|
||||
|
||||
Reference in New Issue
Block a user