Compare commits

...

6 Commits

Author SHA1 Message Date
Luke Dashjr
6567b9e977
Merge d4a3abf6d430e12f54241a6191a9226fb0d31a65 into 5f4422d68dc3530c353af1f87499de1c864b60ad 2025-03-17 03:53:14 +01:00
merge-script
5f4422d68d
Merge : qa: Fix TxIndex race conditions
3301d2cbe8c3b76c97285d75fa59637cb6952d0b qa: Wait for txindex to avoid race condition (Hodlinator)
9bfb0d75ba10591cc6c9620f9fd1ecc0e55e7a48 qa: Remove unnecessary -txindex args (Hodlinator)
7ac281c19cd3d11f316dbbb3308eabf1ad4f26d6 qa: Add missing coverage of corrupt indexes (Hodlinator)

Pull request description:

  - Add synchronization in 3 places where, if the Transaction Index happens to be slow, querying it for transactions leads to rare test failures (one such case was experienced on Windows, prompting this investigation).
  - Remove unnecessary TxIndex initialization in some tests.
  - Add some missing TxIndex-related test coverage in feature_init.py.

ACKs for top commit:
  fjahr:
    re-ACK 3301d2cbe8c3b76c97285d75fa59637cb6952d0b
  mzumsande:
    Code Review ACK 3301d2cbe8c3b76c97285d75fa59637cb6952d0b
  furszy:
    Code review ACK 3301d2cbe8c3b76c97285d75fa59637cb6952d0b
  Prabhat1308:
    Concept ACK [`3301d2c`](3301d2cbe8)

Tree-SHA512: 7c2019e38455f344856aaf6b381faafbd88d53dc88d13309deb718c1dcfbee4ccca7c7f1b66917395503a6f94c3b216a007ad432cc8b93d0309db9805f38d602
2025-03-17 10:28:14 +08:00
Hodlinator
3301d2cbe8
qa: Wait for txindex to avoid race condition
Can be verified to be necessary by adding std::this_thread::sleep_for(0.5s) at the beginning of TxIndex::CustomAppend.
2025-03-10 15:24:16 +01:00
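The race exists because the transaction index catches up to the chain tip asynchronously, so RPCs that consult it (such as getrawtransaction without a block hash) can run before the index has seen a freshly mined block. Below is a minimal standalone sketch of the kind of wait this commit adds; the PR's actual helper, sync_txindex, appears in the test_framework/util.py hunk further down, and the class name and setup here are illustrative only.

from test_framework.test_framework import BitcoinTestFramework

class TxindexWaitSketch(BitcoinTestFramework):
    def set_test_params(self):
        self.num_nodes = 1
        self.extra_args = [["-txindex"]]

    def run_test(self):
        node = self.nodes[0]
        self.generate(node, 1)  # mine a block; the txindex now has to catch up
        # Without a wait like this, getrawtransaction can intermittently fail
        # while the index still lags behind the tip.
        self.wait_until(lambda: node.getindexinfo("txindex")["txindex"]["synced"])

if __name__ == '__main__':
    TxindexWaitSketch(__file__).main()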
Hodlinator
9bfb0d75ba
qa: Remove unnecessary -txindex args
(The parent commit ensured the indexes in feature_init.py are actually used; otherwise they would have been removed here as well.)
2025-03-07 22:22:31 +01:00
Hodlinator
7ac281c19c
qa: Add missing coverage of corrupt indexes 2025-03-07 22:22:31 +01:00
Luke Dashjr
d4a3abf6d4 Add -pruneduringinit option to temporarily use another prune target during IBD 2025-02-12 18:55:10 +00:00
11 changed files with 90 additions and 21 deletions

@ -508,6 +508,9 @@ void SetupServerArgs(ArgsManager& argsman, bool can_listen_ipc)
argsman.AddArg("-prune=<n>", strprintf("Reduce storage requirements by enabling pruning (deleting) of old blocks. This allows the pruneblockchain RPC to be called to delete specific blocks and enables automatic pruning of old blocks if a target size in MiB is provided. This mode is incompatible with -txindex. "
"Warning: Reverting this setting requires re-downloading the entire blockchain. "
"(default: 0 = disable pruning blocks, 1 = allow manual pruning via RPC, >=%u = automatically prune block files to stay under the specified target size in MiB)", MIN_DISK_SPACE_FOR_BLOCK_FILES / 1024 / 1024), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
argsman.AddArg("-pruneduringinit", "Temporarily adjusts the -prune setting until initial sync completes."
" Ignored if pruning is disabled."
" (default: -1 = same value as -prune)", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
argsman.AddArg("-reindex", "If enabled, wipe chain state and block index, and rebuild them from blk*.dat files on disk. Also wipe and rebuild other optional indexes that are active. If an assumeutxo snapshot was loaded, its chainstate will be wiped as well. The snapshot can then be reloaded via RPC.", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
argsman.AddArg("-reindex-chainstate", "If enabled, wipe chain state, and rebuild it from blk*.dat files on disk. If an assumeutxo snapshot was loaded, its chainstate will be wiped as well. The snapshot can then be reloaded via RPC.", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
argsman.AddArg("-settings=<file>", strprintf("Specify path to dynamic settings data file. Can be disabled with -nosettings. File is written at runtime and not meant to be edited by users (use %s instead for custom settings). Relative paths will be prefixed by datadir location. (default: %s)", BITCOIN_CONF_FILENAME, BITCOIN_SETTINGS_FILENAME), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);

@ -25,6 +25,7 @@ struct BlockManagerOpts {
const CChainParams& chainparams;
bool use_xor{DEFAULT_XOR_BLOCKSDIR};
uint64_t prune_target{0};
int64_t prune_target_during_init{-1};
bool fast_prune{false};
const fs::path blocks_dir;
Notifications& notifications;

@ -12,26 +12,50 @@
#include <util/translation.h>
#include <validation.h>
#include <algorithm>
#include <cstdint>
#include <string_view>
namespace node {
util::Result<uint64_t> ParsePruneOption(const int64_t nPruneArg, const std::string_view opt_name)
{
// block pruning; get the amount of disk space (in MiB) to allot for block & undo files
if (nPruneArg < 0) {
return util::Error{strprintf(_("%s cannot be configured with a negative value."), opt_name)};
} else if (nPruneArg == 0) { // pruning disabled
return 0;
} else if (nPruneArg == 1) { // manual pruning: -prune=1
return BlockManager::PRUNE_TARGET_MANUAL;
}
const uint64_t nPruneTarget{uint64_t(nPruneArg) * 1024 * 1024};
if (nPruneTarget < MIN_DISK_SPACE_FOR_BLOCK_FILES) {
return util::Error{strprintf(_("%s configured below the minimum of %d MiB. Please use a higher number."), opt_name, MIN_DISK_SPACE_FOR_BLOCK_FILES / 1024 / 1024)};
}
return nPruneTarget;
}
util::Result<void> ApplyArgsManOptions(const ArgsManager& args, BlockManager::Options& opts)
{
if (auto value{args.GetBoolArg("-blocksxor")}) opts.use_xor = *value;
-// block pruning; get the amount of disk space (in MiB) to allot for block & undo files
int64_t nPruneArg{args.GetIntArg("-prune", opts.prune_target)};
-if (nPruneArg < 0) {
-return util::Error{_("Prune cannot be configured with a negative value.")};
+if (const auto prune_parsed = ParsePruneOption(nPruneArg, "Prune")) {
+opts.prune_target = *prune_parsed;
+} else {
+return util::Error{util::ErrorString(prune_parsed)};
}
-uint64_t nPruneTarget{uint64_t(nPruneArg) * 1024 * 1024};
-if (nPruneArg == 1) { // manual pruning: -prune=1
-nPruneTarget = BlockManager::PRUNE_TARGET_MANUAL;
-} else if (nPruneTarget) {
-if (nPruneTarget < MIN_DISK_SPACE_FOR_BLOCK_FILES) {
-return util::Error{strprintf(_("Prune configured below the minimum of %d MiB. Please use a higher number."), MIN_DISK_SPACE_FOR_BLOCK_FILES / 1024 / 1024)};
+if (const auto prune_during_init{args.GetIntArg("-pruneduringinit")}) {
+if (*prune_during_init == -1) {
+opts.prune_target_during_init = -1;
+} else if (const auto prune_parsed = ParsePruneOption(*prune_during_init, "-pruneduringinit")) {
+// NOTE: PRUNE_TARGET_MANUAL is >int64 max
+opts.prune_target_during_init = std::min(std::numeric_limits<int64_t>::max(), (int64_t)*prune_parsed);
+} else {
+return util::Error{util::ErrorString(prune_parsed)};
+}
}
}
-opts.prune_target = nPruneTarget;
if (auto value{args.GetBoolArg("-fastprune")}) opts.fast_prune = *value;

@ -299,6 +299,26 @@ void BlockManager::FindFilesToPruneManual(
chain.GetRole(), last_block_can_prune, count);
}
uint64_t BlockManager::GetPruneTargetForChainstate(const Chainstate& chain, ChainstateManager& chainman) const
{
const auto number_of_chainstates{chainman.GetAll().size()};
const uint64_t min_overall_target{MIN_DISK_SPACE_FOR_BLOCK_FILES * number_of_chainstates};
auto target = std::max(min_overall_target, GetPruneTarget());
uint64_t target_boost{0};
if (m_opts.prune_target_during_init > -1 && chainman.IsInitialBlockDownload()) {
if ((uint64_t)m_opts.prune_target_during_init <= target) {
target = std::max(min_overall_target, (uint64_t)m_opts.prune_target_during_init);
} else if (chain.GetRole() != ChainstateRole::ASSUMEDVALID) {
// Only the background/normal gets the benefit
// NOTE: This assumes only one such chainstate exists
target_boost = m_opts.prune_target_during_init - target;
}
}
// Distribute our -prune budget over all chainstates.
target = (target / number_of_chainstates) + target_boost;
return target;
}
void BlockManager::FindFilesToPrune(
std::set<int>& setFilesToPrune,
int last_prune,
@ -306,9 +326,7 @@ void BlockManager::FindFilesToPrune(
ChainstateManager& chainman)
{
LOCK2(cs_main, cs_LastBlockFile);
-// Distribute our -prune budget over all chainstates.
-const auto target = std::max(
-MIN_DISK_SPACE_FOR_BLOCK_FILES, GetPruneTarget() / chainman.GetAll().size());
+const auto target{GetPruneTargetForChainstate(chain, chainman)};
const uint64_t target_sync_height = chainman.m_best_header->nHeight;
if (chain.m_chain.Height() < 0 || target == 0) {
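A worked example of the GetPruneTargetForChainstate arithmetic above, sketched in Python with assumed numbers (550 MiB floor per chainstate, two chainstates as after loading an assumeutxo snapshot, -prune=2000 and -pruneduringinit=10000): during IBD the temporary extra room goes only to the non-assumed-valid chainstate, while the base budget is split evenly.

MiB = 1024 * 1024
MIN_DISK_SPACE_FOR_BLOCK_FILES = 550 * MiB  # assumed value of the C++ constant

def prune_target_for_chainstate(prune_target, prune_target_during_init,
                                num_chainstates, in_ibd, is_assumed_valid):
    """Python sketch of the C++ logic above; all sizes in bytes."""
    min_overall_target = MIN_DISK_SPACE_FOR_BLOCK_FILES * num_chainstates
    target = max(min_overall_target, prune_target)
    target_boost = 0
    if prune_target_during_init > -1 and in_ibd:
        if prune_target_during_init <= target:
            target = max(min_overall_target, prune_target_during_init)
        elif not is_assumed_valid:
            # Only the background/normal chainstate gets the temporary boost.
            target_boost = prune_target_during_init - target
    # Distribute the -prune budget over all chainstates.
    return target // num_chainstates + target_boost

# -prune=2000 -pruneduringinit=10000, snapshot + background chainstate, during IBD:
background = prune_target_for_chainstate(2000 * MiB, 10000 * MiB, 2, True, False)
snapshot = prune_target_for_chainstate(2000 * MiB, 10000 * MiB, 2, True, True)
print(background // MiB, snapshot // MiB)  # 9000 and 1000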

@ -350,6 +350,7 @@ public:
/** Attempt to stay below this number of bytes of block files. */
[[nodiscard]] uint64_t GetPruneTarget() const { return m_opts.prune_target; }
[[nodiscard]] uint64_t GetPruneTargetForChainstate(const Chainstate& chain, ChainstateManager& chainman) const EXCLUSIVE_LOCKS_REQUIRED(cs_main);
static constexpr auto PRUNE_TARGET_MANUAL{std::numeric_limits<uint64_t>::max()};
[[nodiscard]] bool LoadingBlocks() const { return m_importing || !m_blockfiles_indexed; }

@ -88,7 +88,7 @@ class InitTest(BitcoinTestFramework):
args = ['-txindex=1', '-blockfilterindex=1', '-coinstatsindex=1']
for terminate_line in lines_to_terminate_after:
self.log.info(f"Starting node and will exit after line {terminate_line}")
self.log.info(f"Starting node and will terminate after line {terminate_line}")
with node.busy_wait_for_debug_log([terminate_line]):
if platform.system() == 'Windows':
# CREATE_NEW_PROCESS_GROUP is required in order to be able
@ -108,12 +108,22 @@ class InitTest(BitcoinTestFramework):
'blocks/index/*.ldb': 'Error opening block database.',
'chainstate/*.ldb': 'Error opening coins database.',
'blocks/blk*.dat': 'Error loading block database.',
'indexes/txindex/MANIFEST*': 'LevelDB error: Corruption: CURRENT points to a non-existent file',
# Removing these files does not result in a startup error:
# 'indexes/blockfilter/basic/*.dat', 'indexes/blockfilter/basic/db/*.*', 'indexes/coinstats/db/*.*',
# 'indexes/txindex/*.log', 'indexes/txindex/CURRENT', 'indexes/txindex/LOCK'
}
files_to_perturb = {
'blocks/index/*.ldb': 'Error loading block database.',
'chainstate/*.ldb': 'Error opening coins database.',
'blocks/blk*.dat': 'Corrupted block database detected.',
'indexes/blockfilter/basic/db/*.*': 'LevelDB error: Corruption',
'indexes/coinstats/db/*.*': 'LevelDB error: Corruption',
'indexes/txindex/*.log': 'LevelDB error: Corruption',
'indexes/txindex/CURRENT': 'LevelDB error: Corruption',
# Perturbing these files does not result in a startup error:
# 'indexes/blockfilter/basic/*.dat', 'indexes/txindex/MANIFEST*', 'indexes/txindex/LOCK'
}
for file_patt, err_fragment in files_to_delete.items():
@ -135,9 +145,10 @@ class InitTest(BitcoinTestFramework):
self.stop_node(0)
self.log.info("Test startup errors after perturbing certain essential files")
dirs = ["blocks", "chainstate", "indexes"]
for file_patt, err_fragment in files_to_perturb.items():
shutil.copytree(node.chain_path / "blocks", node.chain_path / "blocks_bak")
shutil.copytree(node.chain_path / "chainstate", node.chain_path / "chainstate_bak")
for dir in dirs:
shutil.copytree(node.chain_path / dir, node.chain_path / f"{dir}_bak")
target_files = list(node.chain_path.glob(file_patt))
for target_file in target_files:
@ -151,10 +162,9 @@ class InitTest(BitcoinTestFramework):
start_expecting_error(err_fragment)
shutil.rmtree(node.chain_path / "blocks")
shutil.rmtree(node.chain_path / "chainstate")
shutil.move(node.chain_path / "blocks_bak", node.chain_path / "blocks")
shutil.move(node.chain_path / "chainstate_bak", node.chain_path / "chainstate")
for dir in dirs:
shutil.rmtree(node.chain_path / dir)
shutil.move(node.chain_path / f"{dir}_bak", node.chain_path / dir)
def init_pid_test(self):
BITCOIN_PID_FILENAME_CUSTOM = "my_fancy_bitcoin_pid_file.foobar"

@ -45,6 +45,7 @@ from test_framework.util import (
assert_equal,
assert_greater_than,
assert_raises_rpc_error,
sync_txindex,
)
from test_framework.wallet import MiniWallet
from test_framework.wallet_util import generate_keypair
@ -270,6 +271,7 @@ class MempoolAcceptanceTest(BitcoinTestFramework):
self.log.info('A coinbase transaction')
# Pick the input of the first tx we created, so it has to be a coinbase tx
sync_txindex(self, node)
raw_tx_coinbase_spent = node.getrawtransaction(txid=node.decoderawtransaction(hexstring=raw_tx_in_block)['vin'][0]['txid'])
tx = tx_from_hex(raw_tx_coinbase_spent)
self.check_mempool_result(

@ -34,6 +34,7 @@ from test_framework.util import (
assert_equal,
assert_greater_than,
assert_raises_rpc_error,
sync_txindex,
)
from test_framework.wallet import (
getnewdestination,
@ -70,7 +71,7 @@ class RawTransactionsTest(BitcoinTestFramework):
self.num_nodes = 3
self.extra_args = [
["-txindex"],
["-txindex"],
[],
["-fastprune", "-prune=1"],
]
# whitelist peers to speed up tx relay / mempool sync
@ -109,6 +110,7 @@ class RawTransactionsTest(BitcoinTestFramework):
self.log.info(f"Test getrawtransaction {'with' if n == 0 else 'without'} -txindex")
if n == 0:
sync_txindex(self, self.nodes[n])
# With -txindex.
# 1. valid parameters - only supply txid
assert_equal(self.nodes[n].getrawtransaction(txId), tx['hex'])

@ -12,6 +12,7 @@ from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
sync_txindex,
)
from test_framework.wallet import MiniWallet
@ -77,6 +78,7 @@ class MerkleBlockTest(BitcoinTestFramework):
assert_equal(sorted(self.nodes[0].verifytxoutproof(self.nodes[0].gettxoutproof([txid1, txid2]))), sorted(txlist))
assert_equal(sorted(self.nodes[0].verifytxoutproof(self.nodes[0].gettxoutproof([txid2, txid1]))), sorted(txlist))
# We can always get a proof if we have a -txindex
sync_txindex(self, self.nodes[1])
assert_equal(self.nodes[0].verifytxoutproof(self.nodes[1].gettxoutproof([txid_spent])), [txid_spent])
# We can't get a proof if we specify transactions from different blocks
assert_raises_rpc_error(-5, "Not all transactions found in specified or retrieved block", self.nodes[0].gettxoutproof, [txid1, txid3])

@ -592,3 +592,10 @@ def find_vout_for_address(node, txid, addr):
if addr == tx["vout"][i]["scriptPubKey"]["address"]:
return i
raise RuntimeError("Vout not found for address: txid=%s, addr=%s" % (txid, addr))
def sync_txindex(test_framework, node):
test_framework.log.debug("Waiting for node txindex to sync")
sync_start = int(time.time())
test_framework.wait_until(lambda: node.getindexinfo("txindex")["txindex"]["synced"])
test_framework.log.debug(f"Synced in {time.time() - sync_start} seconds")

@ -117,7 +117,6 @@ class AddressInputTypeGrouping(BitcoinTestFramework):
self.extra_args = [
[
"-addresstype=bech32",
"-txindex",
],
[
"-addresstype=p2sh-segwit",