Compare commits for bitcoin/bitcoin (mirror of https://github.com/bitcoin/bitcoin.git)
9 commits: 64c2bb3e43 ... 0e5df3bdda

Commits in this range (author and date columns were not captured):
- 0e5df3bdda
- 5f4422d68d
- 93b07997e9
- 64b956f422
- 3463a7f481
- 921b89e1d4
- 3301d2cbe8
- 9bfb0d75ba
- 7ac281c19c
@@ -5781,7 +5781,9 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
         // Detect whether we're stalling
         auto stalling_timeout = m_block_stalling_timeout.load();
-        if (state.m_stalling_since.count() && state.m_stalling_since < current_time - stalling_timeout) {
+        // Allow more time for addnode peers
+        const auto adjusted_timeout{pto->IsManualConn() ? BLOCK_STALLING_TIMEOUT_MAX : stalling_timeout};
+        if (state.m_stalling_since.count() && state.m_stalling_since < current_time - adjusted_timeout) {
             // Stalling only triggers when the block download window cannot move. During normal steady state,
             // the download window should be much larger than the to-be-downloaded set of blocks, so disconnection
             // should only happen during initial block download.
@@ -5796,7 +5798,8 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
             return true;
         }
         // In case there is a block that has been in flight from this peer for block_interval * (1 + 0.5 * N)
-        // (with N the number of peers from which we're downloading validated blocks), disconnect due to timeout.
+        // (with N the number of peers from which we're downloading validated blocks), disconnect due to timeout
+        // unless it is an addnode peer.
         // We compensate for other peers to prevent killing off peers due to our own downstream link
         // being saturated. We only count validated in-flight blocks so peers can't advertise non-existing block hashes
         // to unreasonably increase our timeout.
@@ -5804,8 +5807,12 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
             QueuedBlock &queuedBlock = state.vBlocksInFlight.front();
             int nOtherPeersWithValidatedDownloads = m_peers_downloading_from - 1;
             if (current_time > state.m_downloading_since + std::chrono::seconds{consensusParams.nPowTargetSpacing} * (BLOCK_DOWNLOAD_TIMEOUT_BASE + BLOCK_DOWNLOAD_TIMEOUT_PER_PEER * nOtherPeersWithValidatedDownloads)) {
-                LogInfo("Timeout downloading block %s, %s\n", queuedBlock.pindex->GetBlockHash().ToString(), pto->DisconnectMsg(fLogIPs));
-                pto->fDisconnect = true;
+                if (pto->IsManualConn()) {
+                    LogInfo("Timeout downloading block %s from addnode peer, not %s\n", queuedBlock.pindex->GetBlockHash().ToString(), pto->DisconnectMsg(fLogIPs));
+                } else {
+                    LogInfo("Timeout downloading block %s, %s\n", queuedBlock.pindex->GetBlockHash().ToString(), pto->DisconnectMsg(fLogIPs));
+                    pto->fDisconnect = true;
+                }
                 return true;
             }
         }
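
The comment in the hunk above gives the in-flight block timeout as block_interval * (1 + 0.5 * N). Below is a minimal Python sketch of that arithmetic, for illustration only: the 600-second block interval is the mainnet nPowTargetSpacing, and reading BLOCK_DOWNLOAD_TIMEOUT_BASE as 1 and BLOCK_DOWNLOAD_TIMEOUT_PER_PEER as 0.5 is an assumption taken from the comment, not from this diff. With this change the timeout still fires for addnode peers, but it only logs instead of setting fDisconnect.

# Sketch of the timeout formula described in the comment above:
# timeout = block_interval * (1 + 0.5 * N), where N counts the *other*
# peers we are downloading validated blocks from.
BLOCK_DOWNLOAD_TIMEOUT_BASE = 1.0      # assumed from the "1" in the comment
BLOCK_DOWNLOAD_TIMEOUT_PER_PEER = 0.5  # assumed from the "0.5 * N" in the comment
BLOCK_INTERVAL_SECONDS = 600           # mainnet nPowTargetSpacing

def block_download_timeout(n_other_peers_with_validated_downloads: int) -> float:
    """Seconds a block may stay in flight before the timeout triggers."""
    return BLOCK_INTERVAL_SECONDS * (BLOCK_DOWNLOAD_TIMEOUT_BASE +
                                     BLOCK_DOWNLOAD_TIMEOUT_PER_PEER * n_other_peers_with_validated_downloads)

# No other download peers: 10 minutes; each extra peer adds 5 minutes,
# compensating for a possibly saturated downstream link.
assert block_download_timeout(0) == 600.0
assert block_download_timeout(2) == 1200.0
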
@@ -5814,17 +5821,18 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
         // Detect whether this is a stalling initial-headers-sync peer
         if (m_chainman.m_best_header->Time() <= NodeClock::now() - 24h) {
             if (current_time > peer->m_headers_sync_timeout && nSyncStarted == 1 && (m_num_preferred_download_peers - state.fPreferredDownload >= 1)) {
-                // Disconnect a peer (without NetPermissionFlags::NoBan permission) if it is our only sync peer,
+                // Disconnect a peer (if it is neither an addnode peer, nor has
+                // NetPermissionFlags::NoBan permission) if it is our only sync peer
                 // and we have others we could be using instead.
                 // Note: If all our peers are inbound, then we won't
                 // disconnect our sync peer for stalling; we have bigger
                 // problems if we can't get any outbound peers.
-                if (!pto->HasPermission(NetPermissionFlags::NoBan)) {
+                if (!pto->IsManualConn() && !pto->HasPermission(NetPermissionFlags::NoBan)) {
                     LogInfo("Timeout downloading headers, %s\n", pto->DisconnectMsg(fLogIPs));
                     pto->fDisconnect = true;
                     return true;
                 } else {
-                    LogInfo("Timeout downloading headers from noban peer, not %s\n", pto->DisconnectMsg(fLogIPs));
+                    LogInfo("Timeout downloading headers from %s peer, not %s\n", pto->IsManualConn() ? "addnode" : "noban", pto->DisconnectMsg(fLogIPs));
                     // Reset the headers sync state so that we have a
                     // chance to try downloading from a different peer.
                     // Note: this will also result in at least one more
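
Taken together, the hunks above exempt manual (addnode) peers from disconnection for block stalling, in-flight block timeouts, and headers-sync timeouts. A minimal, hedged Python sketch of the resulting headers-sync rule, purely for illustration:

# Sketch (not part of the diff): after this change, the headers-sync
# timeout only disconnects peers that are neither manual (addnode)
# connections nor granted NetPermissionFlags::NoBan; otherwise the node
# just logs and resets the headers sync state.
def may_disconnect_for_headers_timeout(is_manual_conn: bool, has_noban: bool) -> bool:
    return not is_manual_conn and not has_noban

assert may_disconnect_for_headers_timeout(False, False)     # ordinary outbound peer
assert not may_disconnect_for_headers_timeout(True, False)  # addnode peer is kept
assert not may_disconnect_for_headers_timeout(False, True)  # noban peer is kept
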
@@ -307,7 +307,8 @@ static RPCHelpMan addnode()
     return RPCHelpMan{"addnode",
         "\nAttempts to add or remove a node from the addnode list.\n"
         "Or try a connection to a node once.\n"
-        "Nodes added using addnode (or -connect) are protected from DoS disconnection and are not required to be\n"
+        "Nodes added using addnode (or -connect) are protected from disconnection due to DoS or IBD header/block\n"
+        "download timeouts (and given more time before considered to be stalling), and are not required to be\n"
         "full nodes/support SegWit as other outbound peers are (though such peers will not be synced from).\n" +
         strprintf("Addnode connections are limited to %u at a time", MAX_ADDNODE_CONNECTIONS) +
         " and are counted separately from the -maxconnections limit.\n",
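
As a usage note, here is a hedged Python sketch of exercising the addnode RPC described by the updated help text; the node object and address are placeholders and not part of this changeset.

# Hypothetical sketch: `node` is assumed to be a running TestNode (or any
# RPC proxy) and `peer_addr` a reachable "host:port" string. Peers added
# this way are manual connections, i.e. the ones the protections above
# apply to.
def add_manual_peer(node, peer_addr):
    node.addnode(node=peer_addr, command="add")       # keep (re)trying this peer
    # node.addnode(node=peer_addr, command="onetry")  # or: attempt a single connection
    # node.addnode(node=peer_addr, command="remove")  # later: drop it from the addnode list
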
@@ -88,7 +88,7 @@ class InitTest(BitcoinTestFramework):

         args = ['-txindex=1', '-blockfilterindex=1', '-coinstatsindex=1']
         for terminate_line in lines_to_terminate_after:
-            self.log.info(f"Starting node and will exit after line {terminate_line}")
+            self.log.info(f"Starting node and will terminate after line {terminate_line}")
             with node.busy_wait_for_debug_log([terminate_line]):
                 if platform.system() == 'Windows':
                     # CREATE_NEW_PROCESS_GROUP is required in order to be able
@@ -108,12 +108,22 @@ class InitTest(BitcoinTestFramework):
             'blocks/index/*.ldb': 'Error opening block database.',
             'chainstate/*.ldb': 'Error opening coins database.',
             'blocks/blk*.dat': 'Error loading block database.',
+            'indexes/txindex/MANIFEST*': 'LevelDB error: Corruption: CURRENT points to a non-existent file',
+            # Removing these files does not result in a startup error:
+            # 'indexes/blockfilter/basic/*.dat', 'indexes/blockfilter/basic/db/*.*', 'indexes/coinstats/db/*.*',
+            # 'indexes/txindex/*.log', 'indexes/txindex/CURRENT', 'indexes/txindex/LOCK'
         }

         files_to_perturb = {
             'blocks/index/*.ldb': 'Error loading block database.',
             'chainstate/*.ldb': 'Error opening coins database.',
             'blocks/blk*.dat': 'Corrupted block database detected.',
+            'indexes/blockfilter/basic/db/*.*': 'LevelDB error: Corruption',
+            'indexes/coinstats/db/*.*': 'LevelDB error: Corruption',
+            'indexes/txindex/*.log': 'LevelDB error: Corruption',
+            'indexes/txindex/CURRENT': 'LevelDB error: Corruption',
+            # Perturbing these files does not result in a startup error:
+            # 'indexes/blockfilter/basic/*.dat', 'indexes/txindex/MANIFEST*', 'indexes/txindex/LOCK'
         }

         for file_patt, err_fragment in files_to_delete.items():
@@ -135,9 +145,10 @@ class InitTest(BitcoinTestFramework):
         self.stop_node(0)

         self.log.info("Test startup errors after perturbing certain essential files")
+        dirs = ["blocks", "chainstate", "indexes"]
         for file_patt, err_fragment in files_to_perturb.items():
-            shutil.copytree(node.chain_path / "blocks", node.chain_path / "blocks_bak")
-            shutil.copytree(node.chain_path / "chainstate", node.chain_path / "chainstate_bak")
+            for dir in dirs:
+                shutil.copytree(node.chain_path / dir, node.chain_path / f"{dir}_bak")
             target_files = list(node.chain_path.glob(file_patt))

             for target_file in target_files:
@@ -151,10 +162,9 @@ class InitTest(BitcoinTestFramework):

             start_expecting_error(err_fragment)

-            shutil.rmtree(node.chain_path / "blocks")
-            shutil.rmtree(node.chain_path / "chainstate")
-            shutil.move(node.chain_path / "blocks_bak", node.chain_path / "blocks")
-            shutil.move(node.chain_path / "chainstate_bak", node.chain_path / "chainstate")
+            for dir in dirs:
+                shutil.rmtree(node.chain_path / dir)
+                shutil.move(node.chain_path / f"{dir}_bak", node.chain_path / dir)

     def init_pid_test(self):
         BITCOIN_PID_FILENAME_CUSTOM = "my_fancy_bitcoin_pid_file.foobar"
@@ -45,6 +45,7 @@ from test_framework.util import (
     assert_equal,
     assert_greater_than,
     assert_raises_rpc_error,
+    sync_txindex,
 )
 from test_framework.wallet import MiniWallet
 from test_framework.wallet_util import generate_keypair
@@ -270,6 +271,7 @@ class MempoolAcceptanceTest(BitcoinTestFramework):

         self.log.info('A coinbase transaction')
         # Pick the input of the first tx we created, so it has to be a coinbase tx
+        sync_txindex(self, node)
         raw_tx_coinbase_spent = node.getrawtransaction(txid=node.decoderawtransaction(hexstring=raw_tx_in_block)['vin'][0]['txid'])
         tx = tx_from_hex(raw_tx_coinbase_spent)
         self.check_mempool_result(
@@ -34,6 +34,7 @@ from test_framework.util import (
     assert_equal,
     assert_greater_than,
     assert_raises_rpc_error,
+    sync_txindex,
 )
 from test_framework.wallet import (
     getnewdestination,
@@ -70,7 +71,7 @@ class RawTransactionsTest(BitcoinTestFramework):
         self.num_nodes = 3
         self.extra_args = [
             ["-txindex"],
-            ["-txindex"],
+            [],
             ["-fastprune", "-prune=1"],
         ]
         # whitelist peers to speed up tx relay / mempool sync
@@ -109,6 +110,7 @@ class RawTransactionsTest(BitcoinTestFramework):
             self.log.info(f"Test getrawtransaction {'with' if n == 0 else 'without'} -txindex")

             if n == 0:
+                sync_txindex(self, self.nodes[n])
                 # With -txindex.
                 # 1. valid parameters - only supply txid
                 assert_equal(self.nodes[n].getrawtransaction(txId), tx['hex'])
@@ -12,6 +12,7 @@ from test_framework.test_framework import BitcoinTestFramework
 from test_framework.util import (
     assert_equal,
     assert_raises_rpc_error,
+    sync_txindex,
 )
 from test_framework.wallet import MiniWallet

@@ -77,6 +78,7 @@ class MerkleBlockTest(BitcoinTestFramework):
         assert_equal(sorted(self.nodes[0].verifytxoutproof(self.nodes[0].gettxoutproof([txid1, txid2]))), sorted(txlist))
         assert_equal(sorted(self.nodes[0].verifytxoutproof(self.nodes[0].gettxoutproof([txid2, txid1]))), sorted(txlist))
         # We can always get a proof if we have a -txindex
+        sync_txindex(self, self.nodes[1])
         assert_equal(self.nodes[0].verifytxoutproof(self.nodes[1].gettxoutproof([txid_spent])), [txid_spent])
         # We can't get a proof if we specify transactions from different blocks
         assert_raises_rpc_error(-5, "Not all transactions found in specified or retrieved block", self.nodes[0].gettxoutproof, [txid1, txid3])
@@ -592,3 +592,10 @@ def find_vout_for_address(node, txid, addr):
         if addr == tx["vout"][i]["scriptPubKey"]["address"]:
             return i
     raise RuntimeError("Vout not found for address: txid=%s, addr=%s" % (txid, addr))
+
+
+def sync_txindex(test_framework, node):
+    test_framework.log.debug("Waiting for node txindex to sync")
+    sync_start = int(time.time())
+    test_framework.wait_until(lambda: node.getindexinfo("txindex")["txindex"]["synced"])
+    test_framework.log.debug(f"Synced in {time.time() - sync_start} seconds")
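
For context, a hedged sketch of how this new helper is meant to be called from a test, mirroring the call sites added in the hunks above; the node index and txid are illustrative, not from this diff.

# Illustrative only: wait for a -txindex node's index to catch up before
# asking for an arbitrary (non-mempool, no-blockhash) transaction.
# `self` is assumed to be a BitcoinTestFramework instance.
def get_indexed_tx(self, txid):
    node = self.nodes[0]        # assumed to run with -txindex
    sync_txindex(self, node)    # blocks until getindexinfo reports "synced": True
    return node.getrawtransaction(txid)
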
@@ -117,7 +117,6 @@ class AddressInputTypeGrouping(BitcoinTestFramework):
         self.extra_args = [
             [
                 "-addresstype=bech32",
-                "-txindex",
             ],
             [
                 "-addresstype=p2sh-segwit",