Merge bitcoin/bitcoin#33042: refactor: inline constant return values from dbwrapper write methods
743abbcbde refactor: inline constant return value of `BlockTreeDB::WriteBatchSync` and `BlockManager::WriteBlockIndexDB` and `BlockTreeDB::WriteFlag` (Lőrinc)
e030240e90 refactor: inline constant return value of `CDBWrapper::Erase` and `BlockTreeDB::WriteReindexing` (Lőrinc)
cdab9480e9 refactor: inline constant return value of `CDBWrapper::Write` (Lőrinc)
d1847cf5b5 refactor: inline constant return value of `TxIndex::DB::WriteTxs` (Lőrinc)
50b63a5698 refactor: inline constant return value of `CDBWrapper::WriteBatch` (Lőrinc)

Pull request description:

  Related to https://github.com/bitcoin/bitcoin/pull/31144#discussion_r2223587480

  ### Summary

  `WriteBatch` always returns `true` - errors are handled by throwing `dbwrapper_error` instead.

  ### Context

  The boolean return value of the `Write` methods is confusing because it's inconsistent with `CDBWrapper::Read`, which catches exceptions and returns a boolean to indicate success or failure. It's bad enough that `Read` returns while `Write` throws - but it's far worse that `Write` advertises a return value when it actually communicates errors through exceptions.

  ### Solution

  This PR removes the constant return values from the write methods and inlines `true` at their call sites. Many upstream methods had boolean return values only because they were propagating these constants - those have been cleaned up as well.

  Methods that returned a constant `true` and now return `void`:
  - `CDBWrapper::WriteBatch`, `CDBWrapper::Write`, `CDBWrapper::Erase`
  - `TxIndex::DB::WriteTxs`
  - `BlockTreeDB::WriteReindexing`, `BlockTreeDB::WriteBatchSync`, `BlockTreeDB::WriteFlag`
  - `BlockManager::WriteBlockIndexDB`

  ### Note

  `CCoinsView::BatchWrite` (and transitively `CCoinsViewCache::Flush` & `CCoinsViewCache::Sync`) were intentionally not changed here. While all implementations return `true`, the base `CCoinsView::BatchWrite` returns `false`. Changing this would cause `coins_view` tests to fail with:

  > terminating due to uncaught exception of type std::logic_error: Not all unspent flagged entries were cleared

  We can fix that in a follow-up PR.

ACKs for top commit:
  achow101:
    ACK 743abbcbde
  janb84:
    ACK 743abbcbde
  TheCharlatan:
    ACK 743abbcbde
  sipa:
    ACK 743abbcbde

Tree-SHA512: b2a550bff066216f1958d2dd9a7ef6a9949de518cc636f8ab9c670e0b7a330c1eb8c838e458a8629acb8ac980cea6616955cd84436a7b8ab9096f6d648073b1e
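For readers skimming the diff below, here is a minimal caller-side sketch of what the contract change means in practice. The `StoreOrReport` helper and its arguments are hypothetical illustration, not code from this PR: before the change, the `bool` returned by `CDBWrapper::Write` was always `true`, so testing it could never detect a failure; after the change, the only failure signal is the `dbwrapper_error` exception that LevelDB errors already triggered.

```cpp
#include <dbwrapper.h> // CDBWrapper, dbwrapper_error (Bitcoin Core source tree)

// Hypothetical helper, for illustration only: translate the exception-based
// failure signal back into a bool for a caller that wants one.
template <typename K, typename V>
bool StoreOrReport(CDBWrapper& db, const K& key, const V& value)
{
    // Before this PR the natural-looking idiom below compiled but was
    // misleading: Write() always returned true, and a real LevelDB failure
    // threw dbwrapper_error before the return value was ever consulted.
    //
    //     if (!db.Write(key, value)) return false; // dead branch
    //
    // After this PR, Write() returns void, so the exception is the only
    // remaining failure path and callers handle it explicitly.
    try {
        db.Write(key, value);
        return true;
    } catch (const dbwrapper_error&) {
        return false; // or log / propagate, depending on the call site
    }
}
```

This is also why `Chainstate::FlushStateToDisk` can drop its `FatalError` branch around `WriteBlockIndexDB` in the diff below: a genuine write failure now surfaces as an exception rather than a `false` return that was never actually produced.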
@@ -274,7 +274,7 @@ CDBWrapper::~CDBWrapper()
     DBContext().options.env = nullptr;
 }
 
-bool CDBWrapper::WriteBatch(CDBBatch& batch, bool fSync)
+void CDBWrapper::WriteBatch(CDBBatch& batch, bool fSync)
 {
     const bool log_memory = LogAcceptCategory(BCLog::LEVELDB, BCLog::Level::Debug);
     double mem_before = 0;
@@ -288,7 +288,6 @@ bool CDBWrapper::WriteBatch(CDBBatch& batch, bool fSync)
         LogDebug(BCLog::LEVELDB, "WriteBatch memory usage: db=%s, before=%.1fMiB, after=%.1fMiB\n",
                  m_name, mem_before, mem_after);
     }
-    return true;
 }
 
 size_t CDBWrapper::DynamicMemoryUsage() const
@@ -230,11 +230,11 @@ public:
     }
 
     template <typename K, typename V>
-    bool Write(const K& key, const V& value, bool fSync = false)
+    void Write(const K& key, const V& value, bool fSync = false)
     {
         CDBBatch batch(*this);
         batch.Write(key, value);
-        return WriteBatch(batch, fSync);
+        WriteBatch(batch, fSync);
     }
 
     //! @returns filesystem path to the on-disk data.
@@ -255,14 +255,14 @@ public:
     }
 
     template <typename K>
-    bool Erase(const K& key, bool fSync = false)
+    void Erase(const K& key, bool fSync = false)
     {
         CDBBatch batch(*this);
         batch.Erase(key);
-        return WriteBatch(batch, fSync);
+        WriteBatch(batch, fSync);
     }
 
-    bool WriteBatch(CDBBatch& batch, bool fSync = false);
+    void WriteBatch(CDBBatch& batch, bool fSync = false);
 
     // Get an estimate of LevelDB memory usage (in bytes).
     size_t DynamicMemoryUsage() const;
@@ -275,7 +275,7 @@ bool BaseIndex::Commit()
         ok = CustomCommit(batch);
         if (ok) {
             GetDB().WriteBestBlock(batch, GetLocator(*m_chain, m_best_block_index.load()->GetBlockHash()));
-            ok = GetDB().WriteBatch(batch);
+            GetDB().WriteBatch(batch);
         }
     }
     if (!ok) {
@@ -311,9 +311,7 @@ bool BlockFilterIndex::Write(const BlockFilter& filter, uint32_t block_height, c
     value.second.header = filter_header;
     value.second.pos = m_next_filter_pos;
 
-    if (!m_db->Write(DBHeightKey(block_height), value)) {
-        return false;
-    }
+    m_db->Write(DBHeightKey(block_height), value);
 
     m_next_filter_pos.nPos += bytes_written;
     return true;
@@ -358,7 +356,7 @@ bool BlockFilterIndex::CustomRemove(const interfaces::BlockInfo& block)
     // But since this creates new references to the filter, the position should get updated here
     // atomically as well in case Commit fails.
     batch.Write(DB_FILTER_POS, m_next_filter_pos);
-    if (!m_db->WriteBatch(batch)) return false;
+    m_db->WriteBatch(batch);
 
     // Update cached header to the previous block hash
     m_last_header = *Assert(ReadFilterHeader(block.height - 1, *Assert(block.prev_hash)));
@@ -253,7 +253,8 @@ bool CoinStatsIndex::CustomAppend(const interfaces::BlockInfo& block)
 
     // Intentionally do not update DB_MUHASH here so it stays in sync with
     // DB_BEST_BLOCK, and the index is not corrupted if there is an unclean shutdown.
-    return m_db->Write(DBHeightKey(block.height), value);
+    m_db->Write(DBHeightKey(block.height), value);
+    return true;
 }
 
 [[nodiscard]] static bool CopyHeightIndexToHashIndex(CDBIterator& db_it, CDBBatch& batch,
@@ -290,7 +291,7 @@ bool CoinStatsIndex::CustomRemove(const interfaces::BlockInfo& block)
         return false;
     }
 
-    if (!m_db->WriteBatch(batch)) return false;
+    m_db->WriteBatch(batch);
 
     if (!RevertBlock(block)) {
         return false; // failure cause logged internally
@@ -46,7 +46,7 @@ public:
     bool ReadTxPos(const Txid& txid, CDiskTxPos& pos) const;
 
     /// Write a batch of transaction positions to the DB.
-    [[nodiscard]] bool WriteTxs(const std::vector<std::pair<Txid, CDiskTxPos>>& v_pos);
+    void WriteTxs(const std::vector<std::pair<Txid, CDiskTxPos>>& v_pos);
 };
 
 TxIndex::DB::DB(size_t n_cache_size, bool f_memory, bool f_wipe) :
@@ -58,13 +58,13 @@ bool TxIndex::DB::ReadTxPos(const Txid& txid, CDiskTxPos& pos) const
     return Read(std::make_pair(DB_TXINDEX, txid.ToUint256()), pos);
 }
 
-bool TxIndex::DB::WriteTxs(const std::vector<std::pair<Txid, CDiskTxPos>>& v_pos)
+void TxIndex::DB::WriteTxs(const std::vector<std::pair<Txid, CDiskTxPos>>& v_pos)
 {
     CDBBatch batch(*this);
     for (const auto& [txid, pos] : v_pos) {
         batch.Write(std::make_pair(DB_TXINDEX, txid.ToUint256()), pos);
     }
-    return WriteBatch(batch);
+    WriteBatch(batch);
 }
 
 TxIndex::TxIndex(std::unique_ptr<interfaces::Chain> chain, size_t n_cache_size, bool f_memory, bool f_wipe)
@@ -86,7 +86,8 @@ bool TxIndex::CustomAppend(const interfaces::BlockInfo& block)
         vPos.emplace_back(tx->GetHash(), pos);
         pos.nTxOffset += ::GetSerializeSize(TX_WITH_WITNESS(*tx));
     }
-    return m_db->WriteTxs(vPos);
+    m_db->WriteTxs(vPos);
+    return true;
 }
 
 BaseIndex::DB& TxIndex::GetDB() const { return *m_db; }
@@ -59,12 +59,12 @@ bool BlockTreeDB::ReadBlockFileInfo(int nFile, CBlockFileInfo& info)
     return Read(std::make_pair(DB_BLOCK_FILES, nFile), info);
 }
 
-bool BlockTreeDB::WriteReindexing(bool fReindexing)
+void BlockTreeDB::WriteReindexing(bool fReindexing)
 {
     if (fReindexing) {
-        return Write(DB_REINDEX_FLAG, uint8_t{'1'});
+        Write(DB_REINDEX_FLAG, uint8_t{'1'});
     } else {
-        return Erase(DB_REINDEX_FLAG);
+        Erase(DB_REINDEX_FLAG);
    }
 }
 
@@ -78,7 +78,7 @@ bool BlockTreeDB::ReadLastBlockFile(int& nFile)
     return Read(DB_LAST_BLOCK, nFile);
 }
 
-bool BlockTreeDB::WriteBatchSync(const std::vector<std::pair<int, const CBlockFileInfo*>>& fileInfo, int nLastFile, const std::vector<const CBlockIndex*>& blockinfo)
+void BlockTreeDB::WriteBatchSync(const std::vector<std::pair<int, const CBlockFileInfo*>>& fileInfo, int nLastFile, const std::vector<const CBlockIndex*>& blockinfo)
 {
     CDBBatch batch(*this);
     for (const auto& [file, info] : fileInfo) {
@@ -88,12 +88,12 @@ bool BlockTreeDB::WriteBatchSync(const std::vector<std::pair<int, const CBlockFi
     for (const CBlockIndex* bi : blockinfo) {
         batch.Write(std::make_pair(DB_BLOCK_INDEX, bi->GetBlockHash()), CDiskBlockIndex{bi});
     }
-    return WriteBatch(batch, true);
+    WriteBatch(batch, true);
 }
 
-bool BlockTreeDB::WriteFlag(const std::string& name, bool fValue)
+void BlockTreeDB::WriteFlag(const std::string& name, bool fValue)
 {
-    return Write(std::make_pair(DB_FLAG, name), fValue ? uint8_t{'1'} : uint8_t{'0'});
+    Write(std::make_pair(DB_FLAG, name), fValue ? uint8_t{'1'} : uint8_t{'0'});
 }
 
 bool BlockTreeDB::ReadFlag(const std::string& name, bool& fValue)
@@ -477,7 +477,7 @@ bool BlockManager::LoadBlockIndex(const std::optional<uint256>& snapshot_blockha
     return true;
 }
 
-bool BlockManager::WriteBlockIndexDB()
+void BlockManager::WriteBlockIndexDB()
 {
     AssertLockHeld(::cs_main);
     std::vector<std::pair<int, const CBlockFileInfo*>> vFiles;
@@ -493,10 +493,7 @@ bool BlockManager::WriteBlockIndexDB()
         m_dirty_blockindex.erase(it++);
     }
     int max_blockfile = WITH_LOCK(cs_LastBlockFile, return this->MaxBlockfileNum());
-    if (!m_block_tree_db->WriteBatchSync(vFiles, max_blockfile, vBlocks)) {
-        return false;
-    }
-    return true;
+    m_block_tree_db->WriteBatchSync(vFiles, max_blockfile, vBlocks);
 }
 
 bool BlockManager::LoadBlockIndexDB(const std::optional<uint256>& snapshot_blockhash)
@@ -52,12 +52,12 @@ class BlockTreeDB : public CDBWrapper
 {
 public:
     using CDBWrapper::CDBWrapper;
-    bool WriteBatchSync(const std::vector<std::pair<int, const CBlockFileInfo*>>& fileInfo, int nLastFile, const std::vector<const CBlockIndex*>& blockinfo);
+    void WriteBatchSync(const std::vector<std::pair<int, const CBlockFileInfo*>>& fileInfo, int nLastFile, const std::vector<const CBlockIndex*>& blockinfo);
     bool ReadBlockFileInfo(int nFile, CBlockFileInfo& info);
     bool ReadLastBlockFile(int& nFile);
-    bool WriteReindexing(bool fReindexing);
+    void WriteReindexing(bool fReindexing);
     void ReadReindexing(bool& fReindexing);
-    bool WriteFlag(const std::string& name, bool fValue);
+    void WriteFlag(const std::string& name, bool fValue);
     bool ReadFlag(const std::string& name, bool& fValue);
     bool LoadBlockIndexGuts(const Consensus::Params& consensusParams, std::function<CBlockIndex*(const uint256&)> insertBlockIndex, const util::SignalInterrupt& interrupt)
         EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
@@ -300,7 +300,7 @@ public:
 
     std::unique_ptr<BlockTreeDB> m_block_tree_db GUARDED_BY(::cs_main);
 
-    bool WriteBlockIndexDB() EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
+    void WriteBlockIndexDB() EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
     bool LoadBlockIndexDB(const std::optional<uint256>& snapshot_blockhash)
         EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
 
@@ -39,7 +39,7 @@ BOOST_AUTO_TEST_CASE(dbwrapper)
         for (uint8_t k{0}; k < 10; ++k) {
             uint8_t key{k};
             uint256 value{m_rng.rand256()};
-            BOOST_CHECK(dbw.Write(key, value));
+            dbw.Write(key, value);
             key_values.emplace_back(key, value);
         }
     }
@@ -86,7 +86,7 @@ BOOST_AUTO_TEST_CASE(dbwrapper_basic_data)
         std::string key_block = "b" + m_rng.rand256().ToString();
 
         uint256 in_block = m_rng.rand256();
-        BOOST_CHECK(dbw.Write(key_block, in_block));
+        dbw.Write(key_block, in_block);
         BOOST_CHECK(dbw.Read(key_block, res));
         BOOST_CHECK_EQUAL(res.ToString(), in_block.ToString());
 
@@ -94,7 +94,7 @@ BOOST_AUTO_TEST_CASE(dbwrapper_basic_data)
         std::string key_file = strprintf("f%04x", m_rng.rand32());
 
         uint256 in_file_info = m_rng.rand256();
-        BOOST_CHECK(dbw.Write(key_file, in_file_info));
+        dbw.Write(key_file, in_file_info);
         BOOST_CHECK(dbw.Read(key_file, res));
         BOOST_CHECK_EQUAL(res.ToString(), in_file_info.ToString());
 
@@ -102,7 +102,7 @@ BOOST_AUTO_TEST_CASE(dbwrapper_basic_data)
         std::string key_transaction = "t" + m_rng.rand256().ToString();
 
         uint256 in_transaction = m_rng.rand256();
-        BOOST_CHECK(dbw.Write(key_transaction, in_transaction));
+        dbw.Write(key_transaction, in_transaction);
         BOOST_CHECK(dbw.Read(key_transaction, res));
         BOOST_CHECK_EQUAL(res.ToString(), in_transaction.ToString());
 
@@ -110,28 +110,28 @@ BOOST_AUTO_TEST_CASE(dbwrapper_basic_data)
         std::string key_utxo = "c" + m_rng.rand256().ToString();
 
         uint256 in_utxo = m_rng.rand256();
-        BOOST_CHECK(dbw.Write(key_utxo, in_utxo));
+        dbw.Write(key_utxo, in_utxo);
         BOOST_CHECK(dbw.Read(key_utxo, res));
         BOOST_CHECK_EQUAL(res.ToString(), in_utxo.ToString());
 
         //Simulate last block file number - "l"
         uint8_t key_last_blockfile_number{'l'};
         uint32_t lastblockfilenumber = m_rng.rand32();
-        BOOST_CHECK(dbw.Write(key_last_blockfile_number, lastblockfilenumber));
+        dbw.Write(key_last_blockfile_number, lastblockfilenumber);
         BOOST_CHECK(dbw.Read(key_last_blockfile_number, res_uint_32));
         BOOST_CHECK_EQUAL(lastblockfilenumber, res_uint_32);
 
         //Simulate Is Reindexing - "R"
         uint8_t key_IsReindexing{'R'};
         bool isInReindexing = m_rng.randbool();
-        BOOST_CHECK(dbw.Write(key_IsReindexing, isInReindexing));
+        dbw.Write(key_IsReindexing, isInReindexing);
         BOOST_CHECK(dbw.Read(key_IsReindexing, res_bool));
         BOOST_CHECK_EQUAL(isInReindexing, res_bool);
 
         //Simulate last block hash up to which UXTO covers - 'B'
         uint8_t key_lastblockhash_uxto{'B'};
         uint256 lastblock_hash = m_rng.rand256();
-        BOOST_CHECK(dbw.Write(key_lastblockhash_uxto, lastblock_hash));
+        dbw.Write(key_lastblockhash_uxto, lastblock_hash);
         BOOST_CHECK(dbw.Read(key_lastblockhash_uxto, res));
         BOOST_CHECK_EQUAL(lastblock_hash, res);
 
@@ -142,7 +142,7 @@ BOOST_AUTO_TEST_CASE(dbwrapper_basic_data)
         std::string key_file_option = strprintf("%s%01x%s", file_option_tag, filename_length, filename);
 
         bool in_file_bool = m_rng.randbool();
-        BOOST_CHECK(dbw.Write(key_file_option, in_file_bool));
+        dbw.Write(key_file_option, in_file_bool);
         BOOST_CHECK(dbw.Read(key_file_option, res_bool));
         BOOST_CHECK_EQUAL(res_bool, in_file_bool);
     }
@@ -173,7 +173,7 @@ BOOST_AUTO_TEST_CASE(dbwrapper_batch)
         // Remove key3 before it's even been written
         batch.Erase(key3);
 
-        BOOST_CHECK(dbw.WriteBatch(batch));
+        dbw.WriteBatch(batch);
 
         BOOST_CHECK(dbw.Read(key, res));
         BOOST_CHECK_EQUAL(res.ToString(), in.ToString());
@@ -195,10 +195,10 @@ BOOST_AUTO_TEST_CASE(dbwrapper_iterator)
         // The two keys are intentionally chosen for ordering
         uint8_t key{'j'};
         uint256 in = m_rng.rand256();
-        BOOST_CHECK(dbw.Write(key, in));
+        dbw.Write(key, in);
         uint8_t key2{'k'};
         uint256 in2 = m_rng.rand256();
-        BOOST_CHECK(dbw.Write(key2, in2));
+        dbw.Write(key2, in2);
 
         std::unique_ptr<CDBIterator> it(const_cast<CDBWrapper&>(dbw).NewIterator());
 
@@ -238,7 +238,7 @@ BOOST_AUTO_TEST_CASE(existing_data_no_obfuscate)
     uint256 in = m_rng.rand256();
     uint256 res;
 
-    BOOST_CHECK(dbw->Write(key, in));
+    dbw->Write(key, in);
     BOOST_CHECK(dbw->Read(key, res));
     BOOST_CHECK_EQUAL(res.ToString(), in.ToString());
 
@@ -261,7 +261,7 @@ BOOST_AUTO_TEST_CASE(existing_data_no_obfuscate)
     uint256 res3;
 
     // Check that we can write successfully
-    BOOST_CHECK(odbw.Write(key, in2));
+    odbw.Write(key, in2);
     BOOST_CHECK(odbw.Read(key, res3));
     BOOST_CHECK_EQUAL(res3.ToString(), in2.ToString());
 }
@@ -279,7 +279,7 @@ BOOST_AUTO_TEST_CASE(existing_data_reindex)
     uint256 in = m_rng.rand256();
     uint256 res;
 
-    BOOST_CHECK(dbw->Write(key, in));
+    dbw->Write(key, in);
     BOOST_CHECK(dbw->Read(key, res));
     BOOST_CHECK_EQUAL(res.ToString(), in.ToString());
 
@@ -298,7 +298,7 @@ BOOST_AUTO_TEST_CASE(existing_data_reindex)
     uint256 res3;
 
     // Check that we can write successfully
-    BOOST_CHECK(odbw.Write(key, in2));
+    odbw.Write(key, in2);
     BOOST_CHECK(odbw.Read(key, res3));
     BOOST_CHECK_EQUAL(res3.ToString(), in2.ToString());
 }
@@ -310,7 +310,7 @@ BOOST_AUTO_TEST_CASE(iterator_ordering)
     for (int x=0x00; x<256; ++x) {
         uint8_t key = x;
         uint32_t value = x*x;
-        if (!(x & 1)) BOOST_CHECK(dbw.Write(key, value));
+        if (!(x & 1)) dbw.Write(key, value);
     }
 
     // Check that creating an iterator creates a snapshot
@@ -319,7 +319,7 @@ BOOST_AUTO_TEST_CASE(iterator_ordering)
     for (unsigned int x=0x00; x<256; ++x) {
         uint8_t key = x;
         uint32_t value = x*x;
-        if (x & 1) BOOST_CHECK(dbw.Write(key, value));
+        if (x & 1) dbw.Write(key, value);
     }
 
     for (const int seek_start : {0x00, 0x80}) {
@@ -381,7 +381,7 @@ BOOST_AUTO_TEST_CASE(iterator_string_ordering)
             for (int z = 0; z < y; ++z)
                 key += key;
             uint32_t value = x*x;
-            BOOST_CHECK(dbw.Write(StringContentsSerializer{key}, value));
+            dbw.Write(StringContentsSerializer{key}, value);
         }
     }
 
@@ -89,7 +89,7 @@ FUZZ_TARGET(block_index, .init = init_block_index)
     }
 
     // Store these files and blocks in the block index. It should not fail.
-    assert(block_index.WriteBatchSync(files_info, files_count - 1, blocks_info));
+    block_index.WriteBatchSync(files_info, files_count - 1, blocks_info);
 
     // We should be able to read every block file info we stored. Its value should correspond to
     // what we stored above.
@@ -149,9 +149,9 @@ bool CCoinsViewDB::BatchWrite(CoinsViewCacheCursor& cursor, const uint256 &hashB
     batch.Write(DB_BEST_BLOCK, hashBlock);
 
     LogDebug(BCLog::COINDB, "Writing final batch of %.2f MiB\n", batch.ApproximateSize() * (1.0 / 1048576.0));
-    bool ret = m_db->WriteBatch(batch);
+    m_db->WriteBatch(batch);
     LogDebug(BCLog::COINDB, "Committed %u changed transaction outputs (out of %u) to coin database...\n", (unsigned int)changed, (unsigned int)count);
-    return ret;
+    return true;
 }
 
 size_t CCoinsViewDB::EstimateSize() const
@@ -2871,9 +2871,7 @@ bool Chainstate::FlushStateToDisk(
             {
                 LOG_TIME_MILLIS_WITH_CATEGORY("write block index to disk", BCLog::BENCH);
 
-                if (!m_blockman.WriteBlockIndexDB()) {
-                    return FatalError(m_chainman.GetNotifications(), state, _("Failed to write to block index database."));
-                }
+                m_blockman.WriteBlockIndexDB();
             }
             // Finally remove any pruned files
             if (fFlushForPrune) {