From 61d678a6e35785eaaf492648df16f49753fd5ee6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?L=C5=91rinc?= Date: Tue, 30 Dec 2025 13:45:56 +0100 Subject: [PATCH 1/4] refactor: use `DataStream::clear` in `::read` and `::ignore` When `DataStream` is fully consumed, both `read()` and `ignore()` reset it to an empty state by clearing the backing buffer and resetting the read position. Call `clear()` in both places instead of open-coding the same state transition. This keeps the behavior unchanged while documenting the fully-consumed reset in one place. Remove the unused `Compact()` method as well - it has been unused for a long time and can be added back if it is ever needed. --- src/streams.h | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/src/streams.h b/src/streams.h index d6d6d57fb93..4fc5879f67e 100644 --- a/src/streams.h +++ b/src/streams.h @@ -206,12 +206,6 @@ public: value_type* data() { return vch.data() + m_read_pos; } const value_type* data() const { return vch.data() + m_read_pos; } - inline void Compact() - { - vch.erase(vch.begin(), vch.begin() + m_read_pos); - m_read_pos = 0; - } - bool Rewind(std::optional n = std::nullopt) { // Total rewind if no size is passed @@ -243,8 +237,8 @@ public: } memcpy(dst.data(), &vch[m_read_pos], dst.size()); if (next_read_pos.value() == vch.size()) { - m_read_pos = 0; - vch.clear(); + // If fully consumed, reset to empty state. + clear(); return; } m_read_pos = next_read_pos.value(); @@ -258,8 +252,8 @@ public: throw std::ios_base::failure("DataStream::ignore(): end of data"); } if (next_read_pos.value() == vch.size()) { - m_read_pos = 0; - vch.clear(); + // If all bytes are ignored, reset to empty state. 
+ clear(); return; } m_read_pos = next_read_pos.value(); From b8eb6c2081a250333279b0da114d974f6e22a305 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?L=C5=91rinc?= Date: Tue, 7 Apr 2026 23:32:05 +0300 Subject: [PATCH 2/4] refactor: use `SpanReader` in `TestBlockAndIndex` `TestBlockAndIndex` still deserialized its fixed block fixture through `DataStream` and appended a dummy byte to avoid compaction after full consumption. Use `SpanReader` for that fixture instead. This removes the leftover dummy-byte workaround and reads the immutable fixture through a read-only view. --- src/bench/rpc_blockchain.cpp | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/src/bench/rpc_blockchain.cpp b/src/bench/rpc_blockchain.cpp index 0e89ac78a13..fe83c4a26f7 100644 --- a/src/bench/rpc_blockchain.cpp +++ b/src/bench/rpc_blockchain.cpp @@ -31,10 +31,7 @@ struct TestBlockAndIndex { TestBlockAndIndex() { - DataStream stream{benchmark::data::block413567}; - std::byte a{0}; - stream.write({&a, 1}); // Prevent compaction - + SpanReader stream{benchmark::data::block413567}; stream >> TX_WITH_WITNESS(block); blockHash = block.GetHash(); From 2529f255554be31582797594d299fb74987bc0df Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?L=C5=91rinc?= Date: Tue, 7 Apr 2026 23:33:49 +0300 Subject: [PATCH 3/4] refactor: use `SpanReader` in `PrevectorDeserialize` `PrevectorDeserialize` only needs a reusable read-only view over fixed serialized bytes. Keeping a mutable `DataStream` around just to call `Rewind()` is unnecessary. Rebuild a fresh `SpanReader` for each benchmark run and remove `DataStream::Rewind()`, whose remaining use was this benchmark-only reset path. The benchmark can now serialize exactly the 1000 entries it deserializes, so drop the stale extra element that used to avoid full consumption. 
Co-authored-by: MarcoFalke <*~=`'#}+{/-|&$^_@721217.xyz> --- src/bench/prevector.cpp | 10 +++++----- src/streams.h | 15 --------------- 2 files changed, 5 insertions(+), 20 deletions(-) diff --git a/src/bench/prevector.cpp b/src/bench/prevector.cpp index 8d386ec2c44..e842aec4ed2 100644 --- a/src/bench/prevector.cpp +++ b/src/bench/prevector.cpp @@ -66,22 +66,22 @@ static void PrevectorResize(benchmark::Bench& bench) template <typename T> static void PrevectorDeserialize(benchmark::Bench& bench) { - DataStream s0{}; + DataStream data{}; prevector<CScriptBase::STATIC_SIZE, T> t0; t0.resize(CScriptBase::STATIC_SIZE); for (auto x = 0; x < 900; ++x) { - s0 << t0; + data << t0; } t0.resize(100); - for (auto x = 0; x < 101; ++x) { - s0 << t0; + for (auto x = 0; x < 100; ++x) { + data << t0; } bench.batch(1000).run([&] { + SpanReader s0{data}; prevector<CScriptBase::STATIC_SIZE, T> t1; for (auto x = 0; x < 1000; ++x) { s0 >> t1; } - s0.Rewind(); }); } diff --git a/src/streams.h b/src/streams.h index 4fc5879f67e..6ef9d164494 100644 --- a/src/streams.h +++ b/src/streams.h @@ -206,21 +206,6 @@ public: value_type* data() { return vch.data() + m_read_pos; } const value_type* data() const { return vch.data() + m_read_pos; } - bool Rewind(std::optional<size_type> n = std::nullopt) - { - // Total rewind if no size is passed - if (!n) { - m_read_pos = 0; - return true; - } - // Rewind by n characters if the buffer hasn't been compacted yet - if (*n > m_read_pos) - return false; - m_read_pos -= *n; - return true; - } - - // // Stream subset // From 13c8df4d5a9eeca18a77644fada1816c0ec10308 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?L=C5=91rinc?= Date: Tue, 7 Apr 2026 21:52:24 +0300 Subject: [PATCH 4/4] refactor: replace `DataStream` with `SpanReader` in block deserialization tests These benchmark inputs are immutable fixture bytes, so `DataStream` adds an unnecessary owned buffer and the setup needed to recreate or preserve its state. Use `SpanReader` for block deserialization in `checkblock` instead. 
This keeps `DeserializeBlockTest` focused on deserialization work, while `CheckBlockTest` still uses untimed setup only to rebuild a fresh uncached `CBlock` for the timed `CheckBlock()` call. Co-authored-by: MarcoFalke <*~=`'#}+{/-|&$^_@721217.xyz> --- src/bench/checkblock.cpp | 28 +++++++++++----------------- 1 file changed, 11 insertions(+), 17 deletions(-) diff --git a/src/bench/checkblock.cpp b/src/bench/checkblock.cpp index cbba543f2d8..9faf9ac137a 100644 --- a/src/bench/checkblock.cpp +++ b/src/bench/checkblock.cpp @@ -4,22 +4,14 @@ #include #include -#include -#include #include #include #include -#include -#include #include -#include #include #include #include -#include -#include -#include // These are the two major time-sinks which happen after we have fully received // a block off the wire, but before we can relay the block on to peers using @@ -27,27 +19,29 @@ static void DeserializeBlockTest(benchmark::Bench& bench) { - DataStream stream; - bench.unit("block").epochIterations(1) - .setup([&] { stream = DataStream{benchmark::data::block413567}; }) - .run([&] { CBlock block; stream >> TX_WITH_WITNESS(block); }); + const auto block_data{benchmark::data::block413567}; + bench.unit("block").run([&] { + CBlock block; + SpanReader{block_data} >> TX_WITH_WITNESS(block); + assert(block.vtx.size() == 1557); + }); } static void CheckBlockTest(benchmark::Bench& bench) { - ArgsManager bench_args; - const auto chainParams = CreateChainParams(bench_args, ChainType::MAIN); + const auto& chain_params{CChainParams::Main()}; + const auto block_data{benchmark::data::block413567}; CBlock block; bench.unit("block").epochIterations(1) .setup([&] { block = CBlock{}; - DataStream stream{benchmark::data::block413567}; - stream >> TX_WITH_WITNESS(block); + SpanReader{block_data} >> TX_WITH_WITNESS(block); + assert(block.vtx.size() == 1557); }) .run([&] { BlockValidationState validationState; - bool checked = CheckBlock(block, validationState, chainParams->GetConsensus()); + const 
bool checked{CheckBlock(block, validationState, chain_params->GetConsensus())}; assert(checked); }); }