[p2p] overhaul TxOrphanage with smarter limits

This is largely a reimplementation using boost::multi_index_container.
All the same public methods are available. It has an index by outpoint,
per-peer tracking, peer worksets, etc.

A few differences:
- Limits have changed: instead of a global limit of 100 unique orphans,
  we have a maximum number of announcements (which can include duplicate
orphans) and a global memory limit which scales with the number of
peers.
- The maximum announcements limit is 100 to match the original limit,
  but this is actually a stricter limit because the announcement count
is not de-duplicated.
- Eviction strategy: when global limits are reached, a per-peer limit
  comes into play. While limits are exceeded, we choose the peer whose
“DoS score” (max usage / limit ratio for announcements and memory
limits) is highest and evict announcements by entry time, sorting
non-reconsiderable ones before reconsiderable ones. Since announcements
are unique by (wtxid, peer), as long as 1 announcement remains for a
transaction, it remains in the orphanage.
- This eviction strategy means no peer can influence the eviction of
  another peer’s orphans.
- Also, since global limits are a multiple of per-peer limits, as long
  as a peer does not exceed its limits, its orphans are protected from
eviction.
- Orphans no longer expire, since older announcements are generally
  removed before newer ones.
- GetChildrenFromSamePeer returns the transactions from newest to
  oldest.

Co-authored-by: Pieter Wuille <pieter@wuille.net>
This commit is contained in:
glozow
2025-04-02 17:29:38 -04:00
parent 1a41e7962d
commit 067365d2a8
4 changed files with 614 additions and 338 deletions

View File

@@ -188,6 +188,7 @@ bool TxDownloadManagerImpl::AddTxAnnouncement(NodeId peer, const GenTxid& gtxid,
if (MaybeAddOrphanResolutionCandidate(unique_parents, *wtxid, peer, now)) {
m_orphanage->AddAnnouncer(orphan_tx->GetWitnessHash(), peer);
m_orphanage->LimitOrphans(m_opts.m_rng);
}
// Return even if the peer isn't an orphan resolution candidate. This would be caught by AlreadyHaveTx.
@@ -420,8 +421,6 @@ node::RejectedTxTodo TxDownloadManagerImpl::MempoolRejectedTx(const CTransaction
m_txrequest.ForgetTxHash(tx.GetWitnessHash());
// DoS prevention: do not allow m_orphanage to grow unbounded (see CVE-2012-3789)
// Note that, if the orphanage reaches capacity, it's possible that we immediately evict
// the transaction we just added.
m_orphanage->LimitOrphans(m_opts.m_rng);
} else {
unique_parents.clear();

View File

@@ -8,68 +8,205 @@
#include <logging.h>
#include <policy/policy.h>
#include <primitives/transaction.h>
#include <util/feefrac.h>
#include <util/time.h>
#include <util/hasher.h>
#include <boost/multi_index/indexed_by.hpp>
#include <boost/multi_index/ordered_index.hpp>
#include <boost/multi_index/tag.hpp>
#include <boost/multi_index_container.hpp>
#include <cassert>
#include <cmath>
#include <unordered_map>
namespace node {
/** Minimum NodeId for lower_bound lookups (in practice, NodeIds start at 0). */
static constexpr NodeId MIN_PEER{std::numeric_limits<NodeId>::min()};
/** Maximum NodeId for upper_bound lookups. */
static constexpr NodeId MAX_PEER{std::numeric_limits<NodeId>::max()};
class TxOrphanageImpl final : public TxOrphanage {
private:
struct OrphanTx : public OrphanTxBase {
NodeSeconds nTimeExpire;
size_t list_pos;
};
// Type alias for sequence numbers
using SequenceNumber = uint64_t;
/** Global sequence number, increment each time an announcement is added. */
SequenceNumber m_current_sequence{0};
/** Total usage (weight) of all entries in m_orphans. */
TxOrphanage::Usage m_total_orphan_usage{0};
/** Total number of <peer, tx> pairs. Can be larger than m_orphans.size() because multiple peers
* may have announced the same orphan. */
TxOrphanage::Count m_total_announcements{0};
/** Map from wtxid to orphan transaction record. Limited by
* DEFAULT_MAX_ORPHAN_TRANSACTIONS */
std::map<Wtxid, OrphanTx> m_orphans;
struct PeerOrphanInfo {
/** List of transactions that should be reconsidered: added to in AddChildrenToWorkSet,
* removed from one-by-one with each call to GetTxToReconsider. The wtxids may refer to
* transactions that are no longer present in orphanage; these are lazily removed in
* GetTxToReconsider. */
std::set<Wtxid> m_work_set;
/** Total weight of orphans for which this peer is an announcer.
* If orphans are provided by different peers, its weight will be accounted for in each
* PeerOrphanInfo, so the total of all peers' m_total_usage may be larger than
* m_total_orphan_size. If a peer is removed as an announcer, even if the orphan still
* remains in the orphanage, this number will be decremented. */
TxOrphanage::Usage m_total_usage{0};
};
std::map<NodeId, PeerOrphanInfo> m_peer_orphanage_info;
using OrphanMap = decltype(m_orphans);
struct IteratorComparator
/** One orphan announcement. Each announcement (i.e. combination of wtxid, nodeid) is unique. There may be multiple
* announcements for the same tx, and multiple transactions with the same txid but different wtxid are possible. */
struct Announcement
{
template<typename I>
bool operator()(const I& a, const I& b) const
{
return a->first < b->first;
const CTransactionRef m_tx;
/** Which peer announced this tx */
const NodeId m_announcer;
/** What order this transaction entered the orphanage. */
const SequenceNumber m_entry_sequence;
/** Whether this tx should be reconsidered. Always starts out false. A peer's workset is the collection of all
* announcements with m_reconsider=true. */
bool m_reconsider{false};
Announcement(const CTransactionRef& tx, NodeId peer, SequenceNumber seq) :
m_tx{tx}, m_announcer{peer}, m_entry_sequence{seq}
{ }
/** Get an approximation for "memory usage". The total memory is a function of the memory used to store the
* transaction itself, each entry in m_orphans, and each entry in m_outpoint_to_orphan_it. We use weight because
it is often higher than the actual memory usage of the transaction. This metric conveniently encompasses
* m_outpoint_to_orphan_it usage since input data does not get the witness discount, and makes it easier to
* reason about each peer's limits using well-understood transaction attributes. */
TxOrphanage::Usage GetMemUsage() const {
return GetTransactionWeight(*m_tx);
}
/** Get an approximation of how much this transaction contributes to latency in EraseForBlock and EraseForPeer.
* The computation time is a function of the number of entries in m_orphans (thus 1 per announcement) and the
* number of entries in m_outpoint_to_orphan_it (thus an additional 1 for every 10 inputs). Transactions with a
* small number of inputs (9 or fewer) are counted as 1 to make it easier to reason about each peer's limits in
* terms of "normal" transactions. */
TxOrphanage::Count GetLatencyScore() const {
return 1 + (m_tx->vin.size() / 10);
}
};
/** Index from the parents' COutPoint into the m_orphans. Used
* to remove orphan transactions from the m_orphans */
std::map<COutPoint, std::set<OrphanMap::iterator, IteratorComparator>> m_outpoint_to_orphan_it;
// Index by wtxid, then peer
struct ByWtxid {};
using ByWtxidView = std::tuple<Wtxid, NodeId>;
// Key extractor for the ByWtxid index: announcements sort by (wtxid, announcer), so all
// announcements for the same transaction are adjacent, and each (wtxid, peer) pair is unique.
struct WtxidExtractor
{
using result_type = ByWtxidView;
result_type operator()(const Announcement& ann) const
{
return ByWtxidView{ann.m_tx->GetWitnessHash(), ann.m_announcer};
}
};
/** Orphan transactions in vector for quick random eviction */
std::vector<OrphanMap::iterator> m_orphan_list;
// Sort by peer, then by whether it is ready to reconsider, then by recency.
struct ByPeer {};
using ByPeerView = std::tuple<NodeId, bool, SequenceNumber>;
// Key extractor for the ByPeer index: within one peer, announcements not yet marked for
// reconsideration (m_reconsider == false) sort before reconsiderable ones, oldest first.
struct ByPeerViewExtractor {
using result_type = ByPeerView;
result_type operator()(const Announcement& ann) const
{
return ByPeerView{ann.m_announcer, ann.m_reconsider, ann.m_entry_sequence};
}
};
/** Timestamp for the next scheduled sweep of expired orphans */
NodeSeconds m_next_sweep{0s};
// The two indexes (by wtxid and by peer) through which announcements can be accessed.
struct OrphanIndices final : boost::multi_index::indexed_by<
boost::multi_index::ordered_unique<boost::multi_index::tag<ByWtxid>, WtxidExtractor>,
boost::multi_index::ordered_unique<boost::multi_index::tag<ByPeer>, ByPeerViewExtractor>
>{};
// Container holding every announcement, queryable through both tagged indexes above.
using AnnouncementMap = boost::multi_index::multi_index_container<Announcement, OrphanIndices>;
// Convenience alias for an iterator into one of the container's tagged indexes.
template<typename Tag>
using Iter = typename AnnouncementMap::index<Tag>::type::iterator;
AnnouncementMap m_orphans;
const TxOrphanage::Count m_max_global_latency_score{DEFAULT_MAX_ORPHANAGE_LATENCY_SCORE};
const TxOrphanage::Usage m_reserved_usage_per_peer{DEFAULT_RESERVED_ORPHAN_WEIGHT_PER_PEER};
/** Number of unique orphans by wtxid. Less than or equal to the number of entries in m_orphans. */
TxOrphanage::Count m_unique_orphans{0};
/** Memory used by orphans (see Announcement::GetMemUsage()), deduplicated by wtxid. */
TxOrphanage::Usage m_unique_orphan_usage{0};
/** The sum of each unique transaction's latency scores including the inputs only (see Announcement::GetLatencyScore
* but subtract 1 for the announcements themselves). The total orphanage's latency score is given by this value +
* the number of entries in m_orphans. */
TxOrphanage::Count m_unique_rounded_input_scores{0};
/** Index from the parents' outputs to wtxids that exist in m_orphans. Used to find children of
* a transaction that can be reconsidered and to remove entries that conflict with a block.*/
std::unordered_map<COutPoint, std::set<Wtxid>, SaltedOutpointHasher> m_outpoint_to_orphan_it;
/** Per-peer resource accounting used to rank peers for eviction. */
struct PeerDoSInfo {
TxOrphanage::Usage m_total_usage{0};
TxOrphanage::Count m_count_announcements{0};
TxOrphanage::Count m_total_latency_score{0};
bool operator==(const PeerDoSInfo& other) const
{
if (m_total_usage != other.m_total_usage) return false;
if (m_count_announcements != other.m_count_announcements) return false;
return m_total_latency_score == other.m_total_latency_score;
}
/** Account for one more announcement from this peer. */
void Add(const Announcement& ann)
{
m_count_announcements += 1;
m_total_usage += ann.GetMemUsage();
m_total_latency_score += ann.GetLatencyScore();
}
/** Remove one announcement's contribution. Returns true if the peer now has none left. */
bool Subtract(const Announcement& ann)
{
const auto mem_used{ann.GetMemUsage()};
const auto latency{ann.GetLatencyScore()};
Assume(m_total_usage >= mem_used);
Assume(m_total_latency_score >= latency);
Assume(m_count_announcements >= 1);
m_total_usage -= mem_used;
m_total_latency_score -= latency;
m_count_announcements -= 1;
return m_count_announcements == 0;
}
/** There are 2 DoS scores:
* - Latency score (ratio of total latency score / max allowed latency score)
* - Memory score (ratio of total memory usage / max allowed memory usage).
*
* If the peer is using more than the allowed for either resource, its DoS score is > 1.
* A peer having a DoS score > 1 does not necessarily mean that something is wrong, since we
* do not trim unless the orphanage exceeds global limits, but it means that this peer will
* be selected for trimming sooner. If the global latency score or global memory usage
* limits are exceeded, it must be that there is a peer whose DoS score > 1. */
FeeFrac GetDosScore(TxOrphanage::Count max_peer_latency_score, TxOrphanage::Usage max_peer_bytes) const
{
assert(max_peer_latency_score > 0);
assert(max_peer_bytes > 0);
const FeeFrac cpu_score(m_total_latency_score, max_peer_latency_score);
const FeeFrac mem_score(m_total_usage, max_peer_bytes);
// Equivalent to std::max: prefer the memory score only when it strictly exceeds the CPU score.
return mem_score > cpu_score ? mem_score : cpu_score;
}
};
/** Store per-peer statistics. Used to determine each peer's DoS score. The size of this map is used to determine the
* number of peers and thus global {latency score, memory} limits. */
std::unordered_map<NodeId, PeerDoSInfo> m_peer_orphanage_info;
/** Erase from m_orphans and update m_peer_orphanage_info. */
template<typename Tag>
void Erase(Iter<Tag> it);
/** Check if there is exactly one announcement with the same wtxid as it. */
bool IsUnique(Iter<ByWtxid> it) const;
/** Check if the orphanage needs trimming. */
bool NeedsTrim() const;
public:
TxOrphanageImpl() = default;
TxOrphanageImpl(Count max_global_ann, Usage reserved_peer_usage) :
m_max_global_latency_score{max_global_ann},
m_reserved_usage_per_peer{reserved_peer_usage}
{}
~TxOrphanageImpl() noexcept override = default;
TxOrphanage::Count CountAnnouncements() const override;
TxOrphanage::Count CountUniqueOrphans() const override;
TxOrphanage::Count AnnouncementsFromPeer(NodeId peer) const override;
TxOrphanage::Count LatencyScoreFromPeer(NodeId peer) const override;
TxOrphanage::Usage UsageByPeer(NodeId peer) const override;
TxOrphanage::Count MaxGlobalLatencyScore() const override;
TxOrphanage::Count TotalLatencyScore() const override;
TxOrphanage::Usage ReservedPeerUsage() const override;
/** Maximum allowed (deduplicated) latency score for all transactions (see Announcement::GetLatencyScore()). Dynamic
* based on number of peers. Each peer has an equal amount, but the global maximum latency score stays constant. The
* number of peers times MaxPeerLatencyScore() (rounded) adds up to MaxGlobalLatencyScore(). As long as every peer's
* m_total_latency_score / MaxPeerLatencyScore() < 1, MaxGlobalLatencyScore() is not exceeded. */
TxOrphanage::Count MaxPeerLatencyScore() const override;
/** Maximum allowed (deduplicated) memory usage for all transactions (see Announcement::GetMemUsage()). Dynamic based
* on number of peers. More peers means more allowed memory usage. The number of peers times ReservedPeerUsage()
* adds up to MaxGlobalUsage(). As long as every peer's m_total_usage / ReservedPeerUsage() < 1, MaxGlobalUsage() is
* not exceeded. */
TxOrphanage::Usage MaxGlobalUsage() const override;
bool AddTx(const CTransactionRef& tx, NodeId peer) override;
bool AddAnnouncer(const Wtxid& wtxid, NodeId peer) override;
CTransactionRef GetTx(const Wtxid& wtxid) const override;
@@ -83,196 +220,302 @@ public:
void AddChildrenToWorkSet(const CTransaction& tx, FastRandomContext& rng) override;
bool HaveTxToReconsider(NodeId peer) override;
std::vector<CTransactionRef> GetChildrenFromSamePeer(const CTransactionRef& parent, NodeId nodeid) const override;
size_t Size() const override { return m_orphans.size(); }
size_t Size() const override { return m_unique_orphans; }
std::vector<OrphanTxBase> GetOrphanTransactions() const override;
TxOrphanage::Usage TotalOrphanUsage() const override { return m_total_orphan_usage; }
TxOrphanage::Usage UsageByPeer(NodeId peer) const override;
TxOrphanage::Usage TotalOrphanUsage() const override;
void SanityCheck() const override;
};
/** Erase a single announcement (reachable via any index) and keep all aggregate bookkeeping
 * consistent: the per-peer stats, the deduplicated unique-orphan counters, and the outpoint map. */
template<typename Tag>
void TxOrphanageImpl::Erase(Iter<Tag> it)
{
// Update m_peer_orphanage_info and clean up entries if they point to an empty struct.
// This means peers that are not storing any orphans do not have an entry in
// m_peer_orphanage_info (they can be added back later if they announce another orphan) and
// ensures disconnected peers are not tracked forever.
auto peer_it = m_peer_orphanage_info.find(it->m_announcer);
Assume(peer_it != m_peer_orphanage_info.end());
if (peer_it->second.Subtract(*it)) m_peer_orphanage_info.erase(peer_it);
// If this was the last announcement for the transaction, it is leaving the orphanage
// entirely: update the deduplicated totals and unlink its inputs from the outpoint index.
if (IsUnique(m_orphans.project<ByWtxid>(it))) {
m_unique_orphans -= 1;
m_unique_rounded_input_scores -= it->GetLatencyScore() - 1;
m_unique_orphan_usage -= it->GetMemUsage();
// Remove references in m_outpoint_to_orphan_it
const auto& wtxid{it->m_tx->GetWitnessHash()};
for (const auto& input : it->m_tx->vin) {
auto it_prev = m_outpoint_to_orphan_it.find(input.prevout);
if (it_prev != m_outpoint_to_orphan_it.end()) {
it_prev->second.erase(wtxid);
// Clean up keys if they point to an empty set.
if (it_prev->second.empty()) {
m_outpoint_to_orphan_it.erase(it_prev);
}
}
}
}
// Finally remove the announcement itself, through the index the caller's iterator belongs to.
m_orphans.get<Tag>().erase(it);
}
/** Check whether this announcement is the only one for its transaction. The ByWtxid index sorts
 * announcements by (wtxid, peer), so all announcements for one transaction are adjacent; the
 * entry is unique iff neither neighbor shares its wtxid. */
bool TxOrphanageImpl::IsUnique(Iter<ByWtxid> it) const
{
const auto& index = m_orphans.get<ByWtxid>();
if (it == index.end()) return false;
const auto& wtxid{it->m_tx->GetWitnessHash()};
const auto next_it{std::next(it)};
if (next_it != index.end() && next_it->m_tx->GetWitnessHash() == wtxid) return false;
return it == index.begin() || std::prev(it)->m_tx->GetWitnessHash() != wtxid;
}
/** Return this peer's total orphan usage; peers without any orphans have no entry and report 0. */
TxOrphanage::Usage TxOrphanageImpl::UsageByPeer(NodeId peer) const
{
const auto peer_it{m_peer_orphanage_info.find(peer)};
if (peer_it == m_peer_orphanage_info.end()) return 0;
return peer_it->second.m_total_usage;
}
// Simple accessors over the aggregate counters and per-peer stats. Peers without any
// orphans have no entry in m_peer_orphanage_info and report 0 for all metrics.
TxOrphanage::Count TxOrphanageImpl::CountAnnouncements() const { return m_orphans.size(); }
TxOrphanage::Usage TxOrphanageImpl::TotalOrphanUsage() const { return m_unique_orphan_usage; }
TxOrphanage::Count TxOrphanageImpl::CountUniqueOrphans() const { return m_unique_orphans; }
TxOrphanage::Count TxOrphanageImpl::AnnouncementsFromPeer(NodeId peer) const {
const auto peer_it{m_peer_orphanage_info.find(peer)};
if (peer_it == m_peer_orphanage_info.end()) return 0;
return peer_it->second.m_count_announcements;
}
TxOrphanage::Count TxOrphanageImpl::LatencyScoreFromPeer(NodeId peer) const {
const auto peer_it{m_peer_orphanage_info.find(peer)};
if (peer_it == m_peer_orphanage_info.end()) return 0;
return peer_it->second.m_total_latency_score;
}
bool TxOrphanageImpl::AddTx(const CTransactionRef& tx, NodeId peer)
{
const Txid& hash = tx->GetHash();
const Wtxid& wtxid = tx->GetWitnessHash();
if (auto it{m_orphans.find(wtxid)}; it != m_orphans.end()) {
AddAnnouncer(wtxid, peer);
// No new orphan entry was created. An announcer may have been added.
const auto& wtxid{tx->GetWitnessHash()};
const auto& txid{tx->GetHash()};
// Ignore transactions above max standard size to avoid a send-big-orphans memory exhaustion attack.
TxOrphanage::Usage sz = GetTransactionWeight(*tx);
if (sz > MAX_STANDARD_TX_WEIGHT) {
LogDebug(BCLog::TXPACKAGES, "ignoring large orphan tx (size: %u, txid: %s, wtxid: %s)\n", sz, txid.ToString(), wtxid.ToString());
return false;
}
// Ignore big transactions, to avoid a
// send-big-orphans memory exhaustion attack. If a peer has a legitimate
// large transaction with a missing parent then we assume
// it will rebroadcast it later, after the parent transaction(s)
// have been mined or received.
// 100 orphans, each of which is at most 100,000 bytes big is
// at most 10 megabytes of orphans and somewhat more by prev index (in the worst case):
unsigned int sz = GetTransactionWeight(*tx);
if (sz > MAX_STANDARD_TX_WEIGHT)
{
LogDebug(BCLog::TXPACKAGES, "ignoring large orphan tx (size: %u, txid: %s, wtxid: %s)\n", sz, hash.ToString(), wtxid.ToString());
return false;
}
// We will return false if the tx already exists under a different peer.
const bool brand_new{!HaveTx(wtxid)};
auto ret = m_orphans.emplace(wtxid, OrphanTx{{tx, {peer}}, Now<NodeSeconds>() + ORPHAN_TX_EXPIRE_TIME, m_orphan_list.size()});
assert(ret.second);
m_orphan_list.push_back(ret.first);
for (const CTxIn& txin : tx->vin) {
m_outpoint_to_orphan_it[txin.prevout].insert(ret.first);
}
m_total_orphan_usage += sz;
m_total_announcements += 1;
auto [iter, inserted] = m_orphans.get<ByWtxid>().emplace(tx, peer, m_current_sequence);
// If the announcement (same wtxid, same peer) already exists, emplacement fails. Return false.
if (!inserted) return false;
++m_current_sequence;
auto& peer_info = m_peer_orphanage_info.try_emplace(peer).first->second;
peer_info.m_total_usage += sz;
peer_info.Add(*iter);
LogDebug(BCLog::TXPACKAGES, "stored orphan tx %s (wtxid=%s), weight: %u (mapsz %u outsz %u)\n", hash.ToString(), wtxid.ToString(), sz,
m_orphans.size(), m_outpoint_to_orphan_it.size());
return true;
// Add links in m_outpoint_to_orphan_it
if (brand_new) {
for (const auto& input : tx->vin) {
auto& wtxids_for_prevout = m_outpoint_to_orphan_it.try_emplace(input.prevout).first->second;
wtxids_for_prevout.emplace(wtxid);
}
m_unique_orphans += 1;
m_unique_orphan_usage += iter->GetMemUsage();
m_unique_rounded_input_scores += iter->GetLatencyScore() - 1;
LogDebug(BCLog::TXPACKAGES, "stored orphan tx %s (wtxid=%s), weight: %u (mapsz %u outsz %u)\n",
txid.ToString(), wtxid.ToString(), sz, m_orphans.size(), m_outpoint_to_orphan_it.size());
Assume(IsUnique(iter));
} else {
LogDebug(BCLog::TXPACKAGES, "added peer=%d as announcer of orphan tx %s (wtxid=%s)\n",
peer, txid.ToString(), wtxid.ToString());
Assume(!IsUnique(iter));
}
return brand_new;
}
bool TxOrphanageImpl::AddAnnouncer(const Wtxid& wtxid, NodeId peer)
{
const auto it = m_orphans.find(wtxid);
if (it != m_orphans.end()) {
Assume(!it->second.announcers.empty());
const auto ret = it->second.announcers.insert(peer);
if (ret.second) {
auto& index_by_wtxid = m_orphans.get<ByWtxid>();
auto it = index_by_wtxid.lower_bound(ByWtxidView{wtxid, MIN_PEER});
// Do nothing if this transaction isn't already present. We can't create an entry if we don't
// have the tx data.
if (it == index_by_wtxid.end()) return false;
if (it->m_tx->GetWitnessHash() != wtxid) return false;
// Add another announcement, copying the CTransactionRef from one that already exists.
const auto& ptx = it->m_tx;
auto [iter, inserted] = index_by_wtxid.emplace(ptx, peer, m_current_sequence);
// If the announcement (same wtxid, same peer) already exists, emplacement fails. Return false.
if (!inserted) return false;
++m_current_sequence;
auto& peer_info = m_peer_orphanage_info.try_emplace(peer).first->second;
peer_info.m_total_usage += it->second.GetUsage();
m_total_announcements += 1;
LogDebug(BCLog::TXPACKAGES, "added peer=%d as announcer of orphan tx %s\n", peer, wtxid.ToString());
peer_info.Add(*iter);
const auto& txid = ptx->GetHash();
LogDebug(BCLog::TXPACKAGES, "added peer=%d as announcer of orphan tx %s (wtxid=%s)\n",
peer, txid.ToString(), wtxid.ToString());
Assume(!IsUnique(iter));
return true;
}
}
return false;
}
bool TxOrphanageImpl::EraseTx(const Wtxid& wtxid)
{
std::map<Wtxid, OrphanTx>::iterator it = m_orphans.find(wtxid);
if (it == m_orphans.end())
return false;
for (const CTxIn& txin : it->second.tx->vin)
{
auto itPrev = m_outpoint_to_orphan_it.find(txin.prevout);
if (itPrev == m_outpoint_to_orphan_it.end())
continue;
itPrev->second.erase(it);
if (itPrev->second.empty())
m_outpoint_to_orphan_it.erase(itPrev);
auto& index_by_wtxid = m_orphans.get<ByWtxid>();
auto it = index_by_wtxid.lower_bound(ByWtxidView{wtxid, MIN_PEER});
if (it == index_by_wtxid.end() || it->m_tx->GetWitnessHash() != wtxid) return false;
auto it_end = index_by_wtxid.upper_bound(ByWtxidView{wtxid, MAX_PEER});
unsigned int num_ann{0};
const auto txid = it->m_tx->GetHash();
while (it != it_end) {
Assume(it->m_tx->GetWitnessHash() == wtxid);
Erase<ByWtxid>(it++);
num_ann += 1;
}
const auto tx_size{it->second.GetUsage()};
m_total_orphan_usage -= tx_size;
m_total_announcements -= it->second.announcers.size();
// Decrement each announcer's m_total_usage
for (const auto& peer : it->second.announcers) {
auto peer_it = m_peer_orphanage_info.find(peer);
if (Assume(peer_it != m_peer_orphanage_info.end())) {
peer_it->second.m_total_usage -= tx_size;
}
}
LogDebug(BCLog::TXPACKAGES, "removed orphan tx %s (wtxid=%s) (%u announcements)\n", txid.ToString(), wtxid.ToString(), num_ann);
size_t old_pos = it->second.list_pos;
assert(m_orphan_list[old_pos] == it);
if (old_pos + 1 != m_orphan_list.size()) {
// Unless we're deleting the last entry in m_orphan_list, move the last
// entry to the position we're deleting.
auto it_last = m_orphan_list.back();
m_orphan_list[old_pos] = it_last;
it_last->second.list_pos = old_pos;
}
const auto& txid = it->second.tx->GetHash();
// Time spent in orphanage = difference between current and entry time.
// Entry time is equal to ORPHAN_TX_EXPIRE_TIME earlier than entry's expiry.
LogDebug(BCLog::TXPACKAGES, " removed orphan tx %s (wtxid=%s) after %ds\n", txid.ToString(), wtxid.ToString(),
Ticks<std::chrono::seconds>(NodeClock::now() + ORPHAN_TX_EXPIRE_TIME - it->second.nTimeExpire));
m_orphan_list.pop_back();
m_orphans.erase(it);
return true;
}
/** Erase all entries by this peer. */
void TxOrphanageImpl::EraseForPeer(NodeId peer)
{
// Zeroes out this peer's m_total_usage.
m_peer_orphanage_info.erase(peer);
auto& index_by_peer = m_orphans.get<ByPeer>();
auto it = index_by_peer.lower_bound(ByPeerView{peer, false, 0});
if (it == index_by_peer.end() || it->m_announcer != peer) return;
int nErased = 0;
std::map<Wtxid, OrphanTx>::iterator iter = m_orphans.begin();
while (iter != m_orphans.end())
{
// increment to avoid iterator becoming invalid after erasure
auto& [wtxid, orphan] = *iter++;
auto orphan_it = orphan.announcers.find(peer);
if (orphan_it != orphan.announcers.end()) {
orphan.announcers.erase(peer);
m_total_announcements -= 1;
unsigned int num_ann{0};
while (it != index_by_peer.end() && it->m_announcer == peer) {
// Delete item, cleaning up m_outpoint_to_orphan_it iff this entry is unique by wtxid.
Erase<ByPeer>(it++);
num_ann += 1;
}
Assume(!m_peer_orphanage_info.contains(peer));
// No remaining announcers: clean up entry
if (orphan.announcers.empty()) {
nErased += EraseTx(orphan.tx->GetWitnessHash());
}
}
}
if (nErased > 0) LogDebug(BCLog::TXPACKAGES, "Erased %d orphan transaction(s) from peer=%d\n", nErased, peer);
if (num_ann > 0) LogDebug(BCLog::TXPACKAGES, "Erased %d orphan transaction(s) from peer=%d\n", num_ann, peer);
}
/** If the data structure needs trimming, evicts announcements by selecting the DoSiest peer and evicting its oldest
* announcement (sorting non-reconsiderable orphans first, to give reconsiderable orphans a greater chance of being
* processed). Does nothing if no global limits are exceeded. This eviction strategy effectively "reserves" an
* amount of announcements and space for each peer. The reserved amount is protected from eviction even if there
* are peers spamming the orphanage.
*/
void TxOrphanageImpl::LimitOrphans(FastRandomContext& rng)
{
unsigned int nEvicted = 0;
auto nNow{Now<NodeSeconds>()};
if (m_next_sweep <= nNow) {
// Sweep out expired orphan pool entries:
int nErased = 0;
auto nMinExpTime{nNow + ORPHAN_TX_EXPIRE_TIME - ORPHAN_TX_EXPIRE_INTERVAL};
std::map<Wtxid, OrphanTx>::iterator iter = m_orphans.begin();
while (iter != m_orphans.end())
{
std::map<Wtxid, OrphanTx>::iterator maybeErase = iter++;
if (maybeErase->second.nTimeExpire <= nNow) {
nErased += EraseTx(maybeErase->first);
} else {
nMinExpTime = std::min(maybeErase->second.nTimeExpire, nMinExpTime);
if (!NeedsTrim()) return;
const auto original_unique_txns{CountUniqueOrphans()};
// Even though it's possible for MaxPeerLatencyScore to increase within this call to LimitOrphans
// (e.g. if a peer's orphans are removed entirely, changing the number of peers), use consistent limits throughout.
const auto max_ann{MaxPeerLatencyScore()};
const auto max_mem{ReservedPeerUsage()};
// We have exceeded the global limit(s). Now, identify who is using too much and evict their orphans.
// Create a heap of pairs (NodeId, DoS score), sorted by descending DoS score.
std::vector<std::pair<NodeId, FeeFrac>> heap_peer_dos;
heap_peer_dos.reserve(m_peer_orphanage_info.size());
for (const auto& [nodeid, entry] : m_peer_orphanage_info) {
// Performance optimization: only consider peers with a DoS score > 1.
const auto dos_score = entry.GetDosScore(max_ann, max_mem);
if (dos_score >> FeeFrac{1, 1}) {
heap_peer_dos.emplace_back(nodeid, dos_score);
}
}
// Sweep again 5 minutes after the next entry that expires in order to batch the linear scan.
m_next_sweep = nMinExpTime + ORPHAN_TX_EXPIRE_INTERVAL;
if (nErased > 0) LogDebug(BCLog::TXPACKAGES, "Erased %d orphan tx due to expiration\n", nErased);
static constexpr auto compare_score = [](const auto& left, const auto& right) {
if (left.second != right.second) return left.second < right.second;
// Tiebreak by considering the more recent peer (higher NodeId) to be worse.
return left.first < right.first;
};
std::make_heap(heap_peer_dos.begin(), heap_peer_dos.end(), compare_score);
unsigned int num_erased{0};
// This outer loop finds the peer with the highest DoS score, which is a fraction of {usage, announcements} used
// over the respective allowances. We continue until the orphanage is within global limits. That means some peers
// might still have a DoS score > 1 at the end.
// Note: if ratios are the same, FeeFrac tiebreaks by denominator. In practice, since the CPU denominator (number of
// announcements) is always lower, this means that a peer with only high number of announcements will be targeted
// before a peer using a lot of memory, even if they have the same ratios.
do {
Assume(!heap_peer_dos.empty());
// This is a max-heap, so the worst peer is at the front. pop_heap()
// moves it to the back, and the next worst peer is moved to the front.
std::pop_heap(heap_peer_dos.begin(), heap_peer_dos.end(), compare_score);
const auto [worst_peer, dos_score] = std::move(heap_peer_dos.back());
heap_peer_dos.pop_back();
// If needs trim, then at least one peer has a DoS score higher than 1.
Assume(dos_score >> (FeeFrac{1, 1}));
auto it_worst_peer = m_peer_orphanage_info.find(worst_peer);
// This inner loop trims until this peer is no longer the DoSiest one or has a score within 1. The score 1 is
// just a conservative fallback: once the last peer goes below ratio 1, NeedsTrim() will return false anyway.
// We evict the oldest announcement(s) from this peer, sorting non-reconsiderable before reconsiderable.
// The number of inner loop iterations is bounded by the total number of announcements.
const auto& dos_threshold = heap_peer_dos.empty() ? FeeFrac{1, 1} : heap_peer_dos.front().second;
auto it_ann = m_orphans.get<ByPeer>().lower_bound(ByPeerView{worst_peer, false, 0});
while (NeedsTrim()) {
if (!Assume(it_ann->m_announcer == worst_peer)) break;
if (!Assume(it_ann != m_orphans.get<ByPeer>().end())) break;
Erase<ByPeer>(it_ann++);
num_erased += 1;
// If we erased the last orphan from this peer, it_worst_peer will be invalidated.
it_worst_peer = m_peer_orphanage_info.find(worst_peer);
if (it_worst_peer == m_peer_orphanage_info.end() || it_worst_peer->second.GetDosScore(max_ann, max_mem) <= dos_threshold) break;
}
while (m_orphans.size() > DEFAULT_MAX_ORPHAN_TRANSACTIONS)
{
// Evict a random orphan:
size_t randompos = rng.randrange(m_orphan_list.size());
EraseTx(m_orphan_list[randompos]->first);
++nEvicted;
if (!NeedsTrim()) break;
// Unless this peer is empty, put it back in the heap so we continue to consider evicting its orphans.
// We may select this peer for evictions again if there are multiple DoSy peers.
if (it_worst_peer != m_peer_orphanage_info.end() && it_worst_peer->second.m_count_announcements > 0) {
heap_peer_dos.emplace_back(worst_peer, it_worst_peer->second.GetDosScore(max_ann, max_mem));
std::push_heap(heap_peer_dos.begin(), heap_peer_dos.end(), compare_score);
}
if (nEvicted > 0) LogDebug(BCLog::TXPACKAGES, "orphanage overflow, removed %u tx\n", nEvicted);
} while (true);
const auto remaining_unique_orphans{CountUniqueOrphans()};
LogDebug(BCLog::TXPACKAGES, "orphanage overflow, removed %u tx (%u announcements)\n", original_unique_txns - remaining_unique_orphans, num_erased);
}
void TxOrphanageImpl::AddChildrenToWorkSet(const CTransaction& tx, FastRandomContext& rng)
{
    auto& index_by_wtxid = m_orphans.get<ByWtxid>();
    // For each output of tx, find orphans that spend it and flag one announcement
    // per orphan for reconsideration.
    for (unsigned int i = 0; i < tx.vout.size(); i++) {
        const auto it_by_prev = m_outpoint_to_orphan_it.find(COutPoint(tx.GetHash(), i));
        if (it_by_prev != m_outpoint_to_orphan_it.end()) {
            for (const auto& wtxid : it_by_prev->second) {
                // Belt and suspenders, each entry in m_outpoint_to_orphan_it should always have at least 1 announcement.
                auto it = index_by_wtxid.lower_bound(ByWtxidView{wtxid, MIN_PEER});
                if (!Assume(it != index_by_wtxid.end())) continue;

                // Select a random peer to assign orphan processing, reducing wasted work if the orphan is still missing
                // inputs. However, we don't want to create an issue in which the assigned peer can purposefully stop us
                // from processing the orphan by disconnecting.
                auto it_end = index_by_wtxid.upper_bound(ByWtxidView{wtxid, MAX_PEER});
                const auto num_announcers{std::distance(it, it_end)};
                if (!Assume(num_announcers > 0)) continue;
                std::advance(it, rng.randrange(num_announcers));
                if (!Assume(it->m_tx->GetWitnessHash() == wtxid)) break;

                // Mark this orphan as ready to be reconsidered.
                static constexpr auto mark_reconsidered_modifier = [](auto& ann) { ann.m_reconsider = true; };
                index_by_wtxid.modify(it, mark_reconsidered_modifier);

                LogDebug(BCLog::TXPACKAGES, "added %s (wtxid=%s) to peer %d workset\n",
                         it->m_tx->GetHash().ToString(), it->m_tx->GetWitnessHash().ToString(), it->m_announcer);
            }
        }
    }
}
@@ -280,176 +523,193 @@ void TxOrphanageImpl::AddChildrenToWorkSet(const CTransaction& tx, FastRandomCon
bool TxOrphanageImpl::HaveTx(const Wtxid& wtxid) const
{
    // The ByWtxid index is keyed by (wtxid, peer); the first entry at or after
    // (wtxid, MIN_PEER) has this wtxid iff any announcement for it exists.
    auto it_lower = m_orphans.get<ByWtxid>().lower_bound(ByWtxidView{wtxid, MIN_PEER});
    return it_lower != m_orphans.get<ByWtxid>().end() && it_lower->m_tx->GetWitnessHash() == wtxid;
}
CTransactionRef TxOrphanageImpl::GetTx(const Wtxid& wtxid) const
{
    // Any announcement with a matching wtxid carries the same tx; return the first one found.
    auto it_lower = m_orphans.get<ByWtxid>().lower_bound(ByWtxidView{wtxid, MIN_PEER});
    if (it_lower != m_orphans.get<ByWtxid>().end() && it_lower->m_tx->GetWitnessHash() == wtxid) return it_lower->m_tx;
    return nullptr;
}
bool TxOrphanageImpl::HaveTxFromPeer(const Wtxid& wtxid, NodeId peer) const
{
    // Announcements are unique by (wtxid, peer), so count is 0 or 1.
    return m_orphans.get<ByWtxid>().count(ByWtxidView{wtxid, peer}) > 0;
}
/** If there is a tx that can be reconsidered, return it and set it back to
 * non-reconsiderable. Otherwise, return a nullptr. */
CTransactionRef TxOrphanageImpl::GetTxToReconsider(NodeId peer)
{
    // The ByPeer index sorts by (peer, m_reconsider, ...), so the first entry at or
    // after (peer, true, 0) is a reconsiderable announcement from this peer, if any exists.
    auto it = m_orphans.get<ByPeer>().lower_bound(ByPeerView{peer, true, 0});
    if (it != m_orphans.get<ByPeer>().end() && it->m_announcer == peer && it->m_reconsider) {
        // Flip m_reconsider. Even if this transaction stays in orphanage, it shouldn't be
        // reconsidered again until there is a new reason to do so.
        static constexpr auto mark_reconsidered_modifier = [](auto& ann) { ann.m_reconsider = false; };
        m_orphans.get<ByPeer>().modify(it, mark_reconsidered_modifier);
        return it->m_tx;
    }
    return nullptr;
}
/** Return whether there is a tx that can be reconsidered. */
bool TxOrphanageImpl::HaveTxToReconsider(NodeId peer)
{
    // Same lookup as GetTxToReconsider, but without flipping m_reconsider.
    auto it = m_orphans.get<ByPeer>().lower_bound(ByPeerView{peer, true, 0});
    return it != m_orphans.get<ByPeer>().end() && it->m_announcer == peer && it->m_reconsider;
}
void TxOrphanageImpl::EraseForBlock(const CBlock& block)
{
std::vector<Wtxid> vOrphanErase;
std::set<Wtxid> wtxids_to_erase;
for (const CTransactionRef& ptx : block.vtx) {
const CTransaction& tx = *ptx;
const CTransaction& block_tx = *ptx;
// Which orphan pool entries must we evict?
for (const auto& txin : tx.vin) {
auto itByPrev = m_outpoint_to_orphan_it.find(txin.prevout);
if (itByPrev == m_outpoint_to_orphan_it.end()) continue;
for (auto mi = itByPrev->second.begin(); mi != itByPrev->second.end(); ++mi) {
const CTransaction& orphanTx = *(*mi)->second.tx;
vOrphanErase.push_back(orphanTx.GetWitnessHash());
for (const auto& input : block_tx.vin) {
auto it_prev = m_outpoint_to_orphan_it.find(input.prevout);
if (it_prev != m_outpoint_to_orphan_it.end()) {
// Copy all wtxids to wtxids_to_erase.
std::copy(it_prev->second.cbegin(), it_prev->second.cend(), std::inserter(wtxids_to_erase, wtxids_to_erase.end()));
}
}
}
// Erase orphan transactions included or precluded by this block
if (vOrphanErase.size()) {
int nErased = 0;
for (const auto& orphanHash : vOrphanErase) {
nErased += EraseTx(orphanHash);
}
LogDebug(BCLog::TXPACKAGES, "Erased %d orphan transaction(s) included or conflicted by block\n", nErased);
}
unsigned int num_erased{0};
for (const auto& wtxid : wtxids_to_erase) {
num_erased += EraseTx(wtxid) ? 1 : 0;
}
std::vector<CTransactionRef> TxOrphanageImpl::GetChildrenFromSamePeer(const CTransactionRef& parent, NodeId nodeid) const
if (num_erased != 0) {
LogDebug(BCLog::TXPACKAGES, "Erased %d orphan transaction(s) included or conflicted by block\n", num_erased);
}
Assume(wtxids_to_erase.size() == num_erased);
}
/** Get all children that spend from this tx and were received from nodeid. Sorted from most
* recent to least recent. */
std::vector<CTransactionRef> TxOrphanageImpl::GetChildrenFromSamePeer(const CTransactionRef& parent, NodeId peer) const
{
// First construct a vector of iterators to ensure we do not return duplicates of the same tx
// and so we can sort by nTimeExpire.
std::vector<OrphanMap::iterator> iters;
// For each output, get all entries spending this prevout, filtering for ones from the specified peer.
for (unsigned int i = 0; i < parent->vout.size(); i++) {
const auto it_by_prev = m_outpoint_to_orphan_it.find(COutPoint(parent->GetHash(), i));
if (it_by_prev != m_outpoint_to_orphan_it.end()) {
for (const auto& elem : it_by_prev->second) {
if (elem->second.announcers.contains(nodeid)) {
iters.emplace_back(elem);
}
}
}
}
// Sort by address so that duplicates can be deleted. At the same time, sort so that more recent
// orphans (which expire later) come first. Break ties based on address, as nTimeExpire is
// quantified in seconds and it is possible for orphans to have the same expiry.
std::sort(iters.begin(), iters.end(), [](const auto& lhs, const auto& rhs) {
if (lhs->second.nTimeExpire == rhs->second.nTimeExpire) {
return &(*lhs) < &(*rhs);
} else {
return lhs->second.nTimeExpire > rhs->second.nTimeExpire;
}
});
// Erase duplicates
iters.erase(std::unique(iters.begin(), iters.end()), iters.end());
// Convert to a vector of CTransactionRef
std::vector<CTransactionRef> children_found;
children_found.reserve(iters.size());
for (const auto& child_iter : iters) {
children_found.emplace_back(child_iter->second.tx);
const auto& parent_txid{parent->GetHash()};
// Iterate through all orphans from this peer, in reverse order, so that more recent
// transactions are added first. Doing so helps avoid work when one of the orphans replaced
// an earlier one. Since we require the NodeId to match, one peer's announcement order does
// not bias how we process other peer's orphans.
auto& index_by_peer = m_orphans.get<ByPeer>();
auto it_upper = index_by_peer.upper_bound(ByPeerView{peer, true, std::numeric_limits<uint64_t>::max()});
auto it_lower = index_by_peer.lower_bound(ByPeerView{peer, false, 0});
while (it_upper != it_lower) {
--it_upper;
if (!Assume(it_upper->m_announcer == peer)) break;
// Check if this tx spends from parent.
for (const auto& input : it_upper->m_tx->vin) {
if (input.prevout.hash == parent_txid) {
children_found.emplace_back(it_upper->m_tx);
break;
}
}
}
return children_found;
}
std::vector<TxOrphanage::OrphanTxBase> TxOrphanageImpl::GetOrphanTransactions() const
{
std::vector<OrphanTxBase> ret;
ret.reserve(m_orphans.size());
for (auto const& o : m_orphans) {
ret.push_back({o.second.tx, o.second.announcers});
}
return ret;
std::vector<TxOrphanage::OrphanTxBase> result;
result.reserve(m_unique_orphans);
auto& index_by_wtxid = m_orphans.get<ByWtxid>();
auto it = index_by_wtxid.begin();
std::set<NodeId> this_orphan_announcers;
while (it != index_by_wtxid.end()) {
this_orphan_announcers.insert(it->m_announcer);
// If this is the last entry, or the next entry has a different wtxid, build a OrphanTxBase.
if (std::next(it) == index_by_wtxid.end() || std::next(it)->m_tx->GetWitnessHash() != it->m_tx->GetWitnessHash()) {
result.emplace_back(it->m_tx, std::move(this_orphan_announcers));
this_orphan_announcers.clear();
}
TxOrphanage::Usage TxOrphanageImpl::UsageByPeer(NodeId peer) const
{
auto peer_it = m_peer_orphanage_info.find(peer);
return peer_it == m_peer_orphanage_info.end() ? 0 : peer_it->second.m_total_usage;
++it;
}
Assume(m_unique_orphans == result.size());
return result;
}
void TxOrphanageImpl::SanityCheck() const
{
// Check that cached m_total_announcements is correct
TxOrphanage::Count counted_total_announcements{0};
// Check that m_total_orphan_usage is correct
TxOrphanage::Usage counted_total_usage{0};
std::unordered_map<NodeId, PeerDoSInfo> reconstructed_peer_info;
std::map<Wtxid, std::pair<TxOrphanage::Usage, TxOrphanage::Count>> unique_wtxids_to_scores;
std::set<COutPoint> all_outpoints;
// Check that cached PeerOrphanInfo::m_total_size is correct
std::map<NodeId, TxOrphanage::Usage> counted_size_per_peer;
for (auto it = m_orphans.begin(); it != m_orphans.end(); ++it) {
for (const auto& input : it->m_tx->vin) {
all_outpoints.insert(input.prevout);
}
unique_wtxids_to_scores.emplace(it->m_tx->GetWitnessHash(), std::make_pair(it->GetMemUsage(), it->GetLatencyScore() - 1));
for (const auto& [wtxid, orphan] : m_orphans) {
counted_total_announcements += orphan.announcers.size();
counted_total_usage += orphan.GetUsage();
auto& peer_info = reconstructed_peer_info[it->m_announcer];
peer_info.m_total_usage += it->GetMemUsage();
peer_info.m_count_announcements += 1;
peer_info.m_total_latency_score += it->GetLatencyScore();
}
assert(reconstructed_peer_info.size() == m_peer_orphanage_info.size());
Assume(!orphan.announcers.empty());
for (const auto& peer : orphan.announcers) {
auto& count_peer_entry = counted_size_per_peer.try_emplace(peer).first->second;
count_peer_entry += orphan.GetUsage();
// All outpoints exist in m_outpoint_to_orphan_it, all keys in m_outpoint_to_orphan_it correspond to some
// orphan, and all wtxids referenced in m_outpoint_to_orphan_it are also in m_orphans.
// This ensures m_outpoint_to_orphan_it is cleaned up.
assert(all_outpoints.size() == m_outpoint_to_orphan_it.size());
for (const auto& [outpoint, wtxid_set] : m_outpoint_to_orphan_it) {
assert(all_outpoints.contains(outpoint));
for (const auto& wtxid : wtxid_set) {
assert(unique_wtxids_to_scores.contains(wtxid));
}
}
Assume(m_total_announcements >= m_orphans.size());
Assume(counted_total_announcements == m_total_announcements);
Assume(counted_total_usage == m_total_orphan_usage);
// Cached m_unique_orphans value is correct.
assert(m_orphans.size() >= m_unique_orphans);
assert(m_orphans.size() <= m_peer_orphanage_info.size() * m_unique_orphans);
assert(unique_wtxids_to_scores.size() == m_unique_orphans);
// There must be an entry in m_peer_orphanage_info for each peer
// However, there may be m_peer_orphanage_info entries corresponding to peers for whom we
// previously had orphans but no longer do.
Assume(counted_size_per_peer.size() <= m_peer_orphanage_info.size());
const auto calculated_dedup_usage = std::accumulate(unique_wtxids_to_scores.begin(), unique_wtxids_to_scores.end(),
TxOrphanage::Usage{0}, [](TxOrphanage::Usage sum, const auto pair) { return sum + pair.second.first; });
assert(calculated_dedup_usage == m_unique_orphan_usage);
for (const auto& [peerid, info] : m_peer_orphanage_info) {
auto it_counted = counted_size_per_peer.find(peerid);
if (it_counted == counted_size_per_peer.end()) {
Assume(info.m_total_usage == 0);
} else {
Assume(it_counted->second == info.m_total_usage);
}
}
// Global usage is deduplicated, should be less than or equal to the sum of all per-peer usages.
const auto summed_peer_usage = std::accumulate(m_peer_orphanage_info.begin(), m_peer_orphanage_info.end(),
TxOrphanage::Usage{0}, [](TxOrphanage::Usage sum, const auto pair) { return sum + pair.second.m_total_usage; });
assert(summed_peer_usage >= m_unique_orphan_usage);
// Cached m_unique_rounded_input_scores value is correct.
const auto calculated_total_latency_score = std::accumulate(unique_wtxids_to_scores.begin(), unique_wtxids_to_scores.end(),
TxOrphanage::Count{0}, [](TxOrphanage::Count sum, const auto pair) { return sum + pair.second.second; });
assert(calculated_total_latency_score == m_unique_rounded_input_scores);
// Global latency score is deduplicated, should be less than or equal to the sum of all per-peer latency scores.
const auto summed_peer_latency_score = std::accumulate(m_peer_orphanage_info.begin(), m_peer_orphanage_info.end(),
TxOrphanage::Count{0}, [](TxOrphanage::Count sum, const auto pair) { return sum + pair.second.m_total_latency_score; });
assert(summed_peer_latency_score >= m_unique_rounded_input_scores + m_orphans.size());
}
// Accessors for the orphanage's global and per-peer limits.
TxOrphanage::Count TxOrphanageImpl::MaxGlobalLatencyScore() const { return m_max_global_latency_score; }
// Total (deduplicated) latency score: cached unique rounded input scores plus one per announcement.
TxOrphanage::Count TxOrphanageImpl::TotalLatencyScore() const { return m_unique_rounded_input_scores + m_orphans.size(); }
TxOrphanage::Usage TxOrphanageImpl::ReservedPeerUsage() const { return m_reserved_usage_per_peer; }
// Per-peer latency budget: global budget split evenly among tracked peers (min 1 avoids division by zero).
TxOrphanage::Count TxOrphanageImpl::MaxPeerLatencyScore() const { return m_max_global_latency_score / std::max<unsigned int>(m_peer_orphanage_info.size(), 1); }
// Global usage budget scales with the number of tracked peers (min 1 so the limit is never zero).
TxOrphanage::Usage TxOrphanageImpl::MaxGlobalUsage() const { return m_reserved_usage_per_peer * std::max<int64_t>(m_peer_orphanage_info.size(), 1); }
// Returns true when either global limit (latency score or memory usage) is exceeded.
bool TxOrphanageImpl::NeedsTrim() const
{
    if (TotalLatencyScore() > MaxGlobalLatencyScore()) return true;
    return TotalOrphanUsage() > MaxGlobalUsage();
}
// Factory: construct an orphanage with the default limits.
std::unique_ptr<TxOrphanage> MakeTxOrphanage() noexcept
{
    auto orphanage = std::make_unique<TxOrphanageImpl>();
    return orphanage;
}

// Factory: construct an orphanage with custom announcement and per-peer usage limits.
std::unique_ptr<TxOrphanage> MakeTxOrphanage(TxOrphanage::Count max_global_ann, TxOrphanage::Usage reserved_peer_usage) noexcept
{
    auto orphanage = std::make_unique<TxOrphanageImpl>(max_global_ann, reserved_peer_usage);
    return orphanage;
}
} // namespace node

View File

@@ -16,17 +16,25 @@
#include <set>
namespace node {
/** Expiration time for orphan transactions */
static constexpr auto ORPHAN_TX_EXPIRE_TIME{20min};
/** Minimum time between orphan transactions expire time checks */
static constexpr auto ORPHAN_TX_EXPIRE_INTERVAL{5min};
/** Default value for TxOrphanage::m_reserved_usage_per_peer. Helps limit the total amount of memory used by the orphanage. */
static constexpr int64_t DEFAULT_RESERVED_ORPHAN_WEIGHT_PER_PEER{404'000};
/** Default value for TxOrphanage::m_max_global_latency_score. Helps limit the maximum latency for operations like
* EraseForBlock and LimitOrphans. */
static constexpr unsigned int DEFAULT_MAX_ORPHANAGE_LATENCY_SCORE{100};
/** Default maximum number of orphan transactions kept in memory */
static const uint32_t DEFAULT_MAX_ORPHAN_TRANSACTIONS{100};
/** A class to track orphan transactions (failed on TX_MISSING_INPUTS)
 * Since we cannot distinguish orphans from bad transactions with non-existent inputs, we heavily limit the amount of
 * announcements (unique (NodeId, wtxid) pairs), the number of inputs, and size of the orphans stored (both individual
 * and summed). We also try to prevent adversaries from churning this data structure: once global limits are reached, we
 * continuously evict the oldest announcement (sorting non-reconsiderable orphans before reconsiderable ones) from the
 * most resource-intensive peer until we are back within limits.
 * - Peers can exceed their individual limits (e.g. because they are very useful transaction relay peers) as long as the
 *   global limits are not exceeded.
 * - As long as the orphan has 1 announcer, it remains in the orphanage.
 * - No peer can trigger the eviction of another peer's orphans.
 * - Peers' orphans are effectively protected from eviction as long as they don't exceed their limits.
 *
 * Not thread-safe. Requires external synchronization.
 */
class TxOrphanage {
@@ -40,10 +48,11 @@ public:
/** Peers added with AddTx or AddAnnouncer. */
std::set<NodeId> announcers;
/** Get the weight of this transaction, an approximation of its memory usage. */
TxOrphanage::Usage GetUsage() const {
return GetTransactionWeight(*tx);
}
// Constructor with moved announcers
OrphanTxBase(CTransactionRef tx, std::set<NodeId>&& announcers) :
tx(std::move(tx)),
announcers(std::move(announcers))
{}
};
virtual ~TxOrphanage() = default;
@@ -63,7 +72,7 @@ public:
/** Check if a {tx, peer} exists in the orphanage.*/
virtual bool HaveTxFromPeer(const Wtxid& wtxid, NodeId peer) const = 0;
/** Extract a transaction from a peer's work set
/** Extract a transaction from a peer's work set, and flip it back to non-reconsiderable.
* Returns nullptr if there are no transactions to work on.
* Otherwise returns the transaction reference, and removes
* it from the work set.
@@ -81,7 +90,7 @@ public:
/** Erase all orphans included in or invalidated by a new block */
virtual void EraseForBlock(const CBlock& block) = 0;
/** Limit the orphanage to DEFAULT_MAX_ORPHAN_TRANSACTIONS. */
/** Limit the orphanage to MaxGlobalLatencyScore and MaxGlobalUsage. */
virtual void LimitOrphans(FastRandomContext& rng) = 0;
/** Add any orphans that list a particular tx as a parent into the from peer's work set */
@@ -106,16 +115,45 @@ public:
/** Total usage (weight) of orphans for which this peer is an announcer. If an orphan has multiple
* announcers, its weight will be accounted for in each PeerOrphanInfo, so the total of all
* peers' UsageByPeer() may be larger than TotalOrphanUsage(). */
* peers' UsageByPeer() may be larger than TotalOrphanUsage(). Similarly, UsageByPeer() may be far higher than
* ReservedPeerUsage(), particularly if many peers have provided the same orphans. */
virtual Usage UsageByPeer(NodeId peer) const = 0;
/** Check consistency between PeerOrphanInfo and m_orphans. Recalculate counters and ensure they
* match what is cached. */
virtual void SanityCheck() const = 0;
/** Number of announcements, i.e. total size of m_orphans. Ones for the same wtxid are not de-duplicated.
* Not the same as TotalLatencyScore(). */
virtual Count CountAnnouncements() const = 0;
/** Number of unique orphans (by wtxid). */
virtual Count CountUniqueOrphans() const = 0;
/** Number of orphans stored from this peer. */
virtual Count AnnouncementsFromPeer(NodeId peer) const = 0;
/** Latency score of transactions announced by this peer. */
virtual Count LatencyScoreFromPeer(NodeId peer) const = 0;
/** Get the maximum global latency score allowed */
virtual Count MaxGlobalLatencyScore() const = 0;
/** Get the total latency score of all orphans */
virtual Count TotalLatencyScore() const = 0;
/** Get the reserved usage per peer */
virtual Usage ReservedPeerUsage() const = 0;
/** Get the maximum latency score allowed per peer */
virtual Count MaxPeerLatencyScore() const = 0;
/** Get the maximum global usage allowed */
virtual Usage MaxGlobalUsage() const = 0;
};
/** Create a new TxOrphanage instance */
std::unique_ptr<TxOrphanage> MakeTxOrphanage() noexcept;
std::unique_ptr<TxOrphanage> MakeTxOrphanage(TxOrphanage::Count max_global_ann, TxOrphanage::Usage reserved_peer_usage) noexcept;
} // namespace node
#endif // BITCOIN_NODE_TXORPHANAGE_H

View File

@@ -170,27 +170,6 @@ BOOST_AUTO_TEST_CASE(DoS_mapOrphans)
expected_num_orphans -= 2;
BOOST_CHECK(orphanage->Size() == expected_num_orphans);
}
// Test LimitOrphanTxSize() function, nothing should timeout:
FastRandomContext rng{/*fDeterministic=*/true};
orphanage->LimitOrphans(rng);
BOOST_CHECK_EQUAL(orphanage->Size(), expected_num_orphans);
// Add one more orphan, check timeout logic
auto timeout_tx = MakeTransactionSpending(/*outpoints=*/{}, rng);
orphanage->AddTx(timeout_tx, 0);
expected_num_orphans += 1;
BOOST_CHECK_EQUAL(orphanage->Size(), expected_num_orphans);
// One second shy of expiration
SetMockTime(now + node::ORPHAN_TX_EXPIRE_TIME - 1s);
orphanage->LimitOrphans(rng);
BOOST_CHECK_EQUAL(orphanage->Size(), expected_num_orphans);
// Jump one more second, orphan should be timed out on limiting
SetMockTime(now + node::ORPHAN_TX_EXPIRE_TIME);
orphanage->LimitOrphans(rng);
BOOST_CHECK_EQUAL(orphanage->Size(), 0);
}
BOOST_AUTO_TEST_CASE(same_txid_diff_witness)