Merge #7840: Several performance and privacy improvements to inv/mempool handling

b559914 Move bloom and feerate filtering to just prior to tx sending. (Gregory Maxwell)
4578215 Return mempool queries in dependency order (Pieter Wuille)
ed70683 Handle mempool requests in send loop, subject to trickle (Pieter Wuille)
dc13dcd Split up and optimize transaction and block inv queues (Pieter Wuille)
f2d3ba7 Eliminate TX trickle bypass, sort TX invs for privacy and priority. (Gregory Maxwell)
Committed by Wladimir J. van der Laan on 2016-05-05 13:14:29 +02:00
6 changed files with 196 additions and 93 deletions

@@ -4556,12 +4556,16 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
vRecv >> LIMITED_STRING(pfrom->strSubVer, MAX_SUBVERSION_LENGTH);
pfrom->cleanSubVer = SanitizeString(pfrom->strSubVer);
}
if (!vRecv.empty())
if (!vRecv.empty()) {
vRecv >> pfrom->nStartingHeight;
if (!vRecv.empty())
vRecv >> pfrom->fRelayTxes; // set to true after we get the first filter* message
else
pfrom->fRelayTxes = true;
}
{
LOCK(pfrom->cs_filter);
if (!vRecv.empty())
vRecv >> pfrom->fRelayTxes; // set to true after we get the first filter* message
else
pfrom->fRelayTxes = true;
}
// Disconnect if we connected to ourself
if (nNonce == nLocalHostNonce && nNonce > 1)
@@ -5234,34 +5238,9 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
pfrom->fDisconnect = true;
return true;
}
LOCK2(cs_main, pfrom->cs_filter);
std::vector<uint256> vtxid;
mempool.queryHashes(vtxid);
vector<CInv> vInv;
BOOST_FOREACH(uint256& hash, vtxid) {
CInv inv(MSG_TX, hash);
if (pfrom->pfilter) {
CTransaction tx;
bool fInMemPool = mempool.lookup(hash, tx);
if (!fInMemPool) continue; // another thread removed since queryHashes, maybe...
if (!pfrom->pfilter->IsRelevantAndUpdate(tx)) continue;
}
if (pfrom->minFeeFilter) {
CFeeRate feeRate;
mempool.lookupFeeRate(hash, feeRate);
LOCK(pfrom->cs_feeFilter);
if (feeRate.GetFeePerK() < pfrom->minFeeFilter)
continue;
}
vInv.push_back(inv);
if (vInv.size() == MAX_INV_SZ) {
pfrom->PushMessage(NetMsgType::INV, vInv);
vInv.clear();
}
}
if (vInv.size() > 0)
pfrom->PushMessage(NetMsgType::INV, vInv);
LOCK(pfrom->cs_inventory);
pfrom->fSendMempool = true;
}
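
The hunk above replaces the old inline mempool walk (query all txids, filter, push INV right away) with a deferred reply: the handler now only takes cs_inventory and sets fSendMempool, and the trickle-timed send loop answers later. A standalone sketch of that deferral shape follows; it is illustrative only, using a plain std::mutex and hypothetical names (Peer, HandleMempoolMessage, SendLoopTick) rather than anything from the codebase.

#include <mutex>

struct Peer {
    std::mutex inv_mutex;          // plays the role of cs_inventory
    bool send_mempool = false;     // plays the role of fSendMempool
};

// Message handler: just record the request; no mempool walk, no filtering here.
void HandleMempoolMessage(Peer& peer)
{
    std::lock_guard<std::mutex> lock(peer.inv_mutex);
    peer.send_mempool = true;
}

// Periodic send path: answer only when the trickle timer says it is time.
void SendLoopTick(Peer& peer, bool trickle_due)
{
    bool answer = false;
    {
        std::lock_guard<std::mutex> lock(peer.inv_mutex);
        if (trickle_due && peer.send_mempool) {
            peer.send_mempool = false;
            answer = true;
        }
    }
    if (answer) {
        // Here the real code walks the mempool, applies the peer's fee and
        // bloom filters, and pushes INV batches of at most MAX_INV_SZ entries.
    }
}

int main()
{
    Peer peer;
    HandleMempoolMessage(peer);    // "mempool" message arrives
    SendLoopTick(peer, false);     // trickle timer not due: nothing sent yet
    SendLoopTick(peer, true);      // trickle tick: the request is answered now
    return 0;
}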
@@ -5349,12 +5328,13 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
CBloomFilter filter;
vRecv >> filter;
LOCK(pfrom->cs_filter);
if (!filter.IsWithinSizeConstraints())
// There is no excuse for sending a too-large filter
Misbehaving(pfrom->GetId(), 100);
else
{
LOCK(pfrom->cs_filter);
delete pfrom->pfilter;
pfrom->pfilter = new CBloomFilter(filter);
pfrom->pfilter->UpdateEmptyFull();
@@ -5559,6 +5539,22 @@ bool ProcessMessages(CNode* pfrom)
return fOk;
}
class CompareInvMempoolOrder
{
CTxMemPool *mp;
public:
CompareInvMempoolOrder(CTxMemPool *mempool)
{
mp = mempool;
}
bool operator()(std::set<uint256>::iterator a, std::set<uint256>::iterator b)
{
/* As std::make_heap produces a max-heap, we want the entries with the
* fewest ancestors/highest fee to sort later. */
return mp->CompareDepthAndScore(*b, *a);
}
};
bool SendMessages(CNode* pto)
{
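
The CompareInvMempoolOrder class added above feeds std::make_heap, which builds a max-heap; by reversing its arguments the comparator makes the transaction that should be announced first (fewest unconfirmed ancestors, then highest fee) compare as largest, so it surfaces at the top of the heap and only the handful of entries actually sent ever need ordering. A self-contained sketch of that pattern, with a simplified Entry type and a stand-in CompareDepthAndScore that are illustrative only, not the mempool's real comparison:

#include <algorithm>
#include <cstdio>
#include <vector>

struct Entry { unsigned depth; double feerate; };   // stand-in for a mempool entry

// True if a should be announced before b: fewer in-mempool ancestors first,
// then higher fee rate. (Simplified stand-in for the mempool's CompareDepthAndScore.)
static bool CompareDepthAndScore(const Entry& a, const Entry& b)
{
    if (a.depth != b.depth) return a.depth < b.depth;
    return a.feerate > b.feerate;
}

int main()
{
    std::vector<Entry> candidates = {{2, 1.0}, {0, 5.0}, {1, 9.0}, {0, 2.0}};

    // Reverse the arguments, as CompareInvMempoolOrder does: the entry that
    // should go first then compares as "largest" and sits on top of the max-heap.
    auto cmp = [](const Entry& a, const Entry& b) { return CompareDepthAndScore(b, a); };
    std::make_heap(candidates.begin(), candidates.end(), cmp);

    // Pop only as many entries as will actually be relayed (2 of 4 here);
    // the remainder is never fully sorted.
    for (int i = 0; i < 2 && !candidates.empty(); ++i) {
        std::pop_heap(candidates.begin(), candidates.end(), cmp);
        Entry e = candidates.back();
        candidates.pop_back();
        std::printf("announce depth=%u feerate=%.1f\n", e.depth, e.feerate);
    }
    return 0;
}

Because only up to INVENTORY_BROADCAST_MAX entries are popped per trickle tick, the per-send cost is proportional to the number sent times log of the candidate count, rather than a full sort of the peer's whole to-send set.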
@@ -5798,49 +5794,127 @@ bool SendMessages(CNode* pto)
// Message: inventory
//
vector<CInv> vInv;
vector<CInv> vInvWait;
{
bool fSendTrickle = pto->fWhitelisted;
if (pto->nNextInvSend < nNow) {
fSendTrickle = true;
pto->nNextInvSend = PoissonNextSend(nNow, AVG_INVENTORY_BROADCAST_INTERVAL);
}
LOCK(pto->cs_inventory);
vInv.reserve(std::min<size_t>(1000, pto->vInventoryToSend.size()));
vInvWait.reserve(pto->vInventoryToSend.size());
BOOST_FOREACH(const CInv& inv, pto->vInventoryToSend)
{
if (inv.type == MSG_TX && pto->filterInventoryKnown.contains(inv.hash))
continue;
vInv.reserve(std::max<size_t>(pto->vInventoryBlockToSend.size(), INVENTORY_BROADCAST_MAX));
// trickle out tx inv to protect privacy
if (inv.type == MSG_TX && !fSendTrickle)
{
// 1/4 of tx invs blast to all immediately
static uint256 hashSalt;
if (hashSalt.IsNull())
hashSalt = GetRandHash();
uint256 hashRand = ArithToUint256(UintToArith256(inv.hash) ^ UintToArith256(hashSalt));
hashRand = Hash(BEGIN(hashRand), END(hashRand));
bool fTrickleWait = ((UintToArith256(hashRand) & 3) != 0);
if (fTrickleWait)
{
vInvWait.push_back(inv);
continue;
}
}
pto->filterInventoryKnown.insert(inv.hash);
vInv.push_back(inv);
if (vInv.size() >= 1000)
{
// Add blocks
BOOST_FOREACH(const uint256& hash, pto->vInventoryBlockToSend) {
vInv.push_back(CInv(MSG_BLOCK, hash));
if (vInv.size() == MAX_INV_SZ) {
pto->PushMessage(NetMsgType::INV, vInv);
vInv.clear();
}
}
pto->vInventoryToSend = vInvWait;
pto->vInventoryBlockToSend.clear();
// Check whether periodic sends should happen
bool fSendTrickle = pto->fWhitelisted;
if (pto->nNextInvSend < nNow) {
fSendTrickle = true;
// Use half the delay for outbound peers, as there is less privacy concern for them.
pto->nNextInvSend = PoissonNextSend(nNow, INVENTORY_BROADCAST_INTERVAL >> !pto->fInbound);
}
// Time to send but the peer has requested we not relay transactions.
if (fSendTrickle) {
LOCK(pto->cs_filter);
if (!pto->fRelayTxes) pto->setInventoryTxToSend.clear();
}
// Respond to BIP35 mempool requests
if (fSendTrickle && pto->fSendMempool) {
std::vector<uint256> vtxid;
mempool.queryHashes(vtxid);
pto->fSendMempool = false;
CAmount filterrate = 0;
{
LOCK(pto->cs_feeFilter);
filterrate = pto->minFeeFilter;
}
LOCK(pto->cs_filter);
BOOST_FOREACH(const uint256& hash, vtxid) {
CInv inv(MSG_TX, hash);
pto->setInventoryTxToSend.erase(hash);
if (filterrate) {
CFeeRate feeRate;
mempool.lookupFeeRate(hash, feeRate);
if (feeRate.GetFeePerK() < filterrate)
continue;
}
if (pto->pfilter) {
CTransaction tx;
bool fInMemPool = mempool.lookup(hash, tx);
if (!fInMemPool) continue; // another thread removed since queryHashes, maybe...
if (!pto->pfilter->IsRelevantAndUpdate(tx)) continue;
}
pto->filterInventoryKnown.insert(hash);
vInv.push_back(inv);
if (vInv.size() == MAX_INV_SZ) {
pto->PushMessage(NetMsgType::INV, vInv);
vInv.clear();
}
}
}
// Determine transactions to relay
if (fSendTrickle) {
// Produce a vector with all candidates for sending
vector<std::set<uint256>::iterator> vInvTx;
vInvTx.reserve(pto->setInventoryTxToSend.size());
for (std::set<uint256>::iterator it = pto->setInventoryTxToSend.begin(); it != pto->setInventoryTxToSend.end(); it++) {
vInvTx.push_back(it);
}
CAmount filterrate = 0;
{
LOCK(pto->cs_feeFilter);
filterrate = pto->minFeeFilter;
}
// Topologically and fee-rate sort the inventory we send for privacy and priority reasons.
// A heap is used so that not all items need sorting if only a few are being sent.
CompareInvMempoolOrder compareInvMempoolOrder(&mempool);
std::make_heap(vInvTx.begin(), vInvTx.end(), compareInvMempoolOrder);
// No reason to drain out at many times the network's capacity,
// especially since we have many peers and some will draw much shorter delays.
unsigned int nRelayedTransactions = 0;
LOCK(pto->cs_filter);
while (!vInvTx.empty() && nRelayedTransactions < INVENTORY_BROADCAST_MAX) {
// Fetch the top element from the heap
std::pop_heap(vInvTx.begin(), vInvTx.end(), compareInvMempoolOrder);
std::set<uint256>::iterator it = vInvTx.back();
vInvTx.pop_back();
uint256 hash = *it;
// Remove it from the to-be-sent set
pto->setInventoryTxToSend.erase(it);
// Check if not in the filter already
if (pto->filterInventoryKnown.contains(hash)) {
continue;
}
// Not in the mempool anymore? don't bother sending it.
CFeeRate feeRate;
if (!mempool.lookupFeeRate(hash, feeRate)) {
continue;
}
if (filterrate && feeRate.GetFeePerK() < filterrate) {
continue;
}
if (pto->pfilter) {
CTransaction tx;
if (!mempool.lookup(hash, tx)) continue;
if (!pto->pfilter->IsRelevantAndUpdate(tx)) continue;
}
// Send
vInv.push_back(CInv(MSG_TX, hash));
nRelayedTransactions++;
if (vInv.size() == MAX_INV_SZ) {
pto->PushMessage(NetMsgType::INV, vInv);
vInv.clear();
}
pto->filterInventoryKnown.insert(hash);
}
}
}
if (!vInv.empty())
pto->PushMessage(NetMsgType::INV, vInv);
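
Throughout the new send path, inventory goes out only when the peer's trickle timer fires, and that timer is re-armed with PoissonNextSend(nNow, INVENTORY_BROADCAST_INTERVAL >> !pto->fInbound): exponentially distributed delays make announcement timing uninformative about when a transaction entered the mempool, and outbound peers use half the average delay since there is less privacy concern for them. A minimal sketch of that scheduling idea, assuming a 5-second average interval purely for illustration (the real constant and the exact PoissonNextSend implementation live outside this diff):

#include <chrono>
#include <cstdint>
#include <cstdio>
#include <random>

// Draw the next send time (in microseconds) so that send events form a Poisson
// process: inter-send delays are exponentially distributed around the average.
int64_t NextInvSendTime(int64_t now_usec, double avg_interval_seconds)
{
    static std::mt19937_64 rng{std::random_device{}()};
    std::exponential_distribution<double> delay(1.0 / avg_interval_seconds);
    return now_usec + static_cast<int64_t>(delay(rng) * 1e6 + 0.5);
}

int main()
{
    const int64_t now = std::chrono::duration_cast<std::chrono::microseconds>(
        std::chrono::steady_clock::now().time_since_epoch()).count();

    const double inventory_broadcast_interval = 5.0; // assumed average, in seconds
    const bool inbound = false;

    // Outbound peers get half the average delay, mirroring
    // INVENTORY_BROADCAST_INTERVAL >> !pto->fInbound in the diff above.
    double avg = inbound ? inventory_broadcast_interval : inventory_broadcast_interval / 2.0;
    int64_t next = NextInvSendTime(now, avg);
    std::printf("next inv send in %.2f seconds\n", (next - now) / 1e6);
    return 0;
}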