net: use an interface class rather than signals for message processing

Drop boost signals in favor of a stateful class. This will allow the message
processing loop to actually move to net_processing in a future step.
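
For orientation, the replacement is an abstract interface that CConnman calls into directly, where the old code fired boost::signals2 signals (the CNodeSignals bundle) connected at startup. A minimal sketch of the shape this takes in net.h, assuming the names used by Bitcoin Core around this change; treat exact signatures as an approximation, not a verbatim copy of the diff:

#include <atomic>
#include <cstdint>

class CNode;               // full definition lives in net.h
using NodeId = int64_t;    // matches the typedef in net.h

// Stateful replacement for the CNodeSignals bundle of boost::signals2
// slots: the connection manager holds a single pointer to an
// implementation and invokes these hooks directly.
class NetEventsInterface
{
public:
    virtual bool ProcessMessages(CNode* pnode, std::atomic<bool>& interrupt) = 0;
    virtual bool SendMessages(CNode* pnode, std::atomic<bool>& interrupt) = 0;
    virtual void InitializeNode(CNode* pnode) = 0;
    virtual void FinalizeNode(NodeId id, bool& update_connection_time) = 0;
};

PeerLogicValidation implements these hooks alongside its existing CValidationInterface role, which is what lets the message-processing state travel with it into net_processing later.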
Cory Fields
2017-07-06 13:40:09 -04:00
parent 28f11e9406
commit 8ad663c1fa
8 changed files with 103 additions and 126 deletions

src/init.cpp

@@ -195,11 +195,10 @@ void Shutdown()
 #endif
     MapPort(false);
     UnregisterValidationInterface(peerLogic.get());
-    peerLogic.reset();
     g_connman.reset();
+    peerLogic.reset();
 
     StopTorControl();
-    UnregisterNodeSignals(GetNodeSignals());
     if (fDumpMempoolLater && gArgs.GetArg("-persistmempool", DEFAULT_PERSIST_MEMPOOL)) {
         DumpMempool();
     }
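
The reordering in this hunk is the point: CConnman now holds a raw pointer to peerLogic (wired up below via connOptions.m_msgproc), so peerLogic must outlive the connection manager during shutdown. As a sketch, with the rationale as assumed comments rather than text from the commit:

g_connman.reset();   // joins CConnman's threads; nothing can call into m_msgproc afterwards
peerLogic.reset();   // only now is it safe to destroy the object those calls targeted

UnregisterNodeSignals() disappears because there is no longer a global signal registry to disconnect from.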
@@ -1268,7 +1267,6 @@ bool AppInitMain(boost::thread_group& threadGroup, CScheduler& scheduler)
     peerLogic.reset(new PeerLogicValidation(&connman));
     RegisterValidationInterface(peerLogic.get());
-    RegisterNodeSignals(GetNodeSignals());
 
     // sanitize comments per BIP-0014, format user agent and check total size
     std::vector<std::string> uacomments;
@@ -1659,6 +1657,7 @@ bool AppInitMain(boost::thread_group& threadGroup, CScheduler& scheduler)
     connOptions.nMaxFeeler = 1;
     connOptions.nBestHeight = chainActive.Height();
     connOptions.uiInterface = &uiInterface;
+    connOptions.m_msgproc = peerLogic.get();
     connOptions.nSendBufferMaxSize = 1000*gArgs.GetArg("-maxsendbuffer", DEFAULT_MAXSENDBUFFER);
     connOptions.nReceiveFloodSize = 1000*gArgs.GetArg("-maxreceivebuffer", DEFAULT_MAXRECEIVEBUFFER);
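
With m_msgproc set through CConnman::Options, the message-handler thread can dispatch through the pointer directly. Roughly, as a sketch of the loop body inside ThreadMessageHandler (the snapshot and refcount details follow that era's net.cpp, not this diff):

std::vector<CNode*> nodes_copy;
{
    LOCK(cs_vNodes);                       // take a stable snapshot of peers
    nodes_copy = vNodes;
    for (CNode* node : nodes_copy) node->AddRef();
}
for (CNode* node : nodes_copy) {
    if (flagInterruptMsgProc) break;
    // Two direct virtual calls replace firing the old ProcessMessages
    // and SendMessages boost signals:
    m_msgproc->ProcessMessages(node, flagInterruptMsgProc);
    m_msgproc->SendMessages(node, flagInterruptMsgProc);
}
for (CNode* node : nodes_copy) node->Release();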