net: use an interface class rather than signals for message processing

Drop boost signals in favor of a stateful class. This will allow the message
processing loop to actually move to net_processing in a future step.

Github-Pull: #10756
Rebased-From: 8ad663c1fa
Author:       Cory Fields
Date:         2017-07-06 13:40:09 -04:00
Committed-by: MarcoFalke
Parent:       8aee55af3d
Commit:       dc897e53d8

8 changed files with 103 additions and 125 deletions
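
For orientation before the diff: previously, net.cpp fired a set of boost::signals2 signals (the CNodeSignals bundle that GetNodeSignals() below hands out) and net_processing connected handlers to them, leaving no natural home for per-peer state. A compilable sketch of that shape, with trimmed and partly hypothetical signatures (the real signals carried more arguments):

#include <atomic>
#include <boost/signals2/signal.hpp>

class CNode; // opaque here; the real definition lives in net.h

// Trimmed stand-in for the CNodeSignals bundle this commit removes.
struct CNodeSignals {
    boost::signals2::signal<bool (CNode*, std::atomic<bool>&)> ProcessMessages;
    boost::signals2::signal<void (CNode*)> InitializeNode;
};

int main() {
    CNodeSignals sigs;
    // net_processing registered stateless handlers as slots...
    sigs.InitializeNode.connect([](CNode*) { /* no per-peer state here */ });
    // ...and net.cpp fired the signal when an event occurred.
    sigs.InitializeNode(nullptr);
}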

src/init.cpp

@@ -194,11 +194,10 @@ void Shutdown()
 #endif
     MapPort(false);
     UnregisterValidationInterface(peerLogic.get());
-    peerLogic.reset();
     g_connman.reset();
+    peerLogic.reset();
     StopTorControl();
-    UnregisterNodeSignals(GetNodeSignals());
 
     if (fDumpMempoolLater && gArgs.GetArg("-persistmempool", DEFAULT_PERSIST_MEMPOOL)) {
         DumpMempool();
     }
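
The swap of the two resets above is deliberate, not cosmetic: CConnman now calls into peerLogic through a raw interface pointer from its handler threads, so the connection manager must be stopped and destroyed before the object that pointer targets. In miniature (comments are editorial, not part of the commit):

g_connman.reset(); // joins the network threads; the last call through the message-processing pointer happens before this returns
peerLogic.reset(); // only now is it safe to destroy the handler object
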
@@ -1277,7 +1276,6 @@ bool AppInitMain(boost::thread_group& threadGroup, CScheduler& scheduler)
 
     peerLogic.reset(new PeerLogicValidation(&connman));
     RegisterValidationInterface(peerLogic.get());
-    RegisterNodeSignals(GetNodeSignals());
 
     // sanitize comments per BIP-0014, format user agent and check total size
     std::vector<std::string> uacomments;
@@ -1668,6 +1666,7 @@ bool AppInitMain(boost::thread_group& threadGroup, CScheduler& scheduler)
     connOptions.nMaxFeeler = 1;
     connOptions.nBestHeight = chainActive.Height();
     connOptions.uiInterface = &uiInterface;
+    connOptions.m_msgproc = peerLogic.get();
     connOptions.nSendBufferMaxSize = 1000*gArgs.GetArg("-maxsendbuffer", DEFAULT_MAXSENDBUFFER);
     connOptions.nReceiveFloodSize = 1000*gArgs.GetArg("-maxreceivebuffer", DEFAULT_MAXRECEIVEBUFFER);
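
connOptions.m_msgproc is how the pointer reaches CConnman. A self-contained sketch of the wiring pattern, with hypothetical names standing in for CConnman and PeerLogicValidation:

#include <atomic>
#include <cstdio>
#include <vector>

struct Peer { int id; };

// Stand-in for the interface class the commit introduces.
class MessageProcessor {
public:
    virtual bool ProcessMessages(Peer* peer, std::atomic<bool>& interrupt) = 0;
    virtual ~MessageProcessor() = default;
};

// Stand-in for CConnman: owns no handler logic, only a pointer to it.
class ConnectionManager {
public:
    struct Options {
        MessageProcessor* msgproc = nullptr; // mirrors connOptions.m_msgproc
    };
    explicit ConnectionManager(const Options& opts) : m_msgproc(opts.msgproc) {}

    // Stand-in for one pass of the message-handler thread: each event is a
    // plain virtual call instead of a boost::signals2 invocation.
    void RunOnce(std::vector<Peer>& peers, std::atomic<bool>& interrupt) {
        for (Peer& p : peers) {
            if (!m_msgproc->ProcessMessages(&p, interrupt)) break;
        }
    }

private:
    MessageProcessor* const m_msgproc;
};

// Plays the PeerLogicValidation role: an implementation that can hold state.
class PeerLogic : public MessageProcessor {
public:
    bool ProcessMessages(Peer* peer, std::atomic<bool>& interrupt) override {
        std::printf("processing peer %d\n", peer->id);
        return !interrupt;
    }
};

int main() {
    PeerLogic logic;
    ConnectionManager::Options opts;
    opts.msgproc = &logic; // the line this hunk adds, in miniature
    ConnectionManager connman(opts);

    std::atomic<bool> interrupt{false};
    std::vector<Peer> peers{{1}, {2}};
    connman.RunOnce(peers, interrupt);
}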