server: feed through any SCBs on start up to be restored

In this commit, we modify the server to act as the agent that carries
out the SCB (Static Channel Backup) restoration protocol whenever the
Init/Unlock methods include a set of channels to be recovered.
Olaoluwa Osuntokun
2018-12-09 20:08:32 -08:00
parent af4c11c6c1
commit c5933d45fb
2 changed files with 74 additions and 30 deletions
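For context, the flow the commit message describes looks like this end to end: the walletunlocker's Init/Unlock path hands back a walletunlocker.ChannelsToRecover, the daemon threads that value into newServer, and server.Start() then performs the recovery before any peer connections are accepted. A minimal sketch of such a call site follows; walletInitParams, its ChansToRestore field, and the other surrounding variable names are illustrative assumptions, only newServer's parameter list comes from this commit.

	// Hypothetical daemon start-up code; variable names are illustrative.
	server, err := newServer(
		listenAddrs, chanDB, activeChainControl, idPrivKey,
		walletInitParams.ChansToRestore, // a walletunlocker.ChannelsToRecover
	)
	if err != nil {
		return fmt.Errorf("unable to create server: %v", err)
	}

	// The SCB restoration itself runs inside Start(), before the
	// connection manager begins accepting peers (see the Start() hunks
	// below).
	if err := server.Start(); err != nil {
		return fmt.Errorf("unable to start server: %v", err)
	}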

@@ -26,6 +26,7 @@ import (
 	sphinx "github.com/lightningnetwork/lightning-onion"
 	"github.com/lightningnetwork/lnd/autopilot"
 	"github.com/lightningnetwork/lnd/brontide"
+	"github.com/lightningnetwork/lnd/chanbackup"
 	"github.com/lightningnetwork/lnd/channeldb"
 	"github.com/lightningnetwork/lnd/channelnotifier"
 	"github.com/lightningnetwork/lnd/contractcourt"
@@ -45,6 +46,7 @@ import (
 	"github.com/lightningnetwork/lnd/sweep"
 	"github.com/lightningnetwork/lnd/ticker"
 	"github.com/lightningnetwork/lnd/tor"
+	"github.com/lightningnetwork/lnd/walletunlocker"
 	"github.com/lightningnetwork/lnd/zpay32"
 )
@@ -184,6 +186,10 @@ type server struct {
// changed since last start.
currentNodeAnn *lnwire.NodeAnnouncement
// chansToRestore is the set of channels that upon starting, the server
// should attempt to restore/recover.
chansToRestore walletunlocker.ChannelsToRecover
quit chan struct{}
wg sync.WaitGroup
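For reference, the new chansToRestore field is a walletunlocker.ChannelsToRecover, which, judging from how Start() consumes it below, is essentially a pair of packed (serialized and encrypted) backup payloads. A rough sketch of that container, with the concrete field types assumed rather than taken from this diff:

	// Sketch of the container handed over by the Init/Unlock RPCs. Only
	// the two field names appear in this commit; the chanbackup.Packed*
	// types are assumptions.
	type ChannelsToRecover struct {
		// PackedMultiChanBackup is a single packed blob bundling the
		// backups of many channels at once.
		PackedMultiChanBackup chanbackup.PackedMulti

		// PackedSingleChanBackups is a set of packed single-channel
		// backups, one per channel.
		PackedSingleChanBackups chanbackup.PackedSingles
	}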
@@ -237,7 +243,8 @@ func noiseDial(idPriv *btcec.PrivateKey) func(net.Addr) (net.Conn, error) {
 // newServer creates a new instance of the server which is to listen using the
 // passed listener address.
 func newServer(listenAddrs []net.Addr, chanDB *channeldb.DB, cc *chainControl,
-	privKey *btcec.PrivateKey) (*server, error) {
+	privKey *btcec.PrivateKey,
+	chansToRestore walletunlocker.ChannelsToRecover) (*server, error) {
 
 	var err error
@@ -293,11 +300,12 @@ func newServer(listenAddrs []net.Addr, chanDB *channeldb.DB, cc *chainControl,
}
s := &server{
chanDB: chanDB,
cc: cc,
sigPool: lnwallet.NewSigPool(cfg.Workers.Sig, cc.signer),
writePool: writePool,
readPool: readPool,
chanDB: chanDB,
cc: cc,
sigPool: lnwallet.NewSigPool(cfg.Workers.Sig, cc.signer),
writePool: writePool,
readPool: readPool,
chansToRestore: chansToRestore,
invoices: invoices.NewRegistry(chanDB, decodeFinalCltvExpiry),
@@ -1074,8 +1082,6 @@ func (s *server) Start() error {
 	if err := s.fundingMgr.Start(); err != nil {
 		return err
 	}
-	s.connMgr.Start()
-
 	if err := s.invoices.Start(); err != nil {
 		return err
 	}
@@ -1083,6 +1089,37 @@ func (s *server) Start() error {
 		return err
 	}
 
+	// Before we start the connMgr, we'll check to see if we have any
+	// backups to recover. We do this now as we want to ensure that have
+	// all the information we need to handle channel recovery _before_ we
+	// even accept connections from any peers.
+	chanRestorer := &chanDBRestorer{
+		db:         s.chanDB,
+		secretKeys: s.cc.keyRing,
+	}
+	if len(s.chansToRestore.PackedSingleChanBackups) != 0 {
+		err := chanbackup.UnpackAndRecoverSingles(
+			s.chansToRestore.PackedSingleChanBackups,
+			s.cc.keyRing, chanRestorer, s,
+		)
+		if err != nil {
+			return fmt.Errorf("unable to unpack single "+
+				"backups: %v", err)
+		}
+	}
+	if len(s.chansToRestore.PackedMultiChanBackup) != 0 {
+		err := chanbackup.UnpackAndRecoverMulti(
+			s.chansToRestore.PackedMultiChanBackup,
+			s.cc.keyRing, chanRestorer, s,
+		)
+		if err != nil {
+			return fmt.Errorf("unable to unpack chan "+
+				"backup: %v", err)
+		}
+	}
+
+	s.connMgr.Start()
+
 	// With all the relevant sub-systems started, we'll now attempt to
 	// establish persistent connections to our direct channel collaborators
 	// within the network. Before doing so however, we'll prune our set of
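The two UnpackAndRecover calls above take chanRestorer and the server itself as their final arguments, which implies the chanbackup package drives recovery through two small hooks: one that persists the restored channel shells to disk, and one that reconnects to each channel's peer so recovery can proceed. A rough sketch of those contracts, paraphrased rather than taken from this diff:

	// Approximate shape of the hooks consumed by
	// chanbackup.UnpackAndRecoverSingles and UnpackAndRecoverMulti. The
	// chanDBRestorer built above would satisfy the restorer role, while
	// the server, via its peer-connection machinery, acts as the
	// connector.
	type ChannelRestorer interface {
		// RestoreChansFromSingles writes "channel shells" reconstructed
		// from the unpacked single-channel backups into the channel
		// database.
		RestoreChansFromSingles(...chanbackup.Single) error
	}

	type PeerConnector interface {
		// ConnectPeer attempts a connection to the channel peer at one
		// of its known addresses so the recovery flow can continue.
		ConnectPeer(node *btcec.PublicKey, addrs []net.Addr) error
	}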
@@ -2761,7 +2798,8 @@ func (s *server) ConnectToPeer(addr *lnwire.NetAddress, perm bool) error {
 		s.persistentPeersBackoff[targetPub] = cfg.MinBackoff
 	}
 	s.persistentConnReqs[targetPub] = append(
-		s.persistentConnReqs[targetPub], connReq)
+		s.persistentConnReqs[targetPub], connReq,
+	)
 	s.mu.Unlock()
 
 	go s.connMgr.Connect(connReq)