Merge pull request #2313 from Roasbeef/static-chan-backups

multi: implement new safe static channel backup and recovery scheme, RPCs, and cli commands
Olaoluwa Osuntokun 2019-04-01 16:22:37 -07:00 committed by GitHub
commit c37ea68ba6
31 changed files with 4738 additions and 1360 deletions


@ -108,7 +108,7 @@ type chainControl struct {
signer input.Signer
keyRing keychain.KeyRing
keyRing keychain.SecretKeyRing
wc lnwallet.WalletController


@ -76,6 +76,8 @@ func (b *MultiFile) UpdateAndSwap(newBackup PackedMulti) error {
return ErrNoBackupFileExists
}
log.Infof("Updating backup file at %v", b.fileName)
// If the old backup file still exists, then we'll delete it before
// proceeding.
if _, err := os.Stat(b.tempFileName); err == nil {
@ -94,17 +96,17 @@ func (b *MultiFile) UpdateAndSwap(newBackup PackedMulti) error {
var err error
b.tempFile, err = os.Create(b.tempFileName)
if err != nil {
return err
return fmt.Errorf("unable to create temp file: %v", err)
}
// With the file created, we'll write the new packed multi backup and
// remove the temporary file altogether once this method exits.
_, err = b.tempFile.Write([]byte(newBackup))
if err != nil {
return err
return fmt.Errorf("unable to write backup to temp file: %v", err)
}
if err := b.tempFile.Sync(); err != nil {
return err
return fmt.Errorf("unable to sync temp file: %v", err)
}
defer os.Remove(b.tempFileName)
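
For reference, the complete pattern this method implements — write the new blob to a temp file, sync it, then atomically rename it over the target — looks roughly like the following sketch (illustrative names, not the exact lnd code; the rename step happens later in the real method):

package main

import (
	"fmt"
	"os"
)

// atomicWriteFile writes data to a temp file, syncs it to disk, then
// renames it over target, so readers only ever observe either the old
// file or the complete new one.
func atomicWriteFile(target string, data []byte) error {
	tempName := target + ".tmp"
	tempFile, err := os.Create(tempName)
	if err != nil {
		return fmt.Errorf("unable to create temp file: %v", err)
	}
	defer os.Remove(tempName)

	if _, err := tempFile.Write(data); err != nil {
		tempFile.Close()
		return fmt.Errorf("unable to write temp file: %v", err)
	}

	// Sync forces the contents to disk before the rename makes the
	// new file visible under the target name.
	if err := tempFile.Sync(); err != nil {
		tempFile.Close()
		return fmt.Errorf("unable to sync temp file: %v", err)
	}
	if err := tempFile.Close(); err != nil {
		return err
	}

	// On POSIX file systems, rename atomically replaces the target.
	return os.Rename(tempName, target)
}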


@ -19,6 +19,11 @@ const (
// backup. The serialized format for this version is simply: version ||
// numBackups || SCBs...
DefaultMultiVersion = 0
// NilMultiSizePacked is the size of a "nil" packed Multi (45 bytes).
// This consists of the 24 byte chacha nonce, the 16 byte MAC, one byte
// for the version, and 4 bytes to signal zero entries.
NilMultiSizePacked = 24 + 16 + 1 + 4
)
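
As a sanity check of the byte arithmetic above: packing a Multi with zero SCBs should yield exactly NilMultiSizePacked bytes. A minimal sketch, assuming any keychain.KeyRing implementation and the chanbackup package API shown in this diff:

import (
	"bytes"
	"fmt"

	"github.com/lightningnetwork/lnd/chanbackup"
	"github.com/lightningnetwork/lnd/keychain"
)

// checkNilMultiSize packs an empty Multi and verifies the result is
// 24 (nonce) + 16 (MAC) + 1 (version) + 4 (entry count) = 45 bytes.
func checkNilMultiSize(keyRing keychain.KeyRing) error {
	var empty chanbackup.Multi
	var buf bytes.Buffer
	if err := empty.PackToWriter(&buf, keyRing); err != nil {
		return err
	}
	if buf.Len() != chanbackup.NilMultiSizePacked {
		return fmt.Errorf("unexpected packed size: %v", buf.Len())
	}
	return nil
}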
// Multi is a form of static channel backup that is amenable to being


@ -2,6 +2,7 @@ package chanbackup
import (
"bytes"
"fmt"
"net"
"sync"
"sync/atomic"
@ -47,9 +48,9 @@ type ChannelEvent struct {
// ChannelSubscription represents an intent to be notified of any updates to
// the primary channel state.
type ChannelSubscription struct {
// ChanUpdates is a read-only channel that will be sent upon once the
// primary channel state is updated.
ChanUpdates <-chan ChannelEvent
// ChanUpdates is a channel that will be sent upon once the primary
// channel state is updated.
ChanUpdates chan ChannelEvent
// Cancel is a closure that allows the caller to cancel their
// subscription and free up any resources allocated.
@ -160,6 +161,36 @@ func (s *SubSwapper) Stop() error {
return nil
}
// updateBackupFile updates the backup file in place given the current state of
// the SubSwapper.
func (s *SubSwapper) updateBackupFile() error {
// With our updated channel state obtained, we'll create a new multi
// from our series of singles.
var newMulti Multi
for _, backup := range s.backupState {
newMulti.StaticBackups = append(
newMulti.StaticBackups, backup,
)
}
// Now that our multi has been assembled, we'll attempt to pack
// (encrypt+encode) the new channel state to our target reader.
var b bytes.Buffer
err := newMulti.PackToWriter(&b, s.keyRing)
if err != nil {
return fmt.Errorf("unable to pack multi backup: %v", err)
}
// Finally, we'll swap out the old backup for this new one in a single
// atomic step.
err = s.Swapper.UpdateAndSwap(PackedMulti(b.Bytes()))
if err != nil {
return fmt.Errorf("unable to update multi backup: %v", err)
}
return nil
}
// backupFileUpdater is the primary goroutine of the SubSwapper which is
// responsible for listening for changes to the channel, and updating the
// persistent multi backup state with a new packed multi of the latest channel
@ -172,6 +203,12 @@ func (s *SubSwapper) backupUpdater() {
log.Debugf("SubSwapper's backupUpdater is active!")
// Before we enter our main loop, we'll update the on-disk state with
// the latest Single state, as nodes may have new advertised addresses.
if err := s.updateBackupFile(); err != nil {
log.Errorf("Unable to refresh backup file: %v", err)
}
for {
select {
// The channel state has been modified! We'll evaluate all
@ -183,7 +220,7 @@ func (s *SubSwapper) backupUpdater() {
// For all new open channels, we'll create a new SCB
// given the required information.
for _, newChan := range chanUpdate.NewChans {
log.Debugf("Adding chanenl %v to backup state",
log.Debugf("Adding channel %v to backup state",
newChan.FundingOutpoint)
s.backupState[newChan.FundingOutpoint] = NewSingle(
@ -204,41 +241,20 @@ func (s *SubSwapper) backupUpdater() {
newStateSize := len(s.backupState)
// With our updated channel state obtained, we'll
// create a new multi from our series of singles.
var newMulti Multi
for _, backup := range s.backupState {
newMulti.StaticBackups = append(
newMulti.StaticBackups, backup,
)
}
// Now that our multi has been assembled, we'll attempt
// to pack (encrypt+encode) the new channel state to
// our target reader.
var b bytes.Buffer
err := newMulti.PackToWriter(&b, s.keyRing)
if err != nil {
log.Errorf("unable to pack multi backup: %v",
err)
continue
}
log.Infof("Updating on-disk multi SCB backup: "+
"num_old_chans=%v, num_new_chans=%v",
oldStateSize, newStateSize)
// Finally, we'll swap out the old backup for this new
// one in a single atomic step.
err = s.Swapper.UpdateAndSwap(
PackedMulti(b.Bytes()),
)
if err != nil {
log.Errorf("unable to update multi "+
"backup: %v", err)
continue
// With our new state constructed, we'll atomically update
// the on-disk backup state.
if err := s.updateBackupFile(); err != nil {
log.Errorf("unable to update backup file: %v",
err)
}
// TODO(roasbeef): refresh periodically on a time basis due to
// possible addr changes from node
// Exit at once if a quit signal is detected.
case <-s.quit:
return
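
Putting the pieces above together, a rough sketch of how a caller stands up the sub-swapper (the NewSubSwapper signature is inferred from the tests below; error handling abbreviated):

import (
	"github.com/lightningnetwork/lnd/chanbackup"
	"github.com/lightningnetwork/lnd/keychain"
)

// startBackupSwapper seeds a SubSwapper with the currently open
// channels and starts it; from then on, every open/close notification
// is folded into the on-disk multi backup via updateBackupFile.
func startBackupSwapper(startingChans []chanbackup.Single,
	notifier chanbackup.ChannelNotifier, keyRing keychain.KeyRing,
	swapper chanbackup.Swapper) (*chanbackup.SubSwapper, error) {

	subSwapper, err := chanbackup.NewSubSwapper(
		startingChans, notifier, keyRing, swapper,
	)
	if err != nil {
		return nil, err
	}
	if err := subSwapper.Start(); err != nil {
		return nil, err
	}
	return subSwapper, nil
}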


@ -131,17 +131,23 @@ func TestSubSwapperIdempotentStartStop(t *testing.T) {
keyRing := &mockKeyRing{}
var (
swapper mockSwapper
chanNotifier mockChannelNotifier
)
var chanNotifier mockChannelNotifier
subSwapper, err := NewSubSwapper(nil, &chanNotifier, keyRing, &swapper)
swapper := newMockSwapper()
subSwapper, err := NewSubSwapper(nil, &chanNotifier, keyRing, swapper)
if err != nil {
t.Fatalf("unable to init subSwapper: %v", err)
}
subSwapper.Start()
if err := subSwapper.Start(); err != nil {
t.Fatalf("unable to start swapper: %v", err)
}
// The swapper should write the initial channel state as soon as it's
// active.
backupSet := make(map[wire.OutPoint]Single)
assertExpectedBackupSwap(t, swapper, subSwapper, keyRing, backupSet)
subSwapper.Start()
subSwapper.Stop()
@ -188,6 +194,10 @@ func TestSubSwapperUpdater(t *testing.T) {
}
defer subSwapper.Stop()
// The swapper should write the initial channel state as soon as it's
// active.
assertExpectedBackupSwap(t, swapper, subSwapper, keyRing, backupSet)
// Now that the sub-swapper is active, we'll notify to add a brand new
// channel to the channel state.
newChannel, err := genRandomOpenChannelShell()


@ -9,6 +9,7 @@ import (
"github.com/btcsuite/btcd/btcec"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/wire"
"github.com/btcsuite/btcutil"
"github.com/lightningnetwork/lnd/channeldb"
"github.com/lightningnetwork/lnd/keychain"
"github.com/lightningnetwork/lnd/lnwire"
@ -40,6 +41,11 @@ type Single struct {
// pack the single backup.
Version SingleBackupVersion
// IsInitiator is true if we were the initiator of the channel, and
// false otherwise. We'll need to know this information in order to
// properly re-derive the state hint information.
IsInitiator bool
// ChainHash is a hash which represents the blockchain that this
// channel will be opened within. This value is typically the genesis
// hash. In the case that the original chain went through a contentious
@ -66,16 +72,29 @@ type Single struct {
// authenticated connection for the stored identity public key.
Addresses []net.Addr
// CsvDelay is the local CSV delay used within the channel. We may need
// this value to reconstruct our script to recover the funds on-chain
// after a force close.
CsvDelay uint16
// Capacity is the size of the original channel.
Capacity btcutil.Amount
// PaymentBasePoint describes how to derive base public that's used to
// deriving the key used within the non-delayed pay-to-self output on
// the commitment transaction for a node. With this information, we can
// re-derive the private key needed to sweep the funds on-chain.
PaymentBasePoint keychain.KeyLocator
// LocalChanCfg is our local channel configuration. It contains all the
// information we need to re-derive the keys we used within the
// channel. Most importantly, it allows us to derive the base public
// key that's used to derive the key used within the non-delayed
// pay-to-self output on the commitment transaction for a node. With
// this information, we can re-derive the private key needed to sweep
// the funds on-chain.
//
// NOTE: Of the items in the ChannelConstraints, we only write the CSV
// delay.
LocalChanCfg channeldb.ChannelConfig
// RemoteChanCfg is the remote channel configuration. We store this as
// well since we'll need some of their keys to re-derive things like
// the state hint obfuscator, which will allow us to recognize the
// state they broadcast on-chain.
//
// NOTE: Of the items in the ChannelConstraints, we only write the CSV
// delay.
RemoteChanCfg channeldb.ChannelConfig
// ShaChainRootDesc describes how to derive the private key that was
// used as the shachain root for this channel.
@ -88,8 +107,6 @@ type Single struct {
func NewSingle(channel *channeldb.OpenChannel,
nodeAddrs []net.Addr) Single {
chanCfg := channel.LocalChanCfg
// TODO(roasbeef): update after we start to store the KeyLoc for
// shachain root
@ -105,13 +122,16 @@ func NewSingle(channel *channeldb.OpenChannel,
_, shaChainPoint := btcec.PrivKeyFromBytes(btcec.S256(), b.Bytes())
return Single{
ChainHash: channel.ChainHash,
FundingOutpoint: channel.FundingOutpoint,
ShortChannelID: channel.ShortChannelID,
RemoteNodePub: channel.IdentityPub,
Addresses: nodeAddrs,
CsvDelay: chanCfg.CsvDelay,
PaymentBasePoint: chanCfg.PaymentBasePoint.KeyLocator,
Version: DefaultSingleVersion,
IsInitiator: channel.IsInitiator,
ChainHash: channel.ChainHash,
FundingOutpoint: channel.FundingOutpoint,
ShortChannelID: channel.ShortChannelID,
RemoteNodePub: channel.IdentityPub,
Addresses: nodeAddrs,
Capacity: channel.Capacity,
LocalChanCfg: channel.LocalChanCfg,
RemoteChanCfg: channel.RemoteChanCfg,
ShaChainRootDesc: keychain.KeyDescriptor{
PubKey: shaChainPoint,
KeyLocator: keychain.KeyLocator{
@ -150,14 +170,39 @@ func (s *Single) Serialize(w io.Writer) error {
var singleBytes bytes.Buffer
if err := lnwire.WriteElements(
&singleBytes,
s.IsInitiator,
s.ChainHash[:],
s.FundingOutpoint,
s.ShortChannelID,
s.RemoteNodePub,
s.Addresses,
s.CsvDelay,
uint32(s.PaymentBasePoint.Family),
s.PaymentBasePoint.Index,
s.Capacity,
s.LocalChanCfg.CsvDelay,
// We only need to write out the KeyLocator portion of the
// local channel config.
uint32(s.LocalChanCfg.MultiSigKey.Family),
s.LocalChanCfg.MultiSigKey.Index,
uint32(s.LocalChanCfg.RevocationBasePoint.Family),
s.LocalChanCfg.RevocationBasePoint.Index,
uint32(s.LocalChanCfg.PaymentBasePoint.Family),
s.LocalChanCfg.PaymentBasePoint.Index,
uint32(s.LocalChanCfg.DelayBasePoint.Family),
s.LocalChanCfg.DelayBasePoint.Index,
uint32(s.LocalChanCfg.HtlcBasePoint.Family),
s.LocalChanCfg.HtlcBasePoint.Index,
s.RemoteChanCfg.CsvDelay,
// We only need to write out the raw pubkey for the remote
// channel config.
s.RemoteChanCfg.MultiSigKey.PubKey,
s.RemoteChanCfg.RevocationBasePoint.PubKey,
s.RemoteChanCfg.PaymentBasePoint.PubKey,
s.RemoteChanCfg.DelayBasePoint.PubKey,
s.RemoteChanCfg.HtlcBasePoint.PubKey,
shaChainPub[:],
uint32(s.ShaChainRootDesc.KeyLocator.Family),
s.ShaChainRootDesc.KeyLocator.Index,
@ -201,6 +246,49 @@ func (s *Single) PackToWriter(w io.Writer, keyRing keychain.KeyRing) error {
return encryptPayloadToWriter(rawBytes, w, keyRing)
}
// readLocalKeyDesc reads a KeyDescriptor encoded within an unpacked Single.
// For local KeyDescs, we only write out the KeyLocator information as we can
// re-derive the pubkey from it.
func readLocalKeyDesc(r io.Reader) (keychain.KeyDescriptor, error) {
var keyDesc keychain.KeyDescriptor
var keyFam uint32
if err := lnwire.ReadElements(r, &keyFam); err != nil {
return keyDesc, err
}
keyDesc.Family = keychain.KeyFamily(keyFam)
if err := lnwire.ReadElements(r, &keyDesc.Index); err != nil {
return keyDesc, err
}
return keyDesc, nil
}
// readRemoteKeyDesc reads a remote KeyDescriptor encoded within an unpacked
// Single. For remote KeyDescs, we write out only the PubKey since we don't
// actually have the KeyLocator data.
func readRemoteKeyDesc(r io.Reader) (keychain.KeyDescriptor, error) {
var (
keyDesc keychain.KeyDescriptor
pub [33]byte
)
_, err := io.ReadFull(r, pub[:])
if err != nil {
return keyDesc, err
}
keyDesc.PubKey, err = btcec.ParsePubKey(pub[:], btcec.S256())
if err != nil {
return keyDesc, err
}
keyDesc.PubKey.Curve = nil
return keyDesc, nil
}
// Deserialize attempts to read the raw plaintext serialized SCB from the
// passed io.Reader. If the method is successful, then the target
// StaticChannelBackup will be fully populated.
@ -228,20 +316,59 @@ func (s *Single) Deserialize(r io.Reader) error {
}
err = lnwire.ReadElements(
r, s.ChainHash[:], &s.FundingOutpoint, &s.ShortChannelID,
&s.RemoteNodePub, &s.Addresses, &s.CsvDelay,
r, &s.IsInitiator, s.ChainHash[:], &s.FundingOutpoint,
&s.ShortChannelID, &s.RemoteNodePub, &s.Addresses, &s.Capacity,
)
if err != nil {
return err
}
var keyFam uint32
if err := lnwire.ReadElements(r, &keyFam); err != nil {
err = lnwire.ReadElements(r, &s.LocalChanCfg.CsvDelay)
if err != nil {
return err
}
s.LocalChanCfg.MultiSigKey, err = readLocalKeyDesc(r)
if err != nil {
return err
}
s.LocalChanCfg.RevocationBasePoint, err = readLocalKeyDesc(r)
if err != nil {
return err
}
s.LocalChanCfg.PaymentBasePoint, err = readLocalKeyDesc(r)
if err != nil {
return err
}
s.LocalChanCfg.DelayBasePoint, err = readLocalKeyDesc(r)
if err != nil {
return err
}
s.LocalChanCfg.HtlcBasePoint, err = readLocalKeyDesc(r)
if err != nil {
return err
}
s.PaymentBasePoint.Family = keychain.KeyFamily(keyFam)
err = lnwire.ReadElements(r, &s.PaymentBasePoint.Index)
err = lnwire.ReadElements(r, &s.RemoteChanCfg.CsvDelay)
if err != nil {
return err
}
s.RemoteChanCfg.MultiSigKey, err = readRemoteKeyDesc(r)
if err != nil {
return err
}
s.RemoteChanCfg.RevocationBasePoint, err = readRemoteKeyDesc(r)
if err != nil {
return err
}
s.RemoteChanCfg.PaymentBasePoint, err = readRemoteKeyDesc(r)
if err != nil {
return err
}
s.RemoteChanCfg.DelayBasePoint, err = readRemoteKeyDesc(r)
if err != nil {
return err
}
s.RemoteChanCfg.HtlcBasePoint, err = readRemoteKeyDesc(r)
if err != nil {
return err
}
@ -256,7 +383,7 @@ func (s *Single) Deserialize(r io.Reader) error {
}
// Since this field is optional, we'll check to see if the pubkey has
// ben specified or not.
// been specified or not.
if !bytes.Equal(shaChainPub[:], zeroPub[:]) {
s.ShaChainRootDesc.PubKey, err = btcec.ParsePubKey(
shaChainPub[:], btcec.S256(),
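
Because the encoding above is asymmetric (local configs carry only KeyLocators, remote configs only pubkeys), a plaintext round-trip is a quick sketch for checking that the write and read paths agree:

import (
	"bytes"
	"fmt"

	"github.com/lightningnetwork/lnd/chanbackup"
)

// roundTripSingle serializes a Single and reads it back, spot-checking
// one field; a full comparison would mirror assertSingleEqual below.
func roundTripSingle(single chanbackup.Single) error {
	var buf bytes.Buffer
	if err := single.Serialize(&buf); err != nil {
		return err
	}
	var restored chanbackup.Single
	if err := restored.Deserialize(&buf); err != nil {
		return err
	}
	if restored.FundingOutpoint != single.FundingOutpoint {
		return fmt.Errorf("outpoint mismatch: %v vs %v",
			restored.FundingOutpoint, single.FundingOutpoint)
	}
	return nil
}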


@ -42,6 +42,10 @@ func assertSingleEqual(t *testing.T, a, b Single) {
t.Fatalf("versions don't match: %v vs %v", a.Version,
b.Version)
}
if a.IsInitiator != b.IsInitiator {
t.Fatalf("initiators don't match: %v vs %v", a.IsInitiator,
b.IsInitiator)
}
if a.ChainHash != b.ChainHash {
t.Fatalf("chainhash doesn't match: %v vs %v", a.ChainHash,
b.ChainHash)
@ -54,24 +58,29 @@ func assertSingleEqual(t *testing.T, a, b Single) {
t.Fatalf("chan id doesn't match: %v vs %v",
a.ShortChannelID, b.ShortChannelID)
}
if a.Capacity != b.Capacity {
t.Fatalf("capacity doesn't match: %v vs %v",
a.Capacity, b.Capacity)
}
if !a.RemoteNodePub.IsEqual(b.RemoteNodePub) {
t.Fatalf("node pubs don't match %x vs %x",
a.RemoteNodePub.SerializeCompressed(),
b.RemoteNodePub.SerializeCompressed())
}
if a.CsvDelay != b.CsvDelay {
t.Fatalf("csv delay doesn't match: %v vs %v", a.CsvDelay,
b.CsvDelay)
if !reflect.DeepEqual(a.LocalChanCfg, b.LocalChanCfg) {
t.Fatalf("local chan config doesn't match: %v vs %v",
spew.Sdump(a.LocalChanCfg),
spew.Sdump(b.LocalChanCfg))
}
if !reflect.DeepEqual(a.PaymentBasePoint, b.PaymentBasePoint) {
t.Fatalf("base point doesn't match: %v vs %v",
spew.Sdump(a.PaymentBasePoint),
spew.Sdump(b.PaymentBasePoint))
if !reflect.DeepEqual(a.RemoteChanCfg, b.RemoteChanCfg) {
t.Fatalf("remote chan config doesn't match: %v vs %v",
spew.Sdump(a.RemoteChanCfg),
spew.Sdump(b.RemoteChanCfg))
}
if !reflect.DeepEqual(a.ShaChainRootDesc, b.ShaChainRootDesc) {
t.Fatalf("sha chain point doesn't match: %v vs %v",
spew.Sdump(a.PaymentBasePoint),
spew.Sdump(b.PaymentBasePoint))
spew.Sdump(a.ShaChainRootDesc),
spew.Sdump(b.ShaChainRootDesc))
}
if len(a.Addresses) != len(b.Addresses) {
@ -110,8 +119,14 @@ func genRandomOpenChannelShell() (*channeldb.OpenChannel, error) {
shaChainProducer := shachain.NewRevocationProducer(shaChainRoot)
var isInitiator bool
if rand.Int63()%2 == 0 {
isInitiator = true
}
return &channeldb.OpenChannel{
ChainHash: chainHash,
IsInitiator: isInitiator,
FundingOutpoint: chanPoint,
ShortChannelID: lnwire.NewShortChanIDFromInt(
uint64(rand.Int63()),
@ -121,12 +136,56 @@ func genRandomOpenChannelShell() (*channeldb.OpenChannel, error) {
ChannelConstraints: channeldb.ChannelConstraints{
CsvDelay: uint16(rand.Int63()),
},
MultiSigKey: keychain.KeyDescriptor{
KeyLocator: keychain.KeyLocator{
Family: keychain.KeyFamily(rand.Int63()),
Index: uint32(rand.Int63()),
},
},
RevocationBasePoint: keychain.KeyDescriptor{
KeyLocator: keychain.KeyLocator{
Family: keychain.KeyFamily(rand.Int63()),
Index: uint32(rand.Int63()),
},
},
PaymentBasePoint: keychain.KeyDescriptor{
KeyLocator: keychain.KeyLocator{
Family: keychain.KeyFamily(rand.Int63()),
Index: uint32(rand.Int63()),
},
},
DelayBasePoint: keychain.KeyDescriptor{
KeyLocator: keychain.KeyLocator{
Family: keychain.KeyFamily(rand.Int63()),
Index: uint32(rand.Int63()),
},
},
HtlcBasePoint: keychain.KeyDescriptor{
KeyLocator: keychain.KeyLocator{
Family: keychain.KeyFamily(rand.Int63()),
Index: uint32(rand.Int63()),
},
},
},
RemoteChanCfg: channeldb.ChannelConfig{
ChannelConstraints: channeldb.ChannelConstraints{
CsvDelay: uint16(rand.Int63()),
},
MultiSigKey: keychain.KeyDescriptor{
PubKey: pub,
},
RevocationBasePoint: keychain.KeyDescriptor{
PubKey: pub,
},
PaymentBasePoint: keychain.KeyDescriptor{
PubKey: pub,
},
DelayBasePoint: keychain.KeyDescriptor{
PubKey: pub,
},
HtlcBasePoint: keychain.KeyDescriptor{
PubKey: pub,
},
},
RevocationProducer: shaChainProducer,
}, nil

channel_notifier.go (new file, 145 lines)

@ -0,0 +1,145 @@
package main
import (
"fmt"
"net"
"github.com/btcsuite/btcd/btcec"
"github.com/btcsuite/btcd/wire"
"github.com/lightningnetwork/lnd/chanbackup"
"github.com/lightningnetwork/lnd/channelnotifier"
)
// addrSource is an interface that allows us to get the addresses for a target
// node. We'll need this in order to be able to properly proxy the
// notifications to create SCBs.
type addrSource interface {
// AddrsForNode returns all known addresses for the target node public
// key.
AddrsForNode(nodePub *btcec.PublicKey) ([]net.Addr, error)
}
// channelNotifier is an implementation of the chanbackup.ChannelNotifier
// interface using the existing channelnotifier.ChannelNotifier struct. This
// implementation allows us to satisfy all the dependencies of the
// chanbackup.SubSwapper struct.
type channelNotifier struct {
// chanNotifier is the base channel notifier that we'll proxy events
// from.
chanNotifier *channelnotifier.ChannelNotifier
// addrs is an implementation of the addrSource interface that allows
// us to get the latest set of addresses for a given node. We'll need
// this to be able to create an SCB for new channels.
addrs addrSource
}
// SubscribeChans requests a new channel subscription relative to the initial
// set of known channels. We use the knownChans as a synchronization point to
// ensure that the chanbackup.SubSwapper does not miss any channel open or
// close events in the period between when it's created, and when it requests
// the channel subscription.
//
// NOTE: This is part of the chanbackup.ChannelNotifier interface.
func (c *channelNotifier) SubscribeChans(startingChans map[wire.OutPoint]struct{}) (
*chanbackup.ChannelSubscription, error) {
ltndLog.Infof("Channel backup proxy channel notifier starting")
// TODO(roasbeef): read existing set of chans and diff
quit := make(chan struct{})
chanUpdates := make(chan chanbackup.ChannelEvent, 1)
// In order to adhere to the interface, we'll proxy the events from the
// channel notifier to the sub-swapper in a format it understands.
go func() {
// First, we'll subscribe to the primary channel notifier so we can
// obtain events for newly opened/closed channels.
chanSubscription, err := c.chanNotifier.SubscribeChannelEvents()
if err != nil {
panic(fmt.Sprintf("unable to subscribe to chans: %v",
err))
}
defer chanSubscription.Cancel()
for {
select {
// A new event has been sent by the chanNotifier, we'll
// filter out the events we actually care about and
// send them to the sub-swapper.
case e := <-chanSubscription.Updates():
// TODO(roasbeef): batch dispatch ntnfs
switch event := e.(type) {
// A new channel has been opened, we'll obtain
// the node address, then send to the
// sub-swapper.
case channelnotifier.OpenChannelEvent:
nodeAddrs, err := c.addrs.AddrsForNode(
event.Channel.IdentityPub,
)
if err != nil {
pub := event.Channel.IdentityPub
ltndLog.Errorf("unable to "+
"fetch addrs for %x: %v",
pub.SerializeCompressed(),
err)
}
channel := event.Channel
chanEvent := chanbackup.ChannelEvent{
NewChans: []chanbackup.ChannelWithAddrs{
{
OpenChannel: channel,
Addrs: nodeAddrs,
},
},
}
select {
case chanUpdates <- chanEvent:
case <-quit:
return
}
// An existing channel has been closed, we'll
// send only the chanPoint of the closed
// channel to the sub-swapper.
case channelnotifier.ClosedChannelEvent:
chanPoint := event.CloseSummary.ChanPoint
chanEvent := chanbackup.ChannelEvent{
ClosedChans: []wire.OutPoint{
chanPoint,
},
}
select {
case chanUpdates <- chanEvent:
case <-quit:
return
}
}
// The cancel method has been called, signalling us to
// exit
case <-quit:
return
}
}
}()
return &chanbackup.ChannelSubscription{
ChanUpdates: chanUpdates,
Cancel: func() {
close(quit)
},
}, nil
}
// A compile-time constraint to ensure channelNotifier implements
// chanbackup.ChannelNotifier.
var _ chanbackup.ChannelNotifier = (*channelNotifier)(nil)


@ -2425,7 +2425,9 @@ func putChanInfo(chanBucket *bbolt.Bucket, channel *OpenChannel) error {
}
// For single funder channels that we initiated, write the funding txn.
if channel.ChanType == SingleFunder && channel.IsInitiator {
if channel.ChanType == SingleFunder && channel.IsInitiator &&
!channel.hasChanStatus(ChanStatusRestored) {
if err := WriteElement(&w, channel.FundingTxn); err != nil {
return err
}
@ -2545,7 +2547,9 @@ func fetchChanInfo(chanBucket *bbolt.Bucket, channel *OpenChannel) error {
}
// For single funder channels that we initiated, read the funding txn.
if channel.ChanType == SingleFunder && channel.IsInitiator {
if channel.ChanType == SingleFunder && channel.IsInitiator &&
!channel.hasChanStatus(ChanStatusRestored) {
if err := ReadElement(r, &channel.FundingTxn); err != nil {
return err
}


@ -906,6 +906,12 @@ func (d *DB) RestoreChannelShells(channelShells ...*ChannelShell) error {
for _, channelShell := range channelShells {
channel := channelShell.Chan
// When we make a channel, we mark that the channel has
// been restored. This will signal to other sub-systems
// not to attempt to use the channel as if it were a
// regular one.
channel.chanStatus |= ChanStatusRestored
// First, we'll attempt to create a new open channel
// and link node for this channel. If the channel
// already exists, then in order to ensure this method
@ -930,6 +936,7 @@ func (d *DB) RestoreChannelShells(channelShells ...*ChannelShell) error {
ChannelID: channel.ShortChannelID.ToUint64(),
ChainHash: channel.ChainHash,
ChannelPoint: channel.FundingOutpoint,
Capacity: channel.Capacity,
}
nodes := tx.Bucket(nodeBucket)
@ -958,7 +965,7 @@ func (d *DB) RestoreChannelShells(channelShells ...*ChannelShell) error {
// With the edge info shell constructed, we'll now add
// it to the graph.
err = chanGraph.addChannelEdge(tx, &edgeInfo)
if err != nil {
if err != nil && err != ErrEdgeAlreadyExist {
return err
}
@ -1011,7 +1018,9 @@ func (d *DB) AddrsForNode(nodePub *btcec.PublicKey) ([]net.Addr, error) {
}
compressedPubKey := nodePub.SerializeCompressed()
graphNode, err = fetchLightningNode(nodes, compressedPubKey)
if err != nil {
if err != nil && err != ErrGraphNodeNotFound {
// If the node isn't found, then that's OK, as we still
// have the link node data.
return err
}

chanrestore.go (new file, 187 lines)

@ -0,0 +1,187 @@
package main
import (
"fmt"
"net"
"github.com/btcsuite/btcd/btcec"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/lightningnetwork/lnd/chanbackup"
"github.com/lightningnetwork/lnd/channeldb"
"github.com/lightningnetwork/lnd/contractcourt"
"github.com/lightningnetwork/lnd/keychain"
"github.com/lightningnetwork/lnd/lnwire"
"github.com/lightningnetwork/lnd/shachain"
)
// chanDBRestorer is an implementation of the chanbackup.ChannelRestorer
// interface that is able to properly map a Single backup, into a
// channeldb.ChannelShell which is required to fully restore a channel. We also
// need the secret key chain in order to obtain the prior shachain root so we can
// verify the DLP protocol as initiated by the remote node.
type chanDBRestorer struct {
db *channeldb.DB
secretKeys keychain.SecretKeyRing
chainArb *contractcourt.ChainArbitrator
}
// openChannelShell maps the static channel backup into an open channel
// "shell". We say shell as this doesn't include all the information required
// to continue to use the channel, only the minimal amount of information to
// insert this shell channel back into the database.
func (c *chanDBRestorer) openChannelShell(backup chanbackup.Single) (
*channeldb.ChannelShell, error) {
// First, we'll also need to obtain the private key for the shachain
// root from the encoded public key.
//
// TODO(roasbeef): now adds req for hardware signers to impl
// shachain...
privKey, err := c.secretKeys.DerivePrivKey(backup.ShaChainRootDesc)
if err != nil {
return nil, err
}
revRoot, err := chainhash.NewHash(privKey.Serialize())
if err != nil {
return nil, err
}
shaChainProducer := shachain.NewRevocationProducer(*revRoot)
// Each of the keys in our local channel config only have their
// locators populated, so we'll re-derive the raw key now as we'll need
// it in order to carry out the DLP protocol.
backup.LocalChanCfg.MultiSigKey, err = c.secretKeys.DeriveKey(
backup.LocalChanCfg.MultiSigKey.KeyLocator,
)
if err != nil {
return nil, fmt.Errorf("unable to derive multi sig key: %v", err)
}
backup.LocalChanCfg.RevocationBasePoint, err = c.secretKeys.DeriveKey(
backup.LocalChanCfg.RevocationBasePoint.KeyLocator,
)
if err != nil {
return nil, fmt.Errorf("unable to derive revocation key: %v", err)
}
backup.LocalChanCfg.PaymentBasePoint, err = c.secretKeys.DeriveKey(
backup.LocalChanCfg.PaymentBasePoint.KeyLocator,
)
if err != nil {
return nil, fmt.Errorf("unable to derive payment key: %v", err)
}
backup.LocalChanCfg.DelayBasePoint, err = c.secretKeys.DeriveKey(
backup.LocalChanCfg.DelayBasePoint.KeyLocator,
)
if err != nil {
return nil, fmt.Errorf("unable to derive delay key: %v", err)
}
backup.LocalChanCfg.HtlcBasePoint, err = c.secretKeys.DeriveKey(
backup.LocalChanCfg.HtlcBasePoint.KeyLocator,
)
if err != nil {
return nil, fmt.Errorf("unable to derive htlc key: %v", err)
}
chanShell := channeldb.ChannelShell{
NodeAddrs: backup.Addresses,
Chan: &channeldb.OpenChannel{
ChainHash: backup.ChainHash,
IsInitiator: backup.IsInitiator,
Capacity: backup.Capacity,
FundingOutpoint: backup.FundingOutpoint,
ShortChannelID: backup.ShortChannelID,
IdentityPub: backup.RemoteNodePub,
IsPending: false,
LocalChanCfg: backup.LocalChanCfg,
RemoteChanCfg: backup.RemoteChanCfg,
RemoteCurrentRevocation: backup.RemoteNodePub,
RevocationStore: shachain.NewRevocationStore(),
RevocationProducer: shaChainProducer,
},
}
return &chanShell, nil
}
// RestoreChansFromSingles attempts to map the set of single channel backups to
// channel shells that will be stored persistently. Once these shells have been
// stored on disk, we'll be able to connect to the channel peer and execute the
// data loss recovery protocol.
//
// NOTE: Part of the chanbackup.ChannelRestorer interface.
func (c *chanDBRestorer) RestoreChansFromSingles(backups ...chanbackup.Single) error {
channelShells := make([]*channeldb.ChannelShell, 0, len(backups))
for _, backup := range backups {
chanShell, err := c.openChannelShell(backup)
if err != nil {
return err
}
channelShells = append(channelShells, chanShell)
}
ltndLog.Infof("Inserting %v SCB channel shells into DB",
len(channelShells))
// Now that we have all the backups mapped into a series of Singles,
// we'll insert them all into the database.
if err := c.db.RestoreChannelShells(channelShells...); err != nil {
return err
}
ltndLog.Infof("Informing chain watchers of new restored channels")
// Finally, we'll need to inform the chain arbitrator of these new
// channels so we'll properly watch for their ultimate closure on chain
// and sweep them via the DLP.
for _, restoredChannel := range channelShells {
err := c.chainArb.WatchNewChannel(restoredChannel.Chan)
if err != nil {
return err
}
}
return nil
}
// A compile-time constraint to ensure chanDBRestorer implements
// chanbackup.ChannelRestorer.
var _ chanbackup.ChannelRestorer = (*chanDBRestorer)(nil)
// ConnectPeer attempts to connect to the target node at the set of available
// addresses. Once this method returns with a non-nil error, the connector
// should attempt to maintain a persistent connection to the target
// peer in the background.
//
// NOTE: Part of the chanbackup.PeerConnector interface.
func (s *server) ConnectPeer(nodePub *btcec.PublicKey, addrs []net.Addr) error {
// For each of the known addresses, we'll attempt to launch a
// persistent connection to the (pub, addr) pair. In the event that any
// of them connect, all the other stale requests will be cancelled.
for _, addr := range addrs {
netAddr := &lnwire.NetAddress{
IdentityKey: nodePub,
Address: addr,
}
ltndLog.Infof("Attempting to connect to %v for SCB restore "+
"DLP", netAddr)
// Attempt to connect to the peer using this full address. If
// we're unable to connect to them, then we'll try the next
// address in place of it.
if err := s.ConnectToPeer(netAddr, true); err != nil {
ltndLog.Errorf("unable to connect to %v to "+
"complete SCB restore: %v", netAddr, err)
continue
}
// If we connected no problem, then we can exit early as our
// job here is done.
return nil
}
return fmt.Errorf("unable to connect to peer %x for SCB restore",
nodePub.SerializeCompressed())
}


@ -17,6 +17,7 @@ import (
"syscall"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/wire"
"github.com/golang/protobuf/jsonpb"
"github.com/golang/protobuf/proto"
"github.com/lightningnetwork/lnd/lnrpc"
@ -1324,7 +1325,29 @@ var createCommand = cli.Command{
was provided by the user. This should be written down as it can be used
to potentially recover all on-chain funds, and most off-chain funds as
well.
Finally, it's also possible to use this command and a set of static
channel backups to trigger a recovery attempt for the provided Static
Channel Backups. Only one of the three parameters will be accepted. See
the restorechanbackup command for further details w.r.t. the format
accepted.
`,
Flags: []cli.Flag{
cli.StringFlag{
Name: "single_backup",
Usage: "a hex encoded single channel backup obtained " +
"from exportchanbackup",
},
cli.StringFlag{
Name: "multi_backup",
Usage: "a hex encoded multi-channel backup obtained " +
"from exportchanbackup",
},
cli.StringFlag{
Name: "multi_file",
Usage: "the path to a multi-channel back up file",
},
},
Action: actionDecorator(create),
}
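
For example, to initialize a wallet from an existing seed and simultaneously trigger recovery from an on-disk multi backup (path illustrative):

lncli create --multi_file=/path/to/channels.backup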
@ -1566,6 +1589,30 @@ mnemonicCheck:
fmt.Println("\n!!!YOU MUST WRITE DOWN THIS SEED TO BE ABLE TO " +
"RESTORE THE WALLET!!!")
// We'll also check to see if they provided any static channel backups,
// if so, then we'll also tack these onto the final init wallet
// request.
var chanBackups *lnrpc.ChanBackupSnapshot
backups, err := parseChanBackups(ctx)
if err != nil {
return fmt.Errorf("unable to parse chan "+
"backups: %v", err)
}
if backups != nil {
switch {
case backups.GetChanBackups() != nil:
singleBackup := backups.GetChanBackups()
chanBackups = &lnrpc.ChanBackupSnapshot{
SingleChanBackups: singleBackup,
}
case backups.GetMultiChanBackup() != nil:
multiBackup := backups.GetMultiChanBackup()
chanBackups = &lnrpc.ChanBackupSnapshot{
MultiChanBackup: &lnrpc.MultiChanBackup{
MultiChanBackup: multiBackup,
},
}
}
}
// With either the user's prior cipher seed, or a newly generated one,
// we'll go ahead and initialize the wallet.
req := &lnrpc.InitWalletRequest{
@ -1573,6 +1620,7 @@ mnemonicCheck:
CipherSeedMnemonic: cipherSeedMnemonic,
AezeedPassphrase: aezeedPass,
RecoveryWindow: recoveryWindow,
ChannelBackups: chanBackups,
}
if _, err := client.InitWallet(ctxb, req); err != nil {
return err
@ -1646,6 +1694,8 @@ func unlock(ctx *cli.Context) error {
fmt.Println("\nlnd successfully unlocked!")
// TODO(roasbeef): add ability to accept hex single and multi backups
return nil
}
@ -3328,6 +3378,31 @@ var updateChannelPolicyCommand = cli.Command{
Action: actionDecorator(updateChannelPolicy),
}
func parseChanPoint(s string) (*lnrpc.ChannelPoint, error) {
split := strings.Split(s, ":")
if len(split) != 2 {
return nil, fmt.Errorf("expecting chan_point to be in format of: " +
"txid:index")
}
index, err := strconv.ParseInt(split[1], 10, 32)
if err != nil {
return nil, fmt.Errorf("unable to decode output index: %v", err)
}
txid, err := chainhash.NewHashFromStr(split[0])
if err != nil {
return nil, fmt.Errorf("unable to parse hex string: %v", err)
}
return &lnrpc.ChannelPoint{
FundingTxid: &lnrpc.ChannelPoint_FundingTxidBytes{
FundingTxidBytes: txid[:],
},
OutputIndex: uint32(index),
}, nil
}
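
A hypothetical call to the helper above, using an all-zero txid purely to show the expected txid:index shape:

// Illustrative only: parses to output index 1 of the zero txid.
chanPoint, err := parseChanPoint(
	"0000000000000000000000000000000000000000000000000000000000000000:1",
)
if err != nil {
	return err
}
fmt.Println(chanPoint.OutputIndex) // 1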
func updateChannelPolicy(ctx *cli.Context) error {
ctxb := context.Background()
client, cleanUp := getClient(ctx)
@ -3396,22 +3471,9 @@ func updateChannelPolicy(ctx *cli.Context) error {
}
if chanPointStr != "" {
split := strings.Split(chanPointStr, ":")
if len(split) != 2 {
return fmt.Errorf("expecting chan_point to be in format of: " +
"txid:index")
}
index, err := strconv.ParseInt(split[1], 10, 32)
chanPoint, err = parseChanPoint(chanPointStr)
if err != nil {
return fmt.Errorf("unable to decode output index: %v", err)
}
chanPoint = &lnrpc.ChannelPoint{
FundingTxid: &lnrpc.ChannelPoint_FundingTxidStr{
FundingTxidStr: split[0],
},
OutputIndex: uint32(index),
return fmt.Errorf("unable to parse chan point: %v", err)
}
}
@ -3554,3 +3616,378 @@ func forwardingHistory(ctx *cli.Context) error {
printRespJSON(resp)
return nil
}
var exportChanBackupCommand = cli.Command{
Name: "exportchanbackup",
Category: "Channels",
Usage: "Obtain a static channel back up for a selected channels, " +
"or all known channels",
ArgsUsage: "[chan_point] [--all] [--output_file]",
Description: `
This command allows a user to export a Static Channel Backup (SCB) for
a selected channel. SCBs are backups of a channel's initial state,
encrypted with a key derived from the seed of a user. In
the case of partial or complete data loss, the SCB will allow the user
to reclaim settled funds in the channel at its final state. The
exported channel backups can be restored at a later time using the
restorechanbackup command.
This command will return one of two types of channel backups depending
on the set of passed arguments:
* If a target channel point is specified, then a single channel
backup containing only the information for that channel will be
returned.
* If the --all flag is passed, then a multi-channel backup will be
returned. A multi backup is a single encrypted blob (displayed in
hex encoding) that contains several channels in a single cipher
text.
Both of the backup types can be restored using the restorechanbackup
command.
`,
Flags: []cli.Flag{
cli.StringFlag{
Name: "chan_point",
Usage: "the target channel to obtain an SCB for",
},
cli.BoolFlag{
Name: "all",
Usage: "if specified, then a multi backup of all " +
"active channels will be returned",
},
cli.StringFlag{
Name: "output_file",
Usage: `
if specified, then rather than printing a JSON output
of the static channel backup, a serialized version of
the backup (either Single or Multi) will be written to
the target file; this is the same format used by lnd in
its channels.backup file `,
},
},
Action: actionDecorator(exportChanBackup),
}
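
For instance (flag names as defined above; the file name is illustrative):

lncli exportchanbackup --chan_point=<funding_txid>:<output_index>
lncli exportchanbackup --all --output_file=channels.backup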
func exportChanBackup(ctx *cli.Context) error {
ctxb := context.Background()
client, cleanUp := getClient(ctx)
defer cleanUp()
// Show command help if no arguments provided
if ctx.NArg() == 0 && ctx.NumFlags() == 0 {
cli.ShowCommandHelp(ctx, "exportchanbackup")
return nil
}
var (
err error
chanPointStr string
)
args := ctx.Args()
switch {
case ctx.IsSet("chan_point"):
chanPointStr = ctx.String("chan_point")
case args.Present():
chanPointStr = args.First()
case !ctx.IsSet("all"):
return fmt.Errorf("must specify chan_point if --all isn't set")
}
if chanPointStr != "" {
chanPointRPC, err := parseChanPoint(chanPointStr)
if err != nil {
return err
}
chanBackup, err := client.ExportChannelBackup(
ctxb, &lnrpc.ExportChannelBackupRequest{
ChanPoint: chanPointRPC,
},
)
if err != nil {
return err
}
txid, err := chainhash.NewHash(
chanPointRPC.GetFundingTxidBytes(),
)
if err != nil {
return err
}
chanPoint := wire.OutPoint{
Hash: *txid,
Index: chanPointRPC.OutputIndex,
}
printJSON(struct {
ChanPoint string `json:"chan_point"`
ChanBackup string `json:"chan_backup"`
}{
ChanPoint: chanPoint.String(),
ChanBackup: hex.EncodeToString(
chanBackup.ChanBackup,
),
},
)
return nil
}
if !ctx.IsSet("all") {
return fmt.Errorf("if a channel isn't specified, -all must be")
}
chanBackup, err := client.ExportAllChannelBackups(
ctxb, &lnrpc.ChanBackupExportRequest{},
)
if err != nil {
return err
}
if ctx.IsSet("output_file") {
return ioutil.WriteFile(
ctx.String("output_file"),
chanBackup.MultiChanBackup.MultiChanBackup,
0666,
)
}
// TODO(roasbeef): support for export | restore ?
var chanPoints []string
for _, chanPoint := range chanBackup.MultiChanBackup.ChanPoints {
txid, err := chainhash.NewHash(chanPoint.GetFundingTxidBytes())
if err != nil {
return err
}
chanPoints = append(chanPoints, wire.OutPoint{
Hash: *txid,
Index: chanPoint.OutputIndex,
}.String())
}
printJSON(struct {
ChanPoints []string `json:"chan_points"`
MultiChanBackup string `json:"multi_chan_backup"`
}{
ChanPoints: chanPoints,
MultiChanBackup: hex.EncodeToString(
chanBackup.MultiChanBackup.MultiChanBackup,
),
},
)
return nil
}
var verifyChanBackupCommand = cli.Command{
Name: "verifychanbackup",
Category: "Channels",
Usage: "Verify an existing channel backup",
ArgsUsage: "[--single_backup] [--multi_backup] [--multi_file=]",
Description: `
This command allows a user to verify an existing Single or Multi channel
backup for integrity. This is useful when a user has a backup, but is
unsure whether it's valid or intended for the target node.
The command will accept backups in one of three forms:
* A single channel packed SCB, which can be obtained from
exportchanbackup. This should be passed in hex encoded format.
* A packed multi-channel SCB, which couples several individual
static channel backups in a single blob.
* A file path which points to a packed multi-channel backup within a
file, using the same format that lnd does in its channels.backup
file.
`,
Flags: []cli.Flag{
cli.StringFlag{
Name: "single_backup",
Usage: "a hex encoded single channel backup obtained " +
"from exportchanbackup",
},
cli.StringFlag{
Name: "multi_backup",
Usage: "a hex encoded multi-channel backup obtained " +
"from exportchanbackup",
},
cli.StringFlag{
Name: "multi_file",
Usage: "the path to a multi-channel back up file",
},
},
Action: actionDecorator(verifyChanBackup),
}
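
For instance, to verify an on-disk multi backup (path illustrative):

lncli verifychanbackup --multi_file=/path/to/channels.backup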
func verifyChanBackup(ctx *cli.Context) error {
ctxb := context.Background()
client, cleanUp := getClient(ctx)
defer cleanUp()
// Show command help if no arguments provided
if ctx.NArg() == 0 && ctx.NumFlags() == 0 {
cli.ShowCommandHelp(ctx, "verifychanbackup")
return nil
}
backups, err := parseChanBackups(ctx)
if err != nil {
return err
}
verifyReq := lnrpc.ChanBackupSnapshot{}
if backups.GetChanBackups() != nil {
verifyReq.SingleChanBackups = backups.GetChanBackups()
}
if backups.GetMultiChanBackup() != nil {
verifyReq.MultiChanBackup = &lnrpc.MultiChanBackup{
MultiChanBackup: backups.GetMultiChanBackup(),
}
}
resp, err := client.VerifyChanBackup(ctxb, &verifyReq)
if err != nil {
return err
}
printRespJSON(resp)
return nil
}
var restoreChanBackupCommand = cli.Command{
Name: "restorechanbackup",
Category: "Channels",
Usage: "Restore an existing single or multi-channel static channel " +
"backup",
ArgsUsage: "[--single_backup] [--multi_backup] [--multi_file=",
Description: `
Allows a user to restore a Static Channel Backup (SCB) that was
obtained either via the exportchanbackup command, or from lnd's
automatically managed channels.backup file. This command should be used
if a user is attempting to restore a channel due to data loss on a
running node restored with the same seed as the node that created the
channel. If successful, this command will allow the user to recover
the settled funds stored in the recovered channels.
The command will accept backups in one of three forms:
* A single channel packed SCB, which can be obtained from
exportchanbackup. This should be passed in hex encoded format.
* A packed multi-channel SCB, which couples several individual
static channel backups in a single blob.
* A file path which points to a packed multi-channel backup within a
file, using the same format that lnd does in its channels.backup
file.
`,
Flags: []cli.Flag{
cli.StringFlag{
Name: "single_backup",
Usage: "a hex encoded single channel backup obtained " +
"from exportchanbackup",
},
cli.StringFlag{
Name: "multi_backup",
Usage: "a hex encoded multi-channel backup obtained " +
"from exportchanbackup",
},
cli.StringFlag{
Name: "multi_file",
Usage: "the path to a multi-channel back up file",
},
},
Action: actionDecorator(restoreChanBackup),
}
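
For instance (values illustrative):

lncli restorechanbackup --multi_backup=<hex_encoded_backup>
lncli restorechanbackup --multi_file=/path/to/channels.backup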
func parseChanBackups(ctx *cli.Context) (*lnrpc.RestoreChanBackupRequest, error) {
switch {
case ctx.IsSet("single_backup"):
packedBackup, err := hex.DecodeString(
ctx.String("single_backup"),
)
if err != nil {
return nil, fmt.Errorf("unable to decode single packed "+
"backup: %v", err)
}
return &lnrpc.RestoreChanBackupRequest{
Backup: &lnrpc.RestoreChanBackupRequest_ChanBackups{
ChanBackups: &lnrpc.ChannelBackups{
ChanBackups: []*lnrpc.ChannelBackup{
{
ChanBackup: packedBackup,
},
},
},
},
}, nil
case ctx.IsSet("multi_backup"):
packedMulti, err := hex.DecodeString(
ctx.String("multi_backup"),
)
if err != nil {
return nil, fmt.Errorf("unable to decode multi packed "+
"backup: %v", err)
}
return &lnrpc.RestoreChanBackupRequest{
Backup: &lnrpc.RestoreChanBackupRequest_MultiChanBackup{
MultiChanBackup: packedMulti,
},
}, nil
case ctx.IsSet("multi_file"):
packedMulti, err := ioutil.ReadFile(ctx.String("multi_file"))
if err != nil {
return nil, fmt.Errorf("unable to decode multi packed "+
"backup: %v", err)
}
return &lnrpc.RestoreChanBackupRequest{
Backup: &lnrpc.RestoreChanBackupRequest_MultiChanBackup{
MultiChanBackup: packedMulti,
},
}, nil
default:
return nil, nil
}
}
func restoreChanBackup(ctx *cli.Context) error {
ctxb := context.Background()
client, cleanUp := getClient(ctx)
defer cleanUp()
// Show command help if no arguments provided
if ctx.NArg() == 0 && ctx.NumFlags() == 0 {
cli.ShowCommandHelp(ctx, "restorechanbackup")
return nil
}
var req lnrpc.RestoreChanBackupRequest
backups, err := parseChanBackups(ctx)
if err != nil {
return err
}
// Guard against the case where none of the supported backup flags
// were set, as parseChanBackups then returns a nil request.
if backups == nil {
return fmt.Errorf("a backup must be specified with one of the " +
"supported flags")
}
req.Backup = backups.Backup
_, err = client.RestoreChannelBackups(ctxb, &req)
if err != nil {
return fmt.Errorf("unable to restore chan backups: %v", err)
}
return nil
}


@ -295,6 +295,9 @@ func main() {
feeReportCommand,
updateChannelPolicyCommand,
forwardingHistoryCommand,
exportChanBackupCommand,
verifyChanBackupCommand,
restoreChanBackupCommand,
}
// Add any extra autopilot commands determined by build flags.


@ -22,6 +22,7 @@ import (
"github.com/btcsuite/btcutil"
flags "github.com/jessevdk/go-flags"
"github.com/lightningnetwork/lnd/build"
"github.com/lightningnetwork/lnd/chanbackup"
"github.com/lightningnetwork/lnd/htlcswitch/hodl"
"github.com/lightningnetwork/lnd/lncfg"
"github.com/lightningnetwork/lnd/lnrpc/signrpc"
@ -210,10 +211,11 @@ type config struct {
Profile string `long:"profile" description:"Enable HTTP profiling on given port -- NOTE port must be between 1024 and 65535"`
DebugHTLC bool `long:"debughtlc" description:"Activate the debug htlc mode. With the debug HTLC mode, all payments sent use a pre-determined R-Hash. Additionally, all HTLCs sent to a node with the debug HTLC R-Hash are immediately settled in the next available state transition."`
UnsafeDisconnect bool `long:"unsafe-disconnect" description:"Allows the rpcserver to intentionally disconnect from peers with open channels. USED FOR TESTING ONLY."`
UnsafeReplay bool `long:"unsafe-replay" description:"Causes a link to replay the adds on its commitment txn after starting up, this enables testing of the sphinx replay logic."`
MaxPendingChannels int `long:"maxpendingchannels" description:"The maximum number of incoming pending channels permitted per peer."`
BackupFilePath string `long:"backupfilepath" description:"The target location of the channel backup file"`
Bitcoin *chainConfig `group:"Bitcoin" namespace:"bitcoin"`
BtcdMode *btcdConfig `group:"btcd" namespace:"btcd"`
@ -814,7 +816,7 @@ func loadConfig() (*config, error) {
}
// We'll now construct the network directory which will be where we
// store all the data specifc to this chain/network.
// store all the data specific to this chain/network.
networkDir = filepath.Join(
cfg.DataDir, defaultChainSubDirname,
registeredChains.PrimaryChain().String(),
@ -840,6 +842,14 @@ func loadConfig() (*config, error) {
)
}
// Similarly, if a custom backup file path wasn't specified, then
// we'll update the file location to match our set network directory.
if cfg.BackupFilePath == "" {
cfg.BackupFilePath = filepath.Join(
networkDir, chanbackup.DefaultBackupFileName,
)
}
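
With this default in place, an operator only needs to set the flag to relocate the backup file, e.g. in lnd.conf (path illustrative):

backupfilepath=/mnt/usb/lnd/channels.backup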
// Append the network type to the log directory so it is "namespaced"
// per network in the same fashion as the data directory.
cfg.LogDir = filepath.Join(cfg.LogDir,


@ -283,22 +283,22 @@ func (c *chainWatcher) closeObserver(spendNtfn *chainntnfs.SpendEvent) {
c.cfg.chanState.FundingOutpoint)
select {
// We've detected a spend of the channel onchain! Depending on
// the type of spend, we'll act accordingly , so we'll examine
// the spending transaction to determine what we should do.
// We've detected a spend of the channel onchain! Depending on the type
// of spend, we'll act accordingly, so we'll examine the spending
// transaction to determine what we should do.
//
// TODO(Roasbeef): need to be able to ensure this only triggers
// on confirmation, to ensure if multiple txns are broadcast, we
// act on the one that's timestamped
case commitSpend, ok := <-spendNtfn.Spend:
// If the channel was closed, then this means that the
// notifier exited, so we will as well.
// If the channel was closed, then this means that the notifier
// exited, so we will as well.
if !ok {
return
}
// Otherwise, the remote party might have broadcast a
// prior revoked state...!!!
// Otherwise, the remote party might have broadcast a prior
// revoked state...!!!
commitTxBroadcast := commitSpend.SpendingTx
localCommit, remoteCommit, err := c.cfg.chanState.LatestCommitments()
@ -308,9 +308,9 @@ func (c *chainWatcher) closeObserver(spendNtfn *chainntnfs.SpendEvent) {
return
}
// We'll not retrieve the latest sate of the revocation
// store so we can populate the information within the
// channel state object that we have.
// We'll now retrieve the latest state of the revocation store
// so we can populate the information within the channel state
// object that we have.
//
// TODO(roasbeef): mutation is bad mkay
_, err = c.cfg.chanState.RemoteRevocationStore()
@ -320,30 +320,45 @@ func (c *chainWatcher) closeObserver(spendNtfn *chainntnfs.SpendEvent) {
return
}
// If this is our commitment transaction, then we can
// exit here as we don't have any further processing we
// need to do (we can't cheat ourselves :p).
commitmentHash := localCommit.CommitTx.TxHash()
isOurCommitment := commitSpend.SpenderTxHash.IsEqual(
&commitmentHash,
// If this channel has been recovered, then we'll modify our
// behavior as it isn't possible for us to close out the
// channel off-chain ourselves. It can only be the remote party
// force closing, or a cooperative closure we signed off on
// before losing data getting confirmed in the chain.
isRecoveredChan := c.cfg.chanState.HasChanStatus(
channeldb.ChanStatusRestored,
)
if isOurCommitment {
if err := c.dispatchLocalForceClose(
commitSpend, *localCommit,
); err != nil {
log.Errorf("unable to handle local"+
"close for chan_point=%v: %v",
c.cfg.chanState.FundingOutpoint, err)
// If we're not recovering this channel, and this is our
// commitment transaction, then we can exit here as we don't
// have any further processing we need to do (we can't cheat
// ourselves :p).
if !isRecoveredChan {
commitmentHash := localCommit.CommitTx.TxHash()
isOurCommitment := commitSpend.SpenderTxHash.IsEqual(
&commitmentHash,
)
if isOurCommitment {
if err := c.dispatchLocalForceClose(
commitSpend, *localCommit,
); err != nil {
log.Errorf("unable to handle local"+
"close for chan_point=%v: %v",
c.cfg.chanState.FundingOutpoint, err)
}
return
}
return
}
// Next, we'll check to see if this is a cooperative
// channel closure or not. This is characterized by
// having an input sequence number that's finalized.
// This won't happen with regular commitment
// transactions due to the state hint encoding scheme.
// Next, we'll check to see if this is a cooperative channel
// closure or not. This is characterized by having an input
// sequence number that's finalized. This won't happen with
// regular commitment transactions due to the state hint
// encoding scheme.
if commitTxBroadcast.TxIn[0].Sequence == wire.MaxTxInSequenceNum {
// TODO(roasbeef): rare but possible, need itest case
// for
err := c.dispatchCooperativeClose(commitSpend)
if err != nil {
log.Errorf("unable to handle co op close: %v", err)
@ -371,12 +386,12 @@ func (c *chainWatcher) closeObserver(spendNtfn *chainntnfs.SpendEvent) {
}
switch {
// If state number spending transaction matches the
// current latest state, then they've initiated a
// unilateral close. So we'll trigger the unilateral
// close signal so subscribers can clean up the state
// as necessary.
case broadcastStateNum == remoteStateNum:
// If state number spending transaction matches the current
// latest state, then they've initiated a unilateral close. So
// we'll trigger the unilateral close signal so subscribers can
// clean up the state as necessary.
case broadcastStateNum == remoteStateNum && !isRecoveredChan:
err := c.dispatchRemoteForceClose(
commitSpend, *remoteCommit,
c.cfg.chanState.RemoteCurrentRevocation,
@ -387,14 +402,13 @@ func (c *chainWatcher) closeObserver(spendNtfn *chainntnfs.SpendEvent) {
c.cfg.chanState.FundingOutpoint, err)
}
// We'll also handle the case of the remote party
// broadcasting their commitment transaction which is
// one height above ours. This case can arise when we
// initiate a state transition, but the remote party
// has a fail crash _after_ accepting the new state,
// but _before_ sending their signature to us.
// We'll also handle the case of the remote party broadcasting
// their commitment transaction which is one height above ours.
// This case can arise when we initiate a state transition, but
// the remote party has a failure or crash _after_ accepting the new
// state, but _before_ sending their signature to us.
case broadcastStateNum == remoteStateNum+1 &&
remoteChainTip != nil:
remoteChainTip != nil && !isRecoveredChan:
err := c.dispatchRemoteForceClose(
commitSpend, remoteChainTip.Commitment,
@ -410,8 +424,12 @@ func (c *chainWatcher) closeObserver(spendNtfn *chainntnfs.SpendEvent) {
// known state for them, and they don't have a pending
// commitment (we write them to disk before sending out), then
// this means that we've lost data. In this case, we'll enter
// the DLP protocol.
case broadcastStateNum > remoteStateNum:
// the DLP protocol. Otherwise, if we've recovered our channel
// state from scratch, then we don't know what the precise
// current state is, so we assume either the remote party
// forced closed or we've been breached. In the latter case,
// our tower will take care of us.
case broadcastStateNum > remoteStateNum || isRecoveredChan:
log.Warnf("Remote node broadcast state #%v, "+
"which is more than 1 beyond best known "+
"state #%v!!! Attempting recovery...",
@ -476,12 +494,11 @@ func (c *chainWatcher) closeObserver(spendNtfn *chainntnfs.SpendEvent) {
c.cfg.chanState.FundingOutpoint, err)
}
// If the state number broadcast is lower than the
// remote node's current un-revoked height, then
// THEY'RE ATTEMPTING TO VIOLATE THE CONTRACT LAID OUT
// WITHIN THE PAYMENT CHANNEL. Therefore we close the
// signal indicating a revoked broadcast to allow
// subscribers to swiftly dispatch justice!!!
// If the state number broadcast is lower than the remote
// node's current un-revoked height, then THEY'RE ATTEMPTING TO
// VIOLATE THE CONTRACT LAID OUT WITHIN THE PAYMENT CHANNEL.
// Therefore we close the signal indicating a revoked broadcast
// to allow subscribers to swiftly dispatch justice!!!
case broadcastStateNum < remoteStateNum:
err := c.dispatchContractBreach(
commitSpend, remoteCommit,
@ -494,8 +511,8 @@ func (c *chainWatcher) closeObserver(spendNtfn *chainntnfs.SpendEvent) {
}
}
// Now that a spend has been detected, we've done our
// job, so we'll exit immediately.
// Now that a spend has been detected, we've done our job, so
// we'll exit immediately.
return
// The chainWatcher has been signalled to exit, so we'll do so now.
@ -564,7 +581,10 @@ func (c *chainWatcher) dispatchCooperativeClose(commitSpend *chainntnfs.SpendDet
}
// Attempt to add a channel sync message to the close summary.
chanSync, err := lnwallet.ChanSyncMsg(c.cfg.chanState)
chanSync, err := lnwallet.ChanSyncMsg(
c.cfg.chanState,
c.cfg.chanState.HasChanStatus(channeldb.ChanStatusRestored),
)
if err != nil {
log.Errorf("ChannelPoint(%v): unable to create channel sync "+
"message: %v", c.cfg.chanState.FundingOutpoint, err)
@ -641,7 +661,10 @@ func (c *chainWatcher) dispatchLocalForceClose(
}
// Attempt to add a channel sync message to the close summary.
chanSync, err := lnwallet.ChanSyncMsg(c.cfg.chanState)
chanSync, err := lnwallet.ChanSyncMsg(
c.cfg.chanState,
c.cfg.chanState.HasChanStatus(channeldb.ChanStatusRestored),
)
if err != nil {
log.Errorf("ChannelPoint(%v): unable to create channel sync "+
"message: %v", c.cfg.chanState.FundingOutpoint, err)
@ -819,7 +842,10 @@ func (c *chainWatcher) dispatchContractBreach(spendEvent *chainntnfs.SpendDetail
}
// Attempt to add a channel sync message to the close summary.
chanSync, err := lnwallet.ChanSyncMsg(c.cfg.chanState)
chanSync, err := lnwallet.ChanSyncMsg(
c.cfg.chanState,
c.cfg.chanState.HasChanStatus(channeldb.ChanStatusRestored),
)
if err != nil {
log.Errorf("ChannelPoint(%v): unable to create channel sync "+
"message: %v", c.cfg.chanState.FundingOutpoint, err)


@ -562,7 +562,11 @@ func (l *channelLink) syncChanStates() error {
// First, we'll generate our ChanSync message to send to the other
// side. Based on this message, the remote party will decide if they
// need to retransmit any data or not.
localChanSyncMsg, err := lnwallet.ChanSyncMsg(l.channel.State())
chanState := l.channel.State()
localChanSyncMsg, err := lnwallet.ChanSyncMsg(
chanState,
chanState.HasChanStatus(channeldb.ChanStatusRestored),
)
if err != nil {
return fmt.Errorf("unable to generate chan sync message for "+
"ChannelPoint(%v)", l.channel.ChannelPoint())

lnd.go
View File

@ -217,18 +217,21 @@ func lndMain() error {
}
var (
privateWalletPw = lnwallet.DefaultPrivatePassphrase
publicWalletPw = lnwallet.DefaultPublicPassphrase
birthday = time.Now()
recoveryWindow uint32
unlockedWallet *wallet.Wallet
walletInitParams WalletUnlockParams
privateWalletPw = lnwallet.DefaultPrivatePassphrase
publicWalletPw = lnwallet.DefaultPublicPassphrase
)
// If the user didn't request a seed, then we'll manually assume a
// wallet birthday of now, as otherwise the seed would've specified
// this information.
walletInitParams.Birthday = time.Now()
// We wait until the user provides a password over RPC. In case lnd is
// started with the --noseedbackup flag, we use the default password
// for wallet encryption.
if !cfg.NoSeedBackup {
walletInitParams, err := waitForWalletPassword(
params, err := waitForWalletPassword(
cfg.RPCListeners, cfg.RESTListeners, serverOpts,
proxyOpts, tlsConf,
)
@ -236,16 +239,14 @@ func lndMain() error {
return err
}
walletInitParams = *params
privateWalletPw = walletInitParams.Password
publicWalletPw = walletInitParams.Password
birthday = walletInitParams.Birthday
recoveryWindow = walletInitParams.RecoveryWindow
unlockedWallet = walletInitParams.Wallet
if recoveryWindow > 0 {
if walletInitParams.RecoveryWindow > 0 {
ltndLog.Infof("Wallet recovery mode enabled with "+
"address lookahead of %d addresses",
recoveryWindow)
walletInitParams.RecoveryWindow)
}
}
@ -264,7 +265,7 @@ func lndMain() error {
// Try to unlock the macaroon store with the private password.
err = macaroonService.CreateUnlock(&privateWalletPw)
if err != nil {
srvrLog.Error(err)
srvrLog.Errorf("unable to unlock macaroons: %v", err)
return err
}
@ -288,8 +289,9 @@ func lndMain() error {
// instances of the pertinent interfaces required to operate the
// Lightning Network Daemon.
activeChainControl, chainCleanUp, err := newChainControlFromConfig(
cfg, chanDB, privateWalletPw, publicWalletPw, birthday,
recoveryWindow, unlockedWallet, neutrinoCS,
cfg, chanDB, privateWalletPw, publicWalletPw,
walletInitParams.Birthday, walletInitParams.RecoveryWindow,
walletInitParams.Wallet, neutrinoCS,
)
if err != nil {
fmt.Printf("unable to create chain control: %v\n", err)
@ -327,6 +329,7 @@ func lndMain() error {
// connections.
server, err := newServer(
cfg.Listeners, chanDB, activeChainControl, idPrivKey,
walletInitParams.ChansToRestore,
)
if err != nil {
srvrLog.Errorf("unable to create server: %v\n", err)
@ -672,6 +675,10 @@ type WalletUnlockParams struct {
// later when lnd actually uses it). Because unlocking involves scrypt
// which is resource intensive, we want to avoid doing it twice.
Wallet *wallet.Wallet
// ChansToRestore is a set of static channel backups that should be
// restored before the main server instance starts up.
ChansToRestore walletunlocker.ChannelsToRecover
}
// waitForWalletPassword will spin up gRPC and REST endpoints for the
@ -825,24 +832,23 @@ func waitForWalletPassword(grpcEndpoints, restEndpoints []net.Addr,
return nil, err
}
walletInitParams := &WalletUnlockParams{
return &WalletUnlockParams{
Password: password,
Birthday: birthday,
RecoveryWindow: recoveryWindow,
Wallet: newWallet,
}
return walletInitParams, nil
ChansToRestore: initMsg.ChanBackups,
}, nil
// The wallet has already been created in the past, and is simply being
// unlocked. So we'll just return these passphrases.
case unlockMsg := <-pwService.UnlockMsgs:
walletInitParams := &WalletUnlockParams{
return &WalletUnlockParams{
Password: unlockMsg.Passphrase,
RecoveryWindow: unlockMsg.RecoveryWindow,
Wallet: unlockMsg.Wallet,
}
return walletInitParams, nil
ChansToRestore: unlockMsg.ChanBackups,
}, nil
case <-signal.ShutdownChannel():
return nil, fmt.Errorf("shutting down")

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -145,11 +145,21 @@ message InitWalletRequest {
/**
recovery_window is an optional argument specifying the address lookahead
when restoring a wallet seed. The recovery window applies to each
invdividual branch of the BIP44 derivation paths. Supplying a recovery
individual branch of the BIP44 derivation paths. Supplying a recovery
window of zero indicates that no addresses should be recovered, such after
the first initialization of the wallet.
*/
int32 recovery_window = 4;
/**
channel_backups is an optional argument that allows clients to recover the
settled funds within a set of channels. This should be populated if the
user was unable to close out all channels and sweep funds before partial or
total data loss occurred. If specified, then after on-chain recovery of
funds, lnd will begin to carry out the data loss recovery protocol in order to
recover the funds in each channel from a remote force closed transaction.
*/
ChanBackupSnapshot channel_backups = 5;
}
message InitWalletResponse {
}
@ -170,6 +180,16 @@ message UnlockWalletRequest {
the first initialization of the wallet.
*/
int32 recovery_window = 2;
/**
channel_backups is an optional argument that allows clients to recover the
settled funds within a set of channels. This should be populated if the
user was unable to close out all channels and sweep funds before partial or
total data loss occurred. If specified, then after on-chain recovery of
funds, lnd will begin to carry out the data loss recovery protocol in order to
recover the funds in each channel from a remote force closed transaction.
*/
ChanBackupSnapshot channel_backups = 3;
}
message UnlockWalletResponse {}
@ -668,7 +688,7 @@ service Lightning {
/** lncli: `fwdinghistory`
ForwardingHistory allows the caller to query the htlcswitch for a record of
all HTLC's forwarded within the target time range, and integer offset
all HTLCs forwarded within the target time range, and integer offset
within that time range. If no time-range is specified, then the first chunk
of the past 24 hrs of forwarding history are returned.
@ -684,6 +704,57 @@ service Lightning {
body: "*"
};
};
/** lncli: `exportchanbackup`
ExportChannelBackup attempts to return an encrypted static channel backup
for the target channel identified by its channel point. The backup is
encrypted with a key generated from the aezeed seed of the user. The
returned backup can either be restored using the RestoreChannelBackup
method once lnd is running, or via the InitWallet and UnlockWallet methods
from the WalletUnlocker service.
*/
rpc ExportChannelBackup(ExportChannelBackupRequest) returns (ChannelBackup) {
};
/**
ExportAllChannelBackups returns static channel backups for all existing
channels known to lnd. A set of regular singular static channel backups for
each channel is returned. Additionally, a multi-channel backup is returned
as well, which contains a single encrypted blob containing the backups of
each channel.
*/
rpc ExportAllChannelBackups(ChanBackupExportRequest) returns (ChanBackupSnapshot) {
};
/**
VerifyChanBackup allows a caller to verify the integrity of a channel
backup snapshot. This method will accept either a packed Single or a packed
Multi. Two bools are returned which indicate if the passed Single
(if present) is valid and also if the passed Multi (if present) is valid.
*/
rpc VerifyChanBackup(ChanBackupSnapshot) returns (VerifyChanBackupResponse) {
};
/** lncli: `restorechanbackup`
RestoreChannelBackups accepts a set of singular channel backups, or a
single encrypted multi-chan backup and attempts to recover any funds
remaining within the channel. If we are able to unpack the backup, then the
new channel will be shown under listchannels, as well as pending channels.
*/
rpc RestoreChannelBackups(RestoreChanBackupRequest) returns (RestoreBackupResponse) {
};
/**
SubscribeChannelBackups allows a client to subscribe to the most up to
date information concerning the state of all channel backups. Each time a
new channel is added, we return the new set of channels, along with a
multi-chan backup containing the backup info for all channels. Each time a
channel is closed, we send a new update, which contains the new set of chan
backups, as well as the updated set of encrypted multi-chan backups with the
closed channel(s) removed.
*/
rpc SubscribeChannelBackups(ChannelBackupSubscription) returns (stream ChanBackupSnapshot) {
};
}
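For a sense of how these endpoints compose from a client, here is a hedged Go sketch; the package and function names are illustrative, connection, TLS, and macaroon setup are assumed to be handled elsewhere, and field names follow the standard generated lnrpc bindings. It fetches a full snapshot and persists the encrypted multi-chan blob, which is safe to blindly overwrite with newer versions:

package scbtool

import (
	"context"
	"fmt"
	"io/ioutil"

	"github.com/lightningnetwork/lnd/lnrpc"
)

// exportAndStore fetches a snapshot of all channel backups and writes the
// single encrypted multi-chan blob to disk. The blob is opaque and
// versioned, so a plain file write is all the persistence required.
func exportAndStore(ctx context.Context, client lnrpc.LightningClient,
	path string) error {

	snapshot, err := client.ExportAllChannelBackups(
		ctx, &lnrpc.ChanBackupExportRequest{},
	)
	if err != nil {
		return fmt.Errorf("unable to export backups: %v", err)
	}

	multi := snapshot.MultiChanBackup
	fmt.Printf("backing up %d channels\n", len(multi.ChanPoints))

	return ioutil.WriteFile(path, multi.MultiChanBackup, 0600)
}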
message Utxo {
@ -1067,9 +1138,8 @@ message Channel {
repeated HTLC pending_htlcs = 15 [json_name = "pending_htlcs"];
/**
The CSV delay expressed in relative blocks. If the channel is force
closed, we'll need to wait for this many blocks before we can regain our
funds.
The CSV delay expressed in relative blocks. If the channel is force closed,
we will need to wait for this many blocks before we can regain our funds.
*/
uint32 csv_delay = 16 [json_name = "csv_delay"];
@ -1078,6 +1148,9 @@ message Channel {
/// True if we were the ones that created the channel.
bool initiator = 18 [json_name = "initiator"];
/// A set of flags showing the current state of the channel.
string chan_status_flags = 19 [json_name = "chan_status_flags"];
}
@ -2190,3 +2263,74 @@ message ForwardingHistoryResponse {
/// The index of the last time in the set of returned forwarding events. Can be used to seek further, pagination style.
uint32 last_offset_index = 2 [json_name = "last_offset_index"];
}
message ExportChannelBackupRequest {
/// The target channel point to obtain a backup for.
ChannelPoint chan_point = 1;
}
message ChannelBackup {
/**
Identifies the channel that this backup belongs to.
*/
ChannelPoint chan_point = 1 [ json_name = "chan_point" ];
/**
An encrypted single-chan backup. This can be passed to
RestoreChannelBackups, or the WalletUnlocker InitWallet and UnlockWallet
methods in order to trigger the recovery protocol.
*/
bytes chan_backup = 2 [ json_name = "chan_backup" ];
}
message MultiChanBackup {
/**
The set of all channels that are included in this multi-channel backup.
*/
repeated ChannelPoint chan_points = 1 [ json_name = "chan_points" ];
/**
A single encrypted blob containing all the static channel backups of the
channels listed above. This can be stored as a single file or blob, and
safely be replaced with any prior/future versions.
*/
bytes multi_chan_backup = 2 [ json_name = "multi_chan_backup" ];
}
message ChanBackupExportRequest {}
message ChanBackupSnapshot {
/**
The set of new channels that have been added since the last channel backup
snapshot was requested.
*/
ChannelBackups single_chan_backups = 1 [ json_name = "single_chan_backups" ];
/**
A multi-channel backup that covers all open channels currently known to
lnd.
*/
MultiChanBackup multi_chan_backup = 2 [ json_name = "multi_chan_backup" ];
}
message ChannelBackups {
/**
A set of single-chan static channel backups.
*/
repeated ChannelBackup chan_backups = 1 [ json_name = "chan_backups" ];
}
message RestoreChanBackupRequest {
oneof backup {
ChannelBackups chan_backups = 1 [ json_name = "chan_backups" ];
bytes multi_chan_backup = 2 [ json_name = "multi_chan_backup" ];
}
}
message RestoreBackupResponse {}
message ChannelBackupSubscription {}
message VerifyChanBackupResponse {
bool singles_valid = 1 [ json_name = "singles_valid"];
bool multi_valid = 2 [ json_name = "multi_valid"];
}
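On the Go side, the oneof above maps onto wrapper structs in the generated bindings; the names below follow the standard protoc-gen-go scheme and the package name is illustrative. A sketch of restoring from an on-disk channels.backup file:

package scbtool

import (
	"context"
	"io/ioutil"

	"github.com/lightningnetwork/lnd/lnrpc"
)

// restoreFromFile reads a packed multi backup from disk and submits it via
// the multi_chan_backup arm of the oneof.
func restoreFromFile(ctx context.Context, client lnrpc.LightningClient,
	path string) error {

	packedMulti, err := ioutil.ReadFile(path)
	if err != nil {
		return err
	}

	_, err = client.RestoreChannelBackups(
		ctx, &lnrpc.RestoreChanBackupRequest{
			Backup: &lnrpc.RestoreChanBackupRequest_MultiChanBackup{
				MultiChanBackup: packedMulti,
			},
		},
	)
	return err
}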

View File

@ -976,7 +976,7 @@
},
"/v1/switch": {
"post": {
"summary": "* lncli: `fwdinghistory`\nForwardingHistory allows the caller to query the htlcswitch for a record of\nall HTLC's forwarded within the target time range, and integer offset\nwithin that time range. If no time-range is specified, then the first chunk\nof the past 24 hrs of forwarding history are returned.",
"summary": "* lncli: `fwdinghistory`\nForwardingHistory allows the caller to query the htlcswitch for a record of\nall HTLCs forwarded within the target time range, and integer offset\nwithin that time range. If no time-range is specified, then the first chunk\nof the past 24 hrs of forwarding history are returned.",
"description": "A list of forwarding events are returned. The size of each forwarding event\nis 40 bytes, and the max message size able to be returned in gRPC is 4 MiB.\nAs a result each message can only contain 50k entries. Each response has\nthe index offset of the last entry. The index offset can be provided to the\nrequest to allow the caller to skip a series of records.",
"operationId": "ForwardingHistory",
"responses": {
@ -1358,6 +1358,19 @@
}
}
},
"lnrpcChanBackupSnapshot": {
"type": "object",
"properties": {
"single_chan_backups": {
"$ref": "#/definitions/lnrpcChannelBackups",
"description": "*\nThe set of new channels that have been added since the last channel backup\nsnapshot was requested."
},
"multi_chan_backup": {
"$ref": "#/definitions/lnrpcMultiChanBackup",
"description": "*\nA multi-channel backup that covers all open channels currently known to\nlnd."
}
}
},
"lnrpcChangePasswordRequest": {
"type": "object",
"properties": {
@ -1457,7 +1470,7 @@
"csv_delay": {
"type": "integer",
"format": "int64",
"description": "*\nThe CSV delay expressed in relative blocks. If the channel is force\nclosed, we'll need to wait for this many blocks before we can regain our\nfunds."
"description": "*\nThe CSV delay expressed in relative blocks. If the channel is force closed,\nwe will need to wait for this many blocks before we can regain our funds."
},
"private": {
"type": "boolean",
@ -1468,6 +1481,36 @@
"type": "boolean",
"format": "boolean",
"description": "/ True if we were the ones that created the channel."
},
"chan_status_flags": {
"type": "string",
"description": "/ A set of flags showing the current state of the cahnnel."
}
}
},
"lnrpcChannelBackup": {
"type": "object",
"properties": {
"chan_point": {
"$ref": "#/definitions/lnrpcChannelPoint",
"description": "*\nIdentifies the channel that this backup belongs to."
},
"chan_backup": {
"type": "string",
"format": "byte",
"description": "*\nIs an encrypted single-chan backup. this can be passed to\nRestoreChannelBackups, or the WalletUnlocker Innit and Unlock methods in\norder to trigger the recovery protocol."
}
}
},
"lnrpcChannelBackups": {
"type": "object",
"properties": {
"chan_backups": {
"type": "array",
"items": {
"$ref": "#/definitions/lnrpcChannelBackup"
},
"description": "*\nA set of single-chan static channel backups."
}
}
},
@ -2139,7 +2182,11 @@
"recovery_window": {
"type": "integer",
"format": "int32",
"description": "*\nrecovery_window is an optional argument specifying the address lookahead\nwhen restoring a wallet seed. The recovery window applies to each\ninvdividual branch of the BIP44 derivation paths. Supplying a recovery\nwindow of zero indicates that no addresses should be recovered, such after\nthe first initialization of the wallet."
"description": "*\nrecovery_window is an optional argument specifying the address lookahead\nwhen restoring a wallet seed. The recovery window applies to each\nindividual branch of the BIP44 derivation paths. Supplying a recovery\nwindow of zero indicates that no addresses should be recovered, such after\nthe first initialization of the wallet."
},
"channel_backups": {
"$ref": "#/definitions/lnrpcChanBackupSnapshot",
"description": "*\nchannel_backups is an optional argument that allows clients to recover the\nsettled funds within a set of channels. This should be populated if the\nuser was unable to close out all channels and sweep funds before partial or\ntotal data loss occurred. If specified, then after on-chain recovery of\nfunds, lnd begin to carry out the data loss recovery protocol in order to\nrecover the funds in each channel from a remote force closed transaction."
}
}
},
@ -2362,6 +2409,23 @@
}
}
},
"lnrpcMultiChanBackup": {
"type": "object",
"properties": {
"chan_points": {
"type": "array",
"items": {
"$ref": "#/definitions/lnrpcChannelPoint"
},
"description": "*\nIs the set of all channels that are included in this multi-channel backup."
},
"multi_chan_backup": {
"type": "string",
"format": "byte",
"description": "*\nA single encrypted blob containing all the static channel backups of the\nchannel listed above. This can be stored as a single file or blob, and\nsafely be replaced with any prior/future versions."
}
}
},
"lnrpcNetworkInfo": {
"type": "object",
"properties": {
@ -2812,6 +2876,9 @@
}
}
},
"lnrpcRestoreBackupResponse": {
"type": "object"
},
"lnrpcRoute": {
"type": "object",
"properties": {
@ -3124,6 +3191,10 @@
"type": "integer",
"format": "int32",
"description": "*\nrecovery_window is an optional argument specifying the address lookahead\nwhen restoring a wallet seed. The recovery window applies to each\ninvdividual branch of the BIP44 derivation paths. Supplying a recovery\nwindow of zero indicates that no addresses should be recovered, such after\nthe first initialization of the wallet."
},
"channel_backups": {
"$ref": "#/definitions/lnrpcChanBackupSnapshot",
"description": "*\nchannel_backups is an optional argument that allows clients to recover the\nsettled funds within a set of channels. This should be populated if the\nuser was unable to close out all channels and sweep funds before partial or\ntotal data loss occurred. If specified, then after on-chain recovery of\nfunds, lnd begin to carry out the data loss recovery protocol in order to\nrecover the funds in each channel from a remote force closed transaction."
}
}
},
@ -3161,6 +3232,19 @@
}
}
},
"lnrpcVerifyChanBackupResponse": {
"type": "object",
"properties": {
"singles_valid": {
"type": "boolean",
"format": "boolean"
},
"multi_valid": {
"type": "boolean",
"format": "boolean"
}
}
},
"lnrpcVerifyMessageRequest": {
"type": "object",
"properties": {

View File

@ -248,7 +248,7 @@ func (n *NetworkHarness) TearDownAll() error {
// current instance of the network harness. The created node is running, but
// not yet connected to other nodes within the network.
func (n *NetworkHarness) NewNode(name string, extraArgs []string) (*HarnessNode, error) {
return n.newNode(name, extraArgs, false)
return n.newNode(name, extraArgs, false, nil)
}
// NewNodeWithSeed fully initializes a new HarnessNode after creating a fresh
@ -258,7 +258,7 @@ func (n *NetworkHarness) NewNode(name string, extraArgs []string) (*HarnessNode,
func (n *NetworkHarness) NewNodeWithSeed(name string, extraArgs []string,
password []byte) (*HarnessNode, []string, error) {
node, err := n.newNode(name, extraArgs, true)
node, err := n.newNode(name, extraArgs, true, password)
if err != nil {
return nil, nil, err
}
@ -301,14 +301,15 @@ func (n *NetworkHarness) NewNodeWithSeed(name string, extraArgs []string,
}
// RestoreNodeWithSeed fully initializes a HarnessNode using a chosen mnemonic,
// password, and recovery window. After providing the initialization request to
// unlock the node, this method will finish initializing the LightningClient
// such that the HarnessNode can be used for regular rpc operations.
// password, recovery window, and optionally a set of static channel backups.
// After providing the initialization request to unlock the node, this method
// will finish initializing the LightningClient such that the HarnessNode can
// be used for regular rpc operations.
func (n *NetworkHarness) RestoreNodeWithSeed(name string, extraArgs []string,
password []byte, mnemonic []string,
recoveryWindow int32) (*HarnessNode, error) {
password []byte, mnemonic []string, recoveryWindow int32,
chanBackups *lnrpc.ChanBackupSnapshot) (*HarnessNode, error) {
node, err := n.newNode(name, extraArgs, true)
node, err := n.newNode(name, extraArgs, true, password)
if err != nil {
return nil, err
}
@ -318,6 +319,7 @@ func (n *NetworkHarness) RestoreNodeWithSeed(name string, extraArgs []string,
CipherSeedMnemonic: mnemonic,
AezeedPassphrase: password,
RecoveryWindow: recoveryWindow,
ChannelBackups: chanBackups,
}
err = node.Init(context.Background(), initReq)
@ -337,10 +339,12 @@ func (n *NetworkHarness) RestoreNodeWithSeed(name string, extraArgs []string,
// can be used immediately. Otherwise, the node will require an additional
// initialization phase where the wallet is either created or restored.
func (n *NetworkHarness) newNode(name string, extraArgs []string,
hasSeed bool) (*HarnessNode, error) {
hasSeed bool, password []byte) (*HarnessNode, error) {
node, err := newNode(nodeConfig{
Name: name,
HasSeed: hasSeed,
Password: password,
BackendCfg: n.BackendCfg,
NetParams: n.netParams,
ExtraArgs: extraArgs,
@ -573,8 +577,12 @@ func (n *NetworkHarness) DisconnectNodes(ctx context.Context, a, b *HarnessNode)
//
// This method can be useful when testing edge cases such as a node broadcast
// and invalidated prior state, or persistent state recovery, simulating node
// crashes, etc.
func (n *NetworkHarness) RestartNode(node *HarnessNode, callback func() error) error {
// crashes, etc. Additionally, each time the node is restarted, the caller can
// pass a set of SCBs via the Unlock method, allowing them to restore channels
// during restart.
func (n *NetworkHarness) RestartNode(node *HarnessNode, callback func() error,
chanBackups ...*lnrpc.ChanBackupSnapshot) error {
if err := node.stop(); err != nil {
return err
}
@ -585,7 +593,27 @@ func (n *NetworkHarness) RestartNode(node *HarnessNode, callback func() error) e
}
}
return node.start(n.lndErrorChan)
if err := node.start(n.lndErrorChan); err != nil {
return err
}
// If the node doesn't have a password set, then we can exit here as we
// don't need to unlock it.
if len(node.cfg.Password) == 0 {
return nil
}
// Otherwise, we'll unlock the wallet, then complete the final steps
// for the node initialization process.
unlockReq := &lnrpc.UnlockWalletRequest{
WalletPassword: node.cfg.Password,
}
if len(chanBackups) != 0 {
unlockReq.ChannelBackups = chanBackups[0]
unlockReq.RecoveryWindow = 1000
}
return node.Unlock(context.Background(), unlockReq)
}
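As a usage sketch (test and helper names are illustrative; HarnessNode is assumed to embed the generated LightningClient as elsewhere in lntest), an integration test can now snapshot a node's channels and feed the snapshot back through the restart path, exercising restoration via the wallet unlock:

package itest

import (
	"context"
	"testing"

	"github.com/lightningnetwork/lnd/lnrpc"
	"github.com/lightningnetwork/lnd/lntest"
)

// restartWithBackup snapshots a node's channel backups, then restarts it
// with the snapshot so that the harness passes the SCBs to UnlockWallet.
func restartWithBackup(t *testing.T, net *lntest.NetworkHarness,
	node *lntest.HarnessNode) {

	snapshot, err := node.ExportAllChannelBackups(
		context.Background(), &lnrpc.ChanBackupExportRequest{},
	)
	if err != nil {
		t.Fatalf("unable to export backups: %v", err)
	}

	if err := net.RestartNode(node, nil, snapshot); err != nil {
		t.Fatalf("unable to restart node with backups: %v", err)
	}
}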
// SuspendNode stops the given node and returns a callback that can be used to

View File

@ -25,6 +25,7 @@ import (
"github.com/btcsuite/btcd/wire"
"github.com/btcsuite/btcutil"
"github.com/go-errors/errors"
"github.com/lightningnetwork/lnd/chanbackup"
"github.com/lightningnetwork/lnd/lnrpc"
"github.com/lightningnetwork/lnd/macaroons"
)
@ -117,7 +118,8 @@ type nodeConfig struct {
ReadMacPath string
InvoiceMacPath string
HasSeed bool
HasSeed bool
Password []byte
P2PPort int
RPCPort int
@ -141,6 +143,16 @@ func (cfg nodeConfig) DBPath() string {
fmt.Sprintf("%v/channel.db", cfg.NetParams.Name))
}
func (cfg nodeConfig) ChanBackupPath() string {
return filepath.Join(
cfg.DataDir, "chain", "bitcoin",
fmt.Sprintf(
"%v/%v", cfg.NetParams.Name,
chanbackup.DefaultBackupFileName,
),
)
}
// genArgs generates a slice of command line arguments from the lightning node
// config struct.
func (cfg nodeConfig) genArgs() []string {
@ -278,6 +290,12 @@ func (hn *HarnessNode) Name() string {
return hn.cfg.Name
}
// ChanBackupPath returns the filepath to the on-disk channels.backup file for
// this node.
func (hn *HarnessNode) ChanBackupPath() string {
return hn.cfg.ChanBackupPath()
}
// Start launches a new process running lnd. Additionally, the PID of the
// launched process is saved in order to possibly kill the process forcibly
// later.
@ -404,6 +422,24 @@ func (hn *HarnessNode) start(lndError chan<- error) error {
return hn.initLightningClient(conn)
}
// initClientWhenReady waits until the main gRPC server is detected as active,
// then completes the normal HarnessNode gRPC connection creation. This can be
// used if a node has just been unlocked, or has just had its wallet state
// initialized.
func (hn *HarnessNode) initClientWhenReady() error {
var (
conn *grpc.ClientConn
connErr error
)
if err := WaitNoError(func() error {
conn, connErr = hn.ConnectRPC(true)
return connErr
}, 5*time.Second); err != nil {
return err
}
return hn.initLightningClient(conn)
}
// Init initializes a harness node by passing the init request via rpc. After
// the request is submitted, this method will block until an
// macaroon-authenticated rpc connection can be established to the harness node.
@ -412,8 +448,7 @@ func (hn *HarnessNode) start(lndError chan<- error) error {
func (hn *HarnessNode) Init(ctx context.Context,
initReq *lnrpc.InitWalletRequest) error {
timeout := time.Duration(time.Second * 15)
ctxt, _ := context.WithTimeout(ctx, timeout)
ctxt, _ := context.WithTimeout(ctx, DefaultTimeout)
_, err := hn.InitWallet(ctxt, initReq)
if err != nil {
return err
@ -421,15 +456,27 @@ func (hn *HarnessNode) Init(ctx context.Context,
// Wait for the wallet to finish unlocking, such that we can connect to
// it via a macaroon-authenticated rpc connection.
var conn *grpc.ClientConn
if err = WaitPredicate(func() bool {
conn, err = hn.ConnectRPC(true)
return err == nil
}, 5*time.Second); err != nil {
return hn.initClientWhenReady()
}
// Unlock attempts to unlock the wallet of the target HarnessNode. This method
// should be called after the restart of a HarnessNode that was created with a
// seed+password. Once this method returns, the HarnessNode will be ready to
// accept normal gRPC requests and harness commands.
func (hn *HarnessNode) Unlock(ctx context.Context,
unlockReq *lnrpc.UnlockWalletRequest) error {
ctxt, _ := context.WithTimeout(ctx, DefaultTimeout)
// Otherwise, we'll need to unlock the node before it's able to start
// up properly.
if _, err := hn.UnlockWallet(ctxt, unlockReq); err != nil {
return err
}
return hn.initLightningClient(conn)
// Now that the wallet has been unlocked, we'll wait for the RPC client
// to be ready, then establish the normal gRPC connection.
return hn.initClientWhenReady()
}
// initLightningClient constructs the grpc LightningClient from the given client

View File

@ -856,12 +856,16 @@ func (lc *LightningChannel) diskCommitToMemCommit(isLocal bool,
// haven't yet received a responding commitment from the remote party.
var localCommitKeys, remoteCommitKeys *CommitmentKeyRing
if localCommitPoint != nil {
localCommitKeys = deriveCommitmentKeys(localCommitPoint, true,
lc.localChanCfg, lc.remoteChanCfg)
localCommitKeys = deriveCommitmentKeys(
localCommitPoint, true, lc.localChanCfg,
lc.remoteChanCfg,
)
}
if remoteCommitPoint != nil {
remoteCommitKeys = deriveCommitmentKeys(remoteCommitPoint, false,
lc.localChanCfg, lc.remoteChanCfg)
remoteCommitKeys = deriveCommitmentKeys(
remoteCommitPoint, false, lc.localChanCfg,
lc.remoteChanCfg,
)
}
// With the key rings re-created, we'll now convert all the on-disk
@ -1408,8 +1412,7 @@ func NewLightningChannel(signer input.Signer, pCache PreimageCache,
// Create the sign descriptor which we'll be using very frequently to
// request a signature for the 2-of-2 multi-sig from the signer in
// order to complete channel state transitions.
err = lc.createSignDesc()
if err != nil {
if err := lc.createSignDesc(); err != nil {
return nil, err
}
@ -1701,7 +1704,8 @@ func (lc *LightningChannel) restoreCommitState(
// Finally, with the commitment states restored, we'll now restore the
// state logs based on the current local+remote commit, and any pending
// remote commit that exists.
err = lc.restoreStateLogs(localCommit, remoteCommit, pendingRemoteCommit,
err = lc.restoreStateLogs(
localCommit, remoteCommit, pendingRemoteCommit,
pendingRemoteCommitDiff, pendingRemoteKeyChain,
)
if err != nil {
@ -3248,6 +3252,13 @@ func (lc *LightningChannel) ProcessChanSyncMsg(
}
}
// If we detect that this is a restored channel, then we can skip a
// portion of the verification, as we already know that we're unable to
// proceed with any updates.
isRestoredChan := lc.channelState.HasChanStatus(
channeldb.ChanStatusRestored,
)
// Take note of our current commit chain heights before we begin adding
// more to them.
var (
@ -3265,12 +3276,17 @@ func (lc *LightningChannel) ProcessChanSyncMsg(
// If their reported height for our local chain tail is ahead of our
// view, then we're behind!
case msg.RemoteCommitTailHeight > localTailHeight:
case msg.RemoteCommitTailHeight > localTailHeight || isRestoredChan:
walletLog.Errorf("ChannelPoint(%v), sync failed with local "+
"data loss: remote believes our tail height is %v, "+
"while we have %v!", lc.channelState.FundingOutpoint,
msg.RemoteCommitTailHeight, localTailHeight)
if isRestoredChan {
walletLog.Warnf("ChannelPoint(%v): detected restored "+
"channel, triggering DLP",
lc.channelState.FundingOutpoint)
}
// We must check that we had recovery options to ensure the
// commitment secret matched up, and the remote is just not
// lying about its height.
@ -3480,12 +3496,21 @@ func (lc *LightningChannel) ProcessChanSyncMsg(
// is valid.
var commitPoint *btcec.PublicKey
switch {
case msg.NextLocalCommitHeight == remoteTailHeight+2:
commitPoint = lc.channelState.RemoteNextRevocation
// If their height is one beyond what we know their current height to
// be, then we need to compare their current unrevoked commitment point
// as that's what they should send.
case msg.NextLocalCommitHeight == remoteTailHeight+1:
commitPoint = lc.channelState.RemoteCurrentRevocation
// Alternatively, if their height is two beyond what we know their best
// height to be, then they're holding onto two commitments, and the
// highest unrevoked point is their next revocation.
//
// TODO(roasbeef): verify this in the spec...
case msg.NextLocalCommitHeight == remoteTailHeight+2:
commitPoint = lc.channelState.RemoteNextRevocation
}
if commitPoint != nil &&
!commitPoint.IsEqual(msg.LocalUnrevokedCommitPoint) {
@ -3497,6 +3522,7 @@ func (lc *LightningChannel) ProcessChanSyncMsg(
if err := lc.channelState.MarkBorked(); err != nil {
return nil, nil, nil, err
}
// TODO(halseth): force close?
return nil, nil, nil, ErrInvalidLocalUnrevokedCommitPoint
}
@ -3515,7 +3541,14 @@ func (lc *LightningChannel) ProcessChanSyncMsg(
// it.
// 3. We didn't get the last RevokeAndAck message they sent, so they'll
// re-send it.
func ChanSyncMsg(c *channeldb.OpenChannel) (*lnwire.ChannelReestablish, error) {
//
// The isRestoredChan bool indicates if we need to craft a chan sync message
// for a channel that's been restored. If this is a restored channel, then
// we'll modify our typical chan sync message to ensure they force close even
// if we're on the very first state.
func ChanSyncMsg(c *channeldb.OpenChannel,
isRestoredChan bool) (*lnwire.ChannelReestablish, error) {
c.Lock()
defer c.Unlock()
@ -3556,6 +3589,13 @@ func ChanSyncMsg(c *channeldb.OpenChannel) (*lnwire.ChannelReestablish, error) {
return nil, err
}
// If we've restored this channel, then we'll purposefully give them an
// invalid LocalUnrevokedCommitPoint so they'll force close the channel
// allowing us to sweep our funds.
if isRestoredChan {
currentCommitSecret[0] ^= 1
}
return &lnwire.ChannelReestablish{
ChanID: lnwire.NewChanIDFromOutPoint(
&c.FundingOutpoint,
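The one-bit perturbation of the commit secret above works because the peer re-derives a commitment point from the secret we claim to hold and compares it against its own revocation state; any mutation of the 32-byte secret yields a different point, so verification fails and the peer falls back to force closing, which is exactly what a freshly restored channel needs. A toy demonstration of that property (not lnd code) using the btcec curve:

package main

import (
	"fmt"

	"github.com/btcsuite/btcd/btcec"
)

func main() {
	// Stand-in for a real 32-byte per-commitment secret.
	var secret [32]byte
	secret[31] = 7

	// The point the peer expects, derived from the true secret.
	_, honest := btcec.PrivKeyFromBytes(btcec.S256(), secret[:])

	// The restored-channel perturbation: flip a single bit.
	secret[0] ^= 1
	_, perturbed := btcec.PrivKeyFromBytes(btcec.S256(), secret[:])

	// The derived points no longer match, so the peer's chan sync
	// verification fails and it force closes.
	fmt.Println("points match:", honest.IsEqual(perturbed)) // false
}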
@ -3616,7 +3656,7 @@ func (lc *LightningChannel) computeView(view *htlcView, remoteChain bool,
// will remove their corresponding added HTLCs. The resulting filtered
// view will only have Add entries left, making it easy to compare the
// channel constraints to the final commitment state. If any fee
// updates are found in the logs, the comitment fee rate should be
// updates are found in the logs, the commitment fee rate should be
// changed, so we'll also set the feePerKw to this new value.
filteredHTLCView := lc.evaluateHTLCView(view, &ourBalance,
&theirBalance, nextHeight, remoteChain, updateState)
@ -5070,12 +5110,14 @@ func NewUnilateralCloseSummary(chanState *channeldb.OpenChannel, signer input.Si
// Next, we'll obtain HTLC resolutions for all the outgoing HTLC's we
// had on their commitment transaction.
htlcResolutions, err := extractHtlcResolutions(
SatPerKWeight(remoteCommit.FeePerKw), false, signer, remoteCommit.Htlcs,
keyRing, &chanState.LocalChanCfg, &chanState.RemoteChanCfg,
*commitSpend.SpenderTxHash, pCache,
SatPerKWeight(remoteCommit.FeePerKw), false, signer,
remoteCommit.Htlcs, keyRing, &chanState.LocalChanCfg,
&chanState.RemoteChanCfg, *commitSpend.SpenderTxHash,
pCache,
)
if err != nil {
return nil, fmt.Errorf("unable to create htlc resolutions: %v", err)
return nil, fmt.Errorf("unable to create htlc "+
"resolutions: %v", err)
}
commitTxBroadcast := commitSpend.SpendingTx
@ -5085,7 +5127,8 @@ func NewUnilateralCloseSummary(chanState *channeldb.OpenChannel, signer input.Si
// transaction.
selfP2WKH, err := input.CommitScriptUnencumbered(keyRing.NoDelayKey)
if err != nil {
return nil, fmt.Errorf("unable to create self commit script: %v", err)
return nil, fmt.Errorf("unable to create self commit "+
"script: %v", err)
}
var (
@ -5143,7 +5186,10 @@ func NewUnilateralCloseSummary(chanState *channeldb.OpenChannel, signer input.Si
}
// Attempt to add a channel sync message to the close summary.
chanSync, err := ChanSyncMsg(chanState)
chanSync, err := ChanSyncMsg(
chanState,
chanState.HasChanStatus(channeldb.ChanStatusRestored),
)
if err != nil {
walletLog.Errorf("ChannelPoint(%v): unable to create channel sync "+
"message: %v", chanState.FundingOutpoint, err)

View File

@ -2532,7 +2532,7 @@ func assertNoChanSyncNeeded(t *testing.T, aliceChannel *LightningChannel,
_, _, line, _ := runtime.Caller(1)
aliceChanSyncMsg, err := ChanSyncMsg(aliceChannel.channelState)
aliceChanSyncMsg, err := ChanSyncMsg(aliceChannel.channelState, false)
if err != nil {
t.Fatalf("line #%v: unable to produce chan sync msg: %v",
line, err)
@ -2547,7 +2547,7 @@ func assertNoChanSyncNeeded(t *testing.T, aliceChannel *LightningChannel,
"instead wants to send: %v", line, spew.Sdump(bobMsgsToSend))
}
bobChanSyncMsg, err := ChanSyncMsg(bobChannel.channelState)
bobChanSyncMsg, err := ChanSyncMsg(bobChannel.channelState, false)
if err != nil {
t.Fatalf("line #%v: unable to produce chan sync msg: %v",
line, err)
@ -2780,11 +2780,11 @@ func TestChanSyncOweCommitment(t *testing.T) {
// Bob doesn't get this message so upon reconnection, they need to
// synchronize. Alice should conclude that she owes Bob a commitment,
// while Bob should think he's properly synchronized.
aliceSyncMsg, err := ChanSyncMsg(aliceChannel.channelState)
aliceSyncMsg, err := ChanSyncMsg(aliceChannel.channelState, false)
if err != nil {
t.Fatalf("unable to produce chan sync msg: %v", err)
}
bobSyncMsg, err := ChanSyncMsg(bobChannel.channelState)
bobSyncMsg, err := ChanSyncMsg(bobChannel.channelState, false)
if err != nil {
t.Fatalf("unable to produce chan sync msg: %v", err)
}
@ -3093,11 +3093,11 @@ func TestChanSyncOweRevocation(t *testing.T) {
// If we fetch the channel sync messages at this state, then Alice
// should report that she owes Bob a revocation message, while Bob
// thinks they're fully in sync.
aliceSyncMsg, err := ChanSyncMsg(aliceChannel.channelState)
aliceSyncMsg, err := ChanSyncMsg(aliceChannel.channelState, false)
if err != nil {
t.Fatalf("unable to produce chan sync msg: %v", err)
}
bobSyncMsg, err := ChanSyncMsg(bobChannel.channelState)
bobSyncMsg, err := ChanSyncMsg(bobChannel.channelState, false)
if err != nil {
t.Fatalf("unable to produce chan sync msg: %v", err)
}
@ -3261,11 +3261,11 @@ func TestChanSyncOweRevocationAndCommit(t *testing.T) {
// If we now attempt to resync, then Alice should conclude that she
// doesn't need any further updates, while Bob concludes that he needs
// to re-send both his revocation and commit sig message.
aliceSyncMsg, err := ChanSyncMsg(aliceChannel.channelState)
aliceSyncMsg, err := ChanSyncMsg(aliceChannel.channelState, false)
if err != nil {
t.Fatalf("unable to produce chan sync msg: %v", err)
}
bobSyncMsg, err := ChanSyncMsg(bobChannel.channelState)
bobSyncMsg, err := ChanSyncMsg(bobChannel.channelState, false)
if err != nil {
t.Fatalf("unable to produce chan sync msg: %v", err)
}
@ -3470,11 +3470,11 @@ func TestChanSyncOweRevocationAndCommitForceTransition(t *testing.T) {
// Now if we attempt to synchronize states at this point, Alice should
// detect that she owes nothing, while Bob should re-send both his
// RevokeAndAck as well as his commitment message.
aliceSyncMsg, err := ChanSyncMsg(aliceChannel.channelState)
aliceSyncMsg, err := ChanSyncMsg(aliceChannel.channelState, false)
if err != nil {
t.Fatalf("unable to produce chan sync msg: %v", err)
}
bobSyncMsg, err := ChanSyncMsg(bobChannel.channelState)
bobSyncMsg, err := ChanSyncMsg(bobChannel.channelState, false)
if err != nil {
t.Fatalf("unable to produce chan sync msg: %v", err)
}
@ -3674,11 +3674,11 @@ func TestChanSyncFailure(t *testing.T) {
assertLocalDataLoss := func(aliceOld *LightningChannel) {
t.Helper()
aliceSyncMsg, err := ChanSyncMsg(aliceOld.channelState)
aliceSyncMsg, err := ChanSyncMsg(aliceOld.channelState, false)
if err != nil {
t.Fatalf("unable to produce chan sync msg: %v", err)
}
bobSyncMsg, err := ChanSyncMsg(bobChannel.channelState)
bobSyncMsg, err := ChanSyncMsg(bobChannel.channelState, false)
if err != nil {
t.Fatalf("unable to produce chan sync msg: %v", err)
}
@ -3752,7 +3752,7 @@ func TestChanSyncFailure(t *testing.T) {
// If we remove the recovery options from Bob's message, Alice cannot
// tell if she lost state, since Bob might be lying. She still should
// be able to detect that chains cannot be synced.
bobSyncMsg, err := ChanSyncMsg(bobChannel.channelState)
bobSyncMsg, err := ChanSyncMsg(bobChannel.channelState, false)
if err != nil {
t.Fatalf("unable to produce chan sync msg: %v", err)
}
@ -3766,7 +3766,7 @@ func TestChanSyncFailure(t *testing.T) {
// If Bob lies about the NextLocalCommitHeight, making it greater than
// what Alice expect, she cannot tell for sure whether she lost state,
// but should detect the desync.
bobSyncMsg, err = ChanSyncMsg(bobChannel.channelState)
bobSyncMsg, err = ChanSyncMsg(bobChannel.channelState, false)
if err != nil {
t.Fatalf("unable to produce chan sync msg: %v", err)
}
@ -3779,7 +3779,7 @@ func TestChanSyncFailure(t *testing.T) {
// If Bob's NextLocalCommitHeight is lower than what Alice expects, Bob
// probably lost state.
bobSyncMsg, err = ChanSyncMsg(bobChannel.channelState)
bobSyncMsg, err = ChanSyncMsg(bobChannel.channelState, false)
if err != nil {
t.Fatalf("unable to produce chan sync msg: %v", err)
}
@ -3792,7 +3792,7 @@ func TestChanSyncFailure(t *testing.T) {
// If Alice and Bob's states are in sync, but Bob is sending the wrong
// LocalUnrevokedCommitPoint, Alice should detect this.
bobSyncMsg, err = ChanSyncMsg(bobChannel.channelState)
bobSyncMsg, err = ChanSyncMsg(bobChannel.channelState, false)
if err != nil {
t.Fatalf("unable to produce chan sync msg: %v", err)
}
@ -3821,7 +3821,7 @@ func TestChanSyncFailure(t *testing.T) {
// when there's a pending remote commit.
halfAdvance()
bobSyncMsg, err = ChanSyncMsg(bobChannel.channelState)
bobSyncMsg, err = ChanSyncMsg(bobChannel.channelState, false)
if err != nil {
t.Fatalf("unable to produce chan sync msg: %v", err)
}
@ -3909,11 +3909,11 @@ func TestChannelRetransmissionFeeUpdate(t *testing.T) {
// Bob doesn't get this message so upon reconnection, they need to
// synchronize. Alice should conclude that she owes Bob a commitment,
// while Bob should think he's properly synchronized.
aliceSyncMsg, err := ChanSyncMsg(aliceChannel.channelState)
aliceSyncMsg, err := ChanSyncMsg(aliceChannel.channelState, false)
if err != nil {
t.Fatalf("unable to produce chan sync msg: %v", err)
}
bobSyncMsg, err := ChanSyncMsg(bobChannel.channelState)
bobSyncMsg, err := ChanSyncMsg(bobChannel.channelState, false)
if err != nil {
t.Fatalf("unable to produce chan sync msg: %v", err)
}
@ -4357,11 +4357,11 @@ func TestChanSyncInvalidLastSecret(t *testing.T) {
}
// Next, we'll produce the ChanSync messages for both parties.
aliceChanSync, err := ChanSyncMsg(aliceChannel.channelState)
aliceChanSync, err := ChanSyncMsg(aliceChannel.channelState, false)
if err != nil {
t.Fatalf("unable to generate chan sync msg: %v", err)
}
bobChanSync, err := ChanSyncMsg(bobChannel.channelState)
bobChanSync, err := ChanSyncMsg(bobChannel.channelState, false)
if err != nil {
t.Fatalf("unable to generate chan sync msg: %v", err)
}

View File

@ -416,6 +416,14 @@ func WriteElement(w io.Writer, element interface{}) error {
return err
}
case bool:
var b [1]byte
if e {
b[0] = 1
}
if _, err := w.Write(b[:]); err != nil {
return err
}
default:
return fmt.Errorf("Unknown type in WriteElement: %T", e)
}
@ -440,6 +448,16 @@ func WriteElements(w io.Writer, elements ...interface{}) error {
func ReadElement(r io.Reader, element interface{}) error {
var err error
switch e := element.(type) {
case *bool:
var b [1]byte
if _, err := io.ReadFull(r, b[:]); err != nil {
return err
}
if b[0] == 1 {
*e = true
}
case *NodeAlias:
var a [32]byte
if _, err := io.ReadFull(r, a[:]); err != nil {
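The new bool support encodes true as a single 0x01 byte and false as 0x00. A quick round trip through these exported helpers (assuming they live in the lnwire package, as the surrounding NodeAlias case suggests):

package main

import (
	"bytes"
	"fmt"

	"github.com/lightningnetwork/lnd/lnwire"
)

func main() {
	var b bytes.Buffer
	if err := lnwire.WriteElement(&b, true); err != nil {
		panic(err)
	}

	// Copy the encoding before ReadElement consumes the buffer.
	encoded := append([]byte(nil), b.Bytes()...)

	var decoded bool
	if err := lnwire.ReadElement(&b, &decoded); err != nil {
		panic(err)
	}

	fmt.Printf("encoded=%x decoded=%v\n", encoded, decoded) // 01 true
}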

log.go
View File

@ -14,6 +14,7 @@ import (
"github.com/lightningnetwork/lnd/autopilot"
"github.com/lightningnetwork/lnd/build"
"github.com/lightningnetwork/lnd/chainntnfs"
"github.com/lightningnetwork/lnd/chanbackup"
"github.com/lightningnetwork/lnd/channeldb"
"github.com/lightningnetwork/lnd/channelnotifier"
"github.com/lightningnetwork/lnd/contractcourt"
@ -83,6 +84,7 @@ var (
ntfrLog = build.NewSubLogger("NTFR", backendLog.Logger)
irpcLog = build.NewSubLogger("IRPC", backendLog.Logger)
chnfLog = build.NewSubLogger("CHNF", backendLog.Logger)
chbuLog = build.NewSubLogger("CHBU", backendLog.Logger)
)
// Initialize package-global logger variables.
@ -109,6 +111,7 @@ func init() {
chainrpc.UseLogger(ntfrLog)
invoicesrpc.UseLogger(irpcLog)
channelnotifier.UseLogger(chnfLog)
chanbackup.UseLogger(chbuLog)
addSubLogger(routerrpc.Subsystem, routerrpc.UseLogger)
}
@ -151,6 +154,7 @@ var subsystemLoggers = map[string]btclog.Logger{
"NTFR": ntfnLog,
"IRPC": irpcLog,
"CHNF": chnfLog,
"CHBU": chbuLog,
}
// initLogRotator initializes the logging rotator to write logs to logFile and

View File

@ -438,7 +438,12 @@ func (p *peer) loadActiveChannels(chans []*channeldb.OpenChannel) error {
// Skip adding any permanently irreconcilable channels to the
// htlcswitch.
if dbChan.ChanStatus() != channeldb.ChanStatusDefault {
switch {
case dbChan.HasChanStatus(channeldb.ChanStatusBorked):
fallthrough
case dbChan.HasChanStatus(channeldb.ChanStatusCommitBroadcasted):
fallthrough
case dbChan.HasChanStatus(channeldb.ChanStatusLocalDataLoss):
peerLog.Warnf("ChannelPoint(%v) has status %v, won't "+
"start.", chanPoint, dbChan.ChanStatus())
continue

View File

@ -31,6 +31,7 @@ import (
proxy "github.com/grpc-ecosystem/grpc-gateway/runtime"
"github.com/lightningnetwork/lnd/autopilot"
"github.com/lightningnetwork/lnd/build"
"github.com/lightningnetwork/lnd/chanbackup"
"github.com/lightningnetwork/lnd/channeldb"
"github.com/lightningnetwork/lnd/channelnotifier"
"github.com/lightningnetwork/lnd/htlcswitch"
@ -357,6 +358,26 @@ var (
Entity: "offchain",
Action: "read",
}},
"/lnrpc.Lightning/RestoreChannelBackups": {{
Entity: "offchain",
Action: "write",
}},
"/lnrpc.Lightning/ExportChannelBackup": {{
Entity: "offchain",
Action: "read",
}},
"/lnrpc.Lightning/VerifyChanBackup": {{
Entity: "offchain",
Action: "read",
}},
"/lnrpc.Lightning/ExportAllChannelBackups": {{
Entity: "offchain",
Action: "read",
}},
"/lnrpc.Lightning/SubscribeChannelBackups": {{
Entity: "offchain",
Action: "read",
}},
}
)
@ -1415,17 +1436,12 @@ out:
switch update := fundingUpdate.Update.(type) {
case *lnrpc.OpenStatusUpdate_ChanOpen:
chanPoint := update.ChanOpen.ChannelPoint
txidHash, err := getChanPointFundingTxid(chanPoint)
if err != nil {
return err
}
h, err := chainhash.NewHash(txidHash)
txid, err := getChanPointFundingTxid(chanPoint)
if err != nil {
return err
}
outpoint = wire.OutPoint{
Hash: *h,
Hash: *txid,
Index: chanPoint.OutputIndex,
}
@ -1573,7 +1589,7 @@ func (r *rpcServer) OpenChannelSync(ctx context.Context,
// getChanPointFundingTxid returns the given channel point's funding txid in
// raw bytes.
func getChanPointFundingTxid(chanPoint *lnrpc.ChannelPoint) ([]byte, error) {
func getChanPointFundingTxid(chanPoint *lnrpc.ChannelPoint) (*chainhash.Hash, error) {
var txid []byte
// A channel point's funding txid can be get/set as a byte slice or a
@ -1591,7 +1607,7 @@ func getChanPointFundingTxid(chanPoint *lnrpc.ChannelPoint) ([]byte, error) {
txid = h[:]
}
return txid, nil
return chainhash.NewHash(txid)
}
// CloseChannel attempts to close an active channel identified by its channel
@ -1608,16 +1624,11 @@ func (r *rpcServer) CloseChannel(in *lnrpc.CloseChannelRequest,
force := in.Force
index := in.ChannelPoint.OutputIndex
txidHash, err := getChanPointFundingTxid(in.GetChannelPoint())
txid, err := getChanPointFundingTxid(in.GetChannelPoint())
if err != nil {
rpcsLog.Errorf("[closechannel] unable to get funding txid: %v", err)
return err
}
txid, err := chainhash.NewHash(txidHash)
if err != nil {
rpcsLog.Errorf("[closechannel] invalid txid: %v", err)
return err
}
chanPoint := wire.NewOutPoint(txid, index)
rpcsLog.Tracef("[closechannel] request for ChannelPoint(%v), force=%v",
@ -1821,11 +1832,7 @@ func (r *rpcServer) AbandonChannel(ctx context.Context,
// We'll parse out the arguments so we can obtain the chanPoint of the
// target channel.
txidHash, err := getChanPointFundingTxid(in.GetChannelPoint())
if err != nil {
return nil, err
}
txid, err := chainhash.NewHash(txidHash)
txid, err := getChanPointFundingTxid(in.GetChannelPoint())
if err != nil {
return nil, err
}
@ -2540,6 +2547,7 @@ func createRPCOpenChannel(r *rpcServer, graph *channeldb.ChannelGraph,
PendingHtlcs: make([]*lnrpc.HTLC, len(localCommit.Htlcs)),
CsvDelay: uint32(dbChannel.LocalChanCfg.CsvDelay),
Initiator: dbChannel.IsInitiator,
ChanStatusFlags: dbChannel.ChanStatus().String(),
}
for i, htlc := range localCommit.Htlcs {
@ -4503,11 +4511,7 @@ func (r *rpcServer) UpdateChannelPolicy(ctx context.Context,
// Otherwise, we're targeting an individual channel by its channel
// point.
case *lnrpc.PolicyUpdateRequest_ChanPoint:
txidHash, err := getChanPointFundingTxid(scope.ChanPoint)
if err != nil {
return nil, err
}
txid, err := chainhash.NewHash(txidHash)
txid, err := getChanPointFundingTxid(scope.ChanPoint)
if err != nil {
return nil, err
}
@ -4676,3 +4680,338 @@ func (r *rpcServer) ForwardingHistory(ctx context.Context,
return resp, nil
}
// ExportChannelBackup attempts to return an encrypted static channel backup
// for the target channel identified by its channel point. The backup is
// encrypted with a key generated from the aezeed seed of the user. The
// returned backup can either be restored using the RestoreChannelBackup method
// once lnd is running, or via the InitWallet and UnlockWallet methods from the
// WalletUnlocker service.
func (r *rpcServer) ExportChannelBackup(ctx context.Context,
in *lnrpc.ExportChannelBackupRequest) (*lnrpc.ChannelBackup, error) {
// First, we'll convert the lnrpc channel point into a wire.OutPoint
// that we can manipulate.
txid, err := getChanPointFundingTxid(in.ChanPoint)
if err != nil {
return nil, err
}
chanPoint := wire.OutPoint{
Hash: *txid,
Index: in.ChanPoint.OutputIndex,
}
// Next, we'll attempt to fetch a channel backup for this channel from
// the database. If this channel has been closed, or the outpoint is
// unknown, then we'll return an error.
unpackedBackup, err := chanbackup.FetchBackupForChan(
chanPoint, r.server.chanDB,
)
if err != nil {
return nil, err
}
// At this point, we have an unpacked backup (plaintext) so we'll now
// attempt to serialize and encrypt it in order to create a packed
// backup.
packedBackups, err := chanbackup.PackStaticChanBackups(
[]chanbackup.Single{*unpackedBackup},
r.server.cc.keyRing,
)
if err != nil {
return nil, fmt.Errorf("packing of back ups failed: %v", err)
}
// Before we proceed, we'll ensure that we received a backup for this
// channel, otherwise, we'll bail out.
packedBackup, ok := packedBackups[chanPoint]
if !ok {
return nil, fmt.Errorf("expected single backup for "+
"ChannelPoint(%v), got %v", chanPoint,
len(packedBackups))
}
return &lnrpc.ChannelBackup{
ChanPoint: in.ChanPoint,
ChanBackup: packedBackup,
}, nil
}
// VerifyChanBackup allows a caller to verify the integrity of a channel
// backup snapshot. This method will accept either a packed Single or a packed
// Multi. Two bools are returned which indicate if the passed Single
// (if present) is valid and also if the passed Multi (if present) is valid.
func (r *rpcServer) VerifyChanBackup(ctx context.Context,
in *lnrpc.ChanBackupSnapshot) (*lnrpc.VerifyChanBackupResponse, error) {
// If neither a Single or Multi has been specified, then we have
// nothing to verify.
if in.GetSingleChanBackups() == nil && in.GetMultiChanBackup() == nil {
return nil, fmt.Errorf("either a Single or Multi channel " +
"backup must be specified")
}
// If a Single is specified then we'll only accept one of them to allow
// the caller to map the valid/invalid state for each individual
// Single.
if in.GetSingleChanBackups() != nil &&
len(in.GetSingleChanBackups().ChanBackups) != 1 {
return nil, fmt.Errorf("only one Single is accepted at a time")
}
// By default, we'll assume that both backups are valid.
resp := lnrpc.VerifyChanBackupResponse{
SinglesValid: true,
MultiValid: true,
}
if in.GetSingleChanBackups() != nil {
// First, we'll convert the raw byte slice into a type we can
// work with a bit better.
chanBackupsProtos := in.GetSingleChanBackups().ChanBackups
chanBackup := chanbackup.PackedSingles(
[][]byte{chanBackupsProtos[0].ChanBackup},
)
// With our PackedSingles created, we'll attempt to unpack the
// backup. If this fails, then we know the backup is invalid
// for some reason.
_, err := chanBackup.Unpack(r.server.cc.keyRing)
resp.SinglesValid = err == nil
}
if in.GetMultiChanBackup() != nil {
// Similarly, we'll convert the raw byte slice into a
// PackedMulti that we can easily work with.
packedMultiBackup := in.GetMultiChanBackup().MultiChanBackup
packedMulti := chanbackup.PackedMulti(packedMultiBackup)
// We'll now attempt to unpack the Multi. If this fails, then
// we know it's invalid.
_, err := packedMulti.Unpack(r.server.cc.keyRing)
resp.MultiValid = err == nil
}
return &resp, nil
}
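From a client's perspective, verification is a one-shot call. A hedged sketch (package and function names are illustrative); only the multi arm is populated here, so MultiValid carries the verdict while SinglesValid stays at its default:

package scbtool

import (
	"context"

	"github.com/lightningnetwork/lnd/lnrpc"
)

// verifyMulti wraps a packed multi blob in a snapshot and asks lnd whether
// it unpacks cleanly under the node's backup key.
func verifyMulti(ctx context.Context, client lnrpc.LightningClient,
	packedMulti []byte) (bool, error) {

	resp, err := client.VerifyChanBackup(ctx, &lnrpc.ChanBackupSnapshot{
		MultiChanBackup: &lnrpc.MultiChanBackup{
			MultiChanBackup: packedMulti,
		},
	})
	if err != nil {
		return false, err
	}

	return resp.MultiValid, nil
}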
// createBackupSnapshot converts the passed Single backup into a snapshot which
// contains individual packed single backups, as well as a single packed multi
// backup.
func (r *rpcServer) createBackupSnapshot(backups []chanbackup.Single) (
*lnrpc.ChanBackupSnapshot, error) {
// Once we have the set of backups, we'll attempt to pack them all
// into a series of single channel backups.
singleChanPackedBackups, err := chanbackup.PackStaticChanBackups(
backups, r.server.cc.keyRing,
)
if err != nil {
return nil, fmt.Errorf("unable to pack set of chan "+
"backups: %v", err)
}
// Now that we have our set of single packed backups, we'll morph that
// into a form that the proto response requires.
numBackups := len(singleChanPackedBackups)
singleBackupResp := &lnrpc.ChannelBackups{
ChanBackups: make([]*lnrpc.ChannelBackup, 0, numBackups),
}
for chanPoint, singlePackedBackup := range singleChanPackedBackups {
txid := chanPoint.Hash
rpcChanPoint := &lnrpc.ChannelPoint{
FundingTxid: &lnrpc.ChannelPoint_FundingTxidBytes{
FundingTxidBytes: txid[:],
},
OutputIndex: chanPoint.Index,
}
singleBackupResp.ChanBackups = append(
singleBackupResp.ChanBackups,
&lnrpc.ChannelBackup{
ChanPoint: rpcChanPoint,
ChanBackup: singlePackedBackup,
},
)
}
// In addition to the set of single chan backups, we'll also create a
// single multi-channel backup which can be serialized into a single
// file for safe storage.
var b bytes.Buffer
unpackedMultiBackup := chanbackup.Multi{
StaticBackups: backups,
}
err = unpackedMultiBackup.PackToWriter(&b, r.server.cc.keyRing)
if err != nil {
return nil, fmt.Errorf("unable to multi-pack backups: %v", err)
}
multiBackupResp := &lnrpc.MultiChanBackup{
MultiChanBackup: b.Bytes(),
}
for _, singleBackup := range singleBackupResp.ChanBackups {
multiBackupResp.ChanPoints = append(
multiBackupResp.ChanPoints, singleBackup.ChanPoint,
)
}
return &lnrpc.ChanBackupSnapshot{
SingleChanBackups: singleBackupResp,
MultiChanBackup: multiBackupResp,
}, nil
}
// ExportAllChannelBackups returns static channel backups for all existing
// channels known to lnd. A set of regular singular static channel backups for
// each channel is returned. Additionally, a multi-channel backup is returned
// as well, which contains a single encrypted blob containing the backups of
// each channel.
func (r *rpcServer) ExportAllChannelBackups(ctx context.Context,
in *lnrpc.ChanBackupExportRequest) (*lnrpc.ChanBackupSnapshot, error) {
// First, we'll attempt to read backups for ALL currently opened
// channels from disk.
allUnpackedBackups, err := chanbackup.FetchStaticChanBackups(
r.server.chanDB,
)
if err != nil {
return nil, fmt.Errorf("unable to fetch all static chan "+
"backups: %v", err)
}
// With the backups assembled, we'll create a full snapshot.
return r.createBackupSnapshot(allUnpackedBackups)
}
// RestoreChannelBackups accepts a set of singular channel backups, or a single
// encrypted multi-chan backup and attempts to recover any funds remaining
// within the channel. If we're able to unpack the backup, then the new channel
// will be shown under listchannels, as well as pending channels.
func (r *rpcServer) RestoreChannelBackups(ctx context.Context,
in *lnrpc.RestoreChanBackupRequest) (*lnrpc.RestoreBackupResponse, error) {
// First, we'll make our implementation of the
// chanbackup.ChannelRestorer interface which we'll use to properly
// restore either a set of chanbackup.Single or chanbackup.Multi
// backups.
chanRestorer := &chanDBRestorer{
db: r.server.chanDB,
secretKeys: r.server.cc.keyRing,
chainArb: r.server.chainArb,
}
// We'll accept either a list of Single backups, or a single Multi
// backup which contains several single backups.
switch {
case in.GetChanBackups() != nil:
chanBackupsProtos := in.GetChanBackups()
// Now that we know what type of backup we're working with,
// we'll parse them all out into a more suitable format.
packedBackups := make([][]byte, 0, len(chanBackupsProtos.ChanBackups))
for _, chanBackup := range chanBackupsProtos.ChanBackups {
packedBackups = append(
packedBackups, chanBackup.ChanBackup,
)
}
// With our backups obtained, we'll now restore them, which will
// write the new backups to disk, and then attempt to connect
// out to any peers that we know of which were our prior
// channel peers.
err := chanbackup.UnpackAndRecoverSingles(
chanbackup.PackedSingles(packedBackups),
r.server.cc.keyRing, chanRestorer, r.server,
)
if err != nil {
return nil, fmt.Errorf("unable to unpack single "+
"backups: %v", err)
}
case in.GetMultiChanBackup() != nil:
packedMultiBackup := in.GetMultiChanBackup()
// With our backups obtained, we'll now restore them, which will
// write the new backups to disk, and then attempt to connect
// out to any peers that we know of which were our prior
// channel peers.
packedMulti := chanbackup.PackedMulti(packedMultiBackup)
err := chanbackup.UnpackAndRecoverMulti(
packedMulti, r.server.cc.keyRing, chanRestorer,
r.server,
)
if err != nil {
return nil, fmt.Errorf("unable to unpack chan "+
"backup: %v", err)
}
}
return &lnrpc.RestoreBackupResponse{}, nil
}
// SubscribeChannelBackups allows a client to subscribe to the most up to
// date information concerning the state of all channel backups. Each time a
// new channel is added, we return the new set of channels, along with a
// multi-chan backup containing the backup info for all channels. Each time a
// channel is closed, we send a new update, which contains the new set of
// chan backups, as well as the updated set of encrypted multi-chan backups
// with the closed channel(s) removed.
func (r *rpcServer) SubscribeChannelBackups(req *lnrpc.ChannelBackupSubscription,
updateStream lnrpc.Lightning_SubscribeChannelBackupsServer) error {
// First, we'll subscribe to the primary channel notifier so we can
// obtain events for new opened/closed channels.
chanSubscription, err := r.server.channelNotifier.SubscribeChannelEvents()
if err != nil {
return err
}
defer chanSubscription.Cancel()
for {
select {
// A new event has been sent by the channel notifier, we'll
// assemble, then sling out a new event to the client.
case e := <-chanSubscription.Updates():
// TODO(roasbeef): batch dispatch ntnfs
switch e.(type) {
// We only care about new/closed channels, so we'll
// skip any events for active/inactive channels.
case channelnotifier.ActiveChannelEvent:
continue
case channelnotifier.InactiveChannelEvent:
continue
}
// Now that we know the channel state has changed,
// we'll obtain the current set of single channel
// backups from disk.
chanBackups, err := chanbackup.FetchStaticChanBackups(
r.server.chanDB,
)
if err != nil {
return fmt.Errorf("unable to fetch all "+
"static chan backups: %v", err)
}
// With our backups obtained, we'll pack them into a
// snapshot and send them back to the client.
backupSnapshot, err := r.createBackupSnapshot(
chanBackups,
)
if err != nil {
return err
}
err = updateStream.Send(backupSnapshot)
if err != nil {
return err
}
case <-r.quit:
return nil
}
}
}
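A hedged client-side sketch of consuming this stream (again, not part of the diff): the snapshot field names follow the generated lnrpc bindings, and the output path is illustrative. Each received snapshot supersedes every previously stored backup.

package backupclient

import (
	"context"
	"io/ioutil"

	"github.com/lightningnetwork/lnd/lnrpc"
)

// saveBackups consumes the backup stream and persists each new
// encrypted multi backup blob to the given path.
func saveBackups(client lnrpc.LightningClient, path string) error {
	stream, err := client.SubscribeChannelBackups(
		context.Background(), &lnrpc.ChannelBackupSubscription{},
	)
	if err != nil {
		return err
	}

	for {
		snapshot, err := stream.Recv()
		if err != nil {
			return err
		}
		if snapshot.MultiChanBackup == nil {
			continue
		}

		// Overwrite the on-disk copy with the latest blob; stale
		// backups are useless once channels open or close.
		err = ioutil.WriteFile(
			path, snapshot.MultiChanBackup.MultiChanBackup, 0600,
		)
		if err != nil {
			return err
		}
	}
}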

389
server.go
View File

@ -26,6 +26,7 @@ import (
sphinx "github.com/lightningnetwork/lightning-onion"
"github.com/lightningnetwork/lnd/autopilot"
"github.com/lightningnetwork/lnd/brontide"
"github.com/lightningnetwork/lnd/chanbackup"
"github.com/lightningnetwork/lnd/channeldb"
"github.com/lightningnetwork/lnd/channelnotifier"
"github.com/lightningnetwork/lnd/contractcourt"
@ -45,6 +46,7 @@ import (
"github.com/lightningnetwork/lnd/sweep"
"github.com/lightningnetwork/lnd/ticker"
"github.com/lightningnetwork/lnd/tor"
"github.com/lightningnetwork/lnd/walletunlocker"
"github.com/lightningnetwork/lnd/zpay32"
)
@ -79,8 +81,11 @@ var (
// Additionally, the server is also used as a central messaging bus to interact
// with any of its companion objects.
type server struct {
started int32 // atomic
shutdown int32 // atomic
active int32 // atomic
stopping int32 // atomic
start sync.Once
stop sync.Once
// identityPriv is the private key used to authenticate any incoming
// connections.
@ -184,6 +189,15 @@ type server struct {
// changed since last start.
currentNodeAnn *lnwire.NodeAnnouncement
	// chansToRestore is the set of channels that, upon starting, the server
// should attempt to restore/recover.
chansToRestore walletunlocker.ChannelsToRecover
// chanSubSwapper is a sub-system that will ensure our on-disk channel
// backups are consistent at all times. It interacts with the
// channelNotifier to be notified of newly opened and closed channels.
chanSubSwapper *chanbackup.SubSwapper
quit chan struct{}
wg sync.WaitGroup
@ -237,7 +251,8 @@ func noiseDial(idPriv *btcec.PrivateKey) func(net.Addr) (net.Conn, error) {
// newServer creates a new instance of the server which is to listen using the
// passed listener address.
func newServer(listenAddrs []net.Addr, chanDB *channeldb.DB, cc *chainControl,
privKey *btcec.PrivateKey) (*server, error) {
privKey *btcec.PrivateKey,
chansToRestore walletunlocker.ChannelsToRecover) (*server, error) {
var err error
@ -293,11 +308,12 @@ func newServer(listenAddrs []net.Addr, chanDB *channeldb.DB, cc *chainControl,
}
s := &server{
chanDB: chanDB,
cc: cc,
sigPool: lnwallet.NewSigPool(cfg.Workers.Sig, cc.signer),
writePool: writePool,
readPool: readPool,
chanDB: chanDB,
cc: cc,
sigPool: lnwallet.NewSigPool(cfg.Workers.Sig, cc.signer),
writePool: writePool,
readPool: readPool,
chansToRestore: chansToRestore,
invoices: invoices.NewRegistry(chanDB, decodeFinalCltvExpiry),
@ -982,6 +998,24 @@ func newServer(listenAddrs []net.Addr, chanDB *channeldb.DB, cc *chainControl,
return nil, err
}
// Next, we'll assemble the sub-system that will maintain an on-disk
// static backup of the latest channel state.
chanNotifier := &channelNotifier{
chanNotifier: s.channelNotifier,
addrs: s.chanDB,
}
backupFile := chanbackup.NewMultiFile(cfg.BackupFilePath)
startingChans, err := chanbackup.FetchStaticChanBackups(s.chanDB)
if err != nil {
return nil, err
}
s.chanSubSwapper, err = chanbackup.NewSubSwapper(
startingChans, chanNotifier, s.cc.keyRing, backupFile,
)
if err != nil {
return nil, err
}
// Create the connection manager which will be responsible for
// maintaining persistent outbound connections and also accepting new
// incoming connections
@ -1004,115 +1038,176 @@ func newServer(listenAddrs []net.Addr, chanDB *channeldb.DB, cc *chainControl,
// Started returns true if the server has been started, and false otherwise.
// NOTE: This function is safe for concurrent access.
func (s *server) Started() bool {
return atomic.LoadInt32(&s.started) != 0
return atomic.LoadInt32(&s.active) != 0
}
// Start starts the main daemon server, all requested listeners, and any helper
// goroutines.
// NOTE: This function is safe for concurrent access.
func (s *server) Start() error {
// Already running?
if !atomic.CompareAndSwapInt32(&s.started, 0, 1) {
return nil
}
if s.torController != nil {
if err := s.initTorController(); err != nil {
return err
}
}
if s.natTraversal != nil {
s.wg.Add(1)
go s.watchExternalIP()
}
// Start the notification server. This is used so channel management
// goroutines can be notified when a funding transaction reaches a
// sufficient number of confirmations, or when the input for the
// funding transaction is spent in an attempt at an uncooperative close
// by the counterparty.
if err := s.sigPool.Start(); err != nil {
return err
}
if err := s.writePool.Start(); err != nil {
return err
}
if err := s.readPool.Start(); err != nil {
return err
}
if err := s.cc.chainNotifier.Start(); err != nil {
return err
}
if err := s.channelNotifier.Start(); err != nil {
return err
}
if err := s.sphinx.Start(); err != nil {
return err
}
if err := s.htlcSwitch.Start(); err != nil {
return err
}
if err := s.sweeper.Start(); err != nil {
return err
}
if err := s.utxoNursery.Start(); err != nil {
return err
}
if err := s.chainArb.Start(); err != nil {
return err
}
if err := s.breachArbiter.Start(); err != nil {
return err
}
if err := s.authGossiper.Start(); err != nil {
return err
}
if err := s.chanRouter.Start(); err != nil {
return err
}
if err := s.fundingMgr.Start(); err != nil {
return err
}
s.connMgr.Start()
if err := s.invoices.Start(); err != nil {
return err
}
if err := s.chanStatusMgr.Start(); err != nil {
return err
}
// With all the relevant sub-systems started, we'll now attempt to
// establish persistent connections to our direct channel collaborators
// within the network. Before doing so however, we'll prune our set of
// link nodes found within the database to ensure we don't reconnect to
// any nodes we no longer have open channels with.
if err := s.chanDB.PruneLinkNodes(); err != nil {
return err
}
if err := s.establishPersistentConnections(); err != nil {
return err
}
// If network bootstrapping hasn't been disabled, then we'll configure
// the set of active bootstrappers, and launch a dedicated goroutine to
// maintain a set of persistent connections.
if !cfg.NoNetBootstrap && !(cfg.Bitcoin.SimNet || cfg.Litecoin.SimNet) &&
!(cfg.Bitcoin.RegTest || cfg.Litecoin.RegTest) {
bootstrappers, err := initNetworkBootstrappers(s)
if err != nil {
return err
var startErr error
s.start.Do(func() {
if s.torController != nil {
if err := s.initTorController(); err != nil {
startErr = err
return
}
}
s.wg.Add(1)
go s.peerBootstrapper(defaultMinPeers, bootstrappers)
} else {
srvrLog.Infof("Auto peer bootstrapping is disabled")
}
if s.natTraversal != nil {
s.wg.Add(1)
go s.watchExternalIP()
}
return nil
// Start the notification server. This is used so channel
// management goroutines can be notified when a funding
// transaction reaches a sufficient number of confirmations, or
// when the input for the funding transaction is spent in an
// attempt at an uncooperative close by the counterparty.
if err := s.sigPool.Start(); err != nil {
startErr = err
return
}
if err := s.writePool.Start(); err != nil {
startErr = err
return
}
if err := s.readPool.Start(); err != nil {
startErr = err
return
}
if err := s.cc.chainNotifier.Start(); err != nil {
startErr = err
return
}
if err := s.channelNotifier.Start(); err != nil {
startErr = err
return
}
if err := s.sphinx.Start(); err != nil {
startErr = err
return
}
if err := s.htlcSwitch.Start(); err != nil {
startErr = err
return
}
if err := s.sweeper.Start(); err != nil {
startErr = err
return
}
if err := s.utxoNursery.Start(); err != nil {
startErr = err
return
}
if err := s.chainArb.Start(); err != nil {
startErr = err
return
}
if err := s.breachArbiter.Start(); err != nil {
startErr = err
return
}
if err := s.authGossiper.Start(); err != nil {
startErr = err
return
}
if err := s.chanRouter.Start(); err != nil {
startErr = err
return
}
if err := s.fundingMgr.Start(); err != nil {
startErr = err
return
}
if err := s.invoices.Start(); err != nil {
startErr = err
return
}
if err := s.chanStatusMgr.Start(); err != nil {
startErr = err
return
}
// Before we start the connMgr, we'll check to see if we have
// any backups to recover. We do this now as we want to ensure
		// that we have all the information we need to handle channel
// recovery _before_ we even accept connections from any peers.
chanRestorer := &chanDBRestorer{
db: s.chanDB,
secretKeys: s.cc.keyRing,
chainArb: s.chainArb,
}
if len(s.chansToRestore.PackedSingleChanBackups) != 0 {
err := chanbackup.UnpackAndRecoverSingles(
s.chansToRestore.PackedSingleChanBackups,
s.cc.keyRing, chanRestorer, s,
)
if err != nil {
startErr = fmt.Errorf("unable to unpack single "+
"backups: %v", err)
return
}
}
if len(s.chansToRestore.PackedMultiChanBackup) != 0 {
err := chanbackup.UnpackAndRecoverMulti(
s.chansToRestore.PackedMultiChanBackup,
s.cc.keyRing, chanRestorer, s,
)
if err != nil {
startErr = fmt.Errorf("unable to unpack chan "+
"backup: %v", err)
return
}
}
if err := s.chanSubSwapper.Start(); err != nil {
startErr = err
return
}
s.connMgr.Start()
// With all the relevant sub-systems started, we'll now attempt
// to establish persistent connections to our direct channel
// collaborators within the network. Before doing so however,
// we'll prune our set of link nodes found within the database
// to ensure we don't reconnect to any nodes we no longer have
// open channels with.
if err := s.chanDB.PruneLinkNodes(); err != nil {
startErr = err
return
}
if err := s.establishPersistentConnections(); err != nil {
startErr = err
return
}
// If network bootstrapping hasn't been disabled, then we'll
// configure the set of active bootstrappers, and launch a
// dedicated goroutine to maintain a set of persistent
// connections.
if !cfg.NoNetBootstrap && !(cfg.Bitcoin.SimNet || cfg.Litecoin.SimNet) &&
!(cfg.Bitcoin.RegTest || cfg.Litecoin.RegTest) {
bootstrappers, err := initNetworkBootstrappers(s)
if err != nil {
startErr = err
return
}
s.wg.Add(1)
go s.peerBootstrapper(defaultMinPeers, bootstrappers)
} else {
srvrLog.Infof("Auto peer bootstrapping is disabled")
}
// Set the active flag now that we've completed the full
// startup.
atomic.StoreInt32(&s.active, 1)
})
return startErr
}
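The refactor above swaps the compare-and-swap guard for sync.Once plus separate active/stopping flags. A minimal standalone sketch of that pattern follows (illustrative names, not the server struct itself); note that the flags are only meaningful if they are actually written with Store, which is also what the Stop fix below relies on.

package lifecycle

import (
	"sync"
	"sync/atomic"
)

// service is an illustrative stand-in for a long-lived daemon
// component that must be started and stopped exactly once.
type service struct {
	active   int32 // atomic
	stopping int32 // atomic

	start sync.Once
	stop  sync.Once

	quit chan struct{}
}

func newService() *service {
	return &service{quit: make(chan struct{})}
}

// Start runs the startup sequence at most once, recording the first
// failure. The active flag is set only after startup fully completes,
// so Started never reports a half-started service.
func (s *service) Start() error {
	var startErr error
	s.start.Do(func() {
		// ... start sub-systems here, setting startErr and
		// returning on the first failure ...

		atomic.StoreInt32(&s.active, 1)
	})
	return startErr
}

// Stop tears the service down at most once. The stopping flag must be
// written with Store (not merely Loaded) for Stopped to observe it.
func (s *service) Stop() error {
	s.stop.Do(func() {
		atomic.StoreInt32(&s.stopping, 1)
		close(s.quit)

		// ... stop sub-systems here ...
	})
	return nil
}

// Started reports whether startup has completed.
func (s *service) Started() bool { return atomic.LoadInt32(&s.active) != 0 }

// Stopped reports whether shutdown has begun.
func (s *service) Stopped() bool { return atomic.LoadInt32(&s.stopping) != 0 }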
// Stop gracefully shuts down the main daemon server. This function will signal
@ -1120,48 +1215,48 @@ func (s *server) Start() error {
// all successfully exited. Additionally, any/all listeners are closed.
// NOTE: This function is safe for concurrent access.
func (s *server) Stop() error {
// Bail if we're already shutting down.
if !atomic.CompareAndSwapInt32(&s.shutdown, 0, 1) {
return nil
}
s.stop.Do(func() {
	atomic.StoreInt32(&s.stopping, 1)
close(s.quit)
close(s.quit)
if s.torController != nil {
s.torController.Stop()
}
if s.torController != nil {
s.torController.Stop()
}
// Shutdown the wallet, funding manager, and the rpc server.
s.chanStatusMgr.Stop()
s.cc.chainNotifier.Stop()
s.chanRouter.Stop()
s.htlcSwitch.Stop()
s.sphinx.Stop()
s.utxoNursery.Stop()
s.breachArbiter.Stop()
s.authGossiper.Stop()
s.chainArb.Stop()
s.sweeper.Stop()
s.channelNotifier.Stop()
s.cc.wallet.Shutdown()
s.cc.chainView.Stop()
s.connMgr.Stop()
s.cc.feeEstimator.Stop()
s.invoices.Stop()
s.fundingMgr.Stop()
// Shutdown the wallet, funding manager, and the rpc server.
s.chanStatusMgr.Stop()
s.cc.chainNotifier.Stop()
s.chanRouter.Stop()
s.htlcSwitch.Stop()
s.sphinx.Stop()
s.utxoNursery.Stop()
s.breachArbiter.Stop()
s.authGossiper.Stop()
s.chainArb.Stop()
s.sweeper.Stop()
s.channelNotifier.Stop()
s.cc.wallet.Shutdown()
s.cc.chainView.Stop()
s.connMgr.Stop()
s.cc.feeEstimator.Stop()
s.invoices.Stop()
s.fundingMgr.Stop()
	s.chanSubSwapper.Stop()
	// Disconnect from each active peer to ensure that
// peerTerminationWatchers signal completion to each peer.
for _, peer := range s.Peers() {
s.DisconnectPeer(peer.addr.IdentityKey)
}
	// Disconnect from each active peer to ensure that
// peerTerminationWatchers signal completion to each peer.
for _, peer := range s.Peers() {
s.DisconnectPeer(peer.addr.IdentityKey)
}
// Wait for all lingering goroutines to quit.
s.wg.Wait()
// Wait for all lingering goroutines to quit.
s.wg.Wait()
s.sigPool.Stop()
s.writePool.Stop()
s.readPool.Stop()
s.sigPool.Stop()
s.writePool.Stop()
s.readPool.Stop()
})
return nil
}
@ -1169,7 +1264,7 @@ func (s *server) Stop() error {
// Stopped returns true if the server has been instructed to shut down.
// NOTE: This function is safe for concurrent access.
func (s *server) Stopped() bool {
return atomic.LoadInt32(&s.shutdown) != 0
return atomic.LoadInt32(&s.stopping) != 0
}
// configurePortForwarding attempts to set up port forwarding for the different
@ -2719,7 +2814,6 @@ type openChanReq struct {
//
// NOTE: This function is safe for concurrent access.
func (s *server) ConnectToPeer(addr *lnwire.NetAddress, perm bool) error {
targetPub := string(addr.IdentityKey.SerializeCompressed())
// Acquire mutex, but use explicit unlocking instead of defer for
@ -2761,7 +2855,8 @@ func (s *server) ConnectToPeer(addr *lnwire.NetAddress, perm bool) error {
s.persistentPeersBackoff[targetPub] = cfg.MinBackoff
}
s.persistentConnReqs[targetPub] = append(
s.persistentConnReqs[targetPub], connReq)
s.persistentConnReqs[targetPub], connReq,
)
s.mu.Unlock()
go s.connMgr.Connect(connReq)

View File

@ -10,6 +10,7 @@ import (
"github.com/btcsuite/btcd/chaincfg"
"github.com/btcsuite/btcwallet/wallet"
"github.com/lightningnetwork/lnd/aezeed"
"github.com/lightningnetwork/lnd/chanbackup"
"github.com/lightningnetwork/lnd/keychain"
"github.com/lightningnetwork/lnd/lnrpc"
"github.com/lightningnetwork/lnd/lnwallet"
@ -17,6 +18,19 @@ import (
"golang.org/x/net/context"
)
// ChannelsToRecover wraps any set of packed (serialized+encrypted) channel
// backups together. These can be passed in when unlocking the wallet, or
// creating a new wallet for the first time with an existing seed.
type ChannelsToRecover struct {
// PackedMultiChanBackup is an encrypted and serialized multi-channel
// backup.
PackedMultiChanBackup chanbackup.PackedMulti
// PackedSingleChanBackups is a series of encrypted and serialized
	// single-channel backups for one or more channels.
PackedSingleChanBackups chanbackup.PackedSingles
}
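A hedged sketch of how a caller might populate this struct from a backup file on disk (the path and helper name are illustrative assumptions, not part of this diff):

package recovery

import (
	"io/ioutil"

	"github.com/lightningnetwork/lnd/chanbackup"
	"github.com/lightningnetwork/lnd/walletunlocker"
)

// loadChannelsToRecover reads a packed multi backup off disk and wraps
// it in the struct the unlocker expects. Either field of
// ChannelsToRecover may be left empty if that flavor of backup isn't
// available.
func loadChannelsToRecover(path string) (walletunlocker.ChannelsToRecover, error) {
	var chansToRecover walletunlocker.ChannelsToRecover

	packedMulti, err := ioutil.ReadFile(path)
	if err != nil {
		return chansToRecover, err
	}

	chansToRecover.PackedMultiChanBackup = chanbackup.PackedMulti(packedMulti)
	return chansToRecover, nil
}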
// WalletInitMsg is a message sent by the UnlockerService when a user wishes to
// set up the internal wallet for the first time. The user MUST provide a
// passphrase, but is also able to provide their own source of entropy. If
@ -36,6 +50,10 @@ type WalletInitMsg struct {
// recovery should be attempted, such as after the wallet's initial
// creation.
RecoveryWindow uint32
	// ChanBackups is a set of static channel backups that should be
	// restored after the wallet has been initialized.
ChanBackups ChannelsToRecover
}
// WalletUnlockMsg is a message sent by the UnlockerService when a user wishes
@ -53,12 +71,16 @@ type WalletUnlockMsg struct {
// creation, but before any addresses have been created.
RecoveryWindow uint32
// Wallet is the loaded and unlocked Wallet. This is returned
// through the channel to avoid it being unlocked twice (once to check
// if the password is correct, here in the WalletUnlocker and again
// later when lnd actually uses it). Because unlocking involves scrypt
// which is resource intensive, we want to avoid doing it twice.
// Wallet is the loaded and unlocked Wallet. This is returned through
// the channel to avoid it being unlocked twice (once to check if the
// password is correct, here in the WalletUnlocker and again later when
// lnd actually uses it). Because unlocking involves scrypt which is
// resource intensive, we want to avoid doing it twice.
Wallet *wallet.Wallet
	// ChanBackups is a set of static channel backups that should be
	// restored after the wallet has been unlocked.
ChanBackups ChannelsToRecover
}
// UnlockerService implements the WalletUnlocker service used to provide lnd
@ -167,6 +189,43 @@ func (u *UnlockerService) GenSeed(ctx context.Context,
}, nil
}
// extractChanBackups is a helper function that extracts the set of channel
// backups from the proto into a format that we'll pass to higher level
// sub-systems.
func extractChanBackups(chanBackups *lnrpc.ChanBackupSnapshot) *ChannelsToRecover {
// If there aren't any populated channel backups, then we can exit
// early as there's nothing to extract.
if chanBackups == nil || (chanBackups.SingleChanBackups == nil &&
chanBackups.MultiChanBackup == nil) {
return nil
}
	// Now that we know there's at least a single backup populated, we'll
// extract the multi-chan backup (if it's there).
var backups ChannelsToRecover
if chanBackups.MultiChanBackup != nil {
multiBackup := chanBackups.MultiChanBackup
backups.PackedMultiChanBackup = chanbackup.PackedMulti(
multiBackup.MultiChanBackup,
)
}
if chanBackups.SingleChanBackups == nil {
return &backups
}
// Finally, we can extract all the single chan backups as well.
for _, backup := range chanBackups.SingleChanBackups.ChanBackups {
singleChanBackup := backup.ChanBackup
backups.PackedSingleChanBackups = append(
backups.PackedSingleChanBackups, singleChanBackup,
)
}
return &backups
}
// InitWallet is used when lnd is starting up for the first time to fully
// initialize the daemon and its internal wallet. At the very least a wallet
// password must be provided. This will be used to encrypt sensitive material
@ -233,6 +292,13 @@ func (u *UnlockerService) InitWallet(ctx context.Context,
RecoveryWindow: uint32(recoveryWindow),
}
// Before we return the unlock payload, we'll check if we can extract
// any channel backups to pass up to the higher level sub-system.
chansToRestore := extractChanBackups(in.ChannelBackups)
if chansToRestore != nil {
initMsg.ChanBackups = *chansToRestore
}
u.InitMsgs <- initMsg
return &lnrpc.InitWalletResponse{}, nil
@ -277,6 +343,13 @@ func (u *UnlockerService) UnlockWallet(ctx context.Context,
Wallet: unlockedWallet,
}
// Before we return the unlock payload, we'll check if we can extract
// any channel backups to pass up to the higher level sub-system.
chansToRestore := extractChanBackups(in.ChannelBackups)
if chansToRestore != nil {
walletUnlockMsg.ChanBackups = *chansToRestore
}
	// At this point we were able to open the existing wallet with the
// provided password. We send the password over the UnlockMsgs
// channel, such that it can be used by lnd to open the wallet.
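To close the loop, a hedged sketch of a client attaching a backup snapshot to the unlock request so the extraction above picks it up. The request field names follow the lnrpc definitions this diff references; the WalletUnlockerClient wiring and helper name are assumptions for illustration.

package recovery

import (
	"context"
	"io/ioutil"

	"github.com/lightningnetwork/lnd/lnrpc"
)

// unlockWithBackups unlocks the wallet while handing lnd a packed
// multi-channel backup to restore in the same call.
func unlockWithBackups(client lnrpc.WalletUnlockerClient, password []byte,
	backupPath string) error {

	packedMulti, err := ioutil.ReadFile(backupPath)
	if err != nil {
		return err
	}

	// The ChannelBackups snapshot rides along with the unlock request,
	// so recovery can begin before any peer connections are accepted.
	_, err = client.UnlockWallet(
		context.Background(), &lnrpc.UnlockWalletRequest{
			WalletPassword: password,
			ChannelBackups: &lnrpc.ChanBackupSnapshot{
				MultiChanBackup: &lnrpc.MultiChanBackup{
					MultiChanBackup: packedMulti,
				},
			},
		},
	)
	return err
}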