Mirror of https://github.com/lightningnetwork/lnd.git, synced 2025-05-29 17:19:33 +02:00
Merge pull request #5528 from guggero/keep-channel-backup-local-force-close
backups: Keep channel backup in case of local force close
This commit is contained in: commit be341bcd68
@@ -122,12 +122,52 @@ func (c *channelNotifier) SubscribeChans(startingChans map[wire.OutPoint]struct{
            // channel to the sub-swapper.
            case channelnotifier.ClosedChannelEvent:
                chanPoint := event.CloseSummary.ChanPoint
                closeType := event.CloseSummary.CloseType

                // Because we see the contract as closed
                // once our local force close TX
                // confirms, the channel arbitrator
                // already fires on this event. But
                // because our funds can be in limbo for
                // up to 2 weeks worst case we don't
                // want to remove the crucial info we
                // need for sweeping that time locked
                // output before we've actually done so.
                if closeType == channeldb.LocalForceClose {
                    ltndLog.Debugf("Channel %v "+
                        "was force closed by "+
                        "us, not removing "+
                        "from channel backup "+
                        "until fully resolved",
                        chanPoint)

                    continue
                }

                chanEvent := chanbackup.ChannelEvent{
                    ClosedChans: []wire.OutPoint{
                        chanPoint,
                    },
                }

                select {
                case chanUpdates <- chanEvent:
                case <-quit:
                    return
                }

            // A channel was fully resolved on chain. This
            // should only really interest us if it was a
            // locally force closed channel where we didn't
            // remove the channel already when the close
            // event was fired.
            case channelnotifier.FullyResolvedChannelEvent:
                chanEvent := chanbackup.ChannelEvent{
                    ClosedChans: []wire.OutPoint{
                        *event.ChannelPoint,
                    },
                }

                select {
                case chanUpdates <- chanEvent:
                case <-quit:
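The comment block above carries the core policy of this change. As a standalone illustration, here is a minimal sketch of the retention rule, using a local stand-in for channeldb.ClosureType (the names below are this sketch's own, not lnd exports):

package main

import "fmt"

// closureType stands in for channeldb.ClosureType in this sketch.
type closureType uint8

const (
    cooperativeClose closureType = iota
    localForceClose
    remoteForceClose
)

// pruneOnClose reports whether a channel may be dropped from the
// channel.backup file as soon as its close confirms on chain. A local
// force close is kept until fully resolved, because the backup's data
// is still needed while the time locked to_local output waits to be
// swept.
func pruneOnClose(ct closureType) bool {
    return ct != localForceClose
}

func main() {
    fmt.Println(pruneOnClose(cooperativeClose)) // true: prune immediately
    fmt.Println(pruneOnClose(localForceClose))  // false: keep until resolved
}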
@@ -65,6 +65,14 @@ type ClosedChannelEvent struct {
    CloseSummary *channeldb.ChannelCloseSummary
}

// FullyResolvedChannelEvent represents a new event where a channel becomes
// fully resolved.
type FullyResolvedChannelEvent struct {
    // ChannelPoint is the channelpoint for the newly fully resolved
    // channel.
    ChannelPoint *wire.OutPoint
}

// New creates a new channel notifier. The ChannelNotifier gets channel
// events from peers and from the chain arbitrator, and dispatches them to
// its clients.
@@ -125,7 +133,6 @@ func (c *ChannelNotifier) NotifyPendingOpenChannelEvent(chanPoint wire.OutPoint,
// NotifyOpenChannelEvent notifies the channelEventNotifier goroutine that a
// channel has gone from pending open to open.
func (c *ChannelNotifier) NotifyOpenChannelEvent(chanPoint wire.OutPoint) {
    // Fetch the relevant channel from the database.
    channel, err := c.chanDB.FetchChannel(chanPoint)
    if err != nil {
@@ -155,6 +162,18 @@ func (c *ChannelNotifier) NotifyClosedChannelEvent(chanPoint wire.OutPoint) {
    }
}

// NotifyFullyResolvedChannelEvent notifies the channelEventNotifier goroutine
// that a channel was fully resolved on chain.
func (c *ChannelNotifier) NotifyFullyResolvedChannelEvent(
    chanPoint wire.OutPoint) {

    // Send the resolved event to all channel event subscribers.
    event := FullyResolvedChannelEvent{ChannelPoint: &chanPoint}
    if err := c.ntfnServer.SendUpdate(event); err != nil {
        log.Warnf("Unable to send resolved channel update: %v", err)
    }
}

// NotifyActiveLinkEvent notifies the channelEventNotifier goroutine that a
// link has been added to the switch.
func (c *ChannelNotifier) NotifyActiveLinkEvent(chanPoint wire.OutPoint) {
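Internal subscribers see the new event on the same stream as the existing ones and can tell the two close-related events apart with a type switch. A minimal sketch of such a consumer (the handler itself is hypothetical; the event types and fields are the ones added above):

package notifierdemo

import (
    "fmt"

    "github.com/lightningnetwork/lnd/channelnotifier"
)

// handleEvent illustrates the two-phase close signal for a local force
// close: ClosedChannelEvent fires when the commitment transaction
// confirms, FullyResolvedChannelEvent only once every output is swept.
func handleEvent(event interface{}) {
    switch e := event.(type) {
    case channelnotifier.ClosedChannelEvent:
        // Funds may still sit in a time locked output here.
        fmt.Printf("closed: %v (%v)\n",
            e.CloseSummary.ChanPoint, e.CloseSummary.CloseType)

    case channelnotifier.FullyResolvedChannelEvent:
        // Everything is swept; the channel can be forgotten.
        fmt.Printf("fully resolved: %v\n", *e.ChannelPoint)
    }
}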
@@ -158,6 +158,15 @@ type ChainArbitratorConfig struct {
    // will use to notify the ChannelNotifier about a newly closed channel.
    NotifyClosedChannel func(wire.OutPoint)

    // NotifyFullyResolvedChannel is a function closure that the
    // ChainArbitrator will use to notify the ChannelNotifier about a newly
    // resolved channel. The main difference to NotifyClosedChannel is that
    // in case of a local force close, NotifyClosedChannel is called when
    // the published commitment transaction confirms, while
    // NotifyFullyResolvedChannel is only called when the channel is fully
    // resolved (which includes sweeping any time locked funds).
    NotifyFullyResolvedChannel func(point wire.OutPoint)

    // OnionProcessor is used to decode onion payloads for on-chain
    // resolution.
    OnionProcessor OnionProcessor
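To make that ordering concrete, a small self-contained sketch of the two callbacks during a local force close; notifyFuncs mirrors just these two fields, since a real ChainArbitratorConfig carries many more, and the print statements are illustrative only:

package main

import (
    "fmt"

    "github.com/btcsuite/btcd/wire"
)

// notifyFuncs mirrors the two close-related callbacks of
// contractcourt.ChainArbitratorConfig for this sketch.
type notifyFuncs struct {
    NotifyClosedChannel        func(wire.OutPoint)
    NotifyFullyResolvedChannel func(wire.OutPoint)
}

func main() {
    n := notifyFuncs{
        // Fires when the published commitment transaction confirms.
        NotifyClosedChannel: func(op wire.OutPoint) {
            fmt.Printf("closed on chain: %v\n", op)
        },
        // Fires only after all outputs, including the CSV locked
        // to_local output, have been swept.
        NotifyFullyResolvedChannel: func(op wire.OutPoint) {
            fmt.Printf("fully resolved: %v\n", op)
        },
    }

    var op wire.OutPoint // zero value, for illustration only
    n.NotifyClosedChannel(op)        // first, at confirmation
    n.NotifyFullyResolvedChannel(op) // later, after sweeping
}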
@@ -366,6 +375,10 @@ func newActiveChannelArbitrator(channel *channeldb.OpenChannel,
    }

    arbCfg.MarkChannelResolved = func() error {
        if c.cfg.NotifyFullyResolvedChannel != nil {
            c.cfg.NotifyFullyResolvedChannel(chanPoint)
        }

        return c.ResolveContract(chanPoint)
    }
@@ -566,6 +579,10 @@ func (c *ChainArbitrator) Start() error {
        return err
    }
    arbCfg.MarkChannelResolved = func() error {
        if c.cfg.NotifyFullyResolvedChannel != nil {
            c.cfg.NotifyFullyResolvedChannel(chanPoint)
        }

        return c.ResolveContract(chanPoint)
    }
@@ -41,6 +41,11 @@ you.
* [Makes publishtransaction, in the wallet sub-server, reachable through
  lncli](https://github.com/lightningnetwork/lnd/pull/5460).

# Safety

* Locally force closed channels are now [kept in the channel.backup file until
  their time lock has fully matured](https://github.com/lightningnetwork/lnd/pull/5528).

# Build System

* [A new pre-submit check has been
(File diff suppressed because it is too large.)
@@ -2249,6 +2249,7 @@ message ChannelEventUpdate {
        ChannelPoint active_channel = 3;
        ChannelPoint inactive_channel = 4;
        PendingUpdate pending_open_channel = 6;
        ChannelPoint fully_resolved_channel = 7;
    }

    enum UpdateType {
@@ -2257,6 +2258,7 @@ message ChannelEventUpdate {
        ACTIVE_CHANNEL = 2;
        INACTIVE_CHANNEL = 3;
        PENDING_OPEN_CHANNEL = 4;
        FULLY_RESOLVED_CHANNEL = 5;
    }

    UpdateType type = 5;
@@ -2327,7 +2327,8 @@
        "CLOSED_CHANNEL",
        "ACTIVE_CHANNEL",
        "INACTIVE_CHANNEL",
        "PENDING_OPEN_CHANNEL",
        "FULLY_RESOLVED_CHANNEL"
      ],
      "default": "OPEN_CHANNEL"
    },
@@ -3306,6 +3307,9 @@
        "pending_open_channel": {
          "$ref": "#/definitions/lnrpcPendingUpdate"
        },
        "fully_resolved_channel": {
          "$ref": "#/definitions/lnrpcChannelPoint"
        },
        "type": {
          "$ref": "#/definitions/ChannelEventUpdateUpdateType"
        }
@@ -1386,6 +1386,91 @@ func assertDLPExecuted(net *lntest.NetworkHarness, t *harnessTest,
    assertNodeNumChannels(t, carol, 0)
}

func assertTimeLockSwept(net *lntest.NetworkHarness, t *harnessTest,
    carol *lntest.HarnessNode, carolStartingBalance int64,
    dave *lntest.HarnessNode, daveStartingBalance int64,
    anchors bool) {

    ctxb := context.Background()
    expectedTxes := 2
    if anchors {
        expectedTxes = 3
    }

    // Carol should sweep her funds immediately, as they are not timelocked.
    // We also expect Carol and Dave to sweep their anchor, if present.
    _, err := waitForNTxsInMempool(
        net.Miner.Client, expectedTxes, minerMempoolTimeout,
    )
    require.NoError(t.t, err, "unable to find Carol's sweep tx in mempool")

    // Carol should consider the channel pending force close (since she is
    // waiting for her sweep to confirm).
    assertNumPendingChannels(t, carol, 0, 1)

    // Dave is considering it "pending force close", as he must wait
    // before he can sweep his time locked output.
    assertNumPendingChannels(t, dave, 0, 1)

    // Mine the sweep (and anchor) tx(ns).
    _ = mineBlocks(t, net, 1, expectedTxes)[0]

    // Now Carol should consider the channel fully closed.
    assertNumPendingChannels(t, carol, 0, 0)

    // We query Carol's balance to make sure it increased after the channel
    // closed. This checks that she was able to sweep the funds she had in
    // the channel.
    ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
    balReq := &lnrpc.WalletBalanceRequest{}
    carolBalResp, err := carol.WalletBalance(ctxt, balReq)
    require.NoError(t.t, err, "unable to get Carol's balance")

    carolBalance := carolBalResp.ConfirmedBalance
    require.Greater(
        t.t, carolBalance, carolStartingBalance, "balance not increased",
    )

    // After Dave's output matures, he should reclaim his funds.
    //
    // The commit sweep resolver publishes the sweep tx at defaultCSV-1 and
    // we already mined one block after the commitment was published, so
    // take that into account.
    mineBlocks(t, net, defaultCSV-1-1, 0)
    daveSweep, err := waitForTxInMempool(
        net.Miner.Client, minerMempoolTimeout,
    )
    require.NoError(t.t, err, "unable to find Dave's sweep tx in mempool")
    block := mineBlocks(t, net, 1, 1)[0]
    assertTxInBlock(t, block, daveSweep)

    // Now the channel should be fully closed also from Dave's POV.
    assertNumPendingChannels(t, dave, 0, 0)

    // Make sure Dave got his balance back.
    err = wait.NoError(func() error {
        ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
        daveBalResp, err := dave.WalletBalance(ctxt, balReq)
        if err != nil {
            return fmt.Errorf("unable to get Dave's balance: %v",
                err)
        }

        daveBalance := daveBalResp.ConfirmedBalance
        if daveBalance <= daveStartingBalance {
            return fmt.Errorf("expected dave to have balance "+
                "above %d, instead had %v", daveStartingBalance,
                daveBalance)
        }

        return nil
    }, defaultTimeout)
    require.NoError(t.t, err)

    assertNodeNumChannels(t, dave, 0)
    assertNodeNumChannels(t, carol, 0)
}

// verifyCloseUpdate is used to verify that a closed channel update is of the
// expected type.
func verifyCloseUpdate(chanUpdate *lnrpc.ChannelEventUpdate,
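The mineBlocks arithmetic in assertTimeLockSwept is easy to misread, so here is the same calculation spelled out; the value of defaultCSV is this sketch's assumption, not taken from the diff:

package main

import "fmt"

func main() {
    // defaultCSV mirrors the itest harness constant; 4 is assumed
    // here purely for illustration.
    const defaultCSV = 4

    // The commit sweep resolver publishes the sweep tx one block
    // early, at defaultCSV-1, and one block was already mined to
    // confirm the commitment itself, hence the second -1.
    blocksToMine := defaultCSV - 1 - 1
    fmt.Println(blocksToMine) // 2: blocks left before Dave's sweep appears
}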
@@ -1401,6 +1486,7 @@ func verifyCloseUpdate(chanUpdate *lnrpc.ChannelEventUpdate,
                lnrpc.ChannelEventUpdate_INACTIVE_CHANNEL,
                chanUpdate.Type)
        }

    case *lnrpc.ChannelEventUpdate_ClosedChannel:
        if chanUpdate.Type !=
            lnrpc.ChannelEventUpdate_CLOSED_CHANNEL {
@@ -1422,6 +1508,13 @@ func verifyCloseUpdate(chanUpdate *lnrpc.ChannelEventUpdate,
                update.ClosedChannel.CloseInitiator)
        }

    case *lnrpc.ChannelEventUpdate_FullyResolvedChannel:
        if chanUpdate.Type != lnrpc.ChannelEventUpdate_FULLY_RESOLVED_CHANNEL {
            return fmt.Errorf("update type mismatch: expected %v, got %v",
                lnrpc.ChannelEventUpdate_FULLY_RESOLVED_CHANNEL,
                chanUpdate.Type)
        }

    default:
        return fmt.Errorf("channel update channel of wrong type, "+
            "expected closed channel, got %T",
@@ -390,6 +390,35 @@ func testChannelBackupRestore(net *lntest.NetworkHarness, t *harnessTest) {
                )
            },
        },

        // Restore a channel that was force closed by Dave just before
        // going offline.
        {
            name: "restore force closed from backup file " +
                "anchors",
            initiator:       true,
            private:         false,
            anchorCommit:    true,
            localForceClose: true,
            restoreMethod: func(oldNode *lntest.HarnessNode,
                backupFilePath string,
                mnemonic []string) (nodeRestorer, error) {

                // Read the entire Multi backup stored within
                // this node's channels.backup file.
                multi, err := ioutil.ReadFile(backupFilePath)
                if err != nil {
                    return nil, err
                }

                // Now that we have Dave's backup file, we'll
                // create a new nodeRestorer that will restore
                // using the on-disk channels.backup.
                return chanRestoreViaRPC(
                    net, password, mnemonic, multi, oldNode,
                )
            },
        },
    }

    // TODO(roasbeef): online vs offline close?
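For reference, the restore path that chanRestoreViaRPC exercises boils down to a single RestoreChannelBackups call with the file's bytes as a multi-channel backup. A minimal sketch, with error handling kept terse and the function name hypothetical:

package restoredemo

import (
    "context"
    "io/ioutil"

    "github.com/lightningnetwork/lnd/lnrpc"
)

// restoreFromFile feeds a node's channels.backup file to the
// RestoreChannelBackups RPC as a multi-channel backup blob.
func restoreFromFile(ctx context.Context, client lnrpc.LightningClient,
    path string) error {

    multi, err := ioutil.ReadFile(path)
    if err != nil {
        return err
    }

    _, err = client.RestoreChannelBackups(
        ctx, &lnrpc.RestoreChanBackupRequest{
            Backup: &lnrpc.RestoreChanBackupRequest_MultiChanBackup{
                MultiChanBackup: multi,
            },
        },
    )
    return err
}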
@@ -578,15 +607,28 @@ func testChannelBackupUpdates(net *lntest.NetworkHarness, t *harnessTest) {
            ctxt, t, net, net.Alice, chanPoint, forceClose,
        )

        // If we force closed the channel, then we'll mine enough
        // blocks to ensure all outputs have been swept.
        if forceClose {
            // A local force closed channel will trigger a
            // notification once the commitment TX confirms on
            // chain. But that won't remove the channel from the
            // backup just yet, that will only happen once the time
            // locked contract was fully resolved on chain.
            assertBackupNtfns(1)

            cleanupForceClose(t, net, net.Alice, chanPoint)

            // Now that the channel's been fully resolved, we expect
            // another notification.
            assertBackupNtfns(1)
            assertBackupFileState()
        } else {
            // We should get a single notification after closing,
            // and the on-disk state should match this latest
            // notification.
            assertBackupNtfns(1)
            assertBackupFileState()
        }
    }
}
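The notification accounting above is the externally observable contract of this change. Summed up as a hypothetical helper (not part of the test):

// expectedBackupNtfns returns how many channel.backup updates one
// close should produce after this change: a force close yields one
// update when the commitment confirms (channel kept in the backup) and
// a second once the time locked funds are swept (channel removed); a
// cooperative close yields a single update.
func expectedBackupNtfns(forceClose bool) int {
    if forceClose {
        return 2
    }
    return 1
}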
@@ -793,6 +835,10 @@ type chanRestoreTestCase struct {
    // producer format should also be created before restoring.
    legacyRevocation bool

    // localForceClose signals if the channel should be force closed by the
    // node that is going to recover.
    localForceClose bool

    // restoreMethod takes an old node, then returns a function
    // closure that'll return the same node, but with its state
    // restored via a custom method. We use this to abstract away
|
||||
|
||||
// We will either open a confirmed or unconfirmed channel, depending on
|
||||
// the requirements of the test case.
|
||||
var chanPoint *lnrpc.ChannelPoint
|
||||
switch {
|
||||
case testCase.unconfirmed:
|
||||
ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout)
|
||||
@ -903,7 +950,7 @@ func testChanRestoreScenario(t *harnessTest, net *lntest.NetworkHarness,
|
||||
|
||||
default:
|
||||
ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout)
|
||||
chanPoint := openChannelAndAssert(
|
||||
chanPoint = openChannelAndAssert(
|
||||
ctxt, t, net, from, to,
|
||||
lntest.OpenChannelParams{
|
||||
Amt: chanAmt,
|
||||
@ -946,6 +993,32 @@ func testChanRestoreScenario(t *harnessTest, net *lntest.NetworkHarness,
|
||||
}
|
||||
}
|
||||
|
||||
// If we're testing that locally force closed channels can be restored
|
||||
// then we issue the force close now.
|
||||
if testCase.localForceClose && chanPoint != nil {
|
||||
ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout)
|
||||
defer cancel()
|
||||
|
||||
_, err = dave.CloseChannel(ctxt, &lnrpc.CloseChannelRequest{
|
||||
ChannelPoint: chanPoint,
|
||||
Force: true,
|
||||
})
|
||||
require.NoError(t.t, err)
|
||||
|
||||
// After closing the channel we mine one transaction to make
|
||||
// sure the commitment TX was confirmed.
|
||||
_ = mineBlocks(t, net, 1, 1)
|
||||
|
||||
// Now we need to make sure that the channel is still in the
|
||||
// backup. Otherwise restoring won't work later.
|
||||
_, err = dave.ExportChannelBackup(
|
||||
ctxt, &lnrpc.ExportChannelBackupRequest{
|
||||
ChanPoint: chanPoint,
|
||||
},
|
||||
)
|
||||
require.NoError(t.t, err)
|
||||
}
|
||||
|
||||
// Before we start the recovery, we'll record the balances of both
|
||||
// Carol and Dave to ensure they both sweep their coins at the end.
|
||||
balReq := &lnrpc.WalletBalanceRequest{}
|
||||
@@ -1009,6 +1082,30 @@ func testChanRestoreScenario(t *harnessTest, net *lntest.NetworkHarness,
        t.Fatalf("On-chain balance not restored: %v", err)
    }

    // For our force close scenario we don't need the channel to be closed
    // by Carol since it was already force closed before we started the
    // recovery. All we need is for Carol to send us over the commit height
    // so we can sweep the time locked output with the correct commit point.
    if testCase.localForceClose {
        assertNumPendingChannels(t, dave, 0, 1)

        err = restartCarol()
        require.NoError(t.t, err)

        // Now that we have our new node up, we expect that it'll
        // re-connect to Carol automatically based on the restored
        // backup.
        ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
        net.EnsureConnected(ctxt, t.t, dave, carol)

        assertTimeLockSwept(
            net, t, carol, carolStartingBalance, dave,
            daveStartingBalance, testCase.anchorCommit,
        )

        return
    }

    // We now check that the restored channel is in the proper state. It
    // should not yet be force closing as no connection with the remote
    // peer was established yet. We should also not be able to close the
@@ -276,8 +276,8 @@ func testBasicChannelCreationAndUpdates(net *lntest.NetworkHarness, t *harnessTest) {
    aliceChanSub := subscribeChannelNotifications(ctxb, t, net.Alice)
    defer close(aliceChanSub.quit)

    // Open the channels between Alice and Bob, asserting that the channels
    // have been properly opened on-chain.
    chanPoints := make([]*lnrpc.ChannelPoint, numChannels)
    for i := 0; i < numChannels; i++ {
        ctxt, _ := context.WithTimeout(ctxb, channelOpenTimeout)
@@ -291,11 +291,10 @@ func testBasicChannelCreationAndUpdates(net *lntest.NetworkHarness, t *harnessTest) {

    // Since each of the channels just became open, Bob and Alice should
    // each receive a pending open, an open and an active notification for
    // each channel.
    const numExpectedOpenUpdates = 3 * numChannels
    verifyOpenUpdatesReceived := func(sub channelSubscription) error {
        numChannelUpds := 0
        for numChannelUpds < numExpectedOpenUpdates {
            select {
            case update := <-sub.updateChan:
                switch update.Type {
@@ -327,30 +326,32 @@ func testBasicChannelCreationAndUpdates(net *lntest.NetworkHarness, t *harnessTest) {
                    update.Type)
                }
                numChannelUpds++

            case <-time.After(time.Second * 10):
                return fmt.Errorf("timeout waiting for channel "+
                    "notifications, only received %d/%d "+
                    "chanupds", numChannelUpds,
                    numExpectedOpenUpdates)
            }
        }

        return nil
    }

    require.NoError(
        t.t, verifyOpenUpdatesReceived(bobChanSub), "bob open channels",
    )
    require.NoError(
        t.t, verifyOpenUpdatesReceived(aliceChanSub), "alice open "+
            "channels",
    )

    // Close the channels between Alice and Bob, asserting that the channels
    // have been properly closed on-chain.
    for i, chanPoint := range chanPoints {
        ctx, _ := context.WithTimeout(context.Background(), defaultTimeout)

        // Force close the first of the two channels.
        force := i%2 == 0
        closeChannelAndAssert(ctx, t, net, net.Alice, chanPoint, force)
        if force {
@@ -360,20 +361,21 @@ func testBasicChannelCreationAndUpdates(net *lntest.NetworkHarness, t *harnessTest) {

    // verifyCloseUpdatesReceived is used to verify that Alice and Bob
    // receive the correct channel updates in order.
    const numExpectedCloseUpdates = 3 * numChannels
    verifyCloseUpdatesReceived := func(sub channelSubscription,
        forceType lnrpc.ChannelCloseSummary_ClosureType,
        closeInitiator lnrpc.Initiator) error {

        // Ensure an inactive, a closed and a fully resolved notification
        // is received for each closed channel.
        numChannelUpds := 0
        for numChannelUpds < numExpectedCloseUpdates {
            expectedCloseType := lnrpc.ChannelCloseSummary_COOPERATIVE_CLOSE

            // Every other channel should be force closed. If this
            // channel was force closed, set the expected close type
            // to the type passed in.
            force := (numChannelUpds/3)%2 == 0
            if force {
                expectedCloseType = forceType
            }
@@ -389,13 +391,15 @@ func testBasicChannelCreationAndUpdates(net *lntest.NetworkHarness, t *harnessTest) {
            }

            numChannelUpds++

        case err := <-sub.errChan:
            return err

        case <-time.After(time.Second * 10):
            return fmt.Errorf("timeout waiting "+
                "for channel notifications, only "+
                "received %d/%d chanupds",
                numChannelUpds, numExpectedCloseUpdates)
            }
        }
@@ -406,19 +410,23 @@ func testBasicChannelCreationAndUpdates(net *lntest.NetworkHarness, t *harnessTest) {
    // receive a remote force close notification for force closed channels.
    // All channels (cooperatively and force closed) should have a remote
    // close initiator because Alice closed the channels.
    require.NoError(
        t.t, verifyCloseUpdatesReceived(
            bobChanSub,
            lnrpc.ChannelCloseSummary_REMOTE_FORCE_CLOSE,
            lnrpc.Initiator_INITIATOR_REMOTE,
        ), "verifying bob close updates",
    )

    // Verify Alice receives all closed channel notifications. She should
    // receive a local force close notification for force closed channels.
    // All channels (cooperatively and force closed) should have a local
    // close initiator because Alice closed the channels.
    require.NoError(
        t.t, verifyCloseUpdatesReceived(
            aliceChanSub,
            lnrpc.ChannelCloseSummary_LOCAL_FORCE_CLOSE,
            lnrpc.Initiator_INITIATOR_LOCAL,
        ), "verifying alice close updates",
    )
}
@@ -29,6 +29,8 @@
<time> [ERR] CNCT: ChannelArbitrator(<chan_point>): unable to progress *contractcourt.htlcSuccessResolver: Transaction rejected: output already spent
<time> [ERR] CNCT: ChannelArbitrator(<chan_point>): unable to progress *contractcourt.htlcTimeoutResolver: htlcswitch shutting down
<time> [ERR] CNCT: ChannelArbitrator(<chan_point>): unable to progress *contractcourt.htlcTimeoutResolver: TxNotifier is exiting
<time> [ERR] CNCT: ChannelArbitrator(<chan_point>): unexpected local commitment confirmed while in StateDefault
<time> [ERR] CNCT: ChannelArbitrator(<chan_point>): unexpected local on-chain channel close
<time> [ERR] CNCT: *contractcourt.commitSweepResolver(<chan_point>): unable to sweep input: remote party swept utxo
<time> [ERR] CNCT: Unable to advance state: channel not found
<time> [ERR] CNCT: unable to hand breached contract off to breachArbiter: server is shutting down
rpcserver.go
@@ -4062,6 +4062,19 @@ func (r *rpcServer) SubscribeChannelEvents(req *lnrpc.ChannelEventSubscription,
        case channelnotifier.ActiveLinkEvent:
            continue

        case channelnotifier.FullyResolvedChannelEvent:
            update = &lnrpc.ChannelEventUpdate{
                Type: lnrpc.ChannelEventUpdate_FULLY_RESOLVED_CHANNEL,
                Channel: &lnrpc.ChannelEventUpdate_FullyResolvedChannel{
                    FullyResolvedChannel: &lnrpc.ChannelPoint{
                        FundingTxid: &lnrpc.ChannelPoint_FundingTxidBytes{
                            FundingTxidBytes: event.ChannelPoint.Hash[:],
                        },
                        OutputIndex: event.ChannelPoint.Index,
                    },
                },
            }

        default:
            return fmt.Errorf("unexpected channel event update: %v", event)
        }
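From a client's point of view, the new update type is the earliest safe signal to prune an external copy of a channel's backup. A minimal sketch of consuming it over gRPC, using the generated lnrpc API implied by the proto change above (connection setup elided; the function name is hypothetical):

package backupwatch

import (
    "context"
    "fmt"

    "github.com/lightningnetwork/lnd/lnrpc"
)

// watchFullyResolved blocks on the channel event stream and reports
// channels whose outputs, time locked ones included, are fully swept.
func watchFullyResolved(ctx context.Context,
    client lnrpc.LightningClient) error {

    stream, err := client.SubscribeChannelEvents(
        ctx, &lnrpc.ChannelEventSubscription{},
    )
    if err != nil {
        return err
    }

    for {
        update, err := stream.Recv()
        if err != nil {
            return err
        }

        // A CLOSED_CHANNEL update alone is no longer enough to
        // forget a locally force closed channel; wait for this.
        if update.Type == lnrpc.ChannelEventUpdate_FULLY_RESOLVED_CHANNEL {
            fmt.Printf("fully resolved: %v\n",
                update.GetFullyResolvedChannel())
        }
    }
}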
@@ -1004,6 +1004,7 @@ func newServer(cfg *Config, listenAddrs []net.Addr,
        Sweeper: s.sweeper,
        Registry: s.invoices,
        NotifyClosedChannel: s.channelNotifier.NotifyClosedChannelEvent,
        NotifyFullyResolvedChannel: s.channelNotifier.NotifyFullyResolvedChannelEvent,
        OnionProcessor: s.sphinx,
        PaymentsExpirationGracePeriod: cfg.PaymentsExpirationGracePeriod,
        IsForwardedHTLC: s.htlcSwitch.IsForwardedHTLC,