diff --git a/docs/release-notes/release-notes-0.14.0.md b/docs/release-notes/release-notes-0.14.0.md index d2489883e..6c45edf81 100644 --- a/docs/release-notes/release-notes-0.14.0.md +++ b/docs/release-notes/release-notes-0.14.0.md @@ -52,6 +52,9 @@ you. * [The itest error whitelist check was removed to reduce the number of failed Travis builds](https://github.com/lightningnetwork/lnd/pull/5588). +* [A flake in the Neutrino integration tests with anchor sweeps was + addressed](https://github.com/lightningnetwork/lnd/pull/5509). + # Documentation * [Outdated warning about unsupported pruning was replaced with clarification that LND **does** diff --git a/lntest/harness.go b/lntest/harness.go index ae31ccf04..68ce2f83e 100644 --- a/lntest/harness.go +++ b/lntest/harness.go @@ -772,7 +772,15 @@ func (n *NetworkHarness) RestartNode(node *HarnessNode, callback func() error, unlockReq.RecoveryWindow = 1000 } - return node.Unlock(context.Background(), unlockReq) + if err := node.Unlock(context.Background(), unlockReq); err != nil { + return err + } + + // Give the node some time to catch up with the chain before we continue + // with the tests. + ctxc, done := context.WithTimeout(context.Background(), DefaultTimeout) + defer done() + return node.WaitForBlockchainSync(ctxc) } // RestartNodeNoUnlock attempts to restart a lightning node by shutting it down @@ -840,7 +848,7 @@ func (n *NetworkHarness) StopNode(node *HarnessNode) error { // SaveProfilesPages hits profiles pages of all active nodes and writes it to // disk using a similar naming scheme as to the regular set of logs. -func (n *NetworkHarness) SaveProfilesPages() { +func (n *NetworkHarness) SaveProfilesPages(t *testing.T) { // Only write gorutine dumps if flag is active. 
if !(*goroutineDump) { return @@ -848,7 +856,8 @@ func (n *NetworkHarness) SaveProfilesPages() { for _, node := range n.activeNodes { if err := saveProfilesPage(node); err != nil { - fmt.Printf("Error: %v\n", err) + t.Logf("Logging follow-up error only, see rest of "+ + "the log for actual cause: %v\n", err) } } } diff --git a/lntest/itest/assertions.go b/lntest/itest/assertions.go index def13577d..1df8e812e 100644 --- a/lntest/itest/assertions.go +++ b/lntest/itest/assertions.go @@ -5,6 +5,7 @@ import ( "encoding/hex" "fmt" "io" + "math" "sync/atomic" "testing" "time" @@ -1304,7 +1305,6 @@ func assertDLPExecuted(net *lntest.NetworkHarness, t *harnessTest, // Dave should sweep his funds immediately, as they are not timelocked. // We also expect Dave to sweep his anchor, if present. - _, err = waitForNTxsInMempool( net.Miner.Client, expectedTxes, minerMempoolTimeout, ) @@ -1340,7 +1340,7 @@ func assertDLPExecuted(net *lntest.NetworkHarness, t *harnessTest, // After the Carol's output matures, she should also reclaim her funds. // // The commit sweep resolver publishes the sweep tx at defaultCSV-1 and - // we already mined one block after the commitmment was published, so + // we already mined one block after the commitment was published, so // take that into account. mineBlocks(t, net, defaultCSV-1-1, 0) carolSweep, err := waitForTxInMempool( @@ -1362,6 +1362,16 @@ func assertDLPExecuted(net *lntest.NetworkHarness, t *harnessTest, } carolBalance := carolBalResp.ConfirmedBalance + + // With Neutrino we don't get a backend error when trying to + // publish an orphan TX (which is what the sweep for the remote + // anchor is since the remote commitment TX was not broadcast). + // That's why the wallet still sees that as unconfirmed and we + // need to count the total balance instead of the confirmed. 
+ if net.BackendCfg.Name() == lntest.NeutrinoBackendName { + carolBalance = carolBalResp.TotalBalance + } + if carolBalance <= carolStartingBalance { return fmt.Errorf("expected carol to have balance "+ "above %d, instead had %v", carolStartingBalance, @@ -1714,3 +1724,30 @@ func getPaymentResult(stream routerrpc.Router_SendPaymentV2Client) ( } } } + +// assertNumUTXOs waits for the given number of UTXOs to be available or fails +// if that isn't the case before the default timeout. +func assertNumUTXOs(t *testing.T, node *lntest.HarnessNode, expectedUtxos int) { + ctxb := context.Background() + ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout) + defer cancel() + err := wait.NoError(func() error { + resp, err := node.ListUnspent( // nolint:staticcheck + ctxt, &lnrpc.ListUnspentRequest{ + MinConfs: 1, + MaxConfs: math.MaxInt32, + }, + ) + if err != nil { + return fmt.Errorf("error listing unspent: %v", err) + } + + if len(resp.Utxos) != expectedUtxos { + return fmt.Errorf("not enough UTXOs, got %d wanted %d", + len(resp.Utxos), expectedUtxos) + } + + return nil + }, defaultTimeout) + require.NoError(t, err, "wait for listunspent") +} diff --git a/lntest/itest/lnd_channel_backup_test.go b/lntest/itest/lnd_channel_backup_test.go index 89558dcb3..4c383bcb0 100644 --- a/lntest/itest/lnd_channel_backup_test.go +++ b/lntest/itest/lnd_channel_backup_test.go @@ -847,6 +847,12 @@ func testChanRestoreScenario(t *harnessTest, net *lntest.NetworkHarness, ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) net.SendCoins(ctxt, t.t, btcutil.SatoshiPerBitcoin, carol) + // For the anchor output case we need two UTXOs for Carol so she can + // sweep both the local and remote anchor. 
+ if testCase.anchorCommit { + net.SendCoins(ctxt, t.t, btcutil.SatoshiPerBitcoin, carol) + } + ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) net.SendCoins(ctxt, t.t, btcutil.SatoshiPerBitcoin, dave) @@ -1077,6 +1083,12 @@ func testChanRestoreScenario(t *harnessTest, net *lntest.NetworkHarness, err = restartCarol() require.NoError(t.t, err) + numUTXOs := 1 + if testCase.anchorCommit { + numUTXOs = 2 + } + assertNumUTXOs(t.t, carol, numUTXOs) + // Now that we have our new node up, we expect that it'll re-connect to // Carol automatically based on the restored backup. ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) diff --git a/lntest/itest/lnd_channel_force_close.go b/lntest/itest/lnd_channel_force_close.go index 4af4f7049..7154a4102 100644 --- a/lntest/itest/lnd_channel_force_close.go +++ b/lntest/itest/lnd_channel_force_close.go @@ -314,6 +314,9 @@ func channelForceClosureTest(net *lntest.NetworkHarness, t *harnessTest, ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) net.ConnectNodes(ctxt, t.t, alice, carol) + // We need one additional UTXO for sweeping the remote anchor. + net.SendCoins(ctxt, t.t, btcutil.SatoshiPerBitcoin, alice) + // Before we start, obtain Carol's current wallet balance, we'll check // to ensure that at the end of the force closure by Alice, Carol // recognizes his new on-chain output. @@ -484,6 +487,12 @@ func channelForceClosureTest(net *lntest.NetworkHarness, t *harnessTest, t.Fatalf("Node restart failed: %v", err) } + // To give the neutrino backend some time to catch up with the chain, we + // wait here until we have enough UTXOs to actually sweep the local and + // remote anchor. + const expectedUtxos = 2 + assertNumUTXOs(t.t, alice, expectedUtxos) + // Mine a block which should confirm the commitment transaction // broadcast as a result of the force closure. If there are anchors, we // also expect the anchor sweep tx to be in the mempool. 
@@ -497,9 +506,7 @@ func channelForceClosureTest(net *lntest.NetworkHarness, t *harnessTest, sweepTxns, err := getNTxsFromMempool( net.Miner.Client, expectedTxes, minerMempoolTimeout, ) - if err != nil { - t.Fatalf("failed to find commitment in miner mempool: %v", err) - } + require.NoError(t.t, err, "sweep txns in miner mempool") // Verify fee rate of the commitment tx plus anchor if present. var totalWeight, totalFee int64 @@ -520,9 +527,7 @@ func channelForceClosureTest(net *lntest.NetworkHarness, t *harnessTest, // Find alice's commit sweep and anchor sweep (if present) in the // mempool. aliceCloseTx := waitingClose.Commitments.LocalTxid - _, aliceAnchor := findCommitAndAnchor( - t, net, sweepTxns, aliceCloseTx, - ) + _, aliceAnchor := findCommitAndAnchor(t, net, sweepTxns, aliceCloseTx) // If we expect anchors, add alice's anchor to our expected set of // reports. diff --git a/lntest/itest/lnd_misc_test.go b/lntest/itest/lnd_misc_test.go index 3aa3fddf5..0555bfb97 100644 --- a/lntest/itest/lnd_misc_test.go +++ b/lntest/itest/lnd_misc_test.go @@ -610,7 +610,7 @@ func testMaxPendingChannels(net *lntest.NetworkHarness, t *harnessTest) { } } -// testGarbageCollectLinkNodes tests that we properly garbase collect link nodes +// testGarbageCollectLinkNodes tests that we properly garbage collect link nodes // from the database and the set of persistent connections within the server. func testGarbageCollectLinkNodes(net *lntest.NetworkHarness, t *harnessTest) { ctxb := context.Background() @@ -623,8 +623,7 @@ func testGarbageCollectLinkNodes(net *lntest.NetworkHarness, t *harnessTest) { // cooperatively closed. ctxt, _ := context.WithTimeout(ctxb, channelOpenTimeout) coopChanPoint := openChannelAndAssert( - ctxt, t, net, net.Alice, net.Bob, - lntest.OpenChannelParams{ + ctxt, t, net, net.Alice, net.Bob, lntest.OpenChannelParams{ Amt: chanAmt, }, ) @@ -639,8 +638,7 @@ func testGarbageCollectLinkNodes(net *lntest.NetworkHarness, t *harnessTest) { // closed. 
ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout) forceCloseChanPoint := openChannelAndAssert( - ctxt, t, net, net.Alice, carol, - lntest.OpenChannelParams{ + ctxt, t, net, net.Alice, carol, lntest.OpenChannelParams{ Amt: chanAmt, }, ) @@ -654,8 +652,7 @@ func testGarbageCollectLinkNodes(net *lntest.NetworkHarness, t *harnessTest) { net.ConnectNodes(ctxt, t.t, net.Alice, dave) ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout) persistentChanPoint := openChannelAndAssert( - ctxt, t, net, net.Alice, dave, - lntest.OpenChannelParams{ + ctxt, t, net, net.Alice, dave, lntest.OpenChannelParams{ Amt: chanAmt, }, ) @@ -1620,30 +1617,22 @@ func testAbandonChannel(net *lntest.NetworkHarness, t *harnessTest) { ctxt, t, net, net.Alice, net.Bob, channelParam, ) txid, err := lnrpc.GetChanPointFundingTxid(chanPoint) - if err != nil { - t.Fatalf("unable to get txid: %v", err) - } + require.NoError(t.t, err, "alice bob get channel funding txid") chanPointStr := fmt.Sprintf("%v:%v", txid, chanPoint.OutputIndex) // Wait for channel to be confirmed open. ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) err = net.Alice.WaitForNetworkChannelOpen(ctxt, chanPoint) - if err != nil { - t.Fatalf("alice didn't report channel: %v", err) - } + require.NoError(t.t, err, "alice wait for network channel open") err = net.Bob.WaitForNetworkChannelOpen(ctxt, chanPoint) - if err != nil { - t.Fatalf("bob didn't report channel: %v", err) - } + require.NoError(t.t, err, "bob wait for network channel open") // Now that the channel is open, we'll obtain its channel ID real quick // so we can use it to query the graph below. 
listReq := &lnrpc.ListChannelsRequest{} ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) aliceChannelList, err := net.Alice.ListChannels(ctxt, listReq) - if err != nil { - t.Fatalf("unable to fetch alice's channels: %v", err) - } + require.NoError(t.t, err) var chanID uint64 for _, channel := range aliceChannelList.Channels { if channel.ChannelPoint == chanPointStr { @@ -1651,18 +1640,13 @@ func testAbandonChannel(net *lntest.NetworkHarness, t *harnessTest) { } } - if chanID == 0 { - t.Fatalf("unable to find channel") - } + require.NotZero(t.t, chanID, "unable to find channel") // To make sure the channel is removed from the backup file as well when // being abandoned, grab a backup snapshot so we can compare it with the // later state. bkupBefore, err := ioutil.ReadFile(net.Alice.ChanBackupPath()) - if err != nil { - t.Fatalf("could not get channel backup before abandoning "+ - "channel: %v", err) - } + require.NoError(t.t, err, "channel backup before abandoning channel") // Send request to abandon channel. abandonChannelRequest := &lnrpc.AbandonChannelRequest{ @@ -1671,44 +1655,31 @@ func testAbandonChannel(net *lntest.NetworkHarness, t *harnessTest) { ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) _, err = net.Alice.AbandonChannel(ctxt, abandonChannelRequest) - if err != nil { - t.Fatalf("unable to abandon channel: %v", err) - } + require.NoError(t.t, err, "abandon channel") // Assert that channel in no longer open. ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) aliceChannelList, err = net.Alice.ListChannels(ctxt, listReq) - if err != nil { - t.Fatalf("unable to list channels: %v", err) - } - if len(aliceChannelList.Channels) != 0 { - t.Fatalf("alice should only have no channels open, "+ - "instead she has %v", - len(aliceChannelList.Channels)) - } + require.NoError(t.t, err, "list channels") + require.Zero(t.t, len(aliceChannelList.Channels), "alice open channels") // Assert that channel is not pending closure. 
pendingReq := &lnrpc.PendingChannelsRequest{} ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) alicePendingList, err := net.Alice.PendingChannels(ctxt, pendingReq) - if err != nil { - t.Fatalf("unable to list pending channels: %v", err) - } - if len(alicePendingList.PendingClosingChannels) != 0 { //nolint:staticcheck - t.Fatalf("alice should only have no pending closing channels, "+ - "instead she has %v", - len(alicePendingList.PendingClosingChannels)) //nolint:staticcheck - } - if len(alicePendingList.PendingForceClosingChannels) != 0 { - t.Fatalf("alice should only have no pending force closing "+ - "channels instead she has %v", - len(alicePendingList.PendingForceClosingChannels)) - } - if len(alicePendingList.WaitingCloseChannels) != 0 { - t.Fatalf("alice should only have no waiting close "+ - "channels instead she has %v", - len(alicePendingList.WaitingCloseChannels)) - } + require.NoError(t.t, err, "alice list pending channels") + require.Zero( + t.t, len(alicePendingList.PendingClosingChannels), //nolint:staticcheck + "alice pending channels", + ) + require.Zero( + t.t, len(alicePendingList.PendingForceClosingChannels), + "alice pending force close channels", + ) + require.Zero( + t.t, len(alicePendingList.WaitingCloseChannels), + "alice waiting close channels", + ) // Assert that channel is listed as abandoned. 
closedReq := &lnrpc.ClosedChannelsRequest{ @@ -1716,45 +1687,49 @@ func testAbandonChannel(net *lntest.NetworkHarness, t *harnessTest) { } ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) aliceClosedList, err := net.Alice.ClosedChannels(ctxt, closedReq) - if err != nil { - t.Fatalf("unable to list closed channels: %v", err) - } - if len(aliceClosedList.Channels) != 1 { - t.Fatalf("alice should only have a single abandoned channel, "+ - "instead she has %v", - len(aliceClosedList.Channels)) - } + require.NoError(t.t, err, "alice list closed channels") + require.Len(t.t, aliceClosedList.Channels, 1, "alice closed channels") // Ensure that the channel can no longer be found in the channel graph. - _, err = net.Alice.GetChanInfo(ctxb, &lnrpc.ChanInfoRequest{ - ChanId: chanID, - }) - if !strings.Contains(err.Error(), "marked as zombie") { - t.Fatalf("channel shouldn't be found in the channel " + - "graph!") - } - - // Make sure the channel is no longer in the channel backup list. - err = wait.Predicate(func() bool { - bkupAfter, err := ioutil.ReadFile(net.Alice.ChanBackupPath()) - if err != nil { - t.Fatalf("could not get channel backup before "+ - "abandoning channel: %v", err) + err = wait.NoError(func() error { + _, err := net.Alice.GetChanInfo(ctxb, &lnrpc.ChanInfoRequest{ + ChanId: chanID, + }) + if err == nil { + return fmt.Errorf("expected error but got nil") } - return len(bkupAfter) < len(bkupBefore) + if !strings.Contains(err.Error(), "marked as zombie") { + return fmt.Errorf("expected error to contain '%s' but "+ + "was '%v'", "marked as zombie", err) + } + + return nil }, defaultTimeout) - if err != nil { - t.Fatalf("channel wasn't removed from channel backup file") - } + require.NoError(t.t, err, "marked as zombie") + + // Make sure the channel is no longer in the channel backup list. 
+ err = wait.NoError(func() error { + bkupAfter, err := ioutil.ReadFile(net.Alice.ChanBackupPath()) + if err != nil { + return fmt.Errorf("could not get channel backup "+ + "after abandoning channel: %v", err) + } + + if len(bkupAfter) >= len(bkupBefore) { + return fmt.Errorf("expected backups after to be less "+ + "than %d but was %d", len(bkupBefore), len(bkupAfter)) + } + + return nil + }, defaultTimeout) + require.NoError(t.t, err, "channel removed from backup file") // Calling AbandonChannel again, should result in no new errors, as the // channel has already been removed. ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) _, err = net.Alice.AbandonChannel(ctxt, abandonChannelRequest) - if err != nil { - t.Fatalf("unable to abandon channel a second time: %v", err) - } + require.NoError(t.t, err, "abandon channel second time") // Now that we're done with the test, the channel can be closed. This // is necessary to avoid unexpected outcomes of other tests that use diff --git a/lntest/itest/log_error_whitelist.txt b/lntest/itest/log_error_whitelist.txt index 1b98e7186..9b26b514d 100644 --- a/lntest/itest/log_error_whitelist.txt +++ b/lntest/itest/log_error_whitelist.txt @@ -18,7 +18,9 @@