diff --git a/lntest/harness.go b/lntest/harness.go
index a264b3c30..68ce2f83e 100644
--- a/lntest/harness.go
+++ b/lntest/harness.go
@@ -772,7 +772,15 @@ func (n *NetworkHarness) RestartNode(node *HarnessNode, callback func() error,
 		unlockReq.RecoveryWindow = 1000
 	}
 
-	return node.Unlock(context.Background(), unlockReq)
+	if err := node.Unlock(context.Background(), unlockReq); err != nil {
+		return err
+	}
+
+	// Give the node some time to catch up with the chain before we continue
+	// with the tests.
+	ctxc, done := context.WithTimeout(context.Background(), DefaultTimeout)
+	defer done()
+	return node.WaitForBlockchainSync(ctxc)
 }
 
 // RestartNodeNoUnlock attempts to restart a lightning node by shutting it down
diff --git a/lntest/itest/assertions.go b/lntest/itest/assertions.go
index def13577d..c22b8f3fc 100644
--- a/lntest/itest/assertions.go
+++ b/lntest/itest/assertions.go
@@ -5,6 +5,7 @@ import (
 	"encoding/hex"
 	"fmt"
 	"io"
+	"math"
 	"sync/atomic"
 	"testing"
 	"time"
@@ -1714,3 +1715,30 @@ func getPaymentResult(stream routerrpc.Router_SendPaymentV2Client) (
 		}
 	}
 }
+
+// assertNumUTXOs waits for the given number of UTXOs to be available or
+// fails if they aren't found before the default timeout.
+func assertNumUTXOs(t *testing.T, node *lntest.HarnessNode, expectedUtxos int) {
+	ctxb := context.Background()
+	ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout)
+	defer cancel()
+	err := wait.NoError(func() error {
+		resp, err := node.ListUnspent( // nolint:staticcheck
+			ctxt, &lnrpc.ListUnspentRequest{
+				MinConfs: 1,
+				MaxConfs: math.MaxInt32,
+			},
+		)
+		if err != nil {
+			return fmt.Errorf("error listing unspent: %v", err)
+		}
+
+		if len(resp.Utxos) != expectedUtxos {
+			return fmt.Errorf("unexpected number of UTXOs, "+
+				"got %d wanted %d", len(resp.Utxos), expectedUtxos)
+		}
+
+		return nil
+	}, defaultTimeout)
+	require.NoError(t, err, "wait for listunspent")
+}
diff --git a/lntest/itest/lnd_channel_force_close.go b/lntest/itest/lnd_channel_force_close.go
index 4af4f7049..7154a4102 100644
--- a/lntest/itest/lnd_channel_force_close.go
+++ b/lntest/itest/lnd_channel_force_close.go
@@ -314,6 +314,9 @@ func channelForceClosureTest(net *lntest.NetworkHarness, t *harnessTest,
 	ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
 	net.ConnectNodes(ctxt, t.t, alice, carol)
 
+	// We need one additional UTXO for sweeping the remote anchor.
+	net.SendCoins(ctxt, t.t, btcutil.SatoshiPerBitcoin, alice)
+
 	// Before we start, obtain Carol's current wallet balance, we'll check
 	// to ensure that at the end of the force closure by Alice, Carol
 	// recognizes his new on-chain output.
@@ -484,6 +487,12 @@ func channelForceClosureTest(net *lntest.NetworkHarness, t *harnessTest,
 		t.Fatalf("Node restart failed: %v", err)
 	}
 
+	// To give the neutrino backend some time to catch up with the chain, we
+	// wait here until we have enough UTXOs to actually sweep the local and
+	// remote anchor.
+	const expectedUtxos = 2
+	assertNumUTXOs(t.t, alice, expectedUtxos)
+
 	// Mine a block which should confirm the commitment transaction
 	// broadcast as a result of the force closure. If there are anchors, we
 	// also expect the anchor sweep tx to be in the mempool.
@@ -497,9 +506,7 @@ func channelForceClosureTest(net *lntest.NetworkHarness, t *harnessTest,
 	sweepTxns, err := getNTxsFromMempool(
 		net.Miner.Client, expectedTxes, minerMempoolTimeout,
 	)
-	if err != nil {
-		t.Fatalf("failed to find commitment in miner mempool: %v", err)
-	}
+	require.NoError(t.t, err, "sweep txns in miner mempool")
 
 	// Verify fee rate of the commitment tx plus anchor if present.
 	var totalWeight, totalFee int64
@@ -520,9 +527,7 @@ func channelForceClosureTest(net *lntest.NetworkHarness, t *harnessTest,
 	// Find alice's commit sweep and anchor sweep (if present) in the
 	// mempool.
 	aliceCloseTx := waitingClose.Commitments.LocalTxid
-	_, aliceAnchor := findCommitAndAnchor(
-		t, net, sweepTxns, aliceCloseTx,
-	)
+	_, aliceAnchor := findCommitAndAnchor(t, net, sweepTxns, aliceCloseTx)
 
 	// If we expect anchors, add alice's anchor to our expected set of
 	// reports.
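
For reviewers, a minimal sketch of how the pieces above are meant to compose, assuming the lntest harness from this repository; the test function testSweepAfterRestart, the use of net.Alice, and the amounts are illustrative and not part of this change set:

// Hypothetical usage sketch, not part of this diff: fund a node, restart
// it, and wait for the expected UTXO count before asserting on sweeps.
func testSweepAfterRestart(net *lntest.NetworkHarness, t *harnessTest) {
	ctxt, cancel := context.WithTimeout(
		context.Background(), defaultTimeout,
	)
	defer cancel()

	// Two UTXOs: one for sweeping the local anchor and one for the
	// remote anchor, mirroring the force-close test above.
	net.SendCoins(ctxt, t.t, btcutil.SatoshiPerBitcoin, net.Alice)
	net.SendCoins(ctxt, t.t, btcutil.SatoshiPerBitcoin, net.Alice)

	// RestartNode now blocks until the wallet is unlocked and the
	// backend reports the chain as synced (see the harness.go hunk).
	err := net.RestartNode(net.Alice, nil)
	require.NoError(t.t, err, "restart")

	// Poll ListUnspent via the new helper; on a neutrino backend the
	// wallet can lag the restart by a few seconds.
	assertNumUTXOs(t.t, net.Alice, 2)
}

Putting the sync wait inside RestartNode gives every test that restarts a node the same guarantee, while the explicit assertNumUTXOs call is only needed where a specific UTXO count matters, such as the anchor sweeps above.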