From 8cd5a9b6b17ae2f485ff461a840df0bc0c6dccce Mon Sep 17 00:00:00 2001 From: yyforyongyu Date: Thu, 28 Jul 2022 17:44:23 +0800 Subject: [PATCH] itest: refactor `testChannelBackupRestore` --- lntemp/rpc/lnd.go | 13 + lntest/itest/assertions.go | 87 +-- lntest/itest/list_on_test.go | 4 + lntest/itest/lnd_channel_backup_test.go | 867 ++++++++++++------------ lntest/itest/lnd_misc_test.go | 2 +- lntest/itest/lnd_multi-hop_test.go | 4 +- lntest/itest/lnd_test_list_on_test.go | 4 - lntest/itest/lnd_zero_conf_test.go | 6 +- lntest/itest/utils.go | 10 +- 9 files changed, 465 insertions(+), 532 deletions(-) diff --git a/lntemp/rpc/lnd.go b/lntemp/rpc/lnd.go index 4c9f4cfd1..01022f501 100644 --- a/lntemp/rpc/lnd.go +++ b/lntemp/rpc/lnd.go @@ -306,3 +306,16 @@ func (h *HarnessRPC) RestoreChanBackups( return resp } + +type AcceptorClient lnrpc.Lightning_ChannelAcceptorClient + +// ChannelAcceptor makes a RPC call to the node's ChannelAcceptor and asserts. +func (h *HarnessRPC) ChannelAcceptor() (AcceptorClient, context.CancelFunc) { + // Use runCtx here instead of a timeout context to keep the client + // alive for the entire test case. + ctxt, cancel := context.WithCancel(h.runCtx) + resp, err := h.LN.ChannelAcceptor(ctxt) + h.NoError(err, "ChannelAcceptor") + + return resp, cancel +} diff --git a/lntest/itest/assertions.go b/lntest/itest/assertions.go index dac88010d..41a147599 100644 --- a/lntest/itest/assertions.go +++ b/lntest/itest/assertions.go @@ -1106,7 +1106,7 @@ func assertNumPendingChannels(t *harnessTest, node *lntest.HarnessNode, // funds immediately, and Carol sweeping her fund after her CSV delay is up. If // the blankSlate value is true, then this means that Dave won't need to sweep // on chain as he has no funds in the channel. -func assertDLPExecuted(net *lntest.NetworkHarness, t *harnessTest, +func assertDLPExecutedOld(net *lntest.NetworkHarness, t *harnessTest, carol *lntest.HarnessNode, carolStartingBalance int64, dave *lntest.HarnessNode, daveStartingBalance int64, commitType lnrpc.CommitmentType) { @@ -1293,91 +1293,6 @@ func assertDLPExecuted(net *lntest.NetworkHarness, t *harnessTest, assertNodeNumChannels(t, carol, 0) } -func assertTimeLockSwept(net *lntest.NetworkHarness, t *harnessTest, - carol *lntest.HarnessNode, carolStartingBalance int64, - dave *lntest.HarnessNode, daveStartingBalance int64, - anchors bool) { - - ctxb := context.Background() - expectedTxes := 2 - if anchors { - expectedTxes = 3 - } - - // Carol should sweep her funds immediately, as they are not timelocked. - // We also expect Carol and Dave to sweep their anchor, if present. - _, err := waitForNTxsInMempool( - net.Miner.Client, expectedTxes, minerMempoolTimeout, - ) - require.NoError(t.t, err, "unable to find Carol's sweep tx in mempool") - - // Carol should consider the channel pending force close (since she is - // waiting for her sweep to confirm). - assertNumPendingChannels(t, carol, 0, 1) - - // Dave is considering it "pending force close", as we must wait - // before he can sweep her outputs. - assertNumPendingChannels(t, dave, 0, 1) - - // Mine the sweep (and anchor) tx(ns). - _ = mineBlocks(t, net, 1, expectedTxes)[0] - - // Now Carol should consider the channel fully closed. - assertNumPendingChannels(t, carol, 0, 0) - - // We query Carol's balance to make sure it increased after the channel - // closed. This checks that she was able to sweep the funds she had in - // the channel. 
- ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - balReq := &lnrpc.WalletBalanceRequest{} - carolBalResp, err := carol.WalletBalance(ctxt, balReq) - require.NoError(t.t, err, "unable to get Carol's balance") - - carolBalance := carolBalResp.ConfirmedBalance - require.Greater( - t.t, carolBalance, carolStartingBalance, "balance not increased", - ) - - // After the Dave's output matures, he should reclaim his funds. - // - // The commit sweep resolver publishes the sweep tx at defaultCSV-1 and - // we already mined one block after the commitment was published, so - // take that into account. - mineBlocks(t, net, defaultCSV-1-1, 0) - daveSweep, err := waitForTxInMempool( - net.Miner.Client, minerMempoolTimeout, - ) - require.NoError(t.t, err, "unable to find Dave's sweep tx in mempool") - block := mineBlocks(t, net, 1, 1)[0] - assertTxInBlock(t, block, daveSweep) - - // Now the channel should be fully closed also from Dave's POV. - assertNumPendingChannels(t, dave, 0, 0) - - // Make sure Dave got his balance back. - err = wait.NoError(func() error { - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - daveBalResp, err := dave.WalletBalance(ctxt, balReq) - if err != nil { - return fmt.Errorf("unable to get Dave's balance: %v", - err) - } - - daveBalance := daveBalResp.ConfirmedBalance - if daveBalance <= daveStartingBalance { - return fmt.Errorf("expected dave to have balance "+ - "above %d, instead had %v", daveStartingBalance, - daveBalance) - } - - return nil - }, defaultTimeout) - require.NoError(t.t, err) - - assertNodeNumChannels(t, dave, 0) - assertNodeNumChannels(t, carol, 0) -} - // verifyCloseUpdate is used to verify that a closed channel update is of the // expected type. func verifyCloseUpdate(chanUpdate *lnrpc.ChannelEventUpdate, diff --git a/lntest/itest/list_on_test.go b/lntest/itest/list_on_test.go index 524e8a397..bd4d2d392 100644 --- a/lntest/itest/list_on_test.go +++ b/lntest/itest/list_on_test.go @@ -19,4 +19,8 @@ var allTestCasesTemp = []*lntemp.TestCase{ Name: "external channel funding", TestFunc: testExternalFundingChanPoint, }, + { + Name: "channel backup restore", + TestFunc: testChannelBackupRestore, + }, } diff --git a/lntest/itest/lnd_channel_backup_test.go b/lntest/itest/lnd_channel_backup_test.go index 52191918c..7a3995990 100644 --- a/lntest/itest/lnd_channel_backup_test.go +++ b/lntest/itest/lnd_channel_backup_test.go @@ -17,6 +17,8 @@ import ( "github.com/lightningnetwork/lnd/chanbackup" "github.com/lightningnetwork/lnd/lnrpc" "github.com/lightningnetwork/lnd/lnrpc/walletrpc" + "github.com/lightningnetwork/lnd/lntemp" + "github.com/lightningnetwork/lnd/lntemp/node" "github.com/lightningnetwork/lnd/lntest" "github.com/lightningnetwork/lnd/lntest/wait" "github.com/stretchr/testify/require" @@ -27,11 +29,9 @@ import ( // restoring from initial wallet creation. We'll also alternate between // restoring form the on disk file, and restoring from the exported RPC command // as well. -func testChannelBackupRestore(net *lntest.NetworkHarness, t *harnessTest) { +func testChannelBackupRestore(ht *lntemp.HarnessTest) { password := []byte("El Psy Kongroo") - ctxb := context.Background() - var testCases = []chanRestoreTestCase{ // Restore from backups obtained via the RPC interface. Dave // was the initiator, of the non-advertised channel. 
@@ -40,30 +40,25 @@ func testChannelBackupRestore(net *lntest.NetworkHarness, t *harnessTest) { channelsUpdated: false, initiator: true, private: false, - restoreMethod: func(oldNode *lntest.HarnessNode, + restoreMethod: func(st *lntemp.HarnessTest, + oldNode *node.HarnessNode, backupFilePath string, - mnemonic []string) (nodeRestorer, error) { + mnemonic []string) nodeRestorer { // For this restoration method, we'll grab the // current multi-channel backup from the old // node, and use it to restore a new node // within the closure. - req := &lnrpc.ChanBackupExportRequest{} - chanBackup, err := oldNode.ExportAllChannelBackups( - ctxb, req, - ) - if err != nil { - return nil, fmt.Errorf("unable to obtain "+ - "channel backup: %v", err) - } + chanBackup := oldNode.RPC.ExportAllChanBackups() - multi := chanBackup.MultiChanBackup.MultiChanBackup + multi := chanBackup.MultiChanBackup. + MultiChanBackup // In our nodeRestorer function, we'll restore // the node from seed, then manually recover // the channel backup. return chanRestoreViaRPC( - net, password, mnemonic, multi, oldNode, + st, password, mnemonic, multi, oldNode, ) }, }, @@ -74,22 +69,21 @@ func testChannelBackupRestore(net *lntest.NetworkHarness, t *harnessTest) { name: "restore from backup file", initiator: true, private: false, - restoreMethod: func(oldNode *lntest.HarnessNode, + restoreMethod: func(st *lntemp.HarnessTest, + oldNode *node.HarnessNode, backupFilePath string, - mnemonic []string) (nodeRestorer, error) { + mnemonic []string) nodeRestorer { // Read the entire Multi backup stored within // this node's channel.backup file. multi, err := ioutil.ReadFile(backupFilePath) - if err != nil { - return nil, err - } + require.NoError(st, err) // Now that we have Dave's backup file, we'll // create a new nodeRestorer that will restore // using the on-disk channel.backup. return chanRestoreViaRPC( - net, password, mnemonic, multi, oldNode, + st, password, mnemonic, multi, oldNode, ) }, }, @@ -100,33 +94,29 @@ func testChannelBackupRestore(net *lntest.NetworkHarness, t *harnessTest) { name: "restore during creation", initiator: true, private: false, - restoreMethod: func(oldNode *lntest.HarnessNode, + restoreMethod: func(st *lntemp.HarnessTest, + oldNode *node.HarnessNode, backupFilePath string, - mnemonic []string) (nodeRestorer, error) { + mnemonic []string) nodeRestorer { // First, fetch the current backup state as is, // to obtain our latest Multi. - chanBackup, err := oldNode.ExportAllChannelBackups( - ctxb, &lnrpc.ChanBackupExportRequest{}, - ) - if err != nil { - return nil, fmt.Errorf("unable to obtain "+ - "channel backup: %v", err) - } + chanBackup := oldNode.RPC.ExportAllChanBackups() backupSnapshot := &lnrpc.ChanBackupSnapshot{ - MultiChanBackup: chanBackup.MultiChanBackup, + MultiChanBackup: chanBackup. + MultiChanBackup, } // Create a new nodeRestorer that will restore // the node using the Multi backup we just // obtained above. 
- return func() (*lntest.HarnessNode, error) { - return net.RestoreNodeWithSeed( + return func() *node.HarnessNode { + return st.RestoreNodeWithSeed( "dave", nil, password, mnemonic, "", 1000, backupSnapshot, copyPorts(oldNode), ) - }, nil + } }, }, @@ -136,46 +126,34 @@ func testChannelBackupRestore(net *lntest.NetworkHarness, t *harnessTest) { name: "restore during unlock", initiator: true, private: false, - restoreMethod: func(oldNode *lntest.HarnessNode, + restoreMethod: func(st *lntemp.HarnessTest, + oldNode *node.HarnessNode, backupFilePath string, - mnemonic []string) (nodeRestorer, error) { + mnemonic []string) nodeRestorer { // First, fetch the current backup state as is, // to obtain our latest Multi. - chanBackup, err := oldNode.ExportAllChannelBackups( - ctxb, &lnrpc.ChanBackupExportRequest{}, - ) - if err != nil { - return nil, fmt.Errorf("unable to obtain "+ - "channel backup: %v", err) - } + chanBackup := oldNode.RPC.ExportAllChanBackups() backupSnapshot := &lnrpc.ChanBackupSnapshot{ - MultiChanBackup: chanBackup.MultiChanBackup, + MultiChanBackup: chanBackup. + MultiChanBackup, } // Create a new nodeRestorer that will restore // the node with its seed, but no channel // backup, shutdown this initialized node, then // restart it again using Unlock. - return func() (*lntest.HarnessNode, error) { - newNode, err := net.RestoreNodeWithSeed( + return func() *node.HarnessNode { + newNode := st.RestoreNodeWithSeed( "dave", nil, password, mnemonic, "", 1000, nil, copyPorts(oldNode), ) - if err != nil { - return nil, err - } - err = net.RestartNode( - newNode, nil, backupSnapshot, - ) - if err != nil { - return nil, err - } + st.RestartNode(newNode, backupSnapshot) - return newNode, nil - }, nil + return newNode + } }, }, @@ -185,16 +163,15 @@ func testChannelBackupRestore(net *lntest.NetworkHarness, t *harnessTest) { name: "restore from backup file twice", initiator: true, private: false, - restoreMethod: func(oldNode *lntest.HarnessNode, + restoreMethod: func(st *lntemp.HarnessTest, + oldNode *node.HarnessNode, backupFilePath string, - mnemonic []string) (nodeRestorer, error) { + mnemonic []string) nodeRestorer { // Read the entire Multi backup stored within // this node's channel.backup file. 
multi, err := ioutil.ReadFile(backupFilePath) - if err != nil { - return nil, err - } + require.NoError(st, err) // Now that we have Dave's backup file, we'll // create a new nodeRestorer that will restore @@ -203,46 +180,25 @@ func testChannelBackupRestore(net *lntest.NetworkHarness, t *harnessTest) { MultiChanBackup: multi, } - ctxb := context.Background() - - return func() (*lntest.HarnessNode, error) { - newNode, err := net.RestoreNodeWithSeed( + return func() *node.HarnessNode { + newNode := st.RestoreNodeWithSeed( "dave", nil, password, mnemonic, "", 1000, nil, copyPorts(oldNode), ) - if err != nil { - return nil, fmt.Errorf("unable to "+ - "restore node: %v", err) - } - _, err = newNode.RestoreChannelBackups( - ctxb, - &lnrpc.RestoreChanBackupRequest{ - Backup: backup, - }, - ) - if err != nil { - return nil, fmt.Errorf("unable "+ - "to restore backups: %v", - err) + req := &lnrpc.RestoreChanBackupRequest{ + Backup: backup, } + newNode.RPC.RestoreChanBackups(req) - _, err = newNode.RestoreChannelBackups( - ctxb, - &lnrpc.RestoreChanBackupRequest{ - Backup: backup, - }, - ) - if err != nil { - return nil, fmt.Errorf("unable "+ - "to restore backups the"+ - "second time: %v", - err) + req = &lnrpc.RestoreChanBackupRequest{ + Backup: backup, } + newNode.RPC.RestoreChanBackups(req) - return newNode, nil - }, nil + return newNode + } }, }, @@ -254,16 +210,15 @@ func testChannelBackupRestore(net *lntest.NetworkHarness, t *harnessTest) { initiator: true, private: false, unconfirmed: true, - restoreMethod: func(oldNode *lntest.HarnessNode, + restoreMethod: func(st *lntemp.HarnessTest, + oldNode *node.HarnessNode, backupFilePath string, - mnemonic []string) (nodeRestorer, error) { + mnemonic []string) nodeRestorer { // Read the entire Multi backup stored within // this node's channel.backup file. multi, err := ioutil.ReadFile(backupFilePath) - if err != nil { - return nil, err - } + require.NoError(st, err) // Let's assume time passes, the channel // confirms in the meantime but for some reason @@ -273,13 +228,13 @@ func testChannelBackupRestore(net *lntest.NetworkHarness, t *harnessTest) { // simulate time passing, we mine some blocks // to get the channel confirmed _after_ we saved // the backup. - mineBlocks(t, net, 6, 1) + st.Miner.MineBlocksAndAssertNumTxes(6, 1) // In our nodeRestorer function, we'll restore // the node from seed, then manually recover // the channel backup. return chanRestoreViaRPC( - net, password, mnemonic, multi, oldNode, + st, password, mnemonic, multi, oldNode, ) }, }, @@ -292,27 +247,19 @@ func testChannelBackupRestore(net *lntest.NetworkHarness, t *harnessTest) { initiator: true, private: false, unconfirmed: true, - restoreMethod: func(oldNode *lntest.HarnessNode, + restoreMethod: func(st *lntemp.HarnessTest, + oldNode *node.HarnessNode, backupFilePath string, - mnemonic []string) (nodeRestorer, error) { + mnemonic []string) nodeRestorer { // For this restoration method, we'll grab the // current multi-channel backup from the old // node. The channel should be included, even if // it is not confirmed yet. 
- req := &lnrpc.ChanBackupExportRequest{} - chanBackup, err := oldNode.ExportAllChannelBackups( - ctxb, req, - ) - if err != nil { - return nil, fmt.Errorf("unable to obtain "+ - "channel backup: %v", err) - } + chanBackup := oldNode.RPC.ExportAllChanBackups() chanPoints := chanBackup.MultiChanBackup.ChanPoints - if len(chanPoints) == 0 { - return nil, fmt.Errorf("unconfirmed " + - "channel not included in backup") - } + require.NotEmpty(st, chanPoints, + "unconfirmed channel not found") // Let's assume time passes, the channel // confirms in the meantime but for some reason @@ -322,14 +269,14 @@ func testChannelBackupRestore(net *lntest.NetworkHarness, t *harnessTest) { // simulate time passing, we mine some blocks // to get the channel confirmed _after_ we saved // the backup. - mineBlocks(t, net, 6, 1) + st.Miner.MineBlocksAndAssertNumTxes(6, 1) // In our nodeRestorer function, we'll restore // the node from seed, then manually recover // the channel backup. multi := chanBackup.MultiChanBackup.MultiChanBackup return chanRestoreViaRPC( - net, password, mnemonic, multi, oldNode, + st, password, mnemonic, multi, oldNode, ) }, }, @@ -341,22 +288,21 @@ func testChannelBackupRestore(net *lntest.NetworkHarness, t *harnessTest) { initiator: true, private: false, commitmentType: lnrpc.CommitmentType_ANCHORS, - restoreMethod: func(oldNode *lntest.HarnessNode, + restoreMethod: func(st *lntemp.HarnessTest, + oldNode *node.HarnessNode, backupFilePath string, - mnemonic []string) (nodeRestorer, error) { + mnemonic []string) nodeRestorer { // Read the entire Multi backup stored within // this node's channels.backup file. multi, err := ioutil.ReadFile(backupFilePath) - if err != nil { - return nil, err - } + require.NoError(st, err) // Now that we have Dave's backup file, we'll // create a new nodeRestorer that will restore // using the on-disk channels.backup. return chanRestoreViaRPC( - net, password, mnemonic, multi, oldNode, + st, password, mnemonic, multi, oldNode, ) }, }, @@ -364,26 +310,26 @@ func testChannelBackupRestore(net *lntest.NetworkHarness, t *harnessTest) { // Restore the backup from the on-disk file, using the RPC // interface, for script-enforced leased channels. { - name: "restore from backup file script enforced lease", + name: "restore from backup file script " + + "enforced lease", initiator: true, private: false, commitmentType: lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE, - restoreMethod: func(oldNode *lntest.HarnessNode, + restoreMethod: func(st *lntemp.HarnessTest, + oldNode *node.HarnessNode, backupFilePath string, - mnemonic []string) (nodeRestorer, error) { + mnemonic []string) nodeRestorer { // Read the entire Multi backup stored within // this node's channel.backup file. multi, err := ioutil.ReadFile(backupFilePath) - if err != nil { - return nil, err - } + require.NoError(st, err) // Now that we have Dave's backup file, we'll // create a new nodeRestorer that will restore // using the on-disk channel.backup. 
return chanRestoreViaRPC( - net, password, mnemonic, multi, oldNode, + st, password, mnemonic, multi, oldNode, ) }, }, @@ -394,27 +340,23 @@ func testChannelBackupRestore(net *lntest.NetworkHarness, t *harnessTest) { name: "old revocation producer format", initiator: true, legacyRevocation: true, - restoreMethod: func(oldNode *lntest.HarnessNode, + restoreMethod: func(st *lntemp.HarnessTest, + oldNode *node.HarnessNode, backupFilePath string, - mnemonic []string) (nodeRestorer, error) { + mnemonic []string) nodeRestorer { // For this restoration method, we'll grab the // current multi-channel backup from the old // node, and use it to restore a new node // within the closure. - req := &lnrpc.ChanBackupExportRequest{} - chanBackup, err := oldNode.ExportAllChannelBackups( - ctxb, req, - ) - require.NoError(t.t, err) - + chanBackup := oldNode.RPC.ExportAllChanBackups() multi := chanBackup.MultiChanBackup.MultiChanBackup // In our nodeRestorer function, we'll restore // the node from seed, then manually recover the // channel backup. return chanRestoreViaRPC( - net, password, mnemonic, multi, oldNode, + st, password, mnemonic, multi, oldNode, ) }, }, @@ -428,22 +370,21 @@ func testChannelBackupRestore(net *lntest.NetworkHarness, t *harnessTest) { private: false, commitmentType: lnrpc.CommitmentType_ANCHORS, localForceClose: true, - restoreMethod: func(oldNode *lntest.HarnessNode, + restoreMethod: func(st *lntemp.HarnessTest, + oldNode *node.HarnessNode, backupFilePath string, - mnemonic []string) (nodeRestorer, error) { + mnemonic []string) nodeRestorer { // Read the entire Multi backup stored within // this node's channel.backup file. multi, err := ioutil.ReadFile(backupFilePath) - if err != nil { - return nil, err - } + require.NoError(st, err) // Now that we have Dave's backup file, we'll // create a new nodeRestorer that will restore // using the on-disk channel.backup. return chanRestoreViaRPC( - net, password, mnemonic, multi, oldNode, + st, password, mnemonic, multi, oldNode, ) }, }, @@ -457,23 +398,21 @@ func testChannelBackupRestore(net *lntest.NetworkHarness, t *harnessTest) { private: false, commitmentType: lnrpc.CommitmentType_ANCHORS, zeroConf: true, - restoreMethod: func(oldNode *lntest.HarnessNode, + restoreMethod: func(st *lntemp.HarnessTest, + oldNode *node.HarnessNode, backupFilePath string, - mnemonic []string) (nodeRestorer, error) { + mnemonic []string) nodeRestorer { // Read the entire Multi backup stored within // this node's channels.backup file. multi, err := ioutil.ReadFile(backupFilePath) - if err != nil { - return nil, err - } + require.NoError(st, err) // Now that we have Dave's backup file, we'll // create a new nodeRestorer that we'll restore // using the on-disk channels.backup. return chanRestoreViaRPC( - net, password, mnemonic, multi, - oldNode, + st, password, mnemonic, multi, oldNode, ) }, }, @@ -487,23 +426,21 @@ func testChannelBackupRestore(net *lntest.NetworkHarness, t *harnessTest) { private: false, commitmentType: lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE, zeroConf: true, - restoreMethod: func(oldNode *lntest.HarnessNode, + restoreMethod: func(st *lntemp.HarnessTest, + oldNode *node.HarnessNode, backupFilePath string, - mnemonic []string) (nodeRestorer, error) { + mnemonic []string) nodeRestorer { // Read the entire Multi backup stored within // this node's channel.backup file. 
multi, err := ioutil.ReadFile(backupFilePath) - if err != nil { - return nil, err - } + require.NoError(st, err) // Now that we have Dave's backup file, we'll // create a new nodeRestorer that we'll restore // using the on-disk channel backup. return chanRestoreViaRPC( - net, password, mnemonic, multi, - oldNode, + st, password, mnemonic, multi, oldNode, ) }, }, @@ -518,23 +455,21 @@ func testChannelBackupRestore(net *lntest.NetworkHarness, t *harnessTest) { commitmentType: lnrpc.CommitmentType_ANCHORS, localForceClose: true, zeroConf: true, - restoreMethod: func(oldNode *lntest.HarnessNode, + restoreMethod: func(st *lntemp.HarnessTest, + oldNode *node.HarnessNode, backupFilePath string, - mnemonic []string) (nodeRestorer, error) { + mnemonic []string) nodeRestorer { // Read the entire Multi backup stored within // this node's channel.backup file. multi, err := ioutil.ReadFile(backupFilePath) - if err != nil { - return nil, err - } + require.NoError(st, err) // Now that we have Dave's backup file, we'll // create a new nodeRestorer that we'll restore // using the on-disk channel backup. return chanRestoreViaRPC( - net, password, mnemonic, multi, - oldNode, + st, password, mnemonic, multi, oldNode, ) }, }, @@ -547,13 +482,11 @@ func testChannelBackupRestore(net *lntest.NetworkHarness, t *harnessTest) { for _, testCase := range testCases { testCase := testCase - success := t.t.Run(testCase.name, func(t *testing.T) { - h := newHarnessTest(t, net) + success := ht.Run(testCase.name, func(t *testing.T) { + h, cleanup := ht.Subtest(t) + defer cleanup() - // Start each test with the default static fee estimate. - net.SetFeeEstimate(12500) - - testChanRestoreScenario(h, net, &testCase, password) + testChanRestoreScenario(h, &testCase, password) }) if !success { break @@ -900,7 +833,7 @@ func testExportChannelBackup(net *lntest.NetworkHarness, t *harnessTest) { // nodeRestorer is a function closure that allows each chanRestoreTestCase to // control exactly *how* the prior node is restored. This might be using an // backup obtained over RPC, or the file system, etc. -type nodeRestorer func() (*lntest.HarnessNode, error) +type nodeRestorer func() *node.HarnessNode // chanRestoreTestCase describes a test case for an end to end SCB restoration // work flow. One node will start from scratch using an existing SCB. At the @@ -945,9 +878,8 @@ type chanRestoreTestCase struct { // restored via a custom method. We use this to abstract away // _how_ a node is restored from our assertions once the node // has been fully restored itself. - restoreMethod func(oldNode *lntest.HarnessNode, - backupFilePath string, - mnemonic []string) (nodeRestorer, error) + restoreMethod func(ht *lntemp.HarnessTest, oldNode *node.HarnessNode, + backupFilePath string, mnemonic []string) nodeRestorer // zeroConf denotes whether the opened channel is a zero-conf channel // or not. @@ -958,7 +890,7 @@ type chanRestoreTestCase struct { // ensuring that after Dave restores his channel state according to the // testCase, the DLP protocol is executed properly and both nodes are made // whole. 
-func testChanRestoreScenario(t *harnessTest, net *lntest.NetworkHarness, +func testChanRestoreScenario(ht *lntemp.HarnessTest, testCase *chanRestoreTestCase, password []byte) { const ( @@ -966,8 +898,6 @@ func testChanRestoreScenario(t *harnessTest, net *lntest.NetworkHarness, pushAmt = btcutil.Amount(5000000) ) - ctxb := context.Background() - nodeArgs := []string{ "--minbackoff=50ms", "--maxbackoff=1s", @@ -987,33 +917,24 @@ func testChanRestoreScenario(t *harnessTest, net *lntest.NetworkHarness, // First, we'll create a brand new node we'll use within the test. If // we have a custom backup file specified, then we'll also create that // for use. - dave, mnemonic, _, err := net.NewNodeWithSeed( + dave, mnemonic, _ := ht.NewNodeWithSeed( "dave", nodeArgs, password, false, ) - if err != nil { - t.Fatalf("unable to create new node: %v", err) - } - // Defer to a closure instead of to shutdownAndAssert due to the value - // of 'dave' changing throughout the test. - defer func() { - shutdownAndAssert(net, t, dave) - }() - carol := net.NewNode(t.t, "carol", nodeArgs) - defer shutdownAndAssert(net, t, carol) + carol := ht.NewNode("carol", nodeArgs) // Now that our new nodes are created, we'll give them some coins for // channel opening and anchor sweeping. - net.SendCoins(t.t, btcutil.SatoshiPerBitcoin, carol) + ht.FundCoins(btcutil.SatoshiPerBitcoin, carol) // For the anchor output case we need two UTXOs for Carol so she can // sweep both the local and remote anchor. if commitTypeHasAnchors(testCase.commitmentType) { - net.SendCoins(t.t, btcutil.SatoshiPerBitcoin, carol) + ht.FundCoins(btcutil.SatoshiPerBitcoin, carol) } - net.SendCoins(t.t, btcutil.SatoshiPerBitcoin, dave) + ht.FundCoins(btcutil.SatoshiPerBitcoin, dave) - var from, to *lntest.HarnessNode + var from, to *node.HarnessNode if testCase.initiator { from, to = dave, carol } else { @@ -1022,23 +943,23 @@ func testChanRestoreScenario(t *harnessTest, net *lntest.NetworkHarness, // Next, we'll connect Dave to Carol, and open a new channel to her // with a portion pushed. - net.ConnectNodes(t.t, dave, carol) + ht.ConnectNodes(dave, carol) // We will either open a confirmed or unconfirmed channel, depending on // the requirements of the test case. var chanPoint *lnrpc.ChannelPoint switch { case testCase.unconfirmed: - _, err := net.OpenPendingChannel( - from, to, chanAmt, pushAmt, + ht.OpenChannelAssertPending( + from, to, lntemp.OpenChannelParams{ + Amt: chanAmt, + PushAmt: pushAmt, + }, ) - if err != nil { - t.Fatalf("couldn't open pending channel: %v", err) - } // Give the pubsub some time to update the channel backup. - err = wait.NoError(func() error { - fi, err := os.Stat(dave.ChanBackupPath()) + err := wait.NoError(func() error { + fi, err := os.Stat(dave.Cfg.ChanBackupPath()) if err != nil { return err } @@ -1047,168 +968,112 @@ func testChanRestoreScenario(t *harnessTest, net *lntest.NetworkHarness, } return nil }, defaultTimeout) - if err != nil { - t.Fatalf("channel backup not updated in time: %v", err) - } + require.NoError(ht, err, "channel backup not updated in time") // Also create channels with the legacy revocation producer format if // requested. case testCase.legacyRevocation: - createLegacyRevocationChannel( - net, t, chanAmt, pushAmt, from, to, - ) + createLegacyRevocationChannel(ht, chanAmt, pushAmt, from, to) default: // If we are testing zero-conf channels, setup a // ChannelAcceptor for the fundee. 
- var cancel context.CancelFunc + var cancelAcceptor context.CancelFunc if testCase.zeroConf { // Setup a ChannelAcceptor. - var ctxc context.Context - ctxc, cancel = context.WithCancel(ctxb) - acceptStream, err := to.ChannelAcceptor(ctxc) - require.NoError(t.t, err) - go acceptChannel(t, true, acceptStream) + acceptStream, cancel := to.RPC.ChannelAcceptor() + cancelAcceptor = cancel + go acceptChannel(ht.T, true, acceptStream) } var fundingShim *lnrpc.FundingShim if testCase.commitmentType == lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE { - _, minerHeight, err := net.Miner.Client.GetBestBlock() - require.NoError(t.t, err) + _, minerHeight := ht.Miner.GetBestBlock() thawHeight := uint32(minerHeight + 144) - fundingShim, _, _ = deriveFundingShimOld( - net, t, from, to, chanAmt, thawHeight, true, + fundingShim, _, _ = deriveFundingShim( + ht, from, to, chanAmt, thawHeight, true, ) } - params := lntest.OpenChannelParams{ - Amt: chanAmt, - PushAmt: pushAmt, - Private: testCase.private, - FundingShim: fundingShim, - CommitmentType: testCase.commitmentType, - ZeroConf: testCase.zeroConf, - } - chanPoint = openChannelAndAssert( - t, net, from, to, params, + chanPoint = ht.OpenChannel( + from, to, lntemp.OpenChannelParams{ + Amt: chanAmt, + PushAmt: pushAmt, + Private: testCase.private, + FundingShim: fundingShim, + CommitmentType: testCase.commitmentType, + ZeroConf: testCase.zeroConf, + }, ) // Remove the ChannelAcceptor. if testCase.zeroConf { - cancel() + cancelAcceptor() } // Wait for both sides to see the opened channel. - err = dave.WaitForNetworkChannelOpen(chanPoint) - if err != nil { - t.Fatalf("dave didn't report channel: %v", err) - } - err = carol.WaitForNetworkChannelOpen(chanPoint) - if err != nil { - t.Fatalf("carol didn't report channel: %v", err) - } + ht.AssertTopologyChannelOpen(dave, chanPoint) + ht.AssertTopologyChannelOpen(carol, chanPoint) } // If both parties should start with existing channel updates, then // we'll send+settle an HTLC between 'from' and 'to' now. if testCase.channelsUpdated { - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - invoice := &lnrpc.Invoice{ Memo: "testing", Value: 100000, } - invoiceResp, err := to.AddInvoice(ctxt, invoice) - if err != nil { - t.Fatalf("unable to add invoice: %v", err) - } + invoiceResp := to.RPC.AddInvoice(invoice) - err = completePaymentRequests( - from, from.RouterClient, - []string{invoiceResp.PaymentRequest}, true, - ) - if err != nil { - t.Fatalf("unable to complete payments: %v", err) - } + requests := []string{invoiceResp.PaymentRequest} + ht.CompletePaymentRequests(from, requests) } // If we're testing that locally force closed channels can be restored // then we issue the force close now. if testCase.localForceClose && chanPoint != nil { - ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout) - defer cancel() - - _, err = dave.CloseChannel(ctxt, &lnrpc.CloseChannelRequest{ - ChannelPoint: chanPoint, - Force: true, - }) - require.NoError(t.t, err) + // Calls the rpc to close the channel. + ht.CloseChannelAssertPending(dave, chanPoint, true) // After closing the channel we mine one transaction to make // sure the commitment TX was confirmed. - _ = mineBlocks(t, net, 1, 1) + ht.Miner.MineBlocksAndAssertNumTxes(1, 1) // Now we need to make sure that the channel is still in the // backup. Otherwise restoring won't work later. 
- _, err = dave.ExportChannelBackup( - ctxt, &lnrpc.ExportChannelBackupRequest{ - ChanPoint: chanPoint, - }, - ) - require.NoError(t.t, err) + dave.RPC.ExportChanBackup(chanPoint) } // Before we start the recovery, we'll record the balances of both // Carol and Dave to ensure they both sweep their coins at the end. - balReq := &lnrpc.WalletBalanceRequest{} - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - carolBalResp, err := carol.WalletBalance(ctxt, balReq) - if err != nil { - t.Fatalf("unable to get carol's balance: %v", err) - } + carolBalResp := carol.RPC.WalletBalance() carolStartingBalance := carolBalResp.ConfirmedBalance - daveBalance, err := dave.WalletBalance(ctxt, balReq) - if err != nil { - t.Fatalf("unable to get carol's balance: %v", err) - } + daveBalance := dave.RPC.WalletBalance() daveStartingBalance := daveBalance.ConfirmedBalance // At this point, we'll now execute the restore method to give us the // new node we should attempt our assertions against. - backupFilePath := dave.ChanBackupPath() - restoredNodeFunc, err := testCase.restoreMethod( - dave, backupFilePath, mnemonic, + backupFilePath := dave.Cfg.ChanBackupPath() + restoredNodeFunc := testCase.restoreMethod( + ht, dave, backupFilePath, mnemonic, ) - if err != nil { - t.Fatalf("unable to prep node restoration: %v", err) - } // Now that we're able to make our restored now, we'll shutdown the old // Dave node as we'll be storing it shortly below. - shutdownAndAssert(net, t, dave) + ht.Shutdown(dave) // To make sure the channel state is advanced correctly if the channel // peer is not online at first, we also shutdown Carol. - restartCarol, err := net.SuspendNode(carol) - require.NoError(t.t, err) + restartCarol := ht.SuspendNode(carol) // Next, we'll make a new Dave and start the bulk of our recovery // workflow. - dave, err = restoredNodeFunc() - if err != nil { - t.Fatalf("unable to restore node: %v", err) - } + dave = restoredNodeFunc() // First ensure that the on-chain balance is restored. - err = wait.NoError(func() error { - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - balReq := &lnrpc.WalletBalanceRequest{} - daveBalResp, err := dave.WalletBalance(ctxt, balReq) - if err != nil { - return err - } - + err := wait.NoError(func() error { + daveBalResp := dave.RPC.WalletBalance() daveBal := daveBalResp.ConfirmedBalance if daveBal <= 0 { return fmt.Errorf("expected positive balance, had %v", @@ -1217,27 +1082,24 @@ func testChanRestoreScenario(t *harnessTest, net *lntest.NetworkHarness, return nil }, defaultTimeout) - if err != nil { - t.Fatalf("On-chain balance not restored: %v", err) - } + require.NoError(ht, err, "On-chain balance not restored") // For our force close scenario we don't need the channel to be closed // by Carol since it was already force closed before we started the // recovery. All we need is for Carol to send us over the commit height // so we can sweep the time locked output with the correct commit point. if testCase.localForceClose { - assertNumPendingChannels(t, dave, 0, 1) + ht.AssertNumPendingForceClose(dave, 1) - err = restartCarol() - require.NoError(t.t, err) + require.NoError(ht, restartCarol(), "restart carol failed") // Now that we have our new node up, we expect that it'll // re-connect to Carol automatically based on the restored // backup. 
- net.EnsureConnected(t.t, dave, carol) + ht.EnsureConnected(dave, carol) assertTimeLockSwept( - net, t, carol, carolStartingBalance, dave, + ht, carol, carolStartingBalance, dave, daveStartingBalance, commitTypeHasAnchors(testCase.commitmentType), ) @@ -1249,85 +1111,45 @@ func testChanRestoreScenario(t *harnessTest, net *lntest.NetworkHarness, // should not yet be force closing as no connection with the remote // peer was established yet. We should also not be able to close the // channel. - assertNumPendingChannels(t, dave, 1, 0) - ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout) - defer cancel() - pendingChanResp, err := dave.PendingChannels( - ctxt, &lnrpc.PendingChannelsRequest{}, - ) - require.NoError(t.t, err) - - // We now need to make sure the server is fully started before we can - // actually close the channel. This is the first check in CloseChannel - // so we can try with a nil channel point until we get the correct error - // to find out if Dave is fully started. - err = wait.Predicate(func() bool { - const expectedErr = "must specify channel point" - ctxc, cancel := context.WithCancel(ctxt) - defer cancel() - - resp, err := dave.CloseChannel( - ctxc, &lnrpc.CloseChannelRequest{}, - ) - if err != nil { - return false - } - - defer func() { _ = resp.CloseSend() }() - - _, err = resp.Recv() - if err != nil && strings.Contains(err.Error(), expectedErr) { - return true - } - - return false - }, defaultTimeout) - require.NoError(t.t, err) + channel := ht.AssertNumWaitingClose(dave, 1)[0] + chanPointStr := channel.Channel.ChannelPoint // We also want to make sure we cannot force close in this state. That // would get the state machine in a weird state. - chanPointParts := strings.Split( - pendingChanResp.WaitingCloseChannels[0].Channel.ChannelPoint, - ":", - ) + chanPointParts := strings.Split(chanPointStr, ":") chanPointIndex, _ := strconv.ParseUint(chanPointParts[1], 10, 32) - resp, err := dave.CloseChannel(ctxt, &lnrpc.CloseChannelRequest{ - ChannelPoint: &lnrpc.ChannelPoint{ + + // We don't get an error directly but only when reading the first + // message of the stream. + err = ht.CloseChannelAssertErr( + dave, &lnrpc.ChannelPoint{ FundingTxid: &lnrpc.ChannelPoint_FundingTxidStr{ FundingTxidStr: chanPointParts[0], }, OutputIndex: uint32(chanPointIndex), - }, - Force: true, - }) - - // We don't get an error directly but only when reading the first - // message of the stream. - require.NoError(t.t, err) - _, err = resp.Recv() - require.Error(t.t, err) - require.Contains(t.t, err.Error(), "cannot close channel with state: ") - require.Contains(t.t, err.Error(), "ChanStatusRestored") + }, true, + ) + require.Contains(ht, err.Error(), "cannot close channel with state: ") + require.Contains(ht, err.Error(), "ChanStatusRestored") // Increase the fee estimate so that the following force close tx will // be cpfp'ed in case of anchor commitments. - net.SetFeeEstimate(30000) + ht.SetFeeEstimate(30000) // Now that we have ensured that the channels restored by the backup are // in the correct state even without the remote peer telling us so, // let's start up Carol again. - err = restartCarol() - require.NoError(t.t, err) + require.NoError(ht, restartCarol(), "restart carol failed") numUTXOs := 1 if commitTypeHasAnchors(testCase.commitmentType) { numUTXOs = 2 } - assertNumUTXOs(t.t, carol, numUTXOs) + ht.AssertNumUTXOs(carol, numUTXOs) // Now that we have our new node up, we expect that it'll re-connect to // Carol automatically based on the restored backup. 
- net.EnsureConnected(t.t, dave, carol) + ht.EnsureConnected(dave, carol) // TODO(roasbeef): move dave restarts? @@ -1335,21 +1157,19 @@ func testChanRestoreScenario(t *harnessTest, net *lntest.NetworkHarness, // We grab their balances now to ensure that they're made whole at the // end of the protocol. assertDLPExecuted( - net, t, carol, carolStartingBalance, dave, daveStartingBalance, - testCase.commitmentType, + ht, carol, carolStartingBalance, dave, + daveStartingBalance, testCase.commitmentType, ) } // createLegacyRevocationChannel creates a single channel using the legacy // revocation producer format by using PSBT to signal a special pending channel // ID. -func createLegacyRevocationChannel(net *lntest.NetworkHarness, t *harnessTest, - chanAmt, pushAmt btcutil.Amount, from, to *lntest.HarnessNode) { +func createLegacyRevocationChannel(ht *lntemp.HarnessTest, + chanAmt, pushAmt btcutil.Amount, from, to *node.HarnessNode) { - ctxb := context.Background() - - // We'll signal to the wallet that we also want to create a channel with - // the legacy revocation producer format that relies on deriving a + // We'll signal to the wallet that we also want to create a channel + // with the legacy revocation producer format that relies on deriving a // private key from the key ring. This is only available during itests // to make sure we don't hard depend on the DerivePrivKey method of the // key ring. We can signal the wallet by setting a custom pending @@ -1360,27 +1180,21 @@ func createLegacyRevocationChannel(net *lntest.NetworkHarness, t *harnessTest, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x2d, 0x72, 0x65, 0x76, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, } - ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout) - defer cancel() - openChannelReq := lntest.OpenChannelParams{ - Amt: chanAmt, - PushAmt: pushAmt, - FundingShim: &lnrpc.FundingShim{ - Shim: &lnrpc.FundingShim_PsbtShim{ - PsbtShim: &lnrpc.PsbtShim{ - PendingChanId: itestLegacyFormatChanID[:], - }, + shim := &lnrpc.FundingShim{ + Shim: &lnrpc.FundingShim_PsbtShim{ + PsbtShim: &lnrpc.PsbtShim{ + PendingChanId: itestLegacyFormatChanID[:], }, }, } - chanUpdates, tempPsbt, err := openChannelPsbt( - ctxt, from, to, openChannelReq, - ) - require.NoError(t.t, err) + openChannelReq := lntemp.OpenChannelParams{ + Amt: chanAmt, + PushAmt: pushAmt, + FundingShim: shim, + } + chanUpdates, tempPsbt := ht.OpenChannelPsbt(from, to, openChannelReq) // Fund the PSBT by using the source node's wallet. - ctxt, cancel = context.WithTimeout(ctxb, defaultTimeout) - defer cancel() fundReq := &walletrpc.FundPsbtRequest{ Template: &walletrpc.FundPsbtRequest_Psbt{ Psbt: tempPsbt, @@ -1389,52 +1203,43 @@ func createLegacyRevocationChannel(net *lntest.NetworkHarness, t *harnessTest, SatPerVbyte: 2, }, } - fundResp, err := from.WalletKitClient.FundPsbt(ctxt, fundReq) - require.NoError(t.t, err) + fundResp := from.RPC.FundPsbt(fundReq) // We have a PSBT that has no witness data yet, which is exactly what we // need for the next step of verifying the PSBT with the funding intents. - _, err = from.FundingStateStep(ctxb, &lnrpc.FundingTransitionMsg{ + msg := &lnrpc.FundingTransitionMsg{ Trigger: &lnrpc.FundingTransitionMsg_PsbtVerify{ PsbtVerify: &lnrpc.FundingPsbtVerify{ PendingChanId: itestLegacyFormatChanID[:], FundedPsbt: fundResp.FundedPsbt, }, }, - }) - require.NoError(t.t, err) + } + from.RPC.FundingStateStep(msg) // Now we'll ask the source node's wallet to sign the PSBT so we can // finish the funding flow. 
- ctxt, cancel = context.WithTimeout(ctxb, defaultTimeout) - defer cancel() finalizeReq := &walletrpc.FinalizePsbtRequest{ FundedPsbt: fundResp.FundedPsbt, } - finalizeRes, err := from.WalletKitClient.FinalizePsbt( - ctxt, finalizeReq, - ) - require.NoError(t.t, err) + finalizeRes := from.RPC.FinalizePsbt(finalizeReq) // We've signed our PSBT now, let's pass it to the intent again. - _, err = from.FundingStateStep(ctxb, &lnrpc.FundingTransitionMsg{ + msg = &lnrpc.FundingTransitionMsg{ Trigger: &lnrpc.FundingTransitionMsg_PsbtFinalize{ PsbtFinalize: &lnrpc.FundingPsbtFinalize{ PendingChanId: itestLegacyFormatChanID[:], SignedPsbt: finalizeRes.SignedPsbt, }, }, - }) - require.NoError(t.t, err) + } + from.RPC.FundingStateStep(msg) // Consume the "channel pending" update. This waits until the funding // transaction was fully compiled. - ctxt, cancel = context.WithTimeout(ctxb, defaultTimeout) - defer cancel() - updateResp, err := receiveChanUpdate(ctxt, chanUpdates) - require.NoError(t.t, err) + updateResp := ht.ReceiveOpenChannelUpdate(chanUpdates) upd, ok := updateResp.Update.(*lnrpc.OpenStatusUpdate_ChanPending) - require.True(t.t, ok) + require.True(ht, ok) chanPoint := &lnrpc.ChannelPoint{ FundingTxid: &lnrpc.ChannelPoint_FundingTxidBytes{ FundingTxidBytes: upd.ChanPending.Txid, @@ -1442,54 +1247,38 @@ func createLegacyRevocationChannel(net *lntest.NetworkHarness, t *harnessTest, OutputIndex: upd.ChanPending.OutputIndex, } - _ = mineBlocks(t, net, 6, 1) - err = from.WaitForNetworkChannelOpen(chanPoint) - require.NoError(t.t, err) - err = to.WaitForNetworkChannelOpen(chanPoint) - require.NoError(t.t, err) + ht.Miner.MineBlocksAndAssertNumTxes(6, 1) + ht.AssertTopologyChannelOpen(from, chanPoint) + ht.AssertTopologyChannelOpen(to, chanPoint) } // chanRestoreViaRPC is a helper test method that returns a nodeRestorer // instance which will restore the target node from a password+seed, then // trigger a SCB restore using the RPC interface. -func chanRestoreViaRPC(net *lntest.NetworkHarness, password []byte, +func chanRestoreViaRPC(ht *lntemp.HarnessTest, password []byte, mnemonic []string, multi []byte, - oldNode *lntest.HarnessNode) (nodeRestorer, error) { + oldNode *node.HarnessNode) nodeRestorer { backup := &lnrpc.RestoreChanBackupRequest_MultiChanBackup{ MultiChanBackup: multi, } - ctxb := context.Background() - - return func() (*lntest.HarnessNode, error) { - newNode, err := net.RestoreNodeWithSeed( + return func() *node.HarnessNode { + newNode := ht.RestoreNodeWithSeed( "dave", nil, password, mnemonic, "", 1000, nil, copyPorts(oldNode), ) - if err != nil { - return nil, fmt.Errorf("unable to "+ - "restore node: %v", err) - } + req := &lnrpc.RestoreChanBackupRequest{Backup: backup} + newNode.RPC.RestoreChanBackups(req) - _, err = newNode.RestoreChannelBackups( - ctxb, &lnrpc.RestoreChanBackupRequest{ - Backup: backup, - }, - ) - if err != nil { - return nil, fmt.Errorf("unable "+ - "to restore backups: %v", err) - } - - return newNode, nil - }, nil + return newNode + } } // copyPorts returns a node option function that copies the ports of an existing // node over to the newly created one. 
-func copyPorts(oldNode *lntest.HarnessNode) lntest.NodeOption { - return func(cfg *lntest.BaseNodeConfig) { +func copyPorts(oldNode *node.HarnessNode) node.Option { + return func(cfg *node.BaseNodeConfig) { cfg.P2PPort = oldNode.Cfg.P2PPort cfg.RPCPort = oldNode.Cfg.RPCPort cfg.RESTPort = oldNode.Cfg.RESTPort @@ -1505,3 +1294,219 @@ func rpcPointToWirePoint(t *harnessTest, return op } + +// assertTimeLockSwept when dave's outputs matures, he should claim them. This +// function will advance 2 blocks such that all the pending closing +// transactions would be swept in the end. +// +// Note: this function is only used in this test file and has been made +// specifically for testChanRestoreScenario. +func assertTimeLockSwept(ht *lntemp.HarnessTest, + carol *node.HarnessNode, carolStartingBalance int64, + dave *node.HarnessNode, daveStartingBalance int64, anchors bool) { + + expectedTxes := 2 + if anchors { + expectedTxes = 3 + } + + // Carol should sweep her funds immediately, as they are not timelocked. + // We also expect Carol and Dave to sweep their anchor, if present. + ht.Miner.AssertNumTxsInMempool(expectedTxes) + + // Carol should consider the channel pending force close (since she is + // waiting for her sweep to confirm). + ht.AssertNumPendingForceClose(carol, 1) + + // Dave is considering it "pending force close", as we must wait + // before he can sweep her outputs. + ht.AssertNumPendingForceClose(dave, 1) + + // Mine the sweep (and anchor) tx(ns). + ht.Miner.MineBlocksAndAssertNumTxes(1, expectedTxes) + + // Now Carol should consider the channel fully closed. + ht.AssertNumPendingForceClose(carol, 0) + + // We query Carol's balance to make sure it increased after the channel + // closed. This checks that she was able to sweep the funds she had in + // the channel. + carolBalResp := carol.RPC.WalletBalance() + carolBalance := carolBalResp.ConfirmedBalance + require.Greater(ht, carolBalance, carolStartingBalance, + "balance not increased") + + // After the Dave's output matures, he should reclaim his funds. + // + // The commit sweep resolver publishes the sweep tx at defaultCSV-1 and + // we already mined one block after the commitment was published, so + // take that into account. + ht.Miner.MineBlocks(defaultCSV - 1 - 1) + daveSweep := ht.Miner.AssertNumTxsInMempool(1)[0] + block := ht.Miner.MineBlocksAndAssertNumTxes(1, 1)[0] + ht.Miner.AssertTxInBlock(block, daveSweep) + + // Now the channel should be fully closed also from Dave's POV. + ht.AssertNumPendingForceClose(dave, 0) + + // Make sure Dave got his balance back. + err := wait.NoError(func() error { + daveBalResp := dave.RPC.WalletBalance() + daveBalance := daveBalResp.ConfirmedBalance + if daveBalance <= daveStartingBalance { + return fmt.Errorf("expected dave to have balance "+ + "above %d, instead had %v", daveStartingBalance, + daveBalance) + } + + return nil + }, defaultTimeout) + require.NoError(ht, err) + + ht.AssertNodeNumChannels(dave, 0) + ht.AssertNodeNumChannels(carol, 0) +} + +// assertDLPExecuted asserts that Dave is a node that has recovered their state +// form scratch. Carol should then force close on chain, with Dave sweeping his +// funds immediately, and Carol sweeping her fund after her CSV delay is up. If +// the blankSlate value is true, then this means that Dave won't need to sweep +// on chain as he has no funds in the channel. 
+func assertDLPExecuted(ht *lntemp.HarnessTest, + carol *node.HarnessNode, carolStartingBalance int64, + dave *node.HarnessNode, daveStartingBalance int64, + commitType lnrpc.CommitmentType) { + + // Increase the fee estimate so that the following force close tx will + // be cpfp'ed. + ht.SetFeeEstimate(30000) + + // We disabled auto-reconnect for some tests to avoid timing issues. + // To make sure the nodes are initiating DLP now, we have to manually + // re-connect them. + ht.EnsureConnected(carol, dave) + + // Upon reconnection, the nodes should detect that Dave is out of sync. + // Carol should force close the channel using her latest commitment. + expectedTxes := 1 + if commitTypeHasAnchors(commitType) { + expectedTxes = 2 + } + ht.Miner.AssertNumTxsInMempool(expectedTxes) + + // Channel should be in the state "waiting close" for Carol since she + // broadcasted the force close tx. + ht.AssertNumWaitingClose(carol, 1) + + // Dave should also consider the channel "waiting close", as he noticed + // the channel was out of sync, and is now waiting for a force close to + // hit the chain. + ht.AssertNumWaitingClose(dave, 1) + + // Restart Dave to make sure he is able to sweep the funds after + // shutdown. + ht.RestartNode(dave) + + // Generate a single block, which should confirm the closing tx. + ht.Miner.MineBlocksAndAssertNumTxes(1, expectedTxes) + + // Dave should consider the channel pending force close (since he is + // waiting for his sweep to confirm). + ht.AssertNumPendingForceClose(dave, 1) + + // Carol is considering it "pending force close", as we must wait + // before she can sweep her outputs. + ht.AssertNumPendingForceClose(carol, 1) + + if commitType == lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE { + // Dave should sweep his anchor only, since he still has the + // lease CLTV constraint on his commitment output. + ht.Miner.AssertNumTxsInMempool(1) + + // Mine Dave's anchor sweep tx. + ht.Miner.MineBlocksAndAssertNumTxes(1, 1) + + // After Carol's output matures, she should also reclaim her + // funds. + // + // The commit sweep resolver publishes the sweep tx at + // defaultCSV-1 and we already mined one block after the + // commitmment was published, so take that into account. + ht.Miner.MineBlocks(defaultCSV - 1 - 1) + ht.Miner.MineBlocksAndAssertNumTxes(1, 1) + + // Now the channel should be fully closed also from Carol's POV. + ht.AssertNumPendingForceClose(carol, 0) + + // We'll now mine the remaining blocks to prompt Dave to sweep + // his CLTV-constrained output. + resp := dave.RPC.PendingChannels() + blocksTilMaturity := + resp.PendingForceClosingChannels[0].BlocksTilMaturity + require.Positive(ht, blocksTilMaturity) + + ht.Miner.MineBlocks(uint32(blocksTilMaturity)) + ht.Miner.MineBlocksAndAssertNumTxes(1, 1) + + // Now Dave should consider the channel fully closed. + ht.AssertNumPendingForceClose(dave, 0) + } else { + // Dave should sweep his funds immediately, as they are not + // timelocked. We also expect Dave to sweep his anchor, if + // present. + ht.Miner.AssertNumTxsInMempool(expectedTxes) + + // Mine the sweep tx. + ht.Miner.MineBlocksAndAssertNumTxes(1, expectedTxes) + + // Now Dave should consider the channel fully closed. + ht.AssertNumPendingForceClose(dave, 0) + + // After Carol's output matures, she should also reclaim her + // funds. + // + // The commit sweep resolver publishes the sweep tx at + // defaultCSV-1 and we already mined one block after the + // commitmment was published, so take that into account. 
+ ht.Miner.MineBlocks(defaultCSV - 1 - 1) + ht.Miner.MineBlocksAndAssertNumTxes(1, 1) + + // Now the channel should be fully closed also from Carol's POV. + ht.AssertNumPendingForceClose(carol, 0) + } + + // We query Dave's balance to make sure it increased after the channel + // closed. This checks that he was able to sweep the funds he had in + // the channel. + daveBalResp := dave.RPC.WalletBalance() + daveBalance := daveBalResp.ConfirmedBalance + require.Greater(ht, daveBalance, daveStartingBalance, + "balance not increased") + + // Make sure Carol got her balance back. + err := wait.NoError(func() error { + carolBalResp := carol.RPC.WalletBalance() + carolBalance := carolBalResp.ConfirmedBalance + + // With Neutrino we don't get a backend error when trying to + // publish an orphan TX (which is what the sweep for the remote + // anchor is since the remote commitment TX was not broadcast). + // That's why the wallet still sees that as unconfirmed and we + // need to count the total balance instead of the confirmed. + if ht.IsNeutrinoBackend() { + carolBalance = carolBalResp.TotalBalance + } + + if carolBalance <= carolStartingBalance { + return fmt.Errorf("expected carol to have balance "+ + "above %d, instead had %v", + carolStartingBalance, carolBalance) + } + + return nil + }, defaultTimeout) + require.NoError(ht, err, "timeout while checking carol's balance") + + ht.AssertNodeNumChannels(dave, 0) + ht.AssertNodeNumChannels(carol, 0) +} diff --git a/lntest/itest/lnd_misc_test.go b/lntest/itest/lnd_misc_test.go index 8b244d9b5..07ea01548 100644 --- a/lntest/itest/lnd_misc_test.go +++ b/lntest/itest/lnd_misc_test.go @@ -1019,7 +1019,7 @@ func testDataLossProtection(net *lntest.NetworkHarness, t *harnessTest) { // Assert that once Dave comes up, they reconnect, Carol force closes // on chain, and both of them properly carry out the DLP protocol. 
- assertDLPExecuted( + assertDLPExecutedOld( net, t, carol, carolStartingBalance, dave, daveStartingBalance, lnrpc.CommitmentType_STATIC_REMOTE_KEY, ) diff --git a/lntest/itest/lnd_multi-hop_test.go b/lntest/itest/lnd_multi-hop_test.go index 180d0aa01..5f617d39e 100644 --- a/lntest/itest/lnd_multi-hop_test.go +++ b/lntest/itest/lnd_multi-hop_test.go @@ -279,7 +279,7 @@ func createThreeHopNetwork(t *harnessTest, net *lntest.NetworkHarness, ctxc, cancel = context.WithCancel(ctxb) acceptStream, err := bob.ChannelAcceptor(ctxc) require.NoError(t.t, err) - go acceptChannel(t, true, acceptStream) + go acceptChannel(t.t, true, acceptStream) } aliceChanPoint := openChannelAndAssert( @@ -349,7 +349,7 @@ func createThreeHopNetwork(t *harnessTest, net *lntest.NetworkHarness, ctxc, cancel = context.WithCancel(ctxb) acceptStream, err := carol.ChannelAcceptor(ctxc) require.NoError(t.t, err) - go acceptChannel(t, true, acceptStream) + go acceptChannel(t.t, true, acceptStream) } bobChanPoint := openChannelAndAssert( diff --git a/lntest/itest/lnd_test_list_on_test.go b/lntest/itest/lnd_test_list_on_test.go index 0be090023..e185fc88d 100644 --- a/lntest/itest/lnd_test_list_on_test.go +++ b/lntest/itest/lnd_test_list_on_test.go @@ -247,10 +247,6 @@ var allTestCases = []*testCase{ name: "export channel backup", test: testExportChannelBackup, }, - { - name: "channel backup restore", - test: testChannelBackupRestore, - }, { name: "hold invoice sender persistence", test: testHoldInvoicePersistence, diff --git a/lntest/itest/lnd_zero_conf_test.go b/lntest/itest/lnd_zero_conf_test.go index 97951444e..f41c03408 100644 --- a/lntest/itest/lnd_zero_conf_test.go +++ b/lntest/itest/lnd_zero_conf_test.go @@ -65,7 +65,7 @@ func testZeroConfChannelOpen(net *lntest.NetworkHarness, t *harnessTest) { ctxc, cancel := context.WithCancel(ctxb) acceptStream, err := dave.ChannelAcceptor(ctxc) require.NoError(t.t, err) - go acceptChannel(t, true, acceptStream) + go acceptChannel(t.t, true, acceptStream) // Open a private zero-conf anchors channel of 1M satoshis. params := lntest.OpenChannelParams{ @@ -167,7 +167,7 @@ func testZeroConfChannelOpen(net *lntest.NetworkHarness, t *harnessTest) { ctxc, cancel = context.WithCancel(ctxb) acceptStream, err = carol.ChannelAcceptor(ctxc) require.NoError(t.t, err) - go acceptChannel(t, true, acceptStream) + go acceptChannel(t.t, true, acceptStream) // We'll open a public zero-conf anchors channel of 1M satoshis. params.Private = false @@ -604,7 +604,7 @@ func testPrivateUpdateAlias(net *lntest.NetworkHarness, t *harnessTest, ctxc, cancel := context.WithCancel(ctxb) acceptStream, err := dave.ChannelAcceptor(ctxc) require.NoError(t.t, err) - go acceptChannel(t, zeroConf, acceptStream) + go acceptChannel(t.t, zeroConf, acceptStream) // Open a private channel, optionally specifying a channel-type. 
params := lntest.OpenChannelParams{ diff --git a/lntest/itest/utils.go b/lntest/itest/utils.go index 2118807ca..eed3a1972 100644 --- a/lntest/itest/utils.go +++ b/lntest/itest/utils.go @@ -7,6 +7,7 @@ import ( "io" "strconv" "strings" + "testing" "time" "github.com/btcsuite/btcd/btcutil" @@ -19,6 +20,7 @@ import ( "github.com/lightningnetwork/lnd/lnrpc" "github.com/lightningnetwork/lnd/lnrpc/routerrpc" "github.com/lightningnetwork/lnd/lntemp" + "github.com/lightningnetwork/lnd/lntemp/rpc" "github.com/lightningnetwork/lnd/lntest" "github.com/lightningnetwork/lnd/lntest/wait" "github.com/lightningnetwork/lnd/lnwallet" @@ -538,11 +540,9 @@ func parseDerivationPath(path string) ([]uint32, error) { // acceptChannel is used to accept a single channel that comes across. This // should be run in a goroutine and is used to test nodes with the zero-conf // feature bit. -func acceptChannel(t *harnessTest, zeroConf bool, - stream lnrpc.Lightning_ChannelAcceptorClient) { - +func acceptChannel(t *testing.T, zeroConf bool, stream rpc.AcceptorClient) { req, err := stream.Recv() - require.NoError(t.t, err) + require.NoError(t, err) resp := &lnrpc.ChannelAcceptResponse{ Accept: true, @@ -550,5 +550,5 @@ func acceptChannel(t *harnessTest, zeroConf bool, ZeroConf: zeroConf, } err = stream.Send(resp) - require.NoError(t.t, err) + require.NoError(t, err) }
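A minimal usage sketch (not part of the patch above): the new `HarnessRPC.ChannelAcceptor` wrapper added in `lntemp/rpc/lnd.go` and the reworked `acceptChannel` helper in `lntest/itest/utils.go` are meant to be driven together from a test body, as the zero-conf branch of `testChanRestoreScenario` does. The example below is illustrative only; the function name `exampleZeroConfAccept` is made up for this sketch, and it assumes the `lntemp`, `node`, and `rpc` packages exactly as they appear in the diff.

package itest

import (
	"github.com/lightningnetwork/lnd/lntemp"
	"github.com/lightningnetwork/lnd/lntemp/node"
)

// exampleZeroConfAccept is an illustrative sketch (not part of this patch)
// showing how the acceptor stream is opened on the fundee, served from a
// goroutine, and torn down once channel negotiation is done.
func exampleZeroConfAccept(ht *lntemp.HarnessTest, fundee *node.HarnessNode) {
	// Open the acceptor stream on the fundee. The returned cancel func
	// closes the stream when we are done with it; per the wrapper above,
	// the stream is scoped to the harness's runCtx so it stays alive for
	// the entire test case rather than a short timeout.
	acceptStream, cancelAcceptor := fundee.RPC.ChannelAcceptor()

	// Serve the stream from a goroutine. acceptChannel accepts a single
	// incoming channel request and sets ZeroConf on the response.
	go acceptChannel(ht.T, true, acceptStream)

	// ... the opener side would now call ht.OpenChannel with
	// ZeroConf: true, exactly as testChanRestoreScenario does above ...

	// Remove the acceptor once the channel has been negotiated.
	cancelAcceptor()
}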