From bbe69f1bd3183f04a4c107a7932c6e4987040a63 Mon Sep 17 00:00:00 2001
From: yyforyongyu
Date: Tue, 2 Aug 2022 20:56:13 +0800
Subject: [PATCH 01/47] multi: refactor `testSweepAllCoins`

---
 lntemp/harness.go                     |  16 +++
 lntemp/harness_miner.go               |   7 +
 lntemp/rpc/lnd.go                     |  36 ++++++
 lntemp/rpc/wallet_kit.go              |  25 ++++
 lntest/itest/list_on_test.go          |   4 +
 lntest/itest/lnd_misc_test.go         | 179 ++++++++++----------------
 lntest/itest/lnd_test_list_on_test.go |   4 -
 7 files changed, 157 insertions(+), 114 deletions(-)

diff --git a/lntemp/harness.go b/lntemp/harness.go
index 072dc85bf..ed9865f28 100644
--- a/lntemp/harness.go
+++ b/lntemp/harness.go
@@ -971,6 +971,22 @@ func (h *HarnessTest) FundCoinsUnconfirmed(amt btcutil.Amount,
 	h.fundCoins(amt, hn, lnrpc.AddressType_WITNESS_PUBKEY_HASH, false)
 }
 
+// FundCoinsNP2WKH attempts to send amt satoshis from the internal mining node
+// to the targeted lightning node using an NP2WKH address.
+func (h *HarnessTest) FundCoinsNP2WKH(amt btcutil.Amount,
+	target *node.HarnessNode) {
+
+	h.fundCoins(amt, target, lnrpc.AddressType_NESTED_PUBKEY_HASH, true)
+}
+
+// FundCoinsP2TR attempts to send amt satoshis from the internal mining node to
+// the targeted lightning node using a P2TR address.
+func (h *HarnessTest) FundCoinsP2TR(amt btcutil.Amount,
+	target *node.HarnessNode) {
+
+	h.fundCoins(amt, target, lnrpc.AddressType_TAPROOT_PUBKEY, true)
+}
+
 // CompletePaymentRequests sends payments from a node to complete all payment
 // requests. This function does not return until all payments successfully
 // complete without errors.
diff --git a/lntemp/harness_miner.go b/lntemp/harness_miner.go
index 1c37b33f6..bcccb8656 100644
--- a/lntemp/harness_miner.go
+++ b/lntemp/harness_miner.go
@@ -376,3 +376,10 @@ func (h *HarnessMiner) GetNumTxsFromMempool(n int) []*wire.MsgTx {
 
 	return txes
 }
+
+// NewMinerAddress creates a new address for the miner and asserts.
+func (h *HarnessMiner) NewMinerAddress() btcutil.Address {
+	addr, err := h.NewAddress()
+	require.NoError(h, err, "failed to create new miner address")
+	return addr
+}
diff --git a/lntemp/rpc/lnd.go b/lntemp/rpc/lnd.go
index 01022f501..fd0c8721b 100644
--- a/lntemp/rpc/lnd.go
+++ b/lntemp/rpc/lnd.go
@@ -319,3 +319,39 @@ func (h *HarnessRPC) ChannelAcceptor() (AcceptorClient, context.CancelFunc) {
 
 	return resp, cancel
 }
+
+// SendCoins sends a given amount of money to the specified address from the
+// passed node.
+func (h *HarnessRPC) SendCoins(
+	req *lnrpc.SendCoinsRequest) *lnrpc.SendCoinsResponse {
+
+	ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
+	defer cancel()
+
+	resp, err := h.LN.SendCoins(ctxt, req)
+	require.NoErrorf(h, err, "node %s failed to send coins to address %s",
+		h.Name, req.Addr)
+
+	return resp
+}
+
+// SendCoinsAssertErr sends a given amount of money to the specified address
+// from the passed node and asserts an error is returned.
+func (h *HarnessRPC) SendCoinsAssertErr(req *lnrpc.SendCoinsRequest) {
+	ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
+	defer cancel()
+
+	_, err := h.LN.SendCoins(ctxt, req)
+	require.Error(h, err, "node %s didn't return an error", h.Name)
+}
+
+// GetTransactions makes an RPC call to GetTransactions and asserts.
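+// It queries the node with an empty GetTransactionsRequest, so every
+// transaction relevant to the node's wallet is returned.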
+func (h *HarnessRPC) GetTransactions() *lnrpc.TransactionDetails {
+	ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
+	defer cancel()
+
+	resp, err := h.LN.GetTransactions(ctxt, &lnrpc.GetTransactionsRequest{})
+	require.NoErrorf(h, err, "failed to GetTransactions for %s", h.Name)
+
+	return resp
+}
diff --git a/lntemp/rpc/wallet_kit.go b/lntemp/rpc/wallet_kit.go
index ed0f86960..2feddf473 100644
--- a/lntemp/rpc/wallet_kit.go
+++ b/lntemp/rpc/wallet_kit.go
@@ -5,6 +5,7 @@ import (
 
 	"github.com/lightningnetwork/lnd/lnrpc/signrpc"
 	"github.com/lightningnetwork/lnd/lnrpc/walletrpc"
+	"github.com/stretchr/testify/require"
 )
 
 // =====================
@@ -73,3 +74,27 @@ func (h *HarnessRPC) FinalizePsbt(
 
 	return resp
 }
+
+// LabelTransactionAssertErr makes an RPC call to the node's LabelTransaction
+// and asserts an error is returned. It then returns the error.
+func (h *HarnessRPC) LabelTransactionAssertErr(
+	req *walletrpc.LabelTransactionRequest) error {
+
+	ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
+	defer cancel()
+
+	_, err := h.WalletKit.LabelTransaction(ctxt, req)
+	require.Error(h, err, "expected error returned")
+
+	return err
+}
+
+// LabelTransaction makes an RPC call to the node's LabelTransaction
+// and asserts no error is returned.
+func (h *HarnessRPC) LabelTransaction(req *walletrpc.LabelTransactionRequest) {
+	ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
+	defer cancel()
+
+	_, err := h.WalletKit.LabelTransaction(ctxt, req)
+	h.NoError(err, "LabelTransaction")
+}
diff --git a/lntest/itest/list_on_test.go b/lntest/itest/list_on_test.go
index 302d17890..8c55c9317 100644
--- a/lntest/itest/list_on_test.go
+++ b/lntest/itest/list_on_test.go
@@ -71,4 +71,8 @@ var allTestCasesTemp = []*lntemp.TestCase{
 		Name:     "data loss protection",
 		TestFunc: testDataLossProtection,
 	},
+	{
+		Name:     "sweep coins",
+		TestFunc: testSweepAllCoins,
+	},
 }
diff --git a/lntest/itest/lnd_misc_test.go b/lntest/itest/lnd_misc_test.go
index 99472f148..a548a77c6 100644
--- a/lntest/itest/lnd_misc_test.go
+++ b/lntest/itest/lnd_misc_test.go
@@ -22,6 +22,7 @@ import (
 	"github.com/lightningnetwork/lnd/lnrpc"
 	"github.com/lightningnetwork/lnd/lnrpc/routerrpc"
 	"github.com/lightningnetwork/lnd/lnrpc/walletrpc"
+	"github.com/lightningnetwork/lnd/lntemp"
 	"github.com/lightningnetwork/lnd/lntest"
 	"github.com/lightningnetwork/lnd/lntest/wait"
 	"github.com/lightningnetwork/lnd/lnwallet"
@@ -1180,26 +1181,23 @@ func testAbandonChannel(net *lntest.NetworkHarness, t *harnessTest) {
 
 // testSweepAllCoins tests that we're able to properly sweep all coins from the
 // wallet into a single target address at the specified fee rate.
-func testSweepAllCoins(net *lntest.NetworkHarness, t *harnessTest) {
-	ctxb := context.Background()
-
+//
+// TODO(yy): expand this test to also use P2TR.
+func testSweepAllCoins(ht *lntemp.HarnessTest) {
 	// First, we'll make a new node, Ainz, which we'll use to test wallet
 	// sweeping.
-	ainz := net.NewNode(t.t, "Ainz", nil)
-	defer shutdownAndAssert(net, t, ainz)
+	//
+	// NOTE: we won't use standby nodes here since the test will change
+	// each node's wallet state.
+	ainz := ht.NewNode("Ainz", nil)
 
 	// Next, we'll give Ainz exactly 2 utxos of 1 BTC each, with one of
 	// them being p2wkh and the other being an np2wkh address.
-	net.SendCoins(t.t, btcutil.SatoshiPerBitcoin, ainz)
-
-	net.SendCoinsNP2WKH(t.t, btcutil.SatoshiPerBitcoin, ainz)
+	ht.FundCoins(btcutil.SatoshiPerBitcoin, ainz)
+	ht.FundCoinsNP2WKH(btcutil.SatoshiPerBitcoin, ainz)
 
 	// Ensure that we can't send coins to our own Pubkey.
-	ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
-	info, err := ainz.GetInfo(ctxt, &lnrpc.GetInfoRequest{})
-	if err != nil {
-		t.Fatalf("unable to get node info: %v", err)
-	}
+	info := ainz.RPC.GetInfo()
 
 	// Create a label that we will use to label the transaction with.
 	sendCoinsLabel := "send all coins"
 
@@ -1209,171 +1207,132 @@ func testSweepAllCoins(net *lntest.NetworkHarness, t *harnessTest) {
 		SendAll: true,
 		Label:   sendCoinsLabel,
 	}
-	_, err = ainz.SendCoins(ctxt, sweepReq)
-	if err == nil {
-		t.Fatalf("expected SendCoins to users own pubkey to fail")
-	}
+	ainz.RPC.SendCoinsAssertErr(sweepReq)
 
-	// Ensure that we can't send coins to another users Pubkey.
-	info, err = net.Alice.GetInfo(ctxt, &lnrpc.GetInfoRequest{})
-	if err != nil {
-		t.Fatalf("unable to get node info: %v", err)
-	}
+	// Ensure that we can't send coins to another user's Pubkey.
+	info = ht.Alice.RPC.GetInfo()
 
 	sweepReq = &lnrpc.SendCoinsRequest{
 		Addr:    info.IdentityPubkey,
 		SendAll: true,
 		Label:   sendCoinsLabel,
 	}
-	_, err = ainz.SendCoins(ctxt, sweepReq)
-	if err == nil {
-		t.Fatalf("expected SendCoins to Alices pubkey to fail")
-	}
+	ainz.RPC.SendCoinsAssertErr(sweepReq)
 
 	// With the two coins above mined, we'll now instruct ainz to sweep all
-	// the coins to an external address not under its control.
-	// We will first attempt to send the coins to addresses that are not
-	// compatible with the current network. This is to test that the wallet
-	// will prevent any onchain transactions to addresses that are not on the
+	// the coins to an external address not under its control. We will
+	// first attempt to send the coins to addresses that are not compatible
+	// with the current network. This is to test that the wallet will
+	// prevent any onchain transactions to addresses that are not on the
 	// same network as the user.
 
 	// Send coins to a testnet3 address.
-	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
 	sweepReq = &lnrpc.SendCoinsRequest{
 		Addr:    "tb1qfc8fusa98jx8uvnhzavxccqlzvg749tvjw82tg",
 		SendAll: true,
 		Label:   sendCoinsLabel,
 	}
-	_, err = ainz.SendCoins(ctxt, sweepReq)
-	if err == nil {
-		t.Fatalf("expected SendCoins to different network to fail")
-	}
+	ainz.RPC.SendCoinsAssertErr(sweepReq)
 
 	// Send coins to a mainnet address.
-	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
 	sweepReq = &lnrpc.SendCoinsRequest{
 		Addr:    "1MPaXKp5HhsLNjVSqaL7fChE3TVyrTMRT3",
 		SendAll: true,
 		Label:   sendCoinsLabel,
 	}
-	_, err = ainz.SendCoins(ctxt, sweepReq)
-	if err == nil {
-		t.Fatalf("expected SendCoins to different network to fail")
-	}
+	ainz.RPC.SendCoinsAssertErr(sweepReq)
 
 	// Send coins to a compatible address.
-	minerAddr, err := net.Miner.NewAddress()
-	if err != nil {
-		t.Fatalf("unable to create new miner addr: %v", err)
-	}
-
+	minerAddr := ht.Miner.NewMinerAddress()
 	sweepReq = &lnrpc.SendCoinsRequest{
 		Addr:    minerAddr.String(),
 		SendAll: true,
 		Label:   sendCoinsLabel,
 	}
-	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
-	_, err = ainz.SendCoins(ctxt, sweepReq)
-	if err != nil {
-		t.Fatalf("unable to sweep coins: %v", err)
-	}
+	ainz.RPC.SendCoins(sweepReq)
 
 	// We'll mine a block which should include the sweep transaction we
 	// generated above.
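 	// The coinbase sits at index 0, so the sweep is expected to be the
 	// second transaction in the block.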
-	block := mineBlocks(t, net, 1, 1)[0]
+	block := ht.MineBlocksAndAssertNumTxes(1, 1)[0]
 
 	// The sweep transaction should have exactly two inputs as we only had
 	// two UTXOs in the wallet.
 	sweepTx := block.Transactions[1]
-	if len(sweepTx.TxIn) != 2 {
-		t.Fatalf("expected 2 inputs instead have %v", len(sweepTx.TxIn))
+	require.Len(ht, sweepTx.TxIn, 2, "expected 2 inputs")
+
+	// assertTxLabel is a helper function which finds a target tx in our
+	// set of transactions and checks that it has the desired label.
+	assertTxLabel := func(targetTx, label string) {
+		// List all transactions relevant to our wallet, and find the
+		// tx so that we can check the correct label has been set.
+		txResp := ainz.RPC.GetTransactions()
+
+		// Find our transaction in the set of transactions returned and
+		// check its label.
+		for _, txn := range txResp.Transactions {
+			if txn.TxHash == targetTx {
+				require.Equal(ht, label, txn.Label,
+					"labels don't match")
+			}
+		}
 	}
 
 	sweepTxStr := sweepTx.TxHash().String()
-	assertTxLabel(t, ainz, sweepTxStr, sendCoinsLabel)
+	assertTxLabel(sweepTxStr, sendCoinsLabel)
 
-	// While we are looking at labels, we test our label transaction command
-	// to make sure it is behaving as expected. First, we try to label our
-	// transaction with an empty label, and check that we fail as expected.
+	// While we are looking at labels, we test our label transaction
+	// command to make sure it is behaving as expected. First, we try to
+	// label our transaction with an empty label, and check that we fail as
+	// expected.
 	sweepHash := sweepTx.TxHash()
-	_, err = ainz.WalletKitClient.LabelTransaction(
-		ctxt, &walletrpc.LabelTransactionRequest{
-			Txid:      sweepHash[:],
-			Label:     "",
-			Overwrite: false,
-		},
-	)
-	if err == nil {
-		t.Fatalf("expected error for zero transaction label")
+	req := &walletrpc.LabelTransactionRequest{
+		Txid:      sweepHash[:],
+		Label:     "",
+		Overwrite: false,
 	}
+	err := ainz.RPC.LabelTransactionAssertErr(req)
 
 	// Our error will be wrapped in an rpc error, so we check that it
 	// contains the error we expect.
 	errZeroLabel := "cannot label transaction with empty label"
-	if !strings.Contains(err.Error(), errZeroLabel) {
-		t.Fatalf("expected: zero label error, got: %v", err)
-	}
+	require.Contains(ht, err.Error(), errZeroLabel,
+		"expected: zero label error")
 
 	// Next, we try to relabel our transaction without setting the overwrite
 	// boolean. We expect this to fail, because the wallet requires setting
 	// of this param to prevent accidental overwrite of labels.
-	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
-	_, err = ainz.WalletKitClient.LabelTransaction(
-		ctxt, &walletrpc.LabelTransactionRequest{
-			Txid:      sweepHash[:],
-			Label:     "label that will not work",
-			Overwrite: false,
-		},
-	)
-	if err == nil {
-		t.Fatalf("expected error for tx already labelled")
+	req = &walletrpc.LabelTransactionRequest{
		Txid:      sweepHash[:],
+		Label:     "label that will not work",
+		Overwrite: false,
 	}
+	err = ainz.RPC.LabelTransactionAssertErr(req)
 
 	// Our error will be wrapped in an rpc error, so we check that it
 	// contains the error we expect.
-	if !strings.Contains(err.Error(), wallet.ErrTxLabelExists.Error()) {
-		t.Fatalf("expected: label exists, got: %v", err)
-	}
+	require.Contains(ht, err.Error(), wallet.ErrTxLabelExists.Error())
 
 	// Finally, we overwrite our label with a new label, which should not
 	// fail.
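 	// This time the Overwrite flag is set to true, which allows the
 	// existing label to be replaced.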
 	newLabel := "new sweep tx label"
-	_, err = ainz.WalletKitClient.LabelTransaction(
-		ctxt, &walletrpc.LabelTransactionRequest{
-			Txid:      sweepHash[:],
-			Label:     newLabel,
-			Overwrite: true,
-		},
-	)
-	if err != nil {
-		t.Fatalf("could not label tx: %v", err)
+	req = &walletrpc.LabelTransactionRequest{
+		Txid:      sweepHash[:],
+		Label:     newLabel,
+		Overwrite: true,
 	}
+	ainz.RPC.LabelTransaction(req)
 
-	assertTxLabel(t, ainz, sweepTxStr, newLabel)
+	assertTxLabel(sweepTxStr, newLabel)
 
 	// Finally, Ainz should now have no coins at all within his wallet.
-	balReq := &lnrpc.WalletBalanceRequest{}
-	resp, err := ainz.WalletBalance(ctxt, balReq)
-	if err != nil {
-		t.Fatalf("unable to get ainz's balance: %v", err)
-	}
-	switch {
-	case resp.ConfirmedBalance != 0:
-		t.Fatalf("expected no confirmed balance, instead have %v",
-			resp.ConfirmedBalance)
-
-	case resp.UnconfirmedBalance != 0:
-		t.Fatalf("expected no unconfirmed balance, instead have %v",
-			resp.UnconfirmedBalance)
-	}
+	resp := ainz.RPC.WalletBalance()
+	require.Zero(ht, resp.ConfirmedBalance, "wrong confirmed balance")
+	require.Zero(ht, resp.UnconfirmedBalance, "wrong unconfirmed balance")
 
 	// If we try again, but this time specifying an amount, then the call
 	// should fail.
 	sweepReq.Amount = 10000
-	_, err = ainz.SendCoins(ctxt, sweepReq)
-	if err == nil {
-		t.Fatalf("sweep attempt should fail")
-	}
+	ainz.RPC.SendCoinsAssertErr(sweepReq)
 }
 
 // testListAddresses tests that we get all the addresses and their
diff --git a/lntest/itest/lnd_test_list_on_test.go b/lntest/itest/lnd_test_list_on_test.go
index fe5ac6378..bfaf68e38 100644
--- a/lntest/itest/lnd_test_list_on_test.go
+++ b/lntest/itest/lnd_test_list_on_test.go
@@ -4,10 +4,6 @@ package itest
 
 var allTestCases = []*testCase{
-	{
-		name: "sweep coins",
-		test: testSweepAllCoins,
-	},
 	{
 		name: "list addresses",
 		test: testListAddresses,

From d39303f2469305360281b500e1b3e3206e763d49 Mon Sep 17 00:00:00 2001
From: yyforyongyu
Date: Thu, 10 Nov 2022 14:47:48 +0800
Subject: [PATCH 02/47] itest: wrap `assertTxLabel` inside `wait`

---
 lntest/itest/lnd_misc_test.go | 36 ++++++++++++++++++++++++++++-------
 lntest/wait/wait.go           |  3 +++
 2 files changed, 32 insertions(+), 7 deletions(-)

diff --git a/lntest/itest/lnd_misc_test.go b/lntest/itest/lnd_misc_test.go
index a548a77c6..d9af97f3c 100644
--- a/lntest/itest/lnd_misc_test.go
+++ b/lntest/itest/lnd_misc_test.go
@@ -1262,23 +1262,45 @@ func testSweepAllCoins(ht *lntemp.HarnessTest) {
 
 	// assertTxLabel is a helper function which finds a target tx in our
 	// set of transactions and checks that it has the desired label.
-	assertTxLabel := func(targetTx, label string) {
+	assertTxLabel := func(targetTx, label string) error {
 		// List all transactions relevant to our wallet, and find the
 		// tx so that we can check the correct label has been set.
 		txResp := ainz.RPC.GetTransactions()
 
-		// Find our transaction in the set of transactions returned and
-		// check its label.
+		var target *lnrpc.Transaction
+
+		// First we need to find the target tx.
 		for _, txn := range txResp.Transactions {
 			if txn.TxHash == targetTx {
-				require.Equal(ht, label, txn.Label,
-					"labels don't match")
+				target = txn
 			}
 		}
+
+		// If we cannot find it, return an error.
+		if target == nil {
+			return fmt.Errorf("target tx %v not found", targetTx)
+		}
+
+		// Otherwise, check that the labels match.
+		if target.Label == label {
+			return nil
+		}
+
+		return fmt.Errorf("labels don't match, want: "+
+			"%v, got %v", label, target.Label)
+	}
+
+	// waitTxLabel waits until the desired tx label is found or timeout.
+	waitTxLabel := func(targetTx, label string) {
+		err := wait.NoError(func() error {
+			return assertTxLabel(targetTx, label)
+		}, defaultTimeout)
+
+		require.NoError(ht, err, "timeout assertTxLabel")
+	}
 
 	sweepTxStr := sweepTx.TxHash().String()
-	assertTxLabel(sweepTxStr, sendCoinsLabel)
+	waitTxLabel(sweepTxStr, sendCoinsLabel)
 
 	// While we are looking at labels, we test our label transaction
 	// command to make sure it is behaving as expected. First, we try to
@@ -1322,7 +1344,7 @@ func testSweepAllCoins(ht *lntemp.HarnessTest) {
 	}
 	ainz.RPC.LabelTransaction(req)
 
-	assertTxLabel(sweepTxStr, newLabel)
+	waitTxLabel(sweepTxStr, newLabel)
 
 	// Finally, Ainz should now have no coins at all within his wallet.
 	resp := ainz.RPC.WalletBalance()
diff --git a/lntest/wait/wait.go b/lntest/wait/wait.go
index 16a5f2ab2..b6274086c 100644
--- a/lntest/wait/wait.go
+++ b/lntest/wait/wait.go
@@ -13,6 +13,9 @@ const PollInterval = 200 * time.Millisecond
 // timing doesn't always line up well when running integration tests with
 // several running lnd nodes. This function gives callers a way to assert that
 // some property is upheld within a particular time frame.
+//
+// TODO(yy): build a counter here so we know how many times we've tried the
+// `pred`.
 func Predicate(pred func() bool, timeout time.Duration) error {
 	exitTimer := time.After(timeout)
 	result := make(chan bool, 1)

From f5ebaa5f35326e5fa2eb56501250ba735e9517ab Mon Sep 17 00:00:00 2001
From: yyforyongyu
Date: Tue, 2 Aug 2022 21:15:49 +0800
Subject: [PATCH 03/47] multi: refactor `testDisconnectingTargetPeer`

---
 lntemp/harness_assertion.go           |  34 +++++++
 lntest/itest/list_on_test.go          |   4 +
 lntest/itest/lnd_misc_test.go         | 133 +++++++++-----------------
 lntest/itest/lnd_test_list_on_test.go |   4 -
 4 files changed, 81 insertions(+), 94 deletions(-)

diff --git a/lntemp/harness_assertion.go b/lntemp/harness_assertion.go
index 05a25a6b0..edf1eea15 100644
--- a/lntemp/harness_assertion.go
+++ b/lntemp/harness_assertion.go
@@ -1295,3 +1295,37 @@ func (h *HarnessTest) AssertActiveNodesSynced() {
 		h.WaitForBlockchainSync(node)
 	}
 }
+
+// AssertPeerNotConnected asserts that the given node b is not connected to a.
+func (h *HarnessTest) AssertPeerNotConnected(a, b *node.HarnessNode) {
+	err := wait.NoError(func() error {
+		// We expect the RPC call itself to succeed and don't retry
+		// it, as a failure there is unexpected behavior.
+		resp := a.RPC.ListPeers()
+
+		// If node B is seen in the ListPeers response from node A,
+		// then we return an error as the connection is still fully
+		// established.
+		for _, peer := range resp.Peers {
+			if peer.PubKey == b.PubKeyStr {
+				return fmt.Errorf("peers %s and %s still "+
+					"connected", a.Name(), b.Name())
+			}
+		}
+
+		return nil
+	}, DefaultTimeout)
+	require.NoError(h, err, "timeout checking peers not connected")
+}
+
+// AssertNotConnected asserts that two peers are not connected.
+func (h *HarnessTest) AssertNotConnected(a, b *node.HarnessNode) {
+	h.AssertPeerNotConnected(a, b)
+	h.AssertPeerNotConnected(b, a)
+}
+
+// AssertConnected asserts that two peers are connected.
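+// The check is made in both directions, so a half-open connection where only
+// one side still lists the other as a peer is not treated as connected.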
+func (h *HarnessTest) AssertConnected(a, b *node.HarnessNode) { + h.AssertPeerConnected(a, b) + h.AssertPeerConnected(b, a) +} diff --git a/lntest/itest/list_on_test.go b/lntest/itest/list_on_test.go index 8c55c9317..8647c8d30 100644 --- a/lntest/itest/list_on_test.go +++ b/lntest/itest/list_on_test.go @@ -75,4 +75,8 @@ var allTestCasesTemp = []*lntemp.TestCase{ Name: "sweep coins", TestFunc: testSweepAllCoins, }, + { + Name: "disconnecting target peer", + TestFunc: testDisconnectingTargetPeer, + }, } diff --git a/lntest/itest/lnd_misc_test.go b/lntest/itest/lnd_misc_test.go index d9af97f3c..b0558e757 100644 --- a/lntest/itest/lnd_misc_test.go +++ b/lntest/itest/lnd_misc_test.go @@ -11,7 +11,6 @@ import ( "time" "github.com/btcsuite/btcd/btcutil" - "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcd/wire" "github.com/btcsuite/btcwallet/wallet" "github.com/davecgh/go-spew/spew" @@ -30,10 +29,12 @@ import ( "github.com/stretchr/testify/require" ) -// testDisconnectingTargetPeer performs a test which disconnects Alice-peer from -// Bob-peer and then re-connects them again. We expect Alice to be able to +// testDisconnectingTargetPeer performs a test which disconnects Alice-peer +// from Bob-peer and then re-connects them again. We expect Alice to be able to // disconnect at any point. -func testDisconnectingTargetPeer(net *lntest.NetworkHarness, t *harnessTest) { +// +// TODO(yy): move to lnd_network_test. +func testDisconnectingTargetPeer(ht *lntemp.HarnessTest) { // We'll start both nodes with a high backoff so that they don't // reconnect automatically during our test. args := []string{ @@ -41,20 +42,12 @@ func testDisconnectingTargetPeer(net *lntest.NetworkHarness, t *harnessTest) { "--maxbackoff=1m", } - alice := net.NewNode(t.t, "Alice", args) - defer shutdownAndAssert(net, t, alice) - - bob := net.NewNode(t.t, "Bob", args) - defer shutdownAndAssert(net, t, bob) + alice, bob := ht.Alice, ht.Bob + ht.RestartNodeWithExtraArgs(alice, args) + ht.RestartNodeWithExtraArgs(bob, args) // Start by connecting Alice and Bob with no channels. - net.ConnectNodes(t.t, alice, bob) - - // Check existing connection. - assertConnected(t, alice, bob) - - // Give Alice some coins so she can fund a channel. - net.SendCoins(t.t, btcutil.SatoshiPerBitcoin, alice) + ht.EnsureConnected(alice, bob) chanAmt := funding.MaxBtcFundingAmount pushAmt := btcutil.Amount(0) @@ -62,109 +55,69 @@ func testDisconnectingTargetPeer(net *lntest.NetworkHarness, t *harnessTest) { // Create a new channel that requires 1 confs before it's considered // open, then broadcast the funding transaction const numConfs = 1 - pendingUpdate, err := net.OpenPendingChannel( - alice, bob, chanAmt, pushAmt, - ) - if err != nil { - t.Fatalf("unable to open channel: %v", err) + p := lntemp.OpenChannelParams{ + Amt: chanAmt, + PushAmt: pushAmt, } + stream := ht.OpenChannelAssertPending(alice, bob, p) // At this point, the channel's funding transaction will have been // broadcast, but not confirmed. Alice and Bob's nodes should reflect // this when queried via RPC. - assertNumOpenChannelsPending(t, alice, bob, 1) + ht.AssertNumPendingOpenChannels(alice, 1) + ht.AssertNumPendingOpenChannels(bob, 1) - // Disconnect Alice-peer from Bob-peer and get error causes by one - // pending channel with detach node is existing. 
-	if err := net.DisconnectNodes(alice, bob); err != nil {
-		t.Fatalf("Bob's peer was disconnected from Alice's"+
-			" while one pending channel is existing: err %v", err)
-	}
-
-	time.Sleep(time.Millisecond * 300)
+	// Disconnecting Alice-peer from Bob-peer should have no error.
+	ht.DisconnectNodes(alice, bob)
 
 	// Assert that the connection was torn down.
-	assertNotConnected(t, alice, bob)
-
-	fundingTxID, err := chainhash.NewHash(pendingUpdate.Txid)
-	if err != nil {
-		t.Fatalf("unable to convert funding txid into chainhash.Hash:"+
-			" %v", err)
-	}
+	ht.AssertNotConnected(alice, bob)
 
 	// Mine a block, then wait for Alice's node to notify us that the
-	// channel has been opened. The funding transaction should be found
-	// within the newly mined block.
-	block := mineBlocks(t, net, numConfs, 1)[0]
-	assertTxInBlock(t, block, fundingTxID)
+	// channel has been opened.
+	ht.MineBlocksAndAssertNumTxes(numConfs, 1)
 
-	// At this point, the channel should be fully opened and there should be
-	// no pending channels remaining for either node.
-	time.Sleep(time.Millisecond * 300)
-
-	assertNumOpenChannelsPending(t, alice, bob, 0)
+	// At this point, the channel should be fully opened and there should
+	// be no pending channels remaining for either node.
+	ht.AssertNumPendingOpenChannels(alice, 0)
+	ht.AssertNumPendingOpenChannels(bob, 0)
 
 	// Reconnect the nodes so that the channel can become active.
-	net.ConnectNodes(t.t, alice, bob)
+	ht.ConnectNodes(alice, bob)
 
-	// The channel should be listed in the peer information returned by both
-	// peers.
-	outPoint := wire.OutPoint{
-		Hash:  *fundingTxID,
-		Index: pendingUpdate.OutputIndex,
-	}
+	// The channel should be listed in the peer information returned by
+	// both peers.
+	chanPoint := ht.WaitForChannelOpenEvent(stream)
 
 	// Check both nodes to ensure that the channel is ready for operation.
-	if err := net.AssertChannelExists(alice, &outPoint); err != nil {
-		t.Fatalf("unable to assert channel existence: %v", err)
-	}
-	if err := net.AssertChannelExists(bob, &outPoint); err != nil {
-		t.Fatalf("unable to assert channel existence: %v", err)
-	}
+	ht.AssertChannelExists(alice, chanPoint)
+	ht.AssertChannelExists(bob, chanPoint)
 
-	// Disconnect Alice-peer from Bob-peer and get error causes by one
-	// active channel with detach node is existing.
-	if err := net.DisconnectNodes(alice, bob); err != nil {
-		t.Fatalf("Bob's peer was disconnected from Alice's"+
-			" while one active channel is existing: err %v", err)
-	}
+	// Disconnecting Alice-peer from Bob-peer should have no error.
+	ht.DisconnectNodes(alice, bob)
 
 	// Check existing connection.
-	assertNotConnected(t, alice, bob)
+	ht.AssertNotConnected(alice, bob)
 
 	// Reconnect both nodes before force closing the channel.
-	net.ConnectNodes(t.t, alice, bob)
+	ht.ConnectNodes(alice, bob)
 
-	// Finally, immediately close the channel. This function will also block
-	// until the channel is closed and will additionally assert the relevant
-	// channel closing post conditions.
-	chanPoint := &lnrpc.ChannelPoint{
-		FundingTxid: &lnrpc.ChannelPoint_FundingTxidBytes{
-			FundingTxidBytes: pendingUpdate.Txid,
-		},
-		OutputIndex: pendingUpdate.OutputIndex,
-	}
+	// Finally, immediately close the channel. This function will also
+	// block until the channel is closed and will additionally assert the
+	// relevant channel closing post conditions.
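+	// Note that this helper also mines the force close and sweep
+	// transactions as part of its cleanup.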
+	ht.ForceCloseChannel(alice, chanPoint)
 
-	closeChannelAndAssert(t, net, alice, chanPoint, true)
-
-	// Disconnect Alice-peer from Bob-peer without getting error about
-	// existing channels.
-	if err := net.DisconnectNodes(alice, bob); err != nil {
-		t.Fatalf("unable to disconnect Bob's peer from Alice's: err %v",
-			err)
-	}
+	// Disconnecting Alice-peer from Bob-peer should have no error.
+	ht.DisconnectNodes(alice, bob)
 
 	// Check that the nodes are not connected.
-	assertNotConnected(t, alice, bob)
+	ht.AssertNotConnected(alice, bob)
 
 	// Finally, re-connect both nodes.
-	net.ConnectNodes(t.t, alice, bob)
+	ht.ConnectNodes(alice, bob)
 
 	// Check existing connection.
-	assertConnected(t, alice, bob)
-
-	// Cleanup by mining the force close and sweep transaction.
-	cleanupForceClose(t, net, alice, chanPoint)
+	ht.AssertConnected(alice, bob)
 }
 
 // testSphinxReplayPersistence verifies that replayed onion packets are rejected
diff --git a/lntest/itest/lnd_test_list_on_test.go b/lntest/itest/lnd_test_list_on_test.go
index 879b48e5d..1c75310bc 100644
--- a/lntest/itest/lnd_test_list_on_test.go
+++ b/lntest/itest/lnd_test_list_on_test.go
@@ -36,10 +36,6 @@ var allTestCases = []*testCase{
 		name: "open channel reorg test",
 		test: testOpenChannelAfterReorg,
 	},
-	{
-		name: "disconnecting target peer",
-		test: testDisconnectingTargetPeer,
-	},
 	{
 		name: "reconnect after ip change",
 		test: testReconnectAfterIPChange,

From 5336c8e5f5ac81f6f35caf8e56c35e31892ebeae Mon Sep 17 00:00:00 2001
From: yyforyongyu
Date: Wed, 3 Aug 2022 00:25:19 +0800
Subject: [PATCH 04/47] multi: refactor `testSphinxReplayPersistence`

---
 lntemp/harness_assertion.go           | 114 ++++++++++++++++++
 lntest/itest/list_on_test.go          |   4 +
 lntest/itest/lnd_misc_test.go         | 166 ++++++++------------------
 lntest/itest/lnd_test_list_on_test.go |   4 -
 4 files changed, 171 insertions(+), 117 deletions(-)

diff --git a/lntemp/harness_assertion.go b/lntemp/harness_assertion.go
index edf1eea15..fc0351160 100644
--- a/lntemp/harness_assertion.go
+++ b/lntemp/harness_assertion.go
@@ -1329,3 +1329,117 @@ func (h *HarnessTest) AssertConnected(a, b *node.HarnessNode) {
 	h.AssertPeerConnected(a, b)
 	h.AssertPeerConnected(b, a)
 }
+
+// AssertAmountPaid checks that the ListChannels command of the provided
+// node lists the total amount sent and received as expected for the
+// provided channel.
+func (h *HarnessTest) AssertAmountPaid(channelName string, hn *node.HarnessNode,
+	chanPoint *lnrpc.ChannelPoint, amountSent, amountReceived int64) {
+
+	checkAmountPaid := func() error {
+		// Find the targeted channel.
+		channel, err := h.findChannel(hn, chanPoint)
+		if err != nil {
+			return fmt.Errorf("assert amount failed: %w", err)
+		}
+
+		if channel.TotalSatoshisSent != amountSent {
+			return fmt.Errorf("%v: incorrect amount"+
+				" sent: %v != %v", channelName,
+				channel.TotalSatoshisSent,
+				amountSent)
+		}
+		if channel.TotalSatoshisReceived !=
+			amountReceived {
+
+			return fmt.Errorf("%v: incorrect amount"+
+				" received: %v != %v",
+				channelName,
+				channel.TotalSatoshisReceived,
+				amountReceived)
+		}
+
+		return nil
+	}
+
+	// Since HTLC inclusion in the commitment transaction might be
+	// postponed, we will check the balance a couple of times, and
+	// return an error if we still receive the wrong balance after
+	// some period of time.
+	err := wait.NoError(checkAmountPaid, DefaultTimeout)
+	require.NoError(h, err, "timeout while checking amount paid")
+}
+
+// AssertLastHTLCError checks that the last sent HTLC of the last payment sent
+// by the given node failed with the expected failure code.
+func (h *HarnessTest) AssertLastHTLCError(hn *node.HarnessNode,
+	code lnrpc.Failure_FailureCode) {
+
+	// Use -1 to specify the last HTLC.
+	h.assertHTLCError(hn, code, -1)
+}
+
+// AssertFirstHTLCError checks that the first HTLC of the last payment sent
+// by the given node failed with the expected failure code.
+func (h *HarnessTest) AssertFirstHTLCError(hn *node.HarnessNode,
+	code lnrpc.Failure_FailureCode) {
+
+	// Use 0 to specify the first HTLC.
+	h.assertHTLCError(hn, code, 0)
+}
+
+// assertHTLCError checks that the HTLC at the specified index of the last
+// payment sent by the given node failed with the expected failure code.
+func (h *HarnessTest) assertHTLCError(hn *node.HarnessNode,
+	code lnrpc.Failure_FailureCode, index int) {
+
+	req := &lnrpc.ListPaymentsRequest{
+		IncludeIncomplete: true,
+	}
+
+	err := wait.NoError(func() error {
+		paymentsResp := hn.RPC.ListPayments(req)
+
+		payments := paymentsResp.Payments
+		if len(payments) == 0 {
+			return fmt.Errorf("no payments found")
+		}
+
+		payment := payments[len(payments)-1]
+		htlcs := payment.Htlcs
+		if len(htlcs) == 0 {
+			return fmt.Errorf("no htlcs found")
+		}
+
+		// If the index is greater than 0, check we have enough htlcs.
+		if index > 0 && len(htlcs) <= index {
+			return fmt.Errorf("not enough htlcs")
+		}
+
+		// If index is less than 0, we will read the last htlc.
+		if index < 0 {
+			index = len(htlcs) - 1
+		}
+
+		htlc := htlcs[index]
+
+		// The htlc must have a status of failed.
+		if htlc.Status != lnrpc.HTLCAttempt_FAILED {
+			return fmt.Errorf("htlc should be failed")
+		}
+
+		// The failure field must not be empty.
+		if htlc.Failure == nil {
+			return fmt.Errorf("expected htlc failure")
+		}
+
+		// Exit if the expected code is found.
+		if htlc.Failure.Code == code {
+			return nil
+		}
+
+		return fmt.Errorf("unexpected failure code")
+	}, DefaultTimeout)
+
+	require.NoError(h, err, "timeout checking HTLC error")
+}
diff --git a/lntest/itest/list_on_test.go b/lntest/itest/list_on_test.go
index 8647c8d30..f60a70b8d 100644
--- a/lntest/itest/list_on_test.go
+++ b/lntest/itest/list_on_test.go
@@ -79,4 +79,8 @@ var allTestCasesTemp = []*lntemp.TestCase{
 		Name:     "disconnecting target peer",
 		TestFunc: testDisconnectingTargetPeer,
 	},
+	{
+		Name:     "sphinx replay persistence",
+		TestFunc: testSphinxReplayPersistence,
+	},
 }
diff --git a/lntest/itest/lnd_misc_test.go b/lntest/itest/lnd_misc_test.go
index b0558e757..dda61bc2f 100644
--- a/lntest/itest/lnd_misc_test.go
+++ b/lntest/itest/lnd_misc_test.go
@@ -1,7 +1,6 @@
 package itest
 
 import (
-	"bytes"
 	"context"
 	"crypto/rand"
 	"encoding/hex"
@@ -13,12 +12,12 @@ import (
 	"github.com/btcsuite/btcd/btcutil"
 	"github.com/btcsuite/btcd/wire"
 	"github.com/btcsuite/btcwallet/wallet"
-	"github.com/davecgh/go-spew/spew"
 	"github.com/lightningnetwork/lnd/chainreg"
 	"github.com/lightningnetwork/lnd/funding"
 	"github.com/lightningnetwork/lnd/input"
 	"github.com/lightningnetwork/lnd/lncfg"
 	"github.com/lightningnetwork/lnd/lnrpc"
+	"github.com/lightningnetwork/lnd/lnrpc/invoicesrpc"
 	"github.com/lightningnetwork/lnd/lnrpc/routerrpc"
 	"github.com/lightningnetwork/lnd/lnrpc/walletrpc"
 	"github.com/lightningnetwork/lnd/lntemp"
 	"github.com/lightningnetwork/lnd/lntest"
 	"github.com/lightningnetwork/lnd/lntest/wait"
 	"github.com/lightningnetwork/lnd/lnwallet"
@@ -120,38 +119,28 @@ func testDisconnectingTargetPeer(ht *lntemp.HarnessTest) {
 	ht.AssertConnected(alice, bob)
 }
 
-// testSphinxReplayPersistence verifies that replayed onion packets are rejected
-// by a remote peer after a restart. We use a combination of unsafe
-// configuration arguments to force Carol to replay the same sphinx packet after
-// reconnecting to Dave, and compare the returned failure message with what we
-// expect for replayed onion packets.
-func testSphinxReplayPersistence(net *lntest.NetworkHarness, t *harnessTest) {
-	ctxb := context.Background()
-
-	// Open a channel with 100k satoshis between Carol and Dave with Carol being
-	// the sole funder of the channel.
+// testSphinxReplayPersistence verifies that replayed onion packets are
+// rejected by a remote peer after a restart. We use a combination of unsafe
+// configuration arguments to force Carol to replay the same sphinx packet
+// after reconnecting to Dave, and compare the returned failure message with
+// what we expect for replayed onion packets.
+func testSphinxReplayPersistence(ht *lntemp.HarnessTest) {
+	// Open a channel with 100k satoshis between Carol and Dave with Carol
+	// being the sole funder of the channel.
 	chanAmt := btcutil.Amount(100000)
 
 	// First, we'll create Dave, the receiver, and start him in hodl mode.
-	dave := net.NewNode(t.t, "Dave", []string{"--hodl.exit-settle"})
-
-	// We must remember to shutdown the nodes we created for the duration
-	// of the tests, only leaving the two seed nodes (Alice and Bob) within
-	// our test network.
-	defer shutdownAndAssert(net, t, dave)
+	dave := ht.NewNode("Dave", []string{"--hodl.exit-settle"})
 
 	// Next, we'll create Carol and establish a channel from her to
 	// Dave. Carol is started with unsafe-replay, which will cause her to
 	// replay any pending Adds held in memory upon reconnection.
- carol := net.NewNode(t.t, "Carol", []string{"--unsafe-replay"}) - defer shutdownAndAssert(net, t, carol) + carol := ht.NewNode("Carol", []string{"--unsafe-replay"}) + ht.FundCoins(btcutil.SatoshiPerBitcoin, carol) - net.ConnectNodes(t.t, carol, dave) - net.SendCoins(t.t, btcutil.SatoshiPerBitcoin, carol) - - chanPoint := openChannelAndAssert( - t, net, carol, dave, - lntest.OpenChannelParams{ + ht.ConnectNodes(carol, dave) + chanPoint := ht.OpenChannel( + carol, dave, lntemp.OpenChannelParams{ Amt: chanAmt, }, ) @@ -161,136 +150,87 @@ func testSphinxReplayPersistence(net *lntest.NetworkHarness, t *harnessTest) { // by paying from Carol directly to Dave, because the '--unsafe-replay' // setup doesn't apply to locally added htlcs. In that case, the // mailbox, that is responsible for generating the replay, is bypassed. - fred := net.NewNode(t.t, "Fred", nil) - defer shutdownAndAssert(net, t, fred) + fred := ht.NewNode("Fred", nil) + ht.FundCoins(btcutil.SatoshiPerBitcoin, fred) - net.ConnectNodes(t.t, fred, carol) - net.SendCoins(t.t, btcutil.SatoshiPerBitcoin, fred) - - chanPointFC := openChannelAndAssert( - t, net, fred, carol, - lntest.OpenChannelParams{ + ht.ConnectNodes(fred, carol) + chanPointFC := ht.OpenChannel( + fred, carol, lntemp.OpenChannelParams{ Amt: chanAmt, }, ) + defer ht.CloseChannel(fred, chanPointFC) // Now that the channel is open, create an invoice for Dave which // expects a payment of 1000 satoshis from Carol paid via a particular // preimage. const paymentAmt = 1000 - preimage := bytes.Repeat([]byte("A"), 32) + preimage := ht.Random32Bytes() invoice := &lnrpc.Invoice{ Memo: "testing", RPreimage: preimage, Value: paymentAmt, } - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - invoiceResp, err := dave.AddInvoice(ctxt, invoice) - if err != nil { - t.Fatalf("unable to add invoice: %v", err) - } + invoiceResp := dave.RPC.AddInvoice(invoice) // Wait for all channels to be recognized and advertized. - err = carol.WaitForNetworkChannelOpen(chanPoint) - if err != nil { - t.Fatalf("alice didn't advertise channel before "+ - "timeout: %v", err) - } - err = dave.WaitForNetworkChannelOpen(chanPoint) - if err != nil { - t.Fatalf("bob didn't advertise channel before "+ - "timeout: %v", err) - } - err = carol.WaitForNetworkChannelOpen(chanPointFC) - if err != nil { - t.Fatalf("alice didn't advertise channel before "+ - "timeout: %v", err) - } - err = fred.WaitForNetworkChannelOpen(chanPointFC) - if err != nil { - t.Fatalf("bob didn't advertise channel before "+ - "timeout: %v", err) - } + ht.AssertTopologyChannelOpen(carol, chanPoint) + ht.AssertTopologyChannelOpen(dave, chanPoint) + ht.AssertTopologyChannelOpen(carol, chanPointFC) + ht.AssertTopologyChannelOpen(fred, chanPointFC) // With the invoice for Dave added, send a payment from Fred paying // to the above generated invoice. - ctx, cancel := context.WithCancel(ctxb) - defer cancel() - - payStream, err := fred.RouterClient.SendPaymentV2( - ctx, - &routerrpc.SendPaymentRequest{ - PaymentRequest: invoiceResp.PaymentRequest, - TimeoutSeconds: 60, - FeeLimitMsat: noFeeLimitMsat, - }, - ) - if err != nil { - t.Fatalf("unable to open payment stream: %v", err) + req := &routerrpc.SendPaymentRequest{ + PaymentRequest: invoiceResp.PaymentRequest, + TimeoutSeconds: 60, + FeeLimitMsat: noFeeLimitMsat, } - - time.Sleep(200 * time.Millisecond) + payStream := fred.RPC.SendPayment(req) // Dave's invoice should not be marked as settled. 
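 	// Dave was started with --hodl.exit-settle, so he holds the incoming
 	// HTLC rather than settling it.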
-	payHash := &lnrpc.PaymentHash{
-		RHash: invoiceResp.RHash,
-	}
-	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
-	dbInvoice, err := dave.LookupInvoice(ctxt, payHash)
-	if err != nil {
-		t.Fatalf("unable to lookup invoice: %v", err)
-	}
-	if dbInvoice.Settled { // nolint:staticcheck
-		t.Fatalf("dave's invoice should not be marked as settled: %v",
-			spew.Sdump(dbInvoice))
+	msg := &invoicesrpc.LookupInvoiceMsg{
+		InvoiceRef: &invoicesrpc.LookupInvoiceMsg_PaymentAddr{
+			PaymentAddr: invoiceResp.PaymentAddr,
+		},
 	}
+	dbInvoice := dave.RPC.LookupInvoiceV2(msg)
+	require.NotEqual(ht, lnrpc.Invoice_SETTLED, dbInvoice.State,
+		"dave's invoice should not be marked as settled")
 
 	// With the payment sent but hedl, all balance related stats should not
 	// have changed.
-	err = wait.InvariantNoError(
-		assertAmountSent(0, carol, dave), 3*time.Second,
-	)
-	if err != nil {
-		t.Fatalf(err.Error())
-	}
+	ht.AssertAmountPaid("carol => dave", carol, chanPoint, 0, 0)
+	ht.AssertAmountPaid("dave <= carol", dave, chanPoint, 0, 0)
+
+	// Before we restart Dave, make sure both Carol and Dave have added the
+	// HTLC.
+	ht.AssertNumActiveHtlcs(carol, 2)
+	ht.AssertNumActiveHtlcs(dave, 1)
 
 	// With the first payment sent, restart dave to make sure he is
 	// persisting the information required to detect replayed sphinx
 	// packets.
-	if err := net.RestartNode(dave, nil); err != nil {
-		t.Fatalf("unable to restart dave: %v", err)
-	}
+	ht.RestartNode(dave)
 
 	// Carol should retransmit the Add hedl in her mailbox on startup. Dave
 	// should not accept the replayed Add, and actually fail back the
 	// pending payment. Even though he still holds the original settle, if
 	// he does fail, it is almost certainly caused by the sphinx replay
 	// protection, as it is the only validation we do in hodl mode.
-	result, err := getPaymentResult(payStream)
-	if err != nil {
-		t.Fatalf("unable to receive payment response: %v", err)
-	}
-
+	//
 	// Assert that Fred receives the expected failure after Carol sent a
 	// duplicate packet that fails due to sphinx replay detection.
-	if result.Status == lnrpc.Payment_SUCCEEDED {
-		t.Fatalf("expected payment error")
-	}
-	assertLastHTLCError(t, fred, lnrpc.Failure_INVALID_ONION_KEY)
+	ht.AssertPaymentStatusFromStream(payStream, lnrpc.Payment_FAILED)
+	ht.AssertLastHTLCError(fred, lnrpc.Failure_INVALID_ONION_KEY)
 
 	// Since the payment failed, the balance should still be left
 	// unaltered.
-	err = wait.InvariantNoError(
-		assertAmountSent(0, carol, dave), 3*time.Second,
-	)
-	if err != nil {
-		t.Fatalf(err.Error())
-	}
-
-	closeChannelAndAssert(t, net, carol, chanPoint, true)
+	ht.AssertAmountPaid("carol => dave", carol, chanPoint, 0, 0)
+	ht.AssertAmountPaid("dave <= carol", dave, chanPoint, 0, 0)
 
 	// Cleanup by mining the force close and sweep transaction.
-	cleanupForceClose(t, net, carol, chanPoint)
+	ht.ForceCloseChannel(carol, chanPoint)
 }
 
 // testListChannels checks that the response from ListChannels is correct. 
It diff --git a/lntest/itest/lnd_test_list_on_test.go b/lntest/itest/lnd_test_list_on_test.go index 879b48e5d..1c75310bc 100644 --- a/lntest/itest/lnd_test_list_on_test.go +++ b/lntest/itest/lnd_test_list_on_test.go @@ -64,10 +64,6 @@ var allTestCases = []*testCase{ name: "single hop invoice", test: testSingleHopInvoice, }, - { - name: "sphinx replay persistence", - test: testSphinxReplayPersistence, - }, { name: "list channels", test: testListChannels, From d1ac08b33617dd86e19858ce17e0fdcb1ba423af Mon Sep 17 00:00:00 2001 From: yyforyongyu Date: Wed, 3 Aug 2022 01:00:20 +0800 Subject: [PATCH 05/47] itest+lntemp: refactor `testListChannels` --- lntemp/harness.go | 10 ++ lntest/itest/assertions.go | 28 ------ lntest/itest/list_on_test.go | 4 + lntest/itest/lnd_misc_test.go | 139 +++++++++++--------------- lntest/itest/lnd_test_list_on_test.go | 4 - 5 files changed, 71 insertions(+), 114 deletions(-) diff --git a/lntemp/harness.go b/lntemp/harness.go index ed9865f28..927f8c7cd 100644 --- a/lntemp/harness.go +++ b/lntemp/harness.go @@ -1259,3 +1259,13 @@ func (h *HarnessTest) MineBlocksAndAssertNumTxes(num uint32, return blocks } + +// QueryChannelByChanPoint tries to find a channel matching the channel point +// and asserts. It returns the channel found. +func (h *HarnessTest) QueryChannelByChanPoint(hn *node.HarnessNode, + chanPoint *lnrpc.ChannelPoint) *lnrpc.Channel { + + channel, err := h.findChannel(hn, chanPoint) + require.NoError(h, err, "failed to query channel") + return channel +} diff --git a/lntest/itest/assertions.go b/lntest/itest/assertions.go index 435e7a4a7..1e6b325a7 100644 --- a/lntest/itest/assertions.go +++ b/lntest/itest/assertions.go @@ -977,34 +977,6 @@ func assertLastHTLCError(t *harnessTest, node *lntest.HarnessNode, require.Equal(t.t, code, htlc.Failure.Code, "unexpected failure code") } -func assertChannelConstraintsEqual( - t *harnessTest, want, got *lnrpc.ChannelConstraints) { - - t.t.Helper() - - require.Equal(t.t, want.CsvDelay, got.CsvDelay, "CsvDelay mismatched") - require.Equal( - t.t, want.ChanReserveSat, got.ChanReserveSat, - "ChanReserveSat mismatched", - ) - require.Equal( - t.t, want.DustLimitSat, got.DustLimitSat, - "DustLimitSat mismatched", - ) - require.Equal( - t.t, want.MaxPendingAmtMsat, got.MaxPendingAmtMsat, - "MaxPendingAmtMsat mismatched", - ) - require.Equal( - t.t, want.MinHtlcMsat, got.MinHtlcMsat, - "MinHtlcMsat mismatched", - ) - require.Equal( - t.t, want.MaxAcceptedHtlcs, got.MaxAcceptedHtlcs, - "MaxAcceptedHtlcs mismatched", - ) -} - // assertAmountPaid checks that the ListChannels command of the provided // node list the total amount sent and received as expected for the // provided channel. diff --git a/lntest/itest/list_on_test.go b/lntest/itest/list_on_test.go index f60a70b8d..77ec03502 100644 --- a/lntest/itest/list_on_test.go +++ b/lntest/itest/list_on_test.go @@ -83,4 +83,8 @@ var allTestCasesTemp = []*lntemp.TestCase{ Name: "sphinx replay persistence", TestFunc: testSphinxReplayPersistence, }, + { + Name: "list channels", + TestFunc: testListChannels, + }, } diff --git a/lntest/itest/lnd_misc_test.go b/lntest/itest/lnd_misc_test.go index dda61bc2f..f95ab658e 100644 --- a/lntest/itest/lnd_misc_test.go +++ b/lntest/itest/lnd_misc_test.go @@ -237,31 +237,21 @@ func testSphinxReplayPersistence(ht *lntemp.HarnessTest) { // tests the values in all ChannelConstraints are returned as expected. Once // ListChannels becomes mature, a test against all fields in ListChannels // should be performed. 
-func testListChannels(net *lntest.NetworkHarness, t *harnessTest) { - ctxb := context.Background() - +func testListChannels(ht *lntemp.HarnessTest) { const aliceRemoteMaxHtlcs = 50 const bobRemoteMaxHtlcs = 100 - // Create two fresh nodes and open a channel between them. - alice := net.NewNode(t.t, "Alice", nil) - defer shutdownAndAssert(net, t, alice) + // Get the standby nodes and open a channel between them. + alice, bob := ht.Alice, ht.Bob - bob := net.NewNode( - t.t, "Bob", []string{ - fmt.Sprintf( - "--default-remote-max-htlcs=%v", - bobRemoteMaxHtlcs, - ), - }, - ) - defer shutdownAndAssert(net, t, bob) + args := []string{fmt.Sprintf( + "--default-remote-max-htlcs=%v", + bobRemoteMaxHtlcs, + )} + ht.RestartNodeWithExtraArgs(bob, args) // Connect Alice to Bob. - net.ConnectNodes(t.t, alice, bob) - - // Give Alice some coins so she can fund a channel. - net.SendCoins(t.t, btcutil.SatoshiPerBitcoin, alice) + ht.EnsureConnected(alice, bob) // Open a channel with 100k satoshis between Alice and Bob with Alice // being the sole funder of the channel. The minial HTLC amount is set @@ -270,54 +260,30 @@ func testListChannels(net *lntest.NetworkHarness, t *harnessTest) { chanAmt := btcutil.Amount(100000) pushAmt := btcutil.Amount(1000) - chanPoint := openChannelAndAssert( - t, net, alice, bob, - lntest.OpenChannelParams{ - Amt: chanAmt, - PushAmt: pushAmt, - MinHtlc: customizedMinHtlc, - RemoteMaxHtlcs: aliceRemoteMaxHtlcs, - }, - ) - - // Wait for Alice and Bob to receive the channel edge from the - // funding manager. - err := alice.WaitForNetworkChannelOpen(chanPoint) - if err != nil { - t.Fatalf("alice didn't see the alice->bob channel before "+ - "timeout: %v", err) - } - - err = bob.WaitForNetworkChannelOpen(chanPoint) - if err != nil { - t.Fatalf("bob didn't see the bob->alice channel before "+ - "timeout: %v", err) + p := lntemp.OpenChannelParams{ + Amt: chanAmt, + PushAmt: pushAmt, + MinHtlc: customizedMinHtlc, + RemoteMaxHtlcs: aliceRemoteMaxHtlcs, } + chanPoint := ht.OpenChannel(alice, bob, p) + defer ht.CloseChannel(alice, chanPoint) // Alice should have one channel opened with Bob. - assertNodeNumChannels(t, alice, 1) + ht.AssertNodeNumChannels(alice, 1) // Bob should have one channel opened with Alice. - assertNodeNumChannels(t, bob, 1) - - // Get the ListChannel response from Alice. - listReq := &lnrpc.ListChannelsRequest{} - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - resp, err := alice.ListChannels(ctxt, listReq) - if err != nil { - t.Fatalf("unable to query for %s's channel list: %v", - alice.Name(), err) - } + ht.AssertNodeNumChannels(bob, 1) // Check the returned response is correct. - aliceChannel := resp.Channels[0] + aliceChannel := ht.QueryChannelByChanPoint(alice, chanPoint) // Since Alice is the initiator, she pays the commit fee. aliceBalance := int64(chanAmt) - aliceChannel.CommitFee - int64(pushAmt) // Check the balance related fields are correct. - require.Equal(t.t, aliceBalance, aliceChannel.LocalBalance) - require.EqualValues(t.t, pushAmt, aliceChannel.RemoteBalance) - require.EqualValues(t.t, pushAmt, aliceChannel.PushAmountSat) + require.Equal(ht, aliceBalance, aliceChannel.LocalBalance) + require.EqualValues(ht, pushAmt, aliceChannel.RemoteBalance) + require.EqualValues(ht, pushAmt, aliceChannel.PushAmountSat) // Calculate the dust limit we'll use for the test. 
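 	// Deriving it from input.UnknownWitnessSize mirrors how lnwallet
 	// computes its default dust limit, so it should equal the value
 	// reported in the channel's local constraints.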
 	dustLimit := lnwallet.DustLimitForSize(input.UnknownWitnessSize)
 
 	// defaultConstraints is a ChannelConstraints with default values. It
 	// is used to check against Alice's local channel constraints.
 	defaultConstraints := &lnrpc.ChannelConstraints{
 		CsvDelay:          4,
 		ChanReserveSat:    1000,
 		DustLimitSat:      uint64(dustLimit),
 		MaxPendingAmtMsat: 99000000,
 		MinHtlcMsat:       1,
 		MaxAcceptedHtlcs:  bobRemoteMaxHtlcs,
 	}
 	assertChannelConstraintsEqual(
-		t, defaultConstraints, aliceChannel.LocalConstraints,
+		ht, defaultConstraints, aliceChannel.LocalConstraints,
 	)
 
-	// customizedConstraints is a ChannelConstraints with customized values.
-	// Ideally, all these values can be passed in when creating the channel.
-	// Currently, only the MinHtlcMsat is customized. It is used to check
-	// against Alice's remote channel constratins.
+	// customizedConstraints is a ChannelConstraints with customized
+	// values. Ideally, all these values can be passed in when creating the
+	// channel. Currently, only the MinHtlcMsat is customized. It is used
+	// to check against Alice's remote channel constraints.
 	customizedConstraints := &lnrpc.ChannelConstraints{
 		CsvDelay:          4,
 		ChanReserveSat:    1000,
 		DustLimitSat:      uint64(dustLimit),
 		MaxPendingAmtMsat: 99000000,
 		MinHtlcMsat:       customizedMinHtlc,
 		MaxAcceptedHtlcs:  aliceRemoteMaxHtlcs,
 	}
 	assertChannelConstraintsEqual(
-		t, customizedConstraints, aliceChannel.RemoteConstraints,
+		ht, customizedConstraints, aliceChannel.RemoteConstraints,
 	)
 
 	// Get the ListChannel response for Bob.
-	listReq = &lnrpc.ListChannelsRequest{}
-	ctxb = context.Background()
-	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
-	resp, err = bob.ListChannels(ctxt, listReq)
-	if err != nil {
-		t.Fatalf("unable to query for %s's channel "+
-			"list: %v", bob.Name(), err)
-	}
-
-	bobChannel := resp.Channels[0]
-	if bobChannel.ChannelPoint != aliceChannel.ChannelPoint {
-		t.Fatalf("Bob's channel point mismatched, want: %s, got: %s",
-			chanPoint.String(), bobChannel.ChannelPoint,
-		)
-	}
+	bobChannel := ht.QueryChannelByChanPoint(bob, chanPoint)
+	require.Equal(ht, aliceChannel.ChannelPoint, bobChannel.ChannelPoint,
+		"Bob's channel point mismatched")
 
 	// Check the balance related fields are correct.
-	require.Equal(t.t, aliceBalance, bobChannel.RemoteBalance)
-	require.EqualValues(t.t, pushAmt, bobChannel.LocalBalance)
-	require.EqualValues(t.t, pushAmt, bobChannel.PushAmountSat)
+	require.Equal(ht, aliceBalance, bobChannel.RemoteBalance)
+	require.EqualValues(ht, pushAmt, bobChannel.LocalBalance)
+	require.EqualValues(ht, pushAmt, bobChannel.PushAmountSat)
 
 	// Check channel constraints match. Alice's local channel constraint
 	// should be equal to Bob's remote channel constraint, and her remote
 	// one should be equal to Bob's local one.
assertChannelConstraintsEqual( - t, aliceChannel.LocalConstraints, bobChannel.RemoteConstraints, + ht, aliceChannel.LocalConstraints, bobChannel.RemoteConstraints, ) assertChannelConstraintsEqual( - t, aliceChannel.RemoteConstraints, bobChannel.LocalConstraints, + ht, aliceChannel.RemoteConstraints, bobChannel.LocalConstraints, ) } @@ -1397,3 +1351,24 @@ func testListAddresses(net *lntest.NetworkHarness, t *harnessTest) { require.Equal(t.t, len(generatedAddr), foundAddresses) } + +func assertChannelConstraintsEqual(ht *lntemp.HarnessTest, + want, got *lnrpc.ChannelConstraints) { + + require.Equal(ht, want.CsvDelay, got.CsvDelay, "CsvDelay mismatched") + + require.Equal(ht, want.ChanReserveSat, got.ChanReserveSat, + "ChanReserveSat mismatched") + + require.Equal(ht, want.DustLimitSat, got.DustLimitSat, + "DustLimitSat mismatched") + + require.Equal(ht, want.MaxPendingAmtMsat, got.MaxPendingAmtMsat, + "MaxPendingAmtMsat mismatched") + + require.Equal(ht, want.MinHtlcMsat, got.MinHtlcMsat, + "MinHtlcMsat mismatched") + + require.Equal(ht, want.MaxAcceptedHtlcs, got.MaxAcceptedHtlcs, + "MaxAcceptedHtlcs mismatched") +} diff --git a/lntest/itest/lnd_test_list_on_test.go b/lntest/itest/lnd_test_list_on_test.go index 1c75310bc..bb3f27921 100644 --- a/lntest/itest/lnd_test_list_on_test.go +++ b/lntest/itest/lnd_test_list_on_test.go @@ -64,10 +64,6 @@ var allTestCases = []*testCase{ name: "single hop invoice", test: testSingleHopInvoice, }, - { - name: "list channels", - test: testListChannels, - }, { name: "test update node announcement rpc", test: testUpdateNodeAnnouncement, From ba61e9edffb30bc3644e7e5fcf59f4832466ec3f Mon Sep 17 00:00:00 2001 From: yyforyongyu Date: Wed, 3 Aug 2022 21:29:25 +0800 Subject: [PATCH 06/47] itest+lntemp: refactor `testMaxPendingChannels` --- lntemp/harness.go | 43 +++++++++++++---- lntemp/harness_assertion.go | 22 +++++++-- lntest/itest/list_on_test.go | 4 ++ lntest/itest/lnd_misc_test.go | 68 +++++++++------------------ lntest/itest/lnd_test_list_on_test.go | 4 -- 5 files changed, 78 insertions(+), 63 deletions(-) diff --git a/lntemp/harness.go b/lntemp/harness.go index 927f8c7cd..397ba3524 100644 --- a/lntemp/harness.go +++ b/lntemp/harness.go @@ -693,12 +693,10 @@ type OpenChannelParams struct { ScidAlias bool } -// OpenChannelAssertPending attempts to open a channel between srcNode and -// destNode with the passed channel funding parameters. Once the `OpenChannel` -// is called, it will consume the first event it receives from the open channel -// client and asserts it's a channel pending event. -func (h *HarnessTest) OpenChannelAssertPending(srcNode, - destNode *node.HarnessNode, p OpenChannelParams) rpc.OpenChanClient { +// prepareOpenChannel waits for both nodes to be synced to chain and returns an +// OpenChannelRequest. +func (h *HarnessTest) prepareOpenChannel(srcNode, destNode *node.HarnessNode, + p OpenChannelParams) *lnrpc.OpenChannelRequest { // Wait until srcNode and destNode have the latest chain synced. // Otherwise, we may run into a check within the funding manager that @@ -714,8 +712,8 @@ func (h *HarnessTest) OpenChannelAssertPending(srcNode, minConfs = 0 } - // Prepare the request and open the channel. - openReq := &lnrpc.OpenChannelRequest{ + // Prepare the request. 
+ return &lnrpc.OpenChannelRequest{ NodePubkey: destNode.PubKey[:], LocalFundingAmount: int64(p.Amt), PushSat: int64(p.PushAmt), @@ -730,6 +728,17 @@ func (h *HarnessTest) OpenChannelAssertPending(srcNode, ZeroConf: p.ZeroConf, ScidAlias: p.ScidAlias, } +} + +// OpenChannelAssertPending attempts to open a channel between srcNode and +// destNode with the passed channel funding parameters. Once the `OpenChannel` +// is called, it will consume the first event it receives from the open channel +// client and asserts it's a channel pending event. +func (h *HarnessTest) OpenChannelAssertPending(srcNode, + destNode *node.HarnessNode, p OpenChannelParams) rpc.OpenChanClient { + + // Prepare the request and open the channel. + openReq := h.prepareOpenChannel(srcNode, destNode, p) respStream := srcNode.RPC.OpenChannel(openReq) // Consume the "channel pending" update. This waits until the node @@ -785,6 +794,24 @@ func (h *HarnessTest) OpenChannel(alice, bob *node.HarnessNode, return fundingChanPoint } +// OpenChannelAssertErr opens a channel between node srcNode and destNode, +// asserts that the expected error is returned from the channel opening. +func (h *HarnessTest) OpenChannelAssertErr(srcNode, destNode *node.HarnessNode, + p OpenChannelParams, expectedErr error) { + + // Prepare the request and open the channel. + openReq := h.prepareOpenChannel(srcNode, destNode, p) + respStream := srcNode.RPC.OpenChannel(openReq) + + // Receive an error to be sent from the stream. + _, err := h.receiveOpenChannelUpdate(respStream) + + // Use string comparison here as we haven't codified all the RPC errors + // yet. + require.Containsf(h, err.Error(), expectedErr.Error(), "unexpected "+ + "error returned, want %v, got %v", expectedErr, err) +} + // CloseChannelAssertPending attempts to close the channel indicated by the // passed channel point, initiated by the passed node. Once the CloseChannel // rpc is called, it will consume one event and assert it's a close pending diff --git a/lntemp/harness_assertion.go b/lntemp/harness_assertion.go index fc0351160..ecbbbb178 100644 --- a/lntemp/harness_assertion.go +++ b/lntemp/harness_assertion.go @@ -199,6 +199,20 @@ func (h *HarnessTest) AssertNumEdges(hn *node.HarnessNode, func (h *HarnessTest) ReceiveOpenChannelUpdate( stream rpc.OpenChanClient) *lnrpc.OpenStatusUpdate { + update, err := h.receiveOpenChannelUpdate(stream) + require.NoError(h, err, "received err from open channel stream") + + return update +} + +// receiveOpenChannelUpdate waits until a message or an error is received on +// the stream or the timeout is reached. +// +// TODO(yy): use generics to unify all receiving stream update once go@1.18 is +// used. 
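+//
+// On timeout the test is failed immediately; otherwise the first update or
+// error received from the stream is returned to the caller.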
+func (h *HarnessTest) receiveOpenChannelUpdate( + stream rpc.OpenChanClient) (*lnrpc.OpenStatusUpdate, error) { + chanMsg := make(chan *lnrpc.OpenStatusUpdate) errChan := make(chan error) go func() { @@ -216,16 +230,14 @@ func (h *HarnessTest) ReceiveOpenChannelUpdate( case <-time.After(DefaultTimeout): require.Fail(h, "timeout", "timeout waiting for open channel "+ "update sent") + return nil, nil case err := <-errChan: - require.Failf(h, "open channel stream", - "received err from open channel stream: %v", err) + return nil, err case updateMsg := <-chanMsg: - return updateMsg + return updateMsg, nil } - - return nil } // WaitForChannelOpenEvent waits for a notification that a channel is open by diff --git a/lntest/itest/list_on_test.go b/lntest/itest/list_on_test.go index 77ec03502..e6194cbfb 100644 --- a/lntest/itest/list_on_test.go +++ b/lntest/itest/list_on_test.go @@ -87,4 +87,8 @@ var allTestCasesTemp = []*lntemp.TestCase{ Name: "list channels", TestFunc: testListChannels, }, + { + Name: "max pending channel", + TestFunc: testMaxPendingChannels, + }, } diff --git a/lntest/itest/lnd_misc_test.go b/lntest/itest/lnd_misc_test.go index f95ab658e..709f6ff11 100644 --- a/lntest/itest/lnd_misc_test.go +++ b/lntest/itest/lnd_misc_test.go @@ -10,7 +10,6 @@ import ( "time" "github.com/btcsuite/btcd/btcutil" - "github.com/btcsuite/btcd/wire" "github.com/btcsuite/btcwallet/wallet" "github.com/lightningnetwork/lnd/chainreg" "github.com/lightningnetwork/lnd/funding" @@ -342,7 +341,7 @@ func testListChannels(ht *lntemp.HarnessTest) { // testMaxPendingChannels checks that error is returned from remote peer if // max pending channel number was exceeded and that '--maxpendingchannels' flag // exists and works properly. -func testMaxPendingChannels(net *lntest.NetworkHarness, t *harnessTest) { +func testMaxPendingChannels(ht *lntemp.HarnessTest) { maxPendingChannels := lncfg.DefaultMaxPendingChannels + 1 amount := funding.MaxBtcFundingAmount @@ -351,22 +350,23 @@ func testMaxPendingChannels(net *lntest.NetworkHarness, t *harnessTest) { args := []string{ fmt.Sprintf("--maxpendingchannels=%v", maxPendingChannels), } - carol := net.NewNode(t.t, "Carol", args) - defer shutdownAndAssert(net, t, carol) + carol := ht.NewNode("Carol", args) - net.ConnectNodes(t.t, net.Alice, carol) + alice := ht.Alice + ht.ConnectNodes(alice, carol) carolBalance := btcutil.Amount(maxPendingChannels) * amount - net.SendCoins(t.t, carolBalance, carol) + ht.FundCoins(carolBalance, carol) // Send open channel requests without generating new blocks thereby // increasing pool of pending channels. Then check that we can't open // the channel if the number of pending channels exceed max value. - openStreams := make([]lnrpc.Lightning_OpenChannelClient, maxPendingChannels) + openStreams := make( + []lnrpc.Lightning_OpenChannelClient, maxPendingChannels, + ) for i := 0; i < maxPendingChannels; i++ { - stream := openChannelStream( - t, net, net.Alice, carol, - lntest.OpenChannelParams{ + stream := ht.OpenChannelAssertPending( + alice, carol, lntemp.OpenChannelParams{ Amt: amount, }, ) @@ -375,60 +375,36 @@ func testMaxPendingChannels(net *lntest.NetworkHarness, t *harnessTest) { // Carol exhausted available amount of pending channels, next open // channel request should cause ErrorGeneric to be sent back to Alice. 
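The hunk below replaces hand-rolled error matching with the new OpenChannelAssertErr helper. A minimal usage sketch, assuming a *lntemp.HarnessTest named ht and two connected nodes alice and carol (all placeholders for whatever the surrounding test sets up):

    // Expect the open attempt to fail once the peer's pending-channel
    // limit is exhausted. The helper matches on the error string,
    // since the RPC errors are not yet codified.
    ht.OpenChannelAssertErr(
        alice, carol, lntemp.OpenChannelParams{Amt: amount},
        lnwire.ErrMaxPendingChannels,
    )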
- _, err := net.OpenChannel(
- net.Alice, carol, lntest.OpenChannelParams{
+ ht.OpenChannelAssertErr(
+ alice, carol, lntemp.OpenChannelParams{
 Amt: amount,
- },
+ }, lnwire.ErrMaxPendingChannels,
 )
- if err == nil {
- t.Fatalf("error wasn't received")
- } else if !strings.Contains(
- err.Error(), lnwire.ErrMaxPendingChannels.Error(),
- ) {
-
- t.Fatalf("not expected error was received: %v", err)
- }
-
 // For now our channels are in pending state, in order to not interfere
 // with other tests we should clean up - complete opening of the
 // channel and then close it.
- // Mine 6 blocks, then wait for node's to notify us that the channel has
- // been opened. The funding transactions should be found within the
+ // Mine 6 blocks, then wait for nodes to notify us that the channel
+ // has been opened. The funding transactions should be found within the
 // first newly mined block. 6 blocks make sure the funding transaction
 // has enough confirmations to be announced publicly.
- block := mineBlocks(t, net, 6, maxPendingChannels)[0]
+ block := ht.MineBlocksAndAssertNumTxes(6, maxPendingChannels)[0]
 
 chanPoints := make([]*lnrpc.ChannelPoint, maxPendingChannels)
 for i, stream := range openStreams {
- fundingChanPoint, err := net.WaitForChannelOpen(stream)
- if err != nil {
- t.Fatalf("error while waiting for channel open: %v", err)
- }
+ fundingChanPoint := ht.WaitForChannelOpenEvent(stream)
 
- fundingTxID, err := lnrpc.GetChanPointFundingTxid(fundingChanPoint)
- if err != nil {
- t.Fatalf("unable to get txid: %v", err)
- }
+ fundingTxID := ht.GetChanPointFundingTxid(fundingChanPoint)
 
 // Ensure that the funding transaction enters a block, and is
 // properly advertised by Alice.
- assertTxInBlock(t, block, fundingTxID)
- err = net.Alice.WaitForNetworkChannelOpen(fundingChanPoint)
- if err != nil {
- t.Fatalf("channel not seen on network before "+
- "timeout: %v", err)
- }
+ ht.Miner.AssertTxInBlock(block, fundingTxID)
+ ht.AssertTopologyChannelOpen(alice, fundingChanPoint)
 
 // The channel should be listed in the peer information
 // returned by both peers.
- chanPoint := wire.OutPoint{
- Hash: *fundingTxID,
- Index: fundingChanPoint.OutputIndex,
- }
- err = net.AssertChannelExists(net.Alice, &chanPoint)
- require.NoError(t.t, err, "unable to assert channel existence")
+ ht.AssertChannelExists(alice, fundingChanPoint)
 
 chanPoints[i] = fundingChanPoint
 }
 
 // Next, close the channel between Alice and Carol, asserting that the
 // channel has been properly closed on-chain.
for _, chanPoint := range chanPoints { - closeChannelAndAssert(t, net, net.Alice, chanPoint, false) + ht.CloseChannel(alice, chanPoint) } } diff --git a/lntest/itest/lnd_test_list_on_test.go b/lntest/itest/lnd_test_list_on_test.go index bb3f27921..23a034d78 100644 --- a/lntest/itest/lnd_test_list_on_test.go +++ b/lntest/itest/lnd_test_list_on_test.go @@ -72,10 +72,6 @@ var allTestCases = []*testCase{ name: "list outgoing payments", test: testListPayments, }, - { - name: "max pending channel", - test: testMaxPendingChannels, - }, { name: "multi-hop payments", test: testMultiHopPayments, From 0c8bce0c1cd6a1910aabf9805375ca14dca4f797 Mon Sep 17 00:00:00 2001 From: yyforyongyu Date: Thu, 4 Aug 2022 00:34:01 +0800 Subject: [PATCH 07/47] itest: refactor `testGarbageCollectLinkNodes` --- lntest/itest/list_on_test.go | 4 + lntest/itest/lnd_misc_test.go | 213 ++++++-------------------- lntest/itest/lnd_test_list_on_test.go | 4 - 3 files changed, 52 insertions(+), 169 deletions(-) diff --git a/lntest/itest/list_on_test.go b/lntest/itest/list_on_test.go index e6194cbfb..4fee4e991 100644 --- a/lntest/itest/list_on_test.go +++ b/lntest/itest/list_on_test.go @@ -91,4 +91,8 @@ var allTestCasesTemp = []*lntemp.TestCase{ Name: "max pending channel", TestFunc: testMaxPendingChannels, }, + { + Name: "garbage collect link nodes", + TestFunc: testGarbageCollectLinkNodes, + }, } diff --git a/lntest/itest/lnd_misc_test.go b/lntest/itest/lnd_misc_test.go index 709f6ff11..80d5beb04 100644 --- a/lntest/itest/lnd_misc_test.go +++ b/lntest/itest/lnd_misc_test.go @@ -7,7 +7,6 @@ import ( "fmt" "io/ioutil" "strings" - "time" "github.com/btcsuite/btcd/btcutil" "github.com/btcsuite/btcwallet/wallet" @@ -20,6 +19,7 @@ import ( "github.com/lightningnetwork/lnd/lnrpc/routerrpc" "github.com/lightningnetwork/lnd/lnrpc/walletrpc" "github.com/lightningnetwork/lnd/lntemp" + "github.com/lightningnetwork/lnd/lntemp/node" "github.com/lightningnetwork/lnd/lntest" "github.com/lightningnetwork/lnd/lntest/wait" "github.com/lightningnetwork/lnd/lnwallet" @@ -416,32 +416,30 @@ func testMaxPendingChannels(ht *lntemp.HarnessTest) { } } -// testGarbageCollectLinkNodes tests that we properly garbage collect link nodes -// from the database and the set of persistent connections within the server. -func testGarbageCollectLinkNodes(net *lntest.NetworkHarness, t *harnessTest) { - ctxb := context.Background() +// testGarbageCollectLinkNodes tests that we properly garbage collect link +// nodes from the database and the set of persistent connections within the +// server. +func testGarbageCollectLinkNodes(ht *lntemp.HarnessTest) { + const chanAmt = 1000000 - const ( - chanAmt = 1000000 - ) + alice, bob := ht.Alice, ht.Bob // Open a channel between Alice and Bob which will later be // cooperatively closed. - coopChanPoint := openChannelAndAssert( - t, net, net.Alice, net.Bob, lntest.OpenChannelParams{ + coopChanPoint := ht.OpenChannel( + alice, bob, lntemp.OpenChannelParams{ Amt: chanAmt, }, ) // Create Carol's node and connect Alice to her. - carol := net.NewNode(t.t, "Carol", nil) - defer shutdownAndAssert(net, t, carol) - net.ConnectNodes(t.t, net.Alice, carol) + carol := ht.NewNode("Carol", nil) + ht.ConnectNodes(alice, carol) // Open a channel between Alice and Carol which will later be force // closed. 
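Before the force-close channel is opened below, it is worth noting how the lifecycle helpers used throughout this refactor pair up. A minimal sketch, assuming ht plus two connected, funded nodes alice and bob, with chanAmt a placeholder amount:

    // Cooperative close: CloseChannel consumes the close-pending
    // update and mines the closing transaction.
    cp := ht.OpenChannel(alice, bob, lntemp.OpenChannelParams{Amt: chanAmt})
    ht.CloseChannel(alice, cp)

    // Force close: ForceCloseChannel also mines the force-close and
    // sweep transactions, replacing the old closeChannelAndAssert plus
    // cleanupForceClose pair, as the hunks below show.
    cp = ht.OpenChannel(alice, bob, lntemp.OpenChannelParams{Amt: chanAmt})
    ht.ForceCloseChannel(alice, cp)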
- forceCloseChanPoint := openChannelAndAssert( - t, net, net.Alice, carol, lntest.OpenChannelParams{ + forceCloseChanPoint := ht.OpenChannel( + alice, carol, lntemp.OpenChannelParams{ Amt: chanAmt, }, ) @@ -449,200 +447,85 @@ func testGarbageCollectLinkNodes(net *lntest.NetworkHarness, t *harnessTest) { // Now, create Dave's a node and also open a channel between Alice and // him. This link will serve as the only persistent link throughout // restarts in this test. - dave := net.NewNode(t.t, "Dave", nil) - defer shutdownAndAssert(net, t, dave) + dave := ht.NewNode("Dave", nil) - net.ConnectNodes(t.t, net.Alice, dave) - persistentChanPoint := openChannelAndAssert( - t, net, net.Alice, dave, lntest.OpenChannelParams{ + ht.ConnectNodes(alice, dave) + persistentChanPoint := ht.OpenChannel( + alice, dave, lntemp.OpenChannelParams{ Amt: chanAmt, }, ) - // isConnected is a helper closure that checks if a peer is connected to - // Alice. - isConnected := func(pubKey string) bool { - req := &lnrpc.ListPeersRequest{} - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - resp, err := net.Alice.ListPeers(ctxt, req) - if err != nil { - t.Fatalf("unable to retrieve alice's peers: %v", err) - } - - for _, peer := range resp.Peers { - if peer.PubKey == pubKey { - return true - } - } - - return false - } - // Restart both Bob and Carol to ensure Alice is able to reconnect to // them. - if err := net.RestartNode(net.Bob, nil); err != nil { - t.Fatalf("unable to restart bob's node: %v", err) - } - if err := net.RestartNode(carol, nil); err != nil { - t.Fatalf("unable to restart carol's node: %v", err) - } + ht.RestartNode(bob) + ht.RestartNode(carol) - require.Eventually(t.t, func() bool { - return isConnected(net.Bob.PubKeyStr) - }, defaultTimeout, 20*time.Millisecond) - require.Eventually(t.t, func() bool { - return isConnected(carol.PubKeyStr) - }, defaultTimeout, 20*time.Millisecond) + ht.AssertConnected(alice, bob) + ht.AssertConnected(alice, carol) // We'll also restart Alice to ensure she can reconnect to her peers // with open channels. - if err := net.RestartNode(net.Alice, nil); err != nil { - t.Fatalf("unable to restart alice's node: %v", err) - } + ht.RestartNode(alice) - require.Eventually(t.t, func() bool { - return isConnected(net.Bob.PubKeyStr) - }, defaultTimeout, 20*time.Millisecond) - require.Eventually(t.t, func() bool { - return isConnected(carol.PubKeyStr) - }, defaultTimeout, 20*time.Millisecond) - require.Eventually(t.t, func() bool { - return isConnected(dave.PubKeyStr) - }, defaultTimeout, 20*time.Millisecond) - err := wait.Predicate(func() bool { - return isConnected(dave.PubKeyStr) - }, defaultTimeout) + ht.AssertConnected(alice, bob) + ht.AssertConnected(alice, carol) + ht.AssertConnected(alice, dave) // testReconnection is a helper closure that restarts the nodes at both // ends of a channel to ensure they do not reconnect after restarting. // When restarting Alice, we'll first need to ensure she has // reestablished her connection with Dave, as they still have an open // channel together. - testReconnection := func(node *lntest.HarnessNode) { + testReconnection := func(node *node.HarnessNode) { // Restart both nodes, to trigger the pruning logic. 
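The pattern that replaces the removed code below: restart, then assert on connectivity directly instead of polling ListPeers by hand. A minimal sketch, with hn standing in for the restarted peer:

    // RestartNode blocks until the node is running again.
    ht.RestartNode(hn)
    ht.RestartNode(alice)

    // With an open channel between the peers, the persistent
    // connection logic must bring them back together; AssertConnected
    // subsumes the old wait.Predicate polling loop, and
    // AssertNotConnected the old wait.Invariant one.
    ht.AssertConnected(alice, hn)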
- if err := net.RestartNode(node, nil); err != nil { - t.Fatalf("unable to restart %v's node: %v", - node.Name(), err) - } - - if err := net.RestartNode(net.Alice, nil); err != nil { - t.Fatalf("unable to restart alice's node: %v", err) - } + ht.RestartNode(node) + ht.RestartNode(alice) // Now restart both nodes and make sure they don't reconnect. - if err := net.RestartNode(node, nil); err != nil { - t.Fatalf("unable to restart %v's node: %v", node.Name(), - err) - } - err = wait.Invariant(func() bool { - return !isConnected(node.PubKeyStr) - }, 5*time.Second) - if err != nil { - t.Fatalf("alice reconnected to %v", node.Name()) - } + ht.RestartNode(node) + ht.AssertNotConnected(alice, node) - if err := net.RestartNode(net.Alice, nil); err != nil { - t.Fatalf("unable to restart alice's node: %v", err) - } - err = wait.Predicate(func() bool { - return isConnected(dave.PubKeyStr) - }, defaultTimeout) - if err != nil { - t.Fatalf("alice didn't reconnect to Dave") - } - - err = wait.Invariant(func() bool { - return !isConnected(node.PubKeyStr) - }, 5*time.Second) - if err != nil { - t.Fatalf("alice reconnected to %v", node.Name()) - } + ht.RestartNode(alice) + ht.AssertConnected(alice, dave) + ht.AssertNotConnected(alice, node) } // Now, we'll close the channel between Alice and Bob and ensure there // is no reconnection logic between the both once the channel is fully // closed. - closeChannelAndAssert(t, net, net.Alice, coopChanPoint, false) + ht.CloseChannel(alice, coopChanPoint) - testReconnection(net.Bob) + testReconnection(bob) // We'll do the same with Alice and Carol, but this time we'll force // close the channel instead. - closeChannelAndAssert(t, net, net.Alice, forceCloseChanPoint, true) - - // Cleanup by mining the force close and sweep transaction. - cleanupForceClose(t, net, net.Alice, forceCloseChanPoint) + ht.ForceCloseChannel(alice, forceCloseChanPoint) // We'll need to mine some blocks in order to mark the channel fully // closed. - _, err = net.Miner.Client.Generate(chainreg.DefaultBitcoinTimeLockDelta - defaultCSV) - if err != nil { - t.Fatalf("unable to generate blocks: %v", err) - } + ht.MineBlocks( + chainreg.DefaultBitcoinTimeLockDelta - defaultCSV, + ) // Before we test reconnection, we'll ensure that the channel has been // fully cleaned up for both Carol and Alice. - var predErr error - pendingChansRequest := &lnrpc.PendingChannelsRequest{} - err = wait.Predicate(func() bool { - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - pendingChanResp, err := net.Alice.PendingChannels( - ctxt, pendingChansRequest, - ) - if err != nil { - predErr = fmt.Errorf("unable to query for pending "+ - "channels: %v", err) - return false - } - - predErr = checkNumForceClosedChannels(pendingChanResp, 0) - if predErr != nil { - return false - } - - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - pendingChanResp, err = carol.PendingChannels( - ctxt, pendingChansRequest, - ) - if err != nil { - predErr = fmt.Errorf("unable to query for pending "+ - "channels: %v", err) - return false - } - - predErr = checkNumForceClosedChannels(pendingChanResp, 0) - - return predErr == nil - }, defaultTimeout) - if err != nil { - t.Fatalf("channels not marked as fully resolved: %v", predErr) - } + ht.AssertNumPendingForceClose(alice, 0) + ht.AssertNumPendingForceClose(carol, 0) testReconnection(carol) // Finally, we'll ensure that Bob and Carol no longer show in Alice's // channel graph. 
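One caveat about the graph check that follows: DescribeGraph returns Nodes as a slice of *lnrpc.LightningNode, and testify's NotContains compares the given pubkey string against each element, so a string can never match a node pointer and the assertion passes vacuously. A small self-contained illustration of the pitfall (hypothetical type and values):

    package main

    import (
        "fmt"

        "github.com/stretchr/testify/assert"
    )

    type lightningNode struct{ PubKey string }

    func main() {
        nodes := []*lightningNode{{PubKey: "02abc"}}

        // ObjectsAreEqual is what Contains/NotContains use per
        // element: a string never equals a *lightningNode, so
        // NotContains(nodes, "02abc") would always pass.
        fmt.Println(assert.ObjectsAreEqual(nodes[0], "02abc")) // false
    }

A stricter formulation would iterate channelGraph.Nodes and require.NotEqual each node's PubKey against the two pubkeys.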
- describeGraphReq := &lnrpc.ChannelGraphRequest{ - IncludeUnannounced: true, - } - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - channelGraph, err := net.Alice.DescribeGraph(ctxt, describeGraphReq) - if err != nil { - t.Fatalf("unable to query for alice's channel graph: %v", err) - } - for _, node := range channelGraph.Nodes { - if node.PubKey == net.Bob.PubKeyStr { - t.Fatalf("did not expect to find bob in the channel " + - "graph, but did") - } - if node.PubKey == carol.PubKeyStr { - t.Fatalf("did not expect to find carol in the channel " + - "graph, but did") - } - } + req := &lnrpc.ChannelGraphRequest{IncludeUnannounced: true} + channelGraph := alice.RPC.DescribeGraph(req) + require.NotContains(ht, channelGraph.Nodes, bob.PubKeyStr, + "did not expect to find bob in the channel graph, but did") + require.NotContains(ht, channelGraph.Nodes, carol.PubKeyStr, + "did not expect to find carol in the channel graph, but did") // Now that the test is done, we can also close the persistent link. - closeChannelAndAssert(t, net, net.Alice, persistentChanPoint, false) + ht.CloseChannel(alice, persistentChanPoint) } // testRejectHTLC tests that a node can be created with the flag --rejecthtlc. diff --git a/lntest/itest/lnd_test_list_on_test.go b/lntest/itest/lnd_test_list_on_test.go index 23a034d78..a4b9211ac 100644 --- a/lntest/itest/lnd_test_list_on_test.go +++ b/lntest/itest/lnd_test_list_on_test.go @@ -179,10 +179,6 @@ var allTestCases = []*testCase{ name: "failing link", test: testFailingChannel, }, - { - name: "garbage collect link nodes", - test: testGarbageCollectLinkNodes, - }, { name: "abandonchannel", test: testAbandonChannel, From 1938e42f4e44b7dba7d8cc00826c3141c6a7ced7 Mon Sep 17 00:00:00 2001 From: yyforyongyu Date: Thu, 4 Aug 2022 00:37:47 +0800 Subject: [PATCH 08/47] itest: refactor `testRejectHTLC` --- lntest/itest/list_on_test.go | 4 + lntest/itest/lnd_misc_test.go | 108 +++++++------------------- lntest/itest/lnd_test_list_on_test.go | 4 - 3 files changed, 34 insertions(+), 82 deletions(-) diff --git a/lntest/itest/list_on_test.go b/lntest/itest/list_on_test.go index 4fee4e991..ebbe9d12b 100644 --- a/lntest/itest/list_on_test.go +++ b/lntest/itest/list_on_test.go @@ -95,4 +95,8 @@ var allTestCasesTemp = []*lntemp.TestCase{ Name: "garbage collect link nodes", TestFunc: testGarbageCollectLinkNodes, }, + { + Name: "reject onward htlc", + TestFunc: testRejectHTLC, + }, } diff --git a/lntest/itest/lnd_misc_test.go b/lntest/itest/lnd_misc_test.go index 80d5beb04..39c375c42 100644 --- a/lntest/itest/lnd_misc_test.go +++ b/lntest/itest/lnd_misc_test.go @@ -2,7 +2,6 @@ package itest import ( "context" - "crypto/rand" "encoding/hex" "fmt" "io/ioutil" @@ -531,41 +530,35 @@ func testGarbageCollectLinkNodes(ht *lntemp.HarnessTest) { // testRejectHTLC tests that a node can be created with the flag --rejecthtlc. // This means that the node will reject all forwarded HTLCs but can still // accept direct HTLCs as well as send HTLCs. -func testRejectHTLC(net *lntest.NetworkHarness, t *harnessTest) { +func testRejectHTLC(ht *lntemp.HarnessTest) { // RejectHTLC // Alice ------> Carol ------> Bob // const chanAmt = btcutil.Amount(1000000) - ctxb := context.Background() + alice, bob := ht.Alice, ht.Bob // Create Carol with reject htlc flag. - carol := net.NewNode(t.t, "Carol", []string{"--rejecthtlc"}) - defer shutdownAndAssert(net, t, carol) + carol := ht.NewNode("Carol", []string{"--rejecthtlc"}) // Connect Alice to Carol. 
- net.ConnectNodes(t.t, net.Alice, carol) + ht.ConnectNodes(alice, carol) // Connect Carol to Bob. - net.ConnectNodes(t.t, carol, net.Bob) + ht.ConnectNodes(carol, bob) // Send coins to Carol. - net.SendCoins(t.t, btcutil.SatoshiPerBitcoin, carol) - - // Send coins to Alice. - net.SendCoins(t.t, btcutil.SatoshiPerBitcent, net.Alice) + ht.FundCoins(btcutil.SatoshiPerBitcoin, carol) // Open a channel between Alice and Carol. - chanPointAlice := openChannelAndAssert( - t, net, net.Alice, carol, - lntest.OpenChannelParams{ + chanPointAlice := ht.OpenChannel( + alice, carol, lntemp.OpenChannelParams{ Amt: chanAmt, }, ) // Open a channel between Carol and Bob. - chanPointCarol := openChannelAndAssert( - t, net, carol, net.Bob, - lntest.OpenChannelParams{ + chanPointCarol := ht.OpenChannel( + carol, bob, lntemp.OpenChannelParams{ Amt: chanAmt, }, ) @@ -573,103 +566,62 @@ func testRejectHTLC(net *lntest.NetworkHarness, t *harnessTest) { // Channel should be ready for payments. const payAmt = 100 - // Helper closure to generate a random pre image. - genPreImage := func() []byte { - preimage := make([]byte, 32) - - _, err := rand.Read(preimage) - if err != nil { - t.Fatalf("unable to generate preimage: %v", err) - } - - return preimage - } - // Create an invoice from Carol of 100 satoshis. // We expect Alice to be able to pay this invoice. - preimage := genPreImage() - carolInvoice := &lnrpc.Invoice{ Memo: "testing - alice should pay carol", - RPreimage: preimage, + RPreimage: ht.Random32Bytes(), Value: payAmt, } // Carol adds the invoice to her database. - resp, err := carol.AddInvoice(ctxb, carolInvoice) - if err != nil { - t.Fatalf("unable to add invoice: %v", err) - } + resp := carol.RPC.AddInvoice(carolInvoice) // Alice pays Carols invoice. - err = completePaymentRequests( - net.Alice, net.Alice.RouterClient, - []string{resp.PaymentRequest}, true, - ) - if err != nil { - t.Fatalf("unable to send payments from alice to carol: %v", err) - } + ht.CompletePaymentRequests(alice, []string{resp.PaymentRequest}) // Create an invoice from Bob of 100 satoshis. // We expect Carol to be able to pay this invoice. - preimage = genPreImage() - bobInvoice := &lnrpc.Invoice{ Memo: "testing - carol should pay bob", - RPreimage: preimage, + RPreimage: ht.Random32Bytes(), Value: payAmt, } // Bob adds the invoice to his database. - resp, err = net.Bob.AddInvoice(ctxb, bobInvoice) - if err != nil { - t.Fatalf("unable to add invoice: %v", err) - } + resp = bob.RPC.AddInvoice(bobInvoice) // Carol pays Bobs invoice. - err = completePaymentRequests( - carol, carol.RouterClient, - []string{resp.PaymentRequest}, true, - ) - if err != nil { - t.Fatalf("unable to send payments from carol to bob: %v", err) - } + ht.CompletePaymentRequests(carol, []string{resp.PaymentRequest}) // Create an invoice from Bob of 100 satoshis. // Alice attempts to pay Bob but this should fail, since we are // using Carol as a hop and her node will reject onward HTLCs. - preimage = genPreImage() - bobInvoice = &lnrpc.Invoice{ Memo: "testing - alice tries to pay bob", - RPreimage: preimage, + RPreimage: ht.Random32Bytes(), Value: payAmt, } // Bob adds the invoice to his database. - resp, err = net.Bob.AddInvoice(ctxb, bobInvoice) - if err != nil { - t.Fatalf("unable to add invoice: %v", err) - } + resp = bob.RPC.AddInvoice(bobInvoice) - // Alice attempts to pay Bobs invoice. This payment should be rejected since - // we are using Carol as an intermediary hop, Carol is running lnd with - // --rejecthtlc. 
- err = completePaymentRequests( - net.Alice, net.Alice.RouterClient, - []string{resp.PaymentRequest}, true, - ) - if err == nil { - t.Fatalf( - "should have been rejected, carol will not accept forwarded htlcs", - ) + // Alice attempts to pay Bobs invoice. This payment should be rejected + // since we are using Carol as an intermediary hop, Carol is running + // lnd with --rejecthtlc. + paymentReq := &routerrpc.SendPaymentRequest{ + PaymentRequest: resp.PaymentRequest, + TimeoutSeconds: 60, + FeeLimitMsat: noFeeLimitMsat, } + payStream := alice.RPC.SendPayment(paymentReq) + ht.AssertPaymentStatusFromStream(payStream, lnrpc.Payment_FAILED) - assertLastHTLCError(t, net.Alice, lnrpc.Failure_CHANNEL_DISABLED) + ht.AssertLastHTLCError(alice, lnrpc.Failure_CHANNEL_DISABLED) // Close all channels. - closeChannelAndAssert(t, net, net.Alice, chanPointAlice, false) - closeChannelAndAssert(t, net, carol, chanPointCarol, false) + ht.CloseChannel(alice, chanPointAlice) + ht.CloseChannel(carol, chanPointCarol) } func testNodeSignVerify(net *lntest.NetworkHarness, t *harnessTest) { diff --git a/lntest/itest/lnd_test_list_on_test.go b/lntest/itest/lnd_test_list_on_test.go index a4b9211ac..b6235e966 100644 --- a/lntest/itest/lnd_test_list_on_test.go +++ b/lntest/itest/lnd_test_list_on_test.go @@ -120,10 +120,6 @@ var allTestCases = []*testCase{ name: "multi-hop htlc error propagation", test: testHtlcErrorPropagation, }, - { - name: "reject onward htlc", - test: testRejectHTLC, - }, // TODO(roasbeef): multi-path integration test { name: "node announcement", From 4104a72b3a8ff7cb9f2c2e382133894e3ab76bfc Mon Sep 17 00:00:00 2001 From: yyforyongyu Date: Thu, 4 Aug 2022 00:40:47 +0800 Subject: [PATCH 09/47] lntemp+itest: refactor `testNodeSignVerify` --- lntemp/rpc/lnd.go | 26 +++++++++ lntest/itest/list_on_test.go | 4 ++ lntest/itest/lnd_misc_test.go | 76 +++++++++------------------ lntest/itest/lnd_test_list_on_test.go | 4 -- 4 files changed, 54 insertions(+), 56 deletions(-) diff --git a/lntemp/rpc/lnd.go b/lntemp/rpc/lnd.go index fd0c8721b..a8827b3e5 100644 --- a/lntemp/rpc/lnd.go +++ b/lntemp/rpc/lnd.go @@ -355,3 +355,29 @@ func (h *HarnessRPC) GetTransactions() *lnrpc.TransactionDetails { return resp } + +// SignMessage makes a RPC call to node's SignMessage and asserts. +func (h *HarnessRPC) SignMessage(msg []byte) *lnrpc.SignMessageResponse { + ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout) + defer cancel() + + req := &lnrpc.SignMessageRequest{Msg: msg} + resp, err := h.LN.SignMessage(ctxt, req) + require.NoErrorf(h, err, "SignMessage rpc call failed") + + return resp +} + +// VerifyMessage makes a RPC call to node's VerifyMessage and asserts. 
+func (h *HarnessRPC) VerifyMessage(msg []byte, + sig string) *lnrpc.VerifyMessageResponse { + + ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout) + defer cancel() + + req := &lnrpc.VerifyMessageRequest{Msg: msg, Signature: sig} + resp, err := h.LN.VerifyMessage(ctxt, req) + require.NoErrorf(h, err, "VerifyMessage failed") + + return resp +} diff --git a/lntest/itest/list_on_test.go b/lntest/itest/list_on_test.go index ebbe9d12b..3434bcfb9 100644 --- a/lntest/itest/list_on_test.go +++ b/lntest/itest/list_on_test.go @@ -99,4 +99,8 @@ var allTestCasesTemp = []*lntemp.TestCase{ Name: "reject onward htlc", TestFunc: testRejectHTLC, }, + { + Name: "node sign verify", + TestFunc: testNodeSignVerify, + }, } diff --git a/lntest/itest/lnd_misc_test.go b/lntest/itest/lnd_misc_test.go index 39c375c42..987925125 100644 --- a/lntest/itest/lnd_misc_test.go +++ b/lntest/itest/lnd_misc_test.go @@ -624,78 +624,50 @@ func testRejectHTLC(ht *lntemp.HarnessTest) { ht.CloseChannel(carol, chanPointCarol) } -func testNodeSignVerify(net *lntest.NetworkHarness, t *harnessTest) { - ctxb := context.Background() - +// testNodeSignVerify checks that only connected nodes are allowed to perform +// signing and verifying messages. +func testNodeSignVerify(ht *lntemp.HarnessTest) { chanAmt := funding.MaxBtcFundingAmount pushAmt := btcutil.Amount(100000) + alice, bob := ht.Alice, ht.Bob // Create a channel between alice and bob. - aliceBobCh := openChannelAndAssert( - t, net, net.Alice, net.Bob, - lntest.OpenChannelParams{ + aliceBobCh := ht.OpenChannel( + alice, bob, lntemp.OpenChannelParams{ Amt: chanAmt, PushAmt: pushAmt, }, ) - aliceMsg := []byte("alice msg") - // alice signs "alice msg" and sends her signature to bob. - sigReq := &lnrpc.SignMessageRequest{Msg: aliceMsg} - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - sigResp, err := net.Alice.SignMessage(ctxt, sigReq) - if err != nil { - t.Fatalf("SignMessage rpc call failed: %v", err) - } + aliceMsg := []byte("alice msg") + sigResp := alice.RPC.SignMessage(aliceMsg) aliceSig := sigResp.Signature - // bob verifying alice's signature should succeed since alice and bob are - // connected. - verifyReq := &lnrpc.VerifyMessageRequest{Msg: aliceMsg, Signature: aliceSig} - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - verifyResp, err := net.Bob.VerifyMessage(ctxt, verifyReq) - if err != nil { - t.Fatalf("VerifyMessage failed: %v", err) - } - if !verifyResp.Valid { - t.Fatalf("alice's signature didn't validate") - } - if verifyResp.Pubkey != net.Alice.PubKeyStr { - t.Fatalf("alice's signature doesn't contain alice's pubkey.") - } + // bob verifying alice's signature should succeed since alice and bob + // are connected. + verifyResp := bob.RPC.VerifyMessage(aliceMsg, aliceSig) + require.True(ht, verifyResp.Valid, "alice's signature didn't validate") + require.Equal(ht, verifyResp.Pubkey, alice.PubKeyStr, + "alice's signature doesn't contain alice's pubkey.") // carol is a new node that is unconnected to alice or bob. - carol := net.NewNode(t.t, "Carol", nil) - defer shutdownAndAssert(net, t, carol) - - carolMsg := []byte("carol msg") + carol := ht.NewNode("Carol", nil) // carol signs "carol msg" and sends her signature to bob. 
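With the two wrappers above in place, the rewritten assertions below become declarative. A minimal sign/verify round trip, assuming nodes signer and verifier that know each other in the graph (lnd only reports a signature as valid when the recovered pubkey maps to a known node):

    msg := []byte("some msg")
    sig := signer.RPC.SignMessage(msg).Signature

    resp := verifier.RPC.VerifyMessage(msg, sig)

    // Pubkey echoes the key recovered from the signature; Valid is
    // only true when that key belongs to a node the verifier knows.
    require.True(t, resp.Valid)
    require.Equal(t, signer.PubKeyStr, resp.Pubkey)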
- sigReq = &lnrpc.SignMessageRequest{Msg: carolMsg}
- ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
- sigResp, err = carol.SignMessage(ctxt, sigReq)
- if err != nil {
- t.Fatalf("SignMessage rpc call failed: %v", err)
- }
+ carolMsg := []byte("carol msg")
+ sigResp = carol.RPC.SignMessage(carolMsg)
 carolSig := sigResp.Signature
 
- // bob verifying carol's signature should fail since they are not connected.
- verifyReq = &lnrpc.VerifyMessageRequest{Msg: carolMsg, Signature: carolSig}
- ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
- verifyResp, err = net.Bob.VerifyMessage(ctxt, verifyReq)
- if err != nil {
- t.Fatalf("VerifyMessage failed: %v", err)
- }
- if verifyResp.Valid {
- t.Fatalf("carol's signature should not be valid")
- }
- if verifyResp.Pubkey != carol.PubKeyStr {
- t.Fatalf("carol's signature doesn't contain her pubkey")
- }
+ // bob verifying carol's signature should fail since they are not
+ // connected.
+ verifyResp = bob.RPC.VerifyMessage(carolMsg, carolSig)
+ require.False(ht, verifyResp.Valid,
+ "carol's signature should not be valid")
+ require.Equal(ht, verifyResp.Pubkey, carol.PubKeyStr,
+ "carol's signature doesn't contain carol's pubkey.")
 
 // Close the channel between alice and bob.
- closeChannelAndAssert(t, net, net.Alice, aliceBobCh, false)
+ ht.CloseChannel(alice, aliceBobCh)
 }
 
 // testAbandonChannel abandones a channel and asserts that it is no
diff --git a/lntest/itest/lnd_test_list_on_test.go b/lntest/itest/lnd_test_list_on_test.go
index b6235e966..a8832a9ac 100644
--- a/lntest/itest/lnd_test_list_on_test.go
+++ b/lntest/itest/lnd_test_list_on_test.go
@@ -125,10 +125,6 @@ var allTestCases = []*testCase{
 name: "node announcement",
 test: testNodeAnnouncement,
 },
- {
- name: "node sign verify",
- test: testNodeSignVerify,
- },
 {
 name: "derive shared key",
 test: testDeriveSharedKey,
From 2c12c8a77c2db6d3ae702311935966006ae0ecc4 Mon Sep 17 00:00:00 2001
From: yyforyongyu
Date: Thu, 18 Aug 2022 17:30:34 +0800
Subject: [PATCH 10/47] multi: refactor `testListAddresses`

---
 lntemp/rpc/wallet_kit.go | 26 ++++++++++++
 lntemp/utils.go | 39 +++++++++++++++++
 lntest/itest/list_on_test.go | 4 ++
 lntest/itest/lnd_misc_test.go | 60 +++++++++----------------
 lntest/itest/lnd_test_list_on_test.go | 4 --
 lntest/itest/utils.go | 38 -----------------
 6 files changed, 94 insertions(+), 77 deletions(-)

diff --git a/lntemp/rpc/wallet_kit.go b/lntemp/rpc/wallet_kit.go
index 2feddf473..14713f896 100644
--- a/lntemp/rpc/wallet_kit.go
+++ b/lntemp/rpc/wallet_kit.go
@@ -98,3 +98,29 @@ func (h *HarnessRPC) LabelTransaction(req *walletrpc.LabelTransactionRequest) {
 _, err := h.WalletKit.LabelTransaction(ctxt, req)
 h.NoError(err, "LabelTransaction")
 }
+
+// DeriveNextKey makes a RPC call to the DeriveNextKey and asserts.
+func (h *HarnessRPC) DeriveNextKey(
+ req *walletrpc.KeyReq) *signrpc.KeyDescriptor {
+
+ ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
+ defer cancel()
+
+ key, err := h.WalletKit.DeriveNextKey(ctxt, req)
+ h.NoError(err, "DeriveNextKey")
+
+ return key
+}
+
+// ListAddresses makes a RPC call to the ListAddresses and asserts. 
+func (h *HarnessRPC) ListAddresses(
+ req *walletrpc.ListAddressesRequest) *walletrpc.ListAddressesResponse {
+
+ ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
+ defer cancel()
+
+ key, err := h.WalletKit.ListAddresses(ctxt, req)
+ h.NoError(err, "ListAddresses")
+
+ return key
+}
diff --git a/lntemp/utils.go b/lntemp/utils.go
index e8f9171e5..d6d279765 100644
--- a/lntemp/utils.go
+++ b/lntemp/utils.go
@@ -5,6 +5,8 @@ import (
 "io"
 "math"
 "os"
+ "strconv"
+ "strings"
 
 "github.com/lightningnetwork/lnd/lntest"
 )
@@ -53,3 +55,40 @@ func errNumNotMatched(name string, subject string,
 return fmt.Errorf("%s: assert %s failed: want %d, got: %d, total: "+
 "%d, previously had: %d", name, subject, want, got, total, old)
 }
+
+// ParseDerivationPath parses a path in the form of m/x'/y'/z'/a/b into a slice
+// of [x, y, z, a, b], meaning that the apostrophe is ignored and 2^31 is _not_
+// added to the numbers.
+func ParseDerivationPath(path string) ([]uint32, error) {
+ path = strings.TrimSpace(path)
+ if len(path) == 0 {
+ return nil, fmt.Errorf("path cannot be empty")
+ }
+ if !strings.HasPrefix(path, "m/") {
+ return nil, fmt.Errorf("path must start with m/")
+ }
+
+ // Just the root key, no path was provided. This is valid but not useful
+ // in most cases.
+ rest := strings.ReplaceAll(path, "m/", "")
+ if rest == "" {
+ return []uint32{}, nil
+ }
+
+ parts := strings.Split(rest, "/")
+ indices := make([]uint32, len(parts))
+ for i := 0; i < len(parts); i++ {
+ part := parts[i]
+ if strings.Contains(parts[i], "'") {
+ part = strings.TrimRight(parts[i], "'")
+ }
+ parsed, err := strconv.ParseInt(part, 10, 32)
+ if err != nil {
+ return nil, fmt.Errorf("could not parse part \"%s\": "+
+ "%v", part, err)
+ }
+ indices[i] = uint32(parsed)
+ }
+
+ return indices, nil
+}
diff --git a/lntest/itest/list_on_test.go b/lntest/itest/list_on_test.go
index 3434bcfb9..0f275f563 100644
--- a/lntest/itest/list_on_test.go
+++ b/lntest/itest/list_on_test.go
@@ -103,4 +103,8 @@ var allTestCasesTemp = []*lntemp.TestCase{
 Name: "node sign verify",
 TestFunc: testNodeSignVerify,
 },
+ {
+ Name: "list addresses",
+ TestFunc: testListAddresses,
+ },
 }
diff --git a/lntest/itest/lnd_misc_test.go b/lntest/itest/lnd_misc_test.go
index 987925125..d77d71ca4 100644
--- a/lntest/itest/lnd_misc_test.go
+++ b/lntest/itest/lnd_misc_test.go
@@ -989,16 +989,13 @@ func testSweepAllCoins(ht *lntemp.HarnessTest) {
 
 // testListAddresses tests that we get all the addresses and their
 // corresponding balance correctly.
-func testListAddresses(net *lntest.NetworkHarness, t *harnessTest) {
- ctxb := context.Background()
-
+func testListAddresses(ht *lntemp.HarnessTest) {
 // First, we'll make a new node - Alice, which will be generating
 // new addresses.
- alice := net.NewNode(t.t, "Alice", nil)
- defer shutdownAndAssert(net, t, alice)
+ alice := ht.NewNode("Alice", nil)
 
 // Next, we'll give Alice exactly 1 utxo of 1 BTC.
- net.SendCoins(t.t, btcutil.SatoshiPerBitcoin, alice)
+ ht.FundCoins(btcutil.SatoshiPerBitcoin, alice)
 
 type addressDetails struct {
 Balance int64
@@ -1010,81 +1007,75 @@ func testListAddresses(net *lntest.NetworkHarness, t *harnessTest) {
 
 // Create an address generated from internal keys.
 keyLoc := &walletrpc.KeyReq{KeyFamily: 123}
- keyDesc, err := alice.WalletKitClient.DeriveNextKey(ctxb, keyLoc)
- require.NoError(t.t, err)
+ keyDesc := alice.RPC.DeriveNextKey(keyLoc)
 
 // Hex Encode the public key.
 pubkeyString := hex.EncodeToString(keyDesc.RawKeyBytes)
 
 // Create a p2tr address. 
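Before the address setup continues below, the newly exported ParseDerivationPath deserves a standalone example, since it is a pure function; a minimal sketch:

    // Apostrophes are stripped and 2^31 is not added, so hardened and
    // non-hardened markers parse to the same plain indices.
    indices, err := lntemp.ParseDerivationPath("m/84'/0'/0'/0/1")
    if err != nil {
        panic(err)
    }
    fmt.Println(indices) // [84 0 0 0 1]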
- resp, err := alice.NewAddress(ctxb, &lnrpc.NewAddressRequest{ + resp := alice.RPC.NewAddress(&lnrpc.NewAddressRequest{ Type: lnrpc.AddressType_TAPROOT_PUBKEY, }) - require.NoError(t.t, err) generatedAddr[resp.Address] = addressDetails{ Balance: 200_000, Type: walletrpc.AddressType_TAPROOT_PUBKEY, } // Create a p2wkh address. - resp, err = alice.NewAddress(ctxb, &lnrpc.NewAddressRequest{ + resp = alice.RPC.NewAddress(&lnrpc.NewAddressRequest{ Type: lnrpc.AddressType_WITNESS_PUBKEY_HASH, }) - require.NoError(t.t, err) generatedAddr[resp.Address] = addressDetails{ Balance: 300_000, Type: walletrpc.AddressType_WITNESS_PUBKEY_HASH, } // Create a np2wkh address. - resp, err = alice.NewAddress(ctxb, &lnrpc.NewAddressRequest{ + resp = alice.RPC.NewAddress(&lnrpc.NewAddressRequest{ Type: lnrpc.AddressType_NESTED_PUBKEY_HASH, }) - require.NoError(t.t, err) generatedAddr[resp.Address] = addressDetails{ Balance: 400_000, Type: walletrpc.AddressType_HYBRID_NESTED_WITNESS_PUBKEY_HASH, } for addr, addressDetail := range generatedAddr { - _, err := alice.SendCoins(ctxb, &lnrpc.SendCoinsRequest{ + alice.RPC.SendCoins(&lnrpc.SendCoinsRequest{ Addr: addr, Amount: addressDetail.Balance, SpendUnconfirmed: true, }) - require.NoError(t.t, err) } - mineBlocks(t, net, 1, 3) + ht.MineBlocksAndAssertNumTxes(1, 3) // Get all the accounts except LND's custom accounts. - addressLists, err := alice.WalletKitClient.ListAddresses( - ctxb, &walletrpc.ListAddressesRequest{}, + addressLists := alice.RPC.ListAddresses( + &walletrpc.ListAddressesRequest{}, ) - require.NoError(t.t, err) foundAddresses := 0 for _, addressList := range addressLists.AccountWithAddresses { addresses := addressList.Addresses - derivationPath, err := parseDerivationPath( + derivationPath, err := lntemp.ParseDerivationPath( addressList.DerivationPath, ) - require.NoError(t.t, err) + require.NoError(ht, err) // Should not get an account with KeyFamily - 123. require.NotEqual( - t.t, uint32(keyLoc.KeyFamily), derivationPath[2], + ht, uint32(keyLoc.KeyFamily), derivationPath[2], ) for _, address := range addresses { if _, ok := generatedAddr[address.Address]; ok { addrDetails := generatedAddr[address.Address] require.Equal( - t.t, addrDetails.Balance, + ht, addrDetails.Balance, address.Balance, ) require.Equal( - t.t, addrDetails.Type, + ht, addrDetails.Type, addressList.AddressType, ) foundAddresses++ @@ -1092,23 +1083,22 @@ func testListAddresses(net *lntest.NetworkHarness, t *harnessTest) { } } - require.Equal(t.t, len(generatedAddr), foundAddresses) + require.Equal(ht, len(generatedAddr), foundAddresses) foundAddresses = 0 // Get all the accounts (including LND's custom accounts). - addressLists, err = alice.WalletKitClient.ListAddresses( - ctxb, &walletrpc.ListAddressesRequest{ + addressLists = alice.RPC.ListAddresses( + &walletrpc.ListAddressesRequest{ ShowCustomAccounts: true, }, ) - require.NoError(t.t, err) for _, addressList := range addressLists.AccountWithAddresses { addresses := addressList.Addresses - derivationPath, err := parseDerivationPath( + derivationPath, err := lntemp.ParseDerivationPath( addressList.DerivationPath, ) - require.NoError(t.t, err) + require.NoError(ht, err) for _, address := range addresses { // Check if the KeyFamily in derivation path is 123. @@ -1116,15 +1106,15 @@ func testListAddresses(net *lntest.NetworkHarness, t *harnessTest) { // For LND's custom accounts, the address // represents the public key. 
pubkey := address.Address - require.Equal(t.t, pubkeyString, pubkey) + require.Equal(ht, pubkeyString, pubkey) } else if _, ok := generatedAddr[address.Address]; ok { addrDetails := generatedAddr[address.Address] require.Equal( - t.t, addrDetails.Balance, + ht, addrDetails.Balance, address.Balance, ) require.Equal( - t.t, addrDetails.Type, + ht, addrDetails.Type, addressList.AddressType, ) foundAddresses++ @@ -1132,7 +1122,7 @@ func testListAddresses(net *lntest.NetworkHarness, t *harnessTest) { } } - require.Equal(t.t, len(generatedAddr), foundAddresses) + require.Equal(ht, len(generatedAddr), foundAddresses) } func assertChannelConstraintsEqual(ht *lntemp.HarnessTest, diff --git a/lntest/itest/lnd_test_list_on_test.go b/lntest/itest/lnd_test_list_on_test.go index a8832a9ac..0cfefa34b 100644 --- a/lntest/itest/lnd_test_list_on_test.go +++ b/lntest/itest/lnd_test_list_on_test.go @@ -4,10 +4,6 @@ package itest var allTestCases = []*testCase{ - { - name: "list addresses", - test: testListAddresses, - }, { name: "recovery info", test: testGetRecoveryInfo, diff --git a/lntest/itest/utils.go b/lntest/itest/utils.go index 17a261c1f..abeac91db 100644 --- a/lntest/itest/utils.go +++ b/lntest/itest/utils.go @@ -5,8 +5,6 @@ import ( "crypto/rand" "fmt" "io" - "strconv" - "strings" "testing" "time" @@ -501,42 +499,6 @@ func getOutputIndex(t *harnessTest, miner *lntest.HarnessMiner, return p2trOutputIndex } -// parseDerivationPath parses a path in the form of m/x'/y'/z'/a/b into a slice -// of [x, y, z, a, b], meaning that the apostrophe is ignored and 2^31 is _not_ -// added to the numbers. -func parseDerivationPath(path string) ([]uint32, error) { - path = strings.TrimSpace(path) - if len(path) == 0 { - return nil, fmt.Errorf("path cannot be empty") - } - if !strings.HasPrefix(path, "m/") { - return nil, fmt.Errorf("path must start with m/") - } - - // Just the root key, no path was provided. This is valid but not useful - // in most cases. - rest := strings.ReplaceAll(path, "m/", "") - if rest == "" { - return []uint32{}, nil - } - - parts := strings.Split(rest, "/") - indices := make([]uint32, len(parts)) - for i := 0; i < len(parts); i++ { - part := parts[i] - if strings.Contains(parts[i], "'") { - part = strings.TrimRight(parts[i], "'") - } - parsed, err := strconv.ParseInt(part, 10, 32) - if err != nil { - return nil, fmt.Errorf("could not parse part \"%s\": "+ - "%v", part, err) - } - indices[i] = uint32(parsed) - } - return indices, nil -} - // acceptChannel is used to accept a single channel that comes across. This // should be run in a goroutine and is used to test nodes with the zero-conf // feature bit. From b1b989afaea008f47268d6a199d3485ee704e8b4 Mon Sep 17 00:00:00 2001 From: yyforyongyu Date: Thu, 4 Aug 2022 00:49:39 +0800 Subject: [PATCH 11/47] lntemp+itest: refactor `testAbandonChannel` --- lntemp/harness_assertion.go | 24 ++++++ lntest/itest/list_on_test.go | 4 + lntest/itest/lnd_misc_test.go | 120 ++++++-------------------- lntest/itest/lnd_test_list_on_test.go | 4 - 4 files changed, 52 insertions(+), 100 deletions(-) diff --git a/lntemp/harness_assertion.go b/lntemp/harness_assertion.go index ecbbbb178..41912d355 100644 --- a/lntemp/harness_assertion.go +++ b/lntemp/harness_assertion.go @@ -1455,3 +1455,27 @@ func (h *HarnessTest) assertHTLCError(hn *node.HarnessNode, require.NoError(h, err, "timeout checking HTLC error") } + +// AssertZombieChannel asserts that a given channel found using the chanID is +// marked as zombie. 
+func (h *HarnessTest) AssertZombieChannel(hn *node.HarnessNode, chanID uint64) { + ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout) + defer cancel() + + err := wait.NoError(func() error { + _, err := hn.RPC.LN.GetChanInfo( + ctxt, &lnrpc.ChanInfoRequest{ChanId: chanID}, + ) + if err == nil { + return fmt.Errorf("expected error but got nil") + } + + if !strings.Contains(err.Error(), "marked as zombie") { + return fmt.Errorf("expected error to contain '%s' but "+ + "was '%v'", "marked as zombie", err) + } + + return nil + }, DefaultTimeout) + require.NoError(h, err, "timeout while checking zombie channel") +} diff --git a/lntest/itest/list_on_test.go b/lntest/itest/list_on_test.go index 0f275f563..6ecbcea88 100644 --- a/lntest/itest/list_on_test.go +++ b/lntest/itest/list_on_test.go @@ -107,4 +107,8 @@ var allTestCasesTemp = []*lntemp.TestCase{ Name: "list addresses", TestFunc: testListAddresses, }, + { + Name: "abandonchannel", + TestFunc: testAbandonChannel, + }, } diff --git a/lntest/itest/lnd_misc_test.go b/lntest/itest/lnd_misc_test.go index d77d71ca4..65847e989 100644 --- a/lntest/itest/lnd_misc_test.go +++ b/lntest/itest/lnd_misc_test.go @@ -1,11 +1,9 @@ package itest import ( - "context" "encoding/hex" "fmt" "io/ioutil" - "strings" "github.com/btcsuite/btcd/btcutil" "github.com/btcsuite/btcwallet/wallet" @@ -19,7 +17,6 @@ import ( "github.com/lightningnetwork/lnd/lnrpc/walletrpc" "github.com/lightningnetwork/lnd/lntemp" "github.com/lightningnetwork/lnd/lntemp/node" - "github.com/lightningnetwork/lnd/lntest" "github.com/lightningnetwork/lnd/lntest/wait" "github.com/lightningnetwork/lnd/lnwallet" "github.com/lightningnetwork/lnd/lnwire" @@ -670,116 +667,52 @@ func testNodeSignVerify(ht *lntemp.HarnessTest) { ht.CloseChannel(alice, aliceBobCh) } -// testAbandonChannel abandones a channel and asserts that it is no -// longer open and not in one of the pending closure states. It also -// verifies that the abandoned channel is reported as closed with close -// type 'abandoned'. -func testAbandonChannel(net *lntest.NetworkHarness, t *harnessTest) { - ctxb := context.Background() +// testAbandonChannel abandons a channel and asserts that it is no longer open +// and not in one of the pending closure states. It also verifies that the +// abandoned channel is reported as closed with close type 'abandoned'. +func testAbandonChannel(ht *lntemp.HarnessTest) { + alice, bob := ht.Alice, ht.Bob // First establish a channel between Alice and Bob. - channelParam := lntest.OpenChannelParams{ + channelParam := lntemp.OpenChannelParams{ Amt: funding.MaxBtcFundingAmount, PushAmt: btcutil.Amount(100000), } - - chanPoint := openChannelAndAssert( - t, net, net.Alice, net.Bob, channelParam, - ) - txid, err := lnrpc.GetChanPointFundingTxid(chanPoint) - require.NoError(t.t, err, "alice bob get channel funding txid") - chanPointStr := fmt.Sprintf("%v:%v", txid, chanPoint.OutputIndex) - - // Wait for channel to be confirmed open. - err = net.Alice.WaitForNetworkChannelOpen(chanPoint) - require.NoError(t.t, err, "alice wait for network channel open") - err = net.Bob.WaitForNetworkChannelOpen(chanPoint) - require.NoError(t.t, err, "bob wait for network channel open") + chanPoint := ht.OpenChannel(alice, bob, channelParam) // Now that the channel is open, we'll obtain its channel ID real quick // so we can use it to query the graph below. 
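AssertZombieChannel above polls GetChanInfo until the "marked as zombie" error appears. Combined with QueryChannelByChanPoint, which the hunks below introduce, the abandon flow reduces to a few calls; a minimal sketch, assuming ht, a node hn, and an open channel's chanPoint:

    // Resolve the channel ID from its outpoint, abandon the channel,
    // then require that the graph reports the edge as a zombie.
    chanID := ht.QueryChannelByChanPoint(hn, chanPoint).ChanId
    hn.RPC.AbandonChannel(&lnrpc.AbandonChannelRequest{
        ChannelPoint: chanPoint,
    })
    ht.AssertZombieChannel(hn, chanID)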
- listReq := &lnrpc.ListChannelsRequest{} - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - aliceChannelList, err := net.Alice.ListChannels(ctxt, listReq) - require.NoError(t.t, err) - var chanID uint64 - for _, channel := range aliceChannelList.Channels { - if channel.ChannelPoint == chanPointStr { - chanID = channel.ChanId - } - } + chanID := ht.QueryChannelByChanPoint(alice, chanPoint).ChanId - require.NotZero(t.t, chanID, "unable to find channel") - - // To make sure the channel is removed from the backup file as well when - // being abandoned, grab a backup snapshot so we can compare it with the - // later state. - bkupBefore, err := ioutil.ReadFile(net.Alice.ChanBackupPath()) - require.NoError(t.t, err, "channel backup before abandoning channel") + // To make sure the channel is removed from the backup file as well + // when being abandoned, grab a backup snapshot so we can compare it + // with the later state. + bkupBefore, err := ioutil.ReadFile(alice.Cfg.ChanBackupPath()) + require.NoError(ht, err, "channel backup before abandoning channel") // Send request to abandon channel. abandonChannelRequest := &lnrpc.AbandonChannelRequest{ ChannelPoint: chanPoint, } - - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - _, err = net.Alice.AbandonChannel(ctxt, abandonChannelRequest) - require.NoError(t.t, err, "abandon channel") + alice.RPC.AbandonChannel(abandonChannelRequest) // Assert that channel in no longer open. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - aliceChannelList, err = net.Alice.ListChannels(ctxt, listReq) - require.NoError(t.t, err, "list channels") - require.Zero(t.t, len(aliceChannelList.Channels), "alice open channels") + ht.AssertNodeNumChannels(alice, 0) // Assert that channel is not pending closure. - pendingReq := &lnrpc.PendingChannelsRequest{} - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - alicePendingList, err := net.Alice.PendingChannels(ctxt, pendingReq) - require.NoError(t.t, err, "alice list pending channels") - require.Zero( - t.t, len(alicePendingList.PendingClosingChannels), //nolint:staticcheck - "alice pending channels", - ) - require.Zero( - t.t, len(alicePendingList.PendingForceClosingChannels), - "alice pending force close channels", - ) - require.Zero( - t.t, len(alicePendingList.WaitingCloseChannels), - "alice waiting close channels", - ) + ht.AssertNumWaitingClose(alice, 0) // Assert that channel is listed as abandoned. - closedReq := &lnrpc.ClosedChannelsRequest{ - Abandoned: true, - } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - aliceClosedList, err := net.Alice.ClosedChannels(ctxt, closedReq) - require.NoError(t.t, err, "alice list closed channels") - require.Len(t.t, aliceClosedList.Channels, 1, "alice closed channels") + req := &lnrpc.ClosedChannelsRequest{Abandoned: true} + aliceClosedList := alice.RPC.ClosedChannels(req) + require.Len(ht, aliceClosedList.Channels, 1, "alice closed channels") // Ensure that the channel can no longer be found in the channel graph. - err = wait.NoError(func() error { - _, err := net.Alice.GetChanInfo(ctxb, &lnrpc.ChanInfoRequest{ - ChanId: chanID, - }) - if err == nil { - return fmt.Errorf("expected error but got nil") - } - - if !strings.Contains(err.Error(), "marked as zombie") { - return fmt.Errorf("expected error to contain '%s' but "+ - "was '%v'", "marked as zombie", err) - } - - return nil - }, defaultTimeout) - require.NoError(t.t, err, "marked as zombie") + ht.AssertZombieChannel(alice, chanID) // Make sure the channel is no longer in the channel backup list. 
err = wait.NoError(func() error { - bkupAfter, err := ioutil.ReadFile(net.Alice.ChanBackupPath()) + bkupAfter, err := ioutil.ReadFile(alice.Cfg.ChanBackupPath()) if err != nil { return fmt.Errorf("could not get channel backup "+ "before abandoning channel: %v", err) @@ -792,21 +725,16 @@ func testAbandonChannel(net *lntest.NetworkHarness, t *harnessTest) { return nil }, defaultTimeout) - require.NoError(t.t, err, "channel removed from backup file") + require.NoError(ht, err, "channel removed from backup file") // Calling AbandonChannel again, should result in no new errors, as the // channel has already been removed. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - _, err = net.Alice.AbandonChannel(ctxt, abandonChannelRequest) - require.NoError(t.t, err, "abandon channel second time") + alice.RPC.AbandonChannel(abandonChannelRequest) // Now that we're done with the test, the channel can be closed. This // is necessary to avoid unexpected outcomes of other tests that use // Bob's lnd instance. - closeChannelAndAssert(t, net, net.Bob, chanPoint, true) - - // Cleanup by mining the force close and sweep transaction. - cleanupForceClose(t, net, net.Bob, chanPoint) + ht.ForceCloseChannel(bob, chanPoint) } // testSweepAllCoins tests that we're able to properly sweep all coins from the diff --git a/lntest/itest/lnd_test_list_on_test.go b/lntest/itest/lnd_test_list_on_test.go index 0cfefa34b..e60df6240 100644 --- a/lntest/itest/lnd_test_list_on_test.go +++ b/lntest/itest/lnd_test_list_on_test.go @@ -167,10 +167,6 @@ var allTestCases = []*testCase{ name: "failing link", test: testFailingChannel, }, - { - name: "abandonchannel", - test: testAbandonChannel, - }, { name: "revoked uncooperative close retribution zero value remote output", test: testRevokedCloseRetributionZeroValueRemoteOutput, From b4d288cc106f287a5e35e14f9c2915c38a2a37b9 Mon Sep 17 00:00:00 2001 From: yyforyongyu Date: Thu, 4 Aug 2022 01:00:09 +0800 Subject: [PATCH 12/47] lntemp+itest: refactor `testGetRecoveryInfo` --- lntemp/rpc/lnd.go | 18 +++++ lntest/itest/list_on_test.go | 4 + lntest/itest/lnd_recovery_test.go | 102 +++++++++----------------- lntest/itest/lnd_test_list_on_test.go | 4 - 4 files changed, 57 insertions(+), 71 deletions(-) diff --git a/lntemp/rpc/lnd.go b/lntemp/rpc/lnd.go index a8827b3e5..7f7261233 100644 --- a/lntemp/rpc/lnd.go +++ b/lntemp/rpc/lnd.go @@ -381,3 +381,21 @@ func (h *HarnessRPC) VerifyMessage(msg []byte, return resp } + +// GetRecoveryInfo uses the specified node to make a RPC call to +// GetRecoveryInfo and asserts. 
+func (h *HarnessRPC) GetRecoveryInfo( + req *lnrpc.GetRecoveryInfoRequest) *lnrpc.GetRecoveryInfoResponse { + + ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout) + defer cancel() + + if req == nil { + req = &lnrpc.GetRecoveryInfoRequest{} + } + + resp, err := h.LN.GetRecoveryInfo(ctxt, req) + require.NoErrorf(h, err, "failed to GetRecoveryInfo") + + return resp +} diff --git a/lntest/itest/list_on_test.go b/lntest/itest/list_on_test.go index 6ecbcea88..362ce0557 100644 --- a/lntest/itest/list_on_test.go +++ b/lntest/itest/list_on_test.go @@ -111,4 +111,8 @@ var allTestCasesTemp = []*lntemp.TestCase{ Name: "abandonchannel", TestFunc: testAbandonChannel, }, + { + Name: "recovery info", + TestFunc: testGetRecoveryInfo, + }, } diff --git a/lntest/itest/lnd_recovery_test.go b/lntest/itest/lnd_recovery_test.go index 37513be99..1fa369ed0 100644 --- a/lntest/itest/lnd_recovery_test.go +++ b/lntest/itest/lnd_recovery_test.go @@ -2,12 +2,14 @@ package itest import ( "context" + "fmt" "math" "github.com/btcsuite/btcd/btcutil" "github.com/btcsuite/btcd/btcutil/hdkeychain" "github.com/lightningnetwork/lnd/aezeed" "github.com/lightningnetwork/lnd/lnrpc" + "github.com/lightningnetwork/lnd/lntemp" "github.com/lightningnetwork/lnd/lntest" "github.com/lightningnetwork/lnd/lntest/wait" "github.com/stretchr/testify/require" @@ -15,87 +17,53 @@ import ( // testGetRecoveryInfo checks whether lnd gives the right information about // the wallet recovery process. -func testGetRecoveryInfo(net *lntest.NetworkHarness, t *harnessTest) { - ctxb := context.Background() - +func testGetRecoveryInfo(ht *lntemp.HarnessTest) { // First, create a new node with strong passphrase and grab the mnemonic // used for key derivation. This will bring up Carol with an empty // wallet, and such that she is synced up. password := []byte("The Magic Words are Squeamish Ossifrage") - carol, mnemonic, _, err := net.NewNodeWithSeed( - "Carol", nil, password, false, - ) - if err != nil { - t.Fatalf("unable to create node with seed; %v", err) - } - - shutdownAndAssert(net, t, carol) + carol, mnemonic, _ := ht.NewNodeWithSeed("Carol", nil, password, false) checkInfo := func(expectedRecoveryMode, expectedRecoveryFinished bool, expectedProgress float64, recoveryWindow int32) { // Restore Carol, passing in the password, mnemonic, and // desired recovery window. - node, err := net.RestoreNodeWithSeed( - "Carol", nil, password, mnemonic, "", recoveryWindow, - nil, + node := ht.RestoreNodeWithSeed( + carol.Name(), nil, password, mnemonic, "", + recoveryWindow, nil, ) - if err != nil { - t.Fatalf("unable to restore node: %v", err) - } - - // Wait for Carol to sync to the chain. - _, minerHeight, err := net.Miner.Client.GetBestBlock() - if err != nil { - t.Fatalf("unable to get current blockheight %v", err) - } - err = waitForNodeBlockHeight(node, minerHeight) - if err != nil { - t.Fatalf("unable to sync to chain: %v", err) - } // Query carol for her current wallet recovery progress. - var ( - recoveryMode bool - recoveryFinished bool - progress float64 - ) - - err = wait.Predicate(func() bool { + err := wait.NoError(func() error { // Verify that recovery info gives the right response. 
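The verification below runs inside wait.NoError, which retries the closure until it returns nil or the timeout elapses, surfacing the last error otherwise. A minimal sketch of that contract, with ready() as a hypothetical condition:

    err := wait.NoError(func() error {
        if !ready() {
            return fmt.Errorf("not ready yet")
        }
        return nil
    }, defaultTimeout)
    require.NoError(t, err, "timed out waiting for condition")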
- req := &lnrpc.GetRecoveryInfoRequest{}
- ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
- resp, err := node.GetRecoveryInfo(ctxt, req)
- if err != nil {
- t.Fatalf("unable to query recovery info: %v", err)
+ resp := node.RPC.GetRecoveryInfo(nil)
+
+ mode := resp.RecoveryMode
+ finished := resp.RecoveryFinished
+ progress := resp.Progress
+
+ if mode != expectedRecoveryMode {
+ return fmt.Errorf("expected recovery mode %v "+
+ "got %v", expectedRecoveryMode, mode)
+ }
+ if finished != expectedRecoveryFinished {
+ return fmt.Errorf("expected finished %v "+
+ "got %v", expectedRecoveryFinished,
+ finished)
+ }
+ if progress != expectedProgress {
+ return fmt.Errorf("expected progress %v "+
+ "got %v", expectedProgress, progress)
 }
- recoveryMode = resp.RecoveryMode
- recoveryFinished = resp.RecoveryFinished
- progress = resp.Progress
-
- if recoveryMode != expectedRecoveryMode ||
- recoveryFinished != expectedRecoveryFinished ||
- progress != expectedProgress {
-
- return false
- }
-
- return true
+ return nil
 }, defaultTimeout)
- if err != nil {
- t.Fatalf("expected recovery mode to be %v, got %v, "+
- "expected recovery finished to be %v, got %v, "+
- "expected progress %v, got %v",
- expectedRecoveryMode, recoveryMode,
- expectedRecoveryFinished, recoveryFinished,
- expectedProgress, progress,
- )
- }
+ require.NoError(ht, err)
 
 // Lastly, shutdown this Carol so we can move on to the next
 // restoration.
- shutdownAndAssert(net, t, node)
+ ht.Shutdown(node)
 }
 
 // Restore Carol with a recovery window of 0. Since it's not in recovery
 // mode, the recovery info will give a response with recoveryMode=false,
 // recoveryFinished=false, and progress=0
 checkInfo(false, false, 0, 0)
 
- // Change the recovery windown to be 1 to turn on recovery mode. Since the
- // current chain height is the same as the birthday height, it should
- // indicate the recovery process is finished.
+ // Change the recovery window to be 1 to turn on recovery mode. Since
+ // the current chain height is the same as the birthday height, it
+ // should indicate the recovery process is finished.
 checkInfo(true, true, 1, 1)
 
 // We now go ahead 5 blocks. Because the wallet's syncing process is
- // controlled by a goroutine in the background, it will catch up quickly.
- // This makes the recovery progress back to 1.
- mineBlocks(t, net, 5, 0)
+ // controlled by a goroutine in the background, it will catch up
+ // quickly. This makes the recovery progress back to 1. 
+ ht.MineBlocks(5) checkInfo(true, true, 1, 1) } diff --git a/lntest/itest/lnd_test_list_on_test.go b/lntest/itest/lnd_test_list_on_test.go index e60df6240..01ebff1d8 100644 --- a/lntest/itest/lnd_test_list_on_test.go +++ b/lntest/itest/lnd_test_list_on_test.go @@ -4,10 +4,6 @@ package itest var allTestCases = []*testCase{ - { - name: "recovery info", - test: testGetRecoveryInfo, - }, { name: "onchain fund recovery", test: testOnchainFundRecovery, From 5208c501a24ab1719d65a4bff7fc2f210a03d81a Mon Sep 17 00:00:00 2001 From: yyforyongyu Date: Thu, 4 Aug 2022 01:08:52 +0800 Subject: [PATCH 13/47] itest: refactor `testOnchainFundRecovery` --- lntest/itest/list_on_test.go | 4 + lntest/itest/lnd_recovery_test.go | 144 +++++++++----------------- lntest/itest/lnd_test_list_on_test.go | 4 - 3 files changed, 53 insertions(+), 99 deletions(-) diff --git a/lntest/itest/list_on_test.go b/lntest/itest/list_on_test.go index 362ce0557..5df30b400 100644 --- a/lntest/itest/list_on_test.go +++ b/lntest/itest/list_on_test.go @@ -115,4 +115,8 @@ var allTestCasesTemp = []*lntemp.TestCase{ Name: "recovery info", TestFunc: testGetRecoveryInfo, }, + { + Name: "onchain fund recovery", + TestFunc: testOnchainFundRecovery, + }, } diff --git a/lntest/itest/lnd_recovery_test.go b/lntest/itest/lnd_recovery_test.go index 1fa369ed0..e6482b861 100644 --- a/lntest/itest/lnd_recovery_test.go +++ b/lntest/itest/lnd_recovery_test.go @@ -1,7 +1,6 @@ package itest import ( - "context" "fmt" "math" @@ -9,8 +8,9 @@ import ( "github.com/btcsuite/btcd/btcutil/hdkeychain" "github.com/lightningnetwork/lnd/aezeed" "github.com/lightningnetwork/lnd/lnrpc" + "github.com/lightningnetwork/lnd/lnrpc/walletrpc" "github.com/lightningnetwork/lnd/lntemp" - "github.com/lightningnetwork/lnd/lntest" + "github.com/lightningnetwork/lnd/lntemp/node" "github.com/lightningnetwork/lnd/lntest/wait" "github.com/stretchr/testify/require" ) @@ -87,18 +87,12 @@ func testGetRecoveryInfo(ht *lntemp.HarnessTest) { // when providing a valid aezeed that owns outputs on the chain. This test // performs multiple restorations using the same seed and various recovery // windows to ensure we detect funds properly. -func testOnchainFundRecovery(net *lntest.NetworkHarness, t *harnessTest) { - ctxb := context.Background() - +func testOnchainFundRecovery(ht *lntemp.HarnessTest) { // First, create a new node with strong passphrase and grab the mnemonic // used for key derivation. This will bring up Carol with an empty // wallet, and such that she is synced up. password := []byte("The Magic Words are Squeamish Ossifrage") - carol, mnemonic, _, err := net.NewNodeWithSeed( - "Carol", nil, password, false, - ) - require.NoError(t.t, err) - shutdownAndAssert(net, t, carol) + carol, mnemonic, _ := ht.NewNodeWithSeed("Carol", nil, password, false) // As long as the mnemonic is non-nil and the extended key is empty, the // closure below will always restore the node from the seed. The tests @@ -110,17 +104,16 @@ func testOnchainFundRecovery(net *lntest.NetworkHarness, t *harnessTest) { // given recovery window. Additionally, the caller can specify an action // to perform on the restored node before the node is shutdown. restoreCheckBalance := func(expAmount int64, expectedNumUTXOs uint32, - recoveryWindow int32, fn func(*lntest.HarnessNode)) { + recoveryWindow int32, fn func(*node.HarnessNode)) { - t.t.Helper() + ht.Helper() // Restore Carol, passing in the password, mnemonic, and // desired recovery window. 
- node, err := net.RestoreNodeWithSeed( - "Carol", nil, password, mnemonic, rootKey, + node := ht.RestoreNodeWithSeed( + carol.Name(), nil, password, mnemonic, rootKey, recoveryWindow, nil, ) - require.NoError(t.t, err) // Query carol for her current wallet balance, and also that we // gain the expected number of UTXOs. @@ -128,38 +121,33 @@ func testOnchainFundRecovery(net *lntest.NetworkHarness, t *harnessTest) { currBalance int64 currNumUTXOs uint32 ) - err = wait.Predicate(func() bool { - req := &lnrpc.WalletBalanceRequest{} - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - resp, err := node.WalletBalance(ctxt, req) - require.NoError(t.t, err) + err := wait.NoError(func() error { + resp := node.RPC.WalletBalance() currBalance = resp.ConfirmedBalance - utxoReq := &lnrpc.ListUnspentRequest{ + req := &walletrpc.ListUnspentRequest{ + Account: "", MaxConfs: math.MaxInt32, + MinConfs: 0, } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - utxoResp, err := node.ListUnspent(ctxt, utxoReq) - require.NoError(t.t, err) + utxoResp := node.RPC.ListUnspent(req) currNumUTXOs = uint32(len(utxoResp.Utxos)) // Verify that Carol's balance and number of UTXOs // matches what's expected. if expAmount != currBalance { - return false + return fmt.Errorf("balance not matched, want "+ + "%d, got %d", expAmount, currBalance) } if currNumUTXOs != expectedNumUTXOs { - return false + return fmt.Errorf("num of UTXOs not matched, "+ + "want %d, got %d", expectedNumUTXOs, + currNumUTXOs) } - return true + return nil }, defaultTimeout) - if err != nil { - t.Fatalf("expected restored node to have %d satoshis, "+ - "instead has %d satoshis, expected %d utxos "+ - "instead has %d", expAmount, currBalance, - expectedNumUTXOs, currNumUTXOs) - } + require.NoError(ht, err, "timeout checking Carol") // If the user provided a callback, execute the commands against // the restored Carol. @@ -167,71 +155,41 @@ func testOnchainFundRecovery(net *lntest.NetworkHarness, t *harnessTest) { fn(node) } - // Check if the previous outpoints are set correctly. - req := &lnrpc.GetTransactionsRequest{ - StartHeight: 0, - EndHeight: -1, - } - txDetails, err := node.GetTransactions(ctxb, req) - require.NoError(t.t, err) - - for _, tx := range txDetails.Transactions { - require.Greater(t.t, len(tx.PreviousOutpoints), 0) - } - // Lastly, shutdown this Carol so we can move on to the next // restoration. - shutdownAndAssert(net, t, node) + ht.Shutdown(node) } // Create a closure-factory for building closures that can generate and // skip a configurable number of addresses, before finally sending coins // to a next generated address. The returned closure will apply the same // behavior to both default P2WKH and NP2WKH scopes. - skipAndSend := func(nskip int) func(*lntest.HarnessNode) { - return func(node *lntest.HarnessNode) { - t.t.Helper() - - newP2WKHAddrReq := &lnrpc.NewAddressRequest{ - Type: AddrTypeWitnessPubkeyHash, - } - - newNP2WKHAddrReq := &lnrpc.NewAddressRequest{ - Type: AddrTypeNestedPubkeyHash, - } - - newP2TRAddrReq := &lnrpc.NewAddressRequest{ - Type: AddrTypeTaprootPubkey, - } + skipAndSend := func(nskip int) func(*node.HarnessNode) { + return func(node *node.HarnessNode) { + ht.Helper() // Generate and skip the number of addresses requested. 
- ctxt, cancel := context.WithTimeout( - ctxb, defaultTimeout, - ) - defer cancel() for i := 0; i < nskip; i++ { - _, err = node.NewAddress(ctxt, newP2WKHAddrReq) - require.NoError(t.t, err) + req := &lnrpc.NewAddressRequest{} - _, err = node.NewAddress(ctxt, newNP2WKHAddrReq) - require.NoError(t.t, err) + req.Type = AddrTypeWitnessPubkeyHash + node.RPC.NewAddress(req) - _, err = node.NewAddress(ctxt, newP2TRAddrReq) - require.NoError(t.t, err) + req.Type = AddrTypeNestedPubkeyHash + node.RPC.NewAddress(req) + + req.Type = AddrTypeTaprootPubkey + node.RPC.NewAddress(req) } // Send one BTC to the next P2WKH address. - net.SendCoins(t.t, btcutil.SatoshiPerBitcoin, node) + ht.FundCoins(btcutil.SatoshiPerBitcoin, node) // And another to the next NP2WKH address. - net.SendCoinsNP2WKH( - t.t, btcutil.SatoshiPerBitcoin, node, - ) + ht.FundCoinsNP2WKH(btcutil.SatoshiPerBitcoin, node) // Add another whole coin to the P2TR address. - net.SendCoinsP2TR( - t.t, btcutil.SatoshiPerBitcoin, node, - ) + ht.FundCoinsP2TR(btcutil.SatoshiPerBitcoin, node) } } @@ -284,25 +242,21 @@ func testOnchainFundRecovery(net *lntest.NetworkHarness, t *harnessTest) { // avoid fee discrepancies and a change output is formed. const minerAmt = 8 * btcutil.SatoshiPerBitcoin const finalBalance = 9 * btcutil.SatoshiPerBitcoin - promptChangeAddr := func(node *lntest.HarnessNode) { - t.t.Helper() + promptChangeAddr := func(node *node.HarnessNode) { + ht.Helper() - minerAddr, err := net.Miner.NewAddress() - require.NoError(t.t, err) - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - resp, err := node.SendCoins(ctxt, &lnrpc.SendCoinsRequest{ + minerAddr := ht.Miner.NewMinerAddress() + req := &lnrpc.SendCoinsRequest{ Addr: minerAddr.String(), Amount: minerAmt, - }) - require.NoError(t.t, err) - txid, err := waitForTxInMempool( - net.Miner.Client, minerMempoolTimeout, - ) - require.NoError(t.t, err) - require.Equal(t.t, txid.String(), resp.Txid) + } + resp := node.RPC.SendCoins(req) - block := mineBlocks(t, net, 1, 1)[0] - assertTxInBlock(t, block, txid) + txid := ht.Miner.AssertNumTxsInMempool(1)[0] + require.Equal(ht, txid.String(), resp.Txid) + + block := ht.MineBlocks(1)[0] + ht.Miner.AssertTxInBlock(block, txid) } restoreCheckBalance(finalBalance, 9, 20, promptChangeAddr) @@ -318,11 +272,11 @@ func testOnchainFundRecovery(net *lntest.NetworkHarness, t *harnessTest) { var seedMnemonic aezeed.Mnemonic copy(seedMnemonic[:], mnemonic) cipherSeed, err := seedMnemonic.ToCipherSeed(password) - require.NoError(t.t, err) + require.NoError(ht, err) extendedRootKey, err := hdkeychain.NewMaster( cipherSeed.Entropy[:], harnessNetParams, ) - require.NoError(t.t, err) + require.NoError(ht, err) rootKey = extendedRootKey.String() mnemonic = nil diff --git a/lntest/itest/lnd_test_list_on_test.go b/lntest/itest/lnd_test_list_on_test.go index 01ebff1d8..433cc71df 100644 --- a/lntest/itest/lnd_test_list_on_test.go +++ b/lntest/itest/lnd_test_list_on_test.go @@ -4,10 +4,6 @@ package itest var allTestCases = []*testCase{ - { - name: "onchain fund recovery", - test: testOnchainFundRecovery, - }, { name: "basic funding flow with all input types", test: testChannelFundingInputTypes, From 6a42270ba648bdf3044374e890cccfe35f9c335f Mon Sep 17 00:00:00 2001 From: yyforyongyu Date: Thu, 4 Aug 2022 01:22:05 +0800 Subject: [PATCH 14/47] itest: refactor `testChannelFundingInputTypes` --- lntest/itest/list_on_test.go | 4 + lntest/itest/lnd_funding_test.go | 149 ++++++++++++-------------- lntest/itest/lnd_test_list_on_test.go | 4 - 3 files changed, 74 
insertions(+), 83 deletions(-) diff --git a/lntest/itest/list_on_test.go b/lntest/itest/list_on_test.go index 5df30b400..b93088b10 100644 --- a/lntest/itest/list_on_test.go +++ b/lntest/itest/list_on_test.go @@ -119,4 +119,8 @@ var allTestCasesTemp = []*lntemp.TestCase{ Name: "onchain fund recovery", TestFunc: testOnchainFundRecovery, }, + { + Name: "basic funding flow with all input types", + TestFunc: testChannelFundingInputTypes, + }, } diff --git a/lntest/itest/lnd_funding_test.go b/lntest/itest/lnd_funding_test.go index 140d93050..84dea2b41 100644 --- a/lntest/itest/lnd_funding_test.go +++ b/lntest/itest/lnd_funding_test.go @@ -320,79 +320,72 @@ func testUnconfirmedChannelFunding(net *lntest.NetworkHarness, t *harnessTest) { // testChannelFundingInputTypes tests that any type of supported input type can // be used to fund channels. -func testChannelFundingInputTypes(net *lntest.NetworkHarness, t *harnessTest) { +func testChannelFundingInputTypes(ht *lntemp.HarnessTest) { const ( chanAmt = funding.MaxBtcFundingAmount burnAddr = "bcrt1qxsnqpdc842lu8c0xlllgvejt6rhy49u6fmpgyz" ) - addrTypes := []lnrpc.AddressType{ - lnrpc.AddressType_WITNESS_PUBKEY_HASH, - lnrpc.AddressType_NESTED_PUBKEY_HASH, - lnrpc.AddressType_TAPROOT_PUBKEY, + + fundWithTypes := []func(amt btcutil.Amount, target *node.HarnessNode){ + ht.FundCoins, ht.FundCoinsNP2WKH, ht.FundCoinsP2TR, } + alice := ht.Alice + // We'll start off by creating a node for Carol. - carol := net.NewNode(t.t, "Carol", nil) - defer shutdownAndAssert(net, t, carol) + carol := ht.NewNode("Carol", nil) // Now, we'll connect her to Alice so that they can open a // channel together. - net.ConnectNodes(t.t, carol, net.Alice) + ht.ConnectNodes(carol, alice) - for _, addrType := range addrTypes { + // Creates a helper closure to be used below which asserts the + // proper response to a channel balance RPC. + checkChannelBalance := func(node *node.HarnessNode, local, + remote, pendingLocal, pendingRemote btcutil.Amount) { + + expectedResponse := &lnrpc.ChannelBalanceResponse{ + LocalBalance: &lnrpc.Amount{ + Sat: uint64(local), + Msat: uint64(lnwire.NewMSatFromSatoshis(local)), + }, + RemoteBalance: &lnrpc.Amount{ + Sat: uint64(remote), + Msat: uint64(lnwire.NewMSatFromSatoshis( + remote, + )), + }, + PendingOpenLocalBalance: &lnrpc.Amount{ + Sat: uint64(pendingLocal), + Msat: uint64(lnwire.NewMSatFromSatoshis( + pendingLocal, + )), + }, + PendingOpenRemoteBalance: &lnrpc.Amount{ + Sat: uint64(pendingRemote), + Msat: uint64(lnwire.NewMSatFromSatoshis( + pendingRemote, + )), + }, + UnsettledLocalBalance: &lnrpc.Amount{}, + UnsettledRemoteBalance: &lnrpc.Amount{}, + // Deprecated fields. + Balance: int64(local), + PendingOpenBalance: int64(pendingLocal), + } + ht.AssertChannelBalanceResp(node, expectedResponse) + } + + for _, funder := range fundWithTypes { // We'll send her some confirmed funds. - err := net.SendCoinsOfType(chanAmt*2, carol, addrType, true) - require.NoErrorf( - t.t, err, "unable to send coins for carol and addr "+ - "type %v", addrType, - ) + funder(chanAmt*2, carol) - chanOpenUpdate := openChannelStream( - t, net, carol, net.Alice, lntest.OpenChannelParams{ + chanOpenUpdate := ht.OpenChannelAssertPending( + carol, alice, lntemp.OpenChannelParams{ Amt: chanAmt, }, ) - // Creates a helper closure to be used below which asserts the - // proper response to a channel balance RPC. 
- checkChannelBalance := func(node *lntest.HarnessNode, - local, remote, pendingLocal, - pendingRemote btcutil.Amount) { - - expectedResponse := &lnrpc.ChannelBalanceResponse{ - LocalBalance: &lnrpc.Amount{ - Sat: uint64(local), - Msat: uint64(lnwire.NewMSatFromSatoshis( - local, - )), - }, - RemoteBalance: &lnrpc.Amount{ - Sat: uint64(remote), - Msat: uint64(lnwire.NewMSatFromSatoshis( - remote, - )), - }, - PendingOpenLocalBalance: &lnrpc.Amount{ - Sat: uint64(pendingLocal), - Msat: uint64(lnwire.NewMSatFromSatoshis( - pendingLocal, - )), - }, - PendingOpenRemoteBalance: &lnrpc.Amount{ - Sat: uint64(pendingRemote), - Msat: uint64(lnwire.NewMSatFromSatoshis( - pendingRemote, - )), - }, - UnsettledLocalBalance: &lnrpc.Amount{}, - UnsettledRemoteBalance: &lnrpc.Amount{}, - // Deprecated fields. - Balance: int64(local), - PendingOpenBalance: int64(pendingLocal), - } - assertChannelBalanceResp(t, node, expectedResponse) - } - // As the channel is pending open, it's expected Carol has both // zero local and remote balances, and pending local/remote // should not be zero. @@ -405,52 +398,50 @@ func testChannelFundingInputTypes(net *lntest.NetworkHarness, t *harnessTest) { // For Alice, her local/remote balances should be zero, and the // local/remote balances are the mirror of Carol's. - checkChannelBalance(net.Alice, 0, 0, 0, carolLocalBalance) + checkChannelBalance(alice, 0, 0, 0, carolLocalBalance) // Confirm the channel and wait for it to be recognized by both // parties. Two transactions should be mined, the unconfirmed // spend and the funding tx. - mineBlocks(t, net, 6, 1) - chanPoint, err := net.WaitForChannelOpen(chanOpenUpdate) - require.NoError( - t.t, err, "error while waiting for channel open", - ) + ht.MineBlocksAndAssertNumTxes(1, 1) + chanPoint := ht.WaitForChannelOpenEvent(chanOpenUpdate) // With the channel open, we'll check the balances on each side // of the channel as a sanity check to ensure things worked out // as intended. checkChannelBalance(carol, carolLocalBalance, 0, 0, 0) - checkChannelBalance(net.Alice, 0, carolLocalBalance, 0, 0) + checkChannelBalance(alice, 0, carolLocalBalance, 0, 0) + + // TODO(yy): remove the sleep once the following bug is fixed. + // + // We may get the error `unable to gracefully close channel + // while peer is offline (try force closing it instead): + // channel link not found`. This happens because the channel + // link hasn't been added yet but we now proceed to closing the + // channel. We may need to revisit how the channel open event + // is created and make sure the event is only sent after all + // relevant states have been updated. + time.Sleep(2 * time.Second) // Now that we're done with the test, the channel can be closed. - closeChannelAndAssert(t, net, carol, chanPoint, false) + ht.CloseChannel(carol, chanPoint) // Empty out the wallet so there aren't any lingering coins. - sendAllCoinsConfirm(net, carol, t, burnAddr) + sendAllCoinsConfirm(ht, carol, burnAddr) } } // sendAllCoinsConfirm sends all coins of the node's wallet to the given address // and awaits one confirmation. 
-func sendAllCoinsConfirm(net *lntest.NetworkHarness, node *lntest.HarnessNode,
-	t *harnessTest, addr string) {
-
-	ctxb := context.Background()
-	ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout)
-	defer cancel()
+func sendAllCoinsConfirm(ht *lntemp.HarnessTest, node *node.HarnessNode,
+	addr string) {
 
 	sweepReq := &lnrpc.SendCoinsRequest{
 		Addr:    addr,
 		SendAll: true,
 	}
-	_, err := node.SendCoins(ctxt, sweepReq)
-	require.NoError(t.t, err)
-
-	// Make sure the unconfirmed tx is seen in the mempool.
-	_, err = waitForTxInMempool(net.Miner.Client, minerMempoolTimeout)
-	require.NoError(t.t, err, "failed to find tx in miner mempool")
-
-	mineBlocks(t, net, 1, 1)
+	node.RPC.SendCoins(sweepReq)
+	ht.MineBlocksAndAssertNumTxes(1, 1)
 }
 
 // testExternalFundingChanPoint tests that we're able to carry out a normal
diff --git a/lntest/itest/lnd_test_list_on_test.go b/lntest/itest/lnd_test_list_on_test.go
index 433cc71df..f204a9c7c 100644
--- a/lntest/itest/lnd_test_list_on_test.go
+++ b/lntest/itest/lnd_test_list_on_test.go
@@ -4,10 +4,6 @@ package itest
 
 var allTestCases = []*testCase{
-	{
-		name: "basic funding flow with all input types",
-		test: testChannelFundingInputTypes,
-	},
 	{
 		name: "unconfirmed channel funding",
 		test: testUnconfirmedChannelFunding,

From 6a66f3984f722b8a71e8f26f0da71a311a7844aa Mon Sep 17 00:00:00 2001
From: yyforyongyu
Date: Thu, 4 Aug 2022 01:35:18 +0800
Subject: [PATCH 15/47] lntemp+itest: refactor `testUnconfirmedChannelFunding`

---
 lntest/itest/list_on_test.go          |  4 ++
 lntest/itest/lnd_funding_test.go      | 55 ++++++++++++++++-----------
 lntest/itest/lnd_test_list_on_test.go |  4 --
 3 files changed, 37 insertions(+), 26 deletions(-)

diff --git a/lntest/itest/list_on_test.go b/lntest/itest/list_on_test.go
index b93088b10..f6b10af85 100644
--- a/lntest/itest/list_on_test.go
+++ b/lntest/itest/list_on_test.go
@@ -123,4 +123,8 @@ var allTestCasesTemp = []*lntemp.TestCase{
 		Name:     "basic funding flow with all input types",
 		TestFunc: testChannelFundingInputTypes,
 	},
+	{
+		Name:     "unconfirmed channel funding",
+		TestFunc: testUnconfirmedChannelFunding,
+	},
 }
diff --git a/lntest/itest/lnd_funding_test.go b/lntest/itest/lnd_funding_test.go
index 84dea2b41..486f9525c 100644
--- a/lntest/itest/lnd_funding_test.go
+++ b/lntest/itest/lnd_funding_test.go
@@ -218,31 +218,27 @@ func basicChannelFundingTest(ht *lntemp.HarnessTest,
 
 // testUnconfirmedChannelFunding tests that our unconfirmed change outputs can
 // be used to fund channels.
-func testUnconfirmedChannelFunding(net *lntest.NetworkHarness, t *harnessTest) {
+func testUnconfirmedChannelFunding(ht *lntemp.HarnessTest) {
 	const (
 		chanAmt = funding.MaxBtcFundingAmount
 		pushAmt = btcutil.Amount(100000)
 	)
 
 	// We'll start off by creating a node for Carol.
-	carol := net.NewNode(t.t, "Carol", nil)
-	defer shutdownAndAssert(net, t, carol)
+	carol := ht.NewNode("Carol", nil)
 
-	// We'll send her some confirmed funds.
-	net.SendCoinsUnconfirmed(t.t, chanAmt*2, carol)
+	alice := ht.Alice
 
-	// Make sure the unconfirmed tx is seen in the mempool.
-	_, err := waitForTxInMempool(net.Miner.Client, minerMempoolTimeout)
-	require.NoError(t.t, err, "failed to find tx in miner mempool")
+	// We'll send her some unconfirmed funds.
+	ht.FundCoinsUnconfirmed(2*chanAmt, carol)
 
 	// Now, we'll connect her to Alice so that they can open a channel
 	// together. The funding flow should select Carol's unconfirmed output
 	// as she doesn't have any other funds since it's a new node.
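One note on the open request that follows: as far as I can tell, the `SpendUnconfirmed` flag it sets simply relaxes the minimum-confirmation requirement on the inputs the funding flow may select. A minimal sketch of the assumed relationship between the two request fields (this is an assumption about lnd's internal handling, not something added by this series):

	// Assumed behavior, sketch only: allowing unconfirmed spends
	// effectively drops the minimum confirmation count on selected
	// inputs to zero; otherwise MinConfs is honored as given.
	minConfs := req.MinConfs
	if req.SpendUnconfirmed {
		minConfs = 0
	}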
- net.ConnectNodes(t.t, carol, net.Alice) + ht.ConnectNodes(carol, alice) - chanOpenUpdate := openChannelStream( - t, net, carol, net.Alice, - lntest.OpenChannelParams{ + chanOpenUpdate := ht.OpenChannelAssertPending( + carol, alice, lntemp.OpenChannelParams{ Amt: chanAmt, PushAmt: pushAmt, SpendUnconfirmed: true, @@ -251,7 +247,7 @@ func testUnconfirmedChannelFunding(net *lntest.NetworkHarness, t *harnessTest) { // Creates a helper closure to be used below which asserts the proper // response to a channel balance RPC. - checkChannelBalance := func(node *lntest.HarnessNode, + checkChannelBalance := func(node *node.HarnessNode, local, remote, pendingLocal, pendingRemote btcutil.Amount) { expectedResponse := &lnrpc.ChannelBalanceResponse{ @@ -285,7 +281,7 @@ func testUnconfirmedChannelFunding(net *lntest.NetworkHarness, t *harnessTest) { Balance: int64(local), PendingOpenBalance: int64(pendingLocal), } - assertChannelBalanceResp(t, node, expectedResponse) + ht.AssertChannelBalanceResp(node, expectedResponse) } // As the channel is pending open, it's expected Carol has both zero @@ -300,22 +296,37 @@ func testUnconfirmedChannelFunding(net *lntest.NetworkHarness, t *harnessTest) { // For Alice, her local/remote balances should be zero, and the // local/remote balances are the mirror of Carol's. - checkChannelBalance(net.Alice, 0, 0, pushAmt, carolLocalBalance) + checkChannelBalance(alice, 0, 0, pushAmt, carolLocalBalance) // Confirm the channel and wait for it to be recognized by both - // parties. Two transactions should be mined, the unconfirmed spend and - // the funding tx. - mineBlocks(t, net, 6, 2) - chanPoint, err := net.WaitForChannelOpen(chanOpenUpdate) - require.NoError(t.t, err, "error while waitinng for channel open") + // parties. For neutrino backend, the funding transaction should be + // mined. Otherwise, two transactions should be mined, the unconfirmed + // spend and the funding tx. + if ht.IsNeutrinoBackend() { + ht.MineBlocksAndAssertNumTxes(6, 1) + } else { + ht.MineBlocksAndAssertNumTxes(6, 2) + } + + chanPoint := ht.WaitForChannelOpenEvent(chanOpenUpdate) // With the channel open, we'll check the balances on each side of the // channel as a sanity check to ensure things worked out as intended. checkChannelBalance(carol, carolLocalBalance, pushAmt, 0, 0) - checkChannelBalance(net.Alice, pushAmt, carolLocalBalance, 0, 0) + checkChannelBalance(alice, pushAmt, carolLocalBalance, 0, 0) + + // TODO(yy): remove the sleep once the following bug is fixed. + // + // We may get the error `unable to gracefully close channel while peer + // is offline (try force closing it instead): channel link not found`. + // This happens because the channel link hasn't been added yet but we + // now proceed to closing the channel. We may need to revisit how the + // channel open event is created and make sure the event is only sent + // after all relevant states have been updated. + time.Sleep(2 * time.Second) // Now that we're done with the test, the channel can be closed. 
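On the TODO a few lines up: until the underlying bug is fixed, the fixed two-second sleep is the pragmatic choice, but the eventual fix would likely be a poll. A minimal sketch, assuming a hypothetical `channelLinkActive` predicate that is purely illustrative and not an API added by this series:

	// Sketch only: gate the close on the link being active rather
	// than sleeping for a fixed interval. channelLinkActive is a
	// hypothetical helper used purely for illustration.
	err := wait.NoError(func() error {
		if !channelLinkActive(carol, chanPoint) {
			return fmt.Errorf("channel link not yet active")
		}
		return nil
	}, defaultTimeout)
	require.NoError(ht, err, "channel link never became active")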
- closeChannelAndAssert(t, net, carol, chanPoint, false) + ht.CloseChannel(carol, chanPoint) } // testChannelFundingInputTypes tests that any type of supported input type can diff --git a/lntest/itest/lnd_test_list_on_test.go b/lntest/itest/lnd_test_list_on_test.go index f204a9c7c..9c65aca59 100644 --- a/lntest/itest/lnd_test_list_on_test.go +++ b/lntest/itest/lnd_test_list_on_test.go @@ -4,10 +4,6 @@ package itest var allTestCases = []*testCase{ - { - name: "unconfirmed channel funding", - test: testUnconfirmedChannelFunding, - }, { name: "update channel policy", test: testUpdateChannelPolicy, From 21097feb858a3517bcf97bf46b7e7e38d17fd78e Mon Sep 17 00:00:00 2001 From: yyforyongyu Date: Thu, 4 Aug 2022 02:36:12 +0800 Subject: [PATCH 16/47] multi: refactor `testFundingPersistence` This commit refactors the test `testFundingPersistence`. In addition, it also changes the old `OpenChannelAssertPending` method and adds a new method `OpenChannelAssertStream` for clarity. --- lntemp/harness.go | 35 ++++++- lntemp/harness_assertion.go | 24 +++++ lntemp/rpc/lnd.go | 10 +- lntemp/utils.go | 14 +++ lntest/itest/list_on_test.go | 4 + lntest/itest/lnd_funding_test.go | 126 +++++++++----------------- lntest/itest/lnd_misc_test.go | 6 +- lntest/itest/lnd_test_list_on_test.go | 4 - 8 files changed, 127 insertions(+), 96 deletions(-) diff --git a/lntemp/harness.go b/lntemp/harness.go index 397ba3524..36d03a94c 100644 --- a/lntemp/harness.go +++ b/lntemp/harness.go @@ -734,8 +734,9 @@ func (h *HarnessTest) prepareOpenChannel(srcNode, destNode *node.HarnessNode, // destNode with the passed channel funding parameters. Once the `OpenChannel` // is called, it will consume the first event it receives from the open channel // client and asserts it's a channel pending event. -func (h *HarnessTest) OpenChannelAssertPending(srcNode, - destNode *node.HarnessNode, p OpenChannelParams) rpc.OpenChanClient { +func (h *HarnessTest) openChannelAssertPending(srcNode, + destNode *node.HarnessNode, + p OpenChannelParams) (*lnrpc.PendingUpdate, rpc.OpenChanClient) { // Prepare the request and open the channel. openReq := h.prepareOpenChannel(srcNode, destNode, p) @@ -747,11 +748,35 @@ func (h *HarnessTest) OpenChannelAssertPending(srcNode, resp := h.ReceiveOpenChannelUpdate(respStream) // Check that the update is channel pending. - _, ok := resp.Update.(*lnrpc.OpenStatusUpdate_ChanPending) + update, ok := resp.Update.(*lnrpc.OpenStatusUpdate_ChanPending) require.Truef(h, ok, "expected channel pending: update, instead got %v", resp) - return respStream + return update.ChanPending, respStream +} + +// OpenChannelAssertPending attempts to open a channel between srcNode and +// destNode with the passed channel funding parameters. Once the `OpenChannel` +// is called, it will consume the first event it receives from the open channel +// client and asserts it's a channel pending event. It returns the +// `PendingUpdate`. +func (h *HarnessTest) OpenChannelAssertPending(srcNode, + destNode *node.HarnessNode, p OpenChannelParams) *lnrpc.PendingUpdate { + + resp, _ := h.openChannelAssertPending(srcNode, destNode, p) + return resp +} + +// OpenChannelAssertStream attempts to open a channel between srcNode and +// destNode with the passed channel funding parameters. Once the `OpenChannel` +// is called, it will consume the first event it receives from the open channel +// client and asserts it's a channel pending event. It returns the open channel +// stream. 
+func (h *HarnessTest) OpenChannelAssertStream(srcNode, + destNode *node.HarnessNode, p OpenChannelParams) rpc.OpenChanClient { + + _, stream := h.openChannelAssertPending(srcNode, destNode, p) + return stream } // OpenChannel attempts to open a channel with the specified parameters @@ -763,7 +788,7 @@ func (h *HarnessTest) OpenChannelAssertPending(srcNode, func (h *HarnessTest) OpenChannel(alice, bob *node.HarnessNode, p OpenChannelParams) *lnrpc.ChannelPoint { - chanOpenUpdate := h.OpenChannelAssertPending(alice, bob, p) + chanOpenUpdate := h.OpenChannelAssertStream(alice, bob, p) // Mine 6 blocks, then wait for Alice's node to notify us that the // channel has been opened. The funding transaction should be found diff --git a/lntemp/harness_assertion.go b/lntemp/harness_assertion.go index 41912d355..1b9c29a9c 100644 --- a/lntemp/harness_assertion.go +++ b/lntemp/harness_assertion.go @@ -1479,3 +1479,27 @@ func (h *HarnessTest) AssertZombieChannel(hn *node.HarnessNode, chanID uint64) { }, DefaultTimeout) require.NoError(h, err, "timeout while checking zombie channel") } + +// AssertTxAtHeight gets all of the transactions that a node's wallet has a +// record of at the target height, and finds and returns the tx with the target +// txid, failing if it is not found. +func (h *HarnessTest) AssertTxAtHeight(hn *node.HarnessNode, height int32, + txid *chainhash.Hash) *lnrpc.Transaction { + + req := &lnrpc.GetTransactionsRequest{ + StartHeight: height, + EndHeight: height, + } + txns := hn.RPC.GetTransactions(req) + + for _, tx := range txns.Transactions { + if tx.TxHash == txid.String() { + return tx + } + } + + require.Failf(h, "fail to find tx", "tx:%v not found at height:%v", + txid, height) + + return nil +} diff --git a/lntemp/rpc/lnd.go b/lntemp/rpc/lnd.go index 7f7261233..5cf5d5b2b 100644 --- a/lntemp/rpc/lnd.go +++ b/lntemp/rpc/lnd.go @@ -346,11 +346,17 @@ func (h *HarnessRPC) SendCoinsAssertErr(req *lnrpc.SendCoinsRequest) { } // GetTransactions makes a RPC call to GetTransactions and asserts. -func (h *HarnessRPC) GetTransactions() *lnrpc.TransactionDetails { +func (h *HarnessRPC) GetTransactions( + req *lnrpc.GetTransactionsRequest) *lnrpc.TransactionDetails { + ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout) defer cancel() - resp, err := h.LN.GetTransactions(ctxt, &lnrpc.GetTransactionsRequest{}) + if req == nil { + req = &lnrpc.GetTransactionsRequest{} + } + + resp, err := h.LN.GetTransactions(ctxt, req) require.NoErrorf(h, err, "failed to GetTransactions for %s", h.Name) return resp diff --git a/lntemp/utils.go b/lntemp/utils.go index d6d279765..590380423 100644 --- a/lntemp/utils.go +++ b/lntemp/utils.go @@ -8,6 +8,7 @@ import ( "strconv" "strings" + "github.com/lightningnetwork/lnd/lnrpc" "github.com/lightningnetwork/lnd/lntest" ) @@ -92,3 +93,16 @@ func ParseDerivationPath(path string) ([]uint32, error) { return indices, nil } + +// ChanPointFromPendingUpdate constructs a channel point from a lnrpc pending +// update. 
+func ChanPointFromPendingUpdate(pu *lnrpc.PendingUpdate) *lnrpc.ChannelPoint { + chanPoint := &lnrpc.ChannelPoint{ + FundingTxid: &lnrpc.ChannelPoint_FundingTxidBytes{ + FundingTxidBytes: pu.Txid, + }, + OutputIndex: pu.OutputIndex, + } + + return chanPoint +} diff --git a/lntest/itest/list_on_test.go b/lntest/itest/list_on_test.go index f6b10af85..cc875f805 100644 --- a/lntest/itest/list_on_test.go +++ b/lntest/itest/list_on_test.go @@ -127,4 +127,8 @@ var allTestCasesTemp = []*lntemp.TestCase{ Name: "unconfirmed channel funding", TestFunc: testUnconfirmedChannelFunding, }, + { + Name: "funding flow persistence", + TestFunc: testChannelFundingPersistence, + }, } diff --git a/lntest/itest/lnd_funding_test.go b/lntest/itest/lnd_funding_test.go index 486f9525c..5ad24f948 100644 --- a/lntest/itest/lnd_funding_test.go +++ b/lntest/itest/lnd_funding_test.go @@ -237,7 +237,7 @@ func testUnconfirmedChannelFunding(ht *lntemp.HarnessTest) { // as she doesn't have any other funds since it's a new node. ht.ConnectNodes(carol, alice) - chanOpenUpdate := ht.OpenChannelAssertPending( + chanOpenUpdate := ht.OpenChannelAssertStream( carol, alice, lntemp.OpenChannelParams{ Amt: chanAmt, PushAmt: pushAmt, @@ -391,7 +391,7 @@ func testChannelFundingInputTypes(ht *lntemp.HarnessTest) { // We'll send her some confirmed funds. funder(chanAmt*2, carol) - chanOpenUpdate := ht.OpenChannelAssertPending( + chanOpenUpdate := ht.OpenChannelAssertStream( carol, alice, lntemp.OpenChannelParams{ Amt: chanAmt, }, @@ -572,7 +572,7 @@ func testExternalFundingChanPoint(ht *lntemp.HarnessTest) { // representation of channels if the system is restarted or disconnected. // testFundingPersistence mirrors testBasicChannelFunding, but adds restarts // and checks for the state of channels with unconfirmed funding transactions. -func testChannelFundingPersistence(net *lntest.NetworkHarness, t *harnessTest) { +func testChannelFundingPersistence(ht *lntemp.HarnessTest) { chanAmt := funding.MaxBtcFundingAmount pushAmt := btcutil.Amount(0) @@ -580,140 +580,102 @@ func testChannelFundingPersistence(net *lntest.NetworkHarness, t *harnessTest) { // confirmation before it's open, with the current set of defaults, // we'll need to create a new node instance. const numConfs = 5 - carolArgs := []string{fmt.Sprintf("--bitcoin.defaultchanconfs=%v", numConfs)} - carol := net.NewNode(t.t, "Carol", carolArgs) + carolArgs := []string{ + fmt.Sprintf("--bitcoin.defaultchanconfs=%v", numConfs), + } + carol := ht.NewNode("Carol", carolArgs) - // Clean up carol's node when the test finishes. - defer shutdownAndAssert(net, t, carol) - - net.ConnectNodes(t.t, net.Alice, carol) + alice := ht.Alice + ht.ConnectNodes(alice, carol) // Create a new channel that requires 5 confs before it's considered // open, then broadcast the funding transaction - pendingUpdate, err := net.OpenPendingChannel( - net.Alice, carol, chanAmt, pushAmt, - ) - if err != nil { - t.Fatalf("unable to open channel: %v", err) + param := lntemp.OpenChannelParams{ + Amt: chanAmt, + PushAmt: pushAmt, } + update := ht.OpenChannelAssertPending(alice, carol, param) // At this point, the channel's funding transaction will have been // broadcast, but not confirmed. Alice and Bob's nodes should reflect // this when queried via RPC. - assertNumOpenChannelsPending(t, net.Alice, carol, 1) + ht.AssertNumPendingOpenChannels(alice, 1) + ht.AssertNumPendingOpenChannels(carol, 1) // Restart both nodes to test that the appropriate state has been // persisted and that both nodes recover gracefully. 
- if err := net.RestartNode(net.Alice, nil); err != nil { - t.Fatalf("Node restart failed: %v", err) - } - if err := net.RestartNode(carol, nil); err != nil { - t.Fatalf("Node restart failed: %v", err) - } + ht.RestartNode(alice) + ht.RestartNode(carol) - fundingTxID, err := chainhash.NewHash(pendingUpdate.Txid) - if err != nil { - t.Fatalf("unable to convert funding txid into chainhash.Hash:"+ - " %v", err) - } - fundingTxStr := fundingTxID.String() + fundingTxID, err := chainhash.NewHash(update.Txid) + require.NoError(ht, err, "unable to convert funding txid "+ + "into chainhash.Hash") // Mine a block, then wait for Alice's node to notify us that the // channel has been opened. The funding transaction should be found // within the newly mined block. - block := mineBlocks(t, net, 1, 1)[0] - assertTxInBlock(t, block, fundingTxID) + block := ht.MineBlocksAndAssertNumTxes(1, 1)[0] + ht.Miner.AssertTxInBlock(block, fundingTxID) // Get the height that our transaction confirmed at. - _, height, err := net.Miner.Client.GetBestBlock() - require.NoError(t.t, err, "could not get best block") + _, height := ht.Miner.GetBestBlock() // Restart both nodes to test that the appropriate state has been // persisted and that both nodes recover gracefully. - if err := net.RestartNode(net.Alice, nil); err != nil { - t.Fatalf("Node restart failed: %v", err) - } - if err := net.RestartNode(carol, nil); err != nil { - t.Fatalf("Node restart failed: %v", err) - } + ht.RestartNode(alice) + ht.RestartNode(carol) // The following block ensures that after both nodes have restarted, // they have reconnected before the execution of the next test. - net.EnsureConnected(t.t, net.Alice, carol) + ht.EnsureConnected(alice, carol) // Next, mine enough blocks s.t the channel will open with a single // additional block mined. - if _, err := net.Miner.Client.Generate(3); err != nil { - t.Fatalf("unable to mine blocks: %v", err) - } + ht.MineBlocks(3) // Assert that our wallet has our opening transaction with a label // that does not have a channel ID set yet, because we have not // reached our required confirmations. - tx := findTxAtHeight(t, height, fundingTxStr, net.Alice) + tx := ht.AssertTxAtHeight(alice, height, fundingTxID) // At this stage, we expect the transaction to be labelled, but not with // our channel ID because our transaction has not yet confirmed. label := labels.MakeLabel(labels.LabelTypeChannelOpen, nil) - require.Equal(t.t, label, tx.Label, "open channel label wrong") + require.Equal(ht, label, tx.Label, "open channel label wrong") // Both nodes should still show a single channel as pending. - time.Sleep(time.Second * 1) - assertNumOpenChannelsPending(t, net.Alice, carol, 1) + ht.AssertNumPendingOpenChannels(alice, 1) + ht.AssertNumPendingOpenChannels(carol, 1) // Finally, mine the last block which should mark the channel as open. - if _, err := net.Miner.Client.Generate(1); err != nil { - t.Fatalf("unable to mine blocks: %v", err) - } + ht.MineBlocks(1) // At this point, the channel should be fully opened and there should // be no pending channels remaining for either node. - time.Sleep(time.Second * 1) - assertNumOpenChannelsPending(t, net.Alice, carol, 0) + ht.AssertNumPendingOpenChannels(alice, 0) + ht.AssertNumPendingOpenChannels(carol, 0) // The channel should be listed in the peer information returned by // both peers. 
- outPoint := wire.OutPoint{ - Hash: *fundingTxID, - Index: pendingUpdate.OutputIndex, - } + chanPoint := lntemp.ChanPointFromPendingUpdate(update) // Re-lookup our transaction in the block that it confirmed in. - tx = findTxAtHeight(t, height, fundingTxStr, net.Alice) + tx = ht.AssertTxAtHeight(alice, height, fundingTxID) + + // Check both nodes to ensure that the channel is ready for operation. + chanAlice := ht.AssertChannelExists(alice, chanPoint) + ht.AssertChannelExists(carol, chanPoint) // Create an additional check for our channel assertion that will // check that our label is as expected. - check := func(channel *lnrpc.Channel) { - shortChanID := lnwire.NewShortChanIDFromInt( - channel.ChanId, - ) - - label := labels.MakeLabel( - labels.LabelTypeChannelOpen, &shortChanID, - ) - require.Equal(t.t, label, tx.Label, - "open channel label not updated") - } - - // Check both nodes to ensure that the channel is ready for operation. - err = net.AssertChannelExists(net.Alice, &outPoint, check) - if err != nil { - t.Fatalf("unable to assert channel existence: %v", err) - } - if err := net.AssertChannelExists(carol, &outPoint); err != nil { - t.Fatalf("unable to assert channel existence: %v", err) - } + shortChanID := lnwire.NewShortChanIDFromInt(chanAlice.ChanId) + label = labels.MakeLabel(labels.LabelTypeChannelOpen, &shortChanID) + require.Equal(ht, label, tx.Label, "open channel label not updated") // Finally, immediately close the channel. This function will also // block until the channel is closed and will additionally assert the // relevant channel closing post conditions. - chanPoint := &lnrpc.ChannelPoint{ - FundingTxid: &lnrpc.ChannelPoint_FundingTxidBytes{ - FundingTxidBytes: pendingUpdate.Txid, - }, - OutputIndex: pendingUpdate.OutputIndex, - } - closeChannelAndAssert(t, net, net.Alice, chanPoint, false) + ht.CloseChannel(alice, chanPoint) } // testBatchChanFunding makes sure multiple channels can be opened in one batch diff --git a/lntest/itest/lnd_misc_test.go b/lntest/itest/lnd_misc_test.go index 65847e989..9d80c9b3e 100644 --- a/lntest/itest/lnd_misc_test.go +++ b/lntest/itest/lnd_misc_test.go @@ -53,7 +53,7 @@ func testDisconnectingTargetPeer(ht *lntemp.HarnessTest) { Amt: chanAmt, PushAmt: pushAmt, } - stream := ht.OpenChannelAssertPending(alice, bob, p) + stream := ht.OpenChannelAssertStream(alice, bob, p) // At this point, the channel's funding transaction will have been // broadcast, but not confirmed. Alice and Bob's nodes should reflect @@ -361,7 +361,7 @@ func testMaxPendingChannels(ht *lntemp.HarnessTest) { []lnrpc.Lightning_OpenChannelClient, maxPendingChannels, ) for i := 0; i < maxPendingChannels; i++ { - stream := ht.OpenChannelAssertPending( + stream := ht.OpenChannelAssertStream( alice, carol, lntemp.OpenChannelParams{ Amt: amount, }, @@ -823,7 +823,7 @@ func testSweepAllCoins(ht *lntemp.HarnessTest) { assertTxLabel := func(targetTx, label string) error { // List all transactions relevant to our wallet, and find the // tx so that we can check the correct label has been set. 
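The one-line call-site change just below relies on a convention the new optional-request wrappers in this series appear to adopt: passing nil stands for "use the default request". A minimal sketch of the pattern, mirroring the `GetRecoveryInfo` and `GetTransactions` wrapper bodies shown earlier:

	// Convention used by the optional-request wrappers: a nil
	// request is swapped for an empty default before the RPC is
	// issued, so call sites can simply pass nil in the common case.
	if req == nil {
		req = &lnrpc.GetTransactionsRequest{}
	}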
-	txResp := ainz.RPC.GetTransactions()
+	txResp := ainz.RPC.GetTransactions(nil)
 
 		var target *lnrpc.Transaction
 
diff --git a/lntest/itest/lnd_test_list_on_test.go b/lntest/itest/lnd_test_list_on_test.go
index 9c65aca59..4d069d853 100644
--- a/lntest/itest/lnd_test_list_on_test.go
+++ b/lntest/itest/lnd_test_list_on_test.go
@@ -24,10 +24,6 @@ var allTestCases = []*testCase{
 		name: "graph topology notifications",
 		test: testGraphTopologyNotifications,
 	},
-	{
-		name: "funding flow persistence",
-		test: testChannelFundingPersistence,
-	},
 	{
 		name: "channel force closure",
 		test: testChannelForceClosure,

From 1d7c568fcffb363ff6aec83203b6a0350022fbc0 Mon Sep 17 00:00:00 2001
From: yyforyongyu
Date: Thu, 4 Aug 2022 02:48:19 +0800
Subject: [PATCH 17/47] lntemp+itest: refactor `testBatchChanFunding`

---
 lntemp/rpc/lnd.go                     | 27 +++++++++
 lntest/itest/list_on_test.go          |  4 ++
 lntest/itest/lnd_funding_test.go      | 79 +++++++++------------
 lntest/itest/lnd_test_list_on_test.go |  4 --
 4 files changed, 58 insertions(+), 56 deletions(-)

diff --git a/lntemp/rpc/lnd.go b/lntemp/rpc/lnd.go
index 5cf5d5b2b..360277369 100644
--- a/lntemp/rpc/lnd.go
+++ b/lntemp/rpc/lnd.go
@@ -405,3 +405,30 @@ func (h *HarnessRPC) GetRecoveryInfo(
 
 	return resp
 }
+
+// BatchOpenChannel makes a RPC call to BatchOpenChannel and asserts.
+func (h *HarnessRPC) BatchOpenChannel(
+	req *lnrpc.BatchOpenChannelRequest) *lnrpc.BatchOpenChannelResponse {
+
+	ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
+	defer cancel()
+
+	resp, err := h.LN.BatchOpenChannel(ctxt, req)
+	require.NoErrorf(h, err, "failed to batch open channel")
+
+	return resp
+}
+
+// BatchOpenChannelAssertErr makes a RPC call to BatchOpenChannel and asserts
+// there's an error returned.
+func (h *HarnessRPC) BatchOpenChannelAssertErr(
+	req *lnrpc.BatchOpenChannelRequest) error {
+
+	ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
+	defer cancel()
+
+	_, err := h.LN.BatchOpenChannel(ctxt, req)
+	require.Error(h, err, "expected batch open channel to fail")
+
+	return err
+}
diff --git a/lntest/itest/list_on_test.go b/lntest/itest/list_on_test.go
index cc875f805..c6bac981e 100644
--- a/lntest/itest/list_on_test.go
+++ b/lntest/itest/list_on_test.go
@@ -131,4 +131,8 @@ var allTestCasesTemp = []*lntemp.TestCase{
 		Name:     "funding flow persistence",
 		TestFunc: testChannelFundingPersistence,
 	},
+	{
+		Name:     "batch channel funding",
+		TestFunc: testBatchChanFunding,
+	},
 }
diff --git a/lntest/itest/lnd_funding_test.go b/lntest/itest/lnd_funding_test.go
index 5ad24f948..bd57997ee 100644
--- a/lntest/itest/lnd_funding_test.go
+++ b/lntest/itest/lnd_funding_test.go
@@ -1,7 +1,6 @@
 package itest
 
 import (
-	"context"
 	"fmt"
 	"testing"
 	"time"
@@ -16,7 +15,6 @@ import (
 	"github.com/lightningnetwork/lnd/lnrpc/signrpc"
 	"github.com/lightningnetwork/lnd/lntemp"
 	"github.com/lightningnetwork/lnd/lntemp/node"
-	"github.com/lightningnetwork/lnd/lntest"
 	"github.com/lightningnetwork/lnd/lnwire"
 	"github.com/stretchr/testify/require"
 )
@@ -680,23 +678,20 @@ func testChannelFundingPersistence(ht *lntemp.HarnessTest) {
 
 // testBatchChanFunding makes sure multiple channels can be opened in one batch
 // transaction in an atomic way.
-func testBatchChanFunding(net *lntest.NetworkHarness, t *harnessTest) {
-	ctxb := context.Background()
-
+func testBatchChanFunding(ht *lntemp.HarnessTest) {
 	// First, we'll create two new nodes that we'll use to open channels
 	// to during this test. Carol has a high minimum funding amount that
 	// we'll use to trigger an error during the batch channel open.
- carol := net.NewNode(t.t, "carol", []string{"--minchansize=200000"}) - defer shutdownAndAssert(net, t, carol) + carol := ht.NewNode("carol", []string{"--minchansize=200000"}) + dave := ht.NewNode("dave", nil) - dave := net.NewNode(t.t, "dave", nil) - defer shutdownAndAssert(net, t, dave) + alice, bob := ht.Alice, ht.Bob // Before we start the test, we'll ensure Alice is connected to Carol // and Dave so she can open channels to both of them (and Bob). - net.EnsureConnected(t.t, net.Alice, net.Bob) - net.EnsureConnected(t.t, net.Alice, carol) - net.EnsureConnected(t.t, net.Alice, dave) + ht.EnsureConnected(alice, bob) + ht.EnsureConnected(alice, carol) + ht.EnsureConnected(alice, dave) // Let's create our batch TX request. This first one should fail as we // open a channel to Carol that is too small for her min chan size. @@ -704,7 +699,7 @@ func testBatchChanFunding(net *lntest.NetworkHarness, t *harnessTest) { SatPerVbyte: 12, MinConfs: 1, Channels: []*lnrpc.BatchOpenChannel{{ - NodePubkey: net.Bob.PubKey[:], + NodePubkey: bob.PubKey[:], LocalFundingAmount: 100_000, }, { NodePubkey: carol.PubKey[:], @@ -715,22 +710,16 @@ func testBatchChanFunding(net *lntest.NetworkHarness, t *harnessTest) { }}, } - ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout) - defer cancel() - _, err := net.Alice.BatchOpenChannel(ctxt, batchReq) - require.Error(t.t, err) - require.Contains(t.t, err.Error(), "initial negotiation failed") + err := alice.RPC.BatchOpenChannelAssertErr(batchReq) + require.Contains(ht, err.Error(), "initial negotiation failed") - // Let's fix the minimum amount for Carol now and try again. + // Let's fix the minimum amount for Alice now and try again. batchReq.Channels[1].LocalFundingAmount = 200_000 - ctxt, cancel = context.WithTimeout(ctxb, defaultTimeout) - defer cancel() - batchResp, err := net.Alice.BatchOpenChannel(ctxt, batchReq) - require.NoError(t.t, err) - require.Len(t.t, batchResp.PendingChannels, 3) + batchResp := alice.RPC.BatchOpenChannel(batchReq) + require.Len(ht, batchResp.PendingChannels, 3) txHash, err := chainhash.NewHash(batchResp.PendingChannels[0].Txid) - require.NoError(t.t, err) + require.NoError(ht, err) chanPoint1 := &lnrpc.ChannelPoint{ FundingTxid: &lnrpc.ChannelPoint_FundingTxidBytes{ @@ -751,23 +740,16 @@ func testBatchChanFunding(net *lntest.NetworkHarness, t *harnessTest) { OutputIndex: batchResp.PendingChannels[2].OutputIndex, } - block := mineBlocks(t, net, 6, 1)[0] - assertTxInBlock(t, block, txHash) - err = net.Alice.WaitForNetworkChannelOpen(chanPoint1) - require.NoError(t.t, err) - err = net.Alice.WaitForNetworkChannelOpen(chanPoint2) - require.NoError(t.t, err) - err = net.Alice.WaitForNetworkChannelOpen(chanPoint3) - require.NoError(t.t, err) + block := ht.MineBlocksAndAssertNumTxes(6, 1)[0] + ht.Miner.AssertTxInBlock(block, txHash) + ht.AssertTopologyChannelOpen(alice, chanPoint1) + ht.AssertTopologyChannelOpen(alice, chanPoint2) + ht.AssertTopologyChannelOpen(alice, chanPoint3) - // With the channel open, ensure that it is counted towards Carol's + // With the channel open, ensure that it is counted towards Alice's // total channel balance. 
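As a side note, the three channel points assembled by hand a few lines above look like candidates for the `ChanPointFromPendingUpdate` helper introduced with the previous patch; a sketch of the equivalent call, assuming the entries of `batchResp.PendingChannels` are `*lnrpc.PendingUpdate` values as their `Txid`/`OutputIndex` usage suggests:

	// Sketch only: derive the first channel point via the helper
	// instead of building the ChannelPoint struct inline.
	chanPoint1 := lntemp.ChanPointFromPendingUpdate(
		batchResp.PendingChannels[0],
	)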
-	balReq := &lnrpc.ChannelBalanceRequest{}
-	ctxt, cancel = context.WithTimeout(ctxb, defaultTimeout)
-	defer cancel()
-	balRes, err := net.Alice.ChannelBalance(ctxt, balReq)
-	require.NoError(t.t, err)
-	require.NotEqual(t.t, int64(0), balRes.LocalBalance.Sat)
+	balRes := alice.RPC.ChannelBalance()
+	require.NotEqual(ht, int64(0), balRes.LocalBalance.Sat)
 
 	// Next, to make sure the channel functions as normal, we'll make some
 	// payments within the channel.
 	payAmt := btcutil.Amount(100000)
 	invoice := &lnrpc.Invoice{
 		Memo:  "new chans",
 		Value: int64(payAmt),
 	}
-	ctxt, cancel = context.WithTimeout(ctxb, defaultTimeout)
-	defer cancel()
-	resp, err := carol.AddInvoice(ctxt, invoice)
-	require.NoError(t.t, err)
-	err = completePaymentRequests(
-		net.Alice, net.Alice.RouterClient,
-		[]string{resp.PaymentRequest}, true,
-	)
-	require.NoError(t.t, err)
+	resp := carol.RPC.AddInvoice(invoice)
+	ht.CompletePaymentRequests(alice, []string{resp.PaymentRequest})
 
 	// To conclude, we'll close the newly created channel between Carol and
 	// Dave. This function will also block until the channel is closed and
 	// will additionally assert the relevant channel closing post
 	// conditions.
-	closeChannelAndAssert(t, net, net.Alice, chanPoint1, false)
-	closeChannelAndAssert(t, net, net.Alice, chanPoint2, false)
-	closeChannelAndAssert(t, net, net.Alice, chanPoint3, false)
+	ht.CloseChannel(alice, chanPoint1)
+	ht.CloseChannel(alice, chanPoint2)
+	ht.CloseChannel(alice, chanPoint3)
 }
 
 // deriveFundingShim creates a channel funding shim by deriving the necessary
diff --git a/lntest/itest/lnd_test_list_on_test.go b/lntest/itest/lnd_test_list_on_test.go
index 4d069d853..ab0bd99cf 100644
--- a/lntest/itest/lnd_test_list_on_test.go
+++ b/lntest/itest/lnd_test_list_on_test.go
@@ -227,10 +227,6 @@ var allTestCases = []*testCase{
 		name: "sign psbt",
 		test: testSignPsbt,
 	},
-	{
-		name: "batch channel funding",
-		test: testBatchChanFunding,
-	},
 	{
 		name: "psbt channel funding single step",
 		test: testPsbtChanFundingSingleStep,

From 22f8f834126a9b8e319fdc0f9d5714ab5eb1b777 Mon Sep 17 00:00:00 2001
From: yyforyongyu
Date: Thu, 4 Aug 2022 03:38:09 +0800
Subject: [PATCH 18/47] multi: refactor `testUpdateChannelPolicy`

---
 lntemp/harness.go                       |  23 ++
 lntemp/harness_assertion.go             |  41 +++
 lntemp/node/watcher.go                  |   6 +-
 lntemp/rpc/lnd.go                       |  50 ++++
 lntemp/utils.go                         |  16 ++
 lntest/itest/list_on_test.go            |   4 +
 lntest/itest/lnd_channel_policy_test.go | 360 ++++++++++--------------
 lntest/itest/lnd_test_list_on_test.go   |   4 -
 8 files changed, 290 insertions(+), 214 deletions(-)

diff --git a/lntemp/harness.go b/lntemp/harness.go
index 36d03a94c..f15fb443a 100644
--- a/lntemp/harness.go
+++ b/lntemp/harness.go
@@ -1321,3 +1321,26 @@ func (h *HarnessTest) QueryChannelByChanPoint(hn *node.HarnessNode,
 	require.NoError(h, err, "failed to query channel")
 	return channel
 }
+
+// SendPaymentAndAssertStatus sends a payment from the passed node and asserts
+// the desired status is reached.
+func (h *HarnessTest) SendPaymentAndAssertStatus(hn *node.HarnessNode,
+	req *routerrpc.SendPaymentRequest,
+	status lnrpc.Payment_PaymentStatus) *lnrpc.Payment {
+
+	stream := hn.RPC.SendPayment(req)
+	return h.AssertPaymentStatusFromStream(stream, status)
+}
+
+// SendPaymentAssertFail sends a payment from the passed node and asserts that
+// the payment fails with the specified failure reason.
+func (h *HarnessTest) SendPaymentAssertFail(hn *node.HarnessNode, + req *routerrpc.SendPaymentRequest, + reason lnrpc.PaymentFailureReason) *lnrpc.Payment { + + payment := h.SendPaymentAndAssertStatus(hn, req, lnrpc.Payment_FAILED) + require.Equal(h, reason, payment.FailureReason, + "payment failureReason not matched") + + return payment +} diff --git a/lntemp/harness_assertion.go b/lntemp/harness_assertion.go index 1b9c29a9c..87704dc05 100644 --- a/lntemp/harness_assertion.go +++ b/lntemp/harness_assertion.go @@ -1503,3 +1503,44 @@ func (h *HarnessTest) AssertTxAtHeight(hn *node.HarnessNode, height int32, return nil } + +// getChannelPolicies queries the channel graph and retrieves the current edge +// policies for the provided channel point. +func (h *HarnessTest) getChannelPolicies(hn *node.HarnessNode, + advertisingNode string, + cp *lnrpc.ChannelPoint) (*lnrpc.RoutingPolicy, error) { + + req := &lnrpc.ChannelGraphRequest{IncludeUnannounced: true} + chanGraph := hn.RPC.DescribeGraph(req) + + cpStr := channelPointStr(cp) + for _, e := range chanGraph.Edges { + if e.ChanPoint != cpStr { + continue + } + + if e.Node1Pub == advertisingNode { + return e.Node1Policy, nil + } + + return e.Node2Policy, nil + } + + // If we've iterated over all the known edges and we weren't + // able to find this specific one, then we'll fail. + return nil, fmt.Errorf("did not find edge with advertisingNode: %s"+ + ", channel point: %s", advertisingNode, cpStr) +} + +// AssertChannelPolicy asserts that the passed node's known channel policy for +// the passed chanPoint is consistent with the expected policy values. +func (h *HarnessTest) AssertChannelPolicy(hn *node.HarnessNode, + advertisingNode string, expectedPolicy *lnrpc.RoutingPolicy, + chanPoint *lnrpc.ChannelPoint) { + + policy, err := h.getChannelPolicies(hn, advertisingNode, chanPoint) + require.NoErrorf(h, err, "%s: failed to find policy", hn.Name()) + + err = node.CheckChannelPolicy(policy, expectedPolicy) + require.NoErrorf(h, err, "%s: check policy failed", hn.Name()) +} diff --git a/lntemp/node/watcher.go b/lntemp/node/watcher.go index eeb68534a..1425ef1dd 100644 --- a/lntemp/node/watcher.go +++ b/lntemp/node/watcher.go @@ -559,7 +559,7 @@ func (nw *nodeWatcher) handlePolicyUpdateWatchRequest(req *chanWatchRequest) { // Check if the latest policy is matched. policy := policies[len(policies)-1] - if checkChannelPolicy(policy.RoutingPolicy, req.policy) == nil { + if CheckChannelPolicy(policy.RoutingPolicy, req.policy) == nil { close(req.eventChan) return } @@ -653,8 +653,8 @@ func (nw *nodeWatcher) getChannelPolicies(include bool) policyUpdateMap { return policyUpdates } -// checkChannelPolicy checks that the policy matches the expected one. -func checkChannelPolicy(policy, expectedPolicy *lnrpc.RoutingPolicy) error { +// CheckChannelPolicy checks that the policy matches the expected one. +func CheckChannelPolicy(policy, expectedPolicy *lnrpc.RoutingPolicy) error { if policy.FeeBaseMsat != expectedPolicy.FeeBaseMsat { return fmt.Errorf("expected base fee %v, got %v", expectedPolicy.FeeBaseMsat, policy.FeeBaseMsat) diff --git a/lntemp/rpc/lnd.go b/lntemp/rpc/lnd.go index 360277369..941b4371e 100644 --- a/lntemp/rpc/lnd.go +++ b/lntemp/rpc/lnd.go @@ -432,3 +432,53 @@ func (h *HarnessRPC) BatchOpenChannelAssertErr( return err } + +// QueryRoutes makes a RPC call to QueryRoutes and asserts. 
+func (h *HarnessRPC) QueryRoutes( + req *lnrpc.QueryRoutesRequest) *lnrpc.QueryRoutesResponse { + + ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout) + defer cancel() + + routes, err := h.LN.QueryRoutes(ctxt, req) + require.NoErrorf(h, err, "failed to query routes") + + return routes +} + +// SendToRoute makes a RPC call to SendToRoute and asserts. +func (h *HarnessRPC) SendToRoute() lnrpc.Lightning_SendToRouteClient { + // SendToRoute needs to have the context alive for the entire test case + // as the returned client will be used for send and receive payment + // stream. Thus we use runCtx here instead of a timeout context. + client, err := h.LN.SendToRoute(h.runCtx) + h.NoError(err, "SendToRoute") + + return client +} + +// SendToRouteSync makes a RPC call to SendToRouteSync and asserts. +func (h *HarnessRPC) SendToRouteSync( + req *lnrpc.SendToRouteRequest) *lnrpc.SendResponse { + + ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout) + defer cancel() + + resp, err := h.LN.SendToRouteSync(ctxt, req) + require.NoErrorf(h, err, "unable to send to route for %s", h.Name) + + return resp +} + +// UpdateChannelPolicy makes a RPC call to UpdateChannelPolicy and asserts. +func (h *HarnessRPC) UpdateChannelPolicy( + req *lnrpc.PolicyUpdateRequest) *lnrpc.PolicyUpdateResponse { + + ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout) + defer cancel() + + resp, err := h.LN.UpdateChannelPolicy(ctxt, req) + require.NoErrorf(h, err, "failed to update policy") + + return resp +} diff --git a/lntemp/utils.go b/lntemp/utils.go index 590380423..d9946f365 100644 --- a/lntemp/utils.go +++ b/lntemp/utils.go @@ -8,6 +8,7 @@ import ( "strconv" "strings" + "github.com/btcsuite/btcd/wire" "github.com/lightningnetwork/lnd/lnrpc" "github.com/lightningnetwork/lnd/lntest" ) @@ -106,3 +107,18 @@ func ChanPointFromPendingUpdate(pu *lnrpc.PendingUpdate) *lnrpc.ChannelPoint { return chanPoint } + +// channelPointStr returns the string representation of the channel's +// funding transaction. 
+func channelPointStr(chanPoint *lnrpc.ChannelPoint) string {
+	fundingTxID, err := lnrpc.GetChanPointFundingTxid(chanPoint)
+	if err != nil {
+		return ""
+	}
+	cp := wire.OutPoint{
+		Hash:  *fundingTxID,
+		Index: chanPoint.OutputIndex,
+	}
+
+	return cp.String()
+}
diff --git a/lntest/itest/list_on_test.go b/lntest/itest/list_on_test.go
index c6bac981e..515f2ae99 100644
--- a/lntest/itest/list_on_test.go
+++ b/lntest/itest/list_on_test.go
@@ -135,4 +135,8 @@ var allTestCasesTemp = []*lntemp.TestCase{
 		Name:     "batch channel funding",
 		TestFunc: testBatchChanFunding,
 	},
+	{
+		Name:     "update channel policy",
+		TestFunc: testUpdateChannelPolicy,
+	},
 }
diff --git a/lntest/itest/lnd_channel_policy_test.go b/lntest/itest/lnd_channel_policy_test.go
index 638878933..2db663742 100644
--- a/lntest/itest/lnd_channel_policy_test.go
+++ b/lntest/itest/lnd_channel_policy_test.go
@@ -3,7 +3,6 @@ package itest
 import (
 	"context"
 	"math"
-	"strings"
 	"time"
 
 	"github.com/btcsuite/btcd/btcutil"
@@ -11,6 +10,9 @@ import (
 	"github.com/lightningnetwork/lnd/chainreg"
 	"github.com/lightningnetwork/lnd/funding"
 	"github.com/lightningnetwork/lnd/lnrpc"
+	"github.com/lightningnetwork/lnd/lnrpc/routerrpc"
+	"github.com/lightningnetwork/lnd/lntemp"
+	"github.com/lightningnetwork/lnd/lntemp/node"
 	"github.com/lightningnetwork/lnd/lntest"
 	"github.com/lightningnetwork/lnd/lnwire"
 	"github.com/stretchr/testify/require"
@@ -18,6 +20,7 @@ import (
 
 // assertPolicyUpdate checks that a given policy update has been received by a
 // list of given nodes.
+// TODO(yy): delete.
 func assertPolicyUpdate(t *harnessTest, nodes []*lntest.HarnessNode,
 	advertisingNode string, policy *lnrpc.RoutingPolicy,
 	chanPoint *lnrpc.ChannelPoint) {
@@ -30,10 +33,8 @@ func assertPolicyUpdate(t *harnessTest, nodes []*lntest.HarnessNode,
 }
 
 // testUpdateChannelPolicy tests that policy updates made to a channel
-// get propagated to other nodes in the network.
-func testUpdateChannelPolicy(net *lntest.NetworkHarness, t *harnessTest) {
-	ctxb := context.Background()
-
+// get propagated to other nodes in the network.
+func testUpdateChannelPolicy(ht *lntemp.HarnessTest) {
 	const (
 		defaultFeeBase = 1000
 		defaultFeeRate = 1
@@ -45,19 +46,19 @@ func testUpdateChannelPolicy(net *lntest.NetworkHarness, t *harnessTest) {
 	chanAmt := funding.MaxBtcFundingAmount
 	pushAmt := chanAmt / 2
 
+	alice, bob := ht.Alice, ht.Bob
+
 	// Create a channel Alice->Bob.
-	chanPoint := openChannelAndAssert(
-		t, net, net.Alice, net.Bob,
-		lntest.OpenChannelParams{
+	chanPoint := ht.OpenChannel(
+		alice, bob, lntemp.OpenChannelParams{
 			Amt:     chanAmt,
 			PushAmt: pushAmt,
 		},
 	)
-	defer closeChannelAndAssert(t, net, net.Alice, chanPoint, false)
 
 	// We add all the nodes' update channels to a slice, such that we can
 	// make sure they all receive the expected updates.
-	nodes := []*lntest.HarnessNode{net.Alice, net.Bob}
+	nodes := []*node.HarnessNode{alice, bob}
 
 	// Alice and Bob should see each other's ChannelUpdates, advertising the
 	// default routing policies.
@@ -69,65 +70,45 @@ func testUpdateChannelPolicy(net *lntest.NetworkHarness, t *harnessTest) {
 		MaxHtlcMsat: defaultMaxHtlc,
 	}
 
-	assertPolicyUpdate(
-		t, nodes, net.Alice.PubKeyStr, expectedPolicy, chanPoint,
-	)
-	assertPolicyUpdate(
-		t, nodes, net.Bob.PubKeyStr, expectedPolicy, chanPoint,
-	)
+	assertNodesPolicyUpdate(ht, nodes, alice, expectedPolicy, chanPoint)
+	assertNodesPolicyUpdate(ht, nodes, bob, expectedPolicy, chanPoint)
 
 	// They should now know about the default policies.
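Before the per-node checks that follow, a quick reminder of how the two fee fields in these policies combine. Per BOLT #7 (this is standard Lightning fee math, not something specific to this patch), forwarding amt msat over a channel costs FeeBaseMsat + amt * FeeRateMilliMsat / 1_000_000 msat, so the defaults above (base 1000, rate 1) charge 1001 msat to forward 1_000_000 msat. A one-line helper makes the arithmetic concrete:

	// Forwarding fee in msat implied by a routing policy (BOLT #7
	// fee_base_msat plus fee_proportional_millionths).
	func forwardingFeeMsat(p *lnrpc.RoutingPolicy, amtMsat int64) int64 {
		return p.FeeBaseMsat + amtMsat*p.FeeRateMilliMsat/1_000_000
	}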
for _, node := range nodes { - assertChannelPolicy( - t, node, net.Alice.PubKeyStr, expectedPolicy, chanPoint, + ht.AssertChannelPolicy( + node, alice.PubKeyStr, expectedPolicy, chanPoint, ) - assertChannelPolicy( - t, node, net.Bob.PubKeyStr, expectedPolicy, chanPoint, + ht.AssertChannelPolicy( + node, bob.PubKeyStr, expectedPolicy, chanPoint, ) } - err := net.Alice.WaitForNetworkChannelOpen(chanPoint) - if err != nil { - t.Fatalf("alice didn't report channel: %v", err) - } - err = net.Bob.WaitForNetworkChannelOpen(chanPoint) - if err != nil { - t.Fatalf("bob didn't report channel: %v", err) - } - // Create Carol with options to rate limit channel updates up to 2 per // day, and create a new channel Bob->Carol. - carol := net.NewNode( - t.t, "Carol", []string{ + carol := ht.NewNode( + "Carol", []string{ "--gossip.max-channel-update-burst=2", "--gossip.channel-update-interval=24h", }, ) - - // Clean up carol's node when the test finishes. - defer shutdownAndAssert(net, t, carol) - + ht.ConnectNodes(carol, bob) nodes = append(nodes, carol) // Send some coins to Carol that can be used for channel funding. - net.SendCoins(t.t, btcutil.SatoshiPerBitcoin, carol) - - net.ConnectNodes(t.t, carol, net.Bob) + ht.FundCoins(btcutil.SatoshiPerBitcoin, carol) // Open the channel Carol->Bob with a custom min_htlc value set. Since // Carol is opening the channel, she will require Bob to not forward // HTLCs smaller than this value, and hence he should advertise it as // part of his ChannelUpdate. const customMinHtlc = 5000 - chanPoint2 := openChannelAndAssert( - t, net, carol, net.Bob, - lntest.OpenChannelParams{ + chanPoint2 := ht.OpenChannel( + carol, bob, lntemp.OpenChannelParams{ Amt: chanAmt, PushAmt: pushAmt, MinHtlc: customMinHtlc, }, ) - defer closeChannelAndAssert(t, net, net.Bob, chanPoint2, false) expectedPolicyBob := &lnrpc.RoutingPolicy{ FeeBaseMsat: defaultFeeBase, @@ -144,37 +125,24 @@ func testUpdateChannelPolicy(net *lntest.NetworkHarness, t *harnessTest) { MaxHtlcMsat: defaultMaxHtlc, } - assertPolicyUpdate( - t, nodes, net.Bob.PubKeyStr, expectedPolicyBob, chanPoint2, - ) - assertPolicyUpdate( - t, nodes, carol.PubKeyStr, expectedPolicyCarol, chanPoint2, + assertNodesPolicyUpdate(ht, nodes, bob, expectedPolicyBob, chanPoint2) + assertNodesPolicyUpdate( + ht, nodes, carol, expectedPolicyCarol, chanPoint2, ) // Check that all nodes now know about the updated policies. for _, node := range nodes { - assertChannelPolicy( - t, node, net.Bob.PubKeyStr, expectedPolicyBob, - chanPoint2, + ht.AssertChannelPolicy( + node, bob.PubKeyStr, expectedPolicyBob, chanPoint2, ) - assertChannelPolicy( - t, node, carol.PubKeyStr, expectedPolicyCarol, - chanPoint2, + ht.AssertChannelPolicy( + node, carol.PubKeyStr, expectedPolicyCarol, chanPoint2, ) } - err = net.Alice.WaitForNetworkChannelOpen(chanPoint2) - if err != nil { - t.Fatalf("alice didn't report channel: %v", err) - } - err = net.Bob.WaitForNetworkChannelOpen(chanPoint2) - if err != nil { - t.Fatalf("bob didn't report channel: %v", err) - } - err = carol.WaitForNetworkChannelOpen(chanPoint2) - if err != nil { - t.Fatalf("carol didn't report channel: %v", err) - } + // Make sure Alice and Carol have seen each other's channels. + ht.AssertTopologyChannelOpen(alice, chanPoint2) + ht.AssertTopologyChannelOpen(carol, chanPoint) // First we'll try to send a payment from Alice to Carol with an amount // less than the min_htlc value required by Carol. 
This payment should @@ -184,23 +152,19 @@ func testUpdateChannelPolicy(net *lntest.NetworkHarness, t *harnessTest) { Memo: "testing", Value: int64(payAmt), } - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - resp, err := carol.AddInvoice(ctxt, invoice) - if err != nil { - t.Fatalf("unable to add invoice: %v", err) - } - - err = completePaymentRequests( - net.Alice, net.Alice.RouterClient, - []string{resp.PaymentRequest}, true, - ) + resp := carol.RPC.AddInvoice(invoice) // Alice knows about the channel policy of Carol and should therefore // not be able to find a path during routing. - expErr := lnrpc.PaymentFailureReason_FAILURE_REASON_NO_ROUTE - if err.Error() != expErr.String() { - t.Fatalf("expected %v, instead got %v", expErr, err) + payReq := &routerrpc.SendPaymentRequest{ + PaymentRequest: resp.PaymentRequest, + TimeoutSeconds: 60, + FeeLimitMsat: noFeeLimitMsat, } + ht.SendPaymentAssertFail( + alice, payReq, + lnrpc.PaymentFailureReason_FAILURE_REASON_NO_ROUTE, + ) // Now we try to send a payment over the channel with a value too low // to be accepted. First we query for a route to route a payment of @@ -211,16 +175,8 @@ func testUpdateChannelPolicy(net *lntest.NetworkHarness, t *harnessTest) { Amt: int64(payAmt), FinalCltvDelta: defaultTimeLockDelta, } - - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - routes, err := net.Alice.QueryRoutes(ctxt, routesReq) - if err != nil { - t.Fatalf("unable to get route: %v", err) - } - - if len(routes.Routes) != 1 { - t.Fatalf("expected to find 1 route, got %v", len(routes.Routes)) - } + routes := alice.RPC.QueryRoutes(routesReq) + require.Len(ht, routes.Routes, 1) // We change the route to carry a payment of 4000 mSAT instead of 5000 // mSAT. @@ -233,27 +189,19 @@ func testUpdateChannelPolicy(net *lntest.NetworkHarness, t *harnessTest) { routes.Routes[0].Hops[1].AmtToForwardMsat = amtMSat // Send the payment with the modified value. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - alicePayStream, err := net.Alice.SendToRoute(ctxt) // nolint:staticcheck - if err != nil { - t.Fatalf("unable to create payment stream for alice: %v", err) - } + alicePayStream := alice.RPC.SendToRoute() + sendReq := &lnrpc.SendToRouteRequest{ PaymentHash: resp.RHash, Route: routes.Routes[0], } - - err = alicePayStream.Send(sendReq) - if err != nil { - t.Fatalf("unable to send payment: %v", err) - } + err := alicePayStream.Send(sendReq) + require.NoError(ht, err, "unable to send payment") // We expect this payment to fail, and that the min_htlc value is // communicated back to us, since the attempted HTLC value was too low. sendResp, err := alicePayStream.Recv() - if err != nil { - t.Fatalf("unable to send payment: %v", err) - } + require.NoError(ht, err, "unable to receive payment stream") // Expected as part of the error message. substrs := []string{ @@ -261,10 +209,7 @@ func testUpdateChannelPolicy(net *lntest.NetworkHarness, t *harnessTest) { "HtlcMinimumMsat: (lnwire.MilliSatoshi) 5000 mSAT", } for _, s := range substrs { - if !strings.Contains(sendResp.PaymentError, s) { - t.Fatalf("expected error to contain \"%v\", instead "+ - "got %v", s, sendResp.PaymentError) - } + require.Contains(ht, sendResp.PaymentError, s) } // Make sure sending using the original value succeeds. 
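
The hunk above lowers the amount on each hop by hand, and it has to touch two fields per hop: the legacy satoshi amount and its millisatoshi twin. If the two drift apart, the route becomes internally inconsistent. A small helper along the following lines (our sketch, not part of the patch; it assumes lnd's lnrpc, lnwire and btcutil packages) keeps them in sync:

// setHopAmt pins the amount a hop forwards, updating the deprecated
// satoshi field and the millisatoshi field together so the modified
// route stays self-consistent.
func setHopAmt(hop *lnrpc.Hop, amt btcutil.Amount) {
	hop.AmtToForward = int64(amt)
	hop.AmtToForwardMsat = int64(lnwire.NewMSatFromSatoshis(amt))
}

With a helper like this, each hop in the hand-modified route above can be updated with a single call per hop instead of two parallel assignments.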
@@ -292,22 +237,14 @@ func testUpdateChannelPolicy(net *lntest.NetworkHarness, t *harnessTest) { } err = alicePayStream.Send(sendReq) - if err != nil { - t.Fatalf("unable to send payment: %v", err) - } + require.NoError(ht, err, "unable to send payment") sendResp, err = alicePayStream.Recv() - if err != nil { - t.Fatalf("unable to send payment: %v", err) - } + require.NoError(ht, err, "unable to receive payment stream") + require.Empty(ht, sendResp.PaymentError, "expected payment to succeed") - if sendResp.PaymentError != "" { - t.Fatalf("expected payment to succeed, instead got %v", - sendResp.PaymentError) - } - - // With our little cluster set up, we'll update the fees and the max htlc - // size for the Bob side of the Alice->Bob channel, and make sure + // With our little cluster set up, we'll update the fees and the max + // htlc size for the Bob side of the Alice->Bob channel, and make sure // all nodes learn about it. baseFee := int64(1500) feeRate := int64(12) @@ -331,21 +268,15 @@ func testUpdateChannelPolicy(net *lntest.NetworkHarness, t *harnessTest) { ChanPoint: chanPoint, }, } - - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - if _, err := net.Bob.UpdateChannelPolicy(ctxt, req); err != nil { - t.Fatalf("unable to get alice's balance: %v", err) - } + bob.RPC.UpdateChannelPolicy(req) // Wait for all nodes to have seen the policy update done by Bob. - assertPolicyUpdate( - t, nodes, net.Bob.PubKeyStr, expectedPolicy, chanPoint, - ) + assertNodesPolicyUpdate(ht, nodes, bob, expectedPolicy, chanPoint) // Check that all nodes now know about Bob's updated policy. for _, node := range nodes { - assertChannelPolicy( - t, node, net.Bob.PubKeyStr, expectedPolicy, chanPoint, + ht.AssertChannelPolicy( + node, bob.PubKeyStr, expectedPolicy, chanPoint, ) } @@ -361,39 +292,21 @@ func testUpdateChannelPolicy(net *lntest.NetworkHarness, t *harnessTest) { Memo: "testing", Value: int64(payAmt), } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - resp, err = carol.AddInvoice(ctxt, invoice) - if err != nil { - t.Fatalf("unable to add invoice: %v", err) - } + resp = carol.RPC.AddInvoice(invoice) - err = completePaymentRequests( - net.Alice, net.Alice.RouterClient, - []string{resp.PaymentRequest}, true, - ) - if err != nil { - t.Fatalf("unable to send payment: %v", err) - } + ht.CompletePaymentRequests(alice, []string{resp.PaymentRequest}) // We'll now open a channel from Alice directly to Carol. - net.ConnectNodes(t.t, net.Alice, carol) - chanPoint3 := openChannelAndAssert( - t, net, net.Alice, carol, - lntest.OpenChannelParams{ + ht.ConnectNodes(alice, carol) + chanPoint3 := ht.OpenChannel( + alice, carol, lntemp.OpenChannelParams{ Amt: chanAmt, PushAmt: pushAmt, }, ) - defer closeChannelAndAssert(t, net, net.Alice, chanPoint3, false) - err = net.Alice.WaitForNetworkChannelOpen(chanPoint3) - if err != nil { - t.Fatalf("alice didn't report channel: %v", err) - } - err = carol.WaitForNetworkChannelOpen(chanPoint3) - if err != nil { - t.Fatalf("bob didn't report channel: %v", err) - } + // Make sure Bob knows this channel. + ht.AssertTopologyChannelOpen(bob, chanPoint3) // Make a global update, and check that both channels' new policies get // propagated. 
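
So far the test has scoped the update to a single channel point; the next hunk flips the same request over to a global scope. Written side by side (a standalone sketch using only the fields visible in this test, with illustrative values):

// Per-channel scope: only the policy for chanPoint is re-advertised.
req := &lnrpc.PolicyUpdateRequest{
	BaseFeeMsat:   1500,
	TimeLockDelta: 66,
	Scope: &lnrpc.PolicyUpdateRequest_ChanPoint{
		ChanPoint: chanPoint,
	},
}
bob.RPC.UpdateChannelPolicy(req)

// Global scope: one call re-advertises every channel the node has,
// which is why a single request below covers both of Alice's channels.
req.Scope = &lnrpc.PolicyUpdateRequest_Global{}
alice.RPC.UpdateChannelPolicy(req)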
@@ -414,28 +327,21 @@ func testUpdateChannelPolicy(net *lntest.NetworkHarness, t *harnessTest) { MaxHtlcMsat: maxHtlc, } req.Scope = &lnrpc.PolicyUpdateRequest_Global{} - - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - _, err = net.Alice.UpdateChannelPolicy(ctxt, req) - if err != nil { - t.Fatalf("unable to update alice's channel policy: %v", err) - } + alice.RPC.UpdateChannelPolicy(req) // Wait for all nodes to have seen the policy updates for both of // Alice's channels. - assertPolicyUpdate( - t, nodes, net.Alice.PubKeyStr, expectedPolicy, chanPoint, - ) - assertPolicyUpdate( - t, nodes, net.Alice.PubKeyStr, expectedPolicy, chanPoint3, - ) + assertNodesPolicyUpdate(ht, nodes, alice, expectedPolicy, chanPoint) + assertNodesPolicyUpdate(ht, nodes, alice, expectedPolicy, chanPoint3) // And finally check that all nodes remembers the policy update they // received. for _, node := range nodes { - assertChannelPolicy( - t, node, net.Alice.PubKeyStr, expectedPolicy, - chanPoint, chanPoint3, + ht.AssertChannelPolicy( + node, alice.PubKeyStr, expectedPolicy, chanPoint, + ) + ht.AssertChannelPolicy( + node, alice.PubKeyStr, expectedPolicy, chanPoint3, ) } @@ -443,60 +349,87 @@ func testUpdateChannelPolicy(net *lntest.NetworkHarness, t *harnessTest) { // we'll send two more update from Alice. Carol should accept the first, // but not the second, as she only allows two updates per day and a day // has yet to elapse from the previous update. - const numUpdatesTilRateLimit = 2 - for i := 0; i < numUpdatesTilRateLimit; i++ { - prevAlicePolicy := *expectedPolicy - baseFee *= 2 - expectedPolicy.FeeBaseMsat = baseFee - req.BaseFeeMsat = baseFee - ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout) - defer cancel() - _, err = net.Alice.UpdateChannelPolicy(ctxt, req) - require.NoError(t.t, err) + // assertAliceAndBob is a helper closure which updates Alice's policy + // and asserts that both Alice and Bob have heard and updated the + // policy in their graph. + assertAliceAndBob := func(req *lnrpc.PolicyUpdateRequest, + expectedPolicy *lnrpc.RoutingPolicy) { + + alice.RPC.UpdateChannelPolicy(req) // Wait for all nodes to have seen the policy updates for both // of Alice's channels. Carol will not see the last update as // the limit has been reached. - assertPolicyUpdate( - t, []*lntest.HarnessNode{net.Alice, net.Bob}, - net.Alice.PubKeyStr, expectedPolicy, chanPoint, + assertNodesPolicyUpdate( + ht, []*node.HarnessNode{alice, bob}, + alice, expectedPolicy, chanPoint, ) - assertPolicyUpdate( - t, []*lntest.HarnessNode{net.Alice, net.Bob}, - net.Alice.PubKeyStr, expectedPolicy, chanPoint3, + assertNodesPolicyUpdate( + ht, []*node.HarnessNode{alice, bob}, + alice, expectedPolicy, chanPoint3, ) // Check that all nodes remember the policy update // they received. - assertChannelPolicy( - t, net.Alice, net.Alice.PubKeyStr, - expectedPolicy, chanPoint, chanPoint3, + ht.AssertChannelPolicy( + alice, alice.PubKeyStr, expectedPolicy, chanPoint, ) - assertChannelPolicy( - t, net.Bob, net.Alice.PubKeyStr, - expectedPolicy, chanPoint, chanPoint3, + ht.AssertChannelPolicy( + alice, alice.PubKeyStr, expectedPolicy, chanPoint3, ) - - // Carol was added last, which is why we check the last index. - // Since Carol didn't receive the last update, she still has - // Alice's old policy. 
- if i == numUpdatesTilRateLimit-1 { - expectedPolicy = &prevAlicePolicy - } - assertPolicyUpdate( - t, []*lntest.HarnessNode{carol}, - net.Alice.PubKeyStr, expectedPolicy, chanPoint, + ht.AssertChannelPolicy( + bob, alice.PubKeyStr, expectedPolicy, chanPoint, ) - assertPolicyUpdate( - t, []*lntest.HarnessNode{carol}, - net.Alice.PubKeyStr, expectedPolicy, chanPoint3, - ) - assertChannelPolicy( - t, carol, net.Alice.PubKeyStr, - expectedPolicy, chanPoint, chanPoint3, + ht.AssertChannelPolicy( + bob, alice.PubKeyStr, expectedPolicy, chanPoint3, ) } + + // Double the base fee and attach to the policy. + baseFee1 := baseFee * 2 + expectedPolicy.FeeBaseMsat = baseFee1 + req.BaseFeeMsat = baseFee1 + assertAliceAndBob(req, expectedPolicy) + + // Check that Carol has both heard the policy and updated it in her + // graph. + assertNodesPolicyUpdate( + ht, []*node.HarnessNode{carol}, + alice, expectedPolicy, chanPoint, + ) + assertNodesPolicyUpdate( + ht, []*node.HarnessNode{carol}, + alice, expectedPolicy, chanPoint3, + ) + ht.AssertChannelPolicy( + carol, alice.PubKeyStr, expectedPolicy, chanPoint, + ) + ht.AssertChannelPolicy( + carol, alice.PubKeyStr, expectedPolicy, chanPoint3, + ) + + // Double the base fee and attach to the policy. + baseFee2 := baseFee1 * 2 + expectedPolicy.FeeBaseMsat = baseFee2 + req.BaseFeeMsat = baseFee2 + assertAliceAndBob(req, expectedPolicy) + + // Since Carol didn't receive the last update, she still has Alice's + // old policy. We validate this by checking the base fee is the older + // one. + expectedPolicy.FeeBaseMsat = baseFee1 + ht.AssertChannelPolicy( + carol, alice.PubKeyStr, expectedPolicy, chanPoint, + ) + ht.AssertChannelPolicy( + carol, alice.PubKeyStr, expectedPolicy, chanPoint3, + ) + + // Close all channels. + ht.CloseChannel(alice, chanPoint) + ht.CloseChannel(bob, chanPoint2) + ht.CloseChannel(alice, chanPoint3) } // testSendUpdateDisableChannel ensures that a channel update with the disable @@ -915,3 +848,16 @@ func testUpdateChannelPolicyFeeRateAccuracy(net *lntest.NetworkHarness, t, nodes, net.Alice.PubKeyStr, expectedPolicy, chanPoint, ) } + +// assertNodesPolicyUpdate checks that a given policy update has been received +// by a list of given nodes. 
+func assertNodesPolicyUpdate(ht *lntemp.HarnessTest, nodes []*node.HarnessNode,
+	advertisingNode *node.HarnessNode, policy *lnrpc.RoutingPolicy,
+	chanPoint *lnrpc.ChannelPoint) {
+
+	for _, node := range nodes {
+		ht.AssertChannelPolicyUpdate(
+			node, advertisingNode, policy, chanPoint, false,
+		)
+	}
+}
diff --git a/lntest/itest/lnd_test_list_on_test.go b/lntest/itest/lnd_test_list_on_test.go
index ab0bd99cf..0341d7ac9 100644
--- a/lntest/itest/lnd_test_list_on_test.go
+++ b/lntest/itest/lnd_test_list_on_test.go
@@ -4,10 +4,6 @@
 package itest
 
 var allTestCases = []*testCase{
-	{
-		name: "update channel policy",
-		test: testUpdateChannelPolicy,
-	},
 	{
 		name: "update channel policy fee rate accuracy",
 		test: testUpdateChannelPolicyFeeRateAccuracy,

From b579ed72d9e639747b5193babac1b7c3ee4fbb61 Mon Sep 17 00:00:00 2001
From: yyforyongyu
Date: Thu, 4 Aug 2022 05:37:43 +0800
Subject: [PATCH 19/47] lntemp+itest: refactor `testSendUpdateDisableChannel`

---
 lntemp/harness.go                       |  98 +++++++++
 lntemp/harness_assertion.go             |  52 +++++
 lntest/itest/list_on_test.go            |   4 +
 lntest/itest/lnd_channel_policy_test.go | 280 ++++++++++++++----------
 lntest/itest/lnd_test_list_on_test.go   |   4 -
 5 files changed, 316 insertions(+), 122 deletions(-)

diff --git a/lntemp/harness.go b/lntemp/harness.go
index f15fb443a..ea75edd5f 100644
--- a/lntemp/harness.go
+++ b/lntemp/harness.go
@@ -6,6 +6,7 @@ import (
 	"fmt"
 	"sync"
 	"testing"
+	"time"
 
 	"github.com/btcsuite/btcd/btcutil"
 	"github.com/btcsuite/btcd/chaincfg/chainhash"
@@ -1344,3 +1345,100 @@ func (h *HarnessTest) SendPaymentAssertFail(hn *node.HarnessNode,
 
 	return payment
 }
+
+// OpenChannelRequest is used to open a channel using the method
+// OpenMultiChannelsAsync.
+type OpenChannelRequest struct {
+	// Local is the funding node.
+	Local *node.HarnessNode
+
+	// Remote is the receiving node.
+	Remote *node.HarnessNode
+
+	// Param is the open channel params.
+	Param OpenChannelParams
+
+	// stream is the client created after calling OpenChannel RPC.
+	stream rpc.OpenChanClient
+
+	// result is a channel used to send the channel point once the funding
+	// has succeeded.
+	result chan *lnrpc.ChannelPoint
+}
+
+// OpenMultiChannelsAsync takes a list of OpenChannelRequest and opens them in
+// batch. The channel points are returned in the same order as the requests
+// once all of the channel opens have succeeded.
+//
+// NOTE: compared to opening multiple channels sequentially, this method will
+// be faster as it doesn't need to mine 6 blocks for each channel open.
+// However, it does make debugging the logs more difficult as messages are
+// intertwined.
+func (h *HarnessTest) OpenMultiChannelsAsync(
+	reqs []*OpenChannelRequest) []*lnrpc.ChannelPoint {
+
+	// openChannel opens a channel based on the request.
+	openChannel := func(req *OpenChannelRequest) {
+		stream := h.OpenChannelAssertStream(
+			req.Local, req.Remote, req.Param,
+		)
+		req.stream = stream
+	}
+
+	// assertChannelOpen is a helper closure that asserts a channel is
+	// open.
+	assertChannelOpen := func(req *OpenChannelRequest) {
+		// Wait for the channel open event from the stream.
+		cp := h.WaitForChannelOpenEvent(req.stream)
+
+		// Check that both the local and remote nodes have seen the
+		// channel from their channel watch request.
+		h.AssertTopologyChannelOpen(req.Local, cp)
+		h.AssertTopologyChannelOpen(req.Remote, cp)
+
+		// Finally, check that the channel can be seen in their
+		// ListChannels.
+ h.AssertChannelExists(req.Local, cp) + h.AssertChannelExists(req.Remote, cp) + + req.result <- cp + } + + // Go through the requests and make the OpenChannel RPC call. + for _, r := range reqs { + openChannel(r) + } + + // Mine one block to confirm all the funding transactions. + h.MineBlocksAndAssertNumTxes(1, len(reqs)) + + // Mine 5 more blocks so all the public channels are announced to the + // network. + h.MineBlocks(numBlocksOpenChannel - 1) + + // Once the blocks are mined, we fire goroutines for each of the + // request to watch for the channel openning. + for _, r := range reqs { + r.result = make(chan *lnrpc.ChannelPoint, 1) + go assertChannelOpen(r) + } + + // Finally, collect the results. + channelPoints := make([]*lnrpc.ChannelPoint, 0) + for _, r := range reqs { + select { + case cp := <-r.result: + channelPoints = append(channelPoints, cp) + + case <-time.After(lntest.ChannelOpenTimeout): + require.Failf(h, "timeout", "wait channel point "+ + "timeout for channel %s=>%s", r.Local.Name(), + r.Remote.Name()) + } + } + + // Assert that we have the expected num of channel points. + require.Len(h, channelPoints, len(reqs), + "returned channel points not match") + + return channelPoints +} diff --git a/lntemp/harness_assertion.go b/lntemp/harness_assertion.go index 87704dc05..6a9f753da 100644 --- a/lntemp/harness_assertion.go +++ b/lntemp/harness_assertion.go @@ -4,6 +4,7 @@ import ( "context" "crypto/rand" "encoding/hex" + "encoding/json" "fmt" "math" "strings" @@ -78,6 +79,22 @@ func (h *HarnessTest) ConnectNodes(a, b *node.HarnessNode) { h.AssertPeerConnected(a, b) } +// ConnectNodesPerm creates a persistent connection between the two nodes and +// asserts the connection is succeeded. +func (h *HarnessTest) ConnectNodesPerm(a, b *node.HarnessNode) { + bobInfo := b.RPC.GetInfo() + + req := &lnrpc.ConnectPeerRequest{ + Addr: &lnrpc.LightningAddress{ + Pubkey: bobInfo.IdentityPubkey, + Host: b.Cfg.P2PAddr(), + }, + Perm: true, + } + a.RPC.ConnectPeer(req) + h.AssertPeerConnected(a, b) +} + // DisconnectNodes disconnects the given two nodes and asserts the // disconnection is succeeded. The request is made from node a and sent to node // b. @@ -1544,3 +1561,38 @@ func (h *HarnessTest) AssertChannelPolicy(hn *node.HarnessNode, err = node.CheckChannelPolicy(policy, expectedPolicy) require.NoErrorf(h, err, "%s: check policy failed", hn.Name()) } + +// AssertNumPolicyUpdates asserts that a given number of channel policy updates +// has been seen in the specified node. 
+func (h *HarnessTest) AssertNumPolicyUpdates(hn *node.HarnessNode, + chanPoint *lnrpc.ChannelPoint, + advertisingNode *node.HarnessNode, num int) { + + op := h.OutPointFromChannelPoint(chanPoint) + + var policies []*node.PolicyUpdateInfo + + err := wait.NoError(func() error { + policyMap := hn.Watcher.GetPolicyUpdates(op) + nodePolicy, ok := policyMap[advertisingNode.PubKeyStr] + if ok { + policies = nodePolicy + } + + if len(policies) == num { + return nil + } + + p, err := json.MarshalIndent(policies, "", "\t") + require.NoError(h, err, "encode policy err") + + return fmt.Errorf("expected to find %d policy updates, "+ + "instead got: %d, chanPoint: %v, "+ + "advertisingNode: %s:%s, policy: %s", num, + len(policies), op, advertisingNode.Name(), + advertisingNode.PubKeyStr, p) + }, DefaultTimeout) + + require.NoError(h, err, "%s: timeout waiting for num of policy updates", + hn.Name()) +} diff --git a/lntest/itest/list_on_test.go b/lntest/itest/list_on_test.go index 515f2ae99..f41b00b64 100644 --- a/lntest/itest/list_on_test.go +++ b/lntest/itest/list_on_test.go @@ -139,4 +139,8 @@ var allTestCasesTemp = []*lntemp.TestCase{ Name: "update channel policy", TestFunc: testUpdateChannelPolicy, }, + { + Name: "send update disable channel", + TestFunc: testSendUpdateDisableChannel, + }, } diff --git a/lntest/itest/lnd_channel_policy_test.go b/lntest/itest/lnd_channel_policy_test.go index 2db663742..d38e25a5b 100644 --- a/lntest/itest/lnd_channel_policy_test.go +++ b/lntest/itest/lnd_channel_policy_test.go @@ -2,6 +2,7 @@ package itest import ( "context" + "fmt" "math" "time" @@ -435,71 +436,92 @@ func testUpdateChannelPolicy(ht *lntemp.HarnessTest) { // testSendUpdateDisableChannel ensures that a channel update with the disable // flag set is sent once a channel has been either unilaterally or cooperatively // closed. -func testSendUpdateDisableChannel(net *lntest.NetworkHarness, t *harnessTest) { - const ( - chanAmt = 100000 - ) +// +// NOTE: this test can be flaky as we are testing the chan-enable-timeout and +// chan-disable-timeout flags here. For instance, if some operations take more +// than 6 seconds to finish, the channel will be marked as disabled, thus a +// following operation will fail if it relies on the channel being enabled. +func testSendUpdateDisableChannel(ht *lntemp.HarnessTest) { + const chanAmt = 100000 - // Open a channel between Alice and Bob and Alice and Carol. These will - // be closed later on in order to trigger channel update messages - // marking the channels as disabled. - chanPointAliceBob := openChannelAndAssert( - t, net, net.Alice, net.Bob, - lntest.OpenChannelParams{ - Amt: chanAmt, - }, - ) + alice, bob := ht.Alice, ht.Bob - carol := net.NewNode( - t.t, "Carol", []string{ - "--minbackoff=10s", - "--chan-enable-timeout=1.5s", - "--chan-disable-timeout=3s", - "--chan-status-sample-interval=.5s", - }) - defer shutdownAndAssert(net, t, carol) + // Create a new node Eve, which will be restarted later with a config + // that has an inactive channel timeout of just 6 seconds (down from + // the default 20m). It will be used to test channel updates for + // channels going inactive. + // + // NOTE: we don't create Eve with the chan-disable-timeout here because + // the following channel openings might take longer than that timeout + // value, which will cause the channel Eve=>Carol being marked as + // disabled. 
+ eve := ht.NewNode("Eve", nil) - net.ConnectNodes(t.t, net.Alice, carol) - chanPointAliceCarol := openChannelAndAssert( - t, net, net.Alice, carol, - lntest.OpenChannelParams{ - Amt: chanAmt, - }, - ) - - // We create a new node Eve that has an inactive channel timeout of - // just 2 seconds (down from the default 20m). It will be used to test - // channel updates for channels going inactive. - eve := net.NewNode( - t.t, "Eve", []string{ - "--minbackoff=10s", - "--chan-enable-timeout=1.5s", - "--chan-disable-timeout=3s", - "--chan-status-sample-interval=.5s", - }) - defer shutdownAndAssert(net, t, eve) - - // Give Eve some coins. - net.SendCoins(t.t, btcutil.SatoshiPerBitcoin, eve) - - // Connect Eve to Carol and Bob, and open a channel to carol. - net.ConnectNodes(t.t, eve, carol) - net.ConnectNodes(t.t, eve, net.Bob) - - chanPointEveCarol := openChannelAndAssert( - t, net, eve, carol, - lntest.OpenChannelParams{ - Amt: chanAmt, - }, - ) + // Create a new node Carol, which will later be restarted with the same + // config as Eve's. + carol := ht.NewNode("Carol", nil) // Launch a node for Dave which will connect to Bob in order to receive // graph updates from. This will ensure that the channel updates are // propagated throughout the network. - dave := net.NewNode(t.t, "Dave", nil) - defer shutdownAndAssert(net, t, dave) + dave := ht.NewNode("Dave", nil) - net.ConnectNodes(t.t, net.Bob, dave) + // We will start our test by creating the following topology, + // Alice --- Bob --- Dave + // | | + // Carol --- Eve + ht.EnsureConnected(alice, bob) + ht.ConnectNodes(alice, carol) + ht.ConnectNodes(bob, dave) + ht.ConnectNodes(eve, carol) + + // Connect Eve and Bob using a persistent connection. Later after Eve + // is restarted, they will connect again automatically. + ht.ConnectNodesPerm(bob, eve) + + // Give Eve some coins. + ht.FundCoins(btcutil.SatoshiPerBitcoin, eve) + + // We now proceed to open channels: Alice=>Bob, Alice=>Carol and + // Eve=>Carol. + p := lntemp.OpenChannelParams{Amt: chanAmt} + reqs := []*lntemp.OpenChannelRequest{ + {Local: alice, Remote: bob, Param: p}, + {Local: alice, Remote: carol, Param: p}, + {Local: eve, Remote: carol, Param: p}, + } + resp := ht.OpenMultiChannelsAsync(reqs) + + // Extract channel points from the response. + chanPointAliceBob := resp[0] + chanPointAliceCarol := resp[1] + chanPointEveCarol := resp[2] + + // We will use 10 seconds as the disable timeout. + chanDisableTimeout := 10 + chanEnableTimeout := 5 + + // waitChanDisabled is a helper closure to wait the chanDisableTimeout + // seconds such that the channel disable logic is taking effect. + waitChanDisabled := func() { + time.Sleep(time.Duration(chanDisableTimeout) * time.Second) + } + + // With the channels open, we now restart Carol and Eve to use + // customized timeout values. + nodeCfg := []string{ + "--minbackoff=60s", + fmt.Sprintf("--chan-enable-timeout=%ds", chanEnableTimeout), + fmt.Sprintf("--chan-disable-timeout=%ds", chanDisableTimeout), + "--chan-status-sample-interval=.5s", + } + ht.RestartNodeWithExtraArgs(carol, nodeCfg) + ht.RestartNodeWithExtraArgs(eve, nodeCfg) + + // Dave should know all the channels. + ht.AssertTopologyChannelOpen(dave, chanPointAliceBob) + ht.AssertTopologyChannelOpen(dave, chanPointAliceCarol) + ht.AssertTopologyChannelOpen(dave, chanPointEveCarol) // We should expect to see a channel update with the default routing // policy, except that it should indicate the channel is disabled. 
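
Before the disable logic is exercised, note the restart configuration above: Carol and Eve share one flag set, and the timing flags only make sense relative to one another, since the sampler must tick several times inside the disable window and the enable timeout must be shorter than the disable timeout. A hypothetical helper (ours, not the patch's; the flag names are the real lnd options used above) that keeps the values paired:

// chanStatusCfg renders the channel-status flags used to restart
// Carol and Eve. Calling chanStatusCfg(5*time.Second, 10*time.Second)
// reproduces the nodeCfg slice built above.
func chanStatusCfg(enable, disable time.Duration) []string {
	return []string{
		"--minbackoff=60s",
		fmt.Sprintf("--chan-enable-timeout=%v", enable),
		fmt.Sprintf("--chan-disable-timeout=%v", disable),
		"--chan-status-sample-interval=.5s",
	}
}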
@@ -514,110 +536,132 @@ func testSendUpdateDisableChannel(net *lntest.NetworkHarness, t *harnessTest) { // assertPolicyUpdate checks that the required policy update has // happened on the given node. - assertPolicyUpdate := func(node *lntest.HarnessNode, - policy *lnrpc.RoutingPolicy, chanPoint *lnrpc.ChannelPoint) { + assertPolicyUpdate := func(node *node.HarnessNode, + policy *lnrpc.RoutingPolicy, chanPoint *lnrpc.ChannelPoint, + numUpdates int) { - require.NoError( - t.t, dave.WaitForChannelPolicyUpdate( - node.PubKeyStr, policy, chanPoint, false, - ), "error while waiting for channel update", + ht.AssertNumPolicyUpdates(dave, chanPoint, node, numUpdates) + ht.AssertChannelPolicyUpdate( + dave, node, policy, chanPoint, false, ) } - // Let Carol go offline. Since Eve has an inactive timeout of 2s, we + // Let Carol go offline. Since Eve has an inactive timeout of 6s, we // expect her to send an update disabling the channel. - restartCarol, err := net.SuspendNode(carol) - if err != nil { - t.Fatalf("unable to suspend carol: %v", err) - } + restartCarol := ht.SuspendNode(carol) - assertPolicyUpdate(eve, expectedPolicy, chanPointEveCarol) + // We expect to see a total of 2 channel policy updates from the + // channel Carol <-> Eve and advertised by Eve using the route + // Eve->Bob->Dave. + waitChanDisabled() + assertPolicyUpdate(eve, expectedPolicy, chanPointEveCarol, 2) // We restart Carol. Since the channel now becomes active again, Eve // should send a ChannelUpdate setting the channel no longer disabled. - if err := restartCarol(); err != nil { - t.Fatalf("unable to restart carol: %v", err) - } + require.NoError(ht, restartCarol(), "unable to restart carol") expectedPolicy.Disabled = false - assertPolicyUpdate(eve, expectedPolicy, chanPointEveCarol) + // We expect to see a total of 3 channel policy updates from the + // channel Carol <-> Eve and advertised by Eve using the route + // Eve->Bob->Dave. + assertPolicyUpdate(eve, expectedPolicy, chanPointEveCarol, 3) // Wait until Carol and Eve are reconnected before we disconnect them // again. - net.EnsureConnected(t.t, eve, carol) + ht.EnsureConnected(eve, carol) // Now we'll test a long disconnection. Disconnect Carol and Eve and // ensure they both detect each other as disabled. Their min backoffs // are high enough to not interfere with disabling logic. - if err := net.DisconnectNodes(carol, eve); err != nil { - t.Fatalf("unable to disconnect Carol from Eve: %v", err) - } + ht.DisconnectNodes(carol, eve) // Wait for a disable from both Carol and Eve to come through. expectedPolicy.Disabled = true - assertPolicyUpdate(eve, expectedPolicy, chanPointEveCarol) - assertPolicyUpdate(carol, expectedPolicy, chanPointEveCarol) + // We expect to see a total of 4 channel policy updates from the + // channel Carol <-> Eve and advertised by Eve using the route + // Eve->Bob->Dave. + waitChanDisabled() + assertPolicyUpdate(eve, expectedPolicy, chanPointEveCarol, 4) + + // Because Carol has restarted twice before, depending on how much time + // it has taken, she might mark the channel disabled and enable it + // multiple times. Thus we could see a total of 2 or 4 or 6 channel + // policy updates from the channel Carol <-> Eve and advertised by + // Carol using the route Carol->Alice->Bob->Dave. 
+	//
+	// Assume there are 2 channel policy updates from Carol, and update it
+	// if more are found.
+	numCarol := 2
+	op := ht.OutPointFromChannelPoint(chanPointEveCarol)
+	policyMap := dave.Watcher.GetPolicyUpdates(op)
+	nodePolicy, ok := policyMap[carol.PubKeyStr]
+	switch {
+	case !ok:
+	case len(nodePolicy) > 4:
+		numCarol = 6
+	case len(nodePolicy) > 2:
+		numCarol = 4
+	}
+	assertPolicyUpdate(carol, expectedPolicy, chanPointEveCarol, numCarol)
 
 	// Reconnect Carol and Eve, this should cause them to reenable the
 	// channel from both ends after a short delay.
-	net.EnsureConnected(t.t, carol, eve)
+	ht.EnsureConnected(carol, eve)
 
 	expectedPolicy.Disabled = false
-	assertPolicyUpdate(eve, expectedPolicy, chanPointEveCarol)
-	assertPolicyUpdate(carol, expectedPolicy, chanPointEveCarol)
+	// We expect to see a total of 5 channel policy updates from the
+	// channel Carol <-> Eve and advertised by Eve using the route
+	// Eve->Bob->Dave.
+	assertPolicyUpdate(eve, expectedPolicy, chanPointEveCarol, 5)
+	// We expect to see a total of 3, 5 or 7 channel policy updates from
+	// the channel Carol <-> Eve and advertised by Carol using the route
+	// Carol->Alice->Bob->Dave.
+	numCarol++
+	assertPolicyUpdate(carol, expectedPolicy, chanPointEveCarol, numCarol)
 
 	// Now we'll test a short disconnection. Disconnect Carol and Eve, then
 	// reconnect them after one second so that their scheduled disables are
 	// aborted. One second is twice the status sample interval, so this
 	// should allow for the disconnect to be detected, but still leave time
-	// to cancel the announcement before the 3 second inactive timeout is
+	// to cancel the announcement before the 6 second inactive timeout is
 	// hit.
-	if err := net.DisconnectNodes(carol, eve); err != nil {
-		t.Fatalf("unable to disconnect Carol from Eve: %v", err)
-	}
+	ht.DisconnectNodes(carol, eve)
 	time.Sleep(time.Second)
-	net.EnsureConnected(t.t, eve, carol)
+	ht.EnsureConnected(eve, carol)
 
-	// Since the disable should have been canceled by both Carol and Eve, we
-	// expect no channel updates to appear on the network, which means we
-	// expect the polices stay unchanged(Disable == false).
-	assertPolicyUpdate(eve, expectedPolicy, chanPointEveCarol)
-	assertPolicyUpdate(carol, expectedPolicy, chanPointEveCarol)
+	// Since the disable should have been canceled by both Carol and Eve,
+	// we expect no channel updates to appear on the network, which means
+	// we expect the policies to stay unchanged (Disabled == false).
+	assertPolicyUpdate(eve, expectedPolicy, chanPointEveCarol, 5)
+	assertPolicyUpdate(carol, expectedPolicy, chanPointEveCarol, numCarol)
 
 	// Close Alice's channels with Bob and Carol cooperatively and
-	// unilaterally respectively.
-	_, _, err = net.CloseChannel(net.Alice, chanPointAliceBob, false)
-	if err != nil {
-		t.Fatalf("unable to close channel: %v", err)
-	}
-
-	_, _, err = net.CloseChannel(net.Alice, chanPointAliceCarol, true)
-	if err != nil {
-		t.Fatalf("unable to close channel: %v", err)
-	}
+	// unilaterally respectively. Note that the CloseChannel will mine a
+	// block and check that the closing transaction can be found in both
+	// the mempool and the block.
+	ht.CloseChannel(alice, chanPointAliceBob)
+	ht.ForceCloseChannel(alice, chanPointAliceCarol)
 
 	// Now that the channel close processes have been started, we should
 	// receive an update marking each as disabled.
 	expectedPolicy.Disabled = true
-	assertPolicyUpdate(net.Alice, expectedPolicy, chanPointAliceBob)
-	assertPolicyUpdate(net.Alice, expectedPolicy, chanPointAliceCarol)
-
-	// Finally, close the channels by mining the closing transactions.
-	mineBlocks(t, net, 1, 2)
+	// We expect to see a total of 2 channel policy updates from the
+	// channel Alice <-> Bob and advertised by Alice using the route
+	// Alice->Bob->Dave.
+	assertPolicyUpdate(alice, expectedPolicy, chanPointAliceBob, 2)
+	// We expect to see a total of 2 channel policy updates from the
+	// channel Alice <-> Carol and advertised by Alice using the route
+	// Alice->Bob->Dave.
+	assertPolicyUpdate(alice, expectedPolicy, chanPointAliceCarol, 2)
 
 	// Also do this check for Eve's channel with Carol.
-	_, _, err = net.CloseChannel(eve, chanPointEveCarol, false)
-	if err != nil {
-		t.Fatalf("unable to close channel: %v", err)
-	}
+	ht.CloseChannel(eve, chanPointEveCarol)
 
-	assertPolicyUpdate(eve, expectedPolicy, chanPointEveCarol)
-
-	mineBlocks(t, net, 1, 1)
-
-	// And finally, clean up the force closed channel by mining the
-	// sweeping transaction.
-	cleanupForceClose(t, net, net.Alice, chanPointAliceCarol)
+	// We expect to see a total of 6 channel policy updates from the
+	// channel Carol <-> Eve and advertised by Eve using the route
+	// Eve->Bob->Dave.
+	assertPolicyUpdate(eve, expectedPolicy, chanPointEveCarol, 6)
 }
 
 // testUpdateChannelPolicyForPrivateChannel tests when a private channel
diff --git a/lntest/itest/lnd_test_list_on_test.go b/lntest/itest/lnd_test_list_on_test.go
index 0341d7ac9..5c1b90fc3 100644
--- a/lntest/itest/lnd_test_list_on_test.go
+++ b/lntest/itest/lnd_test_list_on_test.go
@@ -163,10 +163,6 @@ var allTestCases = []*testCase{
 		name: "route fee cutoff",
 		test: testRouteFeeCutoff,
 	},
-	{
-		name: "send update disable channel",
-		test: testSendUpdateDisableChannel,
-	},
 	{
 		name: "streaming channel backup update",
 		test: testChannelBackupUpdates,

From cd7f02c866ff09317d96f918f1e339b0823514f4 Mon Sep 17 00:00:00 2001
From: yyforyongyu
Date: Thu, 4 Aug 2022 06:10:44 +0800
Subject: [PATCH 20/47] lntemp+itest: refactor
 `testUpdateChannelPolicyForPrivateChannel`

---
 lntemp/harness_assertion.go             |  38 ++++++--
 lntemp/node/config.go                   |   1 +
 lntest/itest/list_on_test.go            |   4 +
 lntest/itest/lnd_channel_policy_test.go | 112 ++++++++----------------
 lntest/itest/lnd_test_list_on_test.go   |   4 -
 5 files changed, 74 insertions(+), 85 deletions(-)

diff --git a/lntemp/harness_assertion.go b/lntemp/harness_assertion.go
index 6a9f753da..4bfbd502d 100644
--- a/lntemp/harness_assertion.go
+++ b/lntemp/harness_assertion.go
@@ -1261,21 +1261,20 @@ func (h *HarnessTest) AssertNumHTLCsAndStage(hn *node.HarnessNode,
 // findPayment queries the payment from the node's ListPayments which matches
 // the specified preimage hash.
func (h *HarnessTest) findPayment(hn *node.HarnessNode, - preimage lntypes.Preimage) *lnrpc.Payment { + paymentHash string) *lnrpc.Payment { req := &lnrpc.ListPaymentsRequest{IncludeIncomplete: true} paymentsResp := hn.RPC.ListPayments(req) - payHash := preimage.Hash() for _, p := range paymentsResp.Payments { - if p.PaymentHash != payHash.String() { + if p.PaymentHash != paymentHash { continue } return p } - require.Fail(h, "payment: %v not found", payHash) + require.Fail(h, "payment: %v not found", paymentHash) return nil } @@ -1291,7 +1290,7 @@ func (h *HarnessTest) AssertPaymentStatus(hn *node.HarnessNode, var target *lnrpc.Payment err := wait.NoError(func() error { - p := h.findPayment(hn, preimage) + p := h.findPayment(hn, preimage.Hash().String()) if status == p.Status { target = p return nil @@ -1596,3 +1595,32 @@ func (h *HarnessTest) AssertNumPolicyUpdates(hn *node.HarnessNode, require.NoError(h, err, "%s: timeout waiting for num of policy updates", hn.Name()) } + +// AssertNumPayments asserts that the number of payments made within the test +// scope is as expected, including the incomplete ones. +func (h *HarnessTest) AssertNumPayments(hn *node.HarnessNode, + num int) []*lnrpc.Payment { + + // Get the number of payments we already have from the previous test. + have := hn.State.Payment.Total + + req := &lnrpc.ListPaymentsRequest{ + IncludeIncomplete: true, + } + + var payments []*lnrpc.Payment + err := wait.NoError(func() error { + resp := hn.RPC.ListPayments(req) + + payments = resp.Payments + if len(payments) == num { + return nil + } + + return errNumNotMatched(hn.Name(), "num of payments", + num, len(payments), have+len(payments), have) + }, DefaultTimeout) + require.NoError(h, err, "timeout checking num of payments") + + return payments +} diff --git a/lntemp/node/config.go b/lntemp/node/config.go index e076a8438..f9798ede4 100644 --- a/lntemp/node/config.go +++ b/lntemp/node/config.go @@ -166,6 +166,7 @@ func (cfg *BaseNodeConfig) GenArgs() []string { "--debuglevel=debug", "--bitcoin.defaultchanconfs=1", "--accept-keysend", + "--keep-failed-payment-attempts", fmt.Sprintf("--db.batch-commit-interval=%v", commitInterval), fmt.Sprintf("--bitcoin.defaultremotedelay=%v", lntest.DefaultCSV), diff --git a/lntest/itest/list_on_test.go b/lntest/itest/list_on_test.go index f41b00b64..c25ff6061 100644 --- a/lntest/itest/list_on_test.go +++ b/lntest/itest/list_on_test.go @@ -143,4 +143,8 @@ var allTestCasesTemp = []*lntemp.TestCase{ Name: "send update disable channel", TestFunc: testSendUpdateDisableChannel, }, + { + Name: "private channel update policy", + TestFunc: testUpdateChannelPolicyForPrivateChannel, + }, } diff --git a/lntest/itest/lnd_channel_policy_test.go b/lntest/itest/lnd_channel_policy_test.go index d38e25a5b..b2369de62 100644 --- a/lntest/itest/lnd_channel_policy_test.go +++ b/lntest/itest/lnd_channel_policy_test.go @@ -7,7 +7,6 @@ import ( "time" "github.com/btcsuite/btcd/btcutil" - "github.com/btcsuite/btcd/wire" "github.com/lightningnetwork/lnd/chainreg" "github.com/lightningnetwork/lnd/funding" "github.com/lightningnetwork/lnd/lnrpc" @@ -672,80 +671,53 @@ func testSendUpdateDisableChannel(ht *lntemp.HarnessTest) { // Bob will update the base fee via UpdateChannelPolicy, we will test that // Alice will not fail the payment and send it using the updated channel // policy. 
-func testUpdateChannelPolicyForPrivateChannel(net *lntest.NetworkHarness, - t *harnessTest) { - - ctxb := context.Background() - defer ctxb.Done() +func testUpdateChannelPolicyForPrivateChannel(ht *lntemp.HarnessTest) { + const ( + chanAmt = btcutil.Amount(100000) + paymentAmt = 20000 + baseFeeMSat = 33000 + ) // We'll create the following topology first, // Alice <--public:100k--> Bob <--private:100k--> Carol - const chanAmt = btcutil.Amount(100000) + alice, bob := ht.Alice, ht.Bob // Open a channel with 100k satoshis between Alice and Bob. - chanPointAliceBob := openChannelAndAssert( - t, net, net.Alice, net.Bob, - lntest.OpenChannelParams{ + chanPointAliceBob := ht.OpenChannel( + alice, bob, lntemp.OpenChannelParams{ Amt: chanAmt, }, ) - defer closeChannelAndAssert(t, net, net.Alice, chanPointAliceBob, false) - - // Get Alice's funding point. - aliceChanTXID, err := lnrpc.GetChanPointFundingTxid(chanPointAliceBob) - require.NoError(t.t, err, "unable to get txid") - aliceFundPoint := wire.OutPoint{ - Hash: *aliceChanTXID, - Index: chanPointAliceBob.OutputIndex, - } // Create a new node Carol. - carol := net.NewNode(t.t, "Carol", nil) - defer shutdownAndAssert(net, t, carol) + carol := ht.NewNode("Carol", nil) // Connect Carol to Bob. - net.ConnectNodes(t.t, carol, net.Bob) + ht.ConnectNodes(carol, bob) // Open a channel with 100k satoshis between Bob and Carol. - chanPointBobCarol := openChannelAndAssert( - t, net, net.Bob, carol, - lntest.OpenChannelParams{ + chanPointBobCarol := ht.OpenChannel( + bob, carol, lntemp.OpenChannelParams{ Amt: chanAmt, Private: true, }, ) - defer closeChannelAndAssert(t, net, net.Bob, chanPointBobCarol, false) // Carol should be aware of the channel between Alice and Bob. - err = carol.WaitForNetworkChannelOpen(chanPointAliceBob) - require.NoError(t.t, err) - - // Get Bob's funding point. - bobChanTXID, err := lnrpc.GetChanPointFundingTxid(chanPointBobCarol) - require.NoError(t.t, err, "unable to get txid") - bobFundPoint := wire.OutPoint{ - Hash: *bobChanTXID, - Index: chanPointBobCarol.OutputIndex, - } + ht.AssertTopologyChannelOpen(carol, chanPointAliceBob) // We should have the following topology now, // Alice <--public:100k--> Bob <--private:100k--> Carol // // Now we will create an invoice for Carol. - const paymentAmt = 20000 invoice := &lnrpc.Invoice{ Memo: "routing hints", Value: paymentAmt, Private: true, } - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - resp, err := carol.AddInvoice(ctxt, invoice) - require.NoError(t.t, err, "unable to create invoice for carol") + resp := carol.RPC.AddInvoice(invoice) // Bob now updates the channel edge policy for the private channel. - const ( - baseFeeMSat = 33000 - ) timeLockDelta := uint32(chainreg.DefaultBitcoinTimeLockDelta) updateFeeReq := &lnrpc.PolicyUpdateRequest{ BaseFeeMsat: baseFeeMSat, @@ -754,58 +726,46 @@ func testUpdateChannelPolicyForPrivateChannel(net *lntest.NetworkHarness, ChanPoint: chanPointBobCarol, }, } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - _, err = net.Bob.UpdateChannelPolicy(ctxt, updateFeeReq) - require.NoError(t.t, err, "unable to update chan policy") + bob.RPC.UpdateChannelPolicy(updateFeeReq) // Alice pays the invoices. 
She will use the updated baseFeeMSat in the // payment - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) payReqs := []string{resp.PaymentRequest} - require.NoError(t.t, - completePaymentRequests( - net.Alice, net.Alice.RouterClient, payReqs, true, - ), "unable to send payment", - ) + ht.CompletePaymentRequests(alice, payReqs) // Check that Alice did make the payment with two HTLCs, one failed and // one succeeded. - ctxt, _ = context.WithTimeout(ctxt, defaultTimeout) - paymentsResp, err := net.Alice.ListPayments( - ctxt, &lnrpc.ListPaymentsRequest{}, - ) - require.NoError(t.t, err, "failed to obtain payments for Alice") - require.Equal(t.t, 1, len(paymentsResp.Payments), "expected 1 payment") + payment := ht.AssertNumPayments(alice, 1)[0] - htlcs := paymentsResp.Payments[0].Htlcs - require.Equal(t.t, 2, len(htlcs), "expected to have 2 HTLCs") - require.Equal( - t.t, lnrpc.HTLCAttempt_FAILED, htlcs[0].Status, - "the first HTLC attempt should fail", - ) - require.Equal( - t.t, lnrpc.HTLCAttempt_SUCCEEDED, htlcs[1].Status, - "the second HTLC attempt should succeed", - ) + htlcs := payment.Htlcs + require.Equal(ht, 2, len(htlcs), "expected to have 2 HTLCs") + require.Equal(ht, lnrpc.HTLCAttempt_FAILED, htlcs[0].Status, + "the first HTLC attempt should fail") + require.Equal(ht, lnrpc.HTLCAttempt_SUCCEEDED, htlcs[1].Status, + "the second HTLC attempt should succeed") // Carol should have received 20k satoshis from Bob. - assertAmountPaid(t, "Carol(remote) [<=private] Bob(local)", - carol, bobFundPoint, 0, paymentAmt) + ht.AssertAmountPaid("Carol(remote) [<=private] Bob(local)", + carol, chanPointBobCarol, 0, paymentAmt) // Bob should have sent 20k satoshis to Carol. - assertAmountPaid(t, "Bob(local) [private=>] Carol(remote)", - net.Bob, bobFundPoint, paymentAmt, 0) + ht.AssertAmountPaid("Bob(local) [private=>] Carol(remote)", + bob, chanPointBobCarol, paymentAmt, 0) // Calculate the amount in satoshis. amtExpected := int64(paymentAmt + baseFeeMSat/1000) // Bob should have received 20k satoshis + fee from Alice. - assertAmountPaid(t, "Bob(remote) <= Alice(local)", - net.Bob, aliceFundPoint, 0, amtExpected) + ht.AssertAmountPaid("Bob(remote) <= Alice(local)", + bob, chanPointAliceBob, 0, amtExpected) // Alice should have sent 20k satoshis + fee to Bob. - assertAmountPaid(t, "Alice(local) => Bob(remote)", - net.Alice, aliceFundPoint, amtExpected, 0) + ht.AssertAmountPaid("Alice(local) => Bob(remote)", + alice, chanPointAliceBob, amtExpected, 0) + + // Finally, close the channels. 
+ ht.CloseChannel(alice, chanPointAliceBob) + ht.CloseChannel(bob, chanPointBobCarol) } // testUpdateChannelPolicyFeeRateAccuracy tests that updating the channel policy diff --git a/lntest/itest/lnd_test_list_on_test.go b/lntest/itest/lnd_test_list_on_test.go index 5c1b90fc3..e76c16753 100644 --- a/lntest/itest/lnd_test_list_on_test.go +++ b/lntest/itest/lnd_test_list_on_test.go @@ -68,10 +68,6 @@ var allTestCases = []*testCase{ name: "private channels", test: testPrivateChannels, }, - { - name: "private channel update policy", - test: testUpdateChannelPolicyForPrivateChannel, - }, { name: "invoice routing hints", test: testInvoiceRoutingHints, From 6b943a042ca9b4ab9791016b12e04683f1657398 Mon Sep 17 00:00:00 2001 From: yyforyongyu Date: Thu, 4 Aug 2022 06:14:06 +0800 Subject: [PATCH 21/47] itest: refactor `testUpdateChannelPolicyFeeRateAccuracy` --- lntest/itest/list_on_test.go | 4 +++ lntest/itest/lnd_channel_policy_test.go | 36 ++++++++----------------- lntest/itest/lnd_test_list_on_test.go | 4 --- 3 files changed, 15 insertions(+), 29 deletions(-) diff --git a/lntest/itest/list_on_test.go b/lntest/itest/list_on_test.go index c25ff6061..364bf04a7 100644 --- a/lntest/itest/list_on_test.go +++ b/lntest/itest/list_on_test.go @@ -147,4 +147,8 @@ var allTestCasesTemp = []*lntemp.TestCase{ Name: "private channel update policy", TestFunc: testUpdateChannelPolicyForPrivateChannel, }, + { + Name: "update channel policy fee rate accuracy", + TestFunc: testUpdateChannelPolicyFeeRateAccuracy, + }, } diff --git a/lntest/itest/lnd_channel_policy_test.go b/lntest/itest/lnd_channel_policy_test.go index b2369de62..236f0e73b 100644 --- a/lntest/itest/lnd_channel_policy_test.go +++ b/lntest/itest/lnd_channel_policy_test.go @@ -1,7 +1,6 @@ package itest import ( - "context" "fmt" "math" "time" @@ -771,24 +770,21 @@ func testUpdateChannelPolicyForPrivateChannel(ht *lntemp.HarnessTest) { // testUpdateChannelPolicyFeeRateAccuracy tests that updating the channel policy // rounds fee rate values correctly as well as setting fee rate with ppm works // as expected. -func testUpdateChannelPolicyFeeRateAccuracy(net *lntest.NetworkHarness, - t *harnessTest) { - +func testUpdateChannelPolicyFeeRateAccuracy(ht *lntemp.HarnessTest) { chanAmt := funding.MaxBtcFundingAmount pushAmt := chanAmt / 2 // Create a channel Alice -> Bob. - chanPoint := openChannelAndAssert( - t, net, net.Alice, net.Bob, - lntest.OpenChannelParams{ + alice, bob := ht.Alice, ht.Bob + chanPoint := ht.OpenChannel( + alice, bob, lntemp.OpenChannelParams{ Amt: chanAmt, PushAmt: pushAmt, }, ) - defer closeChannelAndAssert(t, net, net.Alice, chanPoint, false) // Nodes that we need to make sure receive the channel updates. - nodes := []*lntest.HarnessNode{net.Alice, net.Bob} + nodes := []*node.HarnessNode{alice, bob} baseFee := int64(1500) timeLockDelta := uint32(66) @@ -823,17 +819,10 @@ func testUpdateChannelPolicyFeeRateAccuracy(net *lntest.NetworkHarness, ChanPoint: chanPoint, }, } - - ctxb := context.Background() - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - if _, err := net.Alice.UpdateChannelPolicy(ctxt, req); err != nil { - t.Fatalf("unable to get alice's balance: %v", err) - } + alice.RPC.UpdateChannelPolicy(req) // Make sure that both Alice and Bob sees the same policy after update. - assertPolicyUpdate( - t, nodes, net.Alice.PubKeyStr, expectedPolicy, chanPoint, - ) + assertNodesPolicyUpdate(ht, nodes, alice, expectedPolicy, chanPoint) // Now use the new PPM feerate field and make sure that the feerate is // correctly set. 
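
The PPM field exists because the advertised fee rate is an integer number of millionths, so a rate supplied as a float only survives if round(rate * 1e6) recovers the intended value. A worked example (our numbers, assuming the usual millionths encoding of FeeRateMilliMsat):

// Going through the float field, the rate is rounded back into
// parts-per-million when the policy is advertised.
const feeBase = 1_000_000
rate := 0.000123
ppm := int64(math.Round(rate * feeBase)) // 123

// Going through the integer field, nothing is rounded away, so the
// advertised FeeRateMilliMsat is exactly the value set here.
req.FeeRatePpm = uint32(ppm)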
@@ -842,15 +831,12 @@ func testUpdateChannelPolicyFeeRateAccuracy(net *lntest.NetworkHarness, req.FeeRatePpm = feeRatePPM expectedPolicy.FeeRateMilliMsat = int64(feeRatePPM) - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - if _, err := net.Alice.UpdateChannelPolicy(ctxt, req); err != nil { - t.Fatalf("unable to get alice's balance: %v", err) - } + alice.RPC.UpdateChannelPolicy(req) // Make sure that both Alice and Bob sees the same policy after update. - assertPolicyUpdate( - t, nodes, net.Alice.PubKeyStr, expectedPolicy, chanPoint, - ) + assertNodesPolicyUpdate(ht, nodes, alice, expectedPolicy, chanPoint) + + ht.CloseChannel(alice, chanPoint) } // assertNodesPolicyUpdate checks that a given policy update has been received diff --git a/lntest/itest/lnd_test_list_on_test.go b/lntest/itest/lnd_test_list_on_test.go index e76c16753..f695e6834 100644 --- a/lntest/itest/lnd_test_list_on_test.go +++ b/lntest/itest/lnd_test_list_on_test.go @@ -4,10 +4,6 @@ package itest var allTestCases = []*testCase{ - { - name: "update channel policy fee rate accuracy", - test: testUpdateChannelPolicyFeeRateAccuracy, - }, { name: "open channel reorg test", test: testOpenChannelAfterReorg, From 4c16e053bf4d6333ce1e0a1846041b7b99e8cec0 Mon Sep 17 00:00:00 2001 From: yyforyongyu Date: Thu, 4 Aug 2022 06:20:13 +0800 Subject: [PATCH 22/47] lntemp+itest: refactor `testNetworkConnectionTimeout` --- lntemp/rpc/lnd.go | 12 ++++++ lntest/itest/list_on_test.go | 4 ++ lntest/itest/lnd_network_test.go | 60 ++++++++++++--------------- lntest/itest/lnd_test_list_on_test.go | 4 -- 4 files changed, 42 insertions(+), 38 deletions(-) diff --git a/lntemp/rpc/lnd.go b/lntemp/rpc/lnd.go index 941b4371e..98fb50baa 100644 --- a/lntemp/rpc/lnd.go +++ b/lntemp/rpc/lnd.go @@ -98,6 +98,18 @@ func (h *HarnessRPC) ConnectPeer( return resp } +// ConnectPeerAssertErr makes a RPC call to ConnectPeer and asserts an error +// returned. +func (h *HarnessRPC) ConnectPeerAssertErr(req *lnrpc.ConnectPeerRequest) error { + ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout) + defer cancel() + + _, err := h.LN.ConnectPeer(ctxt, req) + require.Error(h, err, "expected an error from ConnectPeer") + + return err +} + // ListChannels list the channels for the given node and asserts it's // successful. func (h *HarnessRPC) ListChannels( diff --git a/lntest/itest/list_on_test.go b/lntest/itest/list_on_test.go index 364bf04a7..3b3a66e8b 100644 --- a/lntest/itest/list_on_test.go +++ b/lntest/itest/list_on_test.go @@ -151,4 +151,8 @@ var allTestCasesTemp = []*lntemp.TestCase{ Name: "update channel policy fee rate accuracy", TestFunc: testUpdateChannelPolicyFeeRateAccuracy, }, + { + Name: "connection timeout", + TestFunc: testNetworkConnectionTimeout, + }, } diff --git a/lntest/itest/lnd_network_test.go b/lntest/itest/lnd_network_test.go index f2ab0bd58..3beebc8fd 100644 --- a/lntest/itest/lnd_network_test.go +++ b/lntest/itest/lnd_network_test.go @@ -10,21 +10,21 @@ import ( "github.com/lightningnetwork/lnd" "github.com/lightningnetwork/lnd/lncfg" "github.com/lightningnetwork/lnd/lnrpc" + "github.com/lightningnetwork/lnd/lntemp" + "github.com/lightningnetwork/lnd/lntemp/node" "github.com/lightningnetwork/lnd/lntest" "github.com/stretchr/testify/require" ) // testNetworkConnectionTimeout checks that the connectiontimeout is taking -// effect. It creates a node with a small connection timeout value, and connects -// it to a non-routable IP address. -func testNetworkConnectionTimeout(net *lntest.NetworkHarness, t *harnessTest) { +// effect. 
It creates a node with a small connection timeout value, and +// connects it to a non-routable IP address. +func testNetworkConnectionTimeout(ht *lntemp.HarnessTest) { var ( - ctxt, _ = context.WithTimeout( - context.Background(), defaultTimeout, - ) // testPub is a random public key for testing only. testPub = "0332bda7da70fefe4b6ab92f53b3c4f4ee7999" + "f312284a8e89c8670bb3f67dbee2" + // testHost is a non-routable IP address. It's used to cause a // connection timeout. testHost = "10.255.255.255" @@ -32,8 +32,7 @@ func testNetworkConnectionTimeout(net *lntest.NetworkHarness, t *harnessTest) { // First, test the global timeout settings. // Create Carol with a connection timeout of 1 millisecond. - carol := net.NewNode(t.t, "Carol", []string{"--connectiontimeout=1ms"}) - defer shutdownAndAssert(net, t, carol) + carol := ht.NewNode("Carol", []string{"--connectiontimeout=1ms"}) // Try to connect Carol to a non-routable IP address, which should give // us a timeout error. @@ -43,12 +42,27 @@ func testNetworkConnectionTimeout(net *lntest.NetworkHarness, t *harnessTest) { Host: testHost, }, } - assertTimeoutError(ctxt, t, carol, req) + + // assertTimeoutError asserts that a connection timeout error is + // raised. A context with a default timeout is used to make the + // request. If our customized connection timeout is less than the + // default, we won't see the request context times out, instead a + // network connection timeout will be returned. + assertTimeoutError := func(hn *node.HarnessNode, + req *lnrpc.ConnectPeerRequest) { + + err := hn.RPC.ConnectPeerAssertErr(req) + + // Check that the network returns a timeout error. + require.Containsf(ht, err.Error(), "i/o timeout", + "expected to get a timeout error, instead got: %v", err) + } + + assertTimeoutError(carol, req) // Second, test timeout on the connect peer request. // Create Dave with the default timeout setting. - dave := net.NewNode(t.t, "Dave", nil) - defer shutdownAndAssert(net, t, dave) + dave := ht.NewNode("Dave", nil) // Try to connect Dave to a non-routable IP address, using a timeout // value of 1ms, which should give us a timeout error immediately. @@ -59,7 +73,7 @@ func testNetworkConnectionTimeout(net *lntest.NetworkHarness, t *harnessTest) { }, Timeout: 1, } - assertTimeoutError(ctxt, t, dave, req) + assertTimeoutError(dave, req) } // testReconnectAfterIPChange verifies that if a persistent inbound node changes @@ -229,28 +243,6 @@ func testReconnectAfterIPChange(net *lntest.NetworkHarness, t *harnessTest) { assertConnected(t, dave, charlie) } -// assertTimeoutError asserts that a connection timeout error is raised. A -// context with a default timeout is used to make the request. If our customized -// connection timeout is less than the default, we won't see the request context -// times out, instead a network connection timeout will be returned. -func assertTimeoutError(ctxt context.Context, t *harnessTest, - node *lntest.HarnessNode, req *lnrpc.ConnectPeerRequest) { - - t.t.Helper() - - err := connect(ctxt, node, req) - - // a DeadlineExceeded error will appear in the context if the above - // ctxtTimeout value is reached. - require.NoError(t.t, ctxt.Err(), "context time out") - - // Check that the network returns a timeout error. 
-	require.Containsf(
-		t.t, err.Error(), "i/o timeout",
-		"expected to get a timeout error, instead got: %v", err,
-	)
-}
-
 func connect(ctxt context.Context, node *lntest.HarnessNode,
 	req *lnrpc.ConnectPeerRequest) error {
 
diff --git a/lntest/itest/lnd_test_list_on_test.go b/lntest/itest/lnd_test_list_on_test.go
index f695e6834..ae09423c4 100644
--- a/lntest/itest/lnd_test_list_on_test.go
+++ b/lntest/itest/lnd_test_list_on_test.go
@@ -259,10 +259,6 @@ var allTestCases = []*testCase{
 		name: "maximum channel size",
 		test: testMaxChannelSize,
 	},
-	{
-		name: "connection timeout",
-		test: testNetworkConnectionTimeout,
-	},
 	{
 		name: "stateless init",
 		test: testStatelessInit,

From cc1d5f5f8d896316ae52999e5f3224bc93315493 Mon Sep 17 00:00:00 2001
From: yyforyongyu
Date: Thu, 4 Aug 2022 06:28:31 +0800
Subject: [PATCH 23/47] lntemp+itest: refactor `testReconnectAfterIPChange`

---
 lntemp/harness_assertion.go           |  13 +++
 lntemp/node/watcher.go                |  10 +-
 lntest/itest/list_on_test.go          |   4 +
 lntest/itest/lnd_network_test.go      | 133 ++++++++++----------------
 lntest/itest/lnd_test_list_on_test.go |   4 -
 5 files changed, 75 insertions(+), 89 deletions(-)

diff --git a/lntemp/harness_assertion.go b/lntemp/harness_assertion.go
index 4bfbd502d..e9c4e7df4 100644
--- a/lntemp/harness_assertion.go
+++ b/lntemp/harness_assertion.go
@@ -1624,3 +1624,16 @@ func (h *HarnessTest) AssertNumPayments(hn *node.HarnessNode,
 
 	return payments
 }
+
+// AssertNumNodeAnns asserts that a given number of node announcements have
+// been seen by the specified node.
+func (h *HarnessTest) AssertNumNodeAnns(hn *node.HarnessNode,
+	pubkey string, num int) []*lnrpc.NodeUpdate {
+
+	// Wait until at least num node announcements from the given pubkey
+	// have been seen by this node's topology watcher.
+	anns, err := hn.Watcher.WaitForNumNodeUpdates(pubkey, num)
+	require.NoError(h, err, "failed to assert num of node announcements")
+
+	return anns
+}
diff --git a/lntemp/node/watcher.go b/lntemp/node/watcher.go
index 1425ef1dd..c081daaad 100644
--- a/lntemp/node/watcher.go
+++ b/lntemp/node/watcher.go
@@ -124,10 +124,12 @@ func (nw *nodeWatcher) WaitForNumChannelUpdates(op wire.OutPoint,
 
 // WaitForNumNodeUpdates will block until a given number of node updates has
 // been seen in the node's network topology.
func (nw *nodeWatcher) WaitForNumNodeUpdates(pubkey string, - expected int) error { + expected int) ([]*lnrpc.NodeUpdate, error) { + updates := make([]*lnrpc.NodeUpdate, 0) checkNumUpdates := func() error { - num := len(nw.GetNodeUpdates(pubkey)) + updates = nw.GetNodeUpdates(pubkey) + num := len(updates) if num >= expected { return nil } @@ -136,7 +138,9 @@ func (nw *nodeWatcher) WaitForNumNodeUpdates(pubkey string, "want %d, got %d", expected, num) } - return wait.NoError(checkNumUpdates, DefaultTimeout) + err := wait.NoError(checkNumUpdates, DefaultTimeout) + + return updates, err } // WaitForChannelOpen will block until a channel with the target outpoint is diff --git a/lntest/itest/list_on_test.go b/lntest/itest/list_on_test.go index 3b3a66e8b..33a5682b8 100644 --- a/lntest/itest/list_on_test.go +++ b/lntest/itest/list_on_test.go @@ -155,4 +155,8 @@ var allTestCasesTemp = []*lntemp.TestCase{ Name: "connection timeout", TestFunc: testNetworkConnectionTimeout, }, + { + Name: "reconnect after ip change", + TestFunc: testReconnectAfterIPChange, + }, } diff --git a/lntest/itest/lnd_network_test.go b/lntest/itest/lnd_network_test.go index 3beebc8fd..d4030bc45 100644 --- a/lntest/itest/lnd_network_test.go +++ b/lntest/itest/lnd_network_test.go @@ -13,6 +13,7 @@ import ( "github.com/lightningnetwork/lnd/lntemp" "github.com/lightningnetwork/lnd/lntemp/node" "github.com/lightningnetwork/lnd/lntest" + "github.com/lightningnetwork/lnd/lntest/wait" "github.com/stretchr/testify/require" ) @@ -78,7 +79,7 @@ func testNetworkConnectionTimeout(ht *lntemp.HarnessTest) { // testReconnectAfterIPChange verifies that if a persistent inbound node changes // its listening address then it's peer will still be able to reconnect to it. -func testReconnectAfterIPChange(net *lntest.NetworkHarness, t *harnessTest) { +func testReconnectAfterIPChange(ht *lntemp.HarnessTest) { // In this test, the following network will be set up. A single // dash line represents a peer connection and a double dash line // represents a channel. @@ -104,115 +105,81 @@ func testReconnectAfterIPChange(net *lntest.NetworkHarness, t *harnessTest) { // reconnect. // Create a new node, Charlie. - charlie := net.NewNode(t.t, "Charlie", nil) - defer shutdownAndAssert(net, t, charlie) + charlie := ht.NewNode("Charlie", nil) - // We derive two ports for Dave, and we initialise his node with - // these ports advertised as `--externalip` arguments. - ip1 := lntest.NextAvailablePort() + // We derive an extra port for Dave, and we initialise his node with + // the port advertised as `--externalip` arguments. ip2 := lntest.NextAvailablePort() + // Create a new node, Dave, which will initialize a P2P port for him. + daveArgs := []string{fmt.Sprintf("--externalip=127.0.0.1:%d", ip2)} + dave := ht.NewNode("Dave", daveArgs) + + // We now have two ports, the initial P2P port from creating the node, + // and the `externalip` specified above. advertisedAddrs := []string{ - fmt.Sprintf("127.0.0.1:%d", ip1), + fmt.Sprintf("127.0.0.1:%d", dave.Cfg.P2PPort), fmt.Sprintf("127.0.0.1:%d", ip2), } - var daveArgs []string - for _, addr := range advertisedAddrs { - daveArgs = append(daveArgs, "--externalip="+addr) - } - - // withP2PPort is a helper closure used to set the P2P port that a node - // should use. - var withP2PPort = func(port int) lntest.NodeOption { - return func(cfg *lntest.BaseNodeConfig) { - cfg.P2PPort = port - } - } - - // Create a new node, Dave, and ensure that his initial P2P port is - // ip1 derived above. 
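	// The rewrite drops the withP2PPort helper that is deleted below:
	// ht.NewNode already picks a fresh P2P port for Dave, so only the
	// extra address needs to be injected. A condensed sketch of the new
	// setup, using only the calls shown in the added lines above:
	//
	//	ip2 := lntest.NextAvailablePort()
	//	dave := ht.NewNode("Dave", []string{
	//		fmt.Sprintf("--externalip=127.0.0.1:%d", ip2),
	//	})
	//	advertisedAddrs := []string{
	//		fmt.Sprintf("127.0.0.1:%d", dave.Cfg.P2PPort),
	//		fmt.Sprintf("127.0.0.1:%d", ip2),
	//	}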
- dave := net.NewNode(t.t, "Dave", daveArgs, withP2PPort(ip1)) - defer shutdownAndAssert(net, t, dave) - - // Subscribe to graph notifications from Charlie so that we can tell - // when he receives Dave's NodeAnnouncements. - ctxb := context.Background() - charlieSub := subscribeGraphNotifications(ctxb, t, charlie) - defer close(charlieSub.quit) - // Connect Alice to Dave and Charlie. - net.ConnectNodes(t.t, net.Alice, dave) - net.ConnectNodes(t.t, net.Alice, charlie) + alice := ht.Alice + ht.ConnectNodes(alice, dave) + ht.ConnectNodes(alice, charlie) // We'll then go ahead and open a channel between Alice and Dave. This // ensures that Charlie receives the node announcement from Alice as // part of the announcement broadcast. - chanPoint := openChannelAndAssert( - t, net, net.Alice, dave, lntest.OpenChannelParams{ - Amt: 1000000, - }, + chanPoint := ht.OpenChannel( + alice, dave, lntemp.OpenChannelParams{Amt: 1000000}, ) - defer closeChannelAndAssert(t, net, net.Alice, chanPoint, false) // waitForNodeAnnouncement is a closure used to wait on the given graph // subscription for a node announcement from a node with the given // public key. It also waits for the node announcement that advertises // a particular set of addresses. - waitForNodeAnnouncement := func(graphSub graphSubscription, - nodePubKey string, addrs []string) { + waitForNodeAnnouncement := func(nodePubKey string, addrs []string) { + err := wait.NoError(func() error { + // Expect to have at least 1 node announcement now. + updates := ht.AssertNumNodeAnns(charlie, nodePubKey, 1) - for { - select { - case graphUpdate := <-graphSub.updateChan: - nextUpdate: - for _, update := range graphUpdate.NodeUpdates { - if update.IdentityKey != nodePubKey { - continue - } + // Get latest node update from the node. + update := updates[len(updates)-1] - addrMap := make(map[string]bool) - for _, addr := range update.NodeAddresses { - addrMap[addr.GetAddr()] = true - } - - for _, addr := range addrs { - if !addrMap[addr] { - continue nextUpdate - } - } - - return - } - - case err := <-graphSub.errChan: - t.Fatalf("unable to recv graph update: %v", err) - - case <-time.After(defaultTimeout): - t.Fatalf("did not receive node ann update") + addrMap := make(map[string]bool) + for _, addr := range update.NodeAddresses { + addrMap[addr.GetAddr()] = true } - } + + // Check that our wanted addresses can be found from + // the node update. + for _, addr := range addrs { + if !addrMap[addr] { + return fmt.Errorf("address %s not "+ + "found", addr) + } + } + + return nil + }, defaultTimeout) + require.NoError(ht, err, "timeout checking node ann") } // Wait for Charlie to receive Dave's initial NodeAnnouncement. - waitForNodeAnnouncement(charlieSub, dave.PubKeyStr, advertisedAddrs) + waitForNodeAnnouncement(dave.PubKeyStr, advertisedAddrs) - // Now create a persistent connection between Charlie and Bob with no - // channels. Charlie is the outbound node and Bob is the inbound node. - net.ConnectNodesPerm(t.t, charlie, dave) - - // Assert that Dave and Charlie are connected - assertConnected(t, dave, charlie) + // Now create a persistent connection between Charlie and Dave with no + // channels. Charlie is the outbound node and Dave is the inbound node. + ht.ConnectNodesPerm(charlie, dave) // Change Dave's P2P port to the second IP address that he advertised // and restart his node. 
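	// Note that the reconnect below only works because the connection
	// made above is persistent: Charlie keeps retrying every address
	// from Dave's latest node announcement, so once Dave rebinds to ip2
	// the dial to the second advertised address succeeds.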
dave.Cfg.P2PPort = ip2 - err := net.RestartNode(dave, nil) - require.NoError(t.t, err) + ht.RestartNode(dave) // assert that Dave and Charlie reconnect successfully after Dave // changes to his second advertised address. - assertConnected(t, dave, charlie) + ht.AssertConnected(dave, charlie) // Next we test the case where Dave changes his listening address to one // that was not listed in his original advertised addresses. The desired @@ -227,20 +194,22 @@ func testReconnectAfterIPChange(net *lntest.NetworkHarness, t *harnessTest) { "--externalip=127.0.0.1:%d", dave.Cfg.P2PPort, ), } - err = net.RestartNode(dave, nil) - require.NoError(t.t, err) + ht.RestartNode(dave) // Show that Charlie does receive Dave's new listening address in // a Node Announcement. waitForNodeAnnouncement( - charlieSub, dave.PubKeyStr, + dave.PubKeyStr, []string{fmt.Sprintf("127.0.0.1:%d", dave.Cfg.P2PPort)}, ) // assert that Dave and Charlie do reconnect after Dave changes his P2P // address to one not listed in Dave's original advertised list of // addresses. - assertConnected(t, dave, charlie) + ht.AssertConnected(dave, charlie) + + // Finally, close the channel. + ht.CloseChannel(alice, chanPoint) } func connect(ctxt context.Context, node *lntest.HarnessNode, diff --git a/lntest/itest/lnd_test_list_on_test.go b/lntest/itest/lnd_test_list_on_test.go index ae09423c4..7f65a1a2f 100644 --- a/lntest/itest/lnd_test_list_on_test.go +++ b/lntest/itest/lnd_test_list_on_test.go @@ -8,10 +8,6 @@ var allTestCases = []*testCase{ name: "open channel reorg test", test: testOpenChannelAfterReorg, }, - { - name: "reconnect after ip change", - test: testReconnectAfterIPChange, - }, { name: "graph topology notifications", test: testGraphTopologyNotifications, From a211e6093604699f3c19621acf60b77468a5cd0f Mon Sep 17 00:00:00 2001 From: yyforyongyu Date: Thu, 4 Aug 2022 06:32:20 +0800 Subject: [PATCH 24/47] itest: refactor `testAddPeerConfig` --- lntest/itest/list_on_test.go | 4 ++ lntest/itest/lnd_network_test.go | 68 +++++---------------------- lntest/itest/lnd_test_list_on_test.go | 4 -- 3 files changed, 15 insertions(+), 61 deletions(-) diff --git a/lntest/itest/list_on_test.go b/lntest/itest/list_on_test.go index 33a5682b8..57474ae99 100644 --- a/lntest/itest/list_on_test.go +++ b/lntest/itest/list_on_test.go @@ -159,4 +159,8 @@ var allTestCasesTemp = []*lntemp.TestCase{ Name: "reconnect after ip change", TestFunc: testReconnectAfterIPChange, }, + { + Name: "addpeer config", + TestFunc: testAddPeerConfig, + }, } diff --git a/lntest/itest/lnd_network_test.go b/lntest/itest/lnd_network_test.go index d4030bc45..6763a952e 100644 --- a/lntest/itest/lnd_network_test.go +++ b/lntest/itest/lnd_network_test.go @@ -1,13 +1,9 @@ package itest import ( - "context" "fmt" - network "net" - "strings" - "time" + "net" - "github.com/lightningnetwork/lnd" "github.com/lightningnetwork/lnd/lncfg" "github.com/lightningnetwork/lnd/lnrpc" "github.com/lightningnetwork/lnd/lntemp" @@ -212,75 +208,33 @@ func testReconnectAfterIPChange(ht *lntemp.HarnessTest) { ht.CloseChannel(alice, chanPoint) } -func connect(ctxt context.Context, node *lntest.HarnessNode, - req *lnrpc.ConnectPeerRequest) error { - - syncTimeout := time.After(15 * time.Second) - ticker := time.NewTicker(time.Millisecond * 100) - defer ticker.Stop() - - for { - select { - case <-ticker.C: - _, err := node.ConnectPeer(ctxt, req) - // If there's no error, return nil - if err == nil { - return err - } - // If the error is no ErrServerNotActive, return it. 
- // Otherwise, we will retry until timeout. - if !strings.Contains(err.Error(), - lnd.ErrServerNotActive.Error()) { - - return err - } - case <-syncTimeout: - return fmt.Errorf("chain backend did not " + - "finish syncing") - } - } - return nil -} - // testAddPeerConfig tests that the "--addpeer" config flag successfully adds // a new peer. -func testAddPeerConfig(net *lntest.NetworkHarness, t *harnessTest) { - ctxb := context.Background() - - ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout) - defer cancel() - alice := net.Alice - info, err := alice.GetInfo(ctxt, &lnrpc.GetInfoRequest{}) - require.NoError(t.t, err) +func testAddPeerConfig(ht *lntemp.HarnessTest) { + alice := ht.Alice + info := alice.RPC.GetInfo() alicePeerAddress := info.Uris[0] // Create a new node (Carol) with Alice as a peer. - args := []string{ - fmt.Sprintf("--addpeer=%v", alicePeerAddress), - } - carol := net.NewNode(t.t, "Carol", args) - defer shutdownAndAssert(net, t, carol) + args := []string{fmt.Sprintf("--addpeer=%v", alicePeerAddress)} + carol := ht.NewNode("Carol", args) - assertConnected(t, alice, carol) + ht.EnsureConnected(alice, carol) // If we list Carol's peers, Alice should already be // listed as one, since we specified her using the // addpeer flag. - ctxt, cancel = context.WithTimeout(ctxb, defaultTimeout) - defer cancel() - listPeersRequest := &lnrpc.ListPeersRequest{} - listPeersResp, err := carol.ListPeers(ctxt, listPeersRequest) - require.NoError(t.t, err) + listPeersResp := carol.RPC.ListPeers() parsedPeerAddr, err := lncfg.ParseLNAddressString( - alicePeerAddress, "9735", network.ResolveTCPAddr, + alicePeerAddress, "9735", net.ResolveTCPAddr, ) - require.NoError(t.t, err) + require.NoError(ht, err) parsedKeyStr := fmt.Sprintf( "%x", parsedPeerAddr.IdentityKey.SerializeCompressed(), ) - require.Equal(t.t, parsedKeyStr, listPeersResp.Peers[0].PubKey) + require.Equal(ht, parsedKeyStr, listPeersResp.Peers[0].PubKey) } diff --git a/lntest/itest/lnd_test_list_on_test.go b/lntest/itest/lnd_test_list_on_test.go index 7f65a1a2f..066ed3499 100644 --- a/lntest/itest/lnd_test_list_on_test.go +++ b/lntest/itest/lnd_test_list_on_test.go @@ -295,10 +295,6 @@ var allTestCases = []*testCase{ name: "taproot", test: testTaproot, }, - { - name: "addpeer config", - test: testAddPeerConfig, - }, { name: "resolution handoff", test: testResHandoff, From 1084e32f0e9bf3ad581573d21cbb939f78ade216 Mon Sep 17 00:00:00 2001 From: yyforyongyu Date: Thu, 4 Aug 2022 06:36:36 +0800 Subject: [PATCH 25/47] itest: refactor `testUnannouncedChannels` --- lntest/itest/list_on_test.go | 4 ++ lntest/itest/lnd_channel_graph_test.go | 89 ++++---------------------- lntest/itest/lnd_test_list_on_test.go | 4 -- 3 files changed, 15 insertions(+), 82 deletions(-) diff --git a/lntest/itest/list_on_test.go b/lntest/itest/list_on_test.go index 57474ae99..abade5130 100644 --- a/lntest/itest/list_on_test.go +++ b/lntest/itest/list_on_test.go @@ -163,4 +163,8 @@ var allTestCasesTemp = []*lntemp.TestCase{ Name: "addpeer config", TestFunc: testAddPeerConfig, }, + { + Name: "unannounced channels", + TestFunc: testUnannouncedChannels, + }, } diff --git a/lntest/itest/lnd_channel_graph_test.go b/lntest/itest/lnd_channel_graph_test.go index 0010dbdb5..b7335e203 100644 --- a/lntest/itest/lnd_channel_graph_test.go +++ b/lntest/itest/lnd_channel_graph_test.go @@ -222,106 +222,39 @@ func testUpdateChanStatus(ht *lntemp.HarnessTest) { // testUnannouncedChannels checks unannounced channels are not returned by // describeGraph RPC request unless 
explicitly asked for. -func testUnannouncedChannels(net *lntest.NetworkHarness, t *harnessTest) { - ctxb := context.Background() - +func testUnannouncedChannels(ht *lntemp.HarnessTest) { amount := funding.MaxBtcFundingAmount + alice, bob := ht.Alice, ht.Bob // Open a channel between Alice and Bob, ensuring the // channel has been opened properly. - chanOpenUpdate := openChannelStream( - t, net, net.Alice, net.Bob, - lntest.OpenChannelParams{ - Amt: amount, - }, + chanOpenUpdate := ht.OpenChannelAssertStream( + alice, bob, lntemp.OpenChannelParams{Amt: amount}, ) // Mine 2 blocks, and check that the channel is opened but not yet // announced to the network. - mineBlocks(t, net, 2, 1) + ht.MineBlocksAndAssertNumTxes(2, 1) // One block is enough to make the channel ready for use, since the // nodes have defaultNumConfs=1 set. - fundingChanPoint, err := net.WaitForChannelOpen(chanOpenUpdate) - if err != nil { - t.Fatalf("error while waiting for channel open: %v", err) - } + fundingChanPoint := ht.WaitForChannelOpenEvent(chanOpenUpdate) // Alice should have 1 edge in her graph. - req := &lnrpc.ChannelGraphRequest{ - IncludeUnannounced: true, - } - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - chanGraph, err := net.Alice.DescribeGraph(ctxt, req) - if err != nil { - t.Fatalf("unable to query alice's graph: %v", err) - } - - numEdges := len(chanGraph.Edges) - if numEdges != 1 { - t.Fatalf("expected to find 1 edge in the graph, found %d", numEdges) - } + ht.AssertNumEdges(alice, 1, true) // Channels should not be announced yet, hence Alice should have no // announced edges in her graph. - req.IncludeUnannounced = false - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - chanGraph, err = net.Alice.DescribeGraph(ctxt, req) - if err != nil { - t.Fatalf("unable to query alice's graph: %v", err) - } - - numEdges = len(chanGraph.Edges) - if numEdges != 0 { - t.Fatalf("expected to find 0 announced edges in the graph, found %d", - numEdges) - } + ht.AssertNumEdges(alice, 0, false) // Mine 4 more blocks, and check that the channel is now announced. - mineBlocks(t, net, 4, 0) + ht.MineBlocks(4) // Give the network a chance to learn that auth proof is confirmed. - var predErr error - err = wait.Predicate(func() bool { - // The channel should now be announced. Check that Alice has 1 - // announced edge. - req.IncludeUnannounced = false - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - chanGraph, err = net.Alice.DescribeGraph(ctxt, req) - if err != nil { - predErr = fmt.Errorf("unable to query alice's graph: %v", err) - return false - } - - numEdges = len(chanGraph.Edges) - if numEdges != 1 { - predErr = fmt.Errorf("expected to find 1 announced edge in "+ - "the graph, found %d", numEdges) - return false - } - return true - }, defaultTimeout) - if err != nil { - t.Fatalf("%v", predErr) - } - - // The channel should now be announced. Check that Alice has 1 announced - // edge. - req.IncludeUnannounced = false - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - chanGraph, err = net.Alice.DescribeGraph(ctxt, req) - if err != nil { - t.Fatalf("unable to query alice's graph: %v", err) - } - - numEdges = len(chanGraph.Edges) - if numEdges != 1 { - t.Fatalf("expected to find 1 announced edge in the graph, found %d", - numEdges) - } + ht.AssertNumEdges(alice, 1, false) // Close the channel used during the test. 
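	// The boolean passed to AssertNumEdges above is an
	// include-unannounced flag: with true, edges without an announcement
	// proof are counted as well; with false, only announced edges are.
	// That is why the counts read 1, then 0, then 1 as the channel goes
	// from open-but-unannounced to announced:
	//
	//	ht.AssertNumEdges(alice, 1, true)  // unannounced edge counted
	//	ht.AssertNumEdges(alice, 0, false) // not yet announced
	//	ht.MineBlocks(4)                   // reach announcement depth
	//	ht.AssertNumEdges(alice, 1, false) // now announced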
- closeChannelAndAssert(t, net, net.Alice, fundingChanPoint, false) + ht.CloseChannel(alice, fundingChanPoint) } func testGraphTopologyNotifications(net *lntest.NetworkHarness, t *harnessTest) { diff --git a/lntest/itest/lnd_test_list_on_test.go b/lntest/itest/lnd_test_list_on_test.go index 066ed3499..e6497d086 100644 --- a/lntest/itest/lnd_test_list_on_test.go +++ b/lntest/itest/lnd_test_list_on_test.go @@ -52,10 +52,6 @@ var allTestCases = []*testCase{ name: "send to route error propagation", test: testSendToRouteErrorPropagation, }, - { - name: "unannounced channels", - test: testUnannouncedChannels, - }, { name: "private channels", test: testPrivateChannels, From 7029698c16e1aa2c949cadf07ef914ad5a9bcf9b Mon Sep 17 00:00:00 2001 From: yyforyongyu Date: Thu, 4 Aug 2022 06:46:45 +0800 Subject: [PATCH 26/47] lntemp+itest: refactor `testGraphTopologyNotifications` --- lntemp/harness_assertion.go | 10 + lntest/itest/assertions.go | 22 --- lntest/itest/list_on_test.go | 4 + lntest/itest/lnd_channel_graph_test.go | 263 ++++++------------------- lntest/itest/lnd_test_list_on_test.go | 4 - 5 files changed, 79 insertions(+), 224 deletions(-) diff --git a/lntemp/harness_assertion.go b/lntemp/harness_assertion.go index e9c4e7df4..6b89d6aad 100644 --- a/lntemp/harness_assertion.go +++ b/lntemp/harness_assertion.go @@ -1637,3 +1637,13 @@ func (h *HarnessTest) AssertNumNodeAnns(hn *node.HarnessNode, return anns } + +// AssertNumChannelUpdates asserts that a given number of channel updates has +// been seen in the specified node's network topology. +func (h *HarnessTest) AssertNumChannelUpdates(hn *node.HarnessNode, + chanPoint *lnrpc.ChannelPoint, num int) { + + op := h.OutPointFromChannelPoint(chanPoint) + err := hn.Watcher.WaitForNumChannelUpdates(op, num) + require.NoError(h, err, "failed to assert num of channel updates") +} diff --git a/lntest/itest/assertions.go b/lntest/itest/assertions.go index 1e6b325a7..9cbffd8b4 100644 --- a/lntest/itest/assertions.go +++ b/lntest/itest/assertions.go @@ -1171,28 +1171,6 @@ func assertNodeNumChannels(t *harnessTest, node *lntest.HarnessNode, ) } -func assertSyncType(t *harnessTest, node *lntest.HarnessNode, - peer string, syncType lnrpc.Peer_SyncType) { - - t.t.Helper() - - ctxb := context.Background() - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - resp, err := node.ListPeers(ctxt, &lnrpc.ListPeersRequest{}) - require.NoError(t.t, err) - - for _, rpcPeer := range resp.Peers { - if rpcPeer.PubKey != peer { - continue - } - - require.Equal(t.t, syncType, rpcPeer.SyncType) - return - } - - t.t.Fatalf("unable to find peer: %s", peer) -} - // assertActiveHtlcs makes sure all the passed nodes have the _exact_ HTLCs // matching payHashes on _all_ their channels. 
func assertActiveHtlcs(nodes []*lntest.HarnessNode, payHashes ...[]byte) error { diff --git a/lntest/itest/list_on_test.go b/lntest/itest/list_on_test.go index abade5130..4bc888360 100644 --- a/lntest/itest/list_on_test.go +++ b/lntest/itest/list_on_test.go @@ -167,4 +167,8 @@ var allTestCasesTemp = []*lntemp.TestCase{ Name: "unannounced channels", TestFunc: testUnannouncedChannels, }, + { + Name: "graph topology notifications", + TestFunc: testGraphTopologyNotifications, + }, } diff --git a/lntest/itest/lnd_channel_graph_test.go b/lntest/itest/lnd_channel_graph_test.go index b7335e203..d48dfd528 100644 --- a/lntest/itest/lnd_channel_graph_test.go +++ b/lntest/itest/lnd_channel_graph_test.go @@ -1,7 +1,6 @@ package itest import ( - "bytes" "context" "fmt" "io" @@ -257,32 +256,30 @@ func testUnannouncedChannels(ht *lntemp.HarnessTest) { ht.CloseChannel(alice, fundingChanPoint) } -func testGraphTopologyNotifications(net *lntest.NetworkHarness, t *harnessTest) { - t.t.Run("pinned", func(t *testing.T) { - ht := newHarnessTest(t, net) - testGraphTopologyNtfns(net, ht, true) +func testGraphTopologyNotifications(ht *lntemp.HarnessTest) { + ht.Run("pinned", func(t *testing.T) { + subT := ht.Subtest(t) + testGraphTopologyNtfns(subT, true) }) - t.t.Run("unpinned", func(t *testing.T) { - ht := newHarnessTest(t, net) - testGraphTopologyNtfns(net, ht, false) + ht.Run("unpinned", func(t *testing.T) { + subT := ht.Subtest(t) + testGraphTopologyNtfns(subT, false) }) } -func testGraphTopologyNtfns(net *lntest.NetworkHarness, t *harnessTest, pinned bool) { - ctxb := context.Background() - +func testGraphTopologyNtfns(ht *lntemp.HarnessTest, pinned bool) { const chanAmt = funding.MaxBtcFundingAmount // Spin up Bob first, since we will need to grab his pubkey when // starting Alice to test pinned syncing. - bob := net.NewNode(t.t, "bob", nil) - defer shutdownAndAssert(net, t, bob) - - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - bobInfo, err := bob.GetInfo(ctxt, &lnrpc.GetInfoRequest{}) - require.NoError(t.t, err) + bob := ht.Bob + bobInfo := bob.RPC.GetInfo() bobPubkey := bobInfo.IdentityPubkey + // Restart Bob as he may have leftover announcements from previous + // tests, causing the graph to be unsynced. + ht.RestartNodeWithExtraArgs(bob, nil) + // For unpinned syncing, start Alice as usual. Otherwise grab Bob's // pubkey to include in his pinned syncer set. var aliceArgs []string @@ -293,169 +290,64 @@ func testGraphTopologyNtfns(net *lntest.NetworkHarness, t *harnessTest, pinned b } } - alice := net.NewNode(t.t, "alice", aliceArgs) - defer shutdownAndAssert(net, t, alice) + alice := ht.Alice + ht.RestartNodeWithExtraArgs(alice, aliceArgs) // Connect Alice and Bob. - net.EnsureConnected(t.t, alice, bob) - - // Alice stimmy. - net.SendCoins(t.t, btcutil.SatoshiPerBitcoin, alice) - - // Bob stimmy. - net.SendCoins(t.t, btcutil.SatoshiPerBitcoin, bob) + ht.EnsureConnected(alice, bob) // Assert that Bob has the correct sync type before proceeding. if pinned { - assertSyncType(t, alice, bobPubkey, lnrpc.Peer_PINNED_SYNC) + assertSyncType(ht, alice, bobPubkey, lnrpc.Peer_PINNED_SYNC) } else { - assertSyncType(t, alice, bobPubkey, lnrpc.Peer_ACTIVE_SYNC) + assertSyncType(ht, alice, bobPubkey, lnrpc.Peer_ACTIVE_SYNC) } // Regardless of syncer type, ensure that both peers report having // completed their initial sync before continuing to make a channel. - waitForGraphSync(t, alice) - - // Let Alice subscribe to graph notifications. 
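	// With lntemp there is no per-test graph subscription to set up and
	// tear down: each HarnessNode carries its own topology watcher (see
	// the lntemp/node/watcher.go hunks earlier in this series), and the
	// assertions a few lines down poll that watcher instead:
	//
	//	ht.AssertNumChannelUpdates(alice, chanPoint, 2)
	//	ht.AssertNumNodeAnns(alice, alice.PubKeyStr, 1)
	//	ht.AssertNumNodeAnns(alice, bob.PubKeyStr, 1)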
- graphSub := subscribeGraphNotifications(ctxb, t, alice) - defer close(graphSub.quit) + ht.WaitForGraphSync(alice) // Open a new channel between Alice and Bob. - chanPoint := openChannelAndAssert( - t, net, alice, bob, - lntest.OpenChannelParams{ - Amt: chanAmt, - }, + chanPoint := ht.OpenChannel( + alice, bob, lntemp.OpenChannelParams{Amt: chanAmt}, ) // The channel opening above should have triggered a few notifications // sent to the notification client. We'll expect two channel updates, // and two node announcements. - var numChannelUpds int - var numNodeAnns int - for numChannelUpds < 2 && numNodeAnns < 2 { - select { - // Ensure that a new update for both created edges is properly - // dispatched to our registered client. - case graphUpdate := <-graphSub.updateChan: - // Process all channel updates presented in this update - // message. - for _, chanUpdate := range graphUpdate.ChannelUpdates { - switch chanUpdate.AdvertisingNode { - case alice.PubKeyStr: - case bob.PubKeyStr: - default: - t.Fatalf("unknown advertising node: %v", - chanUpdate.AdvertisingNode) - } - switch chanUpdate.ConnectingNode { - case alice.PubKeyStr: - case bob.PubKeyStr: - default: - t.Fatalf("unknown connecting node: %v", - chanUpdate.ConnectingNode) - } + ht.AssertNumChannelUpdates(alice, chanPoint, 2) + ht.AssertNumNodeAnns(alice, alice.PubKeyStr, 1) + ht.AssertNumNodeAnns(alice, bob.PubKeyStr, 1) - if chanUpdate.Capacity != int64(chanAmt) { - t.Fatalf("channel capacities mismatch:"+ - " expected %v, got %v", chanAmt, - btcutil.Amount(chanUpdate.Capacity)) - } - numChannelUpds++ - } + _, blockHeight := ht.Miner.GetBestBlock() - for _, nodeUpdate := range graphUpdate.NodeUpdates { - switch nodeUpdate.IdentityKey { - case alice.PubKeyStr: - case bob.PubKeyStr: - default: - t.Fatalf("unknown node: %v", - nodeUpdate.IdentityKey) - } - numNodeAnns++ - } - case err := <-graphSub.errChan: - t.Fatalf("unable to recv graph update: %v", err) - case <-time.After(time.Second * 10): - t.Fatalf("timeout waiting for graph notifications, "+ - "only received %d/2 chanupds and %d/2 nodeanns", - numChannelUpds, numNodeAnns) - } - } - - _, blockHeight, err := net.Miner.Client.GetBestBlock() - if err != nil { - t.Fatalf("unable to get current blockheight %v", err) - } - - // Now we'll test that updates are properly sent after channels are closed - // within the network. - closeChannelAndAssert(t, net, alice, chanPoint, false) + // Now we'll test that updates are properly sent after channels are + // closed within the network. + ht.CloseChannel(alice, chanPoint) // Now that the channel has been closed, we should receive a // notification indicating so. 
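	// The select loop deleted below collapses into the single call shown
	// in the added lines that follow it:
	//
	//	closedChan := ht.AssertTopologyChannelClosed(alice, chanPoint)
	//
	// which blocks until Alice's watcher reports a ClosedChans entry for
	// chanPoint and returns that entry for the height and channel-point
	// checks.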
-out: - for { - select { - case graphUpdate := <-graphSub.updateChan: - if len(graphUpdate.ClosedChans) != 1 { - continue - } + closedChan := ht.AssertTopologyChannelClosed(alice, chanPoint) - closedChan := graphUpdate.ClosedChans[0] - if closedChan.ClosedHeight != uint32(blockHeight+1) { - t.Fatalf("close heights of channel mismatch: "+ - "expected %v, got %v", blockHeight+1, - closedChan.ClosedHeight) - } - chanPointTxid, err := lnrpc.GetChanPointFundingTxid(chanPoint) - if err != nil { - t.Fatalf("unable to get txid: %v", err) - } - closedChanTxid, err := lnrpc.GetChanPointFundingTxid( - closedChan.ChanPoint, - ) - if err != nil { - t.Fatalf("unable to get txid: %v", err) - } - if !bytes.Equal(closedChanTxid[:], chanPointTxid[:]) { - t.Fatalf("channel point hash mismatch: "+ - "expected %v, got %v", chanPointTxid, - closedChanTxid) - } - if closedChan.ChanPoint.OutputIndex != chanPoint.OutputIndex { - t.Fatalf("output index mismatch: expected %v, "+ - "got %v", chanPoint.OutputIndex, - closedChan.ChanPoint) - } + require.Equal(ht, uint32(blockHeight+1), closedChan.ClosedHeight, + "close heights of channel mismatch") - break out - - case err := <-graphSub.errChan: - t.Fatalf("unable to recv graph update: %v", err) - case <-time.After(time.Second * 10): - t.Fatalf("notification for channel closure not " + - "sent") - } - } + fundingTxid := ht.OutPointFromChannelPoint(chanPoint) + closeTxid := ht.OutPointFromChannelPoint(closedChan.ChanPoint) + require.EqualValues(ht, fundingTxid, closeTxid, + "channel point hash mismatch") // For the final portion of the test, we'll ensure that once a new node // appears in the network, the proper notification is dispatched. Note // that a node that does not have any channels open is ignored, so first // we disconnect Alice and Bob, open a channel between Bob and Carol, // and finally connect Alice to Bob again. - if err := net.DisconnectNodes(alice, bob); err != nil { - t.Fatalf("unable to disconnect alice and bob: %v", err) - } - carol := net.NewNode(t.t, "Carol", nil) - defer shutdownAndAssert(net, t, carol) + ht.DisconnectNodes(alice, bob) - net.ConnectNodes(t.t, bob, carol) - chanPoint = openChannelAndAssert( - t, net, bob, carol, - lntest.OpenChannelParams{ - Amt: chanAmt, - }, + carol := ht.NewNode("Carol", nil) + ht.ConnectNodes(bob, carol) + chanPoint = ht.OpenChannel( + bob, carol, lntemp.OpenChannelParams{Amt: chanAmt}, ) // Reconnect Alice and Bob. This should result in the nodes syncing up @@ -464,60 +356,15 @@ out: // and Carol. Note that we will also receive a node announcement from // Bob, since a node will update its node announcement after a new // channel is opened. - net.EnsureConnected(t.t, alice, bob) + ht.EnsureConnected(alice, bob) // We should receive an update advertising the newly connected node, // Bob's new node announcement, and the channel between Bob and Carol. 
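	// All of these topology assertions reduce to the same polling
	// primitive, wait.NoError, which retries a closure until it returns
	// nil or the timeout lapses. A minimal sketch against the watcher
	// cache, using names that appear elsewhere in this series:
	//
	//	err := wait.NoError(func() error {
	//		updates := alice.Watcher.GetNodeUpdates(bob.PubKeyStr)
	//		if len(updates) < 1 {
	//			return fmt.Errorf("no node announcement yet")
	//		}
	//		return nil
	//	}, defaultTimeout)
	//	require.NoError(ht, err, "bob's announcement never arrived")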
- numNodeAnns = 0 - numChannelUpds = 0 - for numChannelUpds < 2 && numNodeAnns < 1 { - select { - case graphUpdate := <-graphSub.updateChan: - for _, nodeUpdate := range graphUpdate.NodeUpdates { - switch nodeUpdate.IdentityKey { - case carol.PubKeyStr: - case bob.PubKeyStr: - default: - t.Fatalf("unknown node update pubey: %v", - nodeUpdate.IdentityKey) - } - numNodeAnns++ - } - - for _, chanUpdate := range graphUpdate.ChannelUpdates { - switch chanUpdate.AdvertisingNode { - case carol.PubKeyStr: - case bob.PubKeyStr: - default: - t.Fatalf("unknown advertising node: %v", - chanUpdate.AdvertisingNode) - } - switch chanUpdate.ConnectingNode { - case carol.PubKeyStr: - case bob.PubKeyStr: - default: - t.Fatalf("unknown connecting node: %v", - chanUpdate.ConnectingNode) - } - - if chanUpdate.Capacity != int64(chanAmt) { - t.Fatalf("channel capacities mismatch:"+ - " expected %v, got %v", chanAmt, - btcutil.Amount(chanUpdate.Capacity)) - } - numChannelUpds++ - } - case err := <-graphSub.errChan: - t.Fatalf("unable to recv graph update: %v", err) - case <-time.After(time.Second * 10): - t.Fatalf("timeout waiting for graph notifications, "+ - "only received %d/2 chanupds and %d/2 nodeanns", - numChannelUpds, numNodeAnns) - } - } + ht.AssertNumChannelUpdates(alice, chanPoint, 2) + ht.AssertNumNodeAnns(alice, bob.PubKeyStr, 1) // Close the channel between Bob and Carol. - closeChannelAndAssert(t, net, bob, chanPoint, false) + ht.CloseChannel(bob, chanPoint) } // testNodeAnnouncement ensures that when a node is started with one or more @@ -939,3 +786,23 @@ func testUpdateNodeAnnouncement(net *lntest.NetworkHarness, t *harnessTest) { // Close the channel between Bob and Dave. closeChannelAndAssert(t, net, net.Bob, chanPoint, false) } + +// assertSyncType asserts that the peer has an expected syncType. +// +// NOTE: only made for tests in this file. 
+func assertSyncType(ht *lntemp.HarnessTest, hn *node.HarnessNode, + peer string, syncType lnrpc.Peer_SyncType) { + + resp := hn.RPC.ListPeers() + for _, rpcPeer := range resp.Peers { + if rpcPeer.PubKey != peer { + continue + } + + require.Equal(ht, syncType, rpcPeer.SyncType) + + return + } + + ht.Fatalf("unable to find peer: %s", peer) +} diff --git a/lntest/itest/lnd_test_list_on_test.go b/lntest/itest/lnd_test_list_on_test.go index e6497d086..a5a79a744 100644 --- a/lntest/itest/lnd_test_list_on_test.go +++ b/lntest/itest/lnd_test_list_on_test.go @@ -8,10 +8,6 @@ var allTestCases = []*testCase{ name: "open channel reorg test", test: testOpenChannelAfterReorg, }, - { - name: "graph topology notifications", - test: testGraphTopologyNotifications, - }, { name: "channel force closure", test: testChannelForceClosure, From c990e053c61b8df14d6413588ff6c3f680bf3d29 Mon Sep 17 00:00:00 2001 From: yyforyongyu Date: Thu, 4 Aug 2022 06:49:43 +0800 Subject: [PATCH 27/47] itest: refactor `testNodeAnnouncement` --- lntest/itest/list_on_test.go | 4 ++ lntest/itest/lnd_channel_graph_test.go | 69 ++++++-------------------- lntest/itest/lnd_test_list_on_test.go | 5 -- 3 files changed, 19 insertions(+), 59 deletions(-) diff --git a/lntest/itest/list_on_test.go b/lntest/itest/list_on_test.go index 4bc888360..9f98018ab 100644 --- a/lntest/itest/list_on_test.go +++ b/lntest/itest/list_on_test.go @@ -171,4 +171,8 @@ var allTestCasesTemp = []*lntemp.TestCase{ Name: "graph topology notifications", TestFunc: testGraphTopologyNotifications, }, + { + Name: "node announcement", + TestFunc: testNodeAnnouncement, + }, } diff --git a/lntest/itest/lnd_channel_graph_test.go b/lntest/itest/lnd_channel_graph_test.go index d48dfd528..fa27a4f77 100644 --- a/lntest/itest/lnd_channel_graph_test.go +++ b/lntest/itest/lnd_channel_graph_test.go @@ -370,17 +370,15 @@ func testGraphTopologyNtfns(ht *lntemp.HarnessTest, pinned bool) { // testNodeAnnouncement ensures that when a node is started with one or more // external IP addresses specified on the command line, that those addresses // announced to the network and reported in the network graph. -func testNodeAnnouncement(net *lntest.NetworkHarness, t *harnessTest) { - ctxb := context.Background() - - aliceSub := subscribeGraphNotifications(ctxb, t, net.Alice) - defer close(aliceSub.quit) +func testNodeAnnouncement(ht *lntemp.HarnessTest) { + alice, bob := ht.Alice, ht.Bob advertisedAddrs := []string{ "192.168.1.1:8333", "[2001:db8:85a3:8d3:1319:8a2e:370:7348]:8337", "bkb6azqggsaiskzi.onion:9735", - "fomvuglh6h6vcag73xo5t5gv56ombih3zr2xvplkpbfd7wrog4swjwid.onion:1234", + "fomvuglh6h6vcag73xo5t5gv56ombih3zr2xvplkpbfd7wrog4swj" + + "wid.onion:1234", } var lndArgs []string @@ -388,29 +386,17 @@ func testNodeAnnouncement(net *lntest.NetworkHarness, t *harnessTest) { lndArgs = append(lndArgs, "--externalip="+addr) } - dave := net.NewNode(t.t, "Dave", lndArgs) - defer shutdownAndAssert(net, t, dave) + dave := ht.NewNode("Dave", lndArgs) // We must let Dave have an open channel before he can send a node // announcement, so we open a channel with Bob, - net.ConnectNodes(t.t, net.Bob, dave) - - // Alice shouldn't receive any new updates yet since the channel has yet - // to be opened. - select { - case <-aliceSub.updateChan: - t.Fatalf("received unexpected update from dave") - case <-time.After(time.Second): - } + ht.ConnectNodes(bob, dave) // We'll then go ahead and open a channel between Bob and Dave. 
This // ensures that Alice receives the node announcement from Bob as part of // the announcement broadcast. - chanPoint := openChannelAndAssert( - t, net, net.Bob, dave, - lntest.OpenChannelParams{ - Amt: 1000000, - }, + chanPoint := ht.OpenChannel( + bob, dave, lntemp.OpenChannelParams{Amt: 1000000}, ) assertAddrs := func(addrsFound []string, targetAddrs ...string) { @@ -420,45 +406,20 @@ func testNodeAnnouncement(net *lntest.NetworkHarness, t *harnessTest) { } for _, addr := range targetAddrs { - if _, ok := addrs[addr]; !ok { - t.Fatalf("address %v not found in node "+ - "announcement", addr) - } + _, ok := addrs[addr] + require.True(ht, ok, "address %v not found in node "+ + "announcement", addr) } } - - waitForAddrsInUpdate := func(graphSub graphSubscription, - nodePubKey string, targetAddrs ...string) { - - for { - select { - case graphUpdate := <-graphSub.updateChan: - for _, update := range graphUpdate.NodeUpdates { - if update.IdentityKey == nodePubKey { - assertAddrs( - update.Addresses, // nolint:staticcheck - targetAddrs..., - ) - return - } - } - case err := <-graphSub.errChan: - t.Fatalf("unable to recv graph update: %v", err) - case <-time.After(defaultTimeout): - t.Fatalf("did not receive node ann update") - } - } - } - // We'll then wait for Alice to receive Dave's node announcement // including the expected advertised addresses from Bob since they // should already be connected. - waitForAddrsInUpdate( - aliceSub, dave.PubKeyStr, advertisedAddrs..., - ) + allUpdates := ht.AssertNumNodeAnns(alice, dave.PubKeyStr, 1) + nodeUpdate := allUpdates[len(allUpdates)-1] + assertAddrs(nodeUpdate.Addresses, advertisedAddrs...) // Close the channel between Bob and Dave. - closeChannelAndAssert(t, net, net.Bob, chanPoint, false) + ht.CloseChannel(bob, chanPoint) } // graphSubscription houses the proxied update and error chans for a node's diff --git a/lntest/itest/lnd_test_list_on_test.go b/lntest/itest/lnd_test_list_on_test.go index a5a79a744..ea51f9630 100644 --- a/lntest/itest/lnd_test_list_on_test.go +++ b/lntest/itest/lnd_test_list_on_test.go @@ -72,11 +72,6 @@ var allTestCases = []*testCase{ name: "multi-hop htlc error propagation", test: testHtlcErrorPropagation, }, - // TODO(roasbeef): multi-path integration test - { - name: "node announcement", - test: testNodeAnnouncement, - }, { name: "derive shared key", test: testDeriveSharedKey, From fc9c6c426e08181d0a6140d67da45343f0049968 Mon Sep 17 00:00:00 2001 From: yyforyongyu Date: Thu, 4 Aug 2022 06:59:21 +0800 Subject: [PATCH 28/47] lntemp+itest: refactor `testUpdateNodeAnnouncement` --- lntemp/rpc/peers.go | 34 +++ lntest/itest/assertions.go | 25 --- lntest/itest/list_on_test.go | 4 + lntest/itest/lnd_channel_graph_test.go | 293 ++++++++++--------------- lntest/itest/lnd_test_list_on_test.go | 4 - 5 files changed, 158 insertions(+), 202 deletions(-) diff --git a/lntemp/rpc/peers.go b/lntemp/rpc/peers.go index 2c1578dc2..ed4c0761a 100644 --- a/lntemp/rpc/peers.go +++ b/lntemp/rpc/peers.go @@ -1,5 +1,39 @@ package rpc +import ( + "context" + + "github.com/lightningnetwork/lnd/lnrpc/peersrpc" + "github.com/stretchr/testify/require" +) + // ===================== // PeerClient related RPCs. // ===================== + +type ( + AnnReq *peersrpc.NodeAnnouncementUpdateRequest + AnnResp *peersrpc.NodeAnnouncementUpdateResponse +) + +// UpdateNodeAnnouncement makes an UpdateNodeAnnouncement RPC call the the +// peersrpc client and asserts. 
+func (h *HarnessRPC) UpdateNodeAnnouncement(req AnnReq) AnnResp { + ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout) + defer cancel() + + resp, err := h.Peer.UpdateNodeAnnouncement(ctxt, req) + require.NoErrorf(h, err, "failed to update announcement") + + return resp +} + +// UpdateNodeAnnouncementErr makes an UpdateNodeAnnouncement RPC call the the +// peersrpc client and asserts an error is returned. +func (h *HarnessRPC) UpdateNodeAnnouncementErr(req AnnReq) { + ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout) + defer cancel() + + _, err := h.Peer.UpdateNodeAnnouncement(ctxt, req) + require.Error(h, err, "expect an error from update announcement") +} diff --git a/lntest/itest/assertions.go b/lntest/itest/assertions.go index 9cbffd8b4..239123d55 100644 --- a/lntest/itest/assertions.go +++ b/lntest/itest/assertions.go @@ -16,7 +16,6 @@ import ( "github.com/go-errors/errors" "github.com/lightningnetwork/lnd/channeldb" "github.com/lightningnetwork/lnd/lnrpc" - "github.com/lightningnetwork/lnd/lnrpc/peersrpc" "github.com/lightningnetwork/lnd/lnrpc/routerrpc" "github.com/lightningnetwork/lnd/lnrpc/walletrpc" "github.com/lightningnetwork/lnd/lntest" @@ -1564,27 +1563,3 @@ func assertNodeAnnouncement(t *harnessTest, n1, n2 *lnrpc.NodeUpdate) { } } } - -// assertUpdateNodeAnnouncementResponse is a helper function to assert -// the response expected values. -func assertUpdateNodeAnnouncementResponse(t *harnessTest, - response *peersrpc.NodeAnnouncementUpdateResponse, - expectedOps map[string]int) { - - require.Equal( - t.t, len(response.Ops), len(expectedOps), - "unexpected number of Ops updating dave's node announcement", - ) - - ops := make(map[string]int, len(response.Ops)) - for _, op := range response.Ops { - ops[op.Entity] = len(op.Actions) - } - - for k, v := range expectedOps { - if v != ops[k] { - t.Fatalf("unexpected number of actions for operation "+ - "%s: got %d wanted %d", k, ops[k], v) - } - } -} diff --git a/lntest/itest/list_on_test.go b/lntest/itest/list_on_test.go index 9f98018ab..865e959ca 100644 --- a/lntest/itest/list_on_test.go +++ b/lntest/itest/list_on_test.go @@ -175,4 +175,8 @@ var allTestCasesTemp = []*lntemp.TestCase{ Name: "node announcement", TestFunc: testNodeAnnouncement, }, + { + Name: "update node announcement rpc", + TestFunc: testUpdateNodeAnnouncement, + }, } diff --git a/lntest/itest/lnd_channel_graph_test.go b/lntest/itest/lnd_channel_graph_test.go index fa27a4f77..e9e7ae648 100644 --- a/lntest/itest/lnd_channel_graph_test.go +++ b/lntest/itest/lnd_channel_graph_test.go @@ -1,12 +1,9 @@ package itest import ( - "context" "fmt" - "io" "strings" "testing" - "time" "github.com/btcsuite/btcd/btcutil" "github.com/lightningnetwork/lnd/chainreg" @@ -16,7 +13,6 @@ import ( "github.com/lightningnetwork/lnd/lnrpc/routerrpc" "github.com/lightningnetwork/lnd/lntemp" "github.com/lightningnetwork/lnd/lntemp/node" - "github.com/lightningnetwork/lnd/lntest" "github.com/lightningnetwork/lnd/lntest/wait" "github.com/stretchr/testify/require" ) @@ -422,110 +418,11 @@ func testNodeAnnouncement(ht *lntemp.HarnessTest) { ht.CloseChannel(bob, chanPoint) } -// graphSubscription houses the proxied update and error chans for a node's -// graph subscriptions. -type graphSubscription struct { - updateChan chan *lnrpc.GraphTopologyUpdate - errChan chan error - quit chan struct{} -} - -// subscribeGraphNotifications subscribes to channel graph updates and launches -// a goroutine that forwards these to the returned channel. 
-func subscribeGraphNotifications(ctxb context.Context, t *harnessTest, - node *lntest.HarnessNode) graphSubscription { - - // We'll first start by establishing a notification client which will - // send us notifications upon detected changes in the channel graph. - req := &lnrpc.GraphTopologySubscription{} - ctx, cancelFunc := context.WithCancel(ctxb) - topologyClient, err := node.SubscribeChannelGraph(ctx, req) - require.NoError(t.t, err, "unable to create topology client") - - // We'll launch a goroutine that will be responsible for proxying all - // notifications recv'd from the client into the channel below. - errChan := make(chan error, 1) - quit := make(chan struct{}) - graphUpdates := make(chan *lnrpc.GraphTopologyUpdate, 20) - go func() { - for { - defer cancelFunc() - - select { - case <-quit: - return - default: - graphUpdate, err := topologyClient.Recv() - select { - case <-quit: - return - default: - } - - if err == io.EOF { - return - } else if err != nil { - select { - case errChan <- err: - case <-quit: - } - return - } - - select { - case graphUpdates <- graphUpdate: - case <-quit: - return - } - } - } - }() - - return graphSubscription{ - updateChan: graphUpdates, - errChan: errChan, - quit: quit, - } -} - -// waitForNodeAnnUpdates monitors the nodeAnnUpdates until we get one for -// the expected node and asserts that has the expected information. -func waitForNodeAnnUpdates(graphSub graphSubscription, nodePubKey string, - expectedUpdate *lnrpc.NodeUpdate, t *harnessTest) { - - for { - select { - case graphUpdate := <-graphSub.updateChan: - for _, update := range graphUpdate.NodeUpdates { - if update.IdentityKey == nodePubKey { - assertNodeAnnouncement( - t, update, expectedUpdate, - ) - return - } - } - case err := <-graphSub.errChan: - t.Fatalf("unable to recv graph update: %v", err) - case <-time.After(defaultTimeout): - t.Fatalf("did not receive node ann update") - } - } -} - // testUpdateNodeAnnouncement ensures that the RPC endpoint validates // the requests correctly and that the new node announcement is brodcasted // with the right information after updating our node. -func testUpdateNodeAnnouncement(net *lntest.NetworkHarness, t *harnessTest) { - // context timeout for the whole test. - ctxt, cancel := context.WithTimeout( - context.Background(), defaultTimeout, - ) - defer cancel() - - // Launch notification clients for alice, such that we can - // get notified when there are updates in the graph. - aliceSub := subscribeGraphNotifications(ctxt, t, net.Alice) - defer close(aliceSub.quit) +func testUpdateNodeAnnouncement(ht *lntemp.HarnessTest) { + alice, bob := ht.Alice, ht.Bob var lndArgs []string @@ -534,20 +431,42 @@ func testUpdateNodeAnnouncement(net *lntest.NetworkHarness, t *harnessTest) { "192.168.1.1:8333", "[2001:db8:85a3:8d3:1319:8a2e:370:7348]:8337", "bkb6azqggsaiskzi.onion:9735", - "fomvuglh6h6vcag73xo5t5gv56ombih3zr2xvplkpbfd7wrog4swjwid.onion:1234", + "fomvuglh6h6vcag73xo5t5gv56ombih3zr2xvplkpbfd7wrog4swj" + + "wid.onion:1234", } for _, addr := range extraAddrs { lndArgs = append(lndArgs, "--externalip="+addr) } - dave := net.NewNode(t.t, "Dave", lndArgs) - defer shutdownAndAssert(net, t, dave) + dave := ht.NewNode("Dave", lndArgs) - // Get dave default information so we can compare - // it lately with the brodcasted updates. 
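	// Each entry in GetInfo().Uris has the form <pubkey>@<host:port>, so
	// the parsing below keeps only the address half, which is the part
	// that shows up in a NodeUpdate. For example:
	//
	//	uri := resp.Uris[0]                // "<pubkey>@127.0.0.1:9735"
	//	addr := strings.Split(uri, "@")[1] // "127.0.0.1:9735"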
- nodeInfoReq := &lnrpc.GetInfoRequest{} - resp, err := dave.GetInfo(ctxt, nodeInfoReq) - require.NoError(t.t, err, "unable to get dave's information") + // assertNodeAnn is a helper closure that checks a given node update + // from Dave is seen by Alice. + assertNodeAnn := func(expected *lnrpc.NodeUpdate) { + err := wait.NoError(func() error { + // Get a list of node updates seen by Alice. + updates := alice.Watcher.GetNodeUpdates(dave.PubKeyStr) + // Check at least one of the updates matches the given + // node update. + for _, update := range updates { + err := compareNodeAnns(update, expected) + // Found a match, return nil. + if err == nil { + return nil + } + } + + // We've check all the updates and no match found. + return fmt.Errorf("alice didn't see the update: %v", + expected) + }, defaultTimeout) + + require.NoError(ht, err, "assertNodeAnn failed") + } + + // Get dave default information so we can compare it lately with the + // brodcasted updates. + resp := dave.RPC.GetInfo() defaultAddrs := make([]*lnrpc.NodeAddress, 0, len(resp.Uris)) for _, uri := range resp.GetUris() { values := strings.Split(uri, "@") @@ -564,9 +483,8 @@ func testUpdateNodeAnnouncement(net *lntest.NetworkHarness, t *harnessTest) { // update this one for another one unset by default at random. featureBit := lnrpc.FeatureBit_WUMBO_CHANNELS_REQ featureIdx := uint32(featureBit) - if _, ok := resp.Features[featureIdx]; ok { - t.Fatalf("unexpected feature bit enabled by default") - } + _, ok := resp.Features[featureIdx] + require.False(ht, ok, "unexpected feature bit enabled by default") defaultDaveNodeAnn := &lnrpc.NodeUpdate{ Alias: resp.Alias, @@ -576,44 +494,33 @@ func testUpdateNodeAnnouncement(net *lntest.NetworkHarness, t *harnessTest) { // Dave must have an open channel before he can send a node // announcement, so we open a channel with Bob. - net.ConnectNodes(t.t, net.Bob, dave) + ht.ConnectNodes(bob, dave) // Go ahead and open a channel between Bob and Dave. This // ensures that Alice receives the node announcement from Bob as part of // the announcement broadcast. - chanPoint := openChannelAndAssert( - t, net, net.Bob, dave, - lntest.OpenChannelParams{ + chanPoint := ht.OpenChannel( + bob, dave, lntemp.OpenChannelParams{ Amt: 1000000, }, ) - require.NoError(t.t, err, "unexpected error opening a channel") // Wait for Alice to receive dave's node announcement with the default // values. - waitForNodeAnnUpdates( - aliceSub, dave.PubKeyStr, defaultDaveNodeAnn, t, - ) + assertNodeAnn(defaultDaveNodeAnn) - // We cannot differentiate between requests with Alias = "" and requests - // that do not provide that field. If a user sets Alias = "" in the request - // the field will simply be ignored. The request must fail because no - // modifiers are applied. - invalidNodeAnnReq := &peersrpc.NodeAnnouncementUpdateRequest{ - Alias: "", - } - - _, err = dave.UpdateNodeAnnouncement(ctxt, invalidNodeAnnReq) - require.Error(t.t, err, "requests without modifiers should field") + // We cannot differentiate between requests with Alias = "" and + // requests that do not provide that field. If a user sets Alias = "" + // in the request the field will simply be ignored. The request must + // fail because no modifiers are applied. + invalidNodeAnnReq := &peersrpc.NodeAnnouncementUpdateRequest{Alias: ""} + dave.RPC.UpdateNodeAnnouncementErr(invalidNodeAnnReq) // Alias too long. 
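	// BOLT #7 caps a node alias at 32 bytes, so the 50-character alias
	// below must be rejected by the RPC's validation.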
invalidNodeAnnReq = &peersrpc.NodeAnnouncementUpdateRequest{ Alias: strings.Repeat("a", 50), } - - _, err = dave.UpdateNodeAnnouncement(ctxt, invalidNodeAnnReq) - require.Error(t.t, err, "failed to validate an invalid alias for an "+ - "update node announcement request") + dave.RPC.UpdateNodeAnnouncementErr(invalidNodeAnnReq) // Update Node. newAlias := "new-alias" @@ -653,8 +560,7 @@ func testUpdateNodeAnnouncement(net *lntest.NetworkHarness, t *harnessTest) { FeatureUpdates: updateFeatureActions, } - response, err := dave.UpdateNodeAnnouncement(ctxt, nodeAnnReq) - require.NoError(t.t, err, "unable to update dave's node announcement") + response := dave.RPC.UpdateNodeAnnouncement(nodeAnnReq) expectedOps := map[string]int{ "features": 1, @@ -662,7 +568,7 @@ func testUpdateNodeAnnouncement(net *lntest.NetworkHarness, t *harnessTest) { "alias": 1, "addresses": 3, } - assertUpdateNodeAnnouncementResponse(t, response, expectedOps) + assertUpdateNodeAnnouncementResponse(ht, response, expectedOps) newNodeAddresses := []*lnrpc.NodeAddress{} // We removed the first address. @@ -683,28 +589,18 @@ func testUpdateNodeAnnouncement(net *lntest.NetworkHarness, t *harnessTest) { // We'll then wait for Alice to receive dave's node announcement // with the new values. - waitForNodeAnnUpdates( - aliceSub, dave.PubKeyStr, newDaveNodeAnn, t, - ) + assertNodeAnn(newDaveNodeAnn) // Check that the feature bit was set correctly. - resp, err = dave.GetInfo(ctxt, nodeInfoReq) - require.NoError(t.t, err, "unable to get dave's information") - - if _, ok := resp.Features[featureIdx]; !ok { - t.Fatalf("failed to set feature bit") - } + resp = dave.RPC.GetInfo() + _, ok = resp.Features[featureIdx] + require.True(ht, ok, "failed to set feature bit") // Check that we cannot set a feature bit that is already set. nodeAnnReq = &peersrpc.NodeAnnouncementUpdateRequest{ FeatureUpdates: updateFeatureActions, } - - _, err = dave.UpdateNodeAnnouncement(ctxt, nodeAnnReq) - require.Error( - t.t, err, "missing expected error: cannot set a feature bit "+ - "that is already set", - ) + dave.RPC.UpdateNodeAnnouncementErr(nodeAnnReq) // Check that we can unset feature bits. updateFeatureActions = []*peersrpc.UpdateFeatureAction{ @@ -717,35 +613,25 @@ func testUpdateNodeAnnouncement(net *lntest.NetworkHarness, t *harnessTest) { nodeAnnReq = &peersrpc.NodeAnnouncementUpdateRequest{ FeatureUpdates: updateFeatureActions, } - - response, err = dave.UpdateNodeAnnouncement(ctxt, nodeAnnReq) - require.NoError(t.t, err, "unable to update dave's node announcement") + response = dave.RPC.UpdateNodeAnnouncement(nodeAnnReq) expectedOps = map[string]int{ "features": 1, } - assertUpdateNodeAnnouncementResponse(t, response, expectedOps) + assertUpdateNodeAnnouncementResponse(ht, response, expectedOps) - resp, err = dave.GetInfo(ctxt, nodeInfoReq) - require.NoError(t.t, err, "unable to get dave's information") - - if _, ok := resp.Features[featureIdx]; ok { - t.Fatalf("failed to unset feature bit") - } + resp = dave.RPC.GetInfo() + _, ok = resp.Features[featureIdx] + require.False(ht, ok, "failed to unset feature bit") // Check that we cannot unset a feature bit that is already unset. nodeAnnReq = &peersrpc.NodeAnnouncementUpdateRequest{ FeatureUpdates: updateFeatureActions, } - - _, err = dave.UpdateNodeAnnouncement(ctxt, nodeAnnReq) - require.Error( - t.t, err, "missing expected error: cannot unset a feature bit "+ - "that is already unset", - ) + dave.RPC.UpdateNodeAnnouncementErr(nodeAnnReq) // Close the channel between Bob and Dave. 
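	// This cleanup matters more under lntemp than it did before: Bob is
	// a shared standby node, so a channel leaked here would bleed into
	// the topology assertions of whichever test runs next.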
- closeChannelAndAssert(t, net, net.Bob, chanPoint, false) + ht.CloseChannel(bob, chanPoint) } // assertSyncType asserts that the peer has an expected syncType. @@ -767,3 +653,64 @@ func assertSyncType(ht *lntemp.HarnessTest, hn *node.HarnessNode, ht.Fatalf("unable to find peer: %s", peer) } + +// compareNodeAnns compares that two node announcements match or returns an +// error. +// +// NOTE: only used for tests in this file. +func compareNodeAnns(n1, n2 *lnrpc.NodeUpdate) error { + // Alias should match. + if n1.Alias != n2.Alias { + return fmt.Errorf("alias not match") + } + + // Color should match. + if n1.Color != n2.Color { + return fmt.Errorf("color not match") + } + + // NodeAddresses should match. + if len(n1.NodeAddresses) != len(n2.NodeAddresses) { + return fmt.Errorf("node addresses don't match") + } + + addrs := make(map[string]struct{}, len(n1.NodeAddresses)) + for _, nodeAddr := range n1.NodeAddresses { + addrs[nodeAddr.Addr] = struct{}{} + } + + for _, nodeAddr := range n2.NodeAddresses { + if _, ok := addrs[nodeAddr.Addr]; !ok { + return fmt.Errorf("address %v not found in node "+ + "announcement", nodeAddr.Addr) + } + } + + return nil +} + +// assertUpdateNodeAnnouncementResponse is a helper function to assert +// the response expected values. +// +// NOTE: only used for tests in this file. +func assertUpdateNodeAnnouncementResponse(ht *lntemp.HarnessTest, + response *peersrpc.NodeAnnouncementUpdateResponse, + expectedOps map[string]int) { + + require.Equal( + ht, len(response.Ops), len(expectedOps), + "unexpected number of Ops updating dave's node announcement", + ) + + ops := make(map[string]int, len(response.Ops)) + for _, op := range response.Ops { + ops[op.Entity] = len(op.Actions) + } + + for k, v := range expectedOps { + if v != ops[k] { + ht.Fatalf("unexpected number of actions for operation "+ + "%s: got %d wanted %d", k, ops[k], v) + } + } +} diff --git a/lntest/itest/lnd_test_list_on_test.go b/lntest/itest/lnd_test_list_on_test.go index ea51f9630..e90923273 100644 --- a/lntest/itest/lnd_test_list_on_test.go +++ b/lntest/itest/lnd_test_list_on_test.go @@ -24,10 +24,6 @@ var allTestCases = []*testCase{ name: "single hop invoice", test: testSingleHopInvoice, }, - { - name: "test update node announcement rpc", - test: testUpdateNodeAnnouncement, - }, { name: "list outgoing payments", test: testListPayments, From e963506f7d786cfc71a994e32989b11e4837f87e Mon Sep 17 00:00:00 2001 From: yyforyongyu Date: Thu, 4 Aug 2022 07:14:26 +0800 Subject: [PATCH 29/47] lntemp+itest: refactor `testListPayments` --- lntemp/harness_assertion.go | 1 + lntest/itest/list_on_test.go | 4 + lntest/itest/lnd_payment_test.go | 131 +++++++------------------- lntest/itest/lnd_test_list_on_test.go | 4 - 4 files changed, 39 insertions(+), 101 deletions(-) diff --git a/lntemp/harness_assertion.go b/lntemp/harness_assertion.go index 6b89d6aad..dec709968 100644 --- a/lntemp/harness_assertion.go +++ b/lntemp/harness_assertion.go @@ -1606,6 +1606,7 @@ func (h *HarnessTest) AssertNumPayments(hn *node.HarnessNode, req := &lnrpc.ListPaymentsRequest{ IncludeIncomplete: true, + IndexOffset: hn.State.Payment.LastIndexOffset, } var payments []*lnrpc.Payment diff --git a/lntest/itest/list_on_test.go b/lntest/itest/list_on_test.go index 865e959ca..56e76917b 100644 --- a/lntest/itest/list_on_test.go +++ b/lntest/itest/list_on_test.go @@ -179,4 +179,8 @@ var allTestCasesTemp = []*lntemp.TestCase{ Name: "update node announcement rpc", TestFunc: testUpdateNodeAnnouncement, }, + { + Name: "list outgoing payments", 
+ TestFunc: testListPayments, + }, } diff --git a/lntest/itest/lnd_payment_test.go b/lntest/itest/lnd_payment_test.go index f67910c5e..337b21ff2 100644 --- a/lntest/itest/lnd_payment_test.go +++ b/lntest/itest/lnd_payment_test.go @@ -6,49 +6,29 @@ import ( "crypto/sha256" "encoding/hex" "fmt" - "reflect" "time" "github.com/btcsuite/btcd/btcutil" "github.com/lightningnetwork/lnd/input" "github.com/lightningnetwork/lnd/lnrpc" "github.com/lightningnetwork/lnd/lnrpc/routerrpc" + "github.com/lightningnetwork/lnd/lntemp" "github.com/lightningnetwork/lnd/lntest" "github.com/lightningnetwork/lnd/lntest/wait" "github.com/stretchr/testify/require" ) -func testListPayments(net *lntest.NetworkHarness, t *harnessTest) { - ctxb := context.Background() - - // First start by deleting all payments that Alice knows of. This will - // allow us to execute the test with a clean state for Alice. - delPaymentsReq := &lnrpc.DeleteAllPaymentsRequest{} - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - if _, err := net.Alice.DeleteAllPayments(ctxt, delPaymentsReq); err != nil { - t.Fatalf("unable to delete payments: %v", err) - } +func testListPayments(ht *lntemp.HarnessTest) { + alice, bob := ht.Alice, ht.Bob // Check that there are no payments before test. - reqInit := &lnrpc.ListPaymentsRequest{} - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - paymentsRespInit, err := net.Alice.ListPayments(ctxt, reqInit) - if err != nil { - t.Fatalf("error when obtaining Alice payments: %v", err) - } - if len(paymentsRespInit.Payments) != 0 { - t.Fatalf("incorrect number of payments, got %v, want %v", - len(paymentsRespInit.Payments), 0) - } + ht.AssertNumPayments(alice, 0) // Open a channel with 100k satoshis between Alice and Bob with Alice // being the sole funder of the channel. chanAmt := btcutil.Amount(100000) - chanPoint := openChannelAndAssert( - t, net, net.Alice, net.Bob, - lntest.OpenChannelParams{ - Amt: chanAmt, - }, + chanPoint := ht.OpenChannel( + alice, bob, lntemp.OpenChannelParams{Amt: chanAmt}, ) // Now that the channel is open, create an invoice for Bob which @@ -61,101 +41,58 @@ func testListPayments(net *lntest.NetworkHarness, t *harnessTest) { RPreimage: preimage, Value: paymentAmt, } - addInvoiceCtxt, _ := context.WithTimeout(ctxb, defaultTimeout) - invoiceResp, err := net.Bob.AddInvoice(addInvoiceCtxt, invoice) - if err != nil { - t.Fatalf("unable to add invoice: %v", err) - } - - // Wait for Alice to recognize and advertise the new channel generated - // above. - if err = net.Alice.WaitForNetworkChannelOpen(chanPoint); err != nil { - t.Fatalf("alice didn't advertise channel before "+ - "timeout: %v", err) - } - if err = net.Bob.WaitForNetworkChannelOpen(chanPoint); err != nil { - t.Fatalf("bob didn't advertise channel before "+ - "timeout: %v", err) - } + invoiceResp := bob.RPC.AddInvoice(invoice) // With the invoice for Bob added, send a payment towards Alice paying // to the above generated invoice. - sendAndAssertSuccess( - t, net.Alice, &routerrpc.SendPaymentRequest{ - PaymentRequest: invoiceResp.PaymentRequest, - TimeoutSeconds: 60, - FeeLimitSat: 1000000, - }, - ) + payReqs := []string{invoiceResp.PaymentRequest} + ht.CompletePaymentRequests(alice, payReqs) // Grab Alice's list of payments, she should show the existence of // exactly one payment. 
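
The AssertNumPayments helper used just below leans on the IndexOffset added to
the request earlier in this patch: ListPayments treats the offset as exclusive,
so only payments whose index is greater than the node's recorded
LastIndexOffset (that is, the payments made by this test) are returned. A
minimal sketch of that paging behavior, where fetchNewPayments is a
hypothetical name and a ListPayments wrapper in the style of the other
HarnessRPC helpers is assumed:

	// fetchNewPayments sketches how IndexOffset pages through payments:
	// only payments with an index strictly greater than lastIndex are
	// returned, so passing the index last recorded for this test yields
	// just the payments the test created itself.
	func fetchNewPayments(hn *node.HarnessNode,
		lastIndex uint64) []*lnrpc.Payment {

		req := &lnrpc.ListPaymentsRequest{
			IncludeIncomplete: true,
			// The offset is exclusive: results start after this
			// payment index.
			IndexOffset: lastIndex,
		}

		return hn.RPC.ListPayments(req).Payments
	}
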
- req := &lnrpc.ListPaymentsRequest{} - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - paymentsResp, err := net.Alice.ListPayments(ctxt, req) - if err != nil { - t.Fatalf("error when obtaining Alice payments: %v", err) - } - if len(paymentsResp.Payments) != 1 { - t.Fatalf("incorrect number of payments, got %v, want %v", - len(paymentsResp.Payments), 1) - } - p := paymentsResp.Payments[0] // nolint:staticcheck + p := ht.AssertNumPayments(alice, 1)[0] path := p.Htlcs[len(p.Htlcs)-1].Route.Hops // Ensure that the stored path shows a direct payment to Bob with no // other nodes in-between. - if len(path) != 1 || path[0].PubKey != net.Bob.PubKeyStr { - t.Fatalf("incorrect path") - } + require.Len(ht, path, 1, "wrong number of routes in path") + require.Equal(ht, bob.PubKeyStr, path[0].PubKey, "wrong pub key") // The payment amount should also match our previous payment directly. - if p.Value != paymentAmt { // nolint:staticcheck - t.Fatalf("incorrect amount, got %v, want %v", - p.Value, paymentAmt) // nolint:staticcheck - } + require.EqualValues(ht, paymentAmt, p.ValueSat, "incorrect sat amount") + require.EqualValues(ht, paymentAmt*1000, p.ValueMsat, + "incorrect msat amount") // The payment hash (or r-hash) should have been stored correctly. correctRHash := hex.EncodeToString(invoiceResp.RHash) - if !reflect.DeepEqual(p.PaymentHash, correctRHash) { - t.Fatalf("incorrect RHash, got %v, want %v", - p.PaymentHash, correctRHash) - } + require.Equal(ht, correctRHash, p.PaymentHash, "incorrect RHash") - // As we made a single-hop direct payment, there should have been no fee - // applied. - if p.Fee != 0 { // nolint:staticcheck - t.Fatalf("incorrect Fee, got %v, want %v", p.Fee, 0) // nolint:staticcheck - } + // As we made a single-hop direct payment, there should have been no + // fee applied. + require.Zero(ht, p.FeeSat, "fee should be 0") + require.Zero(ht, p.FeeMsat, "fee should be 0") // Finally, verify that the payment request returned by the rpc matches // the invoice that we paid. - if p.PaymentRequest != invoiceResp.PaymentRequest { - t.Fatalf("incorrect payreq, got: %v, want: %v", - p.PaymentRequest, invoiceResp.PaymentRequest) - } + require.Equal(ht, invoiceResp.PaymentRequest, p.PaymentRequest, + "incorrect payreq") // Delete all payments from Alice. DB should have no payments. - delReq := &lnrpc.DeleteAllPaymentsRequest{} - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - _, err = net.Alice.DeleteAllPayments(ctxt, delReq) - if err != nil { - t.Fatalf("Can't delete payments at the end: %v", err) - } + alice.RPC.DeleteAllPayments() // Check that there are no payments after test. - listReq := &lnrpc.ListPaymentsRequest{} - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - paymentsResp, err = net.Alice.ListPayments(ctxt, listReq) - if err != nil { - t.Fatalf("error when obtaining Alice payments: %v", err) - } - if len(paymentsResp.Payments) != 0 { - t.Fatalf("incorrect number of payments, got %v, want %v", - len(paymentsResp.Payments), 0) - } + ht.AssertNumPayments(alice, 0) - closeChannelAndAssert(t, net, net.Alice, chanPoint, false) + // TODO(yy): remove the sleep once the following bug is fixed. + // When the invoice is reported settled, the commitment dance is not + // yet finished, which can cause an error when closing the channel, + // saying there's active HTLCs. We need to investigate this issue and + // reverse the order to, first finish the commitment dance, then report + // the invoice as settled. + time.Sleep(2 * time.Second) + + // Close the channel. 
+ defer ht.CloseChannel(alice, chanPoint) } // testPaymentFollowingChannelOpen tests that the channel transition from diff --git a/lntest/itest/lnd_test_list_on_test.go b/lntest/itest/lnd_test_list_on_test.go index e90923273..dcdd9cb83 100644 --- a/lntest/itest/lnd_test_list_on_test.go +++ b/lntest/itest/lnd_test_list_on_test.go @@ -24,10 +24,6 @@ var allTestCases = []*testCase{ name: "single hop invoice", test: testSingleHopInvoice, }, - { - name: "list outgoing payments", - test: testListPayments, - }, { name: "multi-hop payments", test: testMultiHopPayments, From d260ac1ab507899c6919b85c68d060e319d8f1d5 Mon Sep 17 00:00:00 2001 From: yyforyongyu Date: Thu, 4 Aug 2022 08:25:45 +0800 Subject: [PATCH 30/47] itest: refactor `testPaymentFollowingChannelOpen` --- lntest/itest/list_on_test.go | 4 ++ lntest/itest/lnd_payment_test.go | 71 +++++++++------------------ lntest/itest/lnd_test_list_on_test.go | 4 -- 3 files changed, 28 insertions(+), 51 deletions(-) diff --git a/lntest/itest/list_on_test.go b/lntest/itest/list_on_test.go index 56e76917b..42abe6a16 100644 --- a/lntest/itest/list_on_test.go +++ b/lntest/itest/list_on_test.go @@ -183,4 +183,8 @@ var allTestCasesTemp = []*lntemp.TestCase{ Name: "list outgoing payments", TestFunc: testListPayments, }, + { + Name: "immediate payment after channel opened", + TestFunc: testPaymentFollowingChannelOpen, + }, } diff --git a/lntest/itest/lnd_payment_test.go b/lntest/itest/lnd_payment_test.go index 337b21ff2..de275fa7b 100644 --- a/lntest/itest/lnd_payment_test.go +++ b/lntest/itest/lnd_payment_test.go @@ -100,81 +100,58 @@ func testListPayments(ht *lntemp.HarnessTest) { // subsystems trying to update the channel state in the db. We follow this // transition with a payment that updates the commitment state and verify that // the pending state is up to date. -func testPaymentFollowingChannelOpen(net *lntest.NetworkHarness, t *harnessTest) { - ctxb := context.Background() - +func testPaymentFollowingChannelOpen(ht *lntemp.HarnessTest) { const paymentAmt = btcutil.Amount(100) channelCapacity := paymentAmt * 1000 // We first establish a channel between Alice and Bob. - pendingUpdate, err := net.OpenPendingChannel( - net.Alice, net.Bob, channelCapacity, 0, - ) - if err != nil { - t.Fatalf("unable to open channel: %v", err) + alice, bob := ht.Alice, ht.Bob + p := lntemp.OpenChannelParams{ + Amt: channelCapacity, } + pendingUpdate := ht.OpenChannelAssertPending(alice, bob, p) // At this point, the channel's funding transaction will have been // broadcast, but not confirmed. Alice and Bob's nodes // should reflect this when queried via RPC. - assertNumOpenChannelsPending(t, net.Alice, net.Bob, 1) + ht.AssertNodesNumPendingOpenChannels(alice, bob, 1) // We are restarting Bob's node to let the link be created for the // pending channel. - if err := net.RestartNode(net.Bob, nil); err != nil { - t.Fatalf("Bob restart failed: %v", err) - } + ht.RestartNode(bob) // We ensure that Bob reconnects to Alice. - net.EnsureConnected(t.t, net.Bob, net.Alice) + ht.EnsureConnected(bob, alice) - // We mine one block for the channel to be confirmed. - _ = mineBlocks(t, net, 6, 1)[0] + // We mine six blocks for the channel to be confirmed. + ht.MineBlocksAndAssertNumTxes(6, 1) // We verify that the channel is open from both nodes point of view. 
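
Note where chanPoint comes from in the hunk that follows: the pending update
returned by OpenChannelAssertPending only carries the raw funding txid bytes
and the output index, and lntemp.ChanPointFromPendingUpdate presumably wraps
the same conversion the old test code spelled out by hand, along these lines:

	// chanPointFromPending sketches the conversion: pair the raw funding
	// txid bytes with the output index to identify the channel.
	func chanPointFromPending(
		update *lnrpc.PendingUpdate) *lnrpc.ChannelPoint {

		return &lnrpc.ChannelPoint{
			FundingTxid: &lnrpc.ChannelPoint_FundingTxidBytes{
				FundingTxidBytes: update.Txid,
			},
			OutputIndex: update.OutputIndex,
		}
	}
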
- assertNumOpenChannelsPending(t, net.Alice, net.Bob, 0) + chanPoint := lntemp.ChanPointFromPendingUpdate(pendingUpdate) + ht.AssertNodesNumPendingOpenChannels(alice, bob, 0) + ht.AssertChannelExists(alice, chanPoint) + ht.AssertChannelExists(bob, chanPoint) // With the channel open, we'll create invoices for Bob that Alice will // pay to in order to advance the state of the channel. - bobPayReqs, _, _, err := createPayReqs( - net.Bob, paymentAmt, 1, - ) - if err != nil { - t.Fatalf("unable to create pay reqs: %v", err) - } + bobPayReqs, _, _ := ht.CreatePayReqs(bob, paymentAmt, 1) // Send payment to Bob so that a channel update to disk will be // executed. - sendAndAssertSuccess( - t, net.Alice, &routerrpc.SendPaymentRequest{ - PaymentRequest: bobPayReqs[0], - TimeoutSeconds: 60, - FeeLimitSat: 1000000, - }, - ) + ht.CompletePaymentRequests(alice, []string{bobPayReqs[0]}) - // At this point we want to make sure the channel is opened and not - // pending. - ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout) - defer cancel() - res, err := net.Bob.ListChannels(ctxt, &lnrpc.ListChannelsRequest{}) - if err != nil { - t.Fatalf("unable to list bob channels: %v", err) - } - if len(res.Channels) == 0 { - t.Fatalf("bob list of channels is empty") - } + // TODO(yy): remove the sleep once the following bug is fixed. + // When the invoice is reported settled, the commitment dance is not + // yet finished, which can cause an error when closing the channel, + // saying there's active HTLCs. We need to investigate this issue and + // reverse the order to, first finish the commitment dance, then report + // the invoice as settled. + time.Sleep(2 * time.Second) // Finally, immediately close the channel. This function will also // block until the channel is closed and will additionally assert the // relevant channel closing post conditions. - chanPoint := &lnrpc.ChannelPoint{ - FundingTxid: &lnrpc.ChannelPoint_FundingTxidBytes{ - FundingTxidBytes: pendingUpdate.Txid, - }, - OutputIndex: pendingUpdate.OutputIndex, - } - closeChannelAndAssert(t, net, net.Alice, chanPoint, false) + ht.CloseChannel(alice, chanPoint) } // testAsyncPayments tests the performance of the async payments. diff --git a/lntest/itest/lnd_test_list_on_test.go b/lntest/itest/lnd_test_list_on_test.go index dcdd9cb83..f36729b01 100644 --- a/lntest/itest/lnd_test_list_on_test.go +++ b/lntest/itest/lnd_test_list_on_test.go @@ -170,10 +170,6 @@ var allTestCases = []*testCase{ name: "delete macaroon id", test: testDeleteMacaroonID, }, - { - name: "immediate payment after channel opened", - test: testPaymentFollowingChannelOpen, - }, { name: "psbt channel funding", test: testPsbtChanFunding, From 4e821a6e3ddda845e64e124d08836537fff25463 Mon Sep 17 00:00:00 2001 From: yyforyongyu Date: Thu, 4 Aug 2022 08:37:28 +0800 Subject: [PATCH 31/47] lntemp+itest: refactor `testInvoiceSubscriptions` --- lntemp/harness.go | 33 ++++ lntemp/rpc/lnd.go | 23 +++ lntest/itest/list_on_test.go | 4 + lntest/itest/lnd_payment_test.go | 225 +++++++------------------- lntest/itest/lnd_test_list_on_test.go | 4 - 5 files changed, 122 insertions(+), 167 deletions(-) diff --git a/lntemp/harness.go b/lntemp/harness.go index ea75edd5f..dfc8a6b27 100644 --- a/lntemp/harness.go +++ b/lntemp/harness.go @@ -1442,3 +1442,36 @@ func (h *HarnessTest) OpenMultiChannelsAsync( return channelPoints } + +// ReceiveInvoiceUpdate waits until a message is received on the subscribe +// invoice stream or the timeout is reached. 
+func (h *HarnessTest) ReceiveInvoiceUpdate(
+	stream rpc.InvoiceUpdateClient) *lnrpc.Invoice {
+
+	chanMsg := make(chan *lnrpc.Invoice)
+	errChan := make(chan error)
+	go func() {
+		// Consume one message. This will block until the message is
+		// received.
+		resp, err := stream.Recv()
+		if err != nil {
+			errChan <- err
+			return
+		}
+		chanMsg <- resp
+	}()
+
+	select {
+	case <-time.After(DefaultTimeout):
+		require.Fail(h, "timeout", "timeout receiving invoice update")
+
+	case err := <-errChan:
+		require.Failf(h, "err from stream",
+			"received err from stream: %v", err)
+
+	case updateMsg := <-chanMsg:
+		return updateMsg
+	}
+
+	return nil
+}
diff --git a/lntemp/rpc/lnd.go b/lntemp/rpc/lnd.go
index 98fb50baa..a9ec94701 100644
--- a/lntemp/rpc/lnd.go
+++ b/lntemp/rpc/lnd.go
@@ -494,3 +494,26 @@ func (h *HarnessRPC) UpdateChannelPolicy(
 
 	return resp
 }
+
+type InvoiceUpdateClient lnrpc.Lightning_SubscribeInvoicesClient
+
+// SubscribeInvoices creates a subscription client for invoice events and
+// asserts its creation.
+//
+// NOTE: make sure to subscribe to invoices as early as possible as it takes
+// some time for lnd to create the subscription client. If an invoice is
+// added right after the subscription, it may be missed. However, if AddIndex
+// or SettleIndex is used in the request, it will be fine as a backlog will
+// always be sent.
+func (h *HarnessRPC) SubscribeInvoices(
+	req *lnrpc.InvoiceSubscription) InvoiceUpdateClient {
+
+	// SubscribeInvoices needs to have the context alive for the
+	// entire test case as the returned client will be used to receive
+	// events from the stream. Thus we use runCtx here instead of a
+	// timeout context.
+	client, err := h.LN.SubscribeInvoices(h.runCtx, req)
+	require.NoError(h, err, "unable to create invoice subscription client")
+
+	return client
+}
diff --git a/lntest/itest/list_on_test.go b/lntest/itest/list_on_test.go
index 42abe6a16..6e8698033 100644
--- a/lntest/itest/list_on_test.go
+++ b/lntest/itest/list_on_test.go
@@ -187,4 +187,8 @@ var allTestCasesTemp = []*lntemp.TestCase{
 		Name: "immediate payment after channel opened",
 		TestFunc: testPaymentFollowingChannelOpen,
 	},
+	{
+		Name: "invoice update subscription",
+		TestFunc: testInvoiceSubscriptions,
+	},
 }
diff --git a/lntest/itest/lnd_payment_test.go b/lntest/itest/lnd_payment_test.go
index de275fa7b..01627fecd 100644
--- a/lntest/itest/lnd_payment_test.go
+++ b/lntest/itest/lnd_payment_test.go
@@ -511,182 +511,86 @@ func testBidirectionalAsyncPayments(net *lntest.NetworkHarness, t *harnessTest)
 	closeChannelAndAssert(t, net, net.Alice, chanPoint, false)
 }
 
-func testInvoiceSubscriptions(net *lntest.NetworkHarness, t *harnessTest) {
-	ctxb := context.Background()
-
+func testInvoiceSubscriptions(ht *lntemp.HarnessTest) {
 	const chanAmt = btcutil.Amount(500000)
 
-	// Open a channel with 500k satoshis between Alice and Bob with Alice
-	// being the sole funder of the channel.
-	chanPoint := openChannelAndAssert(
-		t, net, net.Alice, net.Bob,
-		lntest.OpenChannelParams{
-			Amt: chanAmt,
-		},
-	)
-
-	// Next create a new invoice for Bob requesting 1k satoshis.
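
ReceiveInvoiceUpdate above turns the blocking stream Recv into a call bounded
by DefaultTimeout: Recv runs in its own goroutine while the caller selects on
the message, a stream error, or the timer. The same shape works for any
server-streaming client; a generic sketch, assuming Go 1.18+ (the helper name
receiveWithTimeout is hypothetical, not part of this patch):

	// receiveWithTimeout runs recv in a goroutine and bounds it with a
	// timeout. The buffered channels let the goroutine finish even if
	// the timeout fires first.
	func receiveWithTimeout[T any](recv func() (T, error),
		timeout time.Duration) (T, error) {

		var zero T
		msgChan := make(chan T, 1)
		errChan := make(chan error, 1)

		go func() {
			msg, err := recv()
			if err != nil {
				errChan <- err
				return
			}
			msgChan <- msg
		}()

		select {
		case msg := <-msgChan:
			return msg, nil

		case err := <-errChan:
			return zero, err

		case <-time.After(timeout):
			return zero, fmt.Errorf("timeout after %v", timeout)
		}
	}

With such a helper, ReceiveInvoiceUpdate reduces to
receiveWithTimeout(stream.Recv, DefaultTimeout) plus the require assertions.
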
- // TODO(roasbeef): make global list of invoices for each node to re-use - // and avoid collisions - const paymentAmt = 1000 - invoice := &lnrpc.Invoice{ - Memo: "testing", - RPreimage: makeFakePayHash(t), - Value: paymentAmt, - } - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - invoiceResp, err := net.Bob.AddInvoice(ctxt, invoice) - if err != nil { - t.Fatalf("unable to add invoice: %v", err) - } - lastAddIndex := invoiceResp.AddIndex + alice, bob := ht.Alice, ht.Bob // Create a new invoice subscription client for Bob, the notification // should be dispatched shortly below. req := &lnrpc.InvoiceSubscription{} - ctx, cancelInvoiceSubscription := context.WithCancel(ctxb) - bobInvoiceSubscription, err := net.Bob.SubscribeInvoices(ctx, req) - if err != nil { - t.Fatalf("unable to subscribe to bob's invoice updates: %v", err) + bobInvoiceSubscription := bob.RPC.SubscribeInvoices(req) + + // Open a channel with 500k satoshis between Alice and Bob with Alice + // being the sole funder of the channel. + chanPoint := ht.OpenChannel( + alice, bob, lntemp.OpenChannelParams{Amt: chanAmt}, + ) + + // Next create a new invoice for Bob requesting 1k satoshis. + const paymentAmt = 1000 + invoice := &lnrpc.Invoice{ + Memo: "testing", + RPreimage: ht.Random32Bytes(), + Value: paymentAmt, } + invoiceResp := bob.RPC.AddInvoice(invoice) + lastAddIndex := invoiceResp.AddIndex - var settleIndex uint64 - quit := make(chan struct{}) - updateSent := make(chan struct{}) - go func() { - invoiceUpdate, err := bobInvoiceSubscription.Recv() - select { - case <-quit: - // Received cancellation - return - default: - } - - if err != nil { - t.Fatalf("unable to recv invoice update: %v", err) - } - - // The invoice update should exactly match the invoice created - // above, but should now be settled and have SettleDate - if !invoiceUpdate.Settled { // nolint:staticcheck - t.Fatalf("invoice not settled but should be") - } - if invoiceUpdate.SettleDate == 0 { - t.Fatalf("invoice should have non zero settle date, but doesn't") - } - - if !bytes.Equal(invoiceUpdate.RPreimage, invoice.RPreimage) { - t.Fatalf("payment preimages don't match: expected %v, got %v", - invoice.RPreimage, invoiceUpdate.RPreimage) - } - - if invoiceUpdate.SettleIndex == 0 { - t.Fatalf("invoice should have settle index") - } - - settleIndex = invoiceUpdate.SettleIndex - - close(updateSent) - }() - - // Wait for the channel to be recognized by both Alice and Bob before - // continuing the rest of the test. - err = net.Alice.WaitForNetworkChannelOpen(chanPoint) - if err != nil { - // TODO(roasbeef): will need to make num blocks to advertise a - // node param - close(quit) - t.Fatalf("channel not seen by alice before timeout: %v", err) - } + // With the above invoice added, we should receive an update event. + invoiceUpdate := ht.ReceiveInvoiceUpdate(bobInvoiceSubscription) + require.NotEqual(ht, lnrpc.Invoice_SETTLED, invoiceUpdate.State, + "invoice should not be settled") // With the assertion above set up, send a payment from Alice to Bob // which should finalize and settle the invoice. 
- sendReq := &routerrpc.SendPaymentRequest{ - PaymentRequest: invoiceResp.PaymentRequest, - TimeoutSeconds: 60, - FeeLimitMsat: noFeeLimitMsat, - } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - stream, err := net.Alice.RouterClient.SendPaymentV2(ctxt, sendReq) - if err != nil { - close(quit) - t.Fatalf("unable to send payment: %v", err) - } - result, err := getPaymentResult(stream) - if err != nil { - close(quit) - t.Fatalf("cannot get payment result: %v", err) - } - if result.Status != lnrpc.Payment_SUCCEEDED { - close(quit) - t.Fatalf("error when attempting recv: %v", result.Status) - } + ht.CompletePaymentRequests(alice, []string{invoiceResp.PaymentRequest}) - select { - case <-time.After(time.Second * 10): - close(quit) - t.Fatalf("update not sent after 10 seconds") - case <-updateSent: // Fall through on success - } - - // With the base case working, we'll now cancel Bob's current - // subscription in order to exercise the backlog fill behavior. - cancelInvoiceSubscription() + // The invoice update should exactly match the invoice created + // above, but should now be settled and have SettleDate + invoiceUpdate = ht.ReceiveInvoiceUpdate(bobInvoiceSubscription) + require.Equal(ht, lnrpc.Invoice_SETTLED, invoiceUpdate.State, + "invoice not settled but should be") + require.NotZero(ht, invoiceUpdate.SettleDate, + "invoice should have non zero settle date, but doesn't") + require.Equal(ht, invoice.RPreimage, invoiceUpdate.RPreimage, + "payment preimages don't match") + require.NotZero(ht, invoiceUpdate.SettleIndex, + "invoice should have settle index") + settleIndex := invoiceUpdate.SettleIndex // We'll now add 3 more invoices to Bob's invoice registry. const numInvoices = 3 - payReqs, _, newInvoices, err := createPayReqs( - net.Bob, paymentAmt, numInvoices, + payReqs, _, newInvoices := ht.CreatePayReqs( + bob, paymentAmt, numInvoices, ) - if err != nil { - t.Fatalf("unable to create pay reqs: %v", err) - } // Now that the set of invoices has been added, we'll re-register for // streaming invoice notifications for Bob, this time specifying the // add invoice of the last prior invoice. - req = &lnrpc.InvoiceSubscription{ - AddIndex: lastAddIndex, - } - ctx, cancelInvoiceSubscription = context.WithCancel(ctxb) - bobInvoiceSubscription, err = net.Bob.SubscribeInvoices(ctx, req) - if err != nil { - t.Fatalf("unable to subscribe to bob's invoice updates: %v", err) - } + req = &lnrpc.InvoiceSubscription{AddIndex: lastAddIndex} + bobInvoiceSubscription = bob.RPC.SubscribeInvoices(req) // Since we specified a value of the prior add index above, we should // now immediately get the invoices we just added as we should get the // backlog of notifications. for i := 0; i < numInvoices; i++ { - invoiceUpdate, err := bobInvoiceSubscription.Recv() - if err != nil { - t.Fatalf("unable to receive subscription") - } + invoiceUpdate := ht.ReceiveInvoiceUpdate(bobInvoiceSubscription) // We should now get the ith invoice we added, as they should // be returned in order. 
- if invoiceUpdate.Settled { // nolint:staticcheck - t.Fatalf("should have only received add events") - } + require.NotEqual(ht, lnrpc.Invoice_SETTLED, invoiceUpdate.State, + "should have only received add events") + originalInvoice := newInvoices[i] rHash := sha256.Sum256(originalInvoice.RPreimage) - if !bytes.Equal(invoiceUpdate.RHash, rHash[:]) { - t.Fatalf("invoices have mismatched payment hashes: "+ - "expected %x, got %x", rHash[:], - invoiceUpdate.RHash) - } + require.Equal(ht, rHash[:], invoiceUpdate.RHash, + "invoices have mismatched payment hashes") } - cancelInvoiceSubscription() - // We'll now have Bob settle out the remainder of these invoices so we // can test that all settled invoices are properly notified. - err = completePaymentRequests( - net.Alice, net.Alice.RouterClient, payReqs, true, - ) - if err != nil { - t.Fatalf("unable to send payment: %v", err) - } + ht.CompletePaymentRequests(alice, payReqs) // With the set of invoices paid, we'll now cancel the old // subscription, and create a new one for Bob, this time using the @@ -694,13 +598,7 @@ func testInvoiceSubscriptions(net *lntest.NetworkHarness, t *harnessTest) { req = &lnrpc.InvoiceSubscription{ SettleIndex: settleIndex, } - ctx, cancelInvoiceSubscription = context.WithCancel(ctxb) - bobInvoiceSubscription, err = net.Bob.SubscribeInvoices(ctx, req) - if err != nil { - t.Fatalf("unable to subscribe to bob's invoice updates: %v", err) - } - - defer cancelInvoiceSubscription() + bobInvoiceSubscription = bob.RPC.SubscribeInvoices(req) // As we specified the index of the past settle index, we should now // receive notifications for the three HTLCs that we just settled. As @@ -712,30 +610,31 @@ func testInvoiceSubscriptions(net *lntest.NetworkHarness, t *harnessTest) { settledInvoices[rHash] = struct{}{} } for i := 0; i < numInvoices; i++ { - invoiceUpdate, err := bobInvoiceSubscription.Recv() - if err != nil { - t.Fatalf("unable to receive subscription") - } + invoiceUpdate := ht.ReceiveInvoiceUpdate(bobInvoiceSubscription) // We should now get the ith invoice we added, as they should // be returned in order. - if !invoiceUpdate.Settled { // nolint:staticcheck - t.Fatalf("should have only received settle events") - } + require.Equal(ht, lnrpc.Invoice_SETTLED, invoiceUpdate.State, + "should have only received settle events") var rHash [32]byte copy(rHash[:], invoiceUpdate.RHash) - if _, ok := settledInvoices[rHash]; !ok { - t.Fatalf("unknown invoice settled: %x", rHash) - } + require.Contains(ht, settledInvoices, rHash, + "unknown invoice settled") delete(settledInvoices, rHash) } // At this point, all the invoices should be fully settled. - if len(settledInvoices) != 0 { - t.Fatalf("not all invoices settled") - } + require.Empty(ht, settledInvoices, "not all invoices settled") - closeChannelAndAssert(t, net, net.Alice, chanPoint, false) + // TODO(yy): remove the sleep once the following bug is fixed. + // When the invoice is reported settled, the commitment dance is not + // yet finished, which can cause an error when closing the channel, + // saying there's active HTLCs. We need to investigate this issue and + // reverse the order to, first finish the commitment dance, then report + // the invoice as settled. 
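
The sleep the TODO above refers to papers over the race with a fixed delay.
Until the underlying ordering bug is fixed, a lower-flake alternative would be
to poll instead of sleeping, reusing assertions this series already provides;
a sketch, assuming AssertNumActiveHtlcs polls with a timeout like the other
harness assertions:

	// Wait until both nodes report zero active HTLCs, which suggests the
	// commitment dance has wound down, then close the channel. Whether
	// zero HTLCs truly guarantees the dance finished is exactly what the
	// TODO above questions, so this is a sketch rather than a fix.
	ht.AssertNumActiveHtlcs(alice, 0)
	ht.AssertNumActiveHtlcs(bob, 0)
	ht.CloseChannel(alice, chanPoint)
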
+	time.Sleep(2 * time.Second)
+
+	ht.CloseChannel(alice, chanPoint)
 }
diff --git a/lntest/itest/lnd_test_list_on_test.go b/lntest/itest/lnd_test_list_on_test.go
index f36729b01..39e2d5d3d 100644
--- a/lntest/itest/lnd_test_list_on_test.go
+++ b/lntest/itest/lnd_test_list_on_test.go
@@ -56,10 +56,6 @@ var allTestCases = []*testCase{
 	{
 		name: "multiple channel creation and update subscription",
 		test: testBasicChannelCreationAndUpdates,
 	},
-	{
-		name: "invoice update subscription",
-		test: testInvoiceSubscriptions,
-	},
 	{
 		name: "multi-hop htlc error propagation",
 		test: testHtlcErrorPropagation,

From 40a5f94ef799ff88dc53d59c9a427c54e6e9c9db Mon Sep 17 00:00:00 2001
From: yyforyongyu
Date: Thu, 4 Aug 2022 09:05:48 +0800
Subject: [PATCH 32/47] lntemp+itest: refactor `testChannelBackupUpdates`

---
 lntemp/rpc/lnd.go | 51 ++++++++++++++++++++
 lntest/itest/list_on_test.go | 4 ++
 lntest/itest/lnd_channel_backup_test.go | 62 +++++++++++--------------
 lntest/itest/lnd_test_list_on_test.go | 4 --
 4 files changed, 81 insertions(+), 40 deletions(-)

diff --git a/lntemp/rpc/lnd.go b/lntemp/rpc/lnd.go
index a9ec94701..25eb94625 100644
--- a/lntemp/rpc/lnd.go
+++ b/lntemp/rpc/lnd.go
@@ -2,6 +2,7 @@ package rpc
 
 import (
 	"context"
+	"strings"
 
 	"github.com/lightningnetwork/lnd/lnrpc"
 	"github.com/stretchr/testify/require"
@@ -132,6 +133,29 @@ func (h *HarnessRPC) PendingChannels() *lnrpc.PendingChannelsResponse {
 
 	pendingChansRequest := &lnrpc.PendingChannelsRequest{}
 	resp, err := h.LN.PendingChannels(ctxt, pendingChansRequest)
+
+	// TODO(yy): We may get an `unable to find arbitrator` error from the
+	// RPC endpoint, due to a timing issue in rpcserver,
+	// 1. `r.server.chanStateDB.FetchClosedChannels` fetches
+	//    the pending force close channel.
+	// 2. `r.arbitratorPopulateForceCloseResp` relies on the
+	//    channel arbitrator to get the report, and,
+	// 3. the arbitrator may be deleted due to the force close
+	//    channel being resolved.
+	// Somewhere along the line a lock is missing to keep the data
+	// consistent.
+	//
+	// Return if there's no error.
+	if err == nil {
+		return resp
+	}
+
+	// Otherwise, give it a second shot if it's the arbitrator error.
+	if strings.Contains(err.Error(), "unable to find arbitrator") {
+		resp, err = h.LN.PendingChannels(ctxt, pendingChansRequest)
+	}
+
+	// It's very unlikely we'd get the arbitrator not found error again.
 	h.NoError(err, "PendingChannels")
 
 	return resp
@@ -517,3 +541,30 @@ func (h *HarnessRPC) SubscribeInvoices(
 
 	return client
 }
+
+type BackupSubscriber lnrpc.Lightning_SubscribeChannelBackupsClient
+
+// SubscribeChannelBackups creates a client to listen to the channel backup
+// stream.
+func (h *HarnessRPC) SubscribeChannelBackups() BackupSubscriber {
+	// Use runCtx here instead of timeout context to keep the stream client
+	// alive.
+	backupStream, err := h.LN.SubscribeChannelBackups(
+		h.runCtx, &lnrpc.ChannelBackupSubscription{},
+	)
+	require.NoErrorf(h, err, "unable to create backup stream")
+
+	return backupStream
+}
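
The retry above follows a narrow pattern: re-run the call once, and only when
the failure matches a specific, known-transient error. If more call sites grow
the same shape, it could be factored out; a hypothetical sketch, assuming Go
1.18+ (retryOnErrSubstr is not part of this patch):

	// retryOnErrSubstr re-runs fn once if the first attempt fails with a
	// known-transient error, and otherwise returns the original result.
	func retryOnErrSubstr[T any](fn func() (T, error),
		substr string) (T, error) {

		resp, err := fn()
		if err != nil && strings.Contains(err.Error(), substr) {
			resp, err = fn()
		}

		return resp, err
	}

+
+// VerifyChanBackup makes an RPC call to node's VerifyChanBackup and asserts.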
+func (h *HarnessRPC) VerifyChanBackup( + ss *lnrpc.ChanBackupSnapshot) *lnrpc.VerifyChanBackupResponse { + + ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout) + defer cancel() + + resp, err := h.LN.VerifyChanBackup(ctxt, ss) + require.NoErrorf(h, err, "unable to verify backup") + + return resp +} diff --git a/lntest/itest/list_on_test.go b/lntest/itest/list_on_test.go index 6e8698033..07afeeee5 100644 --- a/lntest/itest/list_on_test.go +++ b/lntest/itest/list_on_test.go @@ -191,4 +191,8 @@ var allTestCasesTemp = []*lntemp.TestCase{ Name: "invoice update subscription", TestFunc: testInvoiceSubscriptions, }, + { + Name: "streaming channel backup update", + TestFunc: testChannelBackupUpdates, + }, } diff --git a/lntest/itest/lnd_channel_backup_test.go b/lntest/itest/lnd_channel_backup_test.go index dc539ae03..09c9a14e7 100644 --- a/lntest/itest/lnd_channel_backup_test.go +++ b/lntest/itest/lnd_channel_backup_test.go @@ -782,12 +782,12 @@ func runChanRestoreScenarioForceClose(ht *lntemp.HarnessTest, zeroConf bool) { // testChannelBackupUpdates tests that both the streaming channel update RPC, // and the on-disk channel.backup are updated each time a channel is // opened/closed. -func testChannelBackupUpdates(net *lntest.NetworkHarness, t *harnessTest) { - ctxb := context.Background() +func testChannelBackupUpdates(ht *lntemp.HarnessTest) { + alice := ht.Alice // First, we'll make a temp directory that we'll use to store our // backup file, so we can check in on it during the test easily. - backupDir := t.t.TempDir() + backupDir := ht.T.TempDir() // First, we'll create a new node, Carol. We'll also create a temporary // file that Carol will use to store her channel backups. @@ -795,17 +795,11 @@ func testChannelBackupUpdates(net *lntest.NetworkHarness, t *harnessTest) { backupDir, chanbackup.DefaultBackupFileName, ) carolArgs := fmt.Sprintf("--backupfilepath=%v", backupFilePath) - carol := net.NewNode(t.t, "carol", []string{carolArgs}) - defer shutdownAndAssert(net, t, carol) + carol := ht.NewNode("carol", []string{carolArgs}) // Next, we'll register for streaming notifications for changes to the // backup file. - backupStream, err := carol.SubscribeChannelBackups( - ctxb, &lnrpc.ChannelBackupSubscription{}, - ) - if err != nil { - t.Fatalf("unable to create backup stream: %v", err) - } + backupStream := carol.RPC.SubscribeChannelBackups() // We'll use this goroutine to proxy any updates to a channel we can // easily use below. @@ -838,18 +832,16 @@ func testChannelBackupUpdates(net *lntest.NetworkHarness, t *harnessTest) { // With Carol up, we'll now connect her to Alice, and open a channel // between them. - net.ConnectNodes(t.t, carol, net.Alice) + ht.ConnectNodes(carol, alice) // Next, we'll open two channels between Alice and Carol back to back. 
var chanPoints []*lnrpc.ChannelPoint numChans := 2 chanAmt := btcutil.Amount(1000000) for i := 0; i < numChans; i++ { - chanPoint := openChannelAndAssert( - t, net, net.Alice, carol, - lntest.OpenChannelParams{Amt: chanAmt}, + chanPoint := ht.OpenChannel( + alice, carol, lntemp.OpenChannelParams{Amt: chanAmt}, ) - chanPoints = append(chanPoints, chanPoint) } @@ -860,12 +852,14 @@ func testChannelBackupUpdates(net *lntest.NetworkHarness, t *harnessTest) { for i := 0; i < numNtfns; i++ { select { case err := <-streamErr: - t.Fatalf("error with backup stream: %v", err) + require.Failf(ht, "stream err", + "error with backup stream: %v", err) case currentBackup = <-backupUpdates: case <-time.After(time.Second * 5): - t.Fatalf("didn't receive channel backup "+ + require.Failf(ht, "timeout", "didn't "+ + "receive channel backup "+ "notification %v", i+1) } } @@ -885,32 +879,29 @@ func testChannelBackupUpdates(net *lntest.NetworkHarness, t *harnessTest) { // nonce, we can't compare them directly, so instead // we'll compare the length which is a proxy for the // number of channels that the multi-backup contains. - rawBackup := currentBackup.MultiChanBackup.MultiChanBackup - if len(rawBackup) != len(packedBackup) { + backup := currentBackup.MultiChanBackup.MultiChanBackup + if len(backup) != len(packedBackup) { return fmt.Errorf("backup files don't match: "+ - "expected %x got %x", rawBackup, packedBackup) + "expected %x got %x", backup, + packedBackup) } // Additionally, we'll assert that both backups up // returned are valid. - for i, backup := range [][]byte{rawBackup, packedBackup} { + for _, backup := range [][]byte{backup, packedBackup} { snapshot := &lnrpc.ChanBackupSnapshot{ MultiChanBackup: &lnrpc.MultiChanBackup{ MultiChanBackup: backup, }, } - _, err := carol.VerifyChanBackup(ctxb, snapshot) - if err != nil { - return fmt.Errorf("unable to verify "+ - "backup #%d: %v", i, err) - } + + carol.RPC.VerifyChanBackup(snapshot) } return nil }, defaultTimeout) - if err != nil { - t.Fatalf("backup state invalid: %v", err) - } + require.NoError(ht, err, "timeout while checking "+ + "backup state: %v", err) } // As these two channels were just opened, we should've got two times @@ -931,11 +922,11 @@ func testChannelBackupUpdates(net *lntest.NetworkHarness, t *harnessTest) { chanPoint := chanPoints[i] - closeChannelAndAssert(t, net, net.Alice, chanPoint, forceClose) - // If we force closed the channel, then we'll mine enough // blocks to ensure all outputs have been swept. if forceClose { + ht.ForceCloseChannel(alice, chanPoint) + // A local force closed channel will trigger a // notification once the commitment TX confirms on // chain. But that won't remove the channel from the @@ -943,13 +934,12 @@ func testChannelBackupUpdates(net *lntest.NetworkHarness, t *harnessTest) { // locked contract was fully resolved on chain. assertBackupNtfns(1) - cleanupForceClose(t, net, net.Alice, chanPoint) - - // Now that the channel's been fully resolved, we expect - // another notification. + // Now that the channel's been fully resolved, we + // expect another notification. assertBackupNtfns(1) assertBackupFileState() } else { + ht.CloseChannel(alice, chanPoint) // We should get a single notification after closing, // and the on-disk state should match this latest // notifications. 
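
The proxy goroutine referenced earlier in this hunk (elided by the diff
context) is the glue between the gRPC stream and the select statements in
assertBackupNtfns: it forwards each snapshot onto a plain channel and surfaces
any stream error on another. A sketch of that shape, using the
BackupSubscriber client added in this patch (the channel names match the ones
the test reads from):

	// Forward every snapshot from the backup stream onto backupUpdates,
	// and report a terminal stream error on streamErr.
	backupUpdates := make(chan *lnrpc.ChanBackupSnapshot)
	streamErr := make(chan error, 1)

	go func() {
		for {
			snapshot, err := backupStream.Recv()
			if err != nil {
				streamErr <- err
				return
			}
			backupUpdates <- snapshot
		}
	}()
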
diff --git a/lntest/itest/lnd_test_list_on_test.go b/lntest/itest/lnd_test_list_on_test.go index 39e2d5d3d..771b73416 100644 --- a/lntest/itest/lnd_test_list_on_test.go +++ b/lntest/itest/lnd_test_list_on_test.go @@ -126,10 +126,6 @@ var allTestCases = []*testCase{ name: "route fee cutoff", test: testRouteFeeCutoff, }, - { - name: "streaming channel backup update", - test: testChannelBackupUpdates, - }, { name: "export channel backup", test: testExportChannelBackup, From 876bb5c066888bf6daa118a2d4a053fdb3b6a88c Mon Sep 17 00:00:00 2001 From: yyforyongyu Date: Thu, 4 Aug 2022 09:09:35 +0800 Subject: [PATCH 33/47] itest: refactor `testExportChannelBackup` --- lntest/itest/list_on_test.go | 4 ++ lntest/itest/lnd_channel_backup_test.go | 95 +++++++------------------ lntest/itest/lnd_test_list_on_test.go | 4 -- 3 files changed, 30 insertions(+), 73 deletions(-) diff --git a/lntest/itest/list_on_test.go b/lntest/itest/list_on_test.go index 07afeeee5..befa73514 100644 --- a/lntest/itest/list_on_test.go +++ b/lntest/itest/list_on_test.go @@ -195,4 +195,8 @@ var allTestCasesTemp = []*lntemp.TestCase{ Name: "streaming channel backup update", TestFunc: testChannelBackupUpdates, }, + { + Name: "export channel backup", + TestFunc: testExportChannelBackup, + }, } diff --git a/lntest/itest/lnd_channel_backup_test.go b/lntest/itest/lnd_channel_backup_test.go index 09c9a14e7..b9b1d3500 100644 --- a/lntest/itest/lnd_channel_backup_test.go +++ b/lntest/itest/lnd_channel_backup_test.go @@ -20,7 +20,6 @@ import ( "github.com/lightningnetwork/lnd/lnrpc/walletrpc" "github.com/lightningnetwork/lnd/lntemp" "github.com/lightningnetwork/lnd/lntemp/node" - "github.com/lightningnetwork/lnd/lntest" "github.com/lightningnetwork/lnd/lntest/wait" "github.com/stretchr/testify/require" ) @@ -952,73 +951,49 @@ func testChannelBackupUpdates(ht *lntemp.HarnessTest) { // testExportChannelBackup tests that we're able to properly export either a // targeted channel's backup, or export backups of all the currents open // channels. -func testExportChannelBackup(net *lntest.NetworkHarness, t *harnessTest) { - ctxb := context.Background() - +func testExportChannelBackup(ht *lntemp.HarnessTest) { // First, we'll create our primary test node: Carol. We'll use Carol to // open channels and also export backups that we'll examine throughout // the test. - carol := net.NewNode(t.t, "carol", nil) - defer shutdownAndAssert(net, t, carol) + carol := ht.NewNode("carol", nil) // With Carol up, we'll now connect her to Alice, and open a channel // between them. - net.ConnectNodes(t.t, carol, net.Alice) + alice := ht.Alice + ht.ConnectNodes(carol, alice) // Next, we'll open two channels between Alice and Carol back to back. var chanPoints []*lnrpc.ChannelPoint numChans := 2 chanAmt := btcutil.Amount(1000000) for i := 0; i < numChans; i++ { - chanPoint := openChannelAndAssert( - t, net, net.Alice, carol, - lntest.OpenChannelParams{Amt: chanAmt}, + chanPoint := ht.OpenChannel( + alice, carol, lntemp.OpenChannelParams{Amt: chanAmt}, ) - chanPoints = append(chanPoints, chanPoint) } // Now that the channels are open, we should be able to fetch the // backups of each of the channels. 
	for _, chanPoint := range chanPoints {
-		ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
-		req := &lnrpc.ExportChannelBackupRequest{
-			ChanPoint: chanPoint,
-		}
-		chanBackup, err := carol.ExportChannelBackup(ctxt, req)
-		if err != nil {
-			t.Fatalf("unable to fetch backup for channel %v: %v",
-				chanPoint, err)
-		}
+		chanBackup := carol.RPC.ExportChanBackup(chanPoint)
 
 		// The returned backup should be fully populated. Since it's
 		// encrypted, we can't assert any more than that atm.
-		if len(chanBackup.ChanBackup) == 0 {
-			t.Fatalf("obtained empty backup for channel: %v", chanPoint)
-		}
+		require.NotEmptyf(ht, chanBackup.ChanBackup,
+			"obtained empty backup for channel: %v", chanPoint)
 
 		// The specified chanPoint in the response should match our
 		// requested chanPoint.
-		if chanBackup.ChanPoint.String() != chanPoint.String() {
-			t.Fatalf("chanPoint mismatched: expected %v, got %v",
-				chanPoint.String(),
-				chanBackup.ChanPoint.String())
-		}
+		require.Equal(ht, chanBackup.ChanPoint.String(),
+			chanPoint.String())
 	}
 
 	// Before we proceed, we'll make two utility methods we'll use below
 	// for our primary assertions.
 	assertNumSingleBackups := func(numSingles int) {
 		err := wait.NoError(func() error {
-			ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
-			req := &lnrpc.ChanBackupExportRequest{}
-			chanSnapshot, err := carol.ExportAllChannelBackups(
-				ctxt, req,
-			)
-			if err != nil {
-				return fmt.Errorf("unable to export channel "+
-					"backup: %v", err)
-			}
+			chanSnapshot := carol.RPC.ExportAllChanBackups()
 
 			if chanSnapshot.SingleChanBackups == nil {
 				return fmt.Errorf("single chan backups not " +
					"populated")
@@ -1033,29 +1008,23 @@ func testExportChannelBackup(net *lntest.NetworkHarness, t *harnessTest) {
 
 			return nil
 		}, defaultTimeout)
-		if err != nil {
-			t.Fatalf(err.Error())
-		}
+		require.NoError(ht, err, "timeout checking num single backup")
 	}
+
 	assertMultiBackupFound := func() func(bool, map[wire.OutPoint]struct{}) {
-		ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
-		req := &lnrpc.ChanBackupExportRequest{}
-		chanSnapshot, err := carol.ExportAllChannelBackups(ctxt, req)
-		if err != nil {
-			t.Fatalf("unable to export channel backup: %v", err)
-		}
+		chanSnapshot := carol.RPC.ExportAllChanBackups()
 
 		return func(found bool, chanPoints map[wire.OutPoint]struct{}) {
 			switch {
 			case found && chanSnapshot.MultiChanBackup == nil:
-				t.Fatalf("multi-backup not present")
+				require.Fail(ht, "multi-backup not present")
 
 			case !found && chanSnapshot.MultiChanBackup != nil &&
 				(len(chanSnapshot.MultiChanBackup.MultiChanBackup) !=
					chanbackup.NilMultiSizePacked):
-				t.Fatalf("found multi-backup when non should " +
-					"be found")
+				require.Fail(ht, "found multi-backup when "+
+					"none should be found")
 			}
 
 			if !found {
 				return
 			}
 
 			backedUpChans := chanSnapshot.MultiChanBackup.ChanPoints
-			if len(chanPoints) != len(backedUpChans) {
-				t.Fatalf("expected %v chans got %v", len(chanPoints),
-					len(backedUpChans))
-			}
+			require.Len(ht, backedUpChans, len(chanPoints))
 
 			for _, chanPoint := range backedUpChans {
-				wirePoint := rpcPointToWirePoint(t, chanPoint)
-				if _, ok := chanPoints[wirePoint]; !ok {
-					t.Fatalf("unexpected backup: %v", wirePoint)
-				}
+				wp := ht.OutPointFromChannelPoint(chanPoint)
+				_, ok := chanPoints[wp]
+				require.True(ht, ok, "unexpected "+
+					"backup: %v", wp)
 			}
 		}
 	}
 
 	chans := make(map[wire.OutPoint]struct{})
 	for _, chanPoint := range chanPoints {
+
chans[ht.OutPointFromChannelPoint(chanPoint)] = struct{}{} } // We should have exactly two single channel backups contained, and we @@ -1091,11 +1057,11 @@ func testExportChannelBackup(net *lntest.NetworkHarness, t *harnessTest) { // shouldn't be able to find that channel as a backup still. We should // also have one less single written to disk. for i, chanPoint := range chanPoints { - closeChannelAndAssert(t, net, net.Alice, chanPoint, false) + ht.CloseChannel(alice, chanPoint) assertNumSingleBackups(len(chanPoints) - i - 1) - delete(chans, rpcPointToWirePoint(t, chanPoint)) + delete(chans, ht.OutPointFromChannelPoint(chanPoint)) assertMultiBackupFound()(true, chans) } @@ -1399,15 +1365,6 @@ func copyPorts(oldNode *node.HarnessNode) node.Option { } } -func rpcPointToWirePoint(t *harnessTest, - chanPoint *lnrpc.ChannelPoint) wire.OutPoint { - - op, err := lntest.MakeOutpoint(chanPoint) - require.NoError(t.t, err, "unable to get txid") - - return op -} - // assertTimeLockSwept when dave's outputs matures, he should claim them. This // function will advance 2 blocks such that all the pending closing // transactions would be swept in the end. diff --git a/lntest/itest/lnd_test_list_on_test.go b/lntest/itest/lnd_test_list_on_test.go index 771b73416..d8d658309 100644 --- a/lntest/itest/lnd_test_list_on_test.go +++ b/lntest/itest/lnd_test_list_on_test.go @@ -126,10 +126,6 @@ var allTestCases = []*testCase{ name: "route fee cutoff", test: testRouteFeeCutoff, }, - { - name: "export channel backup", - test: testExportChannelBackup, - }, { name: "hold invoice sender persistence", test: testHoldInvoicePersistence, From 8f472bf063d8a27a3e2ed1e64da23be22613bdb5 Mon Sep 17 00:00:00 2001 From: yyforyongyu Date: Thu, 4 Aug 2022 19:59:17 +0800 Subject: [PATCH 34/47] itest: refactor `testChannelBalance` --- lntest/itest/list_on_test.go | 4 +++ lntest/itest/lnd_channel_balance_test.go | 42 ++++++++---------------- lntest/itest/lnd_test_list_on_test.go | 4 --- 3 files changed, 17 insertions(+), 33 deletions(-) diff --git a/lntest/itest/list_on_test.go b/lntest/itest/list_on_test.go index befa73514..ee402fe25 100644 --- a/lntest/itest/list_on_test.go +++ b/lntest/itest/list_on_test.go @@ -199,4 +199,8 @@ var allTestCasesTemp = []*lntemp.TestCase{ Name: "export channel backup", TestFunc: testExportChannelBackup, }, + { + Name: "channel balance", + TestFunc: testChannelBalance, + }, } diff --git a/lntest/itest/lnd_channel_balance_test.go b/lntest/itest/lnd_channel_balance_test.go index f33b26fdb..066492fd7 100644 --- a/lntest/itest/lnd_channel_balance_test.go +++ b/lntest/itest/lnd_channel_balance_test.go @@ -9,6 +9,8 @@ import ( "github.com/lightningnetwork/lnd/funding" "github.com/lightningnetwork/lnd/lnrpc" "github.com/lightningnetwork/lnd/lnrpc/routerrpc" + "github.com/lightningnetwork/lnd/lntemp" + "github.com/lightningnetwork/lnd/lntemp/node" "github.com/lightningnetwork/lnd/lntest" "github.com/lightningnetwork/lnd/lntest/wait" "github.com/lightningnetwork/lnd/lnwire" @@ -17,14 +19,14 @@ import ( // testChannelBalance creates a new channel between Alice and Bob, then checks // channel balance to be equal amount specified while creation of channel. -func testChannelBalance(net *lntest.NetworkHarness, t *harnessTest) { +func testChannelBalance(ht *lntemp.HarnessTest) { // Open a channel with 0.16 BTC between Alice and Bob, ensuring the // channel has been opened properly. 
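
The "0.16 BTC" in the comment above is funding.MaxBtcFundingAmount, the
pre-wumbo BOLT-2 cap on channel capacity of 2^24 - 1 satoshis; a quick check
of the arithmetic:

	// 2^24 - 1 satoshis is 16,777,215 sats, roughly 0.1678 BTC.
	maxFunding := btcutil.Amount(1<<24 - 1)
	fmt.Println(maxFunding) // 0.16777215 BTC
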
amount := funding.MaxBtcFundingAmount // Creates a helper closure to be used below which asserts the proper // response to a channel balance RPC. - checkChannelBalance := func(node *lntest.HarnessNode, + checkChannelBalance := func(node *node.HarnessNode, local, remote btcutil.Amount) { expectedResponse := &lnrpc.ChannelBalanceResponse{ @@ -45,46 +47,28 @@ func testChannelBalance(net *lntest.NetworkHarness, t *harnessTest) { // Deprecated fields. Balance: int64(local), } - assertChannelBalanceResp(t, node, expectedResponse) + ht.AssertChannelBalanceResp(node, expectedResponse) } // Before beginning, make sure alice and bob are connected. - net.EnsureConnected(t.t, net.Alice, net.Bob) + alice, bob := ht.Alice, ht.Bob + ht.EnsureConnected(alice, bob) - chanPoint := openChannelAndAssert( - t, net, net.Alice, net.Bob, - lntest.OpenChannelParams{ - Amt: amount, - }, + chanPoint := ht.OpenChannel( + alice, bob, lntemp.OpenChannelParams{Amt: amount}, ) - - // Wait for both Alice and Bob to recognize this new channel. - err := net.Alice.WaitForNetworkChannelOpen(chanPoint) - if err != nil { - t.Fatalf("alice didn't advertise channel before "+ - "timeout: %v", err) - } - err = net.Bob.WaitForNetworkChannelOpen(chanPoint) - if err != nil { - t.Fatalf("bob didn't advertise channel before "+ - "timeout: %v", err) - } - - cType, err := channelCommitType(net.Alice, chanPoint) - if err != nil { - t.Fatalf("unable to get channel type: %v", err) - } + cType := ht.GetChannelCommitType(alice, chanPoint) // As this is a single funder channel, Alice's balance should be // exactly 0.5 BTC since now state transitions have taken place yet. - checkChannelBalance(net.Alice, amount-calcStaticFee(cType, 0), 0) + checkChannelBalance(alice, amount-calcStaticFee(cType, 0), 0) // Ensure Bob currently has no available balance within the channel. - checkChannelBalance(net.Bob, 0, amount-calcStaticFee(cType, 0)) + checkChannelBalance(bob, 0, amount-calcStaticFee(cType, 0)) // Finally close the channel between Alice and Bob, asserting that the // channel has been properly closed on-chain. 
- closeChannelAndAssert(t, net, net.Alice, chanPoint, false) + ht.CloseChannel(alice, chanPoint) } // testChannelUnsettledBalance will test that the UnsettledBalance field diff --git a/lntest/itest/lnd_test_list_on_test.go b/lntest/itest/lnd_test_list_on_test.go index d8d658309..4a7666569 100644 --- a/lntest/itest/lnd_test_list_on_test.go +++ b/lntest/itest/lnd_test_list_on_test.go @@ -12,10 +12,6 @@ var allTestCases = []*testCase{ name: "channel force closure", test: testChannelForceClosure, }, - { - name: "channel balance", - test: testChannelBalance, - }, { name: "channel unsettled balance", test: testChannelUnsettledBalance, From f7f259b92ae5b242619b1b0cfb736a33ae8f19a0 Mon Sep 17 00:00:00 2001 From: yyforyongyu Date: Thu, 4 Aug 2022 20:02:07 +0800 Subject: [PATCH 35/47] itest: refactor `testChannelUnsettledBalance` --- lntest/itest/list_on_test.go | 4 + lntest/itest/lnd_channel_balance_test.go | 138 ++++++++--------------- lntest/itest/lnd_test_list_on_test.go | 4 - 3 files changed, 51 insertions(+), 95 deletions(-) diff --git a/lntest/itest/list_on_test.go b/lntest/itest/list_on_test.go index ee402fe25..9bbd507d4 100644 --- a/lntest/itest/list_on_test.go +++ b/lntest/itest/list_on_test.go @@ -203,4 +203,8 @@ var allTestCasesTemp = []*lntemp.TestCase{ Name: "channel balance", TestFunc: testChannelBalance, }, + { + Name: "channel unsettled balance", + TestFunc: testChannelUnsettledBalance, + }, } diff --git a/lntest/itest/lnd_channel_balance_test.go b/lntest/itest/lnd_channel_balance_test.go index 066492fd7..9f4d5715b 100644 --- a/lntest/itest/lnd_channel_balance_test.go +++ b/lntest/itest/lnd_channel_balance_test.go @@ -1,17 +1,14 @@ package itest import ( - "context" "fmt" "github.com/btcsuite/btcd/btcutil" - "github.com/lightningnetwork/lnd/chainreg" "github.com/lightningnetwork/lnd/funding" "github.com/lightningnetwork/lnd/lnrpc" "github.com/lightningnetwork/lnd/lnrpc/routerrpc" "github.com/lightningnetwork/lnd/lntemp" "github.com/lightningnetwork/lnd/lntemp/node" - "github.com/lightningnetwork/lnd/lntest" "github.com/lightningnetwork/lnd/lntest/wait" "github.com/lightningnetwork/lnd/lnwire" "github.com/stretchr/testify/require" @@ -76,13 +73,12 @@ func testChannelBalance(ht *lntemp.HarnessTest) { // Alice will send Htlcs to Carol while she is in hodl mode. This will result // in a build of pending Htlcs. We expect the channels unsettled balance to // equal the sum of all the Pending Htlcs. -func testChannelUnsettledBalance(net *lntest.NetworkHarness, t *harnessTest) { +func testChannelUnsettledBalance(ht *lntemp.HarnessTest) { const chanAmt = btcutil.Amount(1000000) - ctxb := context.Background() // Creates a helper closure to be used below which asserts the proper // response to a channel balance RPC. - checkChannelBalance := func(node *lntest.HarnessNode, + checkChannelBalance := func(node *node.HarnessNode, local, remote, unsettledLocal, unsettledRemote btcutil.Amount) { expectedResponse := &lnrpc.ChannelBalanceResponse{ @@ -115,44 +111,25 @@ func testChannelUnsettledBalance(net *lntest.NetworkHarness, t *harnessTest) { // Deprecated fields. Balance: int64(local), } - assertChannelBalanceResp(t, node, expectedResponse) + ht.AssertChannelBalanceResp(node, expectedResponse) } // Create carol in hodl mode. - carol := net.NewNode(t.t, "Carol", []string{"--hodl.exit-settle"}) - defer shutdownAndAssert(net, t, carol) + carol := ht.NewNode("Carol", []string{"--hodl.exit-settle"}) // Connect Alice to Carol. 
-	net.ConnectNodes(t.t, net.Alice, carol)
+	alice := ht.Alice
+	ht.ConnectNodes(alice, carol)
 
 	// Open a channel between Alice and Carol.
-	chanPointAlice := openChannelAndAssert(
-		t, net, net.Alice, carol,
-		lntest.OpenChannelParams{
-			Amt: chanAmt,
-		},
+	chanPointAlice := ht.OpenChannel(
+		alice, carol, lntemp.OpenChannelParams{Amt: chanAmt},
 	)
-
-	// Wait for Alice and Carol to receive the channel edge from the
-	// funding manager.
-	err := net.Alice.WaitForNetworkChannelOpen(chanPointAlice)
-	if err != nil {
-		t.Fatalf("alice didn't see the alice->carol channel before "+
-			"timeout: %v", err)
-	}
-
-	err = carol.WaitForNetworkChannelOpen(chanPointAlice)
-	if err != nil {
-		t.Fatalf("alice didn't see the alice->carol channel before "+
-			"timeout: %v", err)
-	}
-
-	cType, err := channelCommitType(net.Alice, chanPointAlice)
-	require.NoError(t.t, err, "unable to get channel type")
+	cType := ht.GetChannelCommitType(alice, chanPointAlice)
 
 	// Check alice's channel balance, which should have zero remote and zero
 	// pending balance.
-	checkChannelBalance(net.Alice, chanAmt-calcStaticFee(cType, 0), 0, 0, 0)
+	checkChannelBalance(alice, chanAmt-calcStaticFee(cType, 0), 0, 0, 0)
 
 	// Check carol's channel balance, which should have zero local and zero
 	// pending balance.
@@ -165,79 +142,61 @@ func testChannelUnsettledBalance(net *lntest.NetworkHarness, t *harnessTest) {
 	)
 
 	// Simultaneously send numInvoices payments from Alice to Carol.
-	carolPubKey := carol.PubKey[:]
-	errChan := make(chan error)
 	for i := 0; i < numInvoices; i++ {
 		go func() {
-			ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
-			_, err := net.Alice.RouterClient.SendPaymentV2(ctxt,
-				&routerrpc.SendPaymentRequest{
-					Dest: carolPubKey,
-					Amt: int64(payAmt),
-					PaymentHash: makeFakePayHash(t),
-					FinalCltvDelta: chainreg.DefaultBitcoinTimeLockDelta,
-					TimeoutSeconds: 60,
-					FeeLimitMsat: noFeeLimitMsat,
-				})
-
-			if err != nil {
-				errChan <- err
+			req := &routerrpc.SendPaymentRequest{
+				Dest: carol.PubKey[:],
+				Amt: int64(payAmt),
+				PaymentHash: ht.Random32Bytes(),
+				FinalCltvDelta: finalCltvDelta,
+				TimeoutSeconds: 60,
+				FeeLimitMsat: noFeeLimitMsat,
 			}
+			alice.RPC.SendPayment(req)
 		}()
 	}
 
+	// There should be a number of PendingHtlcs equal to the number of
+	// invoices sent.
+	ht.AssertNumActiveHtlcs(alice, numInvoices)
+	ht.AssertNumActiveHtlcs(carol, numInvoices)
+
+	// Set the amount expected for the Unsettled Balance for this channel.
+	expectedBalance := numInvoices * payAmt
+
 	// Test that the UnsettledBalance for both Alice and Carol
 	// is equal to the amount of invoices * payAmt.
-	var unsettledErr error
-	nodes := []*lntest.HarnessNode{net.Alice, carol}
-	err = wait.Predicate(func() bool {
-		// There should be a number of PendingHtlcs equal
-		// to the amount of Invoices sent.
-		unsettledErr = assertNumActiveHtlcs(nodes, numInvoices)
-		if unsettledErr != nil {
-			return false
+	checkUnsettledBalance := func() error {
+		// Get channel info for Alice.
+		chanInfo := ht.QueryChannelByChanPoint(alice, chanPointAlice)
+
+		// Check that UnsettledBalance is what we expect.
+		if int(chanInfo.UnsettledBalance) != expectedBalance {
+			return fmt.Errorf("unsettled balance failed "+
+				"expected: %v, received: %v", expectedBalance,
+				chanInfo.UnsettledBalance)
 		}
 
-		// Set the amount expected for the Unsettled Balance for
-		// this channel.
-		expectedBalance := numInvoices * payAmt
+		// Get channel info for Carol.
+		chanInfo = ht.QueryChannelByChanPoint(carol, chanPointAlice)
 
-		// Check each nodes UnsettledBalance field.
- for _, node := range nodes { - // Get channel info for the node. - chanInfo, err := getChanInfo(node) - if err != nil { - unsettledErr = err - return false - } - - // Check that UnsettledBalance is what we expect. - if int(chanInfo.UnsettledBalance) != expectedBalance { - unsettledErr = fmt.Errorf("unsettled balance failed "+ - "expected: %v, received: %v", expectedBalance, - chanInfo.UnsettledBalance) - return false - } + // Check that UnsettledBalance is what we expect. + if int(chanInfo.UnsettledBalance) != expectedBalance { + return fmt.Errorf("unsettled balance failed "+ + "expected: %v, received: %v", expectedBalance, + chanInfo.UnsettledBalance) } - return true - }, defaultTimeout) - if err != nil { - t.Fatalf("unsettled balace error: %v", unsettledErr) - } - - // Check for payment errors. - select { - case err := <-errChan: - t.Fatalf("payment error: %v", err) - default: + return nil } + require.NoError(ht, wait.NoError(checkUnsettledBalance, defaultTimeout), + "timeout while checking unsettled balance") // Check alice's channel balance, which should have a remote unsettled // balance that equals to the amount of invoices * payAmt. The remote // balance remains zero. aliceLocal := chanAmt - calcStaticFee(cType, 0) - numInvoices*payAmt - checkChannelBalance(net.Alice, aliceLocal, 0, 0, numInvoices*payAmt) + checkChannelBalance(alice, aliceLocal, 0, 0, numInvoices*payAmt) // Check carol's channel balance, which should have a local unsettled // balance that equals to the amount of invoices * payAmt. The local @@ -245,8 +204,5 @@ func testChannelUnsettledBalance(net *lntest.NetworkHarness, t *harnessTest) { checkChannelBalance(carol, 0, aliceLocal, numInvoices*payAmt, 0) // Force and assert the channel closure. - closeChannelAndAssert(t, net, net.Alice, chanPointAlice, true) - - // Cleanup by mining the force close and sweep transaction. - cleanupForceClose(t, net, net.Alice, chanPointAlice) + ht.ForceCloseChannel(alice, chanPointAlice) } diff --git a/lntest/itest/lnd_test_list_on_test.go b/lntest/itest/lnd_test_list_on_test.go index 4a7666569..70b3d6371 100644 --- a/lntest/itest/lnd_test_list_on_test.go +++ b/lntest/itest/lnd_test_list_on_test.go @@ -12,10 +12,6 @@ var allTestCases = []*testCase{ name: "channel force closure", test: testChannelForceClosure, }, - { - name: "channel unsettled balance", - test: testChannelUnsettledBalance, - }, { name: "single hop invoice", test: testSingleHopInvoice, From 61364c0d9debb5e3378b255745ee3beb90995a49 Mon Sep 17 00:00:00 2001 From: yyforyongyu Date: Thu, 4 Aug 2022 20:15:01 +0800 Subject: [PATCH 36/47] lntemp+itest: refactor `testCommitmentTransactionDeadline` --- lntemp/harness.go | 48 ++++++++ lntest/itest/list_on_test.go | 4 + lntest/itest/lnd_channel_force_close_test.go | 120 ++++++------------- lntest/itest/lnd_test_list_on_test.go | 4 - 4 files changed, 89 insertions(+), 87 deletions(-) diff --git a/lntemp/harness.go b/lntemp/harness.go index dfc8a6b27..0350ef995 100644 --- a/lntemp/harness.go +++ b/lntemp/harness.go @@ -8,6 +8,7 @@ import ( "testing" "time" + "github.com/btcsuite/btcd/blockchain" "github.com/btcsuite/btcd/btcutil" "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcd/txscript" @@ -595,6 +596,14 @@ func (h *HarnessTest) SetFeeEstimate(fee chainfee.SatPerKWeight) { h.feeService.SetFeeRate(fee, 1) } +// SetFeeEstimateWithConf sets a fee rate of a specified conf target to be +// returned from fee estimator. 
+func (h *HarnessTest) SetFeeEstimateWithConf( + fee chainfee.SatPerKWeight, conf uint32) { + + h.feeService.SetFeeRate(fee, conf) +} + // validateNodeState checks that the node doesn't have any uncleaned states // which will affect its following tests. func (h *HarnessTest) validateNodeState(hn *node.HarnessNode) { @@ -1475,3 +1484,42 @@ func (h *HarnessTest) ReceiveInvoiceUpdate( return nil } + +// CalculateTxFee retrieves parent transactions and reconstructs the fee paid. +func (h *HarnessTest) CalculateTxFee(tx *wire.MsgTx) btcutil.Amount { + var balance btcutil.Amount + for _, in := range tx.TxIn { + parentHash := in.PreviousOutPoint.Hash + rawTx := h.Miner.GetRawTransaction(&parentHash) + parent := rawTx.MsgTx() + balance += btcutil.Amount( + parent.TxOut[in.PreviousOutPoint.Index].Value, + ) + } + + for _, out := range tx.TxOut { + balance -= btcutil.Amount(out.Value) + } + + return balance +} + +// CalculateTxesFeeRate takes a list of transactions and estimates the fee rate +// used to sweep them. +// +// NOTE: only used in current test file. +func (h *HarnessTest) CalculateTxesFeeRate(txns []*wire.MsgTx) int64 { + const scale = 1000 + + var totalWeight, totalFee int64 + for _, tx := range txns { + utx := btcutil.NewTx(tx) + totalWeight += blockchain.GetTransactionWeight(utx) + + fee := h.CalculateTxFee(tx) + totalFee += int64(fee) + } + feeRate := totalFee * scale / totalWeight + + return feeRate +} diff --git a/lntest/itest/list_on_test.go b/lntest/itest/list_on_test.go index 9bbd507d4..3dfe73ddd 100644 --- a/lntest/itest/list_on_test.go +++ b/lntest/itest/list_on_test.go @@ -207,4 +207,8 @@ var allTestCasesTemp = []*lntemp.TestCase{ Name: "channel unsettled balance", TestFunc: testChannelUnsettledBalance, }, + { + Name: "commitment deadline", + TestFunc: testCommitmentTransactionDeadline, + }, } diff --git a/lntest/itest/lnd_channel_force_close_test.go b/lntest/itest/lnd_channel_force_close_test.go index 0d7c4e4e2..a0767dcf1 100644 --- a/lntest/itest/lnd_channel_force_close_test.go +++ b/lntest/itest/lnd_channel_force_close_test.go @@ -14,6 +14,8 @@ import ( "github.com/lightningnetwork/lnd/chainreg" "github.com/lightningnetwork/lnd/lnrpc" "github.com/lightningnetwork/lnd/lnrpc/routerrpc" + "github.com/lightningnetwork/lnd/lntemp" + "github.com/lightningnetwork/lnd/lntemp/node" "github.com/lightningnetwork/lnd/lntest" "github.com/lightningnetwork/lnd/lntest/wait" "github.com/lightningnetwork/lnd/lnwallet" @@ -30,9 +32,7 @@ import ( // // Note that whether the deadline is used or not is implicitly checked by its // corresponding fee rates. -func testCommitmentTransactionDeadline(net *lntest.NetworkHarness, - t *harnessTest) { - +func testCommitmentTransactionDeadline(ht *lntemp.HarnessTest) { // Get the default max fee rate used in sweeping the commitment // transaction. defaultMax := lnwallet.DefaultAnchorsCommitMaxFeeRateSatPerVByte @@ -65,27 +65,27 @@ func testCommitmentTransactionDeadline(net *lntest.NetworkHarness, // transaction to CPFP our commitment transaction. feeRateLarge := maxPerKw * 2 - ctxb := context.Background() - // Before we start, set up the default fee rate and we will test the // actual fee rate against it to decide whether we are using the // deadline to perform fee estimation. - net.SetFeeEstimate(feeRateDefault) + ht.SetFeeEstimate(feeRateDefault) // setupNode creates a new node and sends 1 btc to the node. - setupNode := func(name string) *lntest.HarnessNode { + setupNode := func(name string) *node.HarnessNode { // Create the node. 
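To make the arithmetic in the CalculateTxFee and CalculateTxesFeeRate helpers above concrete: the fee of a transaction is the sum of the values of the outputs it spends minus the sum of its own outputs, and the rate is scaled by 1000 to get satoshis per kilo-weight-unit (sat/kw). A worked example with made-up numbers:

package main

import "fmt"

func main() {
	// Illustrative values only; in the harness these come from the
	// miner's view of the parent transactions and from
	// blockchain.GetTransactionWeight.
	const (
		inputValue  int64 = 100_000 // sum of the spent previous outputs
		outputValue int64 = 98_800  // sum of the tx's own outputs
		txWeight    int64 = 400     // weight units
		scale       int64 = 1000
	)

	fee := inputValue - outputValue   // 1200 sats paid in fees
	feeRate := fee * scale / txWeight // 3000 sat/kw

	// Note the integer division truncates, one reason the tests compare
	// fee rates with require.InEpsilon rather than exact equality.
	fmt.Println(fee, feeRate)
}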
args := []string{"--hodl.exit-settle"} - args = append(args, nodeArgsForCommitType(lnrpc.CommitmentType_ANCHORS)...) - node := net.NewNode(t.t, name, args) + args = append(args, nodeArgsForCommitType( + lnrpc.CommitmentType_ANCHORS)..., + ) + node := ht.NewNode(name, args) // Send some coins to the node. - net.SendCoins(t.t, btcutil.SatoshiPerBitcoin, node) + ht.FundCoins(btcutil.SatoshiPerBitcoin, node) // For neutrino backend, we need one additional UTXO to create // the sweeping tx for the remote anchor. - if net.BackendCfg.Name() == lntest.NeutrinoBackendName { - net.SendCoins(t.t, btcutil.SatoshiPerBitcoin, node) + if ht.IsNeutrinoBackend() { + ht.FundCoins(btcutil.SatoshiPerBitcoin, node) } return node @@ -96,17 +96,17 @@ func testCommitmentTransactionDeadline(net *lntest.NetworkHarness, calculateSweepFeeRate := func(expectedSweepTxNum int) int64 { // Create two nodes, Alice and Bob. alice := setupNode("Alice") - defer shutdownAndAssert(net, t, alice) + defer ht.Shutdown(alice) bob := setupNode("Bob") - defer shutdownAndAssert(net, t, bob) + defer ht.Shutdown(bob) // Connect Alice to Bob. - net.ConnectNodes(t.t, alice, bob) + ht.ConnectNodes(alice, bob) // Open a channel between Alice and Bob. - chanPoint := openChannelAndAssert( - t, net, alice, bob, lntest.OpenChannelParams{ + chanPoint := ht.OpenChannel( + alice, bob, lntemp.OpenChannelParams{ Amt: 10e6, PushAmt: 5e6, }, @@ -115,64 +115,38 @@ func testCommitmentTransactionDeadline(net *lntest.NetworkHarness, // Send a payment with a specified finalCTLVDelta, which will // be used as our deadline later on when Alice force closes the // channel. - ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout) - defer cancel() - _, err := alice.RouterClient.SendPaymentV2( - ctxt, &routerrpc.SendPaymentRequest{ - Dest: bob.PubKey[:], - Amt: 10e4, - PaymentHash: makeFakePayHash(t), - FinalCltvDelta: finalCTLV, - TimeoutSeconds: 60, - FeeLimitMsat: noFeeLimitMsat, - }, - ) - require.NoError(t.t, err, "unable to send alice htlc") + req := &routerrpc.SendPaymentRequest{ + Dest: bob.PubKey[:], + Amt: 10e4, + PaymentHash: ht.Random32Bytes(), + FinalCltvDelta: finalCTLV, + TimeoutSeconds: 60, + FeeLimitMsat: noFeeLimitMsat, + } + alice.RPC.SendPayment(req) // Once the HTLC has cleared, all the nodes in our mini network // should show that the HTLC has been locked in. - nodes := []*lntest.HarnessNode{alice, bob} - err = wait.NoError(func() error { - return assertNumActiveHtlcs(nodes, 1) - }, defaultTimeout) - require.NoError(t.t, err, "htlc mismatch") + ht.AssertNumActiveHtlcs(alice, 1) + ht.AssertNumActiveHtlcs(bob, 1) // Alice force closes the channel. - _, _, err = net.CloseChannel(alice, chanPoint, true) - require.NoError(t.t, err, "unable to force close channel") + ht.CloseChannelAssertPending(alice, chanPoint, true) // Now that the channel has been force closed, it should show // up in the PendingChannels RPC under the waiting close // section. - ctxt, cancel = context.WithTimeout(ctxb, defaultTimeout) - defer cancel() - pendingChansRequest := &lnrpc.PendingChannelsRequest{} - pendingChanResp, err := alice.PendingChannels( - ctxt, pendingChansRequest, - ) - require.NoError( - t.t, err, "unable to query for pending channels", - ) - require.NoError( - t.t, checkNumWaitingCloseChannels(pendingChanResp, 1), - ) + ht.AssertChannelWaitingClose(alice, chanPoint) // Check our sweep transactions can be found in mempool. 
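The two calculateSweepFeeRate runs below hinge on a simple decision: the deadline-derived fee rate only matters for the anchor CPFP sweep when it beats the commitment transaction's own (capped) fee rate, in which case a second sweep transaction shows up in the mempool. A rough sketch of that decision, as an illustration of the test's expectations rather than lnd's sweeper code:

package main

import "fmt"

// anchorSweepPlan returns the fee rate we expect to observe and how many
// transactions should sit in the mempool, given the deadline-derived rate
// and the commitment tx's capped rate (both in sat/kw).
func anchorSweepPlan(deadlineRate, commitRate int64) (int64, int) {
	if deadlineRate <= commitRate {
		// CPFP-ing the anchor would not raise the effective fee
		// rate, so only the commitment tx is expected.
		return commitRate, 1
	}
	// The anchor is swept at the deadline rate in its own tx.
	return deadlineRate, 2
}

func main() {
	fmt.Println(anchorSweepPlan(1_000, 2_500)) // 2500 1
	fmt.Println(anchorSweepPlan(5_000, 2_500)) // 5000 2
}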
- sweepTxns, err := getNTxsFromMempool( - net.Miner.Client, - expectedSweepTxNum, minerMempoolTimeout, - ) - require.NoError( - t.t, err, "failed to find commitment tx in mempool", - ) + sweepTxns := ht.Miner.GetNumTxsFromMempool(expectedSweepTxNum) // Mine a block to confirm these transactions such that they // don't remain in the mempool for any subsequent tests. - _, err = net.Miner.Client.Generate(1) - require.NoError(t.t, err, "unable to mine blocks") + ht.MineBlocks(1) // Calculate the fee rate used. - feeRate := calculateTxnsFeeRate(t.t, net.Miner, sweepTxns) + feeRate := ht.CalculateTxesFeeRate(sweepTxns) return feeRate } @@ -180,7 +154,7 @@ func testCommitmentTransactionDeadline(net *lntest.NetworkHarness, // Setup our fee estimation for the deadline. Because the fee rate is // smaller than the parent tx's fee rate, this value won't be used and // we should see only one sweep tx in the mempool. - net.SetFeeEstimateWithConf(feeRateSmall, deadline) + ht.SetFeeEstimateWithConf(feeRateSmall, deadline) // Calculate fee rate used. feeRate := calculateSweepFeeRate(1) @@ -188,7 +162,7 @@ func testCommitmentTransactionDeadline(net *lntest.NetworkHarness, // We expect the default max fee rate is used. Allow some deviation // because weight estimates during tx generation are estimates. require.InEpsilonf( - t.t, int64(maxPerKw), feeRate, 0.01, + ht, int64(maxPerKw), feeRate, 0.01, "expected fee rate:%d, got fee rate:%d", maxPerKw, feeRate, ) @@ -196,7 +170,7 @@ func testCommitmentTransactionDeadline(net *lntest.NetworkHarness, // greater than the parent tx's fee rate, this value will be used to // sweep the anchor transaction and we should see two sweep // transactions in the mempool. - net.SetFeeEstimateWithConf(feeRateLarge, deadline) + ht.SetFeeEstimateWithConf(feeRateLarge, deadline) // Calculate fee rate used. feeRate = calculateSweepFeeRate(2) @@ -204,31 +178,11 @@ func testCommitmentTransactionDeadline(net *lntest.NetworkHarness, // We expect the anchor to be swept with the deadline, which has the // fee rate of feeRateLarge. require.InEpsilonf( - t.t, int64(feeRateLarge), feeRate, 0.01, + ht, int64(feeRateLarge), feeRate, 0.01, "expected fee rate:%d, got fee rate:%d", feeRateLarge, feeRate, ) } -// calculateTxnsFeeRate takes a list of transactions and estimates the fee rate -// used to sweep them. -func calculateTxnsFeeRate(t *testing.T, - miner *lntest.HarnessMiner, txns []*wire.MsgTx) int64 { - - var totalWeight, totalFee int64 - for _, tx := range txns { - utx := btcutil.NewTx(tx) - totalWeight += blockchain.GetTransactionWeight(utx) - - fee, err := getTxFee(miner.Client, tx) - require.NoError(t, err) - - totalFee += int64(fee) - } - feeRate := totalFee * 1000 / totalWeight - - return feeRate -} - // testChannelForceClosure performs a test to exercise the behavior of "force" // closing a channel or unilaterally broadcasting the latest local commitment // state on-chain. 
The test creates a new channel between Alice and Carol, then
diff --git a/lntest/itest/lnd_test_list_on_test.go b/lntest/itest/lnd_test_list_on_test.go
index 70b3d6371..2323ed5d1 100644
--- a/lntest/itest/lnd_test_list_on_test.go
+++ b/lntest/itest/lnd_test_list_on_test.go
@@ -126,10 +126,6 @@ var allTestCases = []*testCase{
 		name: "hold invoice force close",
 		test: testHoldInvoiceForceClose,
 	},
-	{
-		name: "commitment deadline",
-		test: testCommitmentTransactionDeadline,
-	},
 	{
 		name: "cpfp",
 		test: testCPFP,

From 008ef964fc7c67a6b60283ba45b503903a24a804 Mon Sep 17 00:00:00 2001
From: yyforyongyu
Date: Thu, 4 Aug 2022 22:19:46 +0800
Subject: [PATCH 37/47] lntemp+itest: refactor `testChannelForceClosure`

---
 lntemp/harness.go                            | 105 +++
 lntemp/rpc/wallet_kit.go                     |  25 +
 lntest/itest/assertions.go                   |  95 ---
 lntest/itest/list_on_test.go                 |   4 +
 lntest/itest/lnd_channel_force_close_test.go | 678 ++++++-------------
 lntest/itest/lnd_test_list_on_test.go        |   4 -
 6 files changed, 338 insertions(+), 573 deletions(-)

diff --git a/lntemp/harness.go b/lntemp/harness.go
index 0350ef995..5a4d62ae8 100644
--- a/lntemp/harness.go
+++ b/lntemp/harness.go
@@ -16,6 +16,7 @@ import (
 	"github.com/go-errors/errors"
 	"github.com/lightningnetwork/lnd/lnrpc"
 	"github.com/lightningnetwork/lnd/lnrpc/routerrpc"
+	"github.com/lightningnetwork/lnd/lnrpc/walletrpc"
 	"github.com/lightningnetwork/lnd/lntemp/node"
 	"github.com/lightningnetwork/lnd/lntemp/rpc"
 	"github.com/lightningnetwork/lnd/lntest"
@@ -1523,3 +1524,107 @@ func (h *HarnessTest) CalculateTxesFeeRate(txns []*wire.MsgTx) int64 {
 
 	return feeRate
 }
+
+type SweptOutput struct {
+	OutPoint wire.OutPoint
+	SweepTx  *wire.MsgTx
+}
+
+// FindCommitAndAnchor looks for a commitment sweep and anchor sweep in the
+// mempool. Our anchor output is identified by having multiple inputs in its
+// sweep transaction, because we have to bring another input to add fees to
+// the anchor. Note that the anchor swept output may be nil if the channel
+// did not have anchors.
+func (h *HarnessTest) FindCommitAndAnchor(sweepTxns []*wire.MsgTx,
+	closeTx string) (*SweptOutput, *SweptOutput) {
+
+	var commitSweep, anchorSweep *SweptOutput
+
+	for _, tx := range sweepTxns {
+		txHash := tx.TxHash()
+		sweepTx := h.Miner.GetRawTransaction(&txHash)
+
+		// We expect our commitment sweep to have a single input, and,
+		// our anchor sweep to have more inputs (because the wallet
+		// needs to add balance to the anchor amount). We find their
+		// sweep txids here to set up appropriate resolutions. We also
+		// need to find the outpoint for our resolution, which we do by
+		// matching the inputs to the sweep to the close transaction.
+		inputs := sweepTx.MsgTx().TxIn
+		if len(inputs) == 1 {
+			commitSweep = &SweptOutput{
+				OutPoint: inputs[0].PreviousOutPoint,
+				SweepTx:  tx,
+			}
+		} else {
+			// Since we have more than one input, we run through
+			// them to find the one whose previous outpoint matches
+			// the closing txid, which means this input is spending
+			// the close tx. This will be our anchor output.
+			for _, txin := range inputs {
+				op := txin.PreviousOutPoint.Hash.String()
+				if op == closeTx {
+					anchorSweep = &SweptOutput{
+						OutPoint: txin.PreviousOutPoint,
+						SweepTx:  tx,
+					}
+				}
+			}
+		}
+	}
+
+	return commitSweep, anchorSweep
+}
+
+// AssertSweepFound looks up a sweep in a node's list of broadcast sweeps and
+// asserts it's found.
+//
+// NOTE: Does not account for node's internal state.
+func (h *HarnessTest) AssertSweepFound(hn *node.HarnessNode,
+	sweep string, verbose bool) {
+
+	// List all sweeps that the node had broadcast.
+	sweepResp := hn.RPC.ListSweeps(verbose)
+
+	var found bool
+	if verbose {
+		found = findSweepInDetails(h, sweep, sweepResp)
+	} else {
+		found = findSweepInTxids(h, sweep, sweepResp)
+	}
+
+	require.Truef(h, found, "%s: sweep: %v not found", hn.Name(), sweep)
+}
+
+func findSweepInTxids(ht *HarnessTest, sweepTxid string,
+	sweepResp *walletrpc.ListSweepsResponse) bool {
+
+	sweepTxIDs := sweepResp.GetTransactionIds()
+	require.NotNil(ht, sweepTxIDs, "expected transaction ids")
+	require.Nil(ht, sweepResp.GetTransactionDetails())
+
+	// Check that the sweep tx we have just produced is present.
+	for _, tx := range sweepTxIDs.TransactionIds {
+		if tx == sweepTxid {
+			return true
+		}
+	}
+
+	return false
+}
+
+func findSweepInDetails(ht *HarnessTest, sweepTxid string,
+	sweepResp *walletrpc.ListSweepsResponse) bool {
+
+	sweepDetails := sweepResp.GetTransactionDetails()
+	require.NotNil(ht, sweepDetails, "expected transaction details")
+	require.Nil(ht, sweepResp.GetTransactionIds())
+
+	for _, tx := range sweepDetails.Transactions {
+		if tx.TxHash == sweepTxid {
+			return true
+		}
+	}
+
+	return false
+}
diff --git a/lntemp/rpc/wallet_kit.go b/lntemp/rpc/wallet_kit.go
index 14713f896..986e5b388 100644
--- a/lntemp/rpc/wallet_kit.go
+++ b/lntemp/rpc/wallet_kit.go
@@ -124,3 +124,28 @@ func (h *HarnessRPC) ListAddresses(
 
 	return key
 }
+
+// ListSweeps makes a ListSweeps RPC call to the node's WalletKit client.
+func (h *HarnessRPC) ListSweeps(verbose bool) *walletrpc.ListSweepsResponse {
+	ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
+	defer cancel()
+
+	req := &walletrpc.ListSweepsRequest{Verbose: verbose}
+	resp, err := h.WalletKit.ListSweeps(ctxt, req)
+	h.NoError(err, "ListSweeps")
+
+	return resp
+}
+
+// PendingSweeps makes a RPC call to the node's WalletKitClient and asserts.
+func (h *HarnessRPC) PendingSweeps() *walletrpc.PendingSweepsResponse {
+	ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
+	defer cancel()
+
+	req := &walletrpc.PendingSweepsRequest{}
+
+	resp, err := h.WalletKit.PendingSweeps(ctxt, req)
+	h.NoError(err, "PendingSweeps")
+
+	return resp
+}
diff --git a/lntest/itest/assertions.go b/lntest/itest/assertions.go
index 239123d55..0be40d324 100644
--- a/lntest/itest/assertions.go
+++ b/lntest/itest/assertions.go
@@ -17,7 +17,6 @@ import (
 	"github.com/lightningnetwork/lnd/channeldb"
 	"github.com/lightningnetwork/lnd/lnrpc"
 	"github.com/lightningnetwork/lnd/lnrpc/routerrpc"
-	"github.com/lightningnetwork/lnd/lnrpc/walletrpc"
 	"github.com/lightningnetwork/lnd/lntest"
 	"github.com/lightningnetwork/lnd/lntest/wait"
 	"github.com/stretchr/testify/require"
 )
@@ -815,100 +814,6 @@ func checkPendingHtlcStageAndMaturity(
 	return nil
 }
 
-// assertReports checks that the count of resolutions we have present per
-// type matches a set of expected resolutions.
-func assertReports(t *harnessTest, node *lntest.HarnessNode,
-	channelPoint wire.OutPoint, expected map[string]*lnrpc.Resolution) {
-
-	// Get our node's closed channels.
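For context on the two lookup paths above: the ListSweeps response is populated as either bare transaction ids or full transaction details depending on the verbose flag, never both, which is why each helper asserts the other field is nil. A simplified, hypothetical mirror of that lookup (the struct is a stand-in, not the walletrpc message):

package main

import "fmt"

// listSweepsResp stands in for walletrpc.ListSweepsResponse: exactly one of
// the two fields is populated, depending on the verbose flag of the request.
type listSweepsResp struct {
	txids   []string          // set when verbose=false
	details map[string]string // txid -> raw tx hex, set when verbose=true
}

func sweepFound(resp *listSweepsResp, txid string, verbose bool) bool {
	if verbose {
		_, ok := resp.details[txid]
		return ok
	}
	for _, id := range resp.txids {
		if id == txid {
			return true
		}
	}
	return false
}

func main() {
	resp := &listSweepsResp{txids: []string{"aa11", "bb22"}}
	fmt.Println(sweepFound(resp, "bb22", false)) // true
	fmt.Println(sweepFound(resp, "cc33", false)) // false
}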
- ctxb := context.Background() - ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout) - defer cancel() - - closed, err := node.ClosedChannels( - ctxt, &lnrpc.ClosedChannelsRequest{}, - ) - require.NoError(t.t, err) - - var resolutions []*lnrpc.Resolution - for _, close := range closed.Channels { - if close.ChannelPoint == channelPoint.String() { - resolutions = close.Resolutions - break - } - } - - require.NotNil(t.t, resolutions) - require.Equal(t.t, len(expected), len(resolutions)) - - for _, res := range resolutions { - outPointStr := fmt.Sprintf("%v:%v", res.Outpoint.TxidStr, - res.Outpoint.OutputIndex) - - expected, ok := expected[outPointStr] - require.True(t.t, ok) - require.Equal(t.t, expected, res) - } -} - -// assertSweepFound looks up a sweep in a nodes list of broadcast sweeps. -func assertSweepFound(t *testing.T, node *lntest.HarnessNode, - sweep string, verbose bool) { - - // List all sweeps that alice's node had broadcast. - ctxb := context.Background() - ctx, cancel := context.WithTimeout(ctxb, defaultTimeout) - defer cancel() - sweepResp, err := node.WalletKitClient.ListSweeps( - ctx, &walletrpc.ListSweepsRequest{ - Verbose: verbose, - }, - ) - require.NoError(t, err) - - var found bool - if verbose { - found = findSweepInDetails(t, sweep, sweepResp) - } else { - found = findSweepInTxids(t, sweep, sweepResp) - } - - require.True(t, found, "sweep: %v not found", sweep) -} - -func findSweepInTxids(t *testing.T, sweepTxid string, - sweepResp *walletrpc.ListSweepsResponse) bool { - - sweepTxIDs := sweepResp.GetTransactionIds() - require.NotNil(t, sweepTxIDs, "expected transaction ids") - require.Nil(t, sweepResp.GetTransactionDetails()) - - // Check that the sweep tx we have just produced is present. - for _, tx := range sweepTxIDs.TransactionIds { - if tx == sweepTxid { - return true - } - } - - return false -} - -func findSweepInDetails(t *testing.T, sweepTxid string, - sweepResp *walletrpc.ListSweepsResponse) bool { - - sweepDetails := sweepResp.GetTransactionDetails() - require.NotNil(t, sweepDetails, "expected transaction details") - require.Nil(t, sweepResp.GetTransactionIds()) - - for _, tx := range sweepDetails.Transactions { - if tx.TxHash == sweepTxid { - return true - } - } - - return false -} - // assertAmountSent generates a closure which queries listchannels for sndr and // rcvr, and asserts that sndr sent amt satoshis, and that rcvr received amt // satoshis. diff --git a/lntest/itest/list_on_test.go b/lntest/itest/list_on_test.go index 3dfe73ddd..b6ff5840f 100644 --- a/lntest/itest/list_on_test.go +++ b/lntest/itest/list_on_test.go @@ -211,4 +211,8 @@ var allTestCasesTemp = []*lntemp.TestCase{ Name: "commitment deadline", TestFunc: testCommitmentTransactionDeadline, }, + { + Name: "channel force closure", + TestFunc: testChannelForceClosure, + }, } diff --git a/lntest/itest/lnd_channel_force_close_test.go b/lntest/itest/lnd_channel_force_close_test.go index a0767dcf1..e3c4e9b17 100644 --- a/lntest/itest/lnd_channel_force_close_test.go +++ b/lntest/itest/lnd_channel_force_close_test.go @@ -6,8 +6,8 @@ import ( "fmt" "testing" - "github.com/btcsuite/btcd/blockchain" "github.com/btcsuite/btcd/btcutil" + "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcd/wire" "github.com/go-errors/errors" "github.com/lightningnetwork/lnd" @@ -196,7 +196,7 @@ func testCommitmentTransactionDeadline(ht *lntemp.HarnessTest) { // process. // // TODO(roasbeef): also add an unsettled HTLC before force closing. 
-func testChannelForceClosure(net *lntest.NetworkHarness, t *harnessTest) { +func testChannelForceClosure(ht *lntemp.HarnessTest) { // We'll test the scenario for some of the commitment types, to ensure // outputs can be swept. commitTypes := []lnrpc.CommitmentType{ @@ -208,32 +208,30 @@ func testChannelForceClosure(net *lntest.NetworkHarness, t *harnessTest) { testName := fmt.Sprintf("committype=%v", channelType) channelType := channelType - success := t.t.Run(testName, func(t *testing.T) { - ht := newHarnessTest(t, net) + success := ht.Run(testName, func(t *testing.T) { + st := ht.Subtest(t) args := nodeArgsForCommitType(channelType) - alice := net.NewNode(ht.t, "Alice", args) - defer shutdownAndAssert(net, ht, alice) + alice := st.NewNode("Alice", args) + defer st.Shutdown(alice) // Since we'd like to test failure scenarios with // outstanding htlcs, we'll introduce another node into // our test network: Carol. carolArgs := []string{"--hodl.exit-settle"} carolArgs = append(carolArgs, args...) - carol := net.NewNode(ht.t, "Carol", carolArgs) - defer shutdownAndAssert(net, ht, carol) + carol := st.NewNode("Carol", carolArgs) + defer st.Shutdown(carol) // Each time, we'll send Alice new set of coins in // order to fund the channel. - net.SendCoins(t, btcutil.SatoshiPerBitcoin, alice) + st.FundCoins(btcutil.SatoshiPerBitcoin, alice) // Also give Carol some coins to allow her to sweep her // anchor. - net.SendCoins(t, btcutil.SatoshiPerBitcoin, carol) + st.FundCoins(btcutil.SatoshiPerBitcoin, carol) - channelForceClosureTest( - net, ht, alice, carol, channelType, - ) + channelForceClosureTest(st, alice, carol, channelType) }) if !success { return @@ -241,10 +239,8 @@ func testChannelForceClosure(net *lntest.NetworkHarness, t *harnessTest) { } } -func channelForceClosureTest(net *lntest.NetworkHarness, t *harnessTest, - alice, carol *lntest.HarnessNode, channelType lnrpc.CommitmentType) { - - ctxb := context.Background() +func channelForceClosureTest(ht *lntemp.HarnessTest, + alice, carol *node.HarnessNode, channelType lnrpc.CommitmentType) { const ( chanAmt = btcutil.Amount(10e6) @@ -254,7 +250,7 @@ func channelForceClosureTest(net *lntest.NetworkHarness, t *harnessTest, ) const commitFeeRate = 20000 - net.SetFeeEstimate(commitFeeRate) + ht.SetFeeEstimate(commitFeeRate) // TODO(roasbeef): should check default value in config here // instead, or make delay a param @@ -262,86 +258,48 @@ func channelForceClosureTest(net *lntest.NetworkHarness, t *harnessTest, // We must let Alice have an open channel before she can send a node // announcement, so we open a channel with Carol, - net.ConnectNodes(t.t, alice, carol) + ht.ConnectNodes(alice, carol) // We need one additional UTXO for sweeping the remote anchor. - net.SendCoins(t.t, btcutil.SatoshiPerBitcoin, alice) + ht.FundCoins(btcutil.SatoshiPerBitcoin, alice) // Before we start, obtain Carol's current wallet balance, we'll check // to ensure that at the end of the force closure by Alice, Carol // recognizes his new on-chain output. 
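The committype subtest loop above (ht.Run plus ht.Subtest, aborting on the first failure) is the pattern the new harness uses to run one scenario per commitment type with fresh nodes. A self-contained sketch of just that control flow, with stand-in strings instead of lnrpc.CommitmentType values:

package sketch

import (
	"fmt"
	"testing"
)

// runPerCommitType runs body once per commitment type in its own subtest
// and stops at the first failing subtest, mirroring the
// `if !success { return }` above.
func runPerCommitType(t *testing.T, body func(t *testing.T, commitType string)) {
	for _, commitType := range []string{"LEGACY", "ANCHORS"} {
		commitType := commitType // capture for the closure
		success := t.Run(fmt.Sprintf("committype=%v", commitType),
			func(t *testing.T) {
				body(t, commitType)
			},
		)
		if !success {
			return
		}
	}
}

func TestForceCloseSketch(t *testing.T) {
	runPerCommitType(t, func(t *testing.T, commitType string) {
		t.Logf("would run the force-close scenario for %v", commitType)
	})
}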
- carolBalReq := &lnrpc.WalletBalanceRequest{} - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - carolBalResp, err := carol.WalletBalance(ctxt, carolBalReq) - if err != nil { - t.Fatalf("unable to get carol's balance: %v", err) - } - + carolBalResp := carol.RPC.WalletBalance() carolStartingBalance := carolBalResp.ConfirmedBalance - chanPoint := openChannelAndAssert( - t, net, alice, carol, - lntest.OpenChannelParams{ + chanPoint := ht.OpenChannel( + alice, carol, lntemp.OpenChannelParams{ Amt: chanAmt, PushAmt: pushAmt, }, ) - // Wait for Alice and Carol to receive the channel edge from the - // funding manager. - err = alice.WaitForNetworkChannelOpen(chanPoint) - if err != nil { - t.Fatalf("alice didn't see the alice->carol channel before "+ - "timeout: %v", err) - } - err = carol.WaitForNetworkChannelOpen(chanPoint) - if err != nil { - t.Fatalf("alice didn't see the alice->carol channel before "+ - "timeout: %v", err) - } - // Send payments from Alice to Carol, since Carol is htlchodl mode, the // htlc outputs should be left unsettled, and should be swept by the // utxo nursery. carolPubKey := carol.PubKey[:] for i := 0; i < numInvoices; i++ { - ctx, cancel := context.WithCancel(ctxb) - defer cancel() - - _, err := alice.RouterClient.SendPaymentV2( - ctx, - &routerrpc.SendPaymentRequest{ - Dest: carolPubKey, - Amt: int64(paymentAmt), - PaymentHash: makeFakePayHash(t), - FinalCltvDelta: chainreg.DefaultBitcoinTimeLockDelta, - TimeoutSeconds: 60, - FeeLimitMsat: noFeeLimitMsat, - }, - ) - if err != nil { - t.Fatalf("unable to send alice htlc: %v", err) + req := &routerrpc.SendPaymentRequest{ + Dest: carolPubKey, + Amt: int64(paymentAmt), + PaymentHash: ht.Random32Bytes(), + FinalCltvDelta: chainreg.DefaultBitcoinTimeLockDelta, + TimeoutSeconds: 60, + FeeLimitMsat: noFeeLimitMsat, } + alice.RPC.SendPayment(req) } // Once the HTLC has cleared, all the nodes n our mini network should // show that the HTLC has been locked in. - nodes := []*lntest.HarnessNode{alice, carol} - var predErr error - err = wait.Predicate(func() bool { - predErr = assertNumActiveHtlcs(nodes, numInvoices) - return predErr == nil - }, defaultTimeout) - if err != nil { - t.Fatalf("htlc mismatch: %v", predErr) - } + ht.AssertNumActiveHtlcs(alice, numInvoices) + ht.AssertNumActiveHtlcs(carol, numInvoices) // Fetch starting height of this test so we can compute the block // heights we expect certain events to take place. - _, curHeight, err := net.Miner.Client.GetBestBlock() - if err != nil { - t.Fatalf("unable to get best block height") - } + _, curHeight := ht.Miner.GetBestBlock() // Using the current height of the chain, derive the relevant heights // for incubating two-stage htlcs. 
@@ -349,7 +307,9 @@ func channelForceClosureTest(net *lntest.NetworkHarness, t *harnessTest, startHeight = uint32(curHeight) commCsvMaturityHeight = startHeight + 1 + defaultCSV htlcExpiryHeight = padCLTV(startHeight + defaultCLTV) - htlcCsvMaturityHeight = padCLTV(startHeight + defaultCLTV + 1 + defaultCSV) + htlcCsvMaturityHeight = padCLTV( + startHeight + defaultCLTV + 1 + defaultCSV, + ) ) // If we are dealing with an anchor channel type, the sweeper will @@ -362,62 +322,30 @@ func channelForceClosureTest(net *lntest.NetworkHarness, t *harnessTest, ) } - aliceChan, err := getChanInfo(alice) - if err != nil { - t.Fatalf("unable to get alice's channel info: %v", err) - } - if aliceChan.NumUpdates == 0 { - t.Fatalf("alice should see at least one update to her channel") - } + aliceChan := ht.QueryChannelByChanPoint(alice, chanPoint) + require.NotZero(ht, aliceChan.NumUpdates, + "alice should see at least one update to her channel") // Now that the channel is open and we have unsettled htlcs, immediately // execute a force closure of the channel. This will also assert that // the commitment transaction was immediately broadcast in order to // fulfill the force closure request. const actualFeeRate = 30000 - net.SetFeeEstimate(actualFeeRate) + ht.SetFeeEstimate(actualFeeRate) - _, closingTxID, err := net.CloseChannel(alice, chanPoint, true) - if err != nil { - t.Fatalf("unable to execute force channel closure: %v", err) - } + ht.CloseChannelAssertPending(alice, chanPoint, true) // Now that the channel has been force closed, it should show up in the // PendingChannels RPC under the waiting close section. - pendingChansRequest := &lnrpc.PendingChannelsRequest{} - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - pendingChanResp, err := alice.PendingChannels(ctxt, pendingChansRequest) - if err != nil { - t.Fatalf("unable to query for pending channels: %v", err) - } - err = checkNumWaitingCloseChannels(pendingChanResp, 1) - if err != nil { - t.Fatalf(err.Error()) - } + waitingClose := ht.AssertChannelWaitingClose(alice, chanPoint) - // Compute the outpoint of the channel, which we will use repeatedly to - // locate the pending channel information in the rpc responses. - txid, err := lnrpc.GetChanPointFundingTxid(chanPoint) - if err != nil { - t.Fatalf("unable to get txid: %v", err) - } - op := wire.OutPoint{ - Hash: *txid, - Index: chanPoint.OutputIndex, - } + // Immediately after force closing, all of the funds should be in + // limbo. + require.NotZero(ht, waitingClose.LimboBalance, + "all funds should still be in limbo") - waitingClose, err := findWaitingCloseChannel(pendingChanResp, &op) - if err != nil { - t.Fatalf(err.Error()) - } - - // Immediately after force closing, all of the funds should be in limbo. - if waitingClose.LimboBalance == 0 { - t.Fatalf("all funds should still be in limbo") - } - - // Create a map of outpoints to expected resolutions for alice and carol - // which we will add reports to as we sweep outputs. + // Create a map of outpoints to expected resolutions for alice and + // carol which we will add reports to as we sweep outputs. 
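A worked example of the height bookkeeping above, with illustrative constants (the real defaultCSV/defaultCLTV values and the exact padCLTV margin are test parameters, not restated here):

package main

import "fmt"

func main() {
	const (
		startHeight uint32 = 400
		defaultCSV  uint32 = 4
		defaultCLTV uint32 = 40
		pad         uint32 = 3 // stand-in for padCLTV's margin
	)
	padCLTV := func(v uint32) uint32 { return v + pad }

	// The commitment output matures CSV blocks after the block that
	// confirms the force close.
	commCsvMaturityHeight := startHeight + 1 + defaultCSV // 405

	// First-stage HTLC timeout txns can go out at the CLTV expiry; the
	// resulting second-stage outputs then need one confirmation plus
	// another CSV delay before they can be swept.
	htlcExpiryHeight := padCLTV(startHeight + defaultCLTV) // 443
	htlcCsvMaturityHeight := padCLTV(
		startHeight + defaultCLTV + 1 + defaultCSV,
	) // 448

	fmt.Println(commCsvMaturityHeight, htlcExpiryHeight, htlcCsvMaturityHeight)
}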
var ( aliceReports = make(map[string]*lnrpc.Resolution) carolReports = make(map[string]*lnrpc.Resolution) @@ -425,19 +353,17 @@ func channelForceClosureTest(net *lntest.NetworkHarness, t *harnessTest, // The several restarts in this test are intended to ensure that when a // channel is force-closed, the UTXO nursery has persisted the state of - // the channel in the closure process and will recover the correct state - // when the system comes back on line. This restart tests state + // the channel in the closure process and will recover the correct + // state when the system comes back on line. This restart tests state // persistence at the beginning of the process, when the commitment // transaction has been broadcast but not yet confirmed in a block. - if err := net.RestartNode(alice, nil); err != nil { - t.Fatalf("Node restart failed: %v", err) - } + ht.RestartNode(alice) // To give the neutrino backend some time to catch up with the chain, we // wait here until we have enough UTXOs to actually sweep the local and // remote anchor. const expectedUtxos = 2 - assertNumUTXOs(t.t, alice, expectedUtxos) + ht.AssertNumUTXOs(alice, expectedUtxos) // Mine a block which should confirm the commitment transaction // broadcast as a result of the force closure. If there are anchors, we @@ -449,31 +375,19 @@ func channelForceClosureTest(net *lntest.NetworkHarness, t *harnessTest, expectedFeeRate = actualFeeRate } - sweepTxns, err := getNTxsFromMempool( - net.Miner.Client, expectedTxes, minerMempoolTimeout, - ) - require.NoError(t.t, err, "sweep txns in miner mempool") + sweepTxns := ht.Miner.GetNumTxsFromMempool(expectedTxes) // Verify fee rate of the commitment tx plus anchor if present. - var totalWeight, totalFee int64 - for _, tx := range sweepTxns { - utx := btcutil.NewTx(tx) - totalWeight += blockchain.GetTransactionWeight(utx) - - fee, err := getTxFee(net.Miner.Client, tx) - require.NoError(t.t, err) - totalFee += int64(fee) - } - feeRate := totalFee * 1000 / totalWeight + feeRate := ht.CalculateTxesFeeRate(sweepTxns) // Allow some deviation because weight estimates during tx generation // are estimates. - require.InEpsilon(t.t, expectedFeeRate, feeRate, 0.005) + require.InEpsilon(ht, expectedFeeRate, feeRate, 0.005) // Find alice's commit sweep and anchor sweep (if present) in the // mempool. aliceCloseTx := waitingClose.Commitments.LocalTxid - _, aliceAnchor := findCommitAndAnchor(t, net, sweepTxns, aliceCloseTx) + _, aliceAnchor := ht.FindCommitAndAnchor(sweepTxns, aliceCloseTx) // If we expect anchors, add alice's anchor to our expected set of // reports. @@ -491,35 +405,18 @@ func channelForceClosureTest(net *lntest.NetworkHarness, t *harnessTest, } } - if _, err := net.Miner.Client.Generate(1); err != nil { - t.Fatalf("unable to generate block: %v", err) - } + ht.MineBlocks(1) // Now that the commitment has been confirmed, the channel should be // marked as force closed. 
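The aliceReports/carolReports maps above accumulate expected lnrpc.Resolution entries keyed by the swept outpoint's txid:index string, to be compared against ClosedChannels at the end of the test. A simplified stand-in for that bookkeeping (the struct abbreviates the lnrpc message; values are made up):

package main

import "fmt"

// resolution abbreviates lnrpc.Resolution for illustration.
type resolution struct {
	resolutionType string // e.g. ANCHOR, COMMIT, OUTGOING_HTLC
	outcome        string // e.g. CLAIMED, TIMEOUT
	sweepTxid      string
	amountSat      uint64
}

func main() {
	reports := make(map[string]*resolution)

	// Record an anchor sweep the same way the test keys its maps: by the
	// "txid:output_index" of the swept outpoint.
	op := fmt.Sprintf("%v:%v", "d3adb33f", 0)
	reports[op] = &resolution{
		resolutionType: "ANCHOR",
		outcome:        "CLAIMED",
		sweepTxid:      "aa11",
		amountSat:      330,
	}

	fmt.Println(reports[op].outcome) // CLAIMED
}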
- err = wait.NoError(func() error { - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - pendingChanResp, err := alice.PendingChannels( - ctxt, pendingChansRequest, + err := wait.NoError(func() error { + forceClose := ht.AssertChannelPendingForceClose( + alice, chanPoint, ) - if err != nil { - return fmt.Errorf("unable to query for pending "+ - "channels: %v", err) - } - - err = checkNumForceClosedChannels(pendingChanResp, 1) - if err != nil { - return err - } - - forceClose, err := findForceClosedChannel(pendingChanResp, &op) - if err != nil { - return err - } // Now that the channel has been force closed, it should now // have the height and number of blocks to confirm populated. - err = checkCommitmentMaturity( + err := checkCommitmentMaturity( forceClose, commCsvMaturityHeight, int32(defaultCSV), ) if err != nil { @@ -544,42 +441,30 @@ func channelForceClosureTest(net *lntest.NetworkHarness, t *harnessTest, return nil }, defaultTimeout) - if err != nil { - t.Fatalf(predErr.Error()) - } + require.NoError(ht, err, "timeout while checking force closed channel") // The following restart is intended to ensure that outputs from the // force close commitment transaction have been persisted once the // transaction has been confirmed, but before the outputs are spendable // (the "kindergarten" bucket.) - if err := net.RestartNode(alice, nil); err != nil { - t.Fatalf("Node restart failed: %v", err) - } + ht.RestartNode(alice) // Carol's sweep tx should be in the mempool already, as her output is // not timelocked. If there are anchors, we also expect Carol's anchor // sweep now. - sweepTxns, err = getNTxsFromMempool( - net.Miner.Client, expectedTxes, minerMempoolTimeout, - ) - if err != nil { - t.Fatalf("failed to find Carol's sweep in miner mempool: %v", - err) - } + sweepTxns = ht.Miner.GetNumTxsFromMempool(expectedTxes) // Calculate the total fee Carol paid. var totalFeeCarol btcutil.Amount for _, tx := range sweepTxns { - fee, err := getTxFee(net.Miner.Client, tx) - require.NoError(t.t, err) - + fee := ht.CalculateTxFee(tx) totalFeeCarol += fee } // We look up the sweep txns we have found in mempool and create // expected resolutions for carol. - carolCommit, carolAnchor := findCommitAndAnchor( - t, net, sweepTxns, aliceCloseTx, + carolCommit, carolAnchor := ht.FindCommitAndAnchor( + sweepTxns, aliceCloseTx, ) // If we have anchors, add an anchor resolution for carol. @@ -601,41 +486,28 @@ func channelForceClosureTest(net *lntest.NetworkHarness, t *harnessTest, // For the persistence test, we generate two blocks, then trigger // a restart and then generate the final block that should trigger // the creation of the sweep transaction. - if _, err := net.Miner.Client.Generate(defaultCSV - 2); err != nil { - t.Fatalf("unable to mine blocks: %v", err) - } + ht.MineBlocks(defaultCSV - 2) // The following restart checks to ensure that outputs in the // kindergarten bucket are persisted while waiting for the required // number of confirmations to be reported. - if err := net.RestartNode(alice, nil); err != nil { - t.Fatalf("Node restart failed: %v", err) - } + ht.RestartNode(alice) // Alice should see the channel in her set of pending force closed // channels with her funds still in limbo. 
var aliceBalance int64 + var closingTxID *chainhash.Hash err = wait.NoError(func() error { - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - pendingChanResp, err := alice.PendingChannels( - ctxt, pendingChansRequest, + forceClose := ht.AssertChannelPendingForceClose( + alice, chanPoint, ) - if err != nil { - return fmt.Errorf("unable to query for pending "+ - "channels: %v", err) - } - err = checkNumForceClosedChannels(pendingChanResp, 1) - if err != nil { - return err - } - - forceClose, err := findForceClosedChannel( - pendingChanResp, &op, - ) + // Get the closing txid. + txid, err := chainhash.NewHashFromStr(forceClose.ClosingTxid) if err != nil { return err } + closingTxID = txid // Make a record of the balances we expect for alice and carol. aliceBalance = forceClose.Channel.LocalBalance @@ -668,38 +540,23 @@ func channelForceClosureTest(net *lntest.NetworkHarness, t *harnessTest, return nil }, defaultTimeout) - if err != nil { - t.Fatalf(err.Error()) - } + require.NoError(ht, err, "timeout while checking force closed channel") // Generate an additional block, which should cause the CSV delayed // output from the commitment txn to expire. - if _, err := net.Miner.Client.Generate(1); err != nil { - t.Fatalf("unable to mine blocks: %v", err) - } + ht.MineBlocks(1) // At this point, the CSV will expire in the next block, meaning that // the sweeping transaction should now be broadcast. So we fetch the // node's mempool to ensure it has been properly broadcast. - sweepingTXID, err := waitForTxInMempool( - net.Miner.Client, minerMempoolTimeout, - ) - if err != nil { - t.Fatalf("failed to get sweep tx from mempool: %v", err) - } + sweepingTXID := ht.Miner.AssertNumTxsInMempool(1)[0] // Fetch the sweep transaction, all input it's spending should be from // the commitment transaction which was broadcast on-chain. - sweepTx, err := net.Miner.Client.GetRawTransaction(sweepingTXID) - if err != nil { - t.Fatalf("unable to fetch sweep tx: %v", err) - } + sweepTx := ht.Miner.GetRawTransaction(sweepingTXID) for _, txIn := range sweepTx.MsgTx().TxIn { - if !closingTxID.IsEqual(&txIn.PreviousOutPoint.Hash) { - t.Fatalf("sweep transaction not spending from commit "+ - "tx %v, instead spending %v", - closingTxID, txIn.PreviousOutPoint) - } + require.Equal(ht, &txIn.PreviousOutPoint.Hash, + closingTxID, "sweep transaction not spending from commit") } // We expect a resolution which spends our commit output. @@ -730,127 +587,77 @@ func channelForceClosureTest(net *lntest.NetworkHarness, t *harnessTest, // Check that we can find the commitment sweep in our set of known // sweeps, using the simple transaction id ListSweeps output. - assertSweepFound(t.t, alice, sweepingTXID.String(), false) + ht.AssertSweepFound(alice, sweepingTXID.String(), false) // Restart Alice to ensure that she resumes watching the finalized // commitment sweep txid. - if err := net.RestartNode(alice, nil); err != nil { - t.Fatalf("Node restart failed: %v", err) - } + ht.RestartNode(alice) // Next, we mine an additional block which should include the sweep // transaction as the input scripts and the sequence locks on the // inputs should be properly met. 
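The input check above is worth spelling out: every input of the commitment sweep must spend an output of the closing transaction, nothing else. A standalone sketch of the same check using the btcd types already imported by the harness:

package main

import (
	"fmt"

	"github.com/btcsuite/btcd/chaincfg/chainhash"
	"github.com/btcsuite/btcd/wire"
)

// assertSpendsFrom returns an error if any input of sweep does not spend an
// output of the transaction identified by closeTxid.
func assertSpendsFrom(sweep *wire.MsgTx, closeTxid *chainhash.Hash) error {
	for _, txIn := range sweep.TxIn {
		if !closeTxid.IsEqual(&txIn.PreviousOutPoint.Hash) {
			return fmt.Errorf("input spends %v, not the close tx",
				txIn.PreviousOutPoint)
		}
	}
	return nil
}

func main() {
	closeTxid := &chainhash.Hash{0x01} // placeholder txid
	sweep := wire.NewMsgTx(2)
	sweep.AddTxIn(wire.NewTxIn(wire.NewOutPoint(closeTxid, 0), nil, nil))

	fmt.Println(assertSpendsFrom(sweep, closeTxid)) // <nil>
}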
- blockHash, err := net.Miner.Client.Generate(1) - if err != nil { - t.Fatalf("unable to generate block: %v", err) - } - block, err := net.Miner.Client.GetBlock(blockHash[0]) - if err != nil { - t.Fatalf("unable to get block: %v", err) - } - - assertTxInBlock(t, block, sweepTx.Hash()) + block := ht.MineBlocks(1)[0] + ht.Miner.AssertTxInBlock(block, sweepTx.Hash()) // Update current height - _, curHeight, err = net.Miner.Client.GetBestBlock() - if err != nil { - t.Fatalf("unable to get best block height") - } + _, curHeight = ht.Miner.GetBestBlock() - err = wait.Predicate(func() bool { - // Now that the commit output has been fully swept, check to see - // that the channel remains open for the pending htlc outputs. - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - pendingChanResp, err := alice.PendingChannels( - ctxt, pendingChansRequest, + err = wait.NoError(func() error { + // Now that the commit output has been fully swept, check to + // see that the channel remains open for the pending htlc + // outputs. + forceClose := ht.AssertChannelPendingForceClose( + alice, chanPoint, ) - if err != nil { - predErr = fmt.Errorf("unable to query for pending "+ - "channels: %v", err) - return false - } - - err = checkNumForceClosedChannels(pendingChanResp, 1) - if err != nil { - predErr = err - return false - } // The commitment funds will have been recovered after the // commit txn was included in the last block. The htlc funds // will be shown in limbo. - forceClose, err := findForceClosedChannel(pendingChanResp, &op) + err := checkPendingChannelNumHtlcs(forceClose, numInvoices) if err != nil { - predErr = err - return false + return err } - predErr = checkPendingChannelNumHtlcs(forceClose, numInvoices) - if predErr != nil { - return false - } - predErr = checkPendingHtlcStageAndMaturity( + + err = checkPendingHtlcStageAndMaturity( forceClose, 1, htlcExpiryHeight, int32(htlcExpiryHeight)-curHeight, ) - if predErr != nil { - return false - } - if forceClose.LimboBalance == 0 { - predErr = fmt.Errorf("expected funds in limbo, found 0") - return false + if err != nil { + return err } - return true + if forceClose.LimboBalance == 0 { + return fmt.Errorf("expected funds in limbo, found 0") + } + + return nil }, defaultTimeout) - if err != nil { - t.Fatalf(predErr.Error()) - } + require.NoError(ht, err, "timeout checking pending "+ + "force close channel") // Compute the height preceding that which will cause the htlc CLTV // timeouts will expire. The outputs entered at the same height as the - // output spending from the commitment txn, so we must deduct the number - // of blocks we have generated since adding it to the nursery, and take - // an additional block off so that we end up one block shy of the expiry - // height, and add the block padding. + // output spending from the commitment txn, so we must deduct the + // number of blocks we have generated since adding it to the nursery, + // and take an additional block off so that we end up one block shy of + // the expiry height, and add the block padding. cltvHeightDelta := padCLTV(defaultCLTV - defaultCSV - 1 - 1) // Advance the blockchain until just before the CLTV expires, nothing // exciting should have happened during this time. 
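The cltvHeightDelta computation above deserves a worked example. Since the force close we have mined 1 (commitment confirmation) + (defaultCSV-2) + 1 (CSV trigger) + 1 (commitment sweep confirmation) = defaultCSV+1 blocks, so mining padCLTV(defaultCLTV - defaultCSV - 1 - 1) more lands exactly one block shy of the HTLC expiry height. With the same illustrative constants as before:

package main

import "fmt"

func main() {
	const (
		startHeight uint32 = 400
		defaultCSV  uint32 = 4
		defaultCLTV uint32 = 40
		pad         uint32 = 3 // stand-in for padCLTV's margin
	)
	padCLTV := func(v uint32) uint32 { return v + pad }

	curHeight := startHeight + defaultCSV + 1          // 405, after the sweep confirms
	htlcExpiry := padCLTV(startHeight + defaultCLTV)   // 443
	delta := padCLTV(defaultCLTV - defaultCSV - 1 - 1) // 37

	fmt.Println(curHeight+delta == htlcExpiry-1) // true: one block shy
}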
- if _, err := net.Miner.Client.Generate(cltvHeightDelta); err != nil { - t.Fatalf("unable to generate block: %v", err) - } + ht.MineBlocks(cltvHeightDelta) // We now restart Alice, to ensure that she will broadcast the presigned // htlc timeout txns after the delay expires after experiencing a while // waiting for the htlc outputs to incubate. - if err := net.RestartNode(alice, nil); err != nil { - t.Fatalf("Node restart failed: %v", err) - } + ht.RestartNode(alice) // Alice should now see the channel in her set of pending force closed // channels with one pending HTLC. err = wait.NoError(func() error { - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - pendingChanResp, err := alice.PendingChannels( - ctxt, pendingChansRequest, + forceClose := ht.AssertChannelPendingForceClose( + alice, chanPoint, ) - if err != nil { - return fmt.Errorf("unable to query for pending "+ - "channels: %v", err) - } - - err = checkNumForceClosedChannels(pendingChanResp, 1) - if err != nil { - return err - } - - forceClose, err := findForceClosedChannel( - pendingChanResp, &op, - ) - if err != nil { - return err - } // We should now be at the block just before the utxo nursery // will attempt to broadcast the htlc timeout transactions. @@ -876,15 +683,11 @@ func channelForceClosureTest(net *lntest.NetworkHarness, t *harnessTest, return nil }, defaultTimeout) - if err != nil { - t.Fatalf(err.Error()) - } + require.NoError(ht, err, "timeout while checking force closed channel") // Now, generate the block which will cause Alice to broadcast the // presigned htlc timeout txns. - if _, err = net.Miner.Client.Generate(1); err != nil { - t.Fatalf("unable to generate block: %v", err) - } + ht.MineBlocks(1) // Since Alice had numInvoices (6) htlcs extended to Carol before force // closing, we expect Alice to broadcast an htlc timeout txn for each @@ -897,12 +700,7 @@ func channelForceClosureTest(net *lntest.NetworkHarness, t *harnessTest, } // Wait for them all to show up in the mempool. - htlcTxIDs, err := waitForNTxsInMempool( - net.Miner.Client, expectedTxes, minerMempoolTimeout, - ) - if err != nil { - t.Fatalf("unable to find htlc timeout txns in mempool: %v", err) - } + htlcTxIDs := ht.Miner.AssertNumTxsInMempool(expectedTxes) // Retrieve each htlc timeout txn from the mempool, and ensure it is // well-formed. This entails verifying that each only spends from @@ -927,25 +725,16 @@ func channelForceClosureTest(net *lntest.NetworkHarness, t *harnessTest, // on-chain. In case of an anchor type channel, we expect one // extra input that is not spending from the commitment, that // is added for fees. - htlcTx, err := net.Miner.Client.GetRawTransaction(htlcTxID) - if err != nil { - t.Fatalf("unable to fetch sweep tx: %v", err) - } + htlcTx := ht.Miner.GetRawTransaction(htlcTxID) // Ensure the htlc transaction has the expected number of // inputs. inputs := htlcTx.MsgTx().TxIn - if len(inputs) != numInputs { - t.Fatalf("htlc transaction should only have %d txin, "+ - "has %d", numInputs, len(htlcTx.MsgTx().TxIn)) - } + require.Len(ht, inputs, numInputs, "num inputs mismatch") // The number of outputs should be the same. 
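On the numInputs arithmetic above: for anchor channels the presigned HTLC timeout transaction is signed such that extra inputs and outputs can be attached (SIGHASH_SINGLE|ANYONECANPAY in the anchors design), so the sweeper adds exactly one wallet input, and a matching change output, to pay fees; legacy channels allow none. The counting rule the assertions encode, as a small sketch:

package main

import "fmt"

// expectedHtlcTimeoutTxInputs returns how many inputs (and, in this test's
// setup, outputs) an HTLC timeout tx should carry.
func expectedHtlcTimeoutTxInputs(anchors bool) int {
	if anchors {
		return 2 // the HTLC output from the commit tx + one fee input
	}
	return 1 // only the HTLC output from the commit tx
}

func main() {
	fmt.Println(expectedHtlcTimeoutTxInputs(false)) // 1
	fmt.Println(expectedHtlcTimeoutTxInputs(true))  // 2
}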
outputs := htlcTx.MsgTx().TxOut - if len(outputs) != numInputs { - t.Fatalf("htlc transaction should only have %d"+ - "txout, has: %v", numInputs, len(outputs)) - } + require.Len(ht, outputs, numInputs, "num outputs mismatch") // Ensure all the htlc transaction inputs are spending from the // commitment transaction, except if this is an extra input @@ -955,13 +744,11 @@ func channelForceClosureTest(net *lntest.NetworkHarness, t *harnessTest, if !closingTxID.IsEqual(&txIn.PreviousOutPoint.Hash) { nonCommitmentInputs++ - if nonCommitmentInputs > 1 { - t.Fatalf("htlc transaction not "+ + require.Lessf(ht, nonCommitmentInputs, 2, + "htlc transaction not "+ "spending from commit "+ "tx %v, instead spending %v", - closingTxID, - txIn.PreviousOutPoint) - } + closingTxID, txIn.PreviousOutPoint) // This was an extra input added to pay fees, // continue to the next one. @@ -1018,22 +805,16 @@ func channelForceClosureTest(net *lntest.NetworkHarness, t *harnessTest, // With the htlc timeout txns still in the mempool, we restart Alice to // verify that she can resume watching the htlc txns she broadcasted // before crashing. - if err := net.RestartNode(alice, nil); err != nil { - t.Fatalf("Node restart failed: %v", err) - } + ht.RestartNode(alice) // Generate a block that mines the htlc timeout txns. Doing so now // activates the 2nd-stage CSV delayed outputs. - if _, err = net.Miner.Client.Generate(1); err != nil { - t.Fatalf("unable to generate block: %v", err) - } + ht.MineBlocks(1) // Alice is restarted here to ensure that she promptly moved the crib // outputs to the kindergarten bucket after the htlc timeout txns were // confirmed. - if err := net.RestartNode(alice, nil); err != nil { - t.Fatalf("Node restart failed: %v", err) - } + ht.RestartNode(alice) // Advance the chain until just before the 2nd-layer CSV delays expire. // For anchor channels thhis is one block earlier. @@ -1041,101 +822,60 @@ func channelForceClosureTest(net *lntest.NetworkHarness, t *harnessTest, if channelType == lnrpc.CommitmentType_ANCHORS { numBlocks = defaultCSV - 2 } - _, err = net.Miner.Client.Generate(numBlocks) - if err != nil { - t.Fatalf("unable to generate block: %v", err) - } + ht.MineBlocks(numBlocks) // Restart Alice to ensure that she can recover from a failure before // having graduated the htlc outputs in the kindergarten bucket. - if err := net.RestartNode(alice, nil); err != nil { - t.Fatalf("Node restart failed: %v", err) - } + ht.RestartNode(alice) // Now that the channel has been fully swept, it should no longer show // incubated, check to see that Alice's node still reports the channel // as pending force closed. 
- err = wait.Predicate(func() bool { - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - pendingChanResp, err = alice.PendingChannels( - ctxt, pendingChansRequest, + err = wait.NoError(func() error { + forceClose := ht.AssertChannelPendingForceClose( + alice, chanPoint, ) - if err != nil { - predErr = fmt.Errorf("unable to query for pending "+ - "channels: %v", err) - return false - } - err = checkNumForceClosedChannels(pendingChanResp, 1) - if err != nil { - predErr = err - return false - } - - forceClose, err := findForceClosedChannel(pendingChanResp, &op) - if err != nil { - predErr = err - return false - } if forceClose.LimboBalance == 0 { - predErr = fmt.Errorf("htlc funds should still be in limbo") - return false + return fmt.Errorf("htlc funds should still be in limbo") } - predErr = checkPendingChannelNumHtlcs(forceClose, numInvoices) - return predErr == nil + return checkPendingChannelNumHtlcs(forceClose, numInvoices) }, defaultTimeout) - if err != nil { - t.Fatalf(predErr.Error()) - } + require.NoError(ht, err, "timeout while checking force closed channel") // Generate a block that causes Alice to sweep the htlc outputs in the // kindergarten bucket. - if _, err := net.Miner.Client.Generate(1); err != nil { - t.Fatalf("unable to generate block: %v", err) - } + ht.MineBlocks(1) // Wait for the single sweep txn to appear in the mempool. - htlcSweepTxID, err := waitForTxInMempool( - net.Miner.Client, minerMempoolTimeout, - ) - if err != nil { - t.Fatalf("failed to get sweep tx from mempool: %v", err) - } + htlcSweepTxID := ht.Miner.AssertNumTxsInMempool(1)[0] // Fetch the htlc sweep transaction from the mempool. - htlcSweepTx, err := net.Miner.Client.GetRawTransaction(htlcSweepTxID) - if err != nil { - t.Fatalf("unable to fetch sweep tx: %v", err) - } + htlcSweepTx := ht.Miner.GetRawTransaction(htlcSweepTxID) + // Ensure the htlc sweep transaction only has one input for each htlc // Alice extended before force closing. - if len(htlcSweepTx.MsgTx().TxIn) != numInvoices { - t.Fatalf("htlc transaction should have %d txin, "+ - "has %d", numInvoices, len(htlcSweepTx.MsgTx().TxIn)) - } - outputCount := len(htlcSweepTx.MsgTx().TxOut) - if outputCount != 1 { - t.Fatalf("htlc sweep transaction should have one output, has: "+ - "%v", outputCount) - } + require.Len(ht, htlcSweepTx.MsgTx().TxIn, numInvoices, + "htlc transaction has wrong num of inputs") + require.Len(ht, htlcSweepTx.MsgTx().TxOut, 1, + "htlc sweep transaction should have one output") // Ensure that each output spends from exactly one htlc timeout output. for _, txIn := range htlcSweepTx.MsgTx().TxIn { outpoint := txIn.PreviousOutPoint // Check that the input is a confirmed htlc timeout txn. - if _, ok := htlcTxOutpointSet[outpoint]; !ok { - t.Fatalf("htlc sweep output not spending from htlc "+ - "tx, instead spending output %v", outpoint) - } + _, ok := htlcTxOutpointSet[outpoint] + require.Truef(ht, ok, "htlc sweep output not spending from "+ + "htlc tx, instead spending output %v", outpoint) + // Increment our count for how many times this output was spent. htlcTxOutpointSet[outpoint]++ // Check that each is only spent once. - if htlcTxOutpointSet[outpoint] > 1 { - t.Fatalf("htlc sweep tx has multiple spends from "+ + require.Lessf(ht, htlcTxOutpointSet[outpoint], 2, + "htlc sweep tx has multiple spends from "+ "outpoint %v", outpoint) - } // Since we have now swept our htlc timeout tx, we expect to // have timeout resolutions for each of our htlcs. 
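The htlcTxOutpointSet map above implements spent-exactly-once accounting: each HTLC timeout output gets a counter, the sweep's inputs increment it, and every counter must end at one, so no output is missed and none is double-spent. A standalone sketch of that accounting:

package main

import "fmt"

func checkSpentExactlyOnce(outputs, sweepInputs []string) error {
	seen := make(map[string]int, len(outputs))
	for _, op := range outputs {
		seen[op] = 0
	}

	for _, in := range sweepInputs {
		if _, ok := seen[in]; !ok {
			return fmt.Errorf("sweep spends unknown outpoint %v", in)
		}
		seen[in]++
	}

	for op, n := range seen {
		if n != 1 {
			return fmt.Errorf("outpoint %v spent %d times", op, n)
		}
	}

	return nil
}

func main() {
	outs := []string{"a:0", "b:0", "c:0"}
	fmt.Println(checkSpentExactlyOnce(outs, []string{"a:0", "b:0", "c:0"})) // <nil>
	fmt.Println(checkSpentExactlyOnce(outs, []string{"a:0", "a:0", "b:0"})) // error
}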
@@ -1155,111 +895,68 @@ func channelForceClosureTest(net *lntest.NetworkHarness, t *harnessTest,
 
 	// Check that each HTLC output was spent exactly once.
 	for op, num := range htlcTxOutpointSet {
-		if num != 1 {
-			t.Fatalf("HTLC outpoint %v was spent %v times", op, num)
-		}
+		require.Equalf(ht, 1, num,
+			"HTLC outpoint:%s was spent %d times", op, num)
 	}
 
 	// Check that we can find the htlc sweep in our set of sweeps using
 	// the verbose output of the listsweeps output.
-	assertSweepFound(t.t, alice, htlcSweepTx.Hash().String(), true)
+	ht.AssertSweepFound(alice, htlcSweepTx.Hash().String(), true)
 
 	// The following restart checks to ensure that the nursery store is
 	// storing the txid of the previously broadcast htlc sweep txn, and that
 	// it begins watching that txid after restarting.
-	if err := net.RestartNode(alice, nil); err != nil {
-		t.Fatalf("Node restart failed: %v", err)
-	}
+	ht.RestartNode(alice)
 
 	// Now that the channel has been fully swept, it should no longer show
 	// incubated, check to see that Alice's node still reports the channel
 	// as pending force closed.
-	err = wait.Predicate(func() bool {
-		ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
-		pendingChanResp, err := alice.PendingChannels(
-			ctxt, pendingChansRequest,
+	err = wait.NoError(func() error {
+		forceClose := ht.AssertChannelPendingForceClose(
+			alice, chanPoint,
 		)
+		err := checkPendingChannelNumHtlcs(forceClose, numInvoices)
 		if err != nil {
-			predErr = fmt.Errorf("unable to query for pending "+
-				"channels: %v", err)
-			return false
-		}
-		err = checkNumForceClosedChannels(pendingChanResp, 1)
-		if err != nil {
-			predErr = err
-			return false
+			return err
 		}
 
-		// All htlcs should show zero blocks until maturity, as
-		// evidenced by having checked the sweep transaction in the
-		// mempool.
-		forceClose, err := findForceClosedChannel(pendingChanResp, &op)
-		if err != nil {
-			predErr = err
-			return false
-		}
-		predErr = checkPendingChannelNumHtlcs(forceClose, numInvoices)
-		if predErr != nil {
-			return false
-		}
 		err = checkPendingHtlcStageAndMaturity(
 			forceClose, 2, htlcCsvMaturityHeight, 0,
 		)
 		if err != nil {
-			predErr = err
-			return false
+			return err
 		}
 
-		return true
+		return nil
 	}, defaultTimeout)
-	if err != nil {
-		t.Fatalf(predErr.Error())
-	}
+	require.NoError(ht, err, "timeout while checking force closed channel")
 
 	// Generate the final block that sweeps all htlc funds into the user's
 	// wallet, and make sure the sweep is in this block.
-	block = mineBlocks(t, net, 1, 1)[0]
-	assertTxInBlock(t, block, htlcSweepTxID)
+	block = ht.MineBlocksAndAssertNumTxes(1, 1)[0]
+	ht.Miner.AssertTxInBlock(block, htlcSweepTxID)
 
 	// Now that the channel has been fully swept, it should no longer show
 	// up within the pending channels RPC.
-	err = wait.Predicate(func() bool {
-		ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
-		pendingChanResp, err := alice.PendingChannels(
-			ctxt, pendingChansRequest,
-		)
-		if err != nil {
-			predErr = fmt.Errorf("unable to query for pending "+
-				"channels: %v", err)
-			return false
-		}
-
-		predErr = checkNumForceClosedChannels(pendingChanResp, 0)
-		if predErr != nil {
-			return false
-		}
-
+	err = wait.NoError(func() error {
+		ht.AssertNumPendingForceClose(alice, 0)
 		// In addition to there being no pending channels, we verify
 		// that pending channels does not report any money still in
 		// limbo.
+ pendingChanResp := alice.RPC.PendingChannels() if pendingChanResp.TotalLimboBalance != 0 { - predErr = errors.New("no user funds should be left " + + return errors.New("no user funds should be left " + "in limbo after incubation") - return false } - return true + return nil }, defaultTimeout) - if err != nil { - t.Fatalf(predErr.Error()) - } + require.NoError(ht, err, "timeout checking limbo balance") // At this point, Carol should now be aware of her new immediately // spendable on-chain balance, as it was Alice who broadcast the // commitment transaction. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - carolBalResp, err = carol.WalletBalance(ctxt, carolBalReq) - require.NoError(t.t, err, "unable to get carol's balance") + carolBalResp = carol.RPC.WalletBalance() // Carol's expected balance should be its starting balance plus the // push amount sent by Alice and minus the miner fee paid. @@ -1272,16 +969,14 @@ func channelForceClosureTest(net *lntest.NetworkHarness, t *harnessTest, carolExpectedBalance += btcutil.Amount(anchorSize) } - require.Equal( - t.t, carolExpectedBalance, + require.Equal(ht, carolExpectedBalance, btcutil.Amount(carolBalResp.ConfirmedBalance), - "carol's balance is incorrect", - ) + "carol's balance is incorrect") // Finally, we check that alice and carol have the set of resolutions // we expect. - assertReports(t, alice, op, aliceReports) - assertReports(t, carol, op, carolReports) + assertReports(ht, alice, chanPoint, aliceReports) + assertReports(ht, carol, chanPoint, carolReports) } // padCLTV is a small helper function that pads a cltv value with a block @@ -1299,6 +994,7 @@ type sweptOutput struct { // mempool. Our anchor output is identified by having multiple inputs, because // we have to bring another input to add fees to the anchor. Note that the // anchor swept output may be nil if the channel did not have anchors. +// TODO(yy): delete. func findCommitAndAnchor(t *harnessTest, net *lntest.NetworkHarness, sweepTxns []*wire.MsgTx, closeTx string) (*sweptOutput, *sweptOutput) { @@ -1528,3 +1224,37 @@ func testFailingChannel(net *lntest.NetworkHarness, t *harnessTest) { t.Fatalf("%v", predErr) } } + +// assertReports checks that the count of resolutions we have present per +// type matches a set of expected resolutions. +// +// NOTE: only used in current test file. +func assertReports(ht *lntemp.HarnessTest, hn *node.HarnessNode, + chanPoint *lnrpc.ChannelPoint, expected map[string]*lnrpc.Resolution) { + + op := ht.OutPointFromChannelPoint(chanPoint) + + // Get our node's closed channels. 
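To make Carol's expected-balance check above concrete, here is the arithmetic with illustrative numbers; for anchor channel types she additionally recovers the 330-satoshi anchor output (anchorSize), and totalFeeCarol is whatever her sweeps paid in fees:

package main

import "fmt"

func main() {
	const (
		startingBalance int64 = 100_000_000 // 1 BTC funded earlier
		pushAmt         int64 = 500_000     // pushed by Alice on open
		totalFeeCarol   int64 = 10_000      // fees on Carol's sweeps
		anchorSize      int64 = 330
	)

	expected := startingBalance + pushAmt - totalFeeCarol
	expected += anchorSize // anchor channel types only

	fmt.Println(expected) // 100490330
}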
+ req := &lnrpc.ClosedChannelsRequest{Abandoned: false} + closed := hn.RPC.ClosedChannels(req) + + var resolutions []*lnrpc.Resolution + for _, close := range closed.Channels { + if close.ChannelPoint == op.String() { + resolutions = close.Resolutions + break + } + } + + require.NotNil(ht, resolutions) + require.Equal(ht, len(expected), len(resolutions)) + + for _, res := range resolutions { + outPointStr := fmt.Sprintf("%v:%v", res.Outpoint.TxidStr, + res.Outpoint.OutputIndex) + + expected, ok := expected[outPointStr] + require.True(ht, ok) + require.Equal(ht, expected, res) + } +} diff --git a/lntest/itest/lnd_test_list_on_test.go b/lntest/itest/lnd_test_list_on_test.go index 2323ed5d1..df7c87cb4 100644 --- a/lntest/itest/lnd_test_list_on_test.go +++ b/lntest/itest/lnd_test_list_on_test.go @@ -8,10 +8,6 @@ var allTestCases = []*testCase{ name: "open channel reorg test", test: testOpenChannelAfterReorg, }, - { - name: "channel force closure", - test: testChannelForceClosure, - }, { name: "single hop invoice", test: testSingleHopInvoice, From 966e037d53c5b31c2e7eef453484b86b551cc55a Mon Sep 17 00:00:00 2001 From: yyforyongyu Date: Thu, 4 Aug 2022 22:28:27 +0800 Subject: [PATCH 38/47] itest: refactor `testFailingChannel` --- lntest/itest/list_on_test.go | 4 + lntest/itest/lnd_channel_force_close_test.go | 175 +++---------------- lntest/itest/lnd_test_list_on_test.go | 4 - 3 files changed, 32 insertions(+), 151 deletions(-) diff --git a/lntest/itest/list_on_test.go b/lntest/itest/list_on_test.go index b6ff5840f..c8a2d0266 100644 --- a/lntest/itest/list_on_test.go +++ b/lntest/itest/list_on_test.go @@ -215,4 +215,8 @@ var allTestCasesTemp = []*lntemp.TestCase{ Name: "channel force closure", TestFunc: testChannelForceClosure, }, + { + Name: "failing link", + TestFunc: testFailingChannel, + }, } diff --git a/lntest/itest/lnd_channel_force_close_test.go b/lntest/itest/lnd_channel_force_close_test.go index e3c4e9b17..26d425c6f 100644 --- a/lntest/itest/lnd_channel_force_close_test.go +++ b/lntest/itest/lnd_channel_force_close_test.go @@ -2,7 +2,6 @@ package itest import ( "bytes" - "context" "fmt" "testing" @@ -1036,31 +1035,23 @@ func findCommitAndAnchor(t *harnessTest, net *lntest.NetworkHarness, return commitSweep, anchorSweep } -// testFailingChannel tests that we will fail the channel by force closing ii +// testFailingChannel tests that we will fail the channel by force closing it // in the case where a counterparty tries to settle an HTLC with the wrong // preimage. -func testFailingChannel(net *lntest.NetworkHarness, t *harnessTest) { - ctxb := context.Background() - - const ( - paymentAmt = 10000 - ) +func testFailingChannel(ht *lntemp.HarnessTest) { + const paymentAmt = 10000 chanAmt := lnd.MaxFundingAmount // We'll introduce Carol, which will settle any incoming invoice with a // totally unrelated preimage. - carol := net.NewNode(t.t, "Carol", []string{"--hodl.bogus-settle"}) - defer shutdownAndAssert(net, t, carol) + carol := ht.NewNode("Carol", []string{"--hodl.bogus-settle"}) + + alice := ht.Alice + ht.ConnectNodes(alice, carol) // Let Alice connect and open a channel to Carol, - net.ConnectNodes(t.t, net.Alice, carol) - chanPoint := openChannelAndAssert( - t, net, net.Alice, carol, - lntest.OpenChannelParams{ - Amt: chanAmt, - }, - ) + ht.OpenChannel(alice, carol, lntemp.OpenChannelParams{Amt: chanAmt}) // With the channel open, we'll create a invoice for Carol that Alice // will attempt to pay. 
@@ -1070,159 +1061,49 @@ func testFailingChannel(net *lntest.NetworkHarness, t *harnessTest) { RPreimage: preimage, Value: paymentAmt, } - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - resp, err := carol.AddInvoice(ctxt, invoice) - if err != nil { - t.Fatalf("unable to add invoice: %v", err) - } - carolPayReqs := []string{resp.PaymentRequest} - - // Wait for Alice to receive the channel edge from the funding manager. - err = net.Alice.WaitForNetworkChannelOpen(chanPoint) - if err != nil { - t.Fatalf("alice didn't see the alice->carol channel before "+ - "timeout: %v", err) - } + resp := carol.RPC.AddInvoice(invoice) // Send the payment from Alice to Carol. We expect Carol to attempt to // settle this payment with the wrong preimage. - err = completePaymentRequests( - net.Alice, net.Alice.RouterClient, carolPayReqs, false, - ) - if err != nil { - t.Fatalf("unable to send payments: %v", err) + // + // NOTE: cannot use `CompletePaymentRequestsNoWait` here as the channel + // will be force closed, so the num of updates check in that function + // won't work as the channel cannot be found. + req := &routerrpc.SendPaymentRequest{ + PaymentRequest: resp.PaymentRequest, + TimeoutSeconds: 60, + FeeLimitMsat: noFeeLimitMsat, } + ht.SendPaymentAndAssertStatus(alice, req, lnrpc.Payment_IN_FLIGHT) // Since Alice detects that Carol is trying to trick her by providing a // fake preimage, she should fail and force close the channel. - var predErr error - err = wait.Predicate(func() bool { - pendingChansRequest := &lnrpc.PendingChannelsRequest{} - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - pendingChanResp, err := net.Alice.PendingChannels(ctxt, - pendingChansRequest) - if err != nil { - predErr = fmt.Errorf("unable to query for pending "+ - "channels: %v", err) - return false - } - n := len(pendingChanResp.WaitingCloseChannels) - if n != 1 { - predErr = fmt.Errorf("expected to find %d channels "+ - "waiting close, found %d", 1, n) - return false - } - return true - }, defaultTimeout) - if err != nil { - t.Fatalf("%v", predErr) - } + ht.AssertNumWaitingClose(alice, 1) // Mine a block to confirm the broadcasted commitment. - block := mineBlocks(t, net, 1, 1)[0] - if len(block.Transactions) != 2 { - t.Fatalf("transaction wasn't mined") - } + block := ht.MineBlocksAndAssertNumTxes(1, 1)[0] + require.Len(ht, block.Transactions, 2, "transaction wasn't mined") // The channel should now show up as force closed both for Alice and // Carol. 
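+	// On Alice's side, the channel should have moved from waiting close
+	// to pending force close now that the commitment has confirmed.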
- err = wait.Predicate(func() bool { - pendingChansRequest := &lnrpc.PendingChannelsRequest{} - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - pendingChanResp, err := net.Alice.PendingChannels(ctxt, - pendingChansRequest) - if err != nil { - predErr = fmt.Errorf("unable to query for pending "+ - "channels: %v", err) - return false - } - n := len(pendingChanResp.WaitingCloseChannels) - if n != 0 { - predErr = fmt.Errorf("expected to find %d channels "+ - "waiting close, found %d", 0, n) - return false - } - n = len(pendingChanResp.PendingForceClosingChannels) - if n != 1 { - predErr = fmt.Errorf("expected to find %d channel "+ - "pending force close, found %d", 1, n) - return false - } - return true - }, defaultTimeout) - if err != nil { - t.Fatalf("%v", predErr) - } - - err = wait.Predicate(func() bool { - pendingChansRequest := &lnrpc.PendingChannelsRequest{} - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - pendingChanResp, err := carol.PendingChannels(ctxt, - pendingChansRequest) - if err != nil { - predErr = fmt.Errorf("unable to query for pending "+ - "channels: %v", err) - return false - } - n := len(pendingChanResp.PendingForceClosingChannels) - if n != 1 { - predErr = fmt.Errorf("expected to find %d channel "+ - "pending force close, found %d", 1, n) - return false - } - return true - }, defaultTimeout) - if err != nil { - t.Fatalf("%v", predErr) - } + ht.AssertNumPendingForceClose(alice, 1) + ht.AssertNumPendingForceClose(carol, 1) // Carol will use the correct preimage to resolve the HTLC on-chain. - _, err = waitForTxInMempool(net.Miner.Client, minerMempoolTimeout) - if err != nil { - t.Fatalf("unable to find Carol's resolve tx in mempool: %v", err) - } + ht.Miner.AssertNumTxsInMempool(1) // Mine enough blocks for Alice to sweep her funds from the force // closed channel. - _, err = net.Miner.Client.Generate(defaultCSV - 1) - if err != nil { - t.Fatalf("unable to generate blocks: %v", err) - } + ht.MineBlocks(defaultCSV - 1) // Wait for the sweeping tx to be broadcast. - _, err = waitForTxInMempool(net.Miner.Client, minerMempoolTimeout) - if err != nil { - t.Fatalf("unable to find Alice's sweep tx in mempool: %v", err) - } + ht.Miner.AssertNumTxsInMempool(1) // Mine the sweep. - _, err = net.Miner.Client.Generate(1) - if err != nil { - t.Fatalf("unable to generate blocks: %v", err) - } + ht.MineBlocks(1) // No pending channels should be left. 
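+	// The confirmed sweep resolves Alice's commit output, after which
+	// her force-closed channel should be considered fully resolved.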
- err = wait.Predicate(func() bool { - pendingChansRequest := &lnrpc.PendingChannelsRequest{} - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - pendingChanResp, err := net.Alice.PendingChannels(ctxt, - pendingChansRequest) - if err != nil { - predErr = fmt.Errorf("unable to query for pending "+ - "channels: %v", err) - return false - } - n := len(pendingChanResp.PendingForceClosingChannels) - if n != 0 { - predErr = fmt.Errorf("expected to find %d channel "+ - "pending force close, found %d", 0, n) - return false - } - return true - }, defaultTimeout) - if err != nil { - t.Fatalf("%v", predErr) - } + ht.AssertNumPendingForceClose(alice, 0) } // assertReports checks that the count of resolutions we have present per diff --git a/lntest/itest/lnd_test_list_on_test.go b/lntest/itest/lnd_test_list_on_test.go index df7c87cb4..c2d0ace9f 100644 --- a/lntest/itest/lnd_test_list_on_test.go +++ b/lntest/itest/lnd_test_list_on_test.go @@ -90,10 +90,6 @@ var allTestCases = []*testCase{ name: "revoked uncooperative close retribution", test: testRevokedCloseRetribution, }, - { - name: "failing link", - test: testFailingChannel, - }, { name: "revoked uncooperative close retribution zero value remote output", test: testRevokedCloseRetributionZeroValueRemoteOutput, From 666f877710e615f16fc7e1567d98066a7875d759 Mon Sep 17 00:00:00 2001 From: yyforyongyu Date: Thu, 18 Aug 2022 20:49:16 +0800 Subject: [PATCH 39/47] lntemp: use `h.NoError` inside `HarnessRPC` --- lntemp/rpc/lnd.go | 25 ++++++++++++------------- lntemp/rpc/peers.go | 2 +- 2 files changed, 13 insertions(+), 14 deletions(-) diff --git a/lntemp/rpc/lnd.go b/lntemp/rpc/lnd.go index 25eb94625..63ab65b90 100644 --- a/lntemp/rpc/lnd.go +++ b/lntemp/rpc/lnd.go @@ -365,8 +365,7 @@ func (h *HarnessRPC) SendCoins( defer cancel() resp, err := h.LN.SendCoins(ctxt, req) - require.NoErrorf(h, err, "node %s failed to send coins to address %s", - h.Name, req.Addr) + h.NoError(err, "SendCoins") return resp } @@ -393,7 +392,7 @@ func (h *HarnessRPC) GetTransactions( } resp, err := h.LN.GetTransactions(ctxt, req) - require.NoErrorf(h, err, "failed to GetTransactions for %s", h.Name) + h.NoError(err, "GetTransactions") return resp } @@ -405,7 +404,7 @@ func (h *HarnessRPC) SignMessage(msg []byte) *lnrpc.SignMessageResponse { req := &lnrpc.SignMessageRequest{Msg: msg} resp, err := h.LN.SignMessage(ctxt, req) - require.NoErrorf(h, err, "SignMessage rpc call failed") + h.NoError(err, "SignMessage") return resp } @@ -419,7 +418,7 @@ func (h *HarnessRPC) VerifyMessage(msg []byte, req := &lnrpc.VerifyMessageRequest{Msg: msg, Signature: sig} resp, err := h.LN.VerifyMessage(ctxt, req) - require.NoErrorf(h, err, "VerifyMessage failed") + h.NoError(err, "VerifyMessage") return resp } @@ -437,7 +436,7 @@ func (h *HarnessRPC) GetRecoveryInfo( } resp, err := h.LN.GetRecoveryInfo(ctxt, req) - require.NoErrorf(h, err, "failed to GetRecoveryInfo") + h.NoError(err, "GetRecoveryInfo") return resp } @@ -450,7 +449,7 @@ func (h *HarnessRPC) BatchOpenChannel( defer cancel() resp, err := h.LN.BatchOpenChannel(ctxt, req) - require.NoErrorf(h, err, "failed to batch open channel") + h.NoError(err, "BatchOpenChannel") return resp } @@ -477,7 +476,7 @@ func (h *HarnessRPC) QueryRoutes( defer cancel() routes, err := h.LN.QueryRoutes(ctxt, req) - require.NoErrorf(h, err, "failed to query routes") + h.NoError(err, "QueryRoutes") return routes } @@ -501,7 +500,7 @@ func (h *HarnessRPC) SendToRouteSync( defer cancel() resp, err := h.LN.SendToRouteSync(ctxt, req) - require.NoErrorf(h, err, 
"unable to send to route for %s", h.Name) + h.NoError(err, "SendToRouteSync") return resp } @@ -514,7 +513,7 @@ func (h *HarnessRPC) UpdateChannelPolicy( defer cancel() resp, err := h.LN.UpdateChannelPolicy(ctxt, req) - require.NoErrorf(h, err, "failed to update policy") + h.NoError(err, "UpdateChannelPolicy") return resp } @@ -537,7 +536,7 @@ func (h *HarnessRPC) SubscribeInvoices( // receive events stream. Thus we use runCtx here instead of a timeout // context. client, err := h.LN.SubscribeInvoices(h.runCtx, req) - require.NoError(h, err, "unable to create invoice subscription client") + h.NoError(err, "SubscribeInvoices") return client } @@ -551,7 +550,7 @@ func (h *HarnessRPC) SubscribeChannelBackups() BackupSubscriber { backupStream, err := h.LN.SubscribeChannelBackups( h.runCtx, &lnrpc.ChannelBackupSubscription{}, ) - require.NoErrorf(h, err, "unable to create backup stream") + h.NoError(err, "SubscribeChannelBackups") return backupStream } @@ -564,7 +563,7 @@ func (h *HarnessRPC) VerifyChanBackup( defer cancel() resp, err := h.LN.VerifyChanBackup(ctxt, ss) - require.NoErrorf(h, err, "unable to verify backup") + h.NoError(err, "VerifyChanBackup") return resp } diff --git a/lntemp/rpc/peers.go b/lntemp/rpc/peers.go index ed4c0761a..2ec69b48e 100644 --- a/lntemp/rpc/peers.go +++ b/lntemp/rpc/peers.go @@ -23,7 +23,7 @@ func (h *HarnessRPC) UpdateNodeAnnouncement(req AnnReq) AnnResp { defer cancel() resp, err := h.Peer.UpdateNodeAnnouncement(ctxt, req) - require.NoErrorf(h, err, "failed to update announcement") + h.NoError(err, "UpdateNodeAnnouncement") return resp } From 2fe7413f0ea42aaff3110561a2c78effbba0dbc5 Mon Sep 17 00:00:00 2001 From: yyforyongyu Date: Fri, 19 Aug 2022 01:01:53 +0800 Subject: [PATCH 40/47] itest: increase test timeout to 120m We now update our test timeout from 60m to 120m because most of the tests would now take more than one hour to finish. --- make/testing_flags.mk | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/make/testing_flags.mk b/make/testing_flags.mk index 893185cd8..59e4d3152 100644 --- a/make/testing_flags.mk +++ b/make/testing_flags.mk @@ -81,11 +81,11 @@ LOG_TAGS := nolog endif # If a timeout was requested, construct initialize the proper flag for the go -# test command. If not, we set 60m (up from the default 10m). +# test command. If not, we set 120m (up from the default 10m). ifneq ($(timeout),) TEST_FLAGS += -test.timeout=$(timeout) else -TEST_FLAGS += -test.timeout=60m +TEST_FLAGS += -test.timeout=120m endif GOLIST := go list -tags="$(DEV_TAGS)" -deps $(PKG)/... 
| grep '$(PKG)'| grep -v '/vendor/' From 4f9f2f79012e312a5adc60085018e83d79811f27 Mon Sep 17 00:00:00 2001 From: yyforyongyu Date: Fri, 19 Aug 2022 16:52:59 +0800 Subject: [PATCH 41/47] github: update action versions --- .github/workflows/main.yml | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index c222d9d08..3c3009d04 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -401,7 +401,7 @@ jobs: args: backend=neutrino steps: - name: git checkout - uses: actions/checkout@v2 + uses: actions/checkout@v3 - name: go cache uses: actions/cache@v1 @@ -415,7 +415,7 @@ jobs: lnd-${{ runner.os }}-go- - name: setup go ${{ env.GO_VERSION }} - uses: actions/setup-go@v2 + uses: actions/setup-go@v3 with: go-version: '${{ env.GO_VERSION }}' @@ -449,7 +449,7 @@ jobs: GOPATH: ${{ github.workspace }}/go steps: - name: git checkout - uses: actions/checkout@v2 + uses: actions/checkout@v3 - name: go cache uses: actions/cache@v1 @@ -463,7 +463,7 @@ jobs: lnd-${{ runner.os }}-go- - name: setup go ${{ env.GO_VERSION }} - uses: actions/setup-go@v2 + uses: actions/setup-go@v3 with: go-version: '${{ env.GO_VERSION }}' @@ -472,6 +472,7 @@ jobs: - name: Zip log files on failure if: ${{ failure() }} + timeout-minutes: 1 # timeout after 1 minute run: 7z a logs-itest-windows.zip lntest/itest/**/*.log - name: Upload log files on failure From 60da8233962f9036cb94a93183619e35b95f05cf Mon Sep 17 00:00:00 2001 From: yyforyongyu Date: Tue, 25 Oct 2022 19:07:04 +0800 Subject: [PATCH 42/47] itest: fix make lint --- lntest/itest/lnd_channel_force_close_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lntest/itest/lnd_channel_force_close_test.go b/lntest/itest/lnd_channel_force_close_test.go index 26d425c6f..4c87ef973 100644 --- a/lntest/itest/lnd_channel_force_close_test.go +++ b/lntest/itest/lnd_channel_force_close_test.go @@ -554,8 +554,8 @@ func channelForceClosureTest(ht *lntemp.HarnessTest, // the commitment transaction which was broadcast on-chain. sweepTx := ht.Miner.GetRawTransaction(sweepingTXID) for _, txIn := range sweepTx.MsgTx().TxIn { - require.Equal(ht, &txIn.PreviousOutPoint.Hash, - closingTxID, "sweep transaction not spending from commit") + require.Equal(ht, &txIn.PreviousOutPoint.Hash, closingTxID, + "sweep transaction not spending from commit") } // We expect a resolution which spends our commit output. From b68ac6acdc7855c2e87eae7adbc9307cccd947f9 Mon Sep 17 00:00:00 2001 From: yyforyongyu Date: Tue, 25 Oct 2022 19:13:22 +0800 Subject: [PATCH 43/47] golangci: update `gomnd` config --- .golangci.yml | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/.golangci.yml b/.golangci.yml index 4ad0b273b..d1250fc7d 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -78,6 +78,22 @@ linters-settings: # so no return split required. block-size: 3 + gomnd: + # List of numbers to exclude from analysis. + # The numbers should be written as string. + # Values always ignored: "1", "1.0", "0" and "0.0" + # Default: [] + ignored-numbers: + - '0666' + - '0755' + + # List of function patterns to exclude from analysis. 
+    # Values always ignored: `time.Date`
+    # Default: []
+    ignored-functions:
+      - 'math.*'
+      - 'strconv.ParseInt'
+
 linters:
   enable-all: true

From 03dae039a7555058d843f1463a087be2dea76476 Mon Sep 17 00:00:00 2001
From: yyforyongyu
Date: Thu, 10 Nov 2022 15:47:14 +0800
Subject: [PATCH 44/47] lntemp: remove `balanceCount` from node state

Since channels are always closed when tests finish, there's no need to
track their balances in the node's state.
---
 lntemp/node/state.go | 30 ------------------------------
 1 file changed, 30 deletions(-)

diff --git a/lntemp/node/state.go b/lntemp/node/state.go
index 9dca10f42..5b3450d38 100644
--- a/lntemp/node/state.go
+++ b/lntemp/node/state.go
@@ -103,20 +103,6 @@ type invoiceCount struct {
 	LastIndexOffset uint64
 }

-// balanceCount provides a summary over balances related to channels.
-type balanceCount struct {
-	LocalBalance             *lnrpc.Amount
-	RemoteBalance            *lnrpc.Amount
-	UnsettledLocalBalance    *lnrpc.Amount
-	UnsettledRemoteBalance   *lnrpc.Amount
-	PendingOpenLocalBalance  *lnrpc.Amount
-	PendingOpenRemoteBalance *lnrpc.Amount
-
-	// Deprecated fields.
-	Balance            int64
-	PendingOpenBalance int64
-}
-
 // walletBalance provides a summary over balances related the node's wallet.
 type walletBalance struct {
 	TotalBalance int64
@@ -139,9 +125,6 @@ type State struct {
 	// CloseChannel gives the summary of close channel related counts.
 	CloseChannel closedChannelCount

-	// Balance gives the summary of the channel balance.
-	Balance balanceCount
-
 	// Wallet gives the summary of the wallet balance.
 	Wallet walletBalance

@@ -315,18 +298,6 @@ func (s *State) updateEdgeStats() {
 	s.Edge.Public = len(resp.Edges)
 }

-// updateChannelBalance creates stats for the node's channel balance.
-func (s *State) updateChannelBalance() {
-	resp := s.rpc.ChannelBalance()
-
-	s.Balance.LocalBalance = resp.LocalBalance
-	s.Balance.RemoteBalance = resp.RemoteBalance
-	s.Balance.UnsettledLocalBalance = resp.UnsettledLocalBalance
-	s.Balance.UnsettledRemoteBalance = resp.UnsettledRemoteBalance
-	s.Balance.PendingOpenLocalBalance = resp.PendingOpenLocalBalance
-	s.Balance.PendingOpenRemoteBalance = resp.PendingOpenRemoteBalance
-}
-
 // updateWalletBalance creates stats for the node's wallet balance.
 func (s *State) updateWalletBalance() {
 	resp := s.rpc.WalletBalance()
@@ -345,7 +316,6 @@ func (s *State) updateState() {
 	s.updateInvoiceStats()
 	s.updateUTXOStats()
 	s.updateEdgeStats()
-	s.updateChannelBalance()
 	s.updateWalletBalance()
 }

From 3d7aae42b3cca9eb1c483911ce22ca8cf5f3bb9c Mon Sep 17 00:00:00 2001
From: yyforyongyu
Date: Mon, 14 Nov 2022 02:49:46 +0800
Subject: [PATCH 45/47] lntest: increase node start timeout

---
 lntest/timeouts.go           | 2 +-
 lntest/timeouts_remote_db.go | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/lntest/timeouts.go b/lntest/timeouts.go
index 6723050b2..4ad2d6653 100644
--- a/lntest/timeouts.go
+++ b/lntest/timeouts.go
@@ -28,5 +28,5 @@ const (

 	// NodeStartTimeout is the timeout value when waiting for a node to
 	// become fully started.
-	NodeStartTimeout = time.Second * 60
+	NodeStartTimeout = time.Second * 120
 )
diff --git a/lntest/timeouts_remote_db.go b/lntest/timeouts_remote_db.go
index a9d6977d0..4e563c6f4 100644
--- a/lntest/timeouts_remote_db.go
+++ b/lntest/timeouts_remote_db.go
@@ -28,5 +28,5 @@ const (

 	// NodeStartTimeout is the timeout value when waiting for a node to
 	// become fully started.
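+	//
+	// NOTE: this value should be kept in sync with the same constant in
+	// timeouts.go.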
-	NodeStartTimeout = time.Second * 60
+	NodeStartTimeout = time.Second * 120
 )

From aafe4c38fcf12dd810af5263e278cff074b49ece Mon Sep 17 00:00:00 2001
From: yyforyongyu
Date: Mon, 14 Nov 2022 03:41:45 +0800
Subject: [PATCH 46/47] lntest: fix flake in channel backup related tests

This commit fixes the channel backup tests by decreasing the recovery
window used from 1000 to 100 to speed up the startup of node Dave.
---
 lntemp/harness_node_manager.go          |  2 +-
 lntest/itest/lnd_channel_backup_test.go | 22 ++++++++++++++------
 2 files changed, 17 insertions(+), 7 deletions(-)

diff --git a/lntemp/harness_node_manager.go b/lntemp/harness_node_manager.go
index 1292741c7..2b5f87e08 100644
--- a/lntemp/harness_node_manager.go
+++ b/lntemp/harness_node_manager.go
@@ -156,7 +156,7 @@ func (nm *nodeManager) restartNode(ctxt context.Context, node *node.HarnessNode,
 	}
 	if len(chanBackups) != 0 {
 		unlockReq.ChannelBackups = chanBackups[0]
-		unlockReq.RecoveryWindow = 1000
+		unlockReq.RecoveryWindow = 100
 	}

 	err = wait.NoError(func() error {
diff --git a/lntest/itest/lnd_channel_backup_test.go b/lntest/itest/lnd_channel_backup_test.go
index b9b1d3500..f99b37af1 100644
--- a/lntest/itest/lnd_channel_backup_test.go
+++ b/lntest/itest/lnd_channel_backup_test.go
@@ -39,6 +39,10 @@ type (
 		password []byte, mnemonic []string) nodeRestorer
 )

+// revocationWindow specifies the recovery window used when restoring a
+// node from seed.
+const revocationWindow = 100
+
 // chanRestoreScenario represents a test case used by testing the channel
 // restore methods.
 type chanRestoreScenario struct {
@@ -309,7 +313,8 @@ func testChannelBackupRestoreBasic(ht *lntemp.HarnessTest) {
 		return func() *node.HarnessNode {
 			return st.RestoreNodeWithSeed(
 				"dave", nil, password, mnemonic,
-				"", 1000, backupSnapshot,
+				"", revocationWindow,
+				backupSnapshot,
 				copyPorts(oldNode),
 			)
 		}
@@ -341,7 +346,7 @@
 		return func() *node.HarnessNode {
 			newNode := st.RestoreNodeWithSeed(
 				"dave", nil, password, mnemonic,
-				"", 1000, nil,
+				"", revocationWindow, nil,
 				copyPorts(oldNode),
 			)

@@ -377,7 +382,7 @@
 		return func() *node.HarnessNode {
 			newNode := st.RestoreNodeWithSeed(
 				"dave", nil, password, mnemonic,
-				"", 1000, nil,
+				"", revocationWindow, nil,
 				copyPorts(oldNode),
 			)

@@ -665,11 +670,16 @@ func testChannelBackupRestoreLegacy(ht *lntemp.HarnessTest) {
 func testChannelBackupRestoreForceClose(ht *lntemp.HarnessTest) {
 	// Restore a channel that was force closed by dave just before going
 	// offline.
-	ht.Run("from backup file anchors", func(t *testing.T) {
+	success := ht.Run("from backup file anchors", func(t *testing.T) {
 		st := ht.Subtest(t)
 		runChanRestoreScenarioForceClose(st, false)
 	})

+	// Only run the second test if the first passed.
+	if !success {
+		return
+	}
+
 	// Restore a zero-conf anchors channel that was force closed by dave
 	// just before going offline.
 	ht.Run("from backup file anchors w/ zero-conf", func(t *testing.T) {
@@ -1344,8 +1354,8 @@ func chanRestoreViaRPC(ht *lntemp.HarnessTest, password []byte,

 	return func() *node.HarnessNode {
 		newNode := ht.RestoreNodeWithSeed(
-			"dave", nil, password, mnemonic, "", 1000, nil,
-			copyPorts(oldNode),
+			"dave", nil, password, mnemonic, "", revocationWindow,
+			nil, copyPorts(oldNode),
 		)

 		req := &lnrpc.RestoreChanBackupRequest{Backup: backup}
 		newNode.RPC.RestoreChanBackups(req)

From edeb8c74443ad52d46dba6ed8fb7a11d765ce24b Mon Sep 17 00:00:00 2001
From: yyforyongyu
Date: Fri, 12 Aug 2022 18:01:24 +0800
Subject: [PATCH 47/47] docs: update release notes

---
 docs/release-notes/release-notes-0.16.0.md | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/docs/release-notes/release-notes-0.16.0.md b/docs/release-notes/release-notes-0.16.0.md
index 78d2ed5eb..fe0c9fa16 100644
--- a/docs/release-notes/release-notes-0.16.0.md
+++ b/docs/release-notes/release-notes-0.16.0.md
@@ -247,8 +247,9 @@ better testing suite for writing integration tests. A new defined structure
 is implemented, please refer to
 [README](https://github.com/lightningnetwork/lnd/tree/master/lntemp) for more
 details. Along the way, several
-PRs([6776](https://github.com/lightningnetwork/lnd/pull/6776)) have been made
-to refactor the itest for code health and maintenance.
+PRs ([6776](https://github.com/lightningnetwork/lnd/pull/6776),
+[6822](https://github.com/lightningnetwork/lnd/pull/6822)) have been made to
+refactor the itest for code health and maintenance.

 # Contributors (Alphabetical Order)