From 94c64a886ef77ba658e41034d978151f0f094f99 Mon Sep 17 00:00:00 2001 From: yyforyongyu Date: Fri, 12 Aug 2022 13:07:16 +0800 Subject: [PATCH] lntemp+itest: remove unused code to prepare `lntemp`'s takeover This commit removes the old code living in `lntest` to prepare `lntemp`'s takeover. --- itest/assertions.go | 1357 ----------------- itest/lnd_channel_force_close_test.go | 68 + itest/lnd_channel_policy_test.go | 15 - itest/lnd_etcd_failover_test.go | 6 +- itest/lnd_multi-hop_test.go | 97 -- itest/lnd_network_test.go | 5 +- itest/lnd_neutrino_test.go | 3 +- itest/lnd_psbt_test.go | 55 - itest/lnd_revocation_test.go | 3 +- itest/lnd_test.go | 6 +- itest/lnd_zero_conf_test.go | 3 +- itest/test_harness.go | 351 ----- itest/utils.go | 412 +----- lntemp/fee_service.go | 4 +- lntemp/harness.go | 9 +- lntemp/harness_miner.go | 6 +- lntemp/harness_node_manager.go | 5 +- lntemp/harness_setup.go | 17 +- lntemp/node/config.go | 154 +- lntemp/node/harness_node.go | 19 +- lntemp/utils.go | 5 +- lntest/bitcoind_common.go | 19 +- lntest/btcd.go | 25 +- lntest/fee_service.go | 114 -- lntest/fee_service_test.go | 39 - lntest/harness_miner.go | 161 --- lntest/harness_net.go | 1759 ---------------------- lntest/harness_node.go | 1928 ------------------------- lntest/neutrino.go | 3 +- lntest/test_common.go | 190 --- 30 files changed, 305 insertions(+), 6533 deletions(-) delete mode 100644 itest/assertions.go delete mode 100644 itest/test_harness.go delete mode 100644 lntest/fee_service.go delete mode 100644 lntest/fee_service_test.go delete mode 100644 lntest/harness_miner.go delete mode 100644 lntest/harness_net.go delete mode 100644 lntest/harness_node.go delete mode 100644 lntest/test_common.go diff --git a/itest/assertions.go b/itest/assertions.go deleted file mode 100644 index a917c2c1a..000000000 --- a/itest/assertions.go +++ /dev/null @@ -1,1357 +0,0 @@ -package itest - -import ( - "context" - "encoding/hex" - "fmt" - "math" - "sync/atomic" - "testing" - "time" - - "github.com/btcsuite/btcd/btcutil" - "github.com/btcsuite/btcd/chaincfg/chainhash" - "github.com/btcsuite/btcd/rpcclient" - "github.com/btcsuite/btcd/wire" - "github.com/go-errors/errors" - "github.com/lightningnetwork/lnd/channeldb" - "github.com/lightningnetwork/lnd/lnrpc" - "github.com/lightningnetwork/lnd/lnrpc/routerrpc" - "github.com/lightningnetwork/lnd/lntest" - "github.com/lightningnetwork/lnd/lntest/wait" - "github.com/stretchr/testify/require" - "google.golang.org/protobuf/proto" -) - -// openChannelStream blocks until an OpenChannel request for a channel funding -// by alice succeeds. If it does, a stream client is returned to receive events -// about the opening channel. -func openChannelStream(t *harnessTest, net *lntest.NetworkHarness, - alice, bob *lntest.HarnessNode, - p lntest.OpenChannelParams) lnrpc.Lightning_OpenChannelClient { - - t.t.Helper() - - // Wait until we are able to fund a channel successfully. This wait - // prevents us from erroring out when trying to create a channel while - // the node is starting up. - var chanOpenUpdate lnrpc.Lightning_OpenChannelClient - err := wait.NoError(func() error { - var err error - chanOpenUpdate, err = net.OpenChannel(alice, bob, p) - return err - }, defaultTimeout) - require.NoError(t.t, err, "unable to open channel") - - return chanOpenUpdate -} - -// openChannelAndAssert attempts to open a channel with the specified -// parameters extended from Alice to Bob. 
Additionally, two items are asserted -// after the channel is considered open: the funding transaction should be -// found within a block, and that Alice can report the status of the new -// channel. -func openChannelAndAssert(t *harnessTest, net *lntest.NetworkHarness, - alice, bob *lntest.HarnessNode, - p lntest.OpenChannelParams) *lnrpc.ChannelPoint { - - t.t.Helper() - - chanOpenUpdate := openChannelStream(t, net, alice, bob, p) - - // Mine 6 blocks, then wait for Alice's node to notify us that the - // channel has been opened. The funding transaction should be found - // within the first newly mined block. We mine 6 blocks so that in the - // case that the channel is public, it is announced to the network. - block := mineBlocks(t, net, 6, 1)[0] - - fundingChanPoint, err := net.WaitForChannelOpen(chanOpenUpdate) - require.NoError(t.t, err, "error while waiting for channel open") - - fundingTxID, err := lnrpc.GetChanPointFundingTxid(fundingChanPoint) - require.NoError(t.t, err, "unable to get txid") - - assertTxInBlock(t, block, fundingTxID) - - // The channel should be listed in the peer information returned by - // both peers. - chanPoint := wire.OutPoint{ - Hash: *fundingTxID, - Index: fundingChanPoint.OutputIndex, - } - require.NoError( - t.t, net.AssertChannelExists(alice, &chanPoint), - "unable to assert channel existence", - ) - require.NoError( - t.t, net.AssertChannelExists(bob, &chanPoint), - "unable to assert channel existence", - ) - - // They should also notice this channel from topology subscription. - err = alice.WaitForNetworkChannelOpen(fundingChanPoint) - require.NoError(t.t, err) - - err = bob.WaitForNetworkChannelOpen(fundingChanPoint) - require.NoError(t.t, err) - - return fundingChanPoint -} - -func waitForGraphSync(t *harnessTest, node *lntest.HarnessNode) { - t.t.Helper() - - err := wait.Predicate(func() bool { - ctxb := context.Background() - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - resp, err := node.GetInfo(ctxt, &lnrpc.GetInfoRequest{}) - require.NoError(t.t, err) - - return resp.SyncedToGraph - }, defaultTimeout) - require.NoError(t.t, err) -} - -// closeChannelAndAssert attempts to close a channel identified by the passed -// channel point owned by the passed Lightning node. A fully blocking channel -// closure is attempted, therefore the passed context should be a child derived -// via timeout from a base parent. Additionally, once the channel has been -// detected as closed, an assertion checks that the transaction is found within -// a block. Finally, this assertion verifies that the node always sends out a -// disable update when closing the channel if the channel was previously -// enabled. -// -// NOTE: This method assumes that the provided funding point is confirmed -// on-chain AND that the edge exists in the node's channel graph. If the funding -// transactions was reorged out at some point, use closeReorgedChannelAndAssert. -func closeChannelAndAssert(t *harnessTest, net *lntest.NetworkHarness, - node *lntest.HarnessNode, fundingChanPoint *lnrpc.ChannelPoint, - force bool) *chainhash.Hash { - - return closeChannelAndAssertType( - t, net, node, fundingChanPoint, false, force, - ) -} - -func closeChannelAndAssertType(t *harnessTest, - net *lntest.NetworkHarness, node *lntest.HarnessNode, - fundingChanPoint *lnrpc.ChannelPoint, - anchors, force bool) *chainhash.Hash { - - ctxb := context.Background() - ctxt, cancel := context.WithTimeout(ctxb, channelCloseTimeout) - defer cancel() - - // Fetch the current channel policy. 
If the channel is currently - // enabled, we will register for graph notifications before closing to - // assert that the node sends out a disabling update as a result of the - // channel being closed. - curPolicy := getChannelPolicies( - t, node, node.PubKeyStr, fundingChanPoint, - )[0] - expectDisable := !curPolicy.Disabled - - closeUpdates, _, err := net.CloseChannel(node, fundingChanPoint, force) - require.NoError(t.t, err, "unable to close channel") - - // If the channel policy was enabled prior to the closure, wait until we - // received the disabled update. - if expectDisable { - curPolicy.Disabled = true - assertChannelPolicyUpdate( - t.t, node, node.PubKeyStr, - curPolicy, fundingChanPoint, false, - ) - } - - return assertChannelClosed( - ctxt, t, net, node, fundingChanPoint, anchors, closeUpdates, - ) -} - -// closeReorgedChannelAndAssert attempts to close a channel identified by the -// passed channel point owned by the passed Lightning node. A fully blocking -// channel closure is attempted, therefore the passed context should be a child -// derived via timeout from a base parent. Additionally, once the channel has -// been detected as closed, an assertion checks that the transaction is found -// within a block. -// -// NOTE: This method does not verify that the node sends a disable update for -// the closed channel. -func closeReorgedChannelAndAssert(t *harnessTest, - net *lntest.NetworkHarness, node *lntest.HarnessNode, - fundingChanPoint *lnrpc.ChannelPoint, force bool) *chainhash.Hash { - - ctxb := context.Background() - ctx, cancel := context.WithTimeout(ctxb, channelCloseTimeout) - defer cancel() - - closeUpdates, _, err := net.CloseChannel(node, fundingChanPoint, force) - require.NoError(t.t, err, "unable to close channel") - - return assertChannelClosed( - ctx, t, net, node, fundingChanPoint, false, closeUpdates, - ) -} - -// assertChannelClosed asserts that the channel is properly cleaned up after -// initiating a cooperative or local close. -func assertChannelClosed(ctx context.Context, t *harnessTest, - net *lntest.NetworkHarness, node *lntest.HarnessNode, - fundingChanPoint *lnrpc.ChannelPoint, anchors bool, - closeUpdates lnrpc.Lightning_CloseChannelClient) *chainhash.Hash { - - txid, err := lnrpc.GetChanPointFundingTxid(fundingChanPoint) - require.NoError(t.t, err, "unable to get txid") - chanPointStr := fmt.Sprintf("%v:%v", txid, fundingChanPoint.OutputIndex) - - // If the channel appears in list channels, ensure that its state - // contains ChanStatusCoopBroadcasted. - listChansRequest := &lnrpc.ListChannelsRequest{} - listChansResp, err := node.ListChannels(ctx, listChansRequest) - require.NoError(t.t, err, "unable to query for list channels") - - for _, channel := range listChansResp.Channels { - // Skip other channels. - if channel.ChannelPoint != chanPointStr { - continue - } - - // Assert that the channel is in coop broadcasted. - require.Contains( - t.t, channel.ChanStatusFlags, - channeldb.ChanStatusCoopBroadcasted.String(), - "channel not coop broadcasted", - ) - } - - // At this point, the channel should now be marked as being in the - // state of "waiting close". 
- pendingChansRequest := &lnrpc.PendingChannelsRequest{} - pendingChanResp, err := node.PendingChannels(ctx, pendingChansRequest) - require.NoError(t.t, err, "unable to query for pending channels") - - var found bool - for _, pendingClose := range pendingChanResp.WaitingCloseChannels { - if pendingClose.Channel.ChannelPoint == chanPointStr { - found = true - break - } - } - require.True(t.t, found, "channel not marked as waiting close") - - // We'll now, generate a single block, wait for the final close status - // update, then ensure that the closing transaction was included in the - // block. If there are anchors, we also expect an anchor sweep. - expectedTxes := 1 - if anchors { - expectedTxes = 2 - } - - block := mineBlocks(t, net, 1, expectedTxes)[0] - - closingTxid, err := net.WaitForChannelClose(closeUpdates) - require.NoError(t.t, err, "error while waiting for channel close") - - assertTxInBlock(t, block, closingTxid) - - // Finally, the transaction should no longer be in the waiting close - // state as we've just mined a block that should include the closing - // transaction. - err = wait.Predicate(func() bool { - pendingChansRequest := &lnrpc.PendingChannelsRequest{} - pendingChanResp, err := node.PendingChannels( - ctx, pendingChansRequest, - ) - if err != nil { - return false - } - - for _, pendingClose := range pendingChanResp.WaitingCloseChannels { - if pendingClose.Channel.ChannelPoint == chanPointStr { - return false - } - } - - return true - }, defaultTimeout) - require.NoError( - t.t, err, "closing transaction not marked as fully closed", - ) - - return closingTxid -} - -// findForceClosedChannel searches a pending channel response for a particular -// channel, returning the force closed channel upon success. -func findForceClosedChannel(pendingChanResp *lnrpc.PendingChannelsResponse, - op fmt.Stringer) (*lnrpc.PendingChannelsResponse_ForceClosedChannel, - error) { - - for _, forceClose := range pendingChanResp.PendingForceClosingChannels { - if forceClose.Channel.ChannelPoint == op.String() { - return forceClose, nil - } - } - - return nil, errors.New("channel not marked as force closed") -} - -// findWaitingCloseChannel searches a pending channel response for a particular -// channel, returning the waiting close channel upon success. -func findWaitingCloseChannel(pendingChanResp *lnrpc.PendingChannelsResponse, - op fmt.Stringer) (*lnrpc.PendingChannelsResponse_WaitingCloseChannel, - error) { - - for _, waitingClose := range pendingChanResp.WaitingCloseChannels { - if waitingClose.Channel.ChannelPoint == op.String() { - return waitingClose, nil - } - } - - return nil, errors.New("channel not marked as waiting close") -} - -// waitForChannelPendingForceClose waits for the node to report that the -// channel is pending force close, and that the UTXO nursery is aware of it. 
-func waitForChannelPendingForceClose(node *lntest.HarnessNode, - fundingChanPoint *lnrpc.ChannelPoint) error { - - ctxb := context.Background() - ctx, cancel := context.WithTimeout(ctxb, defaultTimeout) - defer cancel() - - txid, err := lnrpc.GetChanPointFundingTxid(fundingChanPoint) - if err != nil { - return err - } - - op := wire.OutPoint{ - Hash: *txid, - Index: fundingChanPoint.OutputIndex, - } - - return wait.NoError(func() error { - pendingChansRequest := &lnrpc.PendingChannelsRequest{} - pendingChanResp, err := node.PendingChannels( - ctx, pendingChansRequest, - ) - if err != nil { - return fmt.Errorf("unable to get pending channels: %v", - err) - } - - forceClose, err := findForceClosedChannel(pendingChanResp, &op) - if err != nil { - return err - } - - // We must wait until the UTXO nursery has received the channel - // and is aware of its maturity height. - if forceClose.MaturityHeight == 0 { - return fmt.Errorf("channel had maturity height of 0") - } - - return nil - }, defaultTimeout) -} - -// lnrpcForceCloseChannel is a short type alias for a ridiculously long type -// name in the lnrpc package. -type lnrpcForceCloseChannel = lnrpc.PendingChannelsResponse_ForceClosedChannel - -// waitForNumChannelPendingForceClose waits for the node to report a certain -// number of channels in state pending force close. -func waitForNumChannelPendingForceClose(node *lntest.HarnessNode, - expectedNum int, - perChanCheck func(channel *lnrpcForceCloseChannel) error) error { - - ctxb := context.Background() - ctx, cancel := context.WithTimeout(ctxb, defaultTimeout) - defer cancel() - - return wait.NoError(func() error { - resp, err := node.PendingChannels( - ctx, &lnrpc.PendingChannelsRequest{}, - ) - if err != nil { - return fmt.Errorf("unable to get pending channels: %v", - err) - } - - forceCloseChans := resp.PendingForceClosingChannels - if len(forceCloseChans) != expectedNum { - return fmt.Errorf("%v should have %d pending "+ - "force close channels but has %d", - node.Cfg.Name, expectedNum, - len(forceCloseChans)) - } - - if perChanCheck != nil { - for _, forceCloseChan := range forceCloseChans { - err := perChanCheck(forceCloseChan) - if err != nil { - return err - } - } - } - - return nil - }, defaultTimeout) -} - -// cleanupForceClose mines a force close commitment found in the mempool and -// the following sweep transaction from the force closing node. -func cleanupForceClose(t *harnessTest, net *lntest.NetworkHarness, - node *lntest.HarnessNode, chanPoint *lnrpc.ChannelPoint) { - - // Wait for the channel to be marked pending force close. - err := waitForChannelPendingForceClose(node, chanPoint) - require.NoError(t.t, err, "channel not pending force close") - - // Mine enough blocks for the node to sweep its funds from the force - // closed channel. - // - // The commit sweep resolver is able to broadcast the sweep tx up to - // one block before the CSV elapses, so wait until defaulCSV-1. - _, err = net.Miner.Client.Generate(defaultCSV - 1) - require.NoError(t.t, err, "unable to generate blocks") - - // The node should now sweep the funds, clean up by mining the sweeping - // tx. - mineBlocks(t, net, 1, 1) -} - -// numOpenChannelsPending sends an RPC request to a node to get a count of the -// node's channels that are currently in a pending state (with a broadcast, but -// not confirmed funding transaction). 
-func numOpenChannelsPending(ctxt context.Context, - node *lntest.HarnessNode) (int, error) { - - pendingChansRequest := &lnrpc.PendingChannelsRequest{} - resp, err := node.PendingChannels(ctxt, pendingChansRequest) - if err != nil { - return 0, err - } - return len(resp.PendingOpenChannels), nil -} - -// assertNumOpenChannelsPending asserts that a pair of nodes have the expected -// number of pending channels between them. -func assertNumOpenChannelsPending(t *harnessTest, - alice, bob *lntest.HarnessNode, expected int) { - - ctxb := context.Background() - ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout) - defer cancel() - - err := wait.NoError(func() error { - aliceNumChans, err := numOpenChannelsPending(ctxt, alice) - if err != nil { - return fmt.Errorf("error fetching alice's node (%v) "+ - "pending channels %v", alice.NodeID, err) - } - bobNumChans, err := numOpenChannelsPending(ctxt, bob) - if err != nil { - return fmt.Errorf("error fetching bob's node (%v) "+ - "pending channels %v", bob.NodeID, err) - } - - aliceStateCorrect := aliceNumChans == expected - if !aliceStateCorrect { - return fmt.Errorf("number of pending channels for "+ - "alice incorrect. expected %v, got %v", - expected, aliceNumChans) - } - - bobStateCorrect := bobNumChans == expected - if !bobStateCorrect { - return fmt.Errorf("number of pending channels for bob "+ - "incorrect. expected %v, got %v", expected, - bobNumChans) - } - - return nil - }, defaultTimeout) - require.NoError(t.t, err) -} - -// checkPeerInPeersList returns true if Bob appears in Alice's peer list. -func checkPeerInPeersList(ctx context.Context, alice, - bob *lntest.HarnessNode) (bool, error) { - - peers, err := alice.ListPeers(ctx, &lnrpc.ListPeersRequest{}) - if err != nil { - return false, fmt.Errorf( - "error listing %s's node (%v) peers: %v", - alice.Name(), alice.NodeID, err, - ) - } - - for _, peer := range peers.Peers { - if peer.PubKey == bob.PubKeyStr { - return true, nil - } - } - - return false, nil -} - -// assertConnected asserts that two peers are connected. -func assertConnected(t *harnessTest, alice, bob *lntest.HarnessNode) { - ctxb := context.Background() - ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout) - defer cancel() - - err := wait.NoError(func() error { - bobIsAlicePeer, err := checkPeerInPeersList(ctxt, alice, bob) - if err != nil { - return err - } - - if !bobIsAlicePeer { - return fmt.Errorf( - "expected %s and %s to be connected "+ - "but %s is not in %s's peer list", - alice.Name(), bob.Name(), - bob.Name(), alice.Name(), - ) - } - - aliceIsBobPeer, err := checkPeerInPeersList(ctxt, bob, alice) - if err != nil { - return err - } - - if !aliceIsBobPeer { - return fmt.Errorf( - "expected %s and %s to be connected "+ - "but %s is not in %s's peer list", - alice.Name(), bob.Name(), - alice.Name(), bob.Name(), - ) - } - - return nil - }, defaultTimeout) - require.NoError(t.t, err) -} - -// assertNotConnected asserts that two peers are not connected. 
-func assertNotConnected(t *harnessTest, alice, bob *lntest.HarnessNode) { - ctxb := context.Background() - ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout) - defer cancel() - - err := wait.NoError(func() error { - bobIsAlicePeer, err := checkPeerInPeersList(ctxt, alice, bob) - if err != nil { - return err - } - - if bobIsAlicePeer { - return fmt.Errorf( - "expected %s and %s not to be "+ - "connected but %s is in %s's "+ - "peer list", - alice.Name(), bob.Name(), - bob.Name(), alice.Name(), - ) - } - - aliceIsBobPeer, err := checkPeerInPeersList(ctxt, bob, alice) - if err != nil { - return err - } - - if aliceIsBobPeer { - return fmt.Errorf( - "expected %s and %s not to be "+ - "connected but %s is in %s's "+ - "peer list", - alice.Name(), bob.Name(), - alice.Name(), bob.Name(), - ) - } - - return nil - }, defaultTimeout) - require.NoError(t.t, err) -} - -// shutdownAndAssert shuts down the given node and asserts that no errors -// occur. -func shutdownAndAssert(net *lntest.NetworkHarness, t *harnessTest, - node *lntest.HarnessNode) { - - // The process may not be in a state to always shutdown immediately, so - // we'll retry up to a hard limit to ensure we eventually shutdown. - err := wait.NoError(func() error { - return net.ShutdownNode(node) - }, defaultTimeout) - require.NoErrorf(t.t, err, "unable to shutdown %v", node.Name()) -} - -// assertChannelBalanceResp makes a ChannelBalance request and checks the -// returned response matches the expected. -func assertChannelBalanceResp(t *harnessTest, node *lntest.HarnessNode, - expected *lnrpc.ChannelBalanceResponse) { - - resp := getChannelBalance(t, node) - require.True(t.t, proto.Equal(expected, resp), "balance is incorrect") -} - -// getChannelBalance gets the channel balance. -func getChannelBalance(t *harnessTest, - node *lntest.HarnessNode) *lnrpc.ChannelBalanceResponse { - - t.t.Helper() - - ctxt, _ := context.WithTimeout(context.Background(), defaultTimeout) - req := &lnrpc.ChannelBalanceRequest{} - resp, err := node.ChannelBalance(ctxt, req) - - require.NoError(t.t, err, "unable to get node's balance") - return resp -} - -// txStr returns the string representation of the channel's funding transaction. -func txStr(chanPoint *lnrpc.ChannelPoint) string { - fundingTxID, err := lnrpc.GetChanPointFundingTxid(chanPoint) - if err != nil { - return "" - } - cp := wire.OutPoint{ - Hash: *fundingTxID, - Index: chanPoint.OutputIndex, - } - return cp.String() -} - -// getChannelPolicies queries the channel graph and retrieves the current edge -// policies for the provided channel points. -func getChannelPolicies(t *harnessTest, node *lntest.HarnessNode, - advertisingNode string, - chanPoints ...*lnrpc.ChannelPoint) []*lnrpc.RoutingPolicy { - - ctxb := context.Background() - - descReq := &lnrpc.ChannelGraphRequest{ - IncludeUnannounced: true, - } - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - chanGraph, err := node.DescribeGraph(ctxt, descReq) - require.NoError(t.t, err, "unable to query for alice's graph") - - var policies []*lnrpc.RoutingPolicy - err = wait.NoError(func() error { - out: - for _, chanPoint := range chanPoints { - for _, e := range chanGraph.Edges { - if e.ChanPoint != txStr(chanPoint) { - continue - } - - if e.Node1Pub == advertisingNode { - policies = append(policies, - e.Node1Policy) - } else { - policies = append(policies, - e.Node2Policy) - } - - continue out - } - - // If we've iterated over all the known edges and we weren't - // able to find this specific one, then we'll fail. 
- return fmt.Errorf("did not find edge %v", txStr(chanPoint)) - } - - return nil - }, defaultTimeout) - require.NoError(t.t, err) - - return policies -} - -// assertChannelPolicy asserts that the passed node's known channel policy for -// the passed chanPoint is consistent with the expected policy values. -func assertChannelPolicy(t *harnessTest, node *lntest.HarnessNode, - advertisingNode string, expectedPolicy *lnrpc.RoutingPolicy, - chanPoints ...*lnrpc.ChannelPoint) { - - policies := getChannelPolicies(t, node, advertisingNode, chanPoints...) - for _, policy := range policies { - err := lntest.CheckChannelPolicy(policy, expectedPolicy) - if err != nil { - t.Fatalf(fmt.Sprintf("%v: %s", err.Error(), node)) - } - } -} - -func checkCommitmentMaturity( - forceClose *lnrpc.PendingChannelsResponse_ForceClosedChannel, - maturityHeight uint32, blocksTilMaturity int32) error { - - if forceClose.MaturityHeight != maturityHeight { - return fmt.Errorf("expected commitment maturity height to be "+ - "%d, found %d instead", maturityHeight, - forceClose.MaturityHeight) - } - if forceClose.BlocksTilMaturity != blocksTilMaturity { - return fmt.Errorf("expected commitment blocks til maturity to "+ - "be %d, found %d instead", blocksTilMaturity, - forceClose.BlocksTilMaturity) - } - - return nil -} - -// checkForceClosedChannelNumHtlcs verifies that a force closed channel has the -// proper number of htlcs. -func checkPendingChannelNumHtlcs( - forceClose *lnrpc.PendingChannelsResponse_ForceClosedChannel, - expectedNumHtlcs int) error { - - if len(forceClose.PendingHtlcs) != expectedNumHtlcs { - return fmt.Errorf("expected force closed channel to have %d "+ - "pending htlcs, found %d instead", expectedNumHtlcs, - len(forceClose.PendingHtlcs)) - } - - return nil -} - -// checkNumForceClosedChannels checks that a pending channel response has the -// expected number of force closed channels. -func checkNumForceClosedChannels(pendingChanResp *lnrpc.PendingChannelsResponse, - expectedNumChans int) error { - - if len(pendingChanResp.PendingForceClosingChannels) != expectedNumChans { - return fmt.Errorf("expected to find %d force closed channels, "+ - "got %d", expectedNumChans, - len(pendingChanResp.PendingForceClosingChannels)) - } - - return nil -} - -// checkNumWaitingCloseChannels checks that a pending channel response has the -// expected number of channels waiting for closing tx to confirm. -func checkNumWaitingCloseChannels(pendingChanResp *lnrpc.PendingChannelsResponse, - expectedNumChans int) error { - - if len(pendingChanResp.WaitingCloseChannels) != expectedNumChans { - return fmt.Errorf("expected to find %d channels waiting "+ - "closure, got %d", expectedNumChans, - len(pendingChanResp.WaitingCloseChannels)) - } - - return nil -} - -// checkPendingHtlcStageAndMaturity uniformly tests all pending htlc's belonging -// to a force closed channel, testing for the expected stage number, blocks till -// maturity, and the maturity height. 
-func checkPendingHtlcStageAndMaturity( - forceClose *lnrpc.PendingChannelsResponse_ForceClosedChannel, - stage, maturityHeight uint32, blocksTillMaturity int32) error { - - for _, pendingHtlc := range forceClose.PendingHtlcs { - if pendingHtlc.Stage != stage { - return fmt.Errorf("expected pending htlc to be stage "+ - "%d, found %d", stage, pendingHtlc.Stage) - } - if pendingHtlc.MaturityHeight != maturityHeight { - return fmt.Errorf("expected pending htlc maturity "+ - "height to be %d, instead has %d", - maturityHeight, pendingHtlc.MaturityHeight) - } - if pendingHtlc.BlocksTilMaturity != blocksTillMaturity { - return fmt.Errorf("expected pending htlc blocks til "+ - "maturity to be %d, instead has %d", - blocksTillMaturity, - pendingHtlc.BlocksTilMaturity) - } - } - - return nil -} - -// assertAmountSent generates a closure which queries listchannels for sndr and -// rcvr, and asserts that sndr sent amt satoshis, and that rcvr received amt -// satoshis. -// -// NOTE: This method assumes that each node only has one channel, and it is the -// channel used to send the payment. -func assertAmountSent(amt btcutil.Amount, sndr, rcvr *lntest.HarnessNode) func() error { - return func() error { - // Both channels should also have properly accounted from the - // amount that has been sent/received over the channel. - listReq := &lnrpc.ListChannelsRequest{} - ctxb := context.Background() - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - sndrListChannels, err := sndr.ListChannels(ctxt, listReq) - if err != nil { - return fmt.Errorf("unable to query for %s's channel "+ - "list: %v", sndr.Name(), err) - } - sndrSatoshisSent := sndrListChannels.Channels[0].TotalSatoshisSent - if sndrSatoshisSent != int64(amt) { - return fmt.Errorf("%s's satoshis sent is incorrect "+ - "got %v, expected %v", sndr.Name(), - sndrSatoshisSent, amt) - } - - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - rcvrListChannels, err := rcvr.ListChannels(ctxt, listReq) - if err != nil { - return fmt.Errorf("unable to query for %s's channel "+ - "list: %v", rcvr.Name(), err) - } - rcvrSatoshisReceived := rcvrListChannels.Channels[0].TotalSatoshisReceived - if rcvrSatoshisReceived != int64(amt) { - return fmt.Errorf("%s's satoshis received is "+ - "incorrect got %v, expected %v", rcvr.Name(), - rcvrSatoshisReceived, amt) - } - - return nil - } -} - -// assertLastHTLCError checks that the last sent HTLC of the last payment sent -// by the given node failed with the expected failure code. -func assertLastHTLCError(t *harnessTest, node *lntest.HarnessNode, - code lnrpc.Failure_FailureCode) { - - req := &lnrpc.ListPaymentsRequest{ - IncludeIncomplete: true, - } - ctxt, _ := context.WithTimeout(context.Background(), defaultTimeout) - paymentsResp, err := node.ListPayments(ctxt, req) - require.NoError(t.t, err, "error when obtaining payments") - - payments := paymentsResp.Payments - require.NotZero(t.t, len(payments), "no payments found") - - payment := payments[len(payments)-1] - htlcs := payment.Htlcs - require.NotZero(t.t, len(htlcs), "no htlcs") - - htlc := htlcs[len(htlcs)-1] - require.NotNil(t.t, htlc.Failure, "expected failure") - - require.Equal(t.t, code, htlc.Failure.Code, "unexpected failure code") -} - -// assertAmountPaid checks that the ListChannels command of the provided -// node list the total amount sent and received as expected for the -// provided channel. 
-func assertAmountPaid(t *harnessTest, channelName string, - node *lntest.HarnessNode, chanPoint wire.OutPoint, amountSent, - amountReceived int64) { - - ctxb := context.Background() - - checkAmountPaid := func() error { - listReq := &lnrpc.ListChannelsRequest{} - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - resp, err := node.ListChannels(ctxt, listReq) - if err != nil { - return fmt.Errorf("unable to for node's "+ - "channels: %v", err) - } - for _, channel := range resp.Channels { - if channel.ChannelPoint != chanPoint.String() { - continue - } - - if channel.TotalSatoshisSent != amountSent { - return fmt.Errorf("%v: incorrect amount"+ - " sent: %v != %v", channelName, - channel.TotalSatoshisSent, - amountSent) - } - if channel.TotalSatoshisReceived != - amountReceived { - - return fmt.Errorf("%v: incorrect amount"+ - " received: %v != %v", - channelName, - channel.TotalSatoshisReceived, - amountReceived) - } - - return nil - } - return fmt.Errorf("channel not found") - } - - // As far as HTLC inclusion in commitment transaction might be - // postponed we will try to check the balance couple of times, - // and then if after some period of time we receive wrong - // balance return the error. - // TODO(roasbeef): remove sleep after invoice notification hooks - // are in place - var timeover uint32 - go func() { - <-time.After(defaultTimeout) - atomic.StoreUint32(&timeover, 1) - }() - - for { - isTimeover := atomic.LoadUint32(&timeover) == 1 - if err := checkAmountPaid(); err != nil { - require.Falsef( - t.t, isTimeover, - "Check amount Paid failed: %v", err, - ) - } else { - break - } - } -} - -// assertNumPendingChannels checks that a PendingChannels response from the -// node reports the expected number of pending channels. -func assertNumPendingChannels(t *harnessTest, node *lntest.HarnessNode, - expWaitingClose, expPendingForceClose int) { - - ctxb := context.Background() - - var predErr error - err := wait.Predicate(func() bool { - pendingChansRequest := &lnrpc.PendingChannelsRequest{} - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - pendingChanResp, err := node.PendingChannels(ctxt, - pendingChansRequest) - if err != nil { - predErr = fmt.Errorf("unable to query for pending "+ - "channels: %v", err) - return false - } - n := len(pendingChanResp.WaitingCloseChannels) - if n != expWaitingClose { - predErr = fmt.Errorf("expected to find %d channels "+ - "waiting close, found %d", expWaitingClose, n) - return false - } - n = len(pendingChanResp.PendingForceClosingChannels) - if n != expPendingForceClose { - predErr = fmt.Errorf("expected to find %d channel "+ - "pending force close, found %d", expPendingForceClose, n) - return false - } - return true - }, defaultTimeout) - require.NoErrorf(t.t, err, "got err: %v", predErr) -} - -// assertNodeNumChannels polls the provided node's list channels rpc until it -// reaches the desired number of total channels. -func assertNodeNumChannels(t *harnessTest, node *lntest.HarnessNode, - numChannels int) { - - ctxb := context.Background() - - // Poll node for its list of channels. - req := &lnrpc.ListChannelsRequest{} - - var predErr error - pred := func() bool { - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - chanInfo, err := node.ListChannels(ctxt, req) - if err != nil { - predErr = fmt.Errorf("unable to query for node's "+ - "channels: %v", err) - return false - } - - // Return true if the query returned the expected number of - // channels. 
- num := len(chanInfo.Channels) - if num != numChannels { - predErr = fmt.Errorf("expected %v channels, got %v", - numChannels, num) - return false - } - return true - } - - require.NoErrorf( - t.t, wait.Predicate(pred, defaultTimeout), - "node has incorrect number of channels: %v", predErr, - ) -} - -// assertActiveHtlcs makes sure all the passed nodes have the _exact_ HTLCs -// matching payHashes on _all_ their channels. -func assertActiveHtlcs(nodes []*lntest.HarnessNode, payHashes ...[]byte) error { - ctxb := context.Background() - - req := &lnrpc.ListChannelsRequest{} - for _, node := range nodes { - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - nodeChans, err := node.ListChannels(ctxt, req) - if err != nil { - return fmt.Errorf("unable to get node chans: %v", err) - } - - for _, channel := range nodeChans.Channels { - // Record all payment hashes active for this channel. - htlcHashes := make(map[string]struct{}) - for _, htlc := range channel.PendingHtlcs { - h := hex.EncodeToString(htlc.HashLock) - _, ok := htlcHashes[h] - if ok { - return fmt.Errorf("duplicate HashLock") - } - htlcHashes[h] = struct{}{} - } - - // Channel should have exactly the payHashes active. - if len(payHashes) != len(htlcHashes) { - return fmt.Errorf("node [%s:%x] had %v "+ - "htlcs active, expected %v", - node.Cfg.Name, node.PubKey[:], - len(htlcHashes), len(payHashes)) - } - - // Make sure all the payHashes are active. - for _, payHash := range payHashes { - h := hex.EncodeToString(payHash) - if _, ok := htlcHashes[h]; ok { - continue - } - return fmt.Errorf("node [%s:%x] didn't have: "+ - "the payHash %v active", node.Cfg.Name, - node.PubKey[:], h) - } - } - } - - return nil -} - -func assertNumActiveHtlcsChanPoint(node *lntest.HarnessNode, - chanPoint wire.OutPoint, numHtlcs int) error { - - ctxb := context.Background() - - req := &lnrpc.ListChannelsRequest{} - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - nodeChans, err := node.ListChannels(ctxt, req) - if err != nil { - return err - } - - for _, channel := range nodeChans.Channels { - if channel.ChannelPoint != chanPoint.String() { - continue - } - - if len(channel.PendingHtlcs) != numHtlcs { - return fmt.Errorf("expected %v active HTLCs, got %v", - numHtlcs, len(channel.PendingHtlcs)) - } - return nil - } - - return fmt.Errorf("channel point %v not found", chanPoint) -} - -func assertNumActiveHtlcs(nodes []*lntest.HarnessNode, numHtlcs int) error { - ctxb := context.Background() - - req := &lnrpc.ListChannelsRequest{} - for _, node := range nodes { - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - nodeChans, err := node.ListChannels(ctxt, req) - if err != nil { - return err - } - - for _, channel := range nodeChans.Channels { - if len(channel.PendingHtlcs) != numHtlcs { - return fmt.Errorf("expected %v HTLCs, got %v", - numHtlcs, len(channel.PendingHtlcs)) - } - } - } - - return nil -} - -func assertSpendingTxInMempool(t *harnessTest, miner *rpcclient.Client, - timeout time.Duration, inputs ...wire.OutPoint) chainhash.Hash { - - tx := getSpendingTxInMempool(t, miner, timeout, inputs...) - return tx.TxHash() -} - -// getSpendingTxInMempool waits for a transaction spending the given outpoint to -// appear in the mempool and returns that tx in full. 
-func getSpendingTxInMempool(t *harnessTest, miner *rpcclient.Client, - timeout time.Duration, inputs ...wire.OutPoint) *wire.MsgTx { - - inputSet := make(map[wire.OutPoint]struct{}, len(inputs)) - breakTimeout := time.After(timeout) - ticker := time.NewTicker(50 * time.Millisecond) - defer ticker.Stop() - - for { - select { - case <-breakTimeout: - t.Fatalf("didn't find tx in mempool") - case <-ticker.C: - mempool, err := miner.GetRawMempool() - require.NoError(t.t, err, "unable to get mempool") - - if len(mempool) == 0 { - continue - } - - for _, txid := range mempool { - tx, err := miner.GetRawTransaction(txid) - require.NoError(t.t, err, "unable to fetch tx") - msgTx := tx.MsgTx() - - // Include the inputs again in case they were - // removed in a previous iteration. - for _, input := range inputs { - inputSet[input] = struct{}{} - } - - for _, txIn := range msgTx.TxIn { - input := txIn.PreviousOutPoint - delete(inputSet, input) - } - - if len(inputSet) > 0 { - // Missing input, check next transaction - // or try again. - continue - } - - // Transaction spends all expected inputs, - // return. - return msgTx - } - } - } -} - -// assertTxLabel is a helper function which finds a target tx in our set -// of transactions and checks that it has the desired label. -func assertTxLabel(t *harnessTest, node *lntest.HarnessNode, - targetTx, label string) { - - // List all transactions relevant to our wallet, and find the tx so that - // we can check the correct label has been set. - ctxb := context.Background() - ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout) - defer cancel() - - txResp, err := node.GetTransactions( - ctxt, &lnrpc.GetTransactionsRequest{}, - ) - require.NoError(t.t, err, "could not get transactions") - - // Find our transaction in the set of transactions returned and check - // its label. - for _, txn := range txResp.Transactions { - if txn.TxHash == targetTx { - require.Equal(t.t, label, txn.Label, "labels not match") - } - } -} - -// sendAndAssertSuccess sends the given payment requests and asserts that the -// payment completes successfully. -func sendAndAssertSuccess(t *harnessTest, node *lntest.HarnessNode, - req *routerrpc.SendPaymentRequest) *lnrpc.Payment { - - ctxb := context.Background() - ctx, cancel := context.WithTimeout(ctxb, defaultTimeout) - defer cancel() - - var result *lnrpc.Payment - err := wait.NoError(func() error { - stream, err := node.RouterClient.SendPaymentV2(ctx, req) - if err != nil { - return fmt.Errorf("unable to send payment: %v", err) - } - - result, err = getPaymentResult(stream) - if err != nil { - return fmt.Errorf("unable to get payment result: %v", - err) - } - - if result.Status != lnrpc.Payment_SUCCEEDED { - return fmt.Errorf("payment failed: %v", result.Status) - } - - return nil - }, defaultTimeout) - require.NoError(t.t, err) - - return result -} - -// sendAndAssertFailure sends the given payment requests and asserts that the -// payment fails with the expected reason. 
-func sendAndAssertFailure(t *harnessTest, node *lntest.HarnessNode, - req *routerrpc.SendPaymentRequest, - failureReason lnrpc.PaymentFailureReason) *lnrpc.Payment { - - ctx, cancel := context.WithTimeout(context.Background(), defaultTimeout) - defer cancel() - - stream, err := node.RouterClient.SendPaymentV2(ctx, req) - require.NoError(t.t, err, "unable to send payment") - - result, err := getPaymentResult(stream) - require.NoError(t.t, err, "unable to get payment result") - - require.Equal( - t.t, lnrpc.Payment_FAILED, result.Status, - "payment was expected to fail, but succeeded", - ) - - require.Equal( - t.t, failureReason, result.FailureReason, - "payment failureReason not matched", - ) - - return result -} - -// getPaymentResult reads a final result from the stream and returns it. -func getPaymentResult(stream routerrpc.Router_SendPaymentV2Client) ( - *lnrpc.Payment, error) { - - for { - payment, err := stream.Recv() - if err != nil { - return nil, err - } - - if payment.Status != lnrpc.Payment_IN_FLIGHT { - return payment, nil - } - } -} - -// assertNumUTXOs waits for the given number of UTXOs to be available or fails -// if that isn't the case before the default timeout. -func assertNumUTXOs(t *testing.T, node *lntest.HarnessNode, expectedUtxos int) { - ctxb := context.Background() - ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout) - defer cancel() - err := wait.NoError(func() error { - resp, err := node.ListUnspent( // nolint:staticcheck - ctxt, &lnrpc.ListUnspentRequest{ - MinConfs: 1, - MaxConfs: math.MaxInt32, - }, - ) - if err != nil { - return fmt.Errorf("error listing unspent: %v", err) - } - - if len(resp.Utxos) != expectedUtxos { - return fmt.Errorf("not enough UTXOs, got %d wanted %d", - len(resp.Utxos), expectedUtxos) - } - - return nil - }, defaultTimeout) - require.NoError(t, err, "wait for listunspent") -} - -// assertChannelPolicyUpdate checks that the required policy update has -// happened on the given node. -func assertChannelPolicyUpdate(t *testing.T, node *lntest.HarnessNode, - advertisingNode string, policy *lnrpc.RoutingPolicy, - chanPoint *lnrpc.ChannelPoint, includeUnannounced bool) { - - require.NoError( - t, node.WaitForChannelPolicyUpdate( - advertisingNode, policy, - chanPoint, includeUnannounced, - ), "error while waiting for channel update", - ) -} - -func transactionInWallet(node *lntest.HarnessNode, txid chainhash.Hash) bool { - txStr := txid.String() - - txResp, err := node.GetTransactions( - context.Background(), &lnrpc.GetTransactionsRequest{}, - ) - if err != nil { - return false - } - - for _, txn := range txResp.Transactions { - if txn.TxHash == txStr { - return true - } - } - - return false -} - -func assertTransactionInWallet(t *testing.T, node *lntest.HarnessNode, txID chainhash.Hash) { - t.Helper() - - err := wait.Predicate(func() bool { - return transactionInWallet(node, txID) - }, defaultTimeout) - require.NoError( - t, err, fmt.Sprintf("transaction %v not found in wallet", txID), - ) -} - -func assertTransactionNotInWallet(t *testing.T, node *lntest.HarnessNode, - txID chainhash.Hash) { - - t.Helper() - - err := wait.Predicate(func() bool { - return !transactionInWallet(node, txID) - }, defaultTimeout) - require.NoError( - t, err, fmt.Sprintf("transaction %v found in wallet", txID), - ) -} - -// assertNodeAnnouncement compares that two node announcements match. -func assertNodeAnnouncement(t *harnessTest, n1, n2 *lnrpc.NodeUpdate) { - // Alias should match. 
- require.Equal(t.t, n1.Alias, n2.Alias, "alias don't match") - - // Color should match. - require.Equal(t.t, n1.Color, n2.Color, "color don't match") - - // NodeAddresses should match. - require.Equal( - t.t, len(n1.NodeAddresses), len(n2.NodeAddresses), - "node addresses don't match", - ) - - addrs := make(map[string]struct{}, len(n1.NodeAddresses)) - for _, nodeAddr := range n1.NodeAddresses { - addrs[nodeAddr.Addr] = struct{}{} - } - - for _, nodeAddr := range n2.NodeAddresses { - if _, ok := addrs[nodeAddr.Addr]; !ok { - t.Fatalf("address %v not found in node announcement", - nodeAddr.Addr) - } - } -} diff --git a/itest/lnd_channel_force_close_test.go b/itest/lnd_channel_force_close_test.go index 0857751d4..03b1955ab 100644 --- a/itest/lnd_channel_force_close_test.go +++ b/itest/lnd_channel_force_close_test.go @@ -1087,3 +1087,71 @@ func assertReports(ht *lntemp.HarnessTest, hn *node.HarnessNode, require.Equal(ht, expected, res) } } + +// checkCommitmentMaturity checks that both the maturity height and blocks +// maturity height are as expected. +// +// NOTE: only used in current test file. +func checkCommitmentMaturity(forceClose lntemp.PendingForceClose, + maturityHeight uint32, blocksTilMaturity int32) error { + + if forceClose.MaturityHeight != maturityHeight { + return fmt.Errorf("expected commitment maturity height to be "+ + "%d, found %d instead", maturityHeight, + forceClose.MaturityHeight) + } + if forceClose.BlocksTilMaturity != blocksTilMaturity { + return fmt.Errorf("expected commitment blocks til maturity to "+ + "be %d, found %d instead", blocksTilMaturity, + forceClose.BlocksTilMaturity) + } + + return nil +} + +// checkForceClosedChannelNumHtlcs verifies that a force closed channel has the +// proper number of htlcs. +// +// NOTE: only used in current test file. +func checkPendingChannelNumHtlcs( + forceClose *lnrpc.PendingChannelsResponse_ForceClosedChannel, + expectedNumHtlcs int) error { + + if len(forceClose.PendingHtlcs) != expectedNumHtlcs { + return fmt.Errorf("expected force closed channel to have %d "+ + "pending htlcs, found %d instead", expectedNumHtlcs, + len(forceClose.PendingHtlcs)) + } + + return nil +} + +// checkPendingHtlcStageAndMaturity uniformly tests all pending htlc's belonging +// to a force closed channel, testing for the expected stage number, blocks till +// maturity, and the maturity height. +// +// NOTE: only used in current test file. 
+func checkPendingHtlcStageAndMaturity( + forceClose *lnrpc.PendingChannelsResponse_ForceClosedChannel, + stage, maturityHeight uint32, blocksTillMaturity int32) error { + + for _, pendingHtlc := range forceClose.PendingHtlcs { + if pendingHtlc.Stage != stage { + return fmt.Errorf("expected pending htlc to be stage "+ + "%d, found %d", stage, pendingHtlc.Stage) + } + if pendingHtlc.MaturityHeight != maturityHeight { + return fmt.Errorf("expected pending htlc maturity "+ + "height to be %d, instead has %d", + maturityHeight, pendingHtlc.MaturityHeight) + } + if pendingHtlc.BlocksTilMaturity != blocksTillMaturity { + return fmt.Errorf("expected pending htlc blocks til "+ + "maturity to be %d, instead has %d", + blocksTillMaturity, + pendingHtlc.BlocksTilMaturity) + } + } + + return nil +} diff --git a/itest/lnd_channel_policy_test.go b/itest/lnd_channel_policy_test.go index faac28c39..b605ea8b7 100644 --- a/itest/lnd_channel_policy_test.go +++ b/itest/lnd_channel_policy_test.go @@ -12,25 +12,10 @@ import ( "github.com/lightningnetwork/lnd/lnrpc/routerrpc" "github.com/lightningnetwork/lnd/lntemp" "github.com/lightningnetwork/lnd/lntemp/node" - "github.com/lightningnetwork/lnd/lntest" "github.com/lightningnetwork/lnd/lnwire" "github.com/stretchr/testify/require" ) -// assertPolicyUpdate checks that a given policy update has been received by a -// list of given nodes. -// TODO(yy): delete. -func assertPolicyUpdate(t *harnessTest, nodes []*lntest.HarnessNode, - advertisingNode string, policy *lnrpc.RoutingPolicy, - chanPoint *lnrpc.ChannelPoint) { - - for _, node := range nodes { - assertChannelPolicyUpdate( - t.t, node, advertisingNode, policy, chanPoint, false, - ) - } -} - // testUpdateChannelPolicy tests that policy updates made to a channel // gets propagated to other nodes in the network. func testUpdateChannelPolicy(ht *lntemp.HarnessTest) { diff --git a/itest/lnd_etcd_failover_test.go b/itest/lnd_etcd_failover_test.go index 90bd5fbbe..0488191f8 100644 --- a/itest/lnd_etcd_failover_test.go +++ b/itest/lnd_etcd_failover_test.go @@ -13,7 +13,7 @@ import ( "github.com/lightningnetwork/lnd/lncfg" "github.com/lightningnetwork/lnd/lnrpc" "github.com/lightningnetwork/lnd/lntemp" - "github.com/lightningnetwork/lnd/lntest" + "github.com/lightningnetwork/lnd/lntemp/node" "github.com/stretchr/testify/require" ) @@ -56,8 +56,8 @@ func testEtcdFailover(ht *lntemp.HarnessTest) { func testEtcdFailoverCase(ht *lntemp.HarnessTest, kill bool) { etcdCfg, cleanup, err := kvdb.StartEtcdTestBackend( - ht.T.TempDir(), uint16(lntest.NextAvailablePort()), - uint16(lntest.NextAvailablePort()), "", + ht.T.TempDir(), uint16(node.NextAvailablePort()), + uint16(node.NextAvailablePort()), "", ) require.NoError(ht, err, "Failed to start etcd instance") defer cleanup() diff --git a/itest/lnd_multi-hop_test.go b/itest/lnd_multi-hop_test.go index 8f738dc7c..287444ac9 100644 --- a/itest/lnd_multi-hop_test.go +++ b/itest/lnd_multi-hop_test.go @@ -15,7 +15,6 @@ import ( "github.com/lightningnetwork/lnd/lntemp" "github.com/lightningnetwork/lnd/lntemp/node" "github.com/lightningnetwork/lnd/lntemp/rpc" - "github.com/lightningnetwork/lnd/lntest" "github.com/lightningnetwork/lnd/lntypes" "github.com/lightningnetwork/lnd/routing" "github.com/stretchr/testify/require" @@ -52,102 +51,6 @@ var commitWithZeroConf = []struct { }, } -// waitForInvoiceAccepted waits until the specified invoice moved to the -// accepted state by the node. 
-func waitForInvoiceAccepted(t *harnessTest, node *lntest.HarnessNode, - payHash lntypes.Hash) { - - ctx, cancel := context.WithTimeout(context.Background(), defaultTimeout) - defer cancel() - invoiceUpdates, err := node.SubscribeSingleInvoice(ctx, - &invoicesrpc.SubscribeSingleInvoiceRequest{ - RHash: payHash[:], - }, - ) - if err != nil { - t.Fatalf("subscribe single invoice: %v", err) - } - - for { - update, err := invoiceUpdates.Recv() - if err != nil { - t.Fatalf("invoice update err: %v", err) - } - if update.State == lnrpc.Invoice_ACCEPTED { - break - } - } -} - -// checkPaymentStatus asserts that the given node list a payment with the given -// preimage has the expected status. -func checkPaymentStatus(node *lntest.HarnessNode, preimage lntypes.Preimage, - status lnrpc.Payment_PaymentStatus) error { - - ctxb := context.Background() - ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout) - defer cancel() - - req := &lnrpc.ListPaymentsRequest{ - IncludeIncomplete: true, - } - paymentsResp, err := node.ListPayments(ctxt, req) - if err != nil { - return fmt.Errorf("error when obtaining Alice payments: %v", - err) - } - - payHash := preimage.Hash() - var found bool - for _, p := range paymentsResp.Payments { - if p.PaymentHash != payHash.String() { - continue - } - - found = true - if p.Status != status { - return fmt.Errorf("expected payment status "+ - "%v, got %v", status, p.Status) - } - - switch status { - // If this expected status is SUCCEEDED, we expect the final preimage. - case lnrpc.Payment_SUCCEEDED: - if p.PaymentPreimage != preimage.String() { - return fmt.Errorf("preimage doesn't match: %v vs %v", - p.PaymentPreimage, preimage.String()) - } - - // Otherwise we expect an all-zero preimage. - default: - if p.PaymentPreimage != (lntypes.Preimage{}).String() { - return fmt.Errorf("expected zero preimage, got %v", - p.PaymentPreimage) - } - } - } - - if !found { - return fmt.Errorf("payment with payment hash %v not found "+ - "in response", payHash) - } - - return nil -} - -// assertAllTxesSpendFrom asserts that all txes in the list spend from the given -// tx. -func assertAllTxesSpendFrom(t *harnessTest, txes []*wire.MsgTx, - prevTxid chainhash.Hash) { - - for _, tx := range txes { - if tx.TxIn[0].PreviousOutPoint.Hash != prevTxid { - t.Fatalf("tx %v did not spend from %v", - tx.TxHash(), prevTxid) - } - } -} - // caseRunner defines a single test case runner. type caseRunner func(ht *lntemp.HarnessTest, alice, bob *node.HarnessNode, c lnrpc.CommitmentType, zeroConf bool) diff --git a/itest/lnd_network_test.go b/itest/lnd_network_test.go index 6763a952e..9244484a6 100644 --- a/itest/lnd_network_test.go +++ b/itest/lnd_network_test.go @@ -8,7 +8,6 @@ import ( "github.com/lightningnetwork/lnd/lnrpc" "github.com/lightningnetwork/lnd/lntemp" "github.com/lightningnetwork/lnd/lntemp/node" - "github.com/lightningnetwork/lnd/lntest" "github.com/lightningnetwork/lnd/lntest/wait" "github.com/stretchr/testify/require" ) @@ -105,7 +104,7 @@ func testReconnectAfterIPChange(ht *lntemp.HarnessTest) { // We derive an extra port for Dave, and we initialise his node with // the port advertised as `--externalip` arguments. - ip2 := lntest.NextAvailablePort() + ip2 := node.NextAvailablePort() // Create a new node, Dave, which will initialize a P2P port for him. daveArgs := []string{fmt.Sprintf("--externalip=127.0.0.1:%d", ip2)} @@ -184,7 +183,7 @@ func testReconnectAfterIPChange(ht *lntemp.HarnessTest) { // address. // Change Dave's listening port and restart. 
- dave.Cfg.P2PPort = lntest.NextAvailablePort() + dave.Cfg.P2PPort = node.NextAvailablePort() dave.Cfg.ExtraArgs = []string{ fmt.Sprintf( "--externalip=127.0.0.1:%d", dave.Cfg.P2PPort, diff --git a/itest/lnd_neutrino_test.go b/itest/lnd_neutrino_test.go index 902eac430..acf56f9d8 100644 --- a/itest/lnd_neutrino_test.go +++ b/itest/lnd_neutrino_test.go @@ -3,14 +3,13 @@ package itest import ( "github.com/lightningnetwork/lnd/lnrpc/neutrinorpc" "github.com/lightningnetwork/lnd/lntemp" - "github.com/lightningnetwork/lnd/lntest" "github.com/stretchr/testify/require" ) // testNeutrino checks that the neutrino sub-server can fetch compact // block filters, server status and connect to a connected peer. func testNeutrino(ht *lntemp.HarnessTest) { - if ht.ChainBackendName() != lntest.NeutrinoBackendName { + if !ht.IsNeutrinoBackend() { ht.Skipf("skipping test for non neutrino backends") } diff --git a/itest/lnd_psbt_test.go b/itest/lnd_psbt_test.go index 286abad6e..5cc0865bc 100644 --- a/itest/lnd_psbt_test.go +++ b/itest/lnd_psbt_test.go @@ -22,7 +22,6 @@ import ( "github.com/lightningnetwork/lnd/lnrpc/walletrpc" "github.com/lightningnetwork/lnd/lntemp" "github.com/lightningnetwork/lnd/lntemp/node" - "github.com/lightningnetwork/lnd/lntest" "github.com/stretchr/testify/require" ) @@ -1187,60 +1186,6 @@ func deriveInternalKey(ht *lntemp.HarnessTest, return keyDesc, parsedPubKey, fullDerivationPath } -// openChannelPsbt attempts to open a channel between srcNode and destNode with -// the passed channel funding parameters. If the passed context has a timeout, -// then if the timeout is reached before the channel pending notification is -// received, an error is returned. An error is returned if the expected step -// of funding the PSBT is not received from the source node. -func openChannelPsbt(ctx context.Context, srcNode, destNode *lntest.HarnessNode, - p lntest.OpenChannelParams) (lnrpc.Lightning_OpenChannelClient, []byte, - error) { - - // Wait until srcNode and destNode have the latest chain synced. - // Otherwise, we may run into a check within the funding manager that - // prevents any funding workflows from being kicked off if the chain - // isn't yet synced. - if err := srcNode.WaitForBlockchainSync(); err != nil { - return nil, nil, fmt.Errorf("unable to sync srcNode chain: %v", - err) - } - if err := destNode.WaitForBlockchainSync(); err != nil { - return nil, nil, fmt.Errorf("unable to sync destNode chain: %v", - err) - } - - // Send the request to open a channel to the source node now. This will - // open a long-lived stream where we'll receive status updates about the - // progress of the channel. - respStream, err := srcNode.OpenChannel(ctx, &lnrpc.OpenChannelRequest{ - NodePubkey: destNode.PubKey[:], - LocalFundingAmount: int64(p.Amt), - PushSat: int64(p.PushAmt), - Private: p.Private, - SpendUnconfirmed: p.SpendUnconfirmed, - MinHtlcMsat: int64(p.MinHtlc), - FundingShim: p.FundingShim, - }) - if err != nil { - return nil, nil, fmt.Errorf("unable to open channel between "+ - "source and dest: %v", err) - } - - // Consume the "PSBT funding ready" update. This waits until the node - // notifies us that the PSBT can now be funded. 
- resp, err := receiveChanUpdate(ctx, respStream) - if err != nil { - return nil, nil, fmt.Errorf("unable to consume channel update "+ - "message: %v", err) - } - upd, ok := resp.Update.(*lnrpc.OpenStatusUpdate_PsbtFund) - if !ok { - return nil, nil, fmt.Errorf("expected PSBT funding update, "+ - "instead got %v", resp) - } - return respStream, upd.PsbtFund.Psbt, nil -} - // receiveChanUpdate waits until a message is received on the stream or the // context is canceled. The context must have a timeout or must be canceled // in case no message is received, otherwise this function will block forever. diff --git a/itest/lnd_revocation_test.go b/itest/lnd_revocation_test.go index bff6e6760..0184d634b 100644 --- a/itest/lnd_revocation_test.go +++ b/itest/lnd_revocation_test.go @@ -13,7 +13,6 @@ import ( "github.com/lightningnetwork/lnd/lnrpc" "github.com/lightningnetwork/lnd/lnrpc/wtclientrpc" "github.com/lightningnetwork/lnd/lntemp" - "github.com/lightningnetwork/lnd/lntest" "github.com/lightningnetwork/lnd/lntest/wait" "github.com/stretchr/testify/require" ) @@ -158,7 +157,7 @@ func testRevokedCloseRetribution(ht *lntemp.HarnessTest) { // tx's confHeight+CSV-1 blocks and since we've already mined one that // included the justice tx we only need to mine extra DefaultCSV-2 // blocks to unlock it. - ht.MineBlocks(lntest.DefaultCSV - 2) + ht.MineBlocks(defaultCSV - 2) ht.AssertNumPendingForceClose(bob, 0) } diff --git a/itest/lnd_test.go b/itest/lnd_test.go index fa3606d0a..195890ac2 100644 --- a/itest/lnd_test.go +++ b/itest/lnd_test.go @@ -13,7 +13,7 @@ import ( "github.com/btcsuite/btcd/integration/rpctest" "github.com/lightningnetwork/lnd/lntemp" - "github.com/lightningnetwork/lnd/lntest" + "github.com/lightningnetwork/lnd/lntemp/node" "github.com/stretchr/testify/require" "google.golang.org/grpc/grpclog" ) @@ -62,7 +62,7 @@ func TestLightningNetworkDaemon(t *testing.T) { // Get the test cases to be run in this tranche. testCases, trancheIndex, trancheOffset := getTestCaseSplitTranche() - lntest.ApplyPortOffset(uint32(trancheIndex) * 1000) + node.ApplyPortOffset(uint32(trancheIndex) * 1000) // Create a simple fee service. feeService := lntemp.NewFeeService(t) @@ -188,7 +188,7 @@ func init() { // Before we start any node, we need to make sure that any btcd node // that is started through the RPC harness uses a unique port as well // to avoid any port collisions. - rpctest.ListenAddressGenerator = lntest.GenerateBtcdListenerAddresses + rpctest.ListenAddressGenerator = node.GenerateBtcdListenerAddresses // Swap out grpc's default logger with out fake logger which drops the // statements on the floor. diff --git a/itest/lnd_zero_conf_test.go b/itest/lnd_zero_conf_test.go index bf5de369a..61aadb708 100644 --- a/itest/lnd_zero_conf_test.go +++ b/itest/lnd_zero_conf_test.go @@ -16,7 +16,6 @@ import ( "github.com/lightningnetwork/lnd/lntemp" "github.com/lightningnetwork/lnd/lntemp/node" "github.com/lightningnetwork/lnd/lntemp/rpc" - "github.com/lightningnetwork/lnd/lntest" "github.com/lightningnetwork/lnd/lntest/wait" "github.com/lightningnetwork/lnd/lnwire" "github.com/stretchr/testify/require" @@ -893,7 +892,7 @@ func acceptChannel(t *testing.T, zeroConf bool, stream rpc.AcceptorClient) { // be deleted from the channel graph. This was previously the case due to logic // in the function DisconnectBlockAtHeight. 
func testZeroConfReorg(ht *lntemp.HarnessTest) { - if ht.ChainBackendName() == lntest.NeutrinoBackendName { + if ht.IsNeutrinoBackend() { ht.Skipf("skipping zero-conf reorg test for neutrino backend") } diff --git a/itest/test_harness.go b/itest/test_harness.go deleted file mode 100644 index 88f11ba08..000000000 --- a/itest/test_harness.go +++ /dev/null @@ -1,351 +0,0 @@ -package itest - -import ( - "bytes" - "context" - "flag" - "fmt" - "math" - "os" - "path/filepath" - "runtime" - "testing" - "time" - - "github.com/btcsuite/btcd/chaincfg" - "github.com/btcsuite/btcd/chaincfg/chainhash" - "github.com/btcsuite/btcd/rpcclient" - "github.com/btcsuite/btcd/wire" - "github.com/go-errors/errors" - "github.com/lightningnetwork/lnd/lnrpc" - "github.com/lightningnetwork/lnd/lnrpc/walletrpc" - "github.com/lightningnetwork/lnd/lntest" - "github.com/lightningnetwork/lnd/lntest/wait" - "github.com/stretchr/testify/require" -) - -var ( - harnessNetParams = &chaincfg.RegressionNetParams - - // lndExecutable is the full path to the lnd binary. - lndExecutable = flag.String( - "lndexec", itestLndBinary, "full path to lnd binary", - ) - - slowMineDelay = 20 * time.Millisecond -) - -const ( - testFeeBase = 1e+6 - defaultCSV = lntest.DefaultCSV - defaultTimeout = wait.DefaultTimeout - minerMempoolTimeout = wait.MinerMempoolTimeout - channelCloseTimeout = wait.ChannelCloseTimeout - itestLndBinary = "../../lnd-itest" - anchorSize = 330 - noFeeLimitMsat = math.MaxInt64 - - AddrTypeWitnessPubkeyHash = lnrpc.AddressType_WITNESS_PUBKEY_HASH - AddrTypeNestedPubkeyHash = lnrpc.AddressType_NESTED_PUBKEY_HASH - AddrTypeTaprootPubkey = lnrpc.AddressType_TAPROOT_PUBKEY -) - -// harnessTest wraps a regular testing.T providing enhanced error detection -// and propagation. All error will be augmented with a full stack-trace in -// order to aid in debugging. Additionally, any panics caused by active -// test cases will also be handled and represented as fatals. -type harnessTest struct { - t *testing.T - - // testCase is populated during test execution and represents the - // current test case. - testCase *testCase - - // lndHarness is a reference to the current network harness. Will be - // nil if not yet set up. - lndHarness *lntest.NetworkHarness -} - -// newHarnessTest creates a new instance of a harnessTest from a regular -// testing.T instance. -func newHarnessTest(t *testing.T, net *lntest.NetworkHarness) *harnessTest { - return &harnessTest{t, nil, net} -} - -// Skipf calls the underlying testing.T's Skip method, causing the current test -// to be skipped. -func (h *harnessTest) Skipf(format string, args ...interface{}) { - h.t.Skipf(format, args...) -} - -// Fatalf causes the current active test case to fail with a fatal error. All -// integration tests should mark test failures solely with this method due to -// the error stack traces it produces. -func (h *harnessTest) Fatalf(format string, a ...interface{}) { - if h.lndHarness != nil { - h.lndHarness.SaveProfilesPages(h.t) - } - - stacktrace := errors.Wrap(fmt.Sprintf(format, a...), 1).ErrorStack() - - if h.testCase != nil { - h.t.Fatalf("Failed: (%v): exited with error: \n"+ - "%v", h.testCase.name, stacktrace) - } else { - h.t.Fatalf("Error outside of test: %v", stacktrace) - } -} - -// RunTestCase executes a harness test case. Any errors or panics will be -// represented as fatal. 
-func (h *harnessTest) RunTestCase(testCase *testCase) { - h.testCase = testCase - defer func() { - h.testCase = nil - }() - - defer func() { - if err := recover(); err != nil { - description := errors.Wrap(err, 2).ErrorStack() - h.t.Fatalf("Failed: (%v) panicked with: \n%v", - h.testCase.name, description) - } - }() - - testCase.test(h.lndHarness, h) -} - -func (h *harnessTest) Logf(format string, args ...interface{}) { - h.t.Logf(format, args...) -} - -func (h *harnessTest) Log(args ...interface{}) { - h.t.Log(args...) -} - -func (h *harnessTest) getLndBinary() string { - binary := itestLndBinary - lndExec := "" - if lndExecutable != nil && *lndExecutable != "" { - lndExec = *lndExecutable - } - if lndExec == "" && runtime.GOOS == "windows" { - // Windows (even in a bash like environment like git bash as on - // Travis) doesn't seem to like relative paths to exe files... - currentDir, err := os.Getwd() - if err != nil { - h.Fatalf("unable to get working directory: %v", err) - } - targetPath := filepath.Join(currentDir, "../../lnd-itest.exe") - binary, err = filepath.Abs(targetPath) - if err != nil { - h.Fatalf("unable to get absolute path: %v", err) - } - } else if lndExec != "" { - binary = lndExec - } - - return binary -} - -type testCase struct { - name string - test func(net *lntest.NetworkHarness, t *harnessTest) -} - -// waitForTxInMempool polls until finding one transaction in the provided -// miner's mempool. An error is returned if *one* transaction isn't found within -// the given timeout. -func waitForTxInMempool(miner *rpcclient.Client, - timeout time.Duration) (*chainhash.Hash, error) { - - txs, err := waitForNTxsInMempool(miner, 1, timeout) - if err != nil { - return nil, err - } - - return txs[0], err -} - -// waitForNTxsInMempool polls until finding the desired number of transactions -// in the provided miner's mempool. An error is returned if this number is not -// met after the given timeout. -func waitForNTxsInMempool(miner *rpcclient.Client, n int, - timeout time.Duration) ([]*chainhash.Hash, error) { - - breakTimeout := time.After(timeout) - ticker := time.NewTicker(50 * time.Millisecond) - defer ticker.Stop() - - var err error - var mempool []*chainhash.Hash - for { - select { - case <-breakTimeout: - return nil, fmt.Errorf("wanted %v, found %v txs "+ - "in mempool: %v", n, len(mempool), mempool) - case <-ticker.C: - mempool, err = miner.GetRawMempool() - if err != nil { - return nil, err - } - - if len(mempool) == n { - return mempool, nil - } - } - } -} - -// mineBlocks mine 'num' of blocks and check that blocks are present in -// node blockchain. numTxs should be set to the number of transactions -// (excluding the coinbase) we expect to be included in the first mined block. -func mineBlocksFast(t *harnessTest, net *lntest.NetworkHarness, - num uint32, numTxs int) []*wire.MsgBlock { - - // If we expect transactions to be included in the blocks we'll mine, - // we wait here until they are seen in the miner's mempool. 
- var txids []*chainhash.Hash - var err error - if numTxs > 0 { - txids, err = waitForNTxsInMempool( - net.Miner.Client, numTxs, minerMempoolTimeout, - ) - if err != nil { - t.Fatalf("unable to find txns in mempool: %v", err) - } - } - - blocks := make([]*wire.MsgBlock, num) - - blockHashes, err := net.Miner.Client.Generate(num) - if err != nil { - t.Fatalf("unable to generate blocks: %v", err) - } - - for i, blockHash := range blockHashes { - block, err := net.Miner.Client.GetBlock(blockHash) - if err != nil { - t.Fatalf("unable to get block: %v", err) - } - - blocks[i] = block - } - - // Finally, assert that all the transactions were included in the first - // block. - for _, txid := range txids { - assertTxInBlock(t, blocks[0], txid) - } - - return blocks -} - -// mineBlocksSlow mines 'num' of blocks and checks that blocks are present in -// the mining node's blockchain. numTxs should be set to the number of -// transactions (excluding the coinbase) we expect to be included in the first -// mined block. Between each mined block an artificial delay is introduced to -// give all network participants time to catch up. -// -// NOTE: This function currently is just an alias for mineBlocksSlow. -func mineBlocks(t *harnessTest, net *lntest.NetworkHarness, - num uint32, numTxs int) []*wire.MsgBlock { - - return mineBlocksSlow(t, net, num, numTxs) -} - -// mineBlocksSlow mines 'num' of blocks and checks that blocks are present in -// the mining node's blockchain. numTxs should be set to the number of -// transactions (excluding the coinbase) we expect to be included in the first -// mined block. Between each mined block an artificial delay is introduced to -// give all network participants time to catch up. -func mineBlocksSlow(t *harnessTest, net *lntest.NetworkHarness, - num uint32, numTxs int) []*wire.MsgBlock { - - t.t.Helper() - - // If we expect transactions to be included in the blocks we'll mine, - // we wait here until they are seen in the miner's mempool. - var txids []*chainhash.Hash - var err error - if numTxs > 0 { - txids, err = waitForNTxsInMempool( - net.Miner.Client, numTxs, minerMempoolTimeout, - ) - require.NoError(t.t, err, "unable to find txns in mempool") - } - - blocks := make([]*wire.MsgBlock, num) - blockHashes := make([]*chainhash.Hash, 0, num) - - for i := uint32(0); i < num; i++ { - generatedHashes, err := net.Miner.Client.Generate(1) - require.NoError(t.t, err, "generate blocks") - blockHashes = append(blockHashes, generatedHashes...) - - time.Sleep(slowMineDelay) - } - - for i, blockHash := range blockHashes { - block, err := net.Miner.Client.GetBlock(blockHash) - require.NoError(t.t, err, "get blocks") - - blocks[i] = block - } - - // Finally, assert that all the transactions were included in the first - // block. 
- for _, txid := range txids { - assertTxInBlock(t, blocks[0], txid) - } - - return blocks -} - -func assertTxInBlock(t *harnessTest, block *wire.MsgBlock, txid *chainhash.Hash) { - for _, tx := range block.Transactions { - sha := tx.TxHash() - if bytes.Equal(txid[:], sha[:]) { - return - } - } - - t.Fatalf("tx was not included in block") -} - -func assertWalletUnspent(t *harnessTest, node *lntest.HarnessNode, - out *lnrpc.OutPoint, account string) { - - t.t.Helper() - - ctxb := context.Background() - err := wait.NoError(func() error { - ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout) - defer cancel() - unspent, err := node.WalletKitClient.ListUnspent( - ctxt, &walletrpc.ListUnspentRequest{ - Account: account, - }, - ) - if err != nil { - return err - } - - err = errors.New("tx with wanted txhash never found") - for _, utxo := range unspent.Utxos { - if !bytes.Equal(utxo.Outpoint.TxidBytes, out.TxidBytes) { - continue - } - - err = errors.New("wanted output is not a wallet utxo") - if utxo.Outpoint.OutputIndex != out.OutputIndex { - continue - } - - return nil - } - - return err - }, defaultTimeout) - require.NoError(t.t, err) -} diff --git a/itest/utils.go b/itest/utils.go index 4995a0beb..828ee04e3 100644 --- a/itest/utils.go +++ b/itest/utils.go @@ -1,211 +1,42 @@ package itest import ( - "context" - "crypto/rand" - "fmt" - "io" - "time" + "flag" + "math" "github.com/btcsuite/btcd/btcutil" - "github.com/btcsuite/btcd/rpcclient" - "github.com/btcsuite/btcd/wire" - "github.com/go-errors/errors" + "github.com/btcsuite/btcd/chaincfg" "github.com/lightningnetwork/lnd/input" "github.com/lightningnetwork/lnd/lnrpc" - "github.com/lightningnetwork/lnd/lnrpc/routerrpc" "github.com/lightningnetwork/lnd/lntemp" - "github.com/lightningnetwork/lnd/lntest" + "github.com/lightningnetwork/lnd/lntemp/node" "github.com/lightningnetwork/lnd/lntest/wait" "github.com/lightningnetwork/lnd/lnwallet" "github.com/lightningnetwork/lnd/lnwallet/chainfee" "github.com/lightningnetwork/lnd/lnwire" - "github.com/stretchr/testify/require" ) -// completePaymentRequests sends payments from a lightning node to complete all -// payment requests. If the awaitResponse parameter is true, this function -// does not return until all payments successfully complete without errors. -func completePaymentRequests(client lnrpc.LightningClient, - routerClient routerrpc.RouterClient, paymentRequests []string, - awaitResponse bool) error { +const ( + testFeeBase = 1e+6 + defaultCSV = node.DefaultCSV + defaultTimeout = wait.DefaultTimeout + itestLndBinary = "../../lnd-itest" + anchorSize = 330 + noFeeLimitMsat = math.MaxInt64 - ctxb := context.Background() - ctx, cancel := context.WithTimeout(ctxb, defaultTimeout) - defer cancel() + AddrTypeWitnessPubkeyHash = lnrpc.AddressType_WITNESS_PUBKEY_HASH + AddrTypeNestedPubkeyHash = lnrpc.AddressType_NESTED_PUBKEY_HASH + AddrTypeTaprootPubkey = lnrpc.AddressType_TAPROOT_PUBKEY +) - // We start by getting the current state of the client's channels. This - // is needed to ensure the payments actually have been committed before - // we return. - req := &lnrpc.ListChannelsRequest{} - listResp, err := client.ListChannels(ctx, req) - if err != nil { - return err - } +var ( + harnessNetParams = &chaincfg.RegressionNetParams - // send sends a payment and returns an error if it doesn't succeeded. 
- send := func(payReq string) error { - ctxc, cancel := context.WithCancel(ctx) - defer cancel() - - payStream, err := routerClient.SendPaymentV2( - ctxc, - &routerrpc.SendPaymentRequest{ - PaymentRequest: payReq, - TimeoutSeconds: 60, - FeeLimitMsat: noFeeLimitMsat, - }, - ) - if err != nil { - return err - } - - resp, err := getPaymentResult(payStream) - if err != nil { - return err - } - if resp.Status != lnrpc.Payment_SUCCEEDED { - return errors.New(resp.FailureReason) - } - - return nil - } - - // Launch all payments simultaneously. - results := make(chan error) - for _, payReq := range paymentRequests { - go func(payReq string) { - err := send(payReq) - if awaitResponse { - results <- err - } - }(payReq) - } - - // If awaiting a response, verify that all payments succeeded. - if awaitResponse { - for range paymentRequests { - err := <-results - if err != nil { - return err - } - } - return nil - } - - // We are not waiting for feedback in the form of a response, but we - // should still wait long enough for the server to receive and handle - // the send before cancelling the request. We wait for the number of - // updates to one of our channels has increased before we return. - err = wait.Predicate(func() bool { - newListResp, err := client.ListChannels(ctx, req) - if err != nil { - return false - } - - // If the number of open channels is now lower than before - // attempting the payments, it means one of the payments - // triggered a force closure (for example, due to an incorrect - // preimage). Return early since it's clear the payment was - // attempted. - if len(newListResp.Channels) < len(listResp.Channels) { - return true - } - - for _, c1 := range listResp.Channels { - for _, c2 := range newListResp.Channels { - if c1.ChannelPoint != c2.ChannelPoint { - continue - } - - // If this channel has an increased numbr of - // updates, we assume the payments are - // committed, and we can return. - if c2.NumUpdates > c1.NumUpdates { - return true - } - } - } - - return false - }, defaultTimeout) - if err != nil { - return err - } - - return nil -} - -// makeFakePayHash creates random pre image hash. -func makeFakePayHash(t *harnessTest) []byte { - randBuf := make([]byte, 32) - - if _, err := rand.Read(randBuf); err != nil { - t.Fatalf("internal error, cannot generate random string: %v", err) - } - - return randBuf -} - -// createPayReqs is a helper method that will create a slice of payment -// requests for the given node. -func createPayReqs(node *lntest.HarnessNode, paymentAmt btcutil.Amount, - numInvoices int) ([]string, [][]byte, []*lnrpc.Invoice, error) { - - payReqs := make([]string, numInvoices) - rHashes := make([][]byte, numInvoices) - invoices := make([]*lnrpc.Invoice, numInvoices) - for i := 0; i < numInvoices; i++ { - preimage := make([]byte, 32) - _, err := rand.Read(preimage) - if err != nil { - return nil, nil, nil, fmt.Errorf("unable to generate "+ - "preimage: %v", err) - } - invoice := &lnrpc.Invoice{ - Memo: "testing", - RPreimage: preimage, - Value: int64(paymentAmt), - } - ctxt, _ := context.WithTimeout( - context.Background(), defaultTimeout, - ) - resp, err := node.AddInvoice(ctxt, invoice) - if err != nil { - return nil, nil, nil, fmt.Errorf("unable to add "+ - "invoice: %v", err) - } - - // Set the payment address in the invoice so the caller can - // properly use it. 
- invoice.PaymentAddr = resp.PaymentAddr - - payReqs[i] = resp.PaymentRequest - rHashes[i] = resp.RHash - invoices[i] = invoice - } - return payReqs, rHashes, invoices, nil -} - -// getChanInfo is a helper method for getting channel info for a node's sole -// channel. -func getChanInfo(node *lntest.HarnessNode) (*lnrpc.Channel, error) { - ctxb := context.Background() - ctx, cancel := context.WithTimeout(ctxb, defaultTimeout) - defer cancel() - - req := &lnrpc.ListChannelsRequest{} - channelInfo, err := node.ListChannels(ctx, req) - if err != nil { - return nil, err - } - if len(channelInfo.Channels) != 1 { - return nil, fmt.Errorf("node should only have a single "+ - "channel, instead it has %v", len(channelInfo.Channels)) - } - - return channelInfo.Channels[0], nil -} + // lndExecutable is the full path to the lnd binary. + lndExecutable = flag.String( + "lndexec", itestLndBinary, "full path to lnd binary", + ) +) // commitTypeHasAnchors returns whether commitType uses anchor outputs. func commitTypeHasAnchors(commitType lnrpc.CommitmentType) bool { @@ -247,8 +78,9 @@ func calcStaticFee(c lnrpc.CommitmentType, numHTLCs int) btcutil.Amount { feePerKw = chainfee.SatPerKWeight( lntemp.DefaultFeeRateSatPerKw, ) - commitWeight = input.CommitWeight - anchors = btcutil.Amount(0) + commitWeight = input.CommitWeight + anchors = btcutil.Amount(0) + defaultSatPerVByte = lnwallet.DefaultAnchorsCommitMaxFeeRateSatPerVByte ) // The anchor commitment type is slightly heavier, and we must also add @@ -257,8 +89,7 @@ func calcStaticFee(c lnrpc.CommitmentType, numHTLCs int) btcutil.Amount { // channels. if commitTypeHasAnchors(c) { feePerKw = chainfee.SatPerKVByte( - lnwallet.DefaultAnchorsCommitMaxFeeRateSatPerVByte * 1000, - ).FeePerKWeight() + defaultSatPerVByte * 1000).FeePerKWeight() commitWeight = input.AnchorCommitWeight anchors = 2 * anchorSize } @@ -267,29 +98,6 @@ func calcStaticFee(c lnrpc.CommitmentType, numHTLCs int) btcutil.Amount { anchors } -// channelCommitType retrieves the active channel commitment type for the given -// chan point. -func channelCommitType(node *lntest.HarnessNode, - chanPoint *lnrpc.ChannelPoint) (lnrpc.CommitmentType, error) { - - ctxb := context.Background() - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - - req := &lnrpc.ListChannelsRequest{} - channels, err := node.ListChannels(ctxt, req) - if err != nil { - return 0, fmt.Errorf("listchannels failed: %v", err) - } - - for _, c := range channels.Channels { - if c.ChannelPoint == txStr(chanPoint) { - return c.CommitmentType, nil - } - } - - return 0, fmt.Errorf("channel point %v not found", chanPoint) -} - // calculateMaxHtlc re-implements the RequiredRemoteChannelReserve of the // funding manager's config, which corresponds to the maximum MaxHTLC value we // allow users to set when updating a channel policy. @@ -298,171 +106,3 @@ func calculateMaxHtlc(chanCap btcutil.Amount) uint64 { max := lnwire.NewMSatFromSatoshis(chanCap) - reserve return uint64(max) } - -// waitForNodeBlockHeight queries the node for its current block height until -// it reaches the passed height. 
-func waitForNodeBlockHeight(node *lntest.HarnessNode, height int32) error { - ctxb := context.Background() - ctx, cancel := context.WithTimeout(ctxb, defaultTimeout) - defer cancel() - - var predErr error - err := wait.Predicate(func() bool { - info, err := node.GetInfo(ctx, &lnrpc.GetInfoRequest{}) - if err != nil { - predErr = err - return false - } - - if int32(info.BlockHeight) != height { - predErr = fmt.Errorf("expected block height to "+ - "be %v, was %v", height, info.BlockHeight) - return false - } - return true - }, defaultTimeout) - if err != nil { - return predErr - } - return nil -} - -// getNTxsFromMempool polls until finding the desired number of transactions in -// the provided miner's mempool and returns the full transactions to the caller. -func getNTxsFromMempool(miner *rpcclient.Client, n int, - timeout time.Duration) ([]*wire.MsgTx, error) { - - txids, err := waitForNTxsInMempool(miner, n, timeout) - if err != nil { - return nil, err - } - - var txes []*wire.MsgTx - for _, txid := range txids { - tx, err := miner.GetRawTransaction(txid) - if err != nil { - return nil, err - } - txes = append(txes, tx.MsgTx()) - } - return txes, nil -} - -// getTxFee retrieves parent transactions and reconstructs the fee paid. -func getTxFee(miner *rpcclient.Client, tx *wire.MsgTx) (btcutil.Amount, error) { - var balance btcutil.Amount - for _, in := range tx.TxIn { - parentHash := in.PreviousOutPoint.Hash - rawTx, err := miner.GetRawTransaction(&parentHash) - if err != nil { - return 0, err - } - parent := rawTx.MsgTx() - balance += btcutil.Amount( - parent.TxOut[in.PreviousOutPoint.Index].Value, - ) - } - - for _, out := range tx.TxOut { - balance -= btcutil.Amount(out.Value) - } - - return balance, nil -} - -// channelSubscription houses the proxied update and error chans for a node's -// channel subscriptions. -type channelSubscription struct { - updateChan chan *lnrpc.ChannelEventUpdate - errChan chan error - quit chan struct{} -} - -// subscribeChannelNotifications subscribes to channel updates and launches a -// goroutine that forwards these to the returned channel. -func subscribeChannelNotifications(ctxb context.Context, t *harnessTest, - node *lntest.HarnessNode) channelSubscription { - - // We'll first start by establishing a notification client which will - // send us notifications upon channels becoming active, inactive or - // closed. - req := &lnrpc.ChannelEventSubscription{} - ctx, cancelFunc := context.WithCancel(ctxb) - - chanUpdateClient, err := node.SubscribeChannelEvents(ctx, req) - if err != nil { - t.Fatalf("unable to create channel update client: %v", err) - } - - // We'll launch a goroutine that will be responsible for proxying all - // notifications recv'd from the client into the channel below. 
- errChan := make(chan error, 1) - quit := make(chan struct{}) - chanUpdates := make(chan *lnrpc.ChannelEventUpdate, 20) - go func() { - defer cancelFunc() - - for { - select { - case <-quit: - return - default: - chanUpdate, err := chanUpdateClient.Recv() - select { - case <-quit: - return - default: - } - - if err == io.EOF { - return - } else if err != nil { - select { - case errChan <- err: - case <-quit: - } - return - } - - select { - case chanUpdates <- chanUpdate: - case <-quit: - return - } - } - } - }() - - return channelSubscription{ - updateChan: chanUpdates, - errChan: errChan, - quit: quit, - } -} - -// findTxAtHeight gets all of the transactions that a node's wallet has a record -// of at the target height, and finds and returns the tx with the target txid, -// failing if it is not found. -func findTxAtHeight(t *harnessTest, height int32, - target string, node *lntest.HarnessNode) *lnrpc.Transaction { - - ctxb := context.Background() - ctx, cancel := context.WithTimeout(ctxb, defaultTimeout) - defer cancel() - - txns, err := node.LightningClient.GetTransactions( - ctx, &lnrpc.GetTransactionsRequest{ - StartHeight: height, - EndHeight: height, - }, - ) - require.NoError(t.t, err, "could not get transactions") - - for _, tx := range txns.Transactions { - if tx.TxHash == target { - return tx - } - } - - return nil -} diff --git a/lntemp/fee_service.go b/lntemp/fee_service.go index aac4cbd1d..e89809c71 100644 --- a/lntemp/fee_service.go +++ b/lntemp/fee_service.go @@ -9,7 +9,7 @@ import ( "sync" "testing" - "github.com/lightningnetwork/lnd/lntest" + "github.com/lightningnetwork/lnd/lntemp/node" "github.com/lightningnetwork/lnd/lnwallet/chainfee" "github.com/stretchr/testify/require" ) @@ -61,7 +61,7 @@ var _ WebFeeService = (*FeeService)(nil) // Start spins up a go-routine to serve fee estimates. func NewFeeService(t *testing.T) *FeeService { - port := lntest.NextAvailablePort() + port := node.NextAvailablePort() f := FeeService{ T: t, url: fmt.Sprintf( diff --git a/lntemp/harness.go b/lntemp/harness.go index cc11ec9ae..9a656723c 100644 --- a/lntemp/harness.go +++ b/lntemp/harness.go @@ -19,7 +19,6 @@ import ( "github.com/lightningnetwork/lnd/lnrpc/walletrpc" "github.com/lightningnetwork/lnd/lntemp/node" "github.com/lightningnetwork/lnd/lntemp/rpc" - "github.com/lightningnetwork/lnd/lntest" "github.com/lightningnetwork/lnd/lntest/wait" "github.com/lightningnetwork/lnd/lnwallet/chainfee" "github.com/lightningnetwork/lnd/lnwire" @@ -100,7 +99,7 @@ type HarnessTest struct { // NewHarnessTest creates a new instance of a harnessTest from a regular // testing.T instance. func NewHarnessTest(t *testing.T, lndBinary string, feeService WebFeeService, - dbBackend lntest.DatabaseBackend) *HarnessTest { + dbBackend node.DatabaseBackend) *HarnessTest { // Create the run context. ctxt, cancel := context.WithCancel(context.Background()) @@ -636,7 +635,7 @@ func (h *HarnessTest) NewNodeEtcd(name string, etcdCfg *etcd.Config, leaderSessionTTL int) *node.HarnessNode { // We don't want to use the embedded etcd instance. - h.manager.dbBackend = lntest.BackendBbolt + h.manager.dbBackend = node.BackendBbolt extraArgs := node.ExtraArgsEtcd( etcdCfg, name, cluster, leaderSessionTTL, @@ -659,7 +658,7 @@ func (h *HarnessTest) NewNodeWithSeedEtcd(name string, etcdCfg *etcd.Config, leaderSessionTTL int) (*node.HarnessNode, []string, []byte) { // We don't want to use the embedded etcd instance. 
- h.manager.dbBackend = lntest.BackendBbolt + h.manager.dbBackend = node.BackendBbolt // Create a request to generate a new aezeed. The new seed will have // the same password as the internal wallet. @@ -1402,7 +1401,7 @@ func (h *HarnessTest) CleanupForceClose(hn *node.HarnessNode, // // The commit sweep resolver is able to broadcast the sweep tx up to // one block before the CSV elapses, so wait until defaulCSV-1. - h.MineBlocks(lntest.DefaultCSV - 1) + h.MineBlocks(node.DefaultCSV - 1) // The node should now sweep the funds, clean up by mining the sweeping // tx. diff --git a/lntemp/harness_miner.go b/lntemp/harness_miner.go index 9568a2c86..851423438 100644 --- a/lntemp/harness_miner.go +++ b/lntemp/harness_miner.go @@ -18,7 +18,7 @@ import ( "github.com/btcsuite/btcd/integration/rpctest" "github.com/btcsuite/btcd/rpcclient" "github.com/btcsuite/btcd/wire" - "github.com/lightningnetwork/lnd/lntest" + "github.com/lightningnetwork/lnd/lntemp/node" "github.com/lightningnetwork/lnd/lntest/wait" "github.com/stretchr/testify/require" ) @@ -73,8 +73,8 @@ func newMiner(ctxb context.Context, t *testing.T, minerDirName, logFilename string) *HarnessMiner { handler := &rpcclient.NotificationHandlers{} - btcdBinary := lntest.GetBtcdBinary() - baseLogPath := fmt.Sprintf("%s/%s", lntest.GetLogDir(), minerDirName) + btcdBinary := node.GetBtcdBinary() + baseLogPath := fmt.Sprintf("%s/%s", node.GetLogDir(), minerDirName) args := []string{ "--rejectnonstd", diff --git a/lntemp/harness_node_manager.go b/lntemp/harness_node_manager.go index 1ee502354..4ed365a33 100644 --- a/lntemp/harness_node_manager.go +++ b/lntemp/harness_node_manager.go @@ -9,7 +9,6 @@ import ( "github.com/lightningnetwork/lnd/lnrpc" "github.com/lightningnetwork/lnd/lntemp/node" - "github.com/lightningnetwork/lnd/lntest" "github.com/lightningnetwork/lnd/lntest/wait" ) @@ -30,7 +29,7 @@ type nodeManager struct { lndBinary string // dbBackend sets the database backend to use. - dbBackend lntest.DatabaseBackend + dbBackend node.DatabaseBackend // activeNodes is a map of all running nodes, format: // {pubkey: *HarnessNode}. @@ -50,7 +49,7 @@ type nodeManager struct { // newNodeManager creates a new node manager instance. func newNodeManager(lndBinary string, - dbBackend lntest.DatabaseBackend) *nodeManager { + dbBackend node.DatabaseBackend) *nodeManager { return &nodeManager{ lndBinary: lndBinary, diff --git a/lntemp/harness_setup.go b/lntemp/harness_setup.go index 8a08c03c3..4c14a4479 100644 --- a/lntemp/harness_setup.go +++ b/lntemp/harness_setup.go @@ -6,6 +6,7 @@ import ( "testing" "github.com/btcsuite/btcd/integration/rpctest" + "github.com/lightningnetwork/lnd/lntemp/node" "github.com/lightningnetwork/lnd/lntest" "github.com/stretchr/testify/require" ) @@ -23,7 +24,7 @@ func SetupHarness(t *testing.T, binaryPath, dbBackendName string, t.Log("Setting up HarnessTest...") // Parse testing flags that influence our test execution. - logDir := lntest.GetLogDir() + logDir := node.GetLogDir() require.NoError(t, os.MkdirAll(logDir, 0700), "create log dir failed") // Parse database backend @@ -82,7 +83,7 @@ func prepareMiner(ctxt context.Context, t *testing.T) *HarnessMiner { // prepareChainBackend creates a new chain backend. 
func prepareChainBackend(t *testing.T, - minerAddr string) (lntest.BackendConfig, func()) { + minerAddr string) (node.BackendConfig, func()) { chainBackend, cleanUp, err := lntest.NewBackend( minerAddr, harnessNetParams, @@ -96,21 +97,21 @@ func prepareChainBackend(t *testing.T, // prepareDbBackend parses a DatabaseBackend based on the name given. func prepareDbBackend(t *testing.T, - dbBackendName string) lntest.DatabaseBackend { + dbBackendName string) node.DatabaseBackend { - var dbBackend lntest.DatabaseBackend + var dbBackend node.DatabaseBackend switch dbBackendName { case "bbolt": - dbBackend = lntest.BackendBbolt + dbBackend = node.BackendBbolt case "etcd": - dbBackend = lntest.BackendEtcd + dbBackend = node.BackendEtcd case "postgres": - dbBackend = lntest.BackendPostgres + dbBackend = node.BackendPostgres case "sqlite": - dbBackend = lntest.BackendSqlite + dbBackend = node.BackendSqlite default: require.Fail(t, "unknown db backend") diff --git a/lntemp/node/config.go b/lntemp/node/config.go index ef00d1bfa..e398cad48 100644 --- a/lntemp/node/config.go +++ b/lntemp/node/config.go @@ -1,14 +1,18 @@ package node import ( + "flag" "fmt" + "io" + "net" + "os" "path" "path/filepath" + "sync/atomic" "github.com/btcsuite/btcd/chaincfg" "github.com/lightningnetwork/lnd/chanbackup" "github.com/lightningnetwork/lnd/kvdb/etcd" - "github.com/lightningnetwork/lnd/lntest" "github.com/lightningnetwork/lnd/lntest/wait" ) @@ -16,6 +20,49 @@ const ( // ListenerFormat is the format string that is used to generate local // listener addresses. ListenerFormat = "127.0.0.1:%d" + + // DefaultCSV is the CSV delay (remotedelay) we will start our test + // nodes with. + DefaultCSV = 4 + + // defaultNodePort is the start of the range for listening ports of + // harness nodes. Ports are monotonically increasing starting from this + // number and are determined by the results of NextAvailablePort(). + defaultNodePort = 5555 +) + +var ( + // lastPort is the last port determined to be free for use by a new + // node. It should be used atomically. + lastPort uint32 = defaultNodePort + + // logOutput is a flag that can be set to append the output from the + // seed nodes to log files. + logOutput = flag.Bool("logoutput", false, + "log output from node n to file output-n.log") + + // logSubDir is the default directory where the logs are written to if + // logOutput is true. + logSubDir = flag.String("logdir", ".", "default dir to write logs to") + + // goroutineDump is a flag that can be set to dump the active + // goroutines of test nodes on failure. + goroutineDump = flag.Bool("goroutinedump", false, + "write goroutine dump from node n to file pprof-n.log") + + // btcdExecutable is the full path to the btcd binary. + btcdExecutable = flag.String( + "btcdexec", "", "full path to btcd binary", + ) +) + +type DatabaseBackend int + +const ( + BackendBbolt DatabaseBackend = iota + BackendEtcd + BackendPostgres + BackendSqlite ) // Option is a function for updating a node's configuration. @@ -74,7 +121,7 @@ type BaseNodeConfig struct { FeeURL string - DbBackend lntest.DatabaseBackend + DbBackend DatabaseBackend PostgresDsn string // NodeID is a unique ID used to identify the node. @@ -127,16 +174,16 @@ func (cfg BaseNodeConfig) ChanBackupPath() string { // current lightning network test. 
func (cfg *BaseNodeConfig) GenerateListeningPorts() { if cfg.P2PPort == 0 { - cfg.P2PPort = lntest.NextAvailablePort() + cfg.P2PPort = NextAvailablePort() } if cfg.RPCPort == 0 { - cfg.RPCPort = lntest.NextAvailablePort() + cfg.RPCPort = NextAvailablePort() } if cfg.RESTPort == 0 { - cfg.RESTPort = lntest.NextAvailablePort() + cfg.RESTPort = NextAvailablePort() } if cfg.ProfilePort == 0 { - cfg.ProfilePort = lntest.NextAvailablePort() + cfg.ProfilePort = NextAvailablePort() } } @@ -170,8 +217,7 @@ func (cfg *BaseNodeConfig) GenArgs() []string { "--accept-keysend", "--keep-failed-payment-attempts", fmt.Sprintf("--db.batch-commit-interval=%v", commitInterval), - fmt.Sprintf("--bitcoin.defaultremotedelay=%v", - lntest.DefaultCSV), + fmt.Sprintf("--bitcoin.defaultremotedelay=%v", DefaultCSV), fmt.Sprintf("--rpclisten=%v", cfg.RPCAddr()), fmt.Sprintf("--restlisten=%v", cfg.RESTAddr()), fmt.Sprintf("--restcors=https://%v", cfg.RESTAddr()), @@ -200,19 +246,19 @@ func (cfg *BaseNodeConfig) GenArgs() []string { } switch cfg.DbBackend { - case lntest.BackendEtcd: + case BackendEtcd: args = append(args, "--db.backend=etcd") args = append(args, "--db.etcd.embedded") args = append( args, fmt.Sprintf( "--db.etcd.embedded_client_port=%v", - lntest.NextAvailablePort(), + NextAvailablePort(), ), ) args = append( args, fmt.Sprintf( "--db.etcd.embedded_peer_port=%v", - lntest.NextAvailablePort(), + NextAvailablePort(), ), ) args = append( @@ -222,11 +268,11 @@ func (cfg *BaseNodeConfig) GenArgs() []string { ), ) - case lntest.BackendPostgres: + case BackendPostgres: args = append(args, "--db.backend=postgres") args = append(args, "--db.postgres.dsn="+cfg.PostgresDsn) - case lntest.BackendSqlite: + case BackendSqlite: args = append(args, "--db.backend=sqlite") args = append(args, fmt.Sprintf("--db.sqlite.busytimeout=%v", wait.SqliteBusyTimeout)) @@ -273,3 +319,85 @@ func ExtraArgsEtcd(etcdCfg *etcd.Config, name string, cluster bool, return extraArgs } + +// NextAvailablePort returns the first port that is available for listening by +// a new node. It panics if no port is found and the maximum available TCP port +// is reached. +func NextAvailablePort() int { + port := atomic.AddUint32(&lastPort, 1) + for port < 65535 { + // If there are no errors while attempting to listen on this + // port, close the socket and return it as available. While it + // could be the case that some other process picks up this port + // between the time the socket is closed and it's reopened in + // the harness node, in practice in CI servers this seems much + // less likely than simply some other process already being + // bound at the start of the tests. + addr := fmt.Sprintf(ListenerFormat, port) + l, err := net.Listen("tcp4", addr) + if err == nil { + err := l.Close() + if err == nil { + return int(port) + } + } + port = atomic.AddUint32(&lastPort, 1) + } + + // No ports available? Must be a mistake. + panic("no ports available for listening") +} + +// GetLogDir returns the passed --logdir flag or the default value if it wasn't +// set. +func GetLogDir() string { + if logSubDir != nil && *logSubDir != "" { + return *logSubDir + } + return "." +} + +// CopyFile copies the file src to dest. 
+func CopyFile(dest, src string) error { + s, err := os.Open(src) + if err != nil { + return err + } + defer s.Close() + + d, err := os.Create(dest) + if err != nil { + return err + } + + if _, err := io.Copy(d, s); err != nil { + d.Close() + return err + } + + return d.Close() +} + +// GetBtcdBinary returns the full path to the binary of the custom built btcd +// executable or an empty string if none is set. +func GetBtcdBinary() string { + if btcdExecutable != nil { + return *btcdExecutable + } + + return "" +} + +// GenerateBtcdListenerAddresses is a function that returns two listener +// addresses with unique ports and should be used to overwrite rpctest's +// default generator which is prone to use colliding ports. +func GenerateBtcdListenerAddresses() (string, string) { + return fmt.Sprintf(ListenerFormat, NextAvailablePort()), + fmt.Sprintf(ListenerFormat, NextAvailablePort()) +} + +// ApplyPortOffset adds the given offset to the lastPort variable, making it +// possible to run the tests in parallel without colliding on the same ports. +func ApplyPortOffset(offset uint32) { + _ = atomic.AddUint32(&lastPort, offset) +} diff --git a/lntemp/node/harness_node.go b/lntemp/node/harness_node.go index c86e30af8..ebba81e84 100644 --- a/lntemp/node/harness_node.go +++ b/lntemp/node/harness_node.go @@ -19,7 +19,6 @@ import ( "github.com/jackc/pgx/v4/pgxpool" "github.com/lightningnetwork/lnd/lnrpc" "github.com/lightningnetwork/lnd/lntemp/rpc" - "github.com/lightningnetwork/lnd/lntest" "github.com/lightningnetwork/lnd/lntest/wait" "github.com/lightningnetwork/lnd/macaroons" "google.golang.org/grpc" @@ -117,7 +116,7 @@ func NewHarnessNode(t *testing.T, cfg *BaseNodeConfig) (*HarnessNode, error) { // Create temporary database. var dbName string - if cfg.DbBackend == lntest.BackendPostgres { + if cfg.DbBackend == BackendPostgres { var err error dbName, err = createTempPgDb() if err != nil { @@ -383,7 +382,7 @@ func (hn *HarnessNode) StartLndCmd(ctxb context.Context) error { // If the logoutput flag is passed, redirect output from the nodes to // log files. - if *lntest.LogOutput { + if *logOutput { err := addLogFile(hn) if err != nil { return err @@ -918,10 +917,8 @@ func getFinalizedLogFilePrefix(hn *HarnessNode) string { hn.PubKey[:logPubKeyBytes], ) - return fmt.Sprintf("%s/%d-%s-%s-%s", - lntest.GetLogDir(), hn.Cfg.NodeID, - hn.Cfg.LogFilenamePrefix, - hn.Cfg.Name, pubKeyHex) + return fmt.Sprintf("%s/%d-%s-%s-%s", GetLogDir(), hn.Cfg.NodeID, + hn.Cfg.LogFilenamePrefix, hn.Cfg.Name, pubKeyHex) } // finalizeLogfile makes sure the log file cleanup function is initialized, @@ -935,7 +932,7 @@ func finalizeLogfile(hn *HarnessNode) { hn.logFile.Close() // If logoutput flag is not set, return early. - if !*lntest.LogOutput { + if !*logOutput { return } @@ -948,7 +945,7 @@ func finalizeLogfile(hn *HarnessNode) { // finalizeEtcdLog saves the etcd log files when test ends. func finalizeEtcdLog(hn *HarnessNode) { // Exit early if this is not etcd backend. 
- if hn.Cfg.DbBackend != lntest.BackendEtcd { + if hn.Cfg.DbBackend != BackendEtcd { return } @@ -964,7 +961,7 @@ func finalizeEtcdLog(hn *HarnessNode) { func addLogFile(hn *HarnessNode) error { var fileName string - dir := lntest.GetLogDir() + dir := GetLogDir() fileName = fmt.Sprintf("%s/%d-%s-%s-%s.log", dir, hn.Cfg.NodeID, hn.Cfg.LogFilenamePrefix, hn.Cfg.Name, hex.EncodeToString(hn.PubKey[:logPubKeyBytes])) @@ -1028,7 +1025,7 @@ func copyAll(dstDir, srcDir string) error { if err != nil { return err } - } else if err := lntest.CopyFile(dstPath, srcPath); err != nil { + } else if err := CopyFile(dstPath, srcPath); err != nil { return err } } diff --git a/lntemp/utils.go b/lntemp/utils.go index d9946f365..b1a5f00d4 100644 --- a/lntemp/utils.go +++ b/lntemp/utils.go @@ -10,15 +10,14 @@ import ( "github.com/btcsuite/btcd/wire" "github.com/lightningnetwork/lnd/lnrpc" - "github.com/lightningnetwork/lnd/lntest" + "github.com/lightningnetwork/lnd/lntest/wait" ) const ( // NeutrinoBackendName is the name of the neutrino backend. NeutrinoBackendName = "neutrino" - // TODO(yy): delete. - DefaultTimeout = lntest.DefaultTimeout + DefaultTimeout = wait.DefaultTimeout // noFeeLimitMsat is used to specify we will put no requirements on fee // charged when choosing a route path. diff --git a/lntest/bitcoind_common.go b/lntest/bitcoind_common.go index d9bf102f6..320d89345 100644 --- a/lntest/bitcoind_common.go +++ b/lntest/bitcoind_common.go @@ -14,6 +14,7 @@ import ( "github.com/btcsuite/btcd/chaincfg" "github.com/btcsuite/btcd/rpcclient" + "github.com/lightningnetwork/lnd/lntemp/node" ) // logDirPattern is the pattern of the name of the temporary log directory. @@ -37,7 +38,7 @@ type BitcoindBackendConfig struct { // A compile time assertion to ensure BitcoindBackendConfig meets the // BackendConfig interface. -var _ BackendConfig = (*BitcoindBackendConfig)(nil) +var _ node.BackendConfig = (*BitcoindBackendConfig)(nil) // GenArgs returns the arguments needed to be passed to LND at startup for // using this node as a chain backend. @@ -89,7 +90,7 @@ func (b BitcoindBackendConfig) Name() string { func newBackend(miner string, netParams *chaincfg.Params, extraArgs []string, rpcPolling bool) (*BitcoindBackendConfig, func() error, error) { - baseLogDir := fmt.Sprintf(logDirPattern, GetLogDir()) + baseLogDir := fmt.Sprintf(logDirPattern, node.GetLogDir()) if netParams != &chaincfg.RegressionNetParams { return nil, nil, fmt.Errorf("only regtest supported") } @@ -109,10 +110,12 @@ func newBackend(miner string, netParams *chaincfg.Params, extraArgs []string, fmt.Errorf("unable to create temp directory: %v", err) } - zmqBlockAddr := fmt.Sprintf("tcp://127.0.0.1:%d", NextAvailablePort()) - zmqTxAddr := fmt.Sprintf("tcp://127.0.0.1:%d", NextAvailablePort()) - rpcPort := NextAvailablePort() - p2pPort := NextAvailablePort() + zmqBlockAddr := fmt.Sprintf("tcp://127.0.0.1:%d", + node.NextAvailablePort()) + zmqTxAddr := fmt.Sprintf("tcp://127.0.0.1:%d", + node.NextAvailablePort()) + rpcPort := node.NextAvailablePort() + p2pPort := node.NextAvailablePort() cmdArgs := []string{ "-datadir=" + tempBitcoindDir, @@ -146,9 +149,9 @@ func newBackend(miner string, netParams *chaincfg.Params, extraArgs []string, // After shutting down the chain backend, we'll make a copy of // the log file before deleting the temporary log dir. 
logDestination := fmt.Sprintf( - "%s/output_bitcoind_chainbackend.log", GetLogDir(), + "%s/output_bitcoind_chainbackend.log", node.GetLogDir(), ) - err := CopyFile(logDestination, logFile) + err := node.CopyFile(logDestination, logFile) if err != nil { errStr += fmt.Sprintf("unable to copy file: %v\n", err) } diff --git a/lntest/btcd.go b/lntest/btcd.go index 200faa9bf..817936709 100644 --- a/lntest/btcd.go +++ b/lntest/btcd.go @@ -15,6 +15,7 @@ import ( "github.com/btcsuite/btcd/chaincfg" "github.com/btcsuite/btcd/integration/rpctest" "github.com/btcsuite/btcd/rpcclient" + "github.com/lightningnetwork/lnd/lntemp/node" ) // logDirPattern is the pattern of the name of the temporary log directory. @@ -41,7 +42,7 @@ type BtcdBackendConfig struct { // A compile time assertion to ensure BtcdBackendConfig meets the BackendConfig // interface. -var _ BackendConfig = (*BtcdBackendConfig)(nil) +var _ node.BackendConfig = (*BtcdBackendConfig)(nil) // GenArgs returns the arguments needed to be passed to LND at startup for // using this node as a chain backend. @@ -83,7 +84,7 @@ func (b BtcdBackendConfig) Name() string { func NewBackend(miner string, netParams *chaincfg.Params) ( *BtcdBackendConfig, func() error, error) { - baseLogDir := fmt.Sprintf(logDirPattern, GetLogDir()) + baseLogDir := fmt.Sprintf(logDirPattern, node.GetLogDir()) args := []string{ "--rejectnonstd", "--txindex", @@ -98,9 +99,12 @@ func NewBackend(miner string, netParams *chaincfg.Params) ( // Don't disconnect if a reply takes too long. "--nostalldetect", } - chainBackend, err := rpctest.New(netParams, nil, args, GetBtcdBinary()) + chainBackend, err := rpctest.New( + netParams, nil, args, node.GetBtcdBinary(), + ) if err != nil { - return nil, nil, fmt.Errorf("unable to create btcd node: %v", err) + return nil, nil, fmt.Errorf("unable to create btcd node: %v", + err) } // We want to overwrite some of the connection settings to make the @@ -112,7 +116,8 @@ func NewBackend(miner string, netParams *chaincfg.Params) ( chainBackend.ConnectionRetryTimeout = rpctest.DefaultConnectionRetryTimeout * 2 if err := chainBackend.SetUp(false, 0); err != nil { - return nil, nil, fmt.Errorf("unable to set up btcd backend: %v", err) + return nil, nil, fmt.Errorf("unable to set up btcd backend: %v", + err) } bd := &BtcdBackendConfig{ @@ -141,14 +146,16 @@ func NewBackend(miner string, netParams *chaincfg.Params) ( for _, file := range files { logFile := fmt.Sprintf("%s/%s", logDir, file.Name()) newFilename := strings.Replace( - file.Name(), "btcd.log", "output_btcd_chainbackend.log", 1, + file.Name(), "btcd.log", + "output_btcd_chainbackend.log", 1, ) logDestination := fmt.Sprintf( - "%s/%s", GetLogDir(), newFilename, + "%s/%s", node.GetLogDir(), newFilename, ) - err := CopyFile(logDestination, logFile) + err := node.CopyFile(logDestination, logFile) if err != nil { - errStr += fmt.Sprintf("unable to copy file: %v\n", err) + errStr += fmt.Sprintf("unable to copy file: "+ + "%v\n", err) } } diff --git a/lntest/fee_service.go b/lntest/fee_service.go deleted file mode 100644 index e2c1df361..000000000 --- a/lntest/fee_service.go +++ /dev/null @@ -1,114 +0,0 @@ -package lntest - -import ( - "context" - "encoding/json" - "fmt" - "io" - "net/http" - "sync" - "testing" - - "github.com/lightningnetwork/lnd/lnwallet/chainfee" -) - -const ( - // feeServiceTarget is the confirmation target for which a fee estimate - // is returned. Requests for higher confirmation targets will fall back - // to this. 
- feeServiceTarget = 1 -) - -// feeService runs a web service that provides fee estimation information. -type feeService struct { - feeEstimates - - t *testing.T - - srv *http.Server - wg sync.WaitGroup - - url string - - lock sync.Mutex -} - -// feeEstimates contains the current fee estimates. -type feeEstimates struct { - Fees map[uint32]uint32 `json:"fee_by_block_target"` -} - -// startFeeService spins up a go-routine to serve fee estimates. -func startFeeService(t *testing.T) *feeService { - port := NextAvailablePort() - f := feeService{ - t: t, - url: fmt.Sprintf("http://localhost:%v/fee-estimates.json", port), - } - - // Initialize default fee estimate. - f.Fees = map[uint32]uint32{feeServiceTarget: 50000} - - listenAddr := fmt.Sprintf(":%v", port) - mux := http.NewServeMux() - mux.HandleFunc("/fee-estimates.json", f.handleRequest) - - f.srv = &http.Server{ - Addr: listenAddr, - Handler: mux, - } - - f.wg.Add(1) - go func() { - defer f.wg.Done() - - if err := f.srv.ListenAndServe(); err != http.ErrServerClosed { - f.t.Errorf("error: cannot start fee api: %v", err) - } - }() - - return &f -} - -// handleRequest handles a client request for fee estimates. -func (f *feeService) handleRequest(w http.ResponseWriter, r *http.Request) { - f.lock.Lock() - defer f.lock.Unlock() - - bytes, err := json.Marshal(f.feeEstimates) - if err != nil { - f.t.Errorf("error: cannot serialize estimates: %v", err) - - return - } - - _, err = io.WriteString(w, string(bytes)) - if err != nil { - f.t.Errorf("error: cannot send estimates: %v", err) - } -} - -// stop stops the web server. -func (f *feeService) stop() { - if err := f.srv.Shutdown(context.Background()); err != nil { - f.t.Errorf("error: cannot stop fee api: %v", err) - } - - f.wg.Wait() -} - -// setFee changes the current fee estimate for the fixed confirmation target. -func (f *feeService) setFee(fee chainfee.SatPerKWeight) { - f.lock.Lock() - defer f.lock.Unlock() - - f.Fees[feeServiceTarget] = uint32(fee.FeePerKVByte()) -} - -// setFeeWithConf sets a fee for the given confirmation target. -func (f *feeService) setFeeWithConf(fee chainfee.SatPerKWeight, conf uint32) { - f.lock.Lock() - defer f.lock.Unlock() - - f.Fees[conf] = uint32(fee.FeePerKVByte()) -} diff --git a/lntest/fee_service_test.go b/lntest/fee_service_test.go deleted file mode 100644 index 228d7c30e..000000000 --- a/lntest/fee_service_test.go +++ /dev/null @@ -1,39 +0,0 @@ -package lntest - -import ( - "io/ioutil" - "net/http" - "testing" - "time" - - "github.com/stretchr/testify/require" -) - -// TestFeeService tests the itest fee estimating web service. -func TestFeeService(t *testing.T) { - service := startFeeService(t) - defer service.stop() - - service.setFee(5000) - - // Wait for service to start accepting connections. 
- var resp *http.Response - require.Eventually( - t, - func() bool { - var err error - resp, err = http.Get(service.url) // nolint:bodyclose - return err == nil - }, - 10*time.Second, time.Second, - ) - - defer resp.Body.Close() - - body, err := ioutil.ReadAll(resp.Body) - require.NoError(t, err) - - require.Equal( - t, "{\"fee_by_block_target\":{\"1\":20000}}", string(body), - ) -} diff --git a/lntest/harness_miner.go b/lntest/harness_miner.go deleted file mode 100644 index b9f54619a..000000000 --- a/lntest/harness_miner.go +++ /dev/null @@ -1,161 +0,0 @@ -package lntest - -import ( - "context" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "strings" - "time" - - "github.com/btcsuite/btcd/chaincfg" - "github.com/btcsuite/btcd/chaincfg/chainhash" - "github.com/btcsuite/btcd/integration/rpctest" - "github.com/btcsuite/btcd/rpcclient" -) - -const ( - // minerLogFilename is the default log filename for the miner node. - minerLogFilename = "output_btcd_miner.log" - - // minerLogDir is the default log dir for the miner node. - minerLogDir = ".minerlogs" -) - -var harnessNetParams = &chaincfg.RegressionNetParams - -type HarnessMiner struct { - *rpctest.Harness - - // runCtx is a context with cancel method. It's used to signal when the - // node needs to quit, and used as the parent context when spawning - runCtx context.Context - cancel context.CancelFunc - - // logPath is the directory path of the miner's logs. - logPath string - - // logFilename is the saved log filename of the miner node. - logFilename string -} - -// NewMiner creates a new miner using btcd backend with the default log file -// dir and name. -func NewMiner() (*HarnessMiner, error) { - return newMiner(minerLogDir, minerLogFilename) -} - -// NewTempMiner creates a new miner using btcd backend with the specified log -// file dir and name. -func NewTempMiner(tempDir, tempLogFilename string) (*HarnessMiner, error) { - return newMiner(tempDir, tempLogFilename) -} - -// newMiner creates a new miner using btcd's rpctest. -func newMiner(minerDirName, logFilename string) (*HarnessMiner, error) { - handler := &rpcclient.NotificationHandlers{} - btcdBinary := GetBtcdBinary() - baseLogPath := fmt.Sprintf("%s/%s", GetLogDir(), minerDirName) - - args := []string{ - "--rejectnonstd", - "--txindex", - "--nowinservice", - "--nobanning", - "--debuglevel=debug", - "--logdir=" + baseLogPath, - "--trickleinterval=100ms", - // Don't disconnect if a reply takes too long. - "--nostalldetect", - } - - miner, err := rpctest.New(harnessNetParams, handler, args, btcdBinary) - if err != nil { - return nil, fmt.Errorf("unable to create mining node: %v", err) - } - - ctxt, cancel := context.WithCancel(context.Background()) - m := &HarnessMiner{ - Harness: miner, - runCtx: ctxt, - cancel: cancel, - logPath: baseLogPath, - logFilename: logFilename, - } - return m, nil -} - -// Stop shuts down the miner and saves its logs. -func (h *HarnessMiner) Stop() error { - h.cancel() - - if err := h.TearDown(); err != nil { - return fmt.Errorf("tear down miner got error: %s", err) - } - - return h.saveLogs() -} - -// saveLogs copies the node logs and save it to the file specified by -// h.logFilename. -func (h *HarnessMiner) saveLogs() error { - // After shutting down the miner, we'll make a copy of the log files - // before deleting the temporary log dir. 
- path := fmt.Sprintf("%s/%s", h.logPath, harnessNetParams.Name) - files, err := ioutil.ReadDir(path) - if err != nil { - return fmt.Errorf("unable to read log directory: %v", err) - } - - for _, file := range files { - newFilename := strings.Replace( - file.Name(), "btcd.log", h.logFilename, 1, - ) - copyPath := fmt.Sprintf("%s/../%s", h.logPath, newFilename) - - logFile := fmt.Sprintf("%s/%s", path, file.Name()) - err := CopyFile(filepath.Clean(copyPath), logFile) - if err != nil { - return fmt.Errorf("unable to copy file: %v", err) - } - } - - if err = os.RemoveAll(h.logPath); err != nil { - return fmt.Errorf("cannot remove dir %s: %v", h.logPath, err) - } - - return nil -} - -// waitForTxInMempool blocks until the target txid is seen in the mempool. If -// the transaction isn't seen within the network before the passed timeout, -// then an error is returned. -func (h *HarnessMiner) waitForTxInMempool(txid chainhash.Hash) error { - ticker := time.NewTicker(50 * time.Millisecond) - defer ticker.Stop() - - var mempool []*chainhash.Hash - for { - select { - case <-h.runCtx.Done(): - return fmt.Errorf("NetworkHarness has been torn down") - case <-time.After(DefaultTimeout): - return fmt.Errorf("wanted %v, found %v txs "+ - "in mempool: %v", txid, len(mempool), mempool) - - case <-ticker.C: - var err error - mempool, err = h.Client.GetRawMempool() - if err != nil { - return err - } - - for _, mempoolTx := range mempool { - if *mempoolTx == txid { - return nil - } - } - } - } -} diff --git a/lntest/harness_net.go b/lntest/harness_net.go deleted file mode 100644 index 358a1c0b3..000000000 --- a/lntest/harness_net.go +++ /dev/null @@ -1,1759 +0,0 @@ -package lntest - -import ( - "context" - "encoding/hex" - "errors" - "fmt" - "io" - "io/ioutil" - "net/http" - "os" - "path/filepath" - "strings" - "sync" - "testing" - "time" - - "github.com/btcsuite/btcd/btcutil" - "github.com/btcsuite/btcd/chaincfg" - "github.com/btcsuite/btcd/chaincfg/chainhash" - "github.com/btcsuite/btcd/txscript" - "github.com/btcsuite/btcd/wire" - "github.com/lightningnetwork/lnd" - "github.com/lightningnetwork/lnd/kvdb/etcd" - "github.com/lightningnetwork/lnd/lnrpc" - "github.com/lightningnetwork/lnd/lntest/wait" - "github.com/lightningnetwork/lnd/lnwallet/chainfee" - "github.com/lightningnetwork/lnd/lnwire" - "github.com/stretchr/testify/require" - "golang.org/x/sync/errgroup" - "google.golang.org/grpc/grpclog" -) - -// DefaultCSV is the CSV delay (remotedelay) we will start our test nodes with. -const DefaultCSV = 4 - -// NodeOption is a function for updating a node's configuration. -type NodeOption func(*BaseNodeConfig) - -// NetworkHarness is an integration testing harness for the lightning network. -// Building on top of HarnessNode, it is responsible for handling interactions -// among different nodes. The harness by default is created with two active -// nodes on the network: -// Alice and Bob. -type NetworkHarness struct { - netParams *chaincfg.Params - - // currentTestCase holds the name for the currently run test case. - currentTestCase string - - // lndBinary is the full path to the lnd binary that was specifically - // compiled with all required itest flags. - lndBinary string - - // Miner is a reference to a running full node that can be used to - // create new blocks on the network. - Miner *HarnessMiner - - // BackendCfg houses the information necessary to use a node as LND - // chain backend, such as rpc configuration, P2P information etc. 
- BackendCfg BackendConfig - - activeNodes map[int]*HarnessNode - - nodesByPub map[string]*HarnessNode - - // Alice and Bob are the initial seeder nodes that are automatically - // created to be the initial participants of the test network. - Alice *HarnessNode - Bob *HarnessNode - - // dbBackend sets the database backend to use. - dbBackend DatabaseBackend - - // Channel for transmitting stderr output from failed lightning node - // to main process. - lndErrorChan chan error - - // feeService is a web service that provides external fee estimates to - // lnd. - feeService *feeService - - // runCtx is a context with cancel method. It's used to signal when the - // node needs to quit, and used as the parent context when spawning - // children contexts for RPC requests. - runCtx context.Context - cancel context.CancelFunc - - mtx sync.Mutex -} - -// NewNetworkHarness creates a new network test harness. -// TODO(roasbeef): add option to use golang's build library to a binary of the -// current repo. This will save developers from having to manually `go install` -// within the repo each time before changes. -func NewNetworkHarness(m *HarnessMiner, b BackendConfig, lndBinary string, - dbBackend DatabaseBackend) (*NetworkHarness, error) { - - ctxt, cancel := context.WithCancel(context.Background()) - - n := NetworkHarness{ - activeNodes: make(map[int]*HarnessNode), - nodesByPub: make(map[string]*HarnessNode), - lndErrorChan: make(chan error), - netParams: m.ActiveNet, - Miner: m, - BackendCfg: b, - runCtx: ctxt, - cancel: cancel, - lndBinary: lndBinary, - dbBackend: dbBackend, - } - return &n, nil -} - -// LookUpNodeByPub queries the set of active nodes to locate a node according -// to its public key. The error is returned if the node was not found. -func (n *NetworkHarness) LookUpNodeByPub(pubStr string) (*HarnessNode, error) { - n.mtx.Lock() - defer n.mtx.Unlock() - - node, ok := n.nodesByPub[pubStr] - if !ok { - return nil, fmt.Errorf("unable to find node") - } - - return node, nil -} - -// ProcessErrors returns a channel used for reporting any fatal process errors. -// If any of the active nodes within the harness' test network incur a fatal -// error, that error is sent over this channel. -func (n *NetworkHarness) ProcessErrors() <-chan error { - return n.lndErrorChan -} - -// SetUp starts the initial seeder nodes within the test harness. The initial -// node's wallets will be funded wallets with ten 1 BTC outputs each. Finally -// rpc clients capable of communicating with the initial seeder nodes are -// created. Nodes are initialized with the given extra command line flags, which -// should be formatted properly - "--arg=value". -func (n *NetworkHarness) SetUp(t *testing.T, - testCase string, lndArgs []string) error { - - // Swap out grpc's default logger with out fake logger which drops the - // statements on the floor. - fakeLogger := grpclog.NewLoggerV2(io.Discard, io.Discard, io.Discard) - grpclog.SetLoggerV2(fakeLogger) - n.currentTestCase = testCase - n.feeService = startFeeService(t) - - // Start the initial seeder nodes within the test network, then connect - // their respective RPC clients. - eg := errgroup.Group{} - eg.Go(func() error { - var err error - n.Alice, err = n.newNode( - "Alice", lndArgs, false, nil, n.dbBackend, true, - ) - return err - }) - eg.Go(func() error { - var err error - n.Bob, err = n.newNode( - "Bob", lndArgs, false, nil, n.dbBackend, true, - ) - return err - }) - require.NoError(t, eg.Wait()) - - // First, make a connection between the two nodes. 
This will wait until - // both nodes are fully started since the Connect RPC is guarded behind - // the server.Started() flag that waits for all subsystems to be ready. - n.ConnectNodes(t, n.Alice, n.Bob) - - // Load up the wallets of the seeder nodes with 10 outputs of 1 BTC - // each. - addrReq := &lnrpc.NewAddressRequest{ - Type: lnrpc.AddressType_WITNESS_PUBKEY_HASH, - } - clients := []lnrpc.LightningClient{n.Alice, n.Bob} - for _, client := range clients { - for i := 0; i < 10; i++ { - resp, err := client.NewAddress(n.runCtx, addrReq) - if err != nil { - return err - } - addr, err := btcutil.DecodeAddress(resp.Address, n.netParams) - if err != nil { - return err - } - addrScript, err := txscript.PayToAddrScript(addr) - if err != nil { - return err - } - - output := &wire.TxOut{ - PkScript: addrScript, - Value: btcutil.SatoshiPerBitcoin, - } - _, err = n.Miner.SendOutputs([]*wire.TxOut{output}, 7500) - if err != nil { - return err - } - } - } - - // We generate several blocks in order to give the outputs created - // above a good number of confirmations. - if _, err := n.Miner.Client.Generate(10); err != nil { - return err - } - - // Now we want to wait for the nodes to catch up. - if err := n.Alice.WaitForBlockchainSync(); err != nil { - return err - } - if err := n.Bob.WaitForBlockchainSync(); err != nil { - return err - } - - // Now block until both wallets have fully synced up. - expectedBalance := int64(btcutil.SatoshiPerBitcoin * 10) - balReq := &lnrpc.WalletBalanceRequest{} - balanceTicker := time.NewTicker(time.Millisecond * 200) - defer balanceTicker.Stop() - balanceTimeout := time.After(DefaultTimeout) -out: - for { - select { - case <-balanceTicker.C: - aliceResp, err := n.Alice.WalletBalance(n.runCtx, balReq) - if err != nil { - return err - } - bobResp, err := n.Bob.WalletBalance(n.runCtx, balReq) - if err != nil { - return err - } - - if aliceResp.ConfirmedBalance == expectedBalance && - bobResp.ConfirmedBalance == expectedBalance { - - break out - } - case <-balanceTimeout: - return fmt.Errorf("balances not synced after deadline") - } - } - - return nil -} - -// TearDown tears down all active nodes within the test lightning network. -func (n *NetworkHarness) TearDown() error { - for _, node := range n.activeNodes { - if err := n.ShutdownNode(node); err != nil { - return err - } - } - - return nil -} - -// Stop stops the test harness. -func (n *NetworkHarness) Stop() { - close(n.lndErrorChan) - n.cancel() - - // feeService may not be created. For instance, running a non-exist - // test case. - if n.feeService != nil { - n.feeService.stop() - } -} - -// extraArgsEtcd returns extra args for configuring LND to use an external etcd -// database (for remote channel DB and wallet DB). -func extraArgsEtcd(etcdCfg *etcd.Config, name string, cluster bool, - leaderSessionTTL int) []string { - - extraArgs := []string{ - "--db.backend=etcd", - fmt.Sprintf("--db.etcd.host=%v", etcdCfg.Host), - fmt.Sprintf("--db.etcd.user=%v", etcdCfg.User), - fmt.Sprintf("--db.etcd.pass=%v", etcdCfg.Pass), - fmt.Sprintf("--db.etcd.namespace=%v", etcdCfg.Namespace), - } - - if etcdCfg.InsecureSkipVerify { - extraArgs = append(extraArgs, "--db.etcd.insecure_skip_verify") - } - - if cluster { - clusterArgs := []string{ - "--cluster.enable-leader-election", - fmt.Sprintf("--cluster.id=%v", name), - fmt.Sprintf("--cluster.leader-session-ttl=%v", - leaderSessionTTL), - } - extraArgs = append(extraArgs, clusterArgs...) 
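
For illustration only, a minimal sketch of how an integration test typically drove the SetUp/TearDown lifecycle described above. The miner, backend config, lnd binary path, and test name are assumptions supplied by the test runner (with the usual testing/require/lntest imports), not values taken from this diff:

func runWithHarness(t *testing.T, miner *lntest.HarnessMiner,
	backendCfg lntest.BackendConfig) {

	net, err := lntest.NewNetworkHarness(
		miner, backendCfg, "./lnd-itest", lntest.BackendBbolt,
	)
	require.NoError(t, err)

	// SetUp starts Alice and Bob, connects them, and funds each wallet
	// with ten 1 BTC outputs.
	require.NoError(t, net.SetUp(t, "example-test-case", nil))
	defer func() {
		require.NoError(t, net.TearDown())
		net.Stop()
	}()

	// ... test body using net.Alice and net.Bob ...
}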
- } - - return extraArgs -} - -// NewNodeWithSeedEtcd starts a new node with seed that'll use an external -// etcd database as its (remote) channel and wallet DB. The passsed cluster -// flag indicates that we'd like the node to join the cluster leader election. -func (n *NetworkHarness) NewNodeWithSeedEtcd(name string, etcdCfg *etcd.Config, - password []byte, entropy []byte, statelessInit, cluster bool, - leaderSessionTTL int) (*HarnessNode, []string, []byte, error) { - - // We don't want to use the embedded etcd instance. - const dbBackend = BackendBbolt - - extraArgs := extraArgsEtcd(etcdCfg, name, cluster, leaderSessionTTL) - return n.newNodeWithSeed( - name, extraArgs, password, entropy, statelessInit, dbBackend, - ) -} - -// NewNodeWithSeedEtcd starts a new node with seed that'll use an external -// etcd database as its (remote) channel and wallet DB. The passsed cluster -// flag indicates that we'd like the node to join the cluster leader election. -// If the wait flag is false then we won't wait until RPC is available (this is -// useful when the node is not expected to become the leader right away). -func (n *NetworkHarness) NewNodeEtcd(name string, etcdCfg *etcd.Config, - password []byte, cluster, wait bool, leaderSessionTTL int) ( - *HarnessNode, error) { - - // We don't want to use the embedded etcd instance. - const dbBackend = BackendBbolt - - extraArgs := extraArgsEtcd(etcdCfg, name, cluster, leaderSessionTTL) - return n.newNode(name, extraArgs, true, password, dbBackend, wait) -} - -// NewNode fully initializes a returns a new HarnessNode bound to the -// current instance of the network harness. The created node is running, but -// not yet connected to other nodes within the network. -func (n *NetworkHarness) NewNode(t *testing.T, - name string, extraArgs []string, opts ...NodeOption) *HarnessNode { - - node, err := n.newNode( - name, extraArgs, false, nil, n.dbBackend, true, opts..., - ) - require.NoErrorf(t, err, "unable to create new node for %s", name) - - return node -} - -// NewNodeWithSeed fully initializes a new HarnessNode after creating a fresh -// aezeed. The provided password is used as both the aezeed password and the -// wallet password. The generated mnemonic is returned along with the -// initialized harness node. -func (n *NetworkHarness) NewNodeWithSeed(name string, extraArgs []string, - password []byte, statelessInit bool) (*HarnessNode, []string, []byte, - error) { - - return n.newNodeWithSeed( - name, extraArgs, password, nil, statelessInit, n.dbBackend, - ) -} - -func (n *NetworkHarness) newNodeWithSeed(name string, extraArgs []string, - password, entropy []byte, statelessInit bool, dbBackend DatabaseBackend) ( - *HarnessNode, []string, []byte, error) { - - node, err := n.newNode( - name, extraArgs, true, password, dbBackend, true, - ) - if err != nil { - return nil, nil, nil, err - } - - // Create a request to generate a new aezeed. The new seed will have the - // same password as the internal wallet. - genSeedReq := &lnrpc.GenSeedRequest{ - AezeedPassphrase: password, - SeedEntropy: entropy, - } - - ctxt, cancel := context.WithTimeout(n.runCtx, DefaultTimeout) - defer cancel() - - var genSeedResp *lnrpc.GenSeedResponse - if err := wait.NoError(func() error { - genSeedResp, err = node.GenSeed(ctxt, genSeedReq) - return err - }, DefaultTimeout); err != nil { - return nil, nil, nil, err - } - - // With the seed created, construct the init request to the node, - // including the newly generated seed. 
- initReq := &lnrpc.InitWalletRequest{ - WalletPassword: password, - CipherSeedMnemonic: genSeedResp.CipherSeedMnemonic, - AezeedPassphrase: password, - StatelessInit: statelessInit, - } - - // Pass the init request via rpc to finish unlocking the node. This will - // also initialize the macaroon-authenticated LightningClient. - response, err := node.Init(initReq) - if err != nil { - return nil, nil, nil, err - } - - // With the node started, we can now record its public key within the - // global mapping. - n.RegisterNode(node) - - // In stateless initialization mode we get a macaroon back that we have - // to return to the test, otherwise gRPC calls won't be possible since - // there are no macaroon files created in that mode. - // In stateful init the admin macaroon will just be nil. - return node, genSeedResp.CipherSeedMnemonic, response.AdminMacaroon, nil -} - -func (n *NetworkHarness) NewNodeRemoteSigner(name string, extraArgs []string, - password []byte, watchOnly *lnrpc.WatchOnly) (*HarnessNode, error) { - - node, err := n.newNode( - name, extraArgs, true, password, n.dbBackend, true, - ) - if err != nil { - return nil, err - } - - // With the seed created, construct the init request to the node, - // including the newly generated seed. - initReq := &lnrpc.InitWalletRequest{ - WalletPassword: password, - WatchOnly: watchOnly, - } - - // Pass the init request via rpc to finish unlocking the node. This will - // also initialize the macaroon-authenticated LightningClient. - _, err = node.Init(initReq) - if err != nil { - return nil, err - } - - // With the node started, we can now record its public key within the - // global mapping. - n.RegisterNode(node) - - return node, nil -} - -// RestoreNodeWithSeed fully initializes a HarnessNode using a chosen mnemonic, -// password, recovery window, and optionally a set of static channel backups. -// After providing the initialization request to unlock the node, this method -// will finish initializing the LightningClient such that the HarnessNode can -// be used for regular rpc operations. -func (n *NetworkHarness) RestoreNodeWithSeed(name string, extraArgs []string, - password []byte, mnemonic []string, rootKey string, recoveryWindow int32, - chanBackups *lnrpc.ChanBackupSnapshot, - opts ...NodeOption) (*HarnessNode, error) { - - node, err := n.newNode( - name, extraArgs, true, password, n.dbBackend, true, opts..., - ) - if err != nil { - return nil, err - } - - initReq := &lnrpc.InitWalletRequest{ - WalletPassword: password, - CipherSeedMnemonic: mnemonic, - AezeedPassphrase: password, - ExtendedMasterKey: rootKey, - RecoveryWindow: recoveryWindow, - ChannelBackups: chanBackups, - } - - _, err = node.Init(initReq) - if err != nil { - return nil, err - } - - // With the node started, we can now record its public key within the - // global mapping. - n.RegisterNode(node) - - return node, nil -} - -// newNode initializes a new HarnessNode, supporting the ability to initialize a -// wallet with or without a seed. If hasSeed is false, the returned harness node -// can be used immediately. Otherwise, the node will require an additional -// initialization phase where the wallet is either created or restored. 
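
A hedged sketch of how the seed-based constructors above were used from a test. The node name, password, and recovery window are illustrative, and `net`/`t` are assumed to exist in the calling test:

password := []byte("top-secret-password")

// Create Carol with a fresh aezeed and capture the generated mnemonic.
carol, mnemonic, _, err := net.NewNodeWithSeed("Carol", nil, password, false)
require.NoError(t, err)

// Tear Carol down, then recreate her later from the same mnemonic, scanning
// 1000 addresses ahead for on-chain recovery.
require.NoError(t, net.ShutdownNode(carol))
carol, err = net.RestoreNodeWithSeed(
	"Carol", nil, password, mnemonic, "", 1000, nil,
)
require.NoError(t, err)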
-func (n *NetworkHarness) newNode(name string, extraArgs []string, hasSeed bool, - password []byte, dbBackend DatabaseBackend, wait bool, opts ...NodeOption) ( - *HarnessNode, error) { - - cfg := &BaseNodeConfig{ - Name: name, - LogFilenamePrefix: n.currentTestCase, - HasSeed: hasSeed, - Password: password, - BackendCfg: n.BackendCfg, - NetParams: n.netParams, - ExtraArgs: extraArgs, - FeeURL: n.feeService.url, - DbBackend: dbBackend, - } - for _, opt := range opts { - opt(cfg) - } - - node, err := newNode(cfg) - if err != nil { - return nil, err - } - - // Put node in activeNodes to ensure Shutdown is called even if Start - // returns an error. - n.mtx.Lock() - n.activeNodes[node.NodeID] = node - n.mtx.Unlock() - - err = node.start(n.lndBinary, n.lndErrorChan, wait) - if err != nil { - return nil, err - } - - // If this node is to have a seed, it will need to be unlocked or - // initialized via rpc. Delay registering it with the network until it - // can be driven via an unlocked rpc connection. - if node.Cfg.HasSeed { - return node, nil - } - - // With the node started, we can now record its public key within the - // global mapping. - n.RegisterNode(node) - - return node, nil -} - -// RegisterNode records a new HarnessNode in the NetworkHarnesses map of known -// nodes. This method should only be called with nodes that have successfully -// retrieved their public keys via FetchNodeInfo. -func (n *NetworkHarness) RegisterNode(node *HarnessNode) { - n.mtx.Lock() - n.nodesByPub[node.PubKeyStr] = node - n.mtx.Unlock() -} - -func (n *NetworkHarness) connect(ctx context.Context, - req *lnrpc.ConnectPeerRequest, a *HarnessNode) error { - - syncTimeout := time.After(DefaultTimeout) -tryconnect: - if _, err := a.ConnectPeer(ctx, req); err != nil { - // If the chain backend is still syncing, retry. - if strings.Contains(err.Error(), lnd.ErrServerNotActive.Error()) || - strings.Contains(err.Error(), "i/o timeout") { - - select { - case <-time.After(100 * time.Millisecond): - goto tryconnect - case <-syncTimeout: - return fmt.Errorf("chain backend did not " + - "finish syncing") - } - } - return err - } - - return nil -} - -// EnsureConnected will try to connect to two nodes, returning no error if they -// are already connected. If the nodes were not connected previously, this will -// behave the same as ConnectNodes. If a pending connection request has already -// been made, the method will block until the two nodes appear in each other's -// peers list, or until the 15s timeout expires. -func (n *NetworkHarness) EnsureConnected(t *testing.T, a, b *HarnessNode) { - ctx, cancel := context.WithTimeout(n.runCtx, DefaultTimeout*2) - defer cancel() - - // errConnectionRequested is used to signal that a connection was - // requested successfully, which is distinct from already being - // connected to the peer. - errConnectionRequested := errors.New("connection request in progress") - - tryConnect := func(a, b *HarnessNode) error { - bInfo, err := b.GetInfo(ctx, &lnrpc.GetInfoRequest{}) - if err != nil { - return err - } - - req := &lnrpc.ConnectPeerRequest{ - Addr: &lnrpc.LightningAddress{ - Pubkey: bInfo.IdentityPubkey, - Host: b.Cfg.P2PAddr(), - }, - } - - var predErr error - err = wait.Predicate(func() bool { - ctx, cancel := context.WithTimeout(ctx, DefaultTimeout) - defer cancel() - - err := n.connect(ctx, req, a) - switch { - // Request was successful, wait for both to display the - // connection. 
- case err == nil: - predErr = errConnectionRequested - return true - - // If the two are already connected, we return early - // with no error. - case strings.Contains( - err.Error(), "already connected to peer", - ): - predErr = nil - return true - - default: - predErr = err - return false - } - }, DefaultTimeout) - if err != nil { - return fmt.Errorf("connection not succeeded within 15 "+ - "seconds: %v", predErr) - } - - return predErr - } - - aErr := tryConnect(a, b) - bErr := tryConnect(b, a) - switch { - // If both reported already being connected to each other, we can exit - // early. - case aErr == nil && bErr == nil: - - // Return any critical errors returned by either alice. - case aErr != nil && aErr != errConnectionRequested: - t.Fatalf( - "ensure connection between %s and %s failed "+ - "with error from %s: %v", - a.Cfg.Name, b.Cfg.Name, a.Cfg.Name, aErr, - ) - - // Return any critical errors returned by either bob. - case bErr != nil && bErr != errConnectionRequested: - t.Fatalf("ensure connection between %s and %s failed "+ - "with error from %s: %v", - a.Cfg.Name, b.Cfg.Name, b.Cfg.Name, bErr, - ) - - // Otherwise one or both requested a connection, so we wait for the - // peers lists to reflect the connection. - default: - } - - findSelfInPeerList := func(a, b *HarnessNode) bool { - // If node B is seen in the ListPeers response from node A, - // then we can exit early as the connection has been fully - // established. - resp, err := b.ListPeers(ctx, &lnrpc.ListPeersRequest{}) - if err != nil { - return false - } - - for _, peer := range resp.Peers { - if peer.PubKey == a.PubKeyStr { - return true - } - } - - return false - } - - err := wait.Predicate(func() bool { - return findSelfInPeerList(a, b) && findSelfInPeerList(b, a) - }, DefaultTimeout) - - require.NoErrorf( - t, err, "unable to connect %s to %s, "+ - "got error: peers not connected within %v seconds", - a.Cfg.Name, b.Cfg.Name, DefaultTimeout, - ) -} - -// ConnectNodes attempts to create a connection between nodes a and b. -func (n *NetworkHarness) ConnectNodes(t *testing.T, a, b *HarnessNode) { - n.connectNodes(t, a, b, false) -} - -// ConnectNodesPerm attempts to connect nodes a and b and sets node b as -// a peer that node a should persistently attempt to reconnect to if they -// become disconnected. -func (n *NetworkHarness) ConnectNodesPerm(t *testing.T, - a, b *HarnessNode) { - - n.connectNodes(t, a, b, true) -} - -// connectNodes establishes an encrypted+authenticated p2p connection from node -// a towards node b. The function will return a non-nil error if the connection -// was unable to be established. If the perm parameter is set to true then -// node a will persistently attempt to reconnect to node b if they get -// disconnected. -// -// NOTE: This function may block for up to 15-seconds as it will not return -// until the new connection is detected as being known to both nodes. 
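
A brief sketch of the intended call pattern for the connection helpers above, assuming hypothetical nodes `carol` and `dave` created via NewNode:

// For nodes known to be disconnected, ConnectNodes establishes a fresh p2p
// connection and fails the test on error.
net.ConnectNodes(t, carol, dave)

// When a prior connection may already exist (e.g. after a restart),
// EnsureConnected tolerates the "already connected" case and simply waits
// until both peers see each other.
net.EnsureConnected(t, carol, dave)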
-func (n *NetworkHarness) connectNodes(t *testing.T, a, b *HarnessNode, - perm bool) { - - ctx, cancel := context.WithTimeout(n.runCtx, DefaultTimeout) - defer cancel() - - bobInfo, err := b.GetInfo(ctx, &lnrpc.GetInfoRequest{}) - require.NoErrorf( - t, err, "unable to connect %s to %s, got error: %v", - a.Cfg.Name, b.Cfg.Name, err, - ) - - req := &lnrpc.ConnectPeerRequest{ - Addr: &lnrpc.LightningAddress{ - Pubkey: bobInfo.IdentityPubkey, - Host: b.Cfg.P2PAddr(), - }, - Perm: perm, - } - - err = n.connect(ctx, req, a) - require.NoErrorf( - t, err, "unable to connect %s to %s, got error: %v", - a.Cfg.Name, b.Cfg.Name, err, - ) - - err = wait.Predicate(func() bool { - // If node B is seen in the ListPeers response from node A, - // then we can exit early as the connection has been fully - // established. - resp, err := a.ListPeers(ctx, &lnrpc.ListPeersRequest{}) - if err != nil { - return false - } - - for _, peer := range resp.Peers { - if peer.PubKey == b.PubKeyStr { - return true - } - } - - return false - }, DefaultTimeout) - - require.NoErrorf( - t, err, "unable to connect %s to %s, "+ - "got error: peers not connected within %v seconds", - a.Cfg.Name, b.Cfg.Name, DefaultTimeout, - ) -} - -// DisconnectNodes disconnects node a from node b by sending RPC message -// from a node to b node. -func (n *NetworkHarness) DisconnectNodes(a, b *HarnessNode) error { - ctx, cancel := context.WithTimeout(n.runCtx, DefaultTimeout) - defer cancel() - - bobInfo, err := b.GetInfo(ctx, &lnrpc.GetInfoRequest{}) - if err != nil { - return err - } - - req := &lnrpc.DisconnectPeerRequest{ - PubKey: bobInfo.IdentityPubkey, - } - - if _, err := a.DisconnectPeer(ctx, req); err != nil { - return err - } - - return nil -} - -// RestartNode attempts to restart a lightning node by shutting it down -// cleanly, then restarting the process. This function is fully blocking. Upon -// restart, the RPC connection to the node will be re-attempted, continuing iff -// the connection attempt is successful. If the callback parameter is non-nil, -// then the function will be executed after the node shuts down, but *before* -// the process has been started up again. -// -// This method can be useful when testing edge cases such as a node broadcast -// and invalidated prior state, or persistent state recovery, simulating node -// crashes, etc. Additionally, each time the node is restarted, the caller can -// pass a set of SCBs to pass in via the Unlock method allowing them to restore -// channels during restart. -func (n *NetworkHarness) RestartNode(node *HarnessNode, callback func() error, - chanBackups ...*lnrpc.ChanBackupSnapshot) error { - - err := n.RestartNodeNoUnlock(node, callback, true) - if err != nil { - return err - } - - // If the node doesn't have a password set, then we can exit here as we - // don't need to unlock it. - if len(node.Cfg.Password) == 0 { - return nil - } - - // Otherwise, we'll unlock the wallet, then complete the final steps - // for the node initialization process. - unlockReq := &lnrpc.UnlockWalletRequest{ - WalletPassword: node.Cfg.Password, - } - if len(chanBackups) != 0 { - unlockReq.ChannelBackups = chanBackups[0] - unlockReq.RecoveryWindow = 1000 - } - - if err := node.Unlock(unlockReq); err != nil { - return err - } - - // Give the node some time to catch up with the chain before we - // continue with the tests. - return node.WaitForBlockchainSync() -} - -// RestartNodeNoUnlock attempts to restart a lightning node by shutting it down -// cleanly, then restarting the process. 
In case the node was setup with a seed, -// it will be left in the unlocked state. This function is fully blocking. If -// the callback parameter is non-nil, then the function will be executed after -// the node shuts down, but *before* the process has been started up again. -func (n *NetworkHarness) RestartNodeNoUnlock(node *HarnessNode, - callback func() error, wait bool) error { - - if err := node.stop(); err != nil { - return err - } - - if callback != nil { - if err := callback(); err != nil { - return err - } - } - - return node.start(n.lndBinary, n.lndErrorChan, wait) -} - -// SuspendNode stops the given node and returns a callback that can be used to -// start it again. -func (n *NetworkHarness) SuspendNode(node *HarnessNode) (func() error, error) { - if err := node.stop(); err != nil { - return nil, err - } - - restart := func() error { - return node.start(n.lndBinary, n.lndErrorChan, true) - } - - return restart, nil -} - -// ShutdownNode stops an active lnd process and returns when the process has -// exited and any temporary directories have been cleaned up. -func (n *NetworkHarness) ShutdownNode(node *HarnessNode) error { - if err := node.shutdown(); err != nil { - return err - } - - delete(n.activeNodes, node.NodeID) - return nil -} - -// KillNode kills the node (but won't wait for the node process to stop). -func (n *NetworkHarness) KillNode(node *HarnessNode) error { - if err := node.kill(); err != nil { - return err - } - - delete(n.activeNodes, node.NodeID) - return nil -} - -// StopNode stops the target node, but doesn't yet clean up its directories. -// This can be used to temporarily bring a node down during a test, to be later -// started up again. -func (n *NetworkHarness) StopNode(node *HarnessNode) error { - return node.stop() -} - -// SaveProfilesPages hits profiles pages of all active nodes and writes it to -// disk using a similar naming scheme as to the regular set of logs. -func (n *NetworkHarness) SaveProfilesPages(t *testing.T) { - // Only write gorutine dumps if flag is active. - if !(*goroutineDump) { - return - } - - for _, node := range n.activeNodes { - if err := saveProfilesPage(node); err != nil { - t.Logf("Logging follow-up error only, see rest of "+ - "the log for actual cause: %v\n", err) - } - } -} - -// saveProfilesPage saves the profiles page for the given node to file. -func saveProfilesPage(node *HarnessNode) error { - resp, err := http.Get( - fmt.Sprintf( - "http://localhost:%d/debug/pprof/goroutine?debug=1", - node.Cfg.ProfilePort, - ), - ) - if err != nil { - return fmt.Errorf("failed to get profile page "+ - "(node_id=%d, name=%s): %v", - node.NodeID, node.Cfg.Name, err) - } - defer resp.Body.Close() - - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return fmt.Errorf("failed to read profile page "+ - "(node_id=%d, name=%s): %v", - node.NodeID, node.Cfg.Name, err) - } - - fileName := fmt.Sprintf( - "pprof-%d-%s-%s.log", node.NodeID, node.Cfg.Name, - hex.EncodeToString(node.PubKey[:logPubKeyBytes]), - ) - - logFile, err := os.Create(fileName) - if err != nil { - return fmt.Errorf("failed to create file for profile page "+ - "(node_id=%d, name=%s): %v", - node.NodeID, node.Cfg.Name, err) - } - defer logFile.Close() - - _, err = logFile.Write(body) - if err != nil { - return fmt.Errorf("failed to save profile page "+ - "(node_id=%d, name=%s): %v", - node.NodeID, node.Cfg.Name, err) - } - return nil -} - -// OpenChannelParams houses the params to specify when opening a new channel. 
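
A small usage sketch for the restart helpers above; `carol` and the `chanBackupSnapshot` value are assumed test fixtures rather than anything defined in this diff:

// Temporarily take Carol offline and bring her back via the returned closure.
restartCarol, err := net.SuspendNode(carol)
require.NoError(t, err)
// ... assert behaviour while Carol is offline ...
require.NoError(t, restartCarol())

// A full restart that also unlocks the wallet again, optionally feeding a
// channel backup snapshot into the unlock request.
require.NoError(t, net.RestartNode(carol, nil, chanBackupSnapshot))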
-type OpenChannelParams struct {
- // Amt is the local amount being put into the channel.
- Amt btcutil.Amount
-
- // PushAmt is the amount that should be pushed to the remote when the
- // channel is opened.
- PushAmt btcutil.Amount
-
- // Private is a boolean indicating whether the opened channel should be
- // private.
- Private bool
-
- // SpendUnconfirmed is a boolean indicating whether we can utilize
- // unconfirmed outputs to fund the channel.
- SpendUnconfirmed bool
-
- // MinHtlc is the htlc_minimum_msat value set when opening the channel.
- MinHtlc lnwire.MilliSatoshi
-
- // RemoteMaxHtlcs is the remote_max_htlcs value set when opening the
- // channel, restricting the number of concurrent HTLCs the remote party
- // can add to a commitment.
- RemoteMaxHtlcs uint16
-
- // FundingShim is an optional funding shim that the caller can specify
- // in order to modify the channel funding workflow.
- FundingShim *lnrpc.FundingShim
-
- // SatPerVByte is the amount of satoshis to spend in chain fees per
- // virtual byte of the transaction.
- SatPerVByte btcutil.Amount
-
- // CommitmentType is the commitment type that should be used for the
- // channel to be opened.
- CommitmentType lnrpc.CommitmentType
-
- // ZeroConf is used to determine if the channel will be a zero-conf
- // channel. This only works if the explicit negotiation is used with
- // anchors or script enforced leases.
- ZeroConf bool
-
- // ScidAlias denotes whether the channel will use the option-scid-alias
- // channel type.
- ScidAlias bool
-
- // BaseFee is the channel base fee applied during the channel
- // announcement phase.
- BaseFee uint64
-
- // FeeRate is the channel fee rate in ppm applied during the channel
- // announcement phase.
- FeeRate uint64
-
- // UseBaseFee, if set, instructs the downstream logic to apply the
- // user-specified channel base fee to the channel update announcement.
- // If set to false it avoids applying a base fee of 0 and instead
- // activates the default configured base fee.
- UseBaseFee bool
-
- // UseFeeRate, if set, instructs the downstream logic to apply the
- // user-specified channel fee rate to the channel update announcement.
- // If set to false it avoids applying a fee rate of 0 and instead
- // activates the default configured fee rate.
- UseFeeRate bool
-}
-
-// OpenChannel attempts to open a channel between srcNode and destNode with
-// the passed channel funding parameters. If the channel pending notification
-// is not received before ChannelOpenTimeout, an error is returned. Whether
-// the channel is funded with confirmed outputs is controlled by the
-// SpendUnconfirmed field of the passed params.
-func (n *NetworkHarness) OpenChannel(srcNode, destNode *HarnessNode,
- p OpenChannelParams) (lnrpc.Lightning_OpenChannelClient, error) {
-
- // Wait until srcNode and destNode have the latest chain synced.
- // Otherwise, we may run into a check within the funding manager that
- // prevents any funding workflows from being kicked off if the chain
- // isn't yet synced.
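
For example, a private channel of 1M satoshis pushing 100k satoshis to the remote might be described with the params struct above before being handed to OpenChannel (the values are purely illustrative):

p := lntest.OpenChannelParams{
	Amt:         btcutil.Amount(1_000_000),
	PushAmt:     btcutil.Amount(100_000),
	Private:     true,
	SatPerVByte: 5,
}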
- if err := srcNode.WaitForBlockchainSync(); err != nil { - return nil, fmt.Errorf("unable to sync srcNode chain: %v", err) - } - if err := destNode.WaitForBlockchainSync(); err != nil { - return nil, fmt.Errorf("unable to sync destNode chain: %v", err) - } - - minConfs := int32(1) - if p.SpendUnconfirmed { - minConfs = 0 - } - - openReq := &lnrpc.OpenChannelRequest{ - NodePubkey: destNode.PubKey[:], - LocalFundingAmount: int64(p.Amt), - PushSat: int64(p.PushAmt), - Private: p.Private, - MinConfs: minConfs, - SpendUnconfirmed: p.SpendUnconfirmed, - MinHtlcMsat: int64(p.MinHtlc), - RemoteMaxHtlcs: uint32(p.RemoteMaxHtlcs), - FundingShim: p.FundingShim, - SatPerByte: int64(p.SatPerVByte), - CommitmentType: p.CommitmentType, - ZeroConf: p.ZeroConf, - ScidAlias: p.ScidAlias, - BaseFee: p.BaseFee, - FeeRate: p.FeeRate, - UseBaseFee: p.UseBaseFee, - UseFeeRate: p.UseFeeRate, - } - - // We need to use n.runCtx here to keep the response stream alive after - // the function is returned. - respStream, err := srcNode.OpenChannel(n.runCtx, openReq) - if err != nil { - return nil, fmt.Errorf("unable to open channel between "+ - "alice and bob: %v", err) - } - - chanOpen := make(chan struct{}) - errChan := make(chan error) - go func() { - // Consume the "channel pending" update. This waits until the - // node notifies us that the final message in the channel - // funding workflow has been sent to the remote node. - resp, err := respStream.Recv() - if err != nil { - errChan <- err - return - } - _, ok := resp.Update.(*lnrpc.OpenStatusUpdate_ChanPending) - if !ok { - errChan <- fmt.Errorf("expected channel pending: "+ - "update, instead got %v", resp) - return - } - - close(chanOpen) - }() - - select { - case <-time.After(ChannelOpenTimeout): - return nil, fmt.Errorf("timeout reached before chan pending "+ - "update sent: %v", err) - case err := <-errChan: - return nil, err - case <-chanOpen: - return respStream, nil - } -} - -// OpenPendingChannel attempts to open a channel between srcNode and destNode -// with the passed channel funding parameters. If the passed context has a -// timeout, then if the timeout is reached before the channel pending -// notification is received, an error is returned. -func (n *NetworkHarness) OpenPendingChannel(srcNode, destNode *HarnessNode, - amt btcutil.Amount, - pushAmt btcutil.Amount) (*lnrpc.PendingUpdate, error) { - - // Wait until srcNode and destNode have blockchain synced - if err := srcNode.WaitForBlockchainSync(); err != nil { - return nil, fmt.Errorf("unable to sync srcNode chain: %v", err) - } - if err := destNode.WaitForBlockchainSync(); err != nil { - return nil, fmt.Errorf("unable to sync destNode chain: %v", err) - } - - openReq := &lnrpc.OpenChannelRequest{ - NodePubkey: destNode.PubKey[:], - LocalFundingAmount: int64(amt), - PushSat: int64(pushAmt), - Private: false, - } - - // We need to use n.runCtx here to keep the response stream alive after - // the function is returned. - respStream, err := srcNode.OpenChannel(n.runCtx, openReq) - if err != nil { - return nil, fmt.Errorf("unable to open channel between "+ - "alice and bob: %v", err) - } - - chanPending := make(chan *lnrpc.PendingUpdate) - errChan := make(chan error) - go func() { - // Consume the "channel pending" update. This waits until the - // node notifies us that the final message in the channel - // funding workflow has been sent to the remote node. 
- resp, err := respStream.Recv() - if err != nil { - errChan <- err - return - } - pendingResp, ok := resp.Update.(*lnrpc.OpenStatusUpdate_ChanPending) - if !ok { - errChan <- fmt.Errorf("expected channel pending "+ - "update, instead got %v", resp) - return - } - - chanPending <- pendingResp.ChanPending - }() - - select { - case <-time.After(ChannelOpenTimeout): - return nil, fmt.Errorf("timeout reached before chan pending " + - "update sent") - case err := <-errChan: - return nil, err - case pendingChan := <-chanPending: - return pendingChan, nil - } -} - -// WaitForChannelOpen waits for a notification that a channel is open by -// consuming a message from the past open channel stream. If the passed context -// has a timeout, then if the timeout is reached before the channel has been -// opened, then an error is returned. -func (n *NetworkHarness) WaitForChannelOpen( - openChanStream lnrpc.Lightning_OpenChannelClient) ( - *lnrpc.ChannelPoint, error) { - - ctx, cancel := context.WithTimeout(n.runCtx, ChannelOpenTimeout) - defer cancel() - - errChan := make(chan error) - respChan := make(chan *lnrpc.ChannelPoint) - go func() { - resp, err := openChanStream.Recv() - if err != nil { - errChan <- fmt.Errorf("unable to read rpc resp: %v", err) - return - } - fundingResp, ok := resp.Update.(*lnrpc.OpenStatusUpdate_ChanOpen) - if !ok { - errChan <- fmt.Errorf("expected channel open update, "+ - "instead got %v", resp) - return - } - - respChan <- fundingResp.ChanOpen.ChannelPoint - }() - - select { - case <-ctx.Done(): - return nil, fmt.Errorf("timeout reached while waiting for " + - "channel open") - case err := <-errChan: - return nil, err - case chanPoint := <-respChan: - return chanPoint, nil - } -} - -// CloseChannel attempts to close the channel indicated by the -// passed channel point, initiated by the passed lnNode. If the passed context -// has a timeout, an error is returned if that timeout is reached before the -// channel close is pending. -func (n *NetworkHarness) CloseChannel(lnNode *HarnessNode, - cp *lnrpc.ChannelPoint, force bool) (lnrpc.Lightning_CloseChannelClient, - *chainhash.Hash, error) { - - // The cancel is intentionally left out here because the returned - // item(close channel client) relies on the context being active. This - // will be fixed once we finish refactoring the NetworkHarness. - ctxt, cancel := context.WithTimeout(n.runCtx, ChannelCloseTimeout) - defer cancel() - - // Create a channel outpoint that we can use to compare to channels - // from the ListChannelsResponse. - txidHash, err := getChanPointFundingTxid(cp) - if err != nil { - return nil, nil, err - } - fundingTxID, err := chainhash.NewHash(txidHash) - if err != nil { - return nil, nil, err - } - chanPoint := wire.OutPoint{ - Hash: *fundingTxID, - Index: cp.OutputIndex, - } - - // We'll wait for *both* nodes to read the channel as active if we're - // performing a cooperative channel closure. - if !force { - timeout := DefaultTimeout - listReq := &lnrpc.ListChannelsRequest{} - - // We define two helper functions, one two locate a particular - // channel, and the other to check if a channel is active or - // not. 
- filterChannel := func(node *HarnessNode, - op wire.OutPoint) (*lnrpc.Channel, error) { - - listResp, err := node.ListChannels(ctxt, listReq) - if err != nil { - return nil, err - } - - for _, c := range listResp.Channels { - if c.ChannelPoint == op.String() { - return c, nil - } - } - - return nil, fmt.Errorf("unable to find channel") - } - activeChanPredicate := func(node *HarnessNode) func() bool { - return func() bool { - channel, err := filterChannel(node, chanPoint) - if err != nil { - return false - } - - return channel.Active - } - } - - // Next, we'll fetch the target channel in order to get the - // harness node that will be receiving the channel close - // request. - targetChan, err := filterChannel(lnNode, chanPoint) - if err != nil { - return nil, nil, err - } - receivingNode, err := n.LookUpNodeByPub(targetChan.RemotePubkey) - if err != nil { - return nil, nil, err - } - - // Before proceeding, we'll ensure that the channel is active - // for both nodes. - err = wait.Predicate(activeChanPredicate(lnNode), timeout) - if err != nil { - return nil, nil, fmt.Errorf("channel of closing " + - "node not active in time") - } - err = wait.Predicate( - activeChanPredicate(receivingNode), timeout, - ) - if err != nil { - return nil, nil, fmt.Errorf("channel of receiving " + - "node not active in time") - } - } - - var ( - closeRespStream lnrpc.Lightning_CloseChannelClient - closeTxid *chainhash.Hash - ) - - err = wait.NoError(func() error { - closeReq := &lnrpc.CloseChannelRequest{ - ChannelPoint: cp, Force: force, - } - // We need to use n.runCtx to keep the client stream alive - // after the function has returned. - closeRespStream, err = lnNode.CloseChannel(n.runCtx, closeReq) - if err != nil { - return fmt.Errorf("unable to close channel: %v", err) - } - - // Consume the "channel close" update in order to wait for the - // closing transaction to be broadcast, then wait for the - // closing tx to be seen within the network. - closeResp, err := closeRespStream.Recv() - if err != nil { - return fmt.Errorf("unable to recv() from close "+ - "stream: %v", err) - } - pendingClose, ok := closeResp.Update.(*lnrpc.CloseStatusUpdate_ClosePending) - if !ok { - return fmt.Errorf("expected channel close update, "+ - "instead got %v", pendingClose) - } - - closeTxid, err = chainhash.NewHash( - pendingClose.ClosePending.Txid, - ) - if err != nil { - return fmt.Errorf("unable to decode closeTxid: "+ - "%v", err) - } - if err := n.Miner.waitForTxInMempool(*closeTxid); err != nil { - return fmt.Errorf("error while waiting for "+ - "broadcast tx: %v", err) - } - return nil - }, ChannelCloseTimeout) - if err != nil { - return nil, nil, err - } - - return closeRespStream, closeTxid, nil -} - -// WaitForChannelClose waits for a notification from the passed channel close -// stream that the node has deemed the channel has been fully closed. If the -// passed context has a timeout, then if the timeout is reached before the -// notification is received then an error is returned. 
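
A hedged sketch of the cooperative-close round trip using the helpers above; `alice`, `chanPoint`, and the single mined block are test-specific assumptions:

closeStream, closingTxid, err := net.CloseChannel(alice, chanPoint, false)
require.NoError(t, err)
_ = closingTxid // CloseChannel already waited for this tx in the mempool.

// Confirm the closing transaction, then block until the close stream reports
// the terminal ChanClose update.
_, err = net.Miner.Client.Generate(1)
require.NoError(t, err)

_, err = net.WaitForChannelClose(closeStream)
require.NoError(t, err)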
-func (n *NetworkHarness) WaitForChannelClose( - closeChanStream lnrpc.Lightning_CloseChannelClient) ( - *chainhash.Hash, error) { - - errChan := make(chan error) - updateChan := make(chan *lnrpc.CloseStatusUpdate_ChanClose) - go func() { - closeResp, err := closeChanStream.Recv() - if err != nil { - errChan <- err - return - } - - closeFin, ok := closeResp.Update.(*lnrpc.CloseStatusUpdate_ChanClose) - if !ok { - errChan <- fmt.Errorf("expected channel close update, "+ - "instead got %v", closeFin) - return - } - - updateChan <- closeFin - }() - - // Wait until either the deadline for the context expires, an error - // occurs, or the channel close update is received. - select { - case <-time.After(ChannelCloseTimeout): - return nil, fmt.Errorf("timeout reached before update sent") - case err := <-errChan: - return nil, err - case update := <-updateChan: - return chainhash.NewHash(update.ChanClose.ClosingTxid) - } -} - -// AssertChannelExists asserts that an active channel identified by the -// specified channel point exists from the point-of-view of the node. It takes -// an optional set of check functions which can be used to make further -// assertions using channel's values. These functions are responsible for -// failing the test themselves if they do not pass. -func (n *NetworkHarness) AssertChannelExists(node *HarnessNode, - chanPoint *wire.OutPoint, checks ...func(*lnrpc.Channel)) error { - - ctx, cancel := context.WithTimeout(n.runCtx, ChannelCloseTimeout) - defer cancel() - - req := &lnrpc.ListChannelsRequest{} - - return wait.NoError(func() error { - resp, err := node.ListChannels(ctx, req) - if err != nil { - return fmt.Errorf("unable fetch node's channels: %v", err) - } - - for _, channel := range resp.Channels { - if channel.ChannelPoint == chanPoint.String() { - // First check whether our channel is active, - // failing early if it is not. - if !channel.Active { - return fmt.Errorf("channel %s inactive", - chanPoint) - } - - // Apply any additional checks that we would - // like to verify. - for _, check := range checks { - check(channel) - } - - return nil - } - } - - return fmt.Errorf("channel %s not found", chanPoint) - }, DefaultTimeout) -} - -// DumpLogs reads the current logs generated by the passed node, and returns -// the logs as a single string. This function is useful for examining the logs -// of a particular node in the case of a test failure. -// Logs from lightning node being generated with delay - you should -// add time.Sleep() in order to get all logs. -func (n *NetworkHarness) DumpLogs(node *HarnessNode) (string, error) { - logFile := fmt.Sprintf("%v/simnet/lnd.log", node.Cfg.LogDir) - - buf, err := ioutil.ReadFile(logFile) - if err != nil { - return "", err - } - - return string(buf), nil -} - -// SendCoins attempts to send amt satoshis from the internal mining node to the -// targeted lightning node using a P2WKH address. 6 blocks are mined after in -// order to confirm the transaction. -func (n *NetworkHarness) SendCoins(t *testing.T, amt btcutil.Amount, - target *HarnessNode) { - - err := n.SendCoinsOfType( - amt, target, lnrpc.AddressType_WITNESS_PUBKEY_HASH, true, - ) - require.NoErrorf(t, err, "unable to send coins for %s", target.Cfg.Name) -} - -// SendCoinsUnconfirmed sends coins from the internal mining node to the target -// lightning node using a P2WPKH address. No blocks are mined after, so the -// transaction remains unconfirmed. 
-func (n *NetworkHarness) SendCoinsUnconfirmed(t *testing.T, amt btcutil.Amount, - target *HarnessNode) { - - err := n.SendCoinsOfType( - amt, target, lnrpc.AddressType_WITNESS_PUBKEY_HASH, false, - ) - require.NoErrorf( - t, err, "unable to send unconfirmed coins for %s", - target.Cfg.Name, - ) -} - -// SendCoinsNP2WKH attempts to send amt satoshis from the internal mining node -// to the targeted lightning node using a NP2WKH address. -func (n *NetworkHarness) SendCoinsNP2WKH(t *testing.T, amt btcutil.Amount, - target *HarnessNode) { - - err := n.SendCoinsOfType( - amt, target, lnrpc.AddressType_NESTED_PUBKEY_HASH, true, - ) - require.NoErrorf( - t, err, "unable to send NP2WKH coins for %s", - target.Cfg.Name, - ) -} - -// SendCoinsP2TR attempts to send amt satoshis from the internal mining node -// to the targeted lightning node using a P2TR address. -func (n *NetworkHarness) SendCoinsP2TR(t *testing.T, amt btcutil.Amount, - target *HarnessNode) { - - err := n.SendCoinsOfType( - amt, target, lnrpc.AddressType_TAPROOT_PUBKEY, true, - ) - require.NoErrorf( - t, err, "unable to send P2TR coins for %s", target.Cfg.Name, - ) -} - -// SendCoinsOfType attempts to send amt satoshis from the internal mining node -// to the targeted lightning node. The confirmed boolean indicates whether the -// transaction that pays to the target should confirm. -func (n *NetworkHarness) SendCoinsOfType(amt btcutil.Amount, target *HarnessNode, - addrType lnrpc.AddressType, confirmed bool) error { - - ctx, cancel := context.WithTimeout(n.runCtx, DefaultTimeout) - defer cancel() - - balReq := &lnrpc.WalletBalanceRequest{} - initialBalance, err := target.WalletBalance(ctx, balReq) - if err != nil { - return err - } - - // First, obtain an address from the target lightning node, preferring - // to receive a p2wkh address s.t the output can immediately be used as - // an input to a funding transaction. - addrReq := &lnrpc.NewAddressRequest{ - Type: addrType, - } - resp, err := target.NewAddress(ctx, addrReq) - if err != nil { - return err - } - addr, err := btcutil.DecodeAddress(resp.Address, n.netParams) - if err != nil { - return err - } - addrScript, err := txscript.PayToAddrScript(addr) - if err != nil { - return err - } - - // Generate a transaction which creates an output to the target - // pkScript of the desired amount. - output := &wire.TxOut{ - PkScript: addrScript, - Value: int64(amt), - } - _, err = n.Miner.SendOutputs([]*wire.TxOut{output}, 7500) - if err != nil { - return err - } - - // Encode the pkScript in hex as this the format that it will be - // returned via rpc. - expPkScriptStr := hex.EncodeToString(addrScript) - - // Now, wait for ListUnspent to show the unconfirmed transaction - // containing the correct pkscript. - err = wait.NoError(func() error { - // Since neutrino doesn't support unconfirmed outputs, skip - // this check. - if target.Cfg.BackendCfg.Name() == "neutrino" { - return nil - } - - req := &lnrpc.ListUnspentRequest{} - resp, err := target.ListUnspent(ctx, req) - if err != nil { - return err - } - - // When using this method, there should only ever be on - // unconfirmed transaction. - if len(resp.Utxos) != 1 { - return fmt.Errorf("number of unconfirmed utxos "+ - "should be 1, found %d", len(resp.Utxos)) - } - - // Assert that the lone unconfirmed utxo contains the same - // pkscript as the output generated above. 
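
For instance, funding a hypothetical node `carol` with the different output types looks like this (amounts are illustrative):

// 1 BTC to a P2WKH address, confirmed by mining 6 blocks.
net.SendCoins(t, btcutil.SatoshiPerBitcoin, carol)

// A taproot output, also confirmed.
net.SendCoinsP2TR(t, btcutil.SatoshiPerBitcoin/2, carol)

// 100k satoshis that remain unconfirmed in the mempool.
net.SendCoinsUnconfirmed(t, 100_000, carol)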
- pkScriptStr := resp.Utxos[0].PkScript - if strings.Compare(pkScriptStr, expPkScriptStr) != 0 { - return fmt.Errorf("pkscript mismatch, want: %s, "+ - "found: %s", expPkScriptStr, pkScriptStr) - } - - return nil - }, DefaultTimeout) - if err != nil { - return fmt.Errorf("unconfirmed utxo was not found in "+ - "ListUnspent: %v", err) - } - - // If the transaction should remain unconfirmed, then we'll wait until - // the target node's unconfirmed balance reflects the expected balance - // and exit. - if !confirmed { - expectedBalance := btcutil.Amount(initialBalance.UnconfirmedBalance) + amt - return target.WaitForBalance(expectedBalance, false) - } - - // Otherwise, we'll generate 6 new blocks to ensure the output gains a - // sufficient number of confirmations and wait for the balance to - // reflect what's expected. - if _, err := n.Miner.Client.Generate(6); err != nil { - return err - } - - fullInitialBalance := initialBalance.ConfirmedBalance + - initialBalance.UnconfirmedBalance - expectedBalance := btcutil.Amount(fullInitialBalance) + amt - return target.WaitForBalance(expectedBalance, true) -} - -func (n *NetworkHarness) SetFeeEstimate(fee chainfee.SatPerKWeight) { - n.feeService.setFee(fee) -} - -func (n *NetworkHarness) SetFeeEstimateWithConf( - fee chainfee.SatPerKWeight, conf uint32) { - - n.feeService.setFeeWithConf(fee, conf) -} - -// copyAll copies all files and directories from srcDir to dstDir recursively. -// Note that this function does not support links. -func copyAll(dstDir, srcDir string) error { - entries, err := ioutil.ReadDir(srcDir) - if err != nil { - return err - } - - for _, entry := range entries { - srcPath := filepath.Join(srcDir, entry.Name()) - dstPath := filepath.Join(dstDir, entry.Name()) - - info, err := os.Stat(srcPath) - if err != nil { - return err - } - - if info.IsDir() { - err := os.Mkdir(dstPath, info.Mode()) - if err != nil && !os.IsExist(err) { - return err - } - - err = copyAll(dstPath, srcPath) - if err != nil { - return err - } - } else if err := CopyFile(dstPath, srcPath); err != nil { - return err - } - } - - return nil -} - -// BackupDb creates a backup of the current database. -func (n *NetworkHarness) BackupDb(hn *HarnessNode) error { - if hn.backupDbDir != "" { - return errors.New("backup already created") - } - - restart, err := n.SuspendNode(hn) - if err != nil { - return err - } - - if hn.postgresDbName != "" { - // Backup database. - backupDbName := hn.postgresDbName + "_backup" - err := executePgQuery( - "CREATE DATABASE " + backupDbName + " WITH TEMPLATE " + - hn.postgresDbName, - ) - if err != nil { - return err - } - } else { - // Backup files. - tempDir, err := ioutil.TempDir("", "past-state") - if err != nil { - return fmt.Errorf("unable to create temp db folder: %v", - err) - } - - if err := copyAll(tempDir, hn.DBDir()); err != nil { - return fmt.Errorf("unable to copy database files: %v", - err) - } - - hn.backupDbDir = tempDir - } - - err = restart() - if err != nil { - return err - } - - return nil -} - -// RestoreDb restores a database backup. -func (n *NetworkHarness) RestoreDb(hn *HarnessNode) error { - if hn.postgresDbName != "" { - // Restore database. - backupDbName := hn.postgresDbName + "_backup" - err := executePgQuery( - "DROP DATABASE " + hn.postgresDbName, - ) - if err != nil { - return err - } - err = executePgQuery( - "ALTER DATABASE " + backupDbName + " RENAME TO " + hn.postgresDbName, - ) - if err != nil { - return err - } - } else { - // Restore files. 
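
To illustrate the intended backup/restore flow (a sketch only; `carol` is an assumed node and the state mutation in between is elided):

// Snapshot Carol's database before the channel state advances.
require.NoError(t, net.BackupDb(carol))

// ... advance the channel state so the snapshot becomes stale ...

// Roll Carol back to the stale snapshot while she is offline, then restart.
restartCarol, err := net.SuspendNode(carol)
require.NoError(t, err)
require.NoError(t, net.RestoreDb(carol))
require.NoError(t, restartCarol())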
- if hn.backupDbDir == "" { - return errors.New("no database backup created") - } - - if err := copyAll(hn.DBDir(), hn.backupDbDir); err != nil { - return fmt.Errorf("unable to copy database files: %v", err) - } - - if err := os.RemoveAll(hn.backupDbDir); err != nil { - return fmt.Errorf("unable to remove backup dir: %v", err) - } - hn.backupDbDir = "" - } - - return nil -} - -// getChanPointFundingTxid returns the given channel point's funding txid in -// raw bytes. -func getChanPointFundingTxid(chanPoint *lnrpc.ChannelPoint) ([]byte, error) { - var txid []byte - - // A channel point's funding txid can be get/set as a byte slice or a - // string. In the case it is a string, decode it. - switch chanPoint.GetFundingTxid().(type) { - case *lnrpc.ChannelPoint_FundingTxidBytes: - txid = chanPoint.GetFundingTxidBytes() - case *lnrpc.ChannelPoint_FundingTxidStr: - s := chanPoint.GetFundingTxidStr() - h, err := chainhash.NewHashFromStr(s) - if err != nil { - return nil, err - } - - txid = h[:] - } - - return txid, nil -} diff --git a/lntest/harness_node.go b/lntest/harness_node.go deleted file mode 100644 index 26dc90498..000000000 --- a/lntest/harness_node.go +++ /dev/null @@ -1,1928 +0,0 @@ -package lntest - -import ( - "bytes" - "context" - "crypto/rand" - "encoding/hex" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "os" - "os/exec" - "path" - "path/filepath" - "strings" - "sync" - "time" - - "github.com/btcsuite/btcd/btcutil" - "github.com/btcsuite/btcd/chaincfg" - "github.com/btcsuite/btcd/wire" - "github.com/jackc/pgx/v4/pgxpool" - "github.com/lightningnetwork/lnd/chanbackup" - "github.com/lightningnetwork/lnd/lnrpc" - "github.com/lightningnetwork/lnd/lnrpc/chainrpc" - "github.com/lightningnetwork/lnd/lnrpc/invoicesrpc" - "github.com/lightningnetwork/lnd/lnrpc/neutrinorpc" - "github.com/lightningnetwork/lnd/lnrpc/peersrpc" - "github.com/lightningnetwork/lnd/lnrpc/routerrpc" - "github.com/lightningnetwork/lnd/lnrpc/signrpc" - "github.com/lightningnetwork/lnd/lnrpc/walletrpc" - "github.com/lightningnetwork/lnd/lnrpc/watchtowerrpc" - "github.com/lightningnetwork/lnd/lnrpc/wtclientrpc" - "github.com/lightningnetwork/lnd/lntest/wait" - "github.com/lightningnetwork/lnd/macaroons" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/status" - "gopkg.in/macaroon.v2" -) - -const ( - // logPubKeyBytes is the number of bytes of the node's PubKey that will - // be appended to the log file name. The whole PubKey is too long and - // not really necessary to quickly identify what node produced which - // log file. - logPubKeyBytes = 4 - - // trickleDelay is the amount of time in milliseconds between each - // release of announcements by AuthenticatedGossiper to the network. - trickleDelay = 50 - - postgresDsn = "postgres://postgres:postgres@localhost:6432/%s?sslmode=disable" - - // commitInterval specifies the maximum interval the graph database - // will wait between attempting to flush a batch of modifications to - // disk(db.batch-commit-interval). - commitInterval = 10 * time.Millisecond - - DefaultTimeout = wait.DefaultTimeout - NodeStartTimeout = wait.NodeStartTimeout - ChannelOpenTimeout = wait.ChannelOpenTimeout - ChannelCloseTimeout = wait.ChannelCloseTimeout -) - -var ( - // numActiveNodes is the number of active nodes within the test network. 
- numActiveNodes = 0 - numActiveNodesMtx sync.Mutex -) - -func postgresDatabaseDsn(dbName string) string { - return fmt.Sprintf(postgresDsn, dbName) -} - -// BackendConfig is an interface that abstracts away the specific chain backend -// node implementation. -type BackendConfig interface { - // GenArgs returns the arguments needed to be passed to LND at startup - // for using this node as a chain backend. - GenArgs() []string - - // ConnectMiner is called to establish a connection to the test miner. - ConnectMiner() error - - // DisconnectMiner is called to disconnect the miner. - DisconnectMiner() error - - // Name returns the name of the backend type. - Name() string - - // Credentials returns the rpc username, password and host for the - // backend. - Credentials() (string, string, string, error) -} - -// NodeConfig is the basic interface a node configuration must implement. -type NodeConfig interface { - // BaseConfig returns the base node configuration struct. - BaseConfig() *BaseNodeConfig - - // GenerateListeningPorts generates the ports to listen on designated - // for the current lightning network test. - GenerateListeningPorts() - - // GenArgs generates a slice of command line arguments from the - // lightning node config struct. - GenArgs() []string -} - -// BaseNodeConfig is the base node configuration. -type BaseNodeConfig struct { - Name string - - // LogFilenamePrefix is used to prefix node log files. Can be used - // to store the current test case for simpler postmortem debugging. - LogFilenamePrefix string - - BackendCfg BackendConfig - NetParams *chaincfg.Params - BaseDir string - ExtraArgs []string - - DataDir string - LogDir string - TLSCertPath string - TLSKeyPath string - AdminMacPath string - ReadMacPath string - InvoiceMacPath string - - HasSeed bool - Password []byte - - P2PPort int - RPCPort int - RESTPort int - ProfilePort int - - AcceptKeySend bool - AcceptAMP bool - - FeeURL string - - DbBackend DatabaseBackend - PostgresDsn string -} - -func (cfg BaseNodeConfig) P2PAddr() string { - return fmt.Sprintf(ListenerFormat, cfg.P2PPort) -} - -func (cfg BaseNodeConfig) RPCAddr() string { - return fmt.Sprintf(ListenerFormat, cfg.RPCPort) -} - -func (cfg BaseNodeConfig) RESTAddr() string { - return fmt.Sprintf(ListenerFormat, cfg.RESTPort) -} - -// DBDir returns the holding directory path of the graph database. -func (cfg BaseNodeConfig) DBDir() string { - return filepath.Join(cfg.DataDir, "graph", cfg.NetParams.Name) -} - -func (cfg BaseNodeConfig) DBPath() string { - return filepath.Join(cfg.DBDir(), "channel.db") -} - -func (cfg BaseNodeConfig) ChanBackupPath() string { - return filepath.Join( - cfg.DataDir, "chain", "bitcoin", - fmt.Sprintf( - "%v/%v", cfg.NetParams.Name, - chanbackup.DefaultBackupFileName, - ), - ) -} - -// GenerateListeningPorts generates the ports to listen on designated for the -// current lightning network test. -func (cfg *BaseNodeConfig) GenerateListeningPorts() { - if cfg.P2PPort == 0 { - cfg.P2PPort = NextAvailablePort() - } - if cfg.RPCPort == 0 { - cfg.RPCPort = NextAvailablePort() - } - if cfg.RESTPort == 0 { - cfg.RESTPort = NextAvailablePort() - } - if cfg.ProfilePort == 0 { - cfg.ProfilePort = NextAvailablePort() - } -} - -// BaseConfig returns the base node configuration struct. -func (cfg *BaseNodeConfig) BaseConfig() *BaseNodeConfig { - return cfg -} - -// GenArgs generates a slice of command line arguments from the lightning node -// config struct. 
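
A minimal sketch of how a config was assembled before being handed to newNode; the `backendCfg` value is an assumption from the surrounding test run:

cfg := &BaseNodeConfig{
	Name:       "Carol",
	NetParams:  &chaincfg.RegressionNetParams,
	BackendCfg: backendCfg, // assumed BackendConfig for this run
	DbBackend:  BackendBbolt,
}
cfg.GenerateListeningPorts()

// GenArgs renders the config into lnd command line flags, e.g.
// "--bitcoin.regtest", "--rpclisten=<addr>", "--noseedbackup", ...
args := cfg.GenArgs()
_ = args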
-func (cfg *BaseNodeConfig) GenArgs() []string { - var args []string - - switch cfg.NetParams { - case &chaincfg.TestNet3Params: - args = append(args, "--bitcoin.testnet") - case &chaincfg.SimNetParams: - args = append(args, "--bitcoin.simnet") - case &chaincfg.RegressionNetParams: - args = append(args, "--bitcoin.regtest") - } - - backendArgs := cfg.BackendCfg.GenArgs() - args = append(args, backendArgs...) - - nodeArgs := []string{ - "--bitcoin.active", - "--nobootstrap", - "--debuglevel=debug", - "--bitcoin.defaultchanconfs=1", - "--keep-failed-payment-attempts", - fmt.Sprintf("--db.batch-commit-interval=%v", commitInterval), - fmt.Sprintf("--bitcoin.defaultremotedelay=%v", DefaultCSV), - fmt.Sprintf("--rpclisten=%v", cfg.RPCAddr()), - fmt.Sprintf("--restlisten=%v", cfg.RESTAddr()), - fmt.Sprintf("--restcors=https://%v", cfg.RESTAddr()), - fmt.Sprintf("--listen=%v", cfg.P2PAddr()), - fmt.Sprintf("--externalip=%v", cfg.P2PAddr()), - fmt.Sprintf("--lnddir=%v", cfg.BaseDir), - fmt.Sprintf("--adminmacaroonpath=%v", cfg.AdminMacPath), - fmt.Sprintf("--readonlymacaroonpath=%v", cfg.ReadMacPath), - fmt.Sprintf("--invoicemacaroonpath=%v", cfg.InvoiceMacPath), - fmt.Sprintf("--trickledelay=%v", trickleDelay), - fmt.Sprintf("--profile=%d", cfg.ProfilePort), - fmt.Sprintf("--caches.rpc-graph-cache-duration=%d", 0), - } - args = append(args, nodeArgs...) - - if !cfg.HasSeed { - args = append(args, "--noseedbackup") - } - - if cfg.ExtraArgs != nil { - args = append(args, cfg.ExtraArgs...) - } - - if cfg.AcceptKeySend { - args = append(args, "--accept-keysend") - } - - if cfg.AcceptAMP { - args = append(args, "--accept-amp") - } - - switch cfg.DbBackend { - case BackendEtcd: - args = append(args, "--db.backend=etcd") - args = append(args, "--db.etcd.embedded") - args = append( - args, fmt.Sprintf( - "--db.etcd.embedded_client_port=%v", - NextAvailablePort(), - ), - ) - args = append( - args, fmt.Sprintf( - "--db.etcd.embedded_peer_port=%v", - NextAvailablePort(), - ), - ) - args = append( - args, fmt.Sprintf( - "--db.etcd.embedded_log_file=%v", - path.Join(cfg.LogDir, "etcd.log"), - ), - ) - - case BackendPostgres: - args = append(args, "--db.backend=postgres") - args = append(args, "--db.postgres.dsn="+cfg.PostgresDsn) - - case BackendSqlite: - args = append(args, "--db.backend=sqlite") - args = append(args, fmt.Sprintf("--db.sqlite.busytimeout=%v", - wait.SqliteBusyTimeout)) - } - - if cfg.FeeURL != "" { - args = append(args, "--feeurl="+cfg.FeeURL) - } - - return args -} - -// policyUpdateMap defines a type to store channel policy updates. It has the -// format, -// -// { -// "chanPoint1": { -// "advertisingNode1": [ -// policy1, policy2, ... -// ], -// "advertisingNode2": [ -// policy1, policy2, ... -// ] -// }, -// "chanPoint2": ... -// }. -type policyUpdateMap map[string]map[string][]*lnrpc.RoutingPolicy - -// HarnessNode represents an instance of lnd running within our test network -// harness. Each HarnessNode instance also fully embeds an RPC client in -// order to pragmatically drive the node. -type HarnessNode struct { - Cfg *BaseNodeConfig - - // NodeID is a unique identifier for the node within a NetworkHarness. - NodeID int - - // PubKey is the serialized compressed identity public key of the node. - // This field will only be populated once the node itself has been - // started via the start() method. - PubKey [33]byte - PubKeyStr string - - // rpc holds a list of RPC clients. 
- rpc *RPCClients
-
- // chanWatchRequests receives a request for watching a particular event
- // for a given channel.
- chanWatchRequests chan *chanWatchRequest
-
- // For each outpoint, we'll track an integer which denotes the number of
- // edges seen for that channel within the network. When this number
- // reaches 2, then it means that both edge advertisements have propagated
- // through the network.
- openChans map[wire.OutPoint]int
- openChanWatchers map[wire.OutPoint][]chan struct{}
-
- closedChans map[wire.OutPoint]struct{}
- closeChanWatchers map[wire.OutPoint][]chan struct{}
-
- // policyUpdates stores the policies seen from each advertising
- // node, keyed by the channel outpoint.
- policyUpdates policyUpdateMap
-
- // backupDbDir is the path where a database backup is stored, if any.
- backupDbDir string
-
- // postgresDbName is the name of the postgres database in which lnd's
- // data is stored.
- postgresDbName string
-
- // runCtx is a context with a cancel method. It's used to signal when the
- // node needs to quit, and used as the parent context when spawning
- // children contexts for RPC requests.
- runCtx context.Context
- cancel context.CancelFunc
-
- wg sync.WaitGroup
- cmd *exec.Cmd
- logFile *os.File
-
- // TODO(yy): remove
- lnrpc.LightningClient
- lnrpc.WalletUnlockerClient
- invoicesrpc.InvoicesClient
- peersrpc.PeersClient
- SignerClient signrpc.SignerClient
- RouterClient routerrpc.RouterClient
- WalletKitClient walletrpc.WalletKitClient
- Watchtower watchtowerrpc.WatchtowerClient
- WatchtowerClient wtclientrpc.WatchtowerClientClient
- StateClient lnrpc.StateClient
- ChainClient chainrpc.ChainNotifierClient
- ChainKit chainrpc.ChainKitClient
- NeutrinoClient neutrinorpc.NeutrinoKitClient
-}
-
-// RPCClients wraps a list of RPC clients into a single struct for easier
-// access.
-type RPCClients struct {
- // conn is the underlying connection to the grpc endpoint of the node.
- conn *grpc.ClientConn
-
- LN lnrpc.LightningClient
- WalletUnlocker lnrpc.WalletUnlockerClient
- Invoice invoicesrpc.InvoicesClient
- Signer signrpc.SignerClient
- Router routerrpc.RouterClient
- WalletKit walletrpc.WalletKitClient
- Watchtower watchtowerrpc.WatchtowerClient
- WatchtowerClient wtclientrpc.WatchtowerClientClient
- State lnrpc.StateClient
- ChainClient chainrpc.ChainNotifierClient
- ChainKit chainrpc.ChainKitClient
- NeutrinoClient neutrinorpc.NeutrinoKitClient
-}
-
-// Assert *HarnessNode implements the required RPC client interfaces.
-var _ lnrpc.LightningClient = (*HarnessNode)(nil)
-var _ lnrpc.WalletUnlockerClient = (*HarnessNode)(nil)
-var _ invoicesrpc.InvoicesClient = (*HarnessNode)(nil)
-var _ peersrpc.PeersClient = (*HarnessNode)(nil)
-
-// nextNodeID generates a unique sequence to be used as the node's ID.
-func nextNodeID() int {
- numActiveNodesMtx.Lock()
- defer numActiveNodesMtx.Unlock()
- nodeNum := numActiveNodes
- numActiveNodes++
-
- return nodeNum
-}
-
-// newNode creates a new test lightning node instance from the passed config.
-func newNode(cfg *BaseNodeConfig) (*HarnessNode, error) { - if cfg.BaseDir == "" { - var err error - cfg.BaseDir, err = ioutil.TempDir("", "lndtest-node") - if err != nil { - return nil, err - } - } - cfg.DataDir = filepath.Join(cfg.BaseDir, "data") - cfg.LogDir = filepath.Join(cfg.BaseDir, "logs") - cfg.TLSCertPath = filepath.Join(cfg.BaseDir, "tls.cert") - cfg.TLSKeyPath = filepath.Join(cfg.BaseDir, "tls.key") - - networkDir := filepath.Join( - cfg.DataDir, "chain", "bitcoin", cfg.NetParams.Name, - ) - cfg.AdminMacPath = filepath.Join(networkDir, "admin.macaroon") - cfg.ReadMacPath = filepath.Join(networkDir, "readonly.macaroon") - cfg.InvoiceMacPath = filepath.Join(networkDir, "invoice.macaroon") - - cfg.GenerateListeningPorts() - - // Run all tests with accept keysend. The keysend code is very isolated - // and it is highly unlikely that it would affect regular itests when - // enabled. - cfg.AcceptKeySend = true - - // Create temporary database. - var dbName string - if cfg.DbBackend == BackendPostgres { - var err error - dbName, err = createTempPgDb() - if err != nil { - return nil, err - } - cfg.PostgresDsn = postgresDatabaseDsn(dbName) - } - - return &HarnessNode{ - Cfg: cfg, - NodeID: nextNodeID(), - chanWatchRequests: make(chan *chanWatchRequest), - openChans: make(map[wire.OutPoint]int), - openChanWatchers: make(map[wire.OutPoint][]chan struct{}), - - closedChans: make(map[wire.OutPoint]struct{}), - closeChanWatchers: make(map[wire.OutPoint][]chan struct{}), - - policyUpdates: policyUpdateMap{}, - - postgresDbName: dbName, - }, nil -} - -func createTempPgDb() (string, error) { - // Create random database name. - randBytes := make([]byte, 8) - _, err := rand.Read(randBytes) - if err != nil { - return "", err - } - dbName := "itest_" + hex.EncodeToString(randBytes) - - // Create database. - err = executePgQuery("CREATE DATABASE " + dbName) - if err != nil { - return "", err - } - - return dbName, nil -} - -func executePgQuery(query string) error { - pool, err := pgxpool.Connect( - context.Background(), - postgresDatabaseDsn("postgres"), - ) - if err != nil { - return fmt.Errorf("unable to connect to database: %w", err) - } - defer pool.Close() - - _, err = pool.Exec(context.Background(), query) - return err -} - -// String gives the internal state of the node which is useful for debugging. 
-func (hn *HarnessNode) String() string {
- type nodeCfg struct {
- LogFilenamePrefix string
- ExtraArgs []string
- HasSeed bool
- P2PPort int
- RPCPort int
- RESTPort int
- ProfilePort int
- AcceptKeySend bool
- AcceptAMP bool
- FeeURL string
- }
-
- nodeState := struct {
- NodeID int
- Name string
- PubKey string
- OpenChans map[string]int
- ClosedChans map[string]struct{}
- NodeCfg nodeCfg
- }{
- NodeID: hn.NodeID,
- Name: hn.Cfg.Name,
- PubKey: hn.PubKeyStr,
- OpenChans: make(map[string]int),
- ClosedChans: make(map[string]struct{}),
- NodeCfg: nodeCfg{
- LogFilenamePrefix: hn.Cfg.LogFilenamePrefix,
- ExtraArgs: hn.Cfg.ExtraArgs,
- HasSeed: hn.Cfg.HasSeed,
- P2PPort: hn.Cfg.P2PPort,
- RPCPort: hn.Cfg.RPCPort,
- RESTPort: hn.Cfg.RESTPort,
- AcceptKeySend: hn.Cfg.AcceptKeySend,
- AcceptAMP: hn.Cfg.AcceptAMP,
- FeeURL: hn.Cfg.FeeURL,
- },
- }
-
- for outpoint, count := range hn.openChans {
- nodeState.OpenChans[outpoint.String()] = count
- }
- for outpoint, count := range hn.closedChans {
- nodeState.ClosedChans[outpoint.String()] = count
- }
-
- stateBytes, err := json.MarshalIndent(nodeState, "", "\t")
- if err != nil {
- return fmt.Sprintf("\n encode node state with err: %v", err)
- }
-
- return fmt.Sprintf("\nnode state: %s", stateBytes)
-}
-
-// DBPath returns the filepath to the channeldb database file for this node.
-func (hn *HarnessNode) DBPath() string {
- return hn.Cfg.DBPath()
-}
-
-// DBDir returns the path for the directory holding channeldb file(s).
-func (hn *HarnessNode) DBDir() string {
- return hn.Cfg.DBDir()
-}
-
-// Name returns the name of this node set during initialization.
-func (hn *HarnessNode) Name() string {
- return hn.Cfg.Name
-}
-
-// TLSCertStr returns the path where the TLS certificate is stored.
-func (hn *HarnessNode) TLSCertStr() string {
- return hn.Cfg.TLSCertPath
-}
-
-// TLSKeyStr returns the path where the TLS key is stored.
-func (hn *HarnessNode) TLSKeyStr() string {
- return hn.Cfg.TLSKeyPath
-}
-
-// ChanBackupPath returns the filepath to the on-disk channel.backup file for
-// this node.
-func (hn *HarnessNode) ChanBackupPath() string {
- return hn.Cfg.ChanBackupPath()
-}
-
-// AdminMacPath returns the filepath to the admin.macaroon file for this node.
-func (hn *HarnessNode) AdminMacPath() string {
- return hn.Cfg.AdminMacPath
-}
-
-// ReadMacPath returns the filepath to the readonly.macaroon file for this node.
-func (hn *HarnessNode) ReadMacPath() string {
- return hn.Cfg.ReadMacPath
-}
-
-// InvoiceMacPath returns the filepath to the invoice.macaroon file for this
-// node.
-func (hn *HarnessNode) InvoiceMacPath() string {
- return hn.Cfg.InvoiceMacPath
-}
-
-// startLnd handles the startup of lnd, creating its log files and bubbling up
-// any fatal process errors to the goroutine running the tests.
-func (hn *HarnessNode) startLnd(lndBinary string, lndError chan<- error) error {
- args := hn.Cfg.GenArgs()
- hn.cmd = exec.Command(lndBinary, args...)
-
- // Redirect stderr output to a buffer.
- var errb bytes.Buffer
- hn.cmd.Stderr = &errb
-
- // If the logoutput flag is passed, redirect output from the nodes to
- // log files.
- var (
- fileName string
- err error
- )
- if *LogOutput {
- fileName, err = addLogFile(hn)
- if err != nil {
- return err
- }
- }
-
- if err := hn.cmd.Start(); err != nil {
- return err
- }
-
- // Launch a new goroutine that bubbles up any potential fatal
- // process errors to the goroutine running the tests.
- hn.wg.Add(1) - go func() { - defer hn.wg.Done() - - err := hn.cmd.Wait() - if err != nil { - lndError <- fmt.Errorf("%v\n%v", err, errb.String()) - } - - // Make sure log file is closed and renamed if necessary. - finalizeLogfile(hn, fileName) - - // Rename the etcd.log file if the node was running on embedded - // etcd. - finalizeEtcdLog(hn) - }() - - return nil -} - -// Start launches a new process running lnd. Additionally, the PID of the -// launched process is saved in order to possibly kill the process forcibly -// later. -// -// This may not clean up properly if an error is returned, so the caller should -// call shutdown() regardless of the return value. -func (hn *HarnessNode) start(lndBinary string, lndError chan<- error, - wait bool) error { - - // Init the runCtx. - ctxt, cancel := context.WithCancel(context.Background()) - hn.runCtx = ctxt - hn.cancel = cancel - - // Start lnd and prepare logs. - if err := hn.startLnd(lndBinary, lndError); err != nil { - return err - } - - // We may want to skip waiting for the node to come up (eg. the node - // is waiting to become the leader). - if !wait { - return nil - } - - // Since Stop uses the LightningClient to stop the node, if we fail to - // get a connected client, we have to kill the process. - useMacaroons := !hn.Cfg.HasSeed - conn, err := hn.ConnectRPC(useMacaroons) - if err != nil { - err = fmt.Errorf("ConnectRPC err: %w", err) - cmdErr := hn.cmd.Process.Kill() - if cmdErr != nil { - err = fmt.Errorf("kill process got err: %w: %v", - cmdErr, err) - } - return err - } - - // Init all the RPC clients. - hn.InitRPCClients(conn) - - if err := hn.WaitUntilStarted(); err != nil { - return err - } - - // If the node was created with a seed, we will need to perform an - // additional step to unlock the wallet. The connection returned will - // only use the TLS certs, and can only perform operations necessary to - // unlock the daemon. - if hn.Cfg.HasSeed { - // TODO(yy): remove - hn.WalletUnlockerClient = lnrpc.NewWalletUnlockerClient(conn) - return nil - } - - return hn.initLightningClient() -} - -// WaitUntilStarted waits until the wallet state flips from "WAITING_TO_START". -func (hn *HarnessNode) WaitUntilStarted() error { - return hn.waitTillServerState(func(s lnrpc.WalletState) bool { - return s != lnrpc.WalletState_WAITING_TO_START - }) -} - -// WaitUntilStateReached waits until the given wallet state (or one of the -// states following it) has been reached. -func (hn *HarnessNode) WaitUntilStateReached( - desiredState lnrpc.WalletState) error { - - return hn.waitTillServerState(func(s lnrpc.WalletState) bool { - return s >= desiredState - }) -} - -// WaitUntilServerActive waits until the lnd daemon is fully started. -func (hn *HarnessNode) WaitUntilServerActive() error { - return hn.waitTillServerState(func(s lnrpc.WalletState) bool { - return s == lnrpc.WalletState_SERVER_ACTIVE - }) -} - -// WaitUntilLeader attempts to finish the start procedure by initiating an RPC -// connection and setting up the wallet unlocker client. This is needed when -// a node that has recently been started was waiting to become the leader and -// we're at the point when we expect that it is the leader now (awaiting -// unlock). -func (hn *HarnessNode) WaitUntilLeader(timeout time.Duration) error { - var ( - conn *grpc.ClientConn - connErr error - ) - - if err := wait.NoError(func() error { - conn, connErr = hn.ConnectRPC(!hn.Cfg.HasSeed) - return connErr - }, timeout); err != nil { - return err - } - - // Init all the RPC clients. 
- hn.InitRPCClients(conn) - - if err := hn.WaitUntilStarted(); err != nil { - return err - } - - // If the node was created with a seed, we will need to perform an - // additional step to unlock the wallet. The connection returned will - // only use the TLS certs, and can only perform operations necessary to - // unlock the daemon. - if hn.Cfg.HasSeed { - // TODO(yy): remove - hn.WalletUnlockerClient = lnrpc.NewWalletUnlockerClient(conn) - - return nil - } - - return hn.initLightningClient() -} - -// initClientWhenReady waits until the main gRPC server is detected as active, -// then complete the normal HarnessNode gRPC connection creation. If the node -// is initialized stateless, the macaroon is returned so that the client can -// use it. -func (hn *HarnessNode) initClientWhenReady(stateless bool, - macBytes []byte) error { - - // Wait for the wallet to finish unlocking, such that we can connect to - // it via a macaroon-authenticated rpc connection. - var ( - conn *grpc.ClientConn - err error - ) - if err = wait.NoError(func() error { - // If the node has been initialized stateless, we need to pass - // the macaroon to the client. - if stateless { - adminMac := &macaroon.Macaroon{} - err := adminMac.UnmarshalBinary(macBytes) - if err != nil { - return fmt.Errorf("unmarshal failed: %w", err) - } - conn, err = hn.ConnectRPCWithMacaroon(adminMac) - return err - } - - // Normal initialization, we expect a macaroon to be in the - // file system. - conn, err = hn.ConnectRPC(true) - return err - }, DefaultTimeout); err != nil { - return fmt.Errorf("timeout while init client: %w", err) - } - - // Init all the RPC clients. - hn.InitRPCClients(conn) - - return hn.initLightningClient() -} - -// Init initializes a harness node by passing the init request via rpc. After -// the request is submitted, this method will block until a -// macaroon-authenticated RPC connection can be established to the harness -// node. Once established, the new connection is used to initialize the -// LightningClient and subscribes the HarnessNode to topology changes. -func (hn *HarnessNode) Init( - initReq *lnrpc.InitWalletRequest) (*lnrpc.InitWalletResponse, error) { - - ctxt, cancel := context.WithTimeout(hn.runCtx, DefaultTimeout) - defer cancel() - - response, err := hn.rpc.WalletUnlocker.InitWallet(ctxt, initReq) - if err != nil { - return nil, fmt.Errorf("failed to init wallet: %w", err) - } - - err = hn.initClientWhenReady( - initReq.StatelessInit, response.AdminMacaroon, - ) - if err != nil { - return nil, fmt.Errorf("failed to init: %w", err) - } - - return response, nil -} - -// InitChangePassword initializes a harness node by passing the change password -// request via RPC. After the request is submitted, this method will block until -// a macaroon-authenticated RPC connection can be established to the harness -// node. Once established, the new connection is used to initialize the -// LightningClient and subscribes the HarnessNode to topology changes. -func (hn *HarnessNode) InitChangePassword( - chngPwReq *lnrpc.ChangePasswordRequest) (*lnrpc.ChangePasswordResponse, - error) { - - ctxt, cancel := context.WithTimeout(hn.runCtx, DefaultTimeout) - defer cancel() - - response, err := hn.rpc.WalletUnlocker.ChangePassword(ctxt, chngPwReq) - if err != nil { - return nil, err - } - err = hn.initClientWhenReady( - chngPwReq.StatelessInit, response.AdminMacaroon, - ) - if err != nil { - return nil, err - } - - return response, nil -} - -// Unlock attempts to unlock the wallet of the target HarnessNode. 
This method -// should be called after the restart of a HarnessNode that was created with a -// seed+password. Once this method returns, the HarnessNode will be ready to -// accept normal gRPC requests and harness command. -func (hn *HarnessNode) Unlock(unlockReq *lnrpc.UnlockWalletRequest) error { - ctxt, cancel := context.WithTimeout(hn.runCtx, DefaultTimeout) - defer cancel() - - // Otherwise, we'll need to unlock the node before it's able to start - // up properly. - _, err := hn.rpc.WalletUnlocker.UnlockWallet(ctxt, unlockReq) - if err != nil { - return err - } - - // Now that the wallet has been unlocked, we'll wait for the RPC client - // to be ready, then establish the normal gRPC connection. - return hn.initClientWhenReady(false, nil) -} - -// waitTillServerState makes a subscription to the server's state change and -// blocks until the server is in the targeted state. -func (hn *HarnessNode) waitTillServerState( - predicate func(state lnrpc.WalletState) bool) error { - - ctxt, cancel := context.WithTimeout(hn.runCtx, NodeStartTimeout) - defer cancel() - - client, err := hn.rpc.State.SubscribeState( - ctxt, &lnrpc.SubscribeStateRequest{}, - ) - if err != nil { - return fmt.Errorf("failed to subscribe to state: %w", err) - } - - errChan := make(chan error, 1) - done := make(chan struct{}) - go func() { - for { - resp, err := client.Recv() - if err != nil { - errChan <- err - return - } - - if predicate(resp.State) { - close(done) - return - } - } - }() - - var lastErr error - for { - select { - case err := <-errChan: - lastErr = err - - case <-done: - return nil - - case <-time.After(NodeStartTimeout): - return fmt.Errorf("timeout waiting for state, "+ - "got err from stream: %v", lastErr) - } - } -} - -// InitRPCClients initializes a list of RPC clients for the node. -func (hn *HarnessNode) InitRPCClients(c *grpc.ClientConn) { - hn.rpc = &RPCClients{ - conn: c, - LN: lnrpc.NewLightningClient(c), - Invoice: invoicesrpc.NewInvoicesClient(c), - Router: routerrpc.NewRouterClient(c), - WalletKit: walletrpc.NewWalletKitClient(c), - WalletUnlocker: lnrpc.NewWalletUnlockerClient(c), - Watchtower: watchtowerrpc.NewWatchtowerClient(c), - WatchtowerClient: wtclientrpc.NewWatchtowerClientClient(c), - Signer: signrpc.NewSignerClient(c), - State: lnrpc.NewStateClient(c), - ChainClient: chainrpc.NewChainNotifierClient(c), - ChainKit: chainrpc.NewChainKitClient(c), - NeutrinoClient: neutrinorpc.NewNeutrinoKitClient(c), - } -} - -// initLightningClient blocks until the lnd server is fully started and -// subscribes the harness node to graph topology updates. This method also -// spawns a lightning network watcher for this node, which watches for topology -// changes. -func (hn *HarnessNode) initLightningClient() error { - // TODO(yy): remove - // Construct the LightningClient that will allow us to use the - // HarnessNode directly for normal rpc operations. 
- conn := hn.rpc.conn - hn.LightningClient = lnrpc.NewLightningClient(conn) - hn.InvoicesClient = invoicesrpc.NewInvoicesClient(conn) - hn.RouterClient = routerrpc.NewRouterClient(conn) - hn.WalletKitClient = walletrpc.NewWalletKitClient(conn) - hn.Watchtower = watchtowerrpc.NewWatchtowerClient(conn) - hn.WatchtowerClient = wtclientrpc.NewWatchtowerClientClient(conn) - hn.SignerClient = signrpc.NewSignerClient(conn) - hn.PeersClient = peersrpc.NewPeersClient(conn) - hn.StateClient = lnrpc.NewStateClient(conn) - hn.ChainClient = chainrpc.NewChainNotifierClient(conn) - hn.ChainKit = chainrpc.NewChainKitClient(conn) - hn.NeutrinoClient = neutrinorpc.NewNeutrinoKitClient(conn) - - // Wait until the server is fully started. - if err := hn.WaitUntilServerActive(); err != nil { - return err - } - - // Set the harness node's pubkey to what the node claims in GetInfo. - // The RPC must have been started at this point. - if err := hn.FetchNodeInfo(); err != nil { - return err - } - - // Launch the watcher that will hook into graph related topology change - // from the PoV of this node. - hn.wg.Add(1) - go hn.lightningNetworkWatcher() - - return nil -} - -// FetchNodeInfo queries an unlocked node to retrieve its public key. -func (hn *HarnessNode) FetchNodeInfo() error { - // Obtain the lnid of this node for quick identification purposes. - info, err := hn.rpc.LN.GetInfo(hn.runCtx, &lnrpc.GetInfoRequest{}) - if err != nil { - return err - } - - hn.PubKeyStr = info.IdentityPubkey - - pubkey, err := hex.DecodeString(info.IdentityPubkey) - if err != nil { - return err - } - copy(hn.PubKey[:], pubkey) - - return nil -} - -// AddToLogf adds a line of choice to the node's logfile. This is useful -// to interleave test output with output from the node. -func (hn *HarnessNode) AddToLogf(format string, a ...interface{}) { - // If this node was not set up with a log file, just return early. - if hn.logFile == nil { - return - } - - desc := fmt.Sprintf("itest: %s\n", fmt.Sprintf(format, a...)) - if _, err := hn.logFile.WriteString(desc); err != nil { - hn.PrintErrf("write to log err: %v", err) - } -} - -// ReadMacaroon waits a given duration for the macaroon file to be created. If -// the file is readable within the timeout, its content is de-serialized as a -// macaroon and returned. -func (hn *HarnessNode) ReadMacaroon(macPath string, timeout time.Duration) ( - *macaroon.Macaroon, error) { - - // Wait until macaroon file is created and has valid content before - // using it. - var mac *macaroon.Macaroon - err := wait.NoError(func() error { - macBytes, err := ioutil.ReadFile(macPath) - if err != nil { - return fmt.Errorf("error reading macaroon file: %v", - err) - } - - newMac := &macaroon.Macaroon{} - if err = newMac.UnmarshalBinary(macBytes); err != nil { - return fmt.Errorf("error unmarshalling macaroon "+ - "file: %v", err) - } - mac = newMac - - return nil - }, timeout) - - return mac, err -} - -// ConnectRPCWithMacaroon uses the TLS certificate and given macaroon to -// create a gRPC client connection. -func (hn *HarnessNode) ConnectRPCWithMacaroon(mac *macaroon.Macaroon) ( - *grpc.ClientConn, error) { - - // Wait until TLS certificate is created and has valid content before - // using it, up to 30 sec. 
- var tlsCreds credentials.TransportCredentials - err := wait.NoError(func() error { - var err error - tlsCreds, err = credentials.NewClientTLSFromFile( - hn.Cfg.TLSCertPath, "", - ) - return err - }, DefaultTimeout) - if err != nil { - return nil, fmt.Errorf("error reading TLS cert: %v", err) - } - - opts := []grpc.DialOption{ - grpc.WithBlock(), - grpc.WithTransportCredentials(tlsCreds), - } - - ctx, cancel := context.WithTimeout(hn.runCtx, DefaultTimeout) - defer cancel() - - if mac == nil { - return grpc.DialContext(ctx, hn.Cfg.RPCAddr(), opts...) - } - macCred, err := macaroons.NewMacaroonCredential(mac) - if err != nil { - return nil, fmt.Errorf("error cloning mac: %v", err) - } - opts = append(opts, grpc.WithPerRPCCredentials(macCred)) - - return grpc.DialContext(ctx, hn.Cfg.RPCAddr(), opts...) -} - -// ConnectRPC uses the TLS certificate and admin macaroon files written by the -// lnd node to create a gRPC client connection. -func (hn *HarnessNode) ConnectRPC(useMacs bool) (*grpc.ClientConn, error) { - // If we don't want to use macaroons, just pass nil, the next method - // will handle it correctly. - if !useMacs { - return hn.ConnectRPCWithMacaroon(nil) - } - - // If we should use a macaroon, always take the admin macaroon as a - // default. - mac, err := hn.ReadMacaroon(hn.Cfg.AdminMacPath, DefaultTimeout) - if err != nil { - return nil, err - } - return hn.ConnectRPCWithMacaroon(mac) -} - -// SetExtraArgs assigns the ExtraArgs field for the node's configuration. The -// changes will take effect on restart. -func (hn *HarnessNode) SetExtraArgs(extraArgs []string) { - hn.Cfg.ExtraArgs = extraArgs -} - -// cleanup cleans up all the temporary files created by the node's process. -func (hn *HarnessNode) cleanup() error { - if hn.backupDbDir != "" { - err := os.RemoveAll(hn.backupDbDir) - if err != nil { - return fmt.Errorf("unable to remove backup dir: %v", - err) - } - } - - return os.RemoveAll(hn.Cfg.BaseDir) -} - -// Stop attempts to stop the active lnd process. -func (hn *HarnessNode) stop() error { - // Do nothing if the process is not running. - if hn.runCtx == nil { - return nil - } - - // If start() failed before creating clients, we will just wait for the - // child process to die. - if hn.rpc != nil && hn.rpc.LN != nil { - // Don't watch for error because sometimes the RPC connection - // gets closed before a response is returned. - req := lnrpc.StopRequest{} - - err := wait.NoError(func() error { - _, err := hn.rpc.LN.StopDaemon(hn.runCtx, &req) - switch { - case err == nil: - return nil - - // Try again if a recovery/rescan is in progress. - case strings.Contains( - err.Error(), "recovery in progress", - ): - return err - - default: - return nil - } - }, DefaultTimeout) - if err != nil { - return err - } - } - - // Stop the runCtx and wait for goroutines to finish. - hn.cancel() - - // Wait for lnd process to exit. - err := wait.NoError(func() error { - if hn.cmd.ProcessState == nil { - return fmt.Errorf("process did not exit") - } - - if !hn.cmd.ProcessState.Exited() { - return fmt.Errorf("process did not exit") - } - - // Wait for goroutines to be finished. - hn.wg.Wait() - - return nil - }, DefaultTimeout*2) - if err != nil { - return err - } - - hn.LightningClient = nil - hn.WalletUnlockerClient = nil - hn.Watchtower = nil - hn.WatchtowerClient = nil - hn.NeutrinoClient = nil - - // Close any attempts at further grpc connections. 
- if hn.rpc.conn != nil { - err := status.Code(hn.rpc.conn.Close()) - switch err { - case codes.OK: - return nil - - // When the context is canceled above, we might get the - // following error as the context is no longer active. - case codes.Canceled: - return nil - - case codes.Unknown: - return fmt.Errorf("unknown error attempting to stop "+ - "grpc client: %v", err) - - default: - return fmt.Errorf("error attempting to stop "+ - "grpc client: %v", err) - } - } - - return nil -} - -// shutdown stops the active lnd process and cleans up any temporary -// directories created along the way. -func (hn *HarnessNode) shutdown() error { - if err := hn.stop(); err != nil { - return err - } - if err := hn.cleanup(); err != nil { - return err - } - return nil -} - -// kill kills the lnd process. -func (hn *HarnessNode) kill() error { - return hn.cmd.Process.Kill() -} - -type chanWatchType uint8 - -const ( - // watchOpenChannel specifies that this is a request to watch an open - // channel event. - watchOpenChannel chanWatchType = iota - - // watchCloseChannel specifies that this is a request to watch a close - // channel event. - watchCloseChannel - - // watchPolicyUpdate specifies that this is a request to watch a policy - // update event. - watchPolicyUpdate -) - -// closeChanWatchRequest is a request to the lightningNetworkWatcher to be -// notified once it's detected within the test Lightning Network, that a -// channel has either been added or closed. -type chanWatchRequest struct { - chanPoint wire.OutPoint - - chanWatchType chanWatchType - - eventChan chan struct{} - - advertisingNode string - policy *lnrpc.RoutingPolicy - includeUnannounced bool -} - -func (hn *HarnessNode) checkChanPointInGraph(chanPoint wire.OutPoint) bool { - ctxt, cancel := context.WithTimeout(hn.runCtx, DefaultTimeout) - defer cancel() - - chanGraph, err := hn.DescribeGraph(ctxt, &lnrpc.ChannelGraphRequest{}) - if err != nil { - return false - } - - targetChanPoint := chanPoint.String() - for _, chanEdge := range chanGraph.Edges { - candidateChanPoint := chanEdge.ChanPoint - if targetChanPoint == candidateChanPoint { - return true - } - } - - return false -} - -// lightningNetworkWatcher is a goroutine which is able to dispatch -// notifications once it has been observed that a target channel has been -// closed or opened within the network. In order to dispatch these -// notifications, the GraphTopologySubscription client exposed as part of the -// gRPC interface is used. -func (hn *HarnessNode) lightningNetworkWatcher() { - defer hn.wg.Done() - - graphUpdates := make(chan *lnrpc.GraphTopologyUpdate) - - // Start a goroutine to receive graph updates. - hn.wg.Add(1) - go func() { - defer hn.wg.Done() - err := hn.receiveTopologyClientStream(graphUpdates) - - if err != nil { - hn.PrintErrf("receive topology client stream "+ - "got err:%v", err) - } - }() - - for { - select { - // A new graph update has just been received, so we'll examine - // the current set of registered clients to see if we can - // dispatch any requests. - case graphUpdate := <-graphUpdates: - hn.handleChannelEdgeUpdates(graphUpdate.ChannelUpdates) - hn.handleClosedChannelUpdate(graphUpdate.ClosedChans) - // TODO(yy): handle node updates too - - // A new watch request, has just arrived. We'll either be able - // to dispatch immediately, or need to add the client for - // processing later. 
- case watchRequest := <-hn.chanWatchRequests:
- switch watchRequest.chanWatchType {
- case watchOpenChannel:
- // TODO(roasbeef): add update type also, checks
- // for multiple of 2
- hn.handleOpenChannelWatchRequest(watchRequest)
-
- case watchCloseChannel:
- hn.handleCloseChannelWatchRequest(watchRequest)
-
- case watchPolicyUpdate:
- hn.handlePolicyUpdateWatchRequest(watchRequest)
- }
-
- case <-hn.runCtx.Done():
- return
- }
- }
-}
-
-// WaitForNetworkChannelOpen will block until a channel with the target
-// outpoint is seen as being fully advertised within the network. A channel is
-// considered "fully advertised" once both of its directional edges have been
-// advertised within the test Lightning Network.
-func (hn *HarnessNode) WaitForNetworkChannelOpen(
- chanPoint *lnrpc.ChannelPoint) error {
-
- ctxt, cancel := context.WithTimeout(hn.runCtx, DefaultTimeout)
- defer cancel()
-
- eventChan := make(chan struct{})
-
- op, err := MakeOutpoint(chanPoint)
- if err != nil {
- return fmt.Errorf("failed to create outpoint for %v "+
- "got err: %v", chanPoint, err)
- }
-
- hn.chanWatchRequests <- &chanWatchRequest{
- chanPoint: op,
- eventChan: eventChan,
- chanWatchType: watchOpenChannel,
- }
-
- select {
- case <-eventChan:
- return nil
- case <-ctxt.Done():
- return fmt.Errorf("channel:%s not opened before timeout: %s",
- op, hn)
- }
-}
-
-// WaitForNetworkChannelClose will block until a channel with the target
-// outpoint is seen as closed within the network. A channel is considered
-// closed once a transaction spending the funding outpoint is seen within a
-// confirmed block.
-func (hn *HarnessNode) WaitForNetworkChannelClose(
- chanPoint *lnrpc.ChannelPoint) error {
-
- ctxt, cancel := context.WithTimeout(hn.runCtx, DefaultTimeout)
- defer cancel()
-
- eventChan := make(chan struct{})
-
- op, err := MakeOutpoint(chanPoint)
- if err != nil {
- return fmt.Errorf("failed to create outpoint for %v "+
- "got err: %v", chanPoint, err)
- }
-
- hn.chanWatchRequests <- &chanWatchRequest{
- chanPoint: op,
- eventChan: eventChan,
- chanWatchType: watchCloseChannel,
- }
-
- select {
- case <-eventChan:
- return nil
- case <-ctxt.Done():
- return fmt.Errorf("channel:%s not closed before timeout: "+
- "%s", op, hn)
- }
-}
-
-// WaitForChannelPolicyUpdate will block until a channel policy with the target
-// outpoint and advertisingNode is seen within the network.
-func (hn *HarnessNode) WaitForChannelPolicyUpdate(
- advertisingNode string, policy *lnrpc.RoutingPolicy,
- chanPoint *lnrpc.ChannelPoint, includeUnannounced bool) error {
-
- ctxt, cancel := context.WithTimeout(hn.runCtx, DefaultTimeout)
- defer cancel()
-
- eventChan := make(chan struct{})
-
- op, err := MakeOutpoint(chanPoint)
- if err != nil {
- return fmt.Errorf("failed to create outpoint for %v "+
- "got err: %v", chanPoint, err)
- }
-
- ticker := time.NewTicker(wait.PollInterval)
- defer ticker.Stop()
-
- for {
- select {
- // Send a watch request on every poll tick.
- case <-ticker.C:
- // Did the event chan close in the meantime? We want to
- // avoid a "close of closed channel" panic since we're
- // re-using the same event chan for multiple requests.
- select { - case <-eventChan: - return nil - default: - } - - hn.chanWatchRequests <- &chanWatchRequest{ - chanPoint: op, - eventChan: eventChan, - chanWatchType: watchPolicyUpdate, - policy: policy, - advertisingNode: advertisingNode, - includeUnannounced: includeUnannounced, - } - - case <-eventChan: - return nil - - case <-ctxt.Done(): - return fmt.Errorf("channel:%s policy not updated "+ - "before timeout: [%s:%v] %s", op, - advertisingNode, policy, hn.String()) - } - } -} - -// WaitForBlockchainSync waits for the target node to be fully synchronized -// with the blockchain. If the passed context object has a set timeout, it will -// continually poll until the timeout has elapsed. In the case that the chain -// isn't synced before the timeout is up, this function will return an error. -func (hn *HarnessNode) WaitForBlockchainSync() error { - ctxt, cancel := context.WithTimeout(hn.runCtx, DefaultTimeout) - defer cancel() - - ticker := time.NewTicker(time.Millisecond * 100) - defer ticker.Stop() - - for { - resp, err := hn.rpc.LN.GetInfo(ctxt, &lnrpc.GetInfoRequest{}) - if err != nil { - return err - } - if resp.SyncedToChain { - return nil - } - - select { - case <-ctxt.Done(): - return fmt.Errorf("timeout while waiting for " + - "blockchain sync") - case <-hn.runCtx.Done(): - return nil - case <-ticker.C: - } - } -} - -// WaitForBalance waits until the node sees the expected confirmed/unconfirmed -// balance within their wallet. -func (hn *HarnessNode) WaitForBalance(expectedBalance btcutil.Amount, - confirmed bool) error { - - req := &lnrpc.WalletBalanceRequest{} - - var lastBalance btcutil.Amount - doesBalanceMatch := func() bool { - balance, err := hn.rpc.LN.WalletBalance(hn.runCtx, req) - if err != nil { - return false - } - - if confirmed { - lastBalance = btcutil.Amount(balance.ConfirmedBalance) - return btcutil.Amount(balance.ConfirmedBalance) == - expectedBalance - } - - lastBalance = btcutil.Amount(balance.UnconfirmedBalance) - return btcutil.Amount(balance.UnconfirmedBalance) == - expectedBalance - } - - err := wait.Predicate(doesBalanceMatch, DefaultTimeout) - if err != nil { - return fmt.Errorf("balances not synced after deadline: "+ - "expected %v, only have %v", expectedBalance, - lastBalance) - } - - return nil -} - -// PrintErrf prints an error to the console. -func (hn *HarnessNode) PrintErrf(format string, a ...interface{}) { - fmt.Printf("itest error from [node:%s]: %s\n", // nolint:forbidigo - hn.Cfg.Name, fmt.Sprintf(format, a...)) -} - -// handleChannelEdgeUpdates takes a series of channel edge updates, extracts -// the outpoints, and saves them to harness node's internal state. -func (hn *HarnessNode) handleChannelEdgeUpdates( - updates []*lnrpc.ChannelEdgeUpdate) { - - // For each new channel, we'll increment the number of - // edges seen by one. - for _, newChan := range updates { - op, err := MakeOutpoint(newChan.ChanPoint) - if err != nil { - hn.PrintErrf("failed to create outpoint for %v "+ - "got err: %v", newChan.ChanPoint, err) - return - } - hn.openChans[op]++ - - // For this new channel, if the number of edges seen is less - // than two, then the channel hasn't been fully announced yet. - if numEdges := hn.openChans[op]; numEdges < 2 { - return - } - - // Otherwise, we'll notify all the registered watchers and - // remove the dispatched watchers. - for _, eventChan := range hn.openChanWatchers[op] { - close(eventChan) - } - delete(hn.openChanWatchers, op) - - // Check whether there's a routing policy update. If so, save - // it to the node state. 
- if newChan.RoutingPolicy == nil {
- continue
- }
-
- // Append the policy to the slice.
- node := newChan.AdvertisingNode
- policies := hn.policyUpdates[op.String()]
-
- // If the map[op] is nil, we need to initialize the map first.
- if policies == nil {
- policies = make(map[string][]*lnrpc.RoutingPolicy)
- }
- policies[node] = append(
- policies[node], newChan.RoutingPolicy,
- )
- hn.policyUpdates[op.String()] = policies
- }
-}
-
-// handleOpenChannelWatchRequest processes a watch open channel request by
-// checking the number of edges seen for a given channel point. If the
-// number is no less than 2 then the channel is considered open. Otherwise, we
-// will attempt to find it in the node's channel graph. If neither can be found,
-// the request is added to a watch request list that will be handled by
-// handleChannelEdgeUpdates.
-func (hn *HarnessNode) handleOpenChannelWatchRequest(req *chanWatchRequest) {
- targetChan := req.chanPoint
-
- // If this is an open request, then it can be dispatched if the number
- // of edges seen for the channel is at least two.
- if numEdges := hn.openChans[targetChan]; numEdges >= 2 {
- close(req.eventChan)
- return
- }
-
- // Before we add the channel to our set of open clients, we'll check to
- // see if the channel is already in the channel graph of the target
- // node. This lets us handle the case where a node has already seen a
- // channel before a notification has been requested, causing us to miss
- // it.
- chanFound := hn.checkChanPointInGraph(targetChan)
- if chanFound {
- close(req.eventChan)
- return
- }
-
- // Otherwise, we'll add this to the list of open channel watchers for
- // this out point.
- hn.openChanWatchers[targetChan] = append(
- hn.openChanWatchers[targetChan],
- req.eventChan,
- )
-}
-
-// handleClosedChannelUpdate takes a series of closed channel updates, extracts
-// the outpoints, saves them to harness node's internal state, and notifies all
-// registered clients.
-func (hn *HarnessNode) handleClosedChannelUpdate(
- updates []*lnrpc.ClosedChannelUpdate) {
-
- // For each channel closed, we'll mark that we've detected a channel
- // closure while lnd was pruning the channel graph.
- for _, closedChan := range updates {
- op, err := MakeOutpoint(closedChan.ChanPoint)
- if err != nil {
- hn.PrintErrf("failed to create outpoint for %v "+
- "got err: %v", closedChan.ChanPoint, err)
- return
- }
-
- hn.closedChans[op] = struct{}{}
-
- // As the channel has been closed, we'll notify all registered
- // watchers.
- for _, eventChan := range hn.closeChanWatchers[op] {
- close(eventChan)
- }
- delete(hn.closeChanWatchers, op)
- }
-}
-
-// handleCloseChannelWatchRequest processes a watch close channel request by
-// checking whether the given channel point can be found in the node's internal
-// state. If not, the request is added to a watch request list that will be
-// handled by handleClosedChannelUpdate.
-func (hn *HarnessNode) handleCloseChannelWatchRequest(req *chanWatchRequest) {
- targetChan := req.chanPoint
-
- // If this is a close request, then it can be immediately dispatched if
- // we've already seen a channel closure for this channel.
- if _, ok := hn.closedChans[targetChan]; ok {
- close(req.eventChan)
- return
- }
-
- // Otherwise, we'll add this to the list of close channel watchers for
- // this out point.
- hn.closeChanWatchers[targetChan] = append(
- hn.closeChanWatchers[targetChan],
- req.eventChan,
- )
-}
-
-type topologyClient lnrpc.Lightning_SubscribeChannelGraphClient
-
-// newTopologyClient creates a topology client.
-func (hn *HarnessNode) newTopologyClient(
- ctx context.Context) (topologyClient, error) {
-
- req := &lnrpc.GraphTopologySubscription{}
- client, err := hn.rpc.LN.SubscribeChannelGraph(ctx, req)
- if err != nil {
- return nil, fmt.Errorf("%s(%d): unable to create topology "+
- "client: %v (%s)", hn.Name(), hn.NodeID, err,
- time.Now().String())
- }
-
- return client, nil
-}
-
-// receiveTopologyClientStream initializes a topologyClient to subscribe to
-// topology update events. Due to a race condition between the ChannelRouter
-// starting and us making the subscription request, it's possible for our graph
-// subscription to fail. In that case, we will retry the subscription until it
-// succeeds or fails once the subscription timeout expires.
-//
-// NOTE: must be run as a goroutine.
-func (hn *HarnessNode) receiveTopologyClientStream(
- receiver chan *lnrpc.GraphTopologyUpdate) error {
-
- // Create a topology client to receive graph updates.
- client, err := hn.newTopologyClient(hn.runCtx)
- if err != nil {
- return fmt.Errorf("create topologyClient failed: %w", err)
- }
-
- // We use the context to time out when retrying graph subscription.
- ctxt, cancel := context.WithTimeout(hn.runCtx, DefaultTimeout)
- defer cancel()
-
- for {
- update, err := client.Recv()
-
- switch {
- case err == nil:
- // Good case. We will send the update to the receiver.
-
- case strings.Contains(err.Error(), "router not started"):
- // If the router hasn't been started, we will retry
- // every 200 ms until it has been started or fail
- // after the ctxt is timed out.
- select {
- case <-ctxt.Done():
- return fmt.Errorf("graph subscription: " +
- "router not started before timeout")
- case <-time.After(wait.PollInterval):
- case <-hn.runCtx.Done():
- return nil
- }
-
- // Re-create the topology client.
- client, err = hn.newTopologyClient(hn.runCtx)
- if err != nil {
- return fmt.Errorf("create topologyClient "+
- "failed: %v", err)
- }
-
- continue
-
- case strings.Contains(err.Error(), "EOF"):
- // End of subscription stream. Do nothing and quit.
- return nil
-
- case strings.Contains(err.Error(), context.Canceled.Error()):
- // End of subscription stream. Do nothing and quit.
- return nil
-
- default:
- // An unexpected error was returned, so return it and
- // leave it to be handled by the caller.
- return fmt.Errorf("graph subscription err: %w", err)
- }
-
- // Send the update or quit.
- select {
- case receiver <- update:
- case <-hn.runCtx.Done():
- return nil
- }
- }
-}
-
-// handlePolicyUpdateWatchRequest checks whether the expected policy can be
-// found either in the node's internal state or in its DescribeGraph response.
-// If found, it signals the request by closing the event channel. Otherwise it
-// does nothing.
-func (hn *HarnessNode) handlePolicyUpdateWatchRequest(req *chanWatchRequest) {
- op := req.chanPoint
-
- // Get a list of known policies for this chanPoint+advertisingNode
- // combination. Start searching in the node state first.
- policies, ok := hn.policyUpdates[op.String()][req.advertisingNode]
-
- if !ok {
- // If it cannot be found in the node state, try searching it
- // from the node's DescribeGraph.
- policyMap := hn.getChannelPolicies(req.includeUnannounced) - policies, ok = policyMap[op.String()][req.advertisingNode] - if !ok { - return - } - } - - // Check if there's a matched policy. - for _, policy := range policies { - if CheckChannelPolicy(policy, req.policy) == nil { - close(req.eventChan) - return - } - } -} - -// getChannelPolicies queries the channel graph and formats the policies into -// the format defined in type policyUpdateMap. -func (hn *HarnessNode) getChannelPolicies(include bool) policyUpdateMap { - ctxt, cancel := context.WithTimeout(hn.runCtx, DefaultTimeout) - defer cancel() - - graph, err := hn.rpc.LN.DescribeGraph(ctxt, &lnrpc.ChannelGraphRequest{ - IncludeUnannounced: include, - }) - if err != nil { - hn.PrintErrf("DescribeGraph got err: %v", err) - return nil - } - - policyUpdates := policyUpdateMap{} - - for _, e := range graph.Edges { - policies := policyUpdates[e.ChanPoint] - - // If the map[op] is nil, we need to initialize the map first. - if policies == nil { - policies = make(map[string][]*lnrpc.RoutingPolicy) - } - - if e.Node1Policy != nil { - policies[e.Node1Pub] = append( - policies[e.Node1Pub], e.Node1Policy, - ) - } - - if e.Node2Policy != nil { - policies[e.Node2Pub] = append( - policies[e.Node2Pub], e.Node2Policy, - ) - } - - policyUpdates[e.ChanPoint] = policies - } - - return policyUpdates -} - -// renameFile is a helper to rename (log) files created during integration -// tests. -func renameFile(fromFileName, toFileName string) { - err := os.Rename(fromFileName, toFileName) - if err != nil { - fmt.Printf("could not rename %s to %s: %v\n", // nolint:forbidigo - fromFileName, toFileName, err) - } -} - -// getFinalizedLogFilePrefix returns the finalize log filename. -func getFinalizedLogFilePrefix(hn *HarnessNode) string { - pubKeyHex := hex.EncodeToString( - hn.PubKey[:logPubKeyBytes], - ) - - return fmt.Sprintf("%s/%d-%s-%s-%s", - GetLogDir(), hn.NodeID, - hn.Cfg.LogFilenamePrefix, - hn.Cfg.Name, pubKeyHex) -} - -// finalizeLogfile makes sure the log file cleanup function is initialized, -// even if no log file is created. -func finalizeLogfile(hn *HarnessNode, fileName string) { - if hn.logFile != nil { - hn.logFile.Close() - - // If logoutput flag is not set, return early. - if !*LogOutput { - return - } - - newFileName := fmt.Sprintf("%v.log", - getFinalizedLogFilePrefix(hn), - ) - - renameFile(fileName, newFileName) - } -} - -func finalizeEtcdLog(hn *HarnessNode) { - if hn.Cfg.DbBackend != BackendEtcd { - return - } - - etcdLogFileName := fmt.Sprintf("%s/etcd.log", hn.Cfg.LogDir) - newEtcdLogFileName := fmt.Sprintf("%v-etcd.log", - getFinalizedLogFilePrefix(hn), - ) - - renameFile(etcdLogFileName, newEtcdLogFileName) -} - -func addLogFile(hn *HarnessNode) (string, error) { - var fileName string - - dir := GetLogDir() - fileName = fmt.Sprintf("%s/%d-%s-%s-%s.log", dir, hn.NodeID, - hn.Cfg.LogFilenamePrefix, hn.Cfg.Name, - hex.EncodeToString(hn.PubKey[:logPubKeyBytes])) - - // If the node's PubKey is not yet initialized, create a - // temporary file name. Later, after the PubKey has been - // initialized, the file can be moved to its final name with - // the PubKey included. - if bytes.Equal(hn.PubKey[:4], []byte{0, 0, 0, 0}) { - fileName = fmt.Sprintf("%s/%d-%s-%s-tmp__.log", dir, - hn.NodeID, hn.Cfg.LogFilenamePrefix, - hn.Cfg.Name) - } - - // Create file if not exists, otherwise append. 
- file, err := os.OpenFile(fileName, - os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0666) - if err != nil { - return fileName, err - } - - // Pass node's stderr to both errb and the file. - w := io.MultiWriter(hn.cmd.Stderr, file) - hn.cmd.Stderr = w - - // Pass the node's stdout only to the file. - hn.cmd.Stdout = file - - // Let the node keep a reference to this file, such - // that we can add to it if necessary. - hn.logFile = file - - return fileName, nil -} diff --git a/lntest/neutrino.go b/lntest/neutrino.go index 64e880372..9ea18a4db 100644 --- a/lntest/neutrino.go +++ b/lntest/neutrino.go @@ -7,6 +7,7 @@ import ( "fmt" "github.com/btcsuite/btcd/chaincfg" + "github.com/lightningnetwork/lnd/lntemp/node" ) // NeutrinoBackendConfig is an implementation of the BackendConfig interface @@ -17,7 +18,7 @@ type NeutrinoBackendConfig struct { // A compile time assertion to ensure NeutrinoBackendConfig meets the // BackendConfig interface. -var _ BackendConfig = (*NeutrinoBackendConfig)(nil) +var _ node.BackendConfig = (*NeutrinoBackendConfig)(nil) // GenArgs returns the arguments needed to be passed to LND at startup for // using this node as a chain backend. diff --git a/lntest/test_common.go b/lntest/test_common.go deleted file mode 100644 index 1cab0ad5c..000000000 --- a/lntest/test_common.go +++ /dev/null @@ -1,190 +0,0 @@ -package lntest - -import ( - "errors" - "flag" - "fmt" - "io" - "net" - "os" - "sync/atomic" - - "github.com/btcsuite/btcd/wire" - "github.com/lightningnetwork/lnd/lnrpc" -) - -const ( - // defaultNodePort is the start of the range for listening ports of - // harness nodes. Ports are monotonically increasing starting from this - // number and are determined by the results of nextAvailablePort(). - defaultNodePort = 5555 - - // ListenerFormat is the format string that is used to generate local - // listener addresses. - ListenerFormat = "127.0.0.1:%d" - - // NeutrinoBackendName is the name of the neutrino backend. - NeutrinoBackendName = "neutrino" -) - -type DatabaseBackend int - -const ( - BackendBbolt DatabaseBackend = iota - BackendEtcd - BackendPostgres - BackendSqlite -) - -var ( - // lastPort is the last port determined to be free for use by a new - // node. It should be used atomically. - lastPort uint32 = defaultNodePort - - // logOutput is a flag that can be set to append the output from the - // seed nodes to log files. - // - // TODO(yy): remove the export. - LogOutput = flag.Bool("logoutput", false, - "log output from node n to file output-n.log") - - // logSubDir is the default directory where the logs are written to if - // logOutput is true. - logSubDir = flag.String("logdir", ".", "default dir to write logs to") - - // goroutineDump is a flag that can be set to dump the active - // goroutines of test nodes on failure. - goroutineDump = flag.Bool("goroutinedump", false, - "write goroutine dump from node n to file pprof-n.log") - - // btcdExecutable is the full path to the btcd binary. - btcdExecutable = flag.String( - "btcdexec", "", "full path to btcd binary", - ) -) - -// NextAvailablePort returns the first port that is available for listening by -// a new node. It panics if no port is found and the maximum available TCP port -// is reached. -func NextAvailablePort() int { - port := atomic.AddUint32(&lastPort, 1) - for port < 65535 { - // If there are no errors while attempting to listen on this - // port, close the socket and return it as available. 
While it - // could be the case that some other process picks up this port - // between the time the socket is closed and it's reopened in - // the harness node, in practice in CI servers this seems much - // less likely than simply some other process already being - // bound at the start of the tests. - addr := fmt.Sprintf(ListenerFormat, port) - l, err := net.Listen("tcp4", addr) - if err == nil { - err := l.Close() - if err == nil { - return int(port) - } - } - port = atomic.AddUint32(&lastPort, 1) - } - - // No ports available? Must be a mistake. - panic("no ports available for listening") -} - -// ApplyPortOffset adds the given offset to the lastPort variable, making it -// possible to run the tests in parallel without colliding on the same ports. -func ApplyPortOffset(offset uint32) { - _ = atomic.AddUint32(&lastPort, offset) -} - -// GetLogDir returns the passed --logdir flag or the default value if it wasn't -// set. -func GetLogDir() string { - if logSubDir != nil && *logSubDir != "" { - return *logSubDir - } - return "." -} - -// GetBtcdBinary returns the full path to the binary of the custom built btcd -// executable or an empty string if none is set. -func GetBtcdBinary() string { - if btcdExecutable != nil { - return *btcdExecutable - } - - return "" -} - -// GenerateBtcdListenerAddresses is a function that returns two listener -// addresses with unique ports and should be used to overwrite rpctest's -// default generator which is prone to use colliding ports. -func GenerateBtcdListenerAddresses() (string, string) { - return fmt.Sprintf(ListenerFormat, NextAvailablePort()), - fmt.Sprintf(ListenerFormat, NextAvailablePort()) -} - -// MakeOutpoint returns the outpoint of the channel's funding transaction. -func MakeOutpoint(chanPoint *lnrpc.ChannelPoint) (wire.OutPoint, error) { - fundingTxID, err := lnrpc.GetChanPointFundingTxid(chanPoint) - if err != nil { - return wire.OutPoint{}, err - } - - return wire.OutPoint{ - Hash: *fundingTxID, - Index: chanPoint.OutputIndex, - }, nil -} - -// CheckChannelPolicy checks that the policy matches the expected one. -func CheckChannelPolicy(policy, expectedPolicy *lnrpc.RoutingPolicy) error { - if policy.FeeBaseMsat != expectedPolicy.FeeBaseMsat { - return fmt.Errorf("expected base fee %v, got %v", - expectedPolicy.FeeBaseMsat, policy.FeeBaseMsat) - } - if policy.FeeRateMilliMsat != expectedPolicy.FeeRateMilliMsat { - return fmt.Errorf("expected fee rate %v, got %v", - expectedPolicy.FeeRateMilliMsat, - policy.FeeRateMilliMsat) - } - if policy.TimeLockDelta != expectedPolicy.TimeLockDelta { - return fmt.Errorf("expected time lock delta %v, got %v", - expectedPolicy.TimeLockDelta, - policy.TimeLockDelta) - } - if policy.MinHtlc != expectedPolicy.MinHtlc { - return fmt.Errorf("expected min htlc %v, got %v", - expectedPolicy.MinHtlc, policy.MinHtlc) - } - if policy.MaxHtlcMsat != expectedPolicy.MaxHtlcMsat { - return fmt.Errorf("expected max htlc %v, got %v", - expectedPolicy.MaxHtlcMsat, policy.MaxHtlcMsat) - } - if policy.Disabled != expectedPolicy.Disabled { - return errors.New("edge should be disabled but isn't") - } - - return nil -} - -// CopyFile copies the file src to dest. -func CopyFile(dest, src string) error { - s, err := os.Open(src) - if err != nil { - return err - } - defer s.Close() - - d, err := os.Create(dest) - if err != nil { - return err - } - - if _, err := io.Copy(d, s); err != nil { - d.Close() - return err - } - - return d.Close() -}
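
As a reference for the `lntemp` takeover, below is a minimal sketch (not part of this patch) of the compile-time assertion pattern the commit applies to `NeutrinoBackendConfig` in `lntest/neutrino.go`. It assumes `lntemp/node`'s `BackendConfig` keeps the same five methods as the `lntest` interface removed above; `stubBackend` is a hypothetical, do-nothing backend used only to show how the blank-identifier assignment catches a missing or misspelled method at compile time.

package lntest

import "github.com/lightningnetwork/lnd/lntemp/node"

// stubBackend is a hypothetical chain backend that implements every
// BackendConfig method as a no-op.
type stubBackend struct{}

// GenArgs returns no extra lnd arguments for the stub backend.
func (s *stubBackend) GenArgs() []string { return nil }

// ConnectMiner is a no-op for the stub backend.
func (s *stubBackend) ConnectMiner() error { return nil }

// DisconnectMiner is a no-op for the stub backend.
func (s *stubBackend) DisconnectMiner() error { return nil }

// Name identifies the stub backend.
func (s *stubBackend) Name() string { return "stub" }

// Credentials returns empty RPC credentials for the stub backend.
func (s *stubBackend) Credentials() (string, string, string, error) {
	return "", "", "", nil
}

// If stubBackend drops or misspells any of the methods above, this line
// fails to compile, which is how the NeutrinoBackendConfig assertion guards
// the switch to lntemp's node.BackendConfig.
var _ node.BackendConfig = (*stubBackend)(nil)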