Merge pull request #5449 from yyforyongyu/itest-refactor-assertion
Break down the lnd_test.go file into smaller files
Commit: edd4152682
File diff suppressed because it is too large
@@ -1251,3 +1251,15 @@ func copyPorts(oldNode *lntest.HarnessNode) lntest.NodeOption {
		cfg.ProfilePort = oldNode.Cfg.ProfilePort
	}
}

func rpcPointToWirePoint(t *harnessTest, chanPoint *lnrpc.ChannelPoint) wire.OutPoint {
	txid, err := lnrpc.GetChanPointFundingTxid(chanPoint)
	if err != nil {
		t.Fatalf("unable to get txid: %v", err)
	}

	return wire.OutPoint{
		Hash:  *txid,
		Index: chanPoint.OutputIndex,
	}
}
281	lntest/itest/lnd_channel_balance_test.go	(new file)
@@ -0,0 +1,281 @@
package itest

import (
	"context"
	"fmt"

	"github.com/btcsuite/btcutil"
	"github.com/lightningnetwork/lnd/chainreg"
	"github.com/lightningnetwork/lnd/funding"
	"github.com/lightningnetwork/lnd/lnrpc"
	"github.com/lightningnetwork/lnd/lnrpc/routerrpc"
	"github.com/lightningnetwork/lnd/lntest"
	"github.com/lightningnetwork/lnd/lntest/wait"
	"github.com/lightningnetwork/lnd/lnwire"
	"github.com/stretchr/testify/require"
)

// testChannelBalance creates a new channel between Alice and Bob, then checks
// that the channel balance reported by each node matches the amount specified
// when the channel was created.
func testChannelBalance(net *lntest.NetworkHarness, t *harnessTest) {
	ctxb := context.Background()

	// Open a channel with 0.16 BTC between Alice and Bob, ensuring the
	// channel has been opened properly.
	amount := funding.MaxBtcFundingAmount

	// Create a helper closure to be used below which asserts the proper
	// response to a channel balance RPC.
	checkChannelBalance := func(node *lntest.HarnessNode,
		local, remote btcutil.Amount) {

		expectedResponse := &lnrpc.ChannelBalanceResponse{
			LocalBalance: &lnrpc.Amount{
				Sat:  uint64(local),
				Msat: uint64(lnwire.NewMSatFromSatoshis(local)),
			},
			RemoteBalance: &lnrpc.Amount{
				Sat: uint64(remote),
				Msat: uint64(lnwire.NewMSatFromSatoshis(
					remote,
				)),
			},
			UnsettledLocalBalance:    &lnrpc.Amount{},
			UnsettledRemoteBalance:   &lnrpc.Amount{},
			PendingOpenLocalBalance:  &lnrpc.Amount{},
			PendingOpenRemoteBalance: &lnrpc.Amount{},
			// Deprecated fields.
			Balance: int64(local),
		}
		assertChannelBalanceResp(t, node, expectedResponse)
	}

	// Before beginning, make sure Alice and Bob are connected.
	ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
	net.EnsureConnected(ctxt, t.t, net.Alice, net.Bob)

	ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout)
	chanPoint := openChannelAndAssert(
		ctxt, t, net, net.Alice, net.Bob,
		lntest.OpenChannelParams{
			Amt: amount,
		},
	)

	// Wait for both Alice and Bob to recognize this new channel.
	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
	err := net.Alice.WaitForNetworkChannelOpen(ctxt, chanPoint)
	if err != nil {
		t.Fatalf("alice didn't advertise channel before "+
			"timeout: %v", err)
	}
	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
	err = net.Bob.WaitForNetworkChannelOpen(ctxt, chanPoint)
	if err != nil {
		t.Fatalf("bob didn't advertise channel before "+
			"timeout: %v", err)
	}

	cType, err := channelCommitType(net.Alice, chanPoint)
	if err != nil {
		t.Fatalf("unable to get channel type: %v", err)
	}

	// As this is a single funder channel, Alice's balance should be
	// exactly the channel amount minus the commitment fee, since no state
	// transitions have taken place yet.
	checkChannelBalance(net.Alice, amount-cType.calcStaticFee(0), 0)

	// Ensure Bob currently has no available balance within the channel.
	checkChannelBalance(net.Bob, 0, amount-cType.calcStaticFee(0))

	// Finally close the channel between Alice and Bob, asserting that the
	// channel has been properly closed on-chain.
	ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout)
	closeChannelAndAssert(ctxt, t, net, net.Alice, chanPoint, false)
}
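
Aside: the assertions above lean on the package helper assertChannelBalanceResp, which this diff does not show. A minimal sketch of what such a helper can look like follows; the real implementation lives elsewhere in the itest package, so treat the body (including the proto.Equal choice and the helper's name) as illustrative rather than the PR's actual code.

// Sketch of a ChannelBalance assertion helper (illustrative only).
func assertChannelBalanceSketch(t *harnessTest, node *lntest.HarnessNode,
	expected *lnrpc.ChannelBalanceResponse) {

	ctxt, cancel := context.WithTimeout(context.Background(), defaultTimeout)
	defer cancel()

	// Fetch the node's current view of its channel balances.
	resp, err := node.ChannelBalance(ctxt, &lnrpc.ChannelBalanceRequest{})
	require.NoError(t.t, err, "unable to fetch channel balance")

	// proto.Equal compares the nested *lnrpc.Amount fields by value.
	require.True(t.t, proto.Equal(expected, resp), "balance mismatch")
}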

// testChannelUnsettledBalance will test that the UnsettledBalance field is
// updated according to the number of pending HTLCs. Alice will send HTLCs to
// Carol while she is in hodl mode. This will result in a build-up of pending
// HTLCs. We expect the channel's unsettled balance to equal the sum of all
// the pending HTLCs.
func testChannelUnsettledBalance(net *lntest.NetworkHarness, t *harnessTest) {
	const chanAmt = btcutil.Amount(1000000)
	ctxb := context.Background()

	// Create a helper closure to be used below which asserts the proper
	// response to a channel balance RPC.
	checkChannelBalance := func(node *lntest.HarnessNode,
		local, remote, unsettledLocal, unsettledRemote btcutil.Amount) {

		expectedResponse := &lnrpc.ChannelBalanceResponse{
			LocalBalance: &lnrpc.Amount{
				Sat: uint64(local),
				Msat: uint64(lnwire.NewMSatFromSatoshis(
					local,
				)),
			},
			RemoteBalance: &lnrpc.Amount{
				Sat: uint64(remote),
				Msat: uint64(lnwire.NewMSatFromSatoshis(
					remote,
				)),
			},
			UnsettledLocalBalance: &lnrpc.Amount{
				Sat: uint64(unsettledLocal),
				Msat: uint64(lnwire.NewMSatFromSatoshis(
					unsettledLocal,
				)),
			},
			UnsettledRemoteBalance: &lnrpc.Amount{
				Sat: uint64(unsettledRemote),
				Msat: uint64(lnwire.NewMSatFromSatoshis(
					unsettledRemote,
				)),
			},
			PendingOpenLocalBalance:  &lnrpc.Amount{},
			PendingOpenRemoteBalance: &lnrpc.Amount{},
			// Deprecated fields.
			Balance: int64(local),
		}
		assertChannelBalanceResp(t, node, expectedResponse)
	}

	// Create Carol in hodl mode.
	carol := net.NewNode(t.t, "Carol", []string{"--hodl.exit-settle"})
	defer shutdownAndAssert(net, t, carol)

	// Connect Alice to Carol.
	ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
	net.ConnectNodes(ctxb, t.t, net.Alice, carol)

	// Open a channel between Alice and Carol.
	ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout)
	chanPointAlice := openChannelAndAssert(
		ctxt, t, net, net.Alice, carol,
		lntest.OpenChannelParams{
			Amt: chanAmt,
		},
	)

	// Wait for Alice and Carol to receive the channel edge from the
	// funding manager.
	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
	err := net.Alice.WaitForNetworkChannelOpen(ctxt, chanPointAlice)
	if err != nil {
		t.Fatalf("alice didn't see the alice->carol channel before "+
			"timeout: %v", err)
	}

	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
	err = carol.WaitForNetworkChannelOpen(ctxt, chanPointAlice)
	if err != nil {
		t.Fatalf("carol didn't see the alice->carol channel before "+
			"timeout: %v", err)
	}

	cType, err := channelCommitType(net.Alice, chanPointAlice)
	require.NoError(t.t, err, "unable to get channel type")

	// Check Alice's channel balance, which should have zero remote and
	// zero pending balance.
	checkChannelBalance(net.Alice, chanAmt-cType.calcStaticFee(0), 0, 0, 0)

	// Check Carol's channel balance, which should have zero local and
	// zero pending balance.
	checkChannelBalance(carol, 0, chanAmt-cType.calcStaticFee(0), 0, 0)

	// Channel should be ready for payments.
	const (
		payAmt      = 100
		numInvoices = 6
	)

	// Simultaneously send numInvoices payments from Alice to Carol.
	carolPubKey := carol.PubKey[:]
	errChan := make(chan error)
	for i := 0; i < numInvoices; i++ {
		go func() {
			ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
			_, err := net.Alice.RouterClient.SendPaymentV2(ctxt,
				&routerrpc.SendPaymentRequest{
					Dest:           carolPubKey,
					Amt:            int64(payAmt),
					PaymentHash:    makeFakePayHash(t),
					FinalCltvDelta: chainreg.DefaultBitcoinTimeLockDelta,
					TimeoutSeconds: 60,
					FeeLimitMsat:   noFeeLimitMsat,
				})

			if err != nil {
				errChan <- err
			}
		}()
	}

	// Test that the UnsettledBalance for both Alice and Carol is equal to
	// the number of invoices * payAmt.
	var unsettledErr error
	nodes := []*lntest.HarnessNode{net.Alice, carol}
	err = wait.Predicate(func() bool {
		// There should be a number of PendingHtlcs equal to the
		// number of invoices sent.
		unsettledErr = assertNumActiveHtlcs(nodes, numInvoices)
		if unsettledErr != nil {
			return false
		}

		// Set the amount expected for the unsettled balance for this
		// channel.
		expectedBalance := numInvoices * payAmt

		// Check each node's UnsettledBalance field.
		for _, node := range nodes {
			// Get channel info for the node.
			ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
			chanInfo, err := getChanInfo(ctxt, node)
			if err != nil {
				unsettledErr = err
				return false
			}

			// Check that UnsettledBalance is what we expect.
			if int(chanInfo.UnsettledBalance) != expectedBalance {
				unsettledErr = fmt.Errorf("unsettled balance failed "+
					"expected: %v, received: %v", expectedBalance,
					chanInfo.UnsettledBalance)
				return false
			}
		}

		return true
	}, defaultTimeout)
	if err != nil {
		t.Fatalf("unsettled balance error: %v", unsettledErr)
	}

	// Check for payment errors.
	select {
	case err := <-errChan:
		t.Fatalf("payment error: %v", err)
	default:
	}

	// Check Alice's channel balance, which should have a remote unsettled
	// balance that equals the number of invoices * payAmt. The remote
	// balance remains zero.
	aliceLocal := chanAmt - cType.calcStaticFee(0) - numInvoices*payAmt
	checkChannelBalance(net.Alice, aliceLocal, 0, 0, numInvoices*payAmt)

	// Check Carol's channel balance, which should have a local unsettled
	// balance that equals the number of invoices * payAmt. The local
	// balance remains zero.
	checkChannelBalance(carol, 0, aliceLocal, numInvoices*payAmt, 0)

	// Force close and assert the channel closure.
	ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout)
	closeChannelAndAssert(ctxt, t, net, net.Alice, chanPointAlice, true)

	// Clean up by mining the force close and sweep transaction.
	cleanupForceClose(t, net, net.Alice, chanPointAlice)
}
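
To make the expected numbers above concrete: six concurrent 100-satoshi HTLCs held by Carol give an unsettled balance of 6 * 100 = 600 satoshis on both views of the channel, and Alice's settled local balance drops by the same amount. A standalone sketch of that arithmetic follows; the commitment fee is a placeholder value, since the real figure comes from calcStaticFee and depends on the commitment type.

// Sketch: the balance arithmetic behind the final assertions above.
package main

import "fmt"

func main() {
	const (
		chanAmt     = 1000000 // channel capacity, satoshis
		payAmt      = 100     // satoshis per HTLC
		numInvoices = 6       // concurrent hodl-mode HTLCs
		staticFee   = 9050    // commitment fee placeholder (assumed)
	)

	unsettled := numInvoices * payAmt // 600 sat, seen by both peers
	aliceLocal := chanAmt - staticFee - unsettled

	fmt.Println(unsettled, aliceLocal) // 600 990350
}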
File diff suppressed because it is too large
777	lntest/itest/lnd_channel_graph_test.go	(new file)
@@ -0,0 +1,777 @@
package itest

import (
	"bytes"
	"context"
	"fmt"
	"testing"
	"time"

	"github.com/btcsuite/btcutil"
	"github.com/lightningnetwork/lnd/chainreg"
	"github.com/lightningnetwork/lnd/funding"
	"github.com/lightningnetwork/lnd/lnrpc"
	"github.com/lightningnetwork/lnd/lnrpc/routerrpc"
	"github.com/lightningnetwork/lnd/lntest"
	"github.com/lightningnetwork/lnd/lntest/wait"
	"github.com/stretchr/testify/require"
)

// testUpdateChanStatus checks that calls to the UpdateChanStatus RPC update
// the channel graph as expected, and that channel state is properly updated
// in the presence of interleaved node disconnects / reconnects.
func testUpdateChanStatus(net *lntest.NetworkHarness, t *harnessTest) {
	ctxb := context.Background()

	// Create two fresh nodes and open a channel between them.
	alice := net.NewNode(
		t.t, "Alice", []string{
			"--minbackoff=10s",
			"--chan-enable-timeout=1.5s",
			"--chan-disable-timeout=3s",
			"--chan-status-sample-interval=.5s",
		},
	)
	defer shutdownAndAssert(net, t, alice)

	bob := net.NewNode(
		t.t, "Bob", []string{
			"--minbackoff=10s",
			"--chan-enable-timeout=1.5s",
			"--chan-disable-timeout=3s",
			"--chan-status-sample-interval=.5s",
		},
	)
	defer shutdownAndAssert(net, t, bob)

	// Connect Alice to Bob.
	net.ConnectNodes(ctxb, t.t, alice, bob)

	// Give Alice some coins so she can fund a channel.
	ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
	net.SendCoins(ctxt, t.t, btcutil.SatoshiPerBitcoin, alice)

	// Open a channel with 100k satoshis between Alice and Bob with Alice
	// being the sole funder of the channel.
	chanAmt := btcutil.Amount(100000)
	ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout)
	chanPoint := openChannelAndAssert(
		ctxt, t, net, alice, bob,
		lntest.OpenChannelParams{
			Amt: chanAmt,
		},
	)

	// Wait for Alice and Bob to receive the channel edge from the
	// funding manager.
	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
	err := alice.WaitForNetworkChannelOpen(ctxt, chanPoint)
	if err != nil {
		t.Fatalf("alice didn't see the alice->bob channel before "+
			"timeout: %v", err)
	}

	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
	err = bob.WaitForNetworkChannelOpen(ctxt, chanPoint)
	if err != nil {
		t.Fatalf("bob didn't see the bob->alice channel before "+
			"timeout: %v", err)
	}

	// Launch a node for Carol which will connect to Alice and Bob in
	// order to receive graph updates. This will ensure that the channel
	// updates are propagated throughout the network.
	carol := net.NewNode(t.t, "Carol", nil)
	defer shutdownAndAssert(net, t, carol)

	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
	net.ConnectNodes(ctxt, t.t, alice, carol)

	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
	net.ConnectNodes(ctxt, t.t, bob, carol)

	carolSub := subscribeGraphNotifications(ctxb, t, carol)
	defer close(carolSub.quit)

	// sendReq sends an UpdateChanStatus request to the given node.
	sendReq := func(node *lntest.HarnessNode, chanPoint *lnrpc.ChannelPoint,
		action routerrpc.ChanStatusAction) {

		req := &routerrpc.UpdateChanStatusRequest{
			ChanPoint: chanPoint,
			Action:    action,
		}
		ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
		_, err = node.RouterClient.UpdateChanStatus(ctxt, req)
		if err != nil {
			t.Fatalf("unable to call UpdateChanStatus for %s's node: %v",
				node.Name(), err)
		}
	}

	// assertEdgeDisabled ensures that a given node has the correct
	// Disabled state for a channel.
	assertEdgeDisabled := func(node *lntest.HarnessNode,
		chanPoint *lnrpc.ChannelPoint, disabled bool) {

		var predErr error
		err = wait.Predicate(func() bool {
			req := &lnrpc.ChannelGraphRequest{
				IncludeUnannounced: true,
			}
			ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
			chanGraph, err := node.DescribeGraph(ctxt, req)
			if err != nil {
				predErr = fmt.Errorf("unable to query node %v's graph: %v", node, err)
				return false
			}
			numEdges := len(chanGraph.Edges)
			if numEdges != 1 {
				predErr = fmt.Errorf("expected to find 1 edge in the graph, found %d", numEdges)
				return false
			}
			edge := chanGraph.Edges[0]
			if edge.ChanPoint != chanPoint.GetFundingTxidStr() {
				predErr = fmt.Errorf("expected chan_point %v, got %v",
					chanPoint.GetFundingTxidStr(), edge.ChanPoint)
				return false
			}
			var policy *lnrpc.RoutingPolicy
			if node.PubKeyStr == edge.Node1Pub {
				policy = edge.Node1Policy
			} else {
				policy = edge.Node2Policy
			}
			if disabled != policy.Disabled {
				predErr = fmt.Errorf("expected policy.Disabled to be %v, "+
					"but policy was %v", disabled, policy)
				return false
			}
			return true
		}, defaultTimeout)
		if err != nil {
			t.Fatalf("%v", predErr)
		}
	}

	// When updating the state of the channel between Alice and Bob, we
	// should expect to see channel updates with the default routing
	// policy. The value of "Disabled" will depend on the specific
	// scenario being tested.
	expectedPolicy := &lnrpc.RoutingPolicy{
		FeeBaseMsat:      int64(chainreg.DefaultBitcoinBaseFeeMSat),
		FeeRateMilliMsat: int64(chainreg.DefaultBitcoinFeeRate),
		TimeLockDelta:    chainreg.DefaultBitcoinTimeLockDelta,
		MinHtlc:          1000, // default value
		MaxHtlcMsat:      calculateMaxHtlc(chanAmt),
	}

	// Initially, the channel between Alice and Bob should not be
	// disabled.
	assertEdgeDisabled(alice, chanPoint, false)

	// Manually disable the channel and ensure that a "Disabled = true"
	// update is propagated.
	sendReq(alice, chanPoint, routerrpc.ChanStatusAction_DISABLE)
	expectedPolicy.Disabled = true
	waitForChannelUpdate(
		t, carolSub,
		[]expectedChanUpdate{
			{alice.PubKeyStr, expectedPolicy, chanPoint},
		},
	)

	// Re-enable the channel and ensure that a "Disabled = false" update
	// is propagated.
	sendReq(alice, chanPoint, routerrpc.ChanStatusAction_ENABLE)
	expectedPolicy.Disabled = false
	waitForChannelUpdate(
		t, carolSub,
		[]expectedChanUpdate{
			{alice.PubKeyStr, expectedPolicy, chanPoint},
		},
	)

	// Manually enabling a channel should NOT prevent subsequent
	// disconnections from automatically disabling the channel again
	// (we don't want to clutter the network with channels that are
	// falsely advertised as enabled when they don't work).
	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
	if err := net.DisconnectNodes(ctxt, alice, bob); err != nil {
		t.Fatalf("unable to disconnect Alice from Bob: %v", err)
	}
	expectedPolicy.Disabled = true
	waitForChannelUpdate(
		t, carolSub,
		[]expectedChanUpdate{
			{alice.PubKeyStr, expectedPolicy, chanPoint},
			{bob.PubKeyStr, expectedPolicy, chanPoint},
		},
	)

	// Reconnecting the nodes should propagate a "Disabled = false" update.
	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
	net.EnsureConnected(ctxt, t.t, alice, bob)
	expectedPolicy.Disabled = false
	waitForChannelUpdate(
		t, carolSub,
		[]expectedChanUpdate{
			{alice.PubKeyStr, expectedPolicy, chanPoint},
			{bob.PubKeyStr, expectedPolicy, chanPoint},
		},
	)

	// Manually disabling the channel should prevent a subsequent
	// disconnect / reconnect from re-enabling the channel on Alice's
	// end. Note the asymmetry between manual enable and manual disable!
	sendReq(alice, chanPoint, routerrpc.ChanStatusAction_DISABLE)

	// Alice sends out the "Disabled = true" update in response to the
	// ChanStatusAction_DISABLE request.
	expectedPolicy.Disabled = true
	waitForChannelUpdate(
		t, carolSub,
		[]expectedChanUpdate{
			{alice.PubKeyStr, expectedPolicy, chanPoint},
		},
	)

	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
	if err := net.DisconnectNodes(ctxt, alice, bob); err != nil {
		t.Fatalf("unable to disconnect Alice from Bob: %v", err)
	}

	// Bob sends a "Disabled = true" update upon detecting the
	// disconnect.
	expectedPolicy.Disabled = true
	waitForChannelUpdate(
		t, carolSub,
		[]expectedChanUpdate{
			{bob.PubKeyStr, expectedPolicy, chanPoint},
		},
	)

	// Bob sends a "Disabled = false" update upon detecting the
	// reconnect.
	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
	net.EnsureConnected(ctxt, t.t, alice, bob)
	expectedPolicy.Disabled = false
	waitForChannelUpdate(
		t, carolSub,
		[]expectedChanUpdate{
			{bob.PubKeyStr, expectedPolicy, chanPoint},
		},
	)

	// However, since we manually disabled the channel on Alice's end,
	// the policy on Alice's end should still be "Disabled = true".
	// Again, note the asymmetry between manual enable and manual disable!
	assertEdgeDisabled(alice, chanPoint, true)

	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
	if err := net.DisconnectNodes(ctxt, alice, bob); err != nil {
		t.Fatalf("unable to disconnect Alice from Bob: %v", err)
	}

	// Bob sends a "Disabled = true" update upon detecting the
	// disconnect.
	expectedPolicy.Disabled = true
	waitForChannelUpdate(
		t, carolSub,
		[]expectedChanUpdate{
			{bob.PubKeyStr, expectedPolicy, chanPoint},
		},
	)

	// After restoring automatic channel state management on Alice's end,
	// BOTH Alice and Bob should set the channel state back to "enabled"
	// on reconnect.
	sendReq(alice, chanPoint, routerrpc.ChanStatusAction_AUTO)
	net.EnsureConnected(ctxt, t.t, alice, bob)
	expectedPolicy.Disabled = false
	waitForChannelUpdate(
		t, carolSub,
		[]expectedChanUpdate{
			{alice.PubKeyStr, expectedPolicy, chanPoint},
			{bob.PubKeyStr, expectedPolicy, chanPoint},
		},
	)
	assertEdgeDisabled(alice, chanPoint, false)
}
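
The manual enable/disable semantics exercised above are the same ones exposed to operators through the router RPC. Below is a minimal sketch of driving UpdateChanStatus outside the harness; it assumes conn is an already-authenticated *grpc.ClientConn and chanPoint identifies an open channel (both placeholders), and the function name is illustrative.

// Sketch: manually disable a channel, then restore automatic management.
func setChanStatusSketch(ctx context.Context, conn *grpc.ClientConn,
	chanPoint *lnrpc.ChannelPoint) error {

	client := routerrpc.NewRouterClient(conn)

	// DISABLE sticks across reconnects until AUTO is requested; this is
	// the asymmetry the test above demonstrates.
	_, err := client.UpdateChanStatus(ctx, &routerrpc.UpdateChanStatusRequest{
		ChanPoint: chanPoint,
		Action:    routerrpc.ChanStatusAction_DISABLE,
	})
	if err != nil {
		return err
	}

	// Later, hand control back to lnd's automatic status manager.
	_, err = client.UpdateChanStatus(ctx, &routerrpc.UpdateChanStatusRequest{
		ChanPoint: chanPoint,
		Action:    routerrpc.ChanStatusAction_AUTO,
	})
	return err
}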

// testUnannouncedChannels checks that unannounced channels are not returned
// by the DescribeGraph RPC unless explicitly asked for.
func testUnannouncedChannels(net *lntest.NetworkHarness, t *harnessTest) {
	ctxb := context.Background()

	amount := funding.MaxBtcFundingAmount

	// Open a channel between Alice and Bob, ensuring the channel has
	// been opened properly.
	ctxt, _ := context.WithTimeout(ctxb, channelOpenTimeout)
	chanOpenUpdate := openChannelStream(
		ctxt, t, net, net.Alice, net.Bob,
		lntest.OpenChannelParams{
			Amt: amount,
		},
	)

	// Mine 2 blocks, and check that the channel is opened but not yet
	// announced to the network.
	mineBlocks(t, net, 2, 1)

	// One block is enough to make the channel ready for use, since the
	// nodes have defaultNumConfs=1 set.
	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
	fundingChanPoint, err := net.WaitForChannelOpen(ctxt, chanOpenUpdate)
	if err != nil {
		t.Fatalf("error while waiting for channel open: %v", err)
	}

	// Alice should have 1 edge in her graph.
	req := &lnrpc.ChannelGraphRequest{
		IncludeUnannounced: true,
	}
	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
	chanGraph, err := net.Alice.DescribeGraph(ctxt, req)
	if err != nil {
		t.Fatalf("unable to query alice's graph: %v", err)
	}

	numEdges := len(chanGraph.Edges)
	if numEdges != 1 {
		t.Fatalf("expected to find 1 edge in the graph, found %d", numEdges)
	}

	// Channels should not be announced yet, hence Alice should have no
	// announced edges in her graph.
	req.IncludeUnannounced = false
	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
	chanGraph, err = net.Alice.DescribeGraph(ctxt, req)
	if err != nil {
		t.Fatalf("unable to query alice's graph: %v", err)
	}

	numEdges = len(chanGraph.Edges)
	if numEdges != 0 {
		t.Fatalf("expected to find 0 announced edges in the graph, found %d",
			numEdges)
	}

	// Mine 4 more blocks, and check that the channel is now announced.
	mineBlocks(t, net, 4, 0)

	// Give the network a chance to learn that the auth proof has been
	// confirmed.
	var predErr error
	err = wait.Predicate(func() bool {
		// The channel should now be announced. Check that Alice has
		// 1 announced edge.
		req.IncludeUnannounced = false
		ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
		chanGraph, err = net.Alice.DescribeGraph(ctxt, req)
		if err != nil {
			predErr = fmt.Errorf("unable to query alice's graph: %v", err)
			return false
		}

		numEdges = len(chanGraph.Edges)
		if numEdges != 1 {
			predErr = fmt.Errorf("expected to find 1 announced edge in "+
				"the graph, found %d", numEdges)
			return false
		}
		return true
	}, defaultTimeout)
	if err != nil {
		t.Fatalf("%v", predErr)
	}

	// The channel should now be announced. Check that Alice has 1
	// announced edge.
	req.IncludeUnannounced = false
	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
	chanGraph, err = net.Alice.DescribeGraph(ctxt, req)
	if err != nil {
		t.Fatalf("unable to query alice's graph: %v", err)
	}

	numEdges = len(chanGraph.Edges)
	if numEdges != 1 {
		t.Fatalf("expected to find 1 announced edge in the graph, found %d",
			numEdges)
	}

	// Close the channel used during the test.
	ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout)
	closeChannelAndAssert(ctxt, t, net, net.Alice, fundingChanPoint, false)
}
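
For reference, the block arithmetic in this test follows BOLT 7's announcement rules: a channel only becomes announceable once its funding transaction has six confirmations. mineBlocks(t, net, 2, 1) confirms the funding transaction (a single confirmation suffices for use, given defaultNumConfs=1), and mineBlocks(t, net, 4, 0) brings the total to 2 + 4 = 6, after which the announcement signatures are exchanged and the edge becomes public.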

func testGraphTopologyNotifications(net *lntest.NetworkHarness, t *harnessTest) {
	t.t.Run("pinned", func(t *testing.T) {
		ht := newHarnessTest(t, net)
		testGraphTopologyNtfns(net, ht, true)
	})
	t.t.Run("unpinned", func(t *testing.T) {
		ht := newHarnessTest(t, net)
		testGraphTopologyNtfns(net, ht, false)
	})
}

func testGraphTopologyNtfns(net *lntest.NetworkHarness, t *harnessTest, pinned bool) {
	ctxb := context.Background()

	const chanAmt = funding.MaxBtcFundingAmount

	// Spin up Bob first, since we will need to grab his pubkey when
	// starting Alice to test pinned syncing.
	bob := net.NewNode(t.t, "bob", nil)
	defer shutdownAndAssert(net, t, bob)

	ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
	bobInfo, err := bob.GetInfo(ctxt, &lnrpc.GetInfoRequest{})
	require.NoError(t.t, err)
	bobPubkey := bobInfo.IdentityPubkey

	// For unpinned syncing, start Alice as usual. Otherwise grab Bob's
	// pubkey to include in her pinned syncer set.
	var aliceArgs []string
	if pinned {
		aliceArgs = []string{
			"--numgraphsyncpeers=0",
			fmt.Sprintf("--gossip.pinned-syncers=%s", bobPubkey),
		}
	}

	alice := net.NewNode(t.t, "alice", aliceArgs)
	defer shutdownAndAssert(net, t, alice)

	// Connect Alice and Bob.
	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
	net.EnsureConnected(ctxt, t.t, alice, bob)

	// Give Alice some coins.
	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
	net.SendCoins(ctxt, t.t, btcutil.SatoshiPerBitcoin, alice)

	// Give Bob some coins.
	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
	net.SendCoins(ctxt, t.t, btcutil.SatoshiPerBitcoin, bob)

	// Assert that Bob has the correct sync type before proceeding.
	if pinned {
		assertSyncType(t, alice, bobPubkey, lnrpc.Peer_PINNED_SYNC)
	} else {
		assertSyncType(t, alice, bobPubkey, lnrpc.Peer_ACTIVE_SYNC)
	}

	// Regardless of syncer type, ensure that both peers report having
	// completed their initial sync before continuing to make a channel.
	waitForGraphSync(t, alice)

	// Let Alice subscribe to graph notifications.
	graphSub := subscribeGraphNotifications(ctxb, t, alice)
	defer close(graphSub.quit)

	// Open a new channel between Alice and Bob.
	ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout)
	chanPoint := openChannelAndAssert(
		ctxt, t, net, alice, bob,
		lntest.OpenChannelParams{
			Amt: chanAmt,
		},
	)

	// The channel opening above should have triggered a few notifications
	// sent to the notification client. We'll expect two channel updates,
	// and two node announcements.
	var numChannelUpds int
	var numNodeAnns int
	for numChannelUpds < 2 && numNodeAnns < 2 {
		select {
		// Ensure that a new update for both created edges is properly
		// dispatched to our registered client.
		case graphUpdate := <-graphSub.updateChan:
			// Process all channel updates presented in this
			// update message.
			for _, chanUpdate := range graphUpdate.ChannelUpdates {
				switch chanUpdate.AdvertisingNode {
				case alice.PubKeyStr:
				case bob.PubKeyStr:
				default:
					t.Fatalf("unknown advertising node: %v",
						chanUpdate.AdvertisingNode)
				}
				switch chanUpdate.ConnectingNode {
				case alice.PubKeyStr:
				case bob.PubKeyStr:
				default:
					t.Fatalf("unknown connecting node: %v",
						chanUpdate.ConnectingNode)
				}

				if chanUpdate.Capacity != int64(chanAmt) {
					t.Fatalf("channel capacities mismatch:"+
						" expected %v, got %v", chanAmt,
						btcutil.Amount(chanUpdate.Capacity))
				}
				numChannelUpds++
			}

			for _, nodeUpdate := range graphUpdate.NodeUpdates {
				switch nodeUpdate.IdentityKey {
				case alice.PubKeyStr:
				case bob.PubKeyStr:
				default:
					t.Fatalf("unknown node: %v",
						nodeUpdate.IdentityKey)
				}
				numNodeAnns++
			}
		case err := <-graphSub.errChan:
			t.Fatalf("unable to recv graph update: %v", err)
		case <-time.After(time.Second * 10):
			t.Fatalf("timeout waiting for graph notifications, "+
				"only received %d/2 chanupds and %d/2 nodeanns",
				numChannelUpds, numNodeAnns)
		}
	}

	_, blockHeight, err := net.Miner.Client.GetBestBlock()
	if err != nil {
		t.Fatalf("unable to get current blockheight %v", err)
	}

	// Now we'll test that updates are properly sent after channels are
	// closed within the network.
	ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout)
	closeChannelAndAssert(ctxt, t, net, alice, chanPoint, false)

	// Now that the channel has been closed, we should receive a
	// notification indicating so.
out:
	for {
		select {
		case graphUpdate := <-graphSub.updateChan:
			if len(graphUpdate.ClosedChans) != 1 {
				continue
			}

			closedChan := graphUpdate.ClosedChans[0]
			if closedChan.ClosedHeight != uint32(blockHeight+1) {
				t.Fatalf("close heights of channel mismatch: "+
					"expected %v, got %v", blockHeight+1,
					closedChan.ClosedHeight)
			}
			chanPointTxid, err := lnrpc.GetChanPointFundingTxid(chanPoint)
			if err != nil {
				t.Fatalf("unable to get txid: %v", err)
			}
			closedChanTxid, err := lnrpc.GetChanPointFundingTxid(
				closedChan.ChanPoint,
			)
			if err != nil {
				t.Fatalf("unable to get txid: %v", err)
			}
			if !bytes.Equal(closedChanTxid[:], chanPointTxid[:]) {
				t.Fatalf("channel point hash mismatch: "+
					"expected %v, got %v", chanPointTxid,
					closedChanTxid)
			}
			if closedChan.ChanPoint.OutputIndex != chanPoint.OutputIndex {
				t.Fatalf("output index mismatch: expected %v, "+
					"got %v", chanPoint.OutputIndex,
					closedChan.ChanPoint)
			}

			break out

		case err := <-graphSub.errChan:
			t.Fatalf("unable to recv graph update: %v", err)
		case <-time.After(time.Second * 10):
			t.Fatalf("notification for channel closure not " +
				"sent")
		}
	}

	// For the final portion of the test, we'll ensure that once a new
	// node appears in the network, the proper notification is dispatched.
	// Note that a node that does not have any channels open is ignored,
	// so first we disconnect Alice and Bob, open a channel between Bob
	// and Carol, and finally connect Alice to Bob again.
	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
	if err := net.DisconnectNodes(ctxt, alice, bob); err != nil {
		t.Fatalf("unable to disconnect alice and bob: %v", err)
	}
	carol := net.NewNode(t.t, "Carol", nil)
	defer shutdownAndAssert(net, t, carol)

	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
	net.ConnectNodes(ctxt, t.t, bob, carol)
	ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout)
	chanPoint = openChannelAndAssert(
		ctxt, t, net, bob, carol,
		lntest.OpenChannelParams{
			Amt: chanAmt,
		},
	)

	// Reconnect Alice and Bob. This should result in the nodes syncing up
	// their respective graph state, with the new addition being the
	// existence of Carol in the graph, and also the channel between Bob
	// and Carol. Note that we will also receive a node announcement from
	// Bob, since a node will update its node announcement after a new
	// channel is opened.
	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
	net.EnsureConnected(ctxt, t.t, alice, bob)

	// We should receive an update advertising the newly connected node,
	// Bob's new node announcement, and the channel between Bob and Carol.
	numNodeAnns = 0
	numChannelUpds = 0
	for numChannelUpds < 2 && numNodeAnns < 1 {
		select {
		case graphUpdate := <-graphSub.updateChan:
			for _, nodeUpdate := range graphUpdate.NodeUpdates {
				switch nodeUpdate.IdentityKey {
				case carol.PubKeyStr:
				case bob.PubKeyStr:
				default:
					t.Fatalf("unknown node update pubkey: %v",
						nodeUpdate.IdentityKey)
				}
				numNodeAnns++
			}

			for _, chanUpdate := range graphUpdate.ChannelUpdates {
				switch chanUpdate.AdvertisingNode {
				case carol.PubKeyStr:
				case bob.PubKeyStr:
				default:
					t.Fatalf("unknown advertising node: %v",
						chanUpdate.AdvertisingNode)
				}
				switch chanUpdate.ConnectingNode {
				case carol.PubKeyStr:
				case bob.PubKeyStr:
				default:
					t.Fatalf("unknown connecting node: %v",
						chanUpdate.ConnectingNode)
				}

				if chanUpdate.Capacity != int64(chanAmt) {
					t.Fatalf("channel capacities mismatch:"+
						" expected %v, got %v", chanAmt,
						btcutil.Amount(chanUpdate.Capacity))
				}
				numChannelUpds++
			}
		case err := <-graphSub.errChan:
			t.Fatalf("unable to recv graph update: %v", err)
		case <-time.After(time.Second * 10):
			t.Fatalf("timeout waiting for graph notifications, "+
				"only received %d/2 chanupds and %d/1 nodeanns",
				numChannelUpds, numNodeAnns)
		}
	}

	// Close the channel between Bob and Carol.
	ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout)
	closeChannelAndAssert(ctxt, t, net, bob, chanPoint, false)
}
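
Both notification loops above follow the same receive/count pattern. A reusable sketch of that pattern is shown below, assuming the graphSubscription shape used throughout this file (updateChan, errChan, quit); the helper name is hypothetical and not part of the PR.

// Sketch: collect n topology updates from a graph subscription, or fail the
// test if an error arrives or the timeout elapses first.
func collectGraphUpdates(t *harnessTest, sub graphSubscription,
	n int) []*lnrpc.GraphTopologyUpdate {

	var updates []*lnrpc.GraphTopologyUpdate
	for len(updates) < n {
		select {
		case upd := <-sub.updateChan:
			updates = append(updates, upd)
		case err := <-sub.errChan:
			t.Fatalf("graph subscription error: %v", err)
		case <-time.After(defaultTimeout):
			t.Fatalf("timed out after %d/%d updates",
				len(updates), n)
		}
	}
	return updates
}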

// testNodeAnnouncement ensures that when a node is started with one or more
// external IP addresses specified on the command line, those addresses are
// announced to the network and reported in the network graph.
func testNodeAnnouncement(net *lntest.NetworkHarness, t *harnessTest) {
	ctxb := context.Background()

	aliceSub := subscribeGraphNotifications(ctxb, t, net.Alice)
	defer close(aliceSub.quit)

	advertisedAddrs := []string{
		"192.168.1.1:8333",
		"[2001:db8:85a3:8d3:1319:8a2e:370:7348]:8337",
		"bkb6azqggsaiskzi.onion:9735",
		"fomvuglh6h6vcag73xo5t5gv56ombih3zr2xvplkpbfd7wrog4swjwid.onion:1234",
	}

	var lndArgs []string
	for _, addr := range advertisedAddrs {
		lndArgs = append(lndArgs, "--externalip="+addr)
	}

	dave := net.NewNode(t.t, "Dave", lndArgs)
	defer shutdownAndAssert(net, t, dave)

	// We must let Dave have an open channel before he can send a node
	// announcement, so we open a channel with Bob.
	ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
	net.ConnectNodes(ctxt, t.t, net.Bob, dave)

	// Alice shouldn't receive any new updates yet since the channel has
	// yet to be opened.
	select {
	case <-aliceSub.updateChan:
		t.Fatalf("received unexpected update from dave")
	case <-time.After(time.Second):
	}

	// We'll then go ahead and open a channel between Bob and Dave. This
	// ensures that Alice receives the node announcement from Bob as part
	// of the announcement broadcast.
	ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout)
	chanPoint := openChannelAndAssert(
		ctxt, t, net, net.Bob, dave,
		lntest.OpenChannelParams{
			Amt: 1000000,
		},
	)

	assertAddrs := func(addrsFound []string, targetAddrs ...string) {
		addrs := make(map[string]struct{}, len(addrsFound))
		for _, addr := range addrsFound {
			addrs[addr] = struct{}{}
		}

		for _, addr := range targetAddrs {
			if _, ok := addrs[addr]; !ok {
				t.Fatalf("address %v not found in node "+
					"announcement", addr)
			}
		}
	}

	waitForAddrsInUpdate := func(graphSub graphSubscription,
		nodePubKey string, targetAddrs ...string) {

		for {
			select {
			case graphUpdate := <-graphSub.updateChan:
				for _, update := range graphUpdate.NodeUpdates {
					if update.IdentityKey == nodePubKey {
						assertAddrs(
							update.Addresses, // nolint:staticcheck
							targetAddrs...,
						)
						return
					}
				}
			case err := <-graphSub.errChan:
				t.Fatalf("unable to recv graph update: %v", err)
			case <-time.After(defaultTimeout):
				t.Fatalf("did not receive node ann update")
			}
		}
	}

	// We'll then wait for Alice to receive Dave's node announcement
	// including the expected advertised addresses from Bob since they
	// should already be connected.
	waitForAddrsInUpdate(
		aliceSub, dave.PubKeyStr, advertisedAddrs...,
	)

	// Close the channel between Bob and Dave.
	ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout)
	closeChannelAndAssert(ctxt, t, net, net.Bob, chanPoint, false)
}
564	lntest/itest/lnd_channel_policy_test.go	(new file)
@@ -0,0 +1,564 @@
package itest

import (
	"context"
	"strings"

	"github.com/btcsuite/btcutil"
	"github.com/lightningnetwork/lnd/chainreg"
	"github.com/lightningnetwork/lnd/funding"
	"github.com/lightningnetwork/lnd/lnrpc"
	"github.com/lightningnetwork/lnd/lntest"
	"github.com/lightningnetwork/lnd/lnwire"
)
|
||||
|
||||
// testUpdateChannelPolicy tests that policy updates made to a channel
|
||||
// gets propagated to other nodes in the network.
|
||||
func testUpdateChannelPolicy(net *lntest.NetworkHarness, t *harnessTest) {
|
||||
ctxb := context.Background()
|
||||
|
||||
const (
|
||||
defaultFeeBase = 1000
|
||||
defaultFeeRate = 1
|
||||
defaultTimeLockDelta = chainreg.DefaultBitcoinTimeLockDelta
|
||||
defaultMinHtlc = 1000
|
||||
)
|
||||
defaultMaxHtlc := calculateMaxHtlc(funding.MaxBtcFundingAmount)
|
||||
|
||||
// Launch notification clients for all nodes, such that we can
|
||||
// get notified when they discover new channels and updates in the
|
||||
// graph.
|
||||
aliceSub := subscribeGraphNotifications(ctxb, t, net.Alice)
|
||||
defer close(aliceSub.quit)
|
||||
bobSub := subscribeGraphNotifications(ctxb, t, net.Bob)
|
||||
defer close(bobSub.quit)
|
||||
|
||||
chanAmt := funding.MaxBtcFundingAmount
|
||||
pushAmt := chanAmt / 2
|
||||
|
||||
// Create a channel Alice->Bob.
|
||||
ctxt, _ := context.WithTimeout(ctxb, channelOpenTimeout)
|
||||
chanPoint := openChannelAndAssert(
|
||||
ctxt, t, net, net.Alice, net.Bob,
|
||||
lntest.OpenChannelParams{
|
||||
Amt: chanAmt,
|
||||
PushAmt: pushAmt,
|
||||
},
|
||||
)
|
||||
|
||||
// We add all the nodes' update channels to a slice, such that we can
|
||||
// make sure they all receive the expected updates.
|
||||
graphSubs := []graphSubscription{aliceSub, bobSub}
|
||||
nodes := []*lntest.HarnessNode{net.Alice, net.Bob}
|
||||
|
||||
// Alice and Bob should see each other's ChannelUpdates, advertising the
|
||||
// default routing policies.
|
||||
expectedPolicy := &lnrpc.RoutingPolicy{
|
||||
FeeBaseMsat: defaultFeeBase,
|
||||
FeeRateMilliMsat: defaultFeeRate,
|
||||
TimeLockDelta: defaultTimeLockDelta,
|
||||
MinHtlc: defaultMinHtlc,
|
||||
MaxHtlcMsat: defaultMaxHtlc,
|
||||
}
|
||||
|
||||
for _, graphSub := range graphSubs {
|
||||
waitForChannelUpdate(
|
||||
t, graphSub,
|
||||
[]expectedChanUpdate{
|
||||
{net.Alice.PubKeyStr, expectedPolicy, chanPoint},
|
||||
{net.Bob.PubKeyStr, expectedPolicy, chanPoint},
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
// They should now know about the default policies.
|
||||
for _, node := range nodes {
|
||||
assertChannelPolicy(
|
||||
t, node, net.Alice.PubKeyStr, expectedPolicy, chanPoint,
|
||||
)
|
||||
assertChannelPolicy(
|
||||
t, node, net.Bob.PubKeyStr, expectedPolicy, chanPoint,
|
||||
)
|
||||
}
|
||||
|
||||
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
|
||||
err := net.Alice.WaitForNetworkChannelOpen(ctxt, chanPoint)
|
||||
if err != nil {
|
||||
t.Fatalf("alice didn't report channel: %v", err)
|
||||
}
|
||||
err = net.Bob.WaitForNetworkChannelOpen(ctxt, chanPoint)
|
||||
if err != nil {
|
||||
t.Fatalf("bob didn't report channel: %v", err)
|
||||
}
|
||||
|
||||
// Create Carol with options to rate limit channel updates up to 2 per
|
||||
// day, and create a new channel Bob->Carol.
|
||||
carol := net.NewNode(
|
||||
t.t, "Carol", []string{
|
||||
"--gossip.max-channel-update-burst=2",
|
||||
"--gossip.channel-update-interval=24h",
|
||||
},
|
||||
)
|
||||
|
||||
// Clean up carol's node when the test finishes.
|
||||
defer shutdownAndAssert(net, t, carol)
|
||||
|
||||
carolSub := subscribeGraphNotifications(ctxb, t, carol)
|
||||
defer close(carolSub.quit)
|
||||
|
||||
graphSubs = append(graphSubs, carolSub)
|
||||
nodes = append(nodes, carol)
|
||||
|
||||
// Send some coins to Carol that can be used for channel funding.
|
||||
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
|
||||
net.SendCoins(ctxt, t.t, btcutil.SatoshiPerBitcoin, carol)
|
||||
|
||||
net.ConnectNodes(ctxb, t.t, carol, net.Bob)
|
||||
|
||||
// Open the channel Carol->Bob with a custom min_htlc value set. Since
|
||||
// Carol is opening the channel, she will require Bob to not forward
|
||||
// HTLCs smaller than this value, and hence he should advertise it as
|
||||
// part of his ChannelUpdate.
|
||||
const customMinHtlc = 5000
|
||||
ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout)
|
||||
chanPoint2 := openChannelAndAssert(
|
||||
ctxt, t, net, carol, net.Bob,
|
||||
lntest.OpenChannelParams{
|
||||
Amt: chanAmt,
|
||||
PushAmt: pushAmt,
|
||||
MinHtlc: customMinHtlc,
|
||||
},
|
||||
)
|
||||
|
||||
expectedPolicyBob := &lnrpc.RoutingPolicy{
|
||||
FeeBaseMsat: defaultFeeBase,
|
||||
FeeRateMilliMsat: defaultFeeRate,
|
||||
TimeLockDelta: defaultTimeLockDelta,
|
||||
MinHtlc: customMinHtlc,
|
||||
MaxHtlcMsat: defaultMaxHtlc,
|
||||
}
|
||||
expectedPolicyCarol := &lnrpc.RoutingPolicy{
|
||||
FeeBaseMsat: defaultFeeBase,
|
||||
FeeRateMilliMsat: defaultFeeRate,
|
||||
TimeLockDelta: defaultTimeLockDelta,
|
||||
MinHtlc: defaultMinHtlc,
|
||||
MaxHtlcMsat: defaultMaxHtlc,
|
||||
}
|
||||
|
||||
for _, graphSub := range graphSubs {
|
||||
waitForChannelUpdate(
|
||||
t, graphSub,
|
||||
[]expectedChanUpdate{
|
||||
{net.Bob.PubKeyStr, expectedPolicyBob, chanPoint2},
|
||||
{carol.PubKeyStr, expectedPolicyCarol, chanPoint2},
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
// Check that all nodes now know about the updated policies.
|
||||
for _, node := range nodes {
|
||||
assertChannelPolicy(
|
||||
t, node, net.Bob.PubKeyStr, expectedPolicyBob,
|
||||
chanPoint2,
|
||||
)
|
||||
assertChannelPolicy(
|
||||
t, node, carol.PubKeyStr, expectedPolicyCarol,
|
||||
chanPoint2,
|
||||
)
|
||||
}
|
||||
|
||||
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
|
||||
err = net.Alice.WaitForNetworkChannelOpen(ctxt, chanPoint2)
|
||||
if err != nil {
|
||||
t.Fatalf("alice didn't report channel: %v", err)
|
||||
}
|
||||
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
|
||||
err = net.Bob.WaitForNetworkChannelOpen(ctxt, chanPoint2)
|
||||
if err != nil {
|
||||
t.Fatalf("bob didn't report channel: %v", err)
|
||||
}
|
||||
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
|
||||
err = carol.WaitForNetworkChannelOpen(ctxt, chanPoint2)
|
||||
if err != nil {
|
||||
t.Fatalf("carol didn't report channel: %v", err)
|
||||
}
|
||||
|
||||
// First we'll try to send a payment from Alice to Carol with an amount
|
||||
// less than the min_htlc value required by Carol. This payment should
|
||||
// fail, as the channel Bob->Carol cannot carry HTLCs this small.
|
||||
payAmt := btcutil.Amount(4)
|
||||
invoice := &lnrpc.Invoice{
|
||||
Memo: "testing",
|
||||
Value: int64(payAmt),
|
||||
}
|
||||
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
|
||||
resp, err := carol.AddInvoice(ctxt, invoice)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to add invoice: %v", err)
|
||||
}
|
||||
|
||||
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
|
||||
err = completePaymentRequests(
|
||||
ctxt, net.Alice, net.Alice.RouterClient,
|
||||
[]string{resp.PaymentRequest}, true,
|
||||
)
|
||||
|
||||
// Alice knows about the channel policy of Carol and should therefore
|
||||
// not be able to find a path during routing.
|
||||
expErr := lnrpc.PaymentFailureReason_FAILURE_REASON_NO_ROUTE
|
||||
if err.Error() != expErr.String() {
|
||||
t.Fatalf("expected %v, instead got %v", expErr, err)
|
||||
}
|
||||
|
||||
// Now we try to send a payment over the channel with a value too low
|
||||
// to be accepted. First we query for a route to route a payment of
|
||||
// 5000 mSAT, as this is accepted.
|
||||
payAmt = btcutil.Amount(5)
|
||||
routesReq := &lnrpc.QueryRoutesRequest{
|
||||
PubKey: carol.PubKeyStr,
|
||||
Amt: int64(payAmt),
|
||||
FinalCltvDelta: defaultTimeLockDelta,
|
||||
}
|
||||
|
||||
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
|
||||
routes, err := net.Alice.QueryRoutes(ctxt, routesReq)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to get route: %v", err)
|
||||
}
|
||||
|
||||
if len(routes.Routes) != 1 {
|
||||
t.Fatalf("expected to find 1 route, got %v", len(routes.Routes))
|
||||
}
|
||||
|
||||
// We change the route to carry a payment of 4000 mSAT instead of 5000
|
||||
// mSAT.
|
||||
payAmt = btcutil.Amount(4)
|
||||
amtSat := int64(payAmt)
|
||||
amtMSat := int64(lnwire.NewMSatFromSatoshis(payAmt))
|
||||
routes.Routes[0].Hops[0].AmtToForward = amtSat // nolint:staticcheck
|
||||
routes.Routes[0].Hops[0].AmtToForwardMsat = amtMSat
|
||||
routes.Routes[0].Hops[1].AmtToForward = amtSat // nolint:staticcheck
|
||||
routes.Routes[0].Hops[1].AmtToForwardMsat = amtMSat
|
||||
|
||||
// Send the payment with the modified value.
|
||||
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
|
||||
alicePayStream, err := net.Alice.SendToRoute(ctxt) // nolint:staticcheck
|
||||
if err != nil {
|
||||
t.Fatalf("unable to create payment stream for alice: %v", err)
|
||||
}
|
||||
sendReq := &lnrpc.SendToRouteRequest{
|
||||
PaymentHash: resp.RHash,
|
||||
Route: routes.Routes[0],
|
||||
}
|
||||
|
||||
err = alicePayStream.Send(sendReq)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to send payment: %v", err)
|
||||
}
|
||||
|
||||
// We expect this payment to fail, and that the min_htlc value is
|
||||
// communicated back to us, since the attempted HTLC value was too low.
|
||||
sendResp, err := alicePayStream.Recv()
|
||||
if err != nil {
|
||||
t.Fatalf("unable to send payment: %v", err)
|
||||
}
|
||||
|
||||
// Expected as part of the error message.
|
||||
substrs := []string{
|
||||
"AmountBelowMinimum",
|
||||
"HtlcMinimumMsat: (lnwire.MilliSatoshi) 5000 mSAT",
|
||||
}
|
||||
for _, s := range substrs {
|
||||
if !strings.Contains(sendResp.PaymentError, s) {
|
||||
t.Fatalf("expected error to contain \"%v\", instead "+
|
||||
"got %v", s, sendResp.PaymentError)
|
||||
}
|
||||
}
|
||||
|
||||
// Make sure sending using the original value succeeds.
|
||||
payAmt = btcutil.Amount(5)
|
||||
amtSat = int64(payAmt)
|
||||
amtMSat = int64(lnwire.NewMSatFromSatoshis(payAmt))
|
||||
routes.Routes[0].Hops[0].AmtToForward = amtSat // nolint:staticcheck
|
||||
routes.Routes[0].Hops[0].AmtToForwardMsat = amtMSat
|
||||
routes.Routes[0].Hops[1].AmtToForward = amtSat // nolint:staticcheck
|
||||
routes.Routes[0].Hops[1].AmtToForwardMsat = amtMSat
|
||||
|
||||
// Manually set the MPP payload a new for each payment since
|
||||
// the payment addr will change with each invoice, although we
|
||||
// can re-use the route itself.
|
||||
route := routes.Routes[0]
|
||||
route.Hops[len(route.Hops)-1].TlvPayload = true
|
||||
route.Hops[len(route.Hops)-1].MppRecord = &lnrpc.MPPRecord{
|
||||
PaymentAddr: resp.PaymentAddr,
|
||||
TotalAmtMsat: amtMSat,
|
||||
}
|
||||
|
||||
sendReq = &lnrpc.SendToRouteRequest{
|
||||
PaymentHash: resp.RHash,
|
||||
Route: route,
|
||||
}
|
||||
|
||||
err = alicePayStream.Send(sendReq)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to send payment: %v", err)
|
||||
}
|
||||
|
||||
sendResp, err = alicePayStream.Recv()
|
||||
if err != nil {
|
||||
t.Fatalf("unable to send payment: %v", err)
|
||||
}
|
||||
|
||||
if sendResp.PaymentError != "" {
|
||||
t.Fatalf("expected payment to succeed, instead got %v",
|
||||
sendResp.PaymentError)
|
||||
}
|
||||
|
||||
// With our little cluster set up, we'll update the fees and the max htlc
|
||||
// size for the Bob side of the Alice->Bob channel, and make sure
|
||||
// all nodes learn about it.
|
||||
baseFee := int64(1500)
|
||||
feeRate := int64(12)
|
||||
timeLockDelta := uint32(66)
|
||||
maxHtlc := uint64(500000)
|
||||
|
||||
expectedPolicy = &lnrpc.RoutingPolicy{
|
||||
FeeBaseMsat: baseFee,
|
||||
FeeRateMilliMsat: testFeeBase * feeRate,
|
||||
TimeLockDelta: timeLockDelta,
|
||||
MinHtlc: defaultMinHtlc,
|
||||
MaxHtlcMsat: maxHtlc,
|
||||
}
|
||||
|
||||
req := &lnrpc.PolicyUpdateRequest{
|
||||
BaseFeeMsat: baseFee,
|
||||
FeeRate: float64(feeRate),
|
||||
TimeLockDelta: timeLockDelta,
|
||||
MaxHtlcMsat: maxHtlc,
|
||||
Scope: &lnrpc.PolicyUpdateRequest_ChanPoint{
|
||||
ChanPoint: chanPoint,
|
||||
},
|
||||
}
|
||||
|
||||
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
|
||||
if _, err := net.Bob.UpdateChannelPolicy(ctxt, req); err != nil {
|
||||
t.Fatalf("unable to get alice's balance: %v", err)
|
||||
}
|
||||
|
||||
// Wait for all nodes to have seen the policy update done by Bob.
|
||||
for _, graphSub := range graphSubs {
|
||||
waitForChannelUpdate(
|
||||
t, graphSub,
|
||||
[]expectedChanUpdate{
|
||||
{net.Bob.PubKeyStr, expectedPolicy, chanPoint},
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
// Check that all nodes now know about Bob's updated policy.
|
||||
for _, node := range nodes {
|
||||
assertChannelPolicy(
|
||||
t, node, net.Bob.PubKeyStr, expectedPolicy, chanPoint,
|
||||
)
|
||||
}
|
||||
|
||||
// Now that all nodes have received the new channel update, we'll try
|
||||
// to send a payment from Alice to Carol to ensure that Alice has
|
||||
// internalized this fee update. This shouldn't affect the route that
|
||||
// Alice takes though: we updated the Alice -> Bob channel and she
|
||||
// doesn't pay for transit over that channel as it's direct.
|
||||
// Note that the payment amount is >= the min_htlc value for the
|
||||
// channel Bob->Carol, so it should successfully be forwarded.
|
||||
payAmt = btcutil.Amount(5)
|
||||
invoice = &lnrpc.Invoice{
|
||||
Memo: "testing",
|
||||
Value: int64(payAmt),
|
||||
}
|
||||
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
|
||||
resp, err = carol.AddInvoice(ctxt, invoice)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to add invoice: %v", err)
|
||||
}
|
||||
|
||||
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
|
||||
err = completePaymentRequests(
|
||||
ctxt, net.Alice, net.Alice.RouterClient,
|
||||
[]string{resp.PaymentRequest}, true,
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to send payment: %v", err)
|
||||
}
|
||||
|
||||
// We'll now open a channel from Alice directly to Carol.
|
||||
net.ConnectNodes(ctxb, t.t, net.Alice, carol)
|
||||
ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout)
|
||||
chanPoint3 := openChannelAndAssert(
|
||||
ctxt, t, net, net.Alice, carol,
|
||||
lntest.OpenChannelParams{
|
||||
Amt: chanAmt,
|
||||
PushAmt: pushAmt,
|
||||
},
|
||||
)
|
||||
|
||||
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
|
||||
err = net.Alice.WaitForNetworkChannelOpen(ctxt, chanPoint3)
|
||||
if err != nil {
|
||||
t.Fatalf("alice didn't report channel: %v", err)
|
||||
}
|
||||
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
|
||||
err = carol.WaitForNetworkChannelOpen(ctxt, chanPoint3)
|
||||
if err != nil {
|
||||
t.Fatalf("bob didn't report channel: %v", err)
|
||||
}
|
||||
|
||||
// Make a global update, and check that both channels' new policies get
|
||||
// propagated.
|
||||
baseFee = int64(800)
|
||||
feeRate = int64(123)
|
||||
timeLockDelta = uint32(22)
|
||||
maxHtlc *= 2
|
||||
|
||||
expectedPolicy.FeeBaseMsat = baseFee
|
||||
expectedPolicy.FeeRateMilliMsat = testFeeBase * feeRate
|
||||
expectedPolicy.TimeLockDelta = timeLockDelta
|
||||
expectedPolicy.MaxHtlcMsat = maxHtlc
|
||||
|
||||
req = &lnrpc.PolicyUpdateRequest{
|
||||
BaseFeeMsat: baseFee,
|
||||
FeeRate: float64(feeRate),
|
||||
TimeLockDelta: timeLockDelta,
|
||||
MaxHtlcMsat: maxHtlc,
|
||||
}
|
||||
req.Scope = &lnrpc.PolicyUpdateRequest_Global{}

	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
	_, err = net.Alice.UpdateChannelPolicy(ctxt, req)
	if err != nil {
		t.Fatalf("unable to update alice's channel policy: %v", err)
	}

	// Wait for all nodes to have seen the policy updates for both of
	// Alice's channels.
	for _, graphSub := range graphSubs {
		waitForChannelUpdate(
			t, graphSub,
			[]expectedChanUpdate{
				{net.Alice.PubKeyStr, expectedPolicy, chanPoint},
				{net.Alice.PubKeyStr, expectedPolicy, chanPoint3},
			},
		)
	}

	// And finally check that all nodes remember the policy update they
	// received.
	for _, node := range nodes {
		assertChannelPolicy(
			t, node, net.Alice.PubKeyStr, expectedPolicy,
			chanPoint, chanPoint3,
		)
	}

	// Now, to test that Carol is properly rate limiting incoming updates,
	// we'll send two more updates from Alice. Carol should accept the
	// first, but not the second, as she only allows two updates per day
	// and a day has yet to elapse from the previous update.
	const numUpdatesTilRateLimit = 2
	for i := 0; i < numUpdatesTilRateLimit; i++ {
		prevAlicePolicy := *expectedPolicy
		baseFee *= 2
		expectedPolicy.FeeBaseMsat = baseFee
		req.BaseFeeMsat = baseFee

		ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout)
		defer cancel()
		_, err = net.Alice.UpdateChannelPolicy(ctxt, req)
		if err != nil {
			t.Fatalf("unable to update alice's channel policy: %v", err)
		}

		// Wait for all nodes to have seen the policy updates for both
		// of Alice's channels. Carol will not see the last update as
		// the limit has been reached.
		for idx, graphSub := range graphSubs {
			expUpdates := []expectedChanUpdate{
				{net.Alice.PubKeyStr, expectedPolicy, chanPoint},
				{net.Alice.PubKeyStr, expectedPolicy, chanPoint3},
			}
			// Carol was added last, which is why we check the last
			// index.
			if i == numUpdatesTilRateLimit-1 && idx == len(graphSubs)-1 {
				expUpdates = nil
			}
			waitForChannelUpdate(t, graphSub, expUpdates)
		}

		// And finally check that all nodes remember the policy update
		// they received. Since Carol didn't receive the last update,
		// she still has Alice's old policy.
		for idx, node := range nodes {
			policy := expectedPolicy
			// Carol was added last, which is why we check the last
			// index.
			if i == numUpdatesTilRateLimit-1 && idx == len(nodes)-1 {
				policy = &prevAlicePolicy
			}
			assertChannelPolicy(
				t, node, net.Alice.PubKeyStr, policy, chanPoint,
				chanPoint3,
			)
		}
	}
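
	// To make the rate-limit arithmetic above concrete: baseFee enters
	// the loop at 800 msat, so the two iterations request 1600 and then
	// 3200 msat. Carol accepts only the first of those, leaving her with
	// the 1600 msat policy while the other nodes store 3200.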

	// Close the channels.
	ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout)
	closeChannelAndAssert(ctxt, t, net, net.Alice, chanPoint, false)
	ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout)
	closeChannelAndAssert(ctxt, t, net, net.Bob, chanPoint2, false)
	ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout)
	closeChannelAndAssert(ctxt, t, net, net.Alice, chanPoint3, false)
}

// updateChannelPolicy updates the channel policy of node to the given fees
// and timelock delta. This function blocks until listenerNode has received
// the policy update.
func updateChannelPolicy(t *harnessTest, node *lntest.HarnessNode,
	chanPoint *lnrpc.ChannelPoint, baseFee int64, feeRate int64,
	timeLockDelta uint32, maxHtlc uint64, listenerNode *lntest.HarnessNode) {

	ctxb := context.Background()

	expectedPolicy := &lnrpc.RoutingPolicy{
		FeeBaseMsat:      baseFee,
		FeeRateMilliMsat: feeRate,
		TimeLockDelta:    timeLockDelta,
		MinHtlc:          1000, // default value
		MaxHtlcMsat:      maxHtlc,
	}

	updateFeeReq := &lnrpc.PolicyUpdateRequest{
		BaseFeeMsat:   baseFee,
		FeeRate:       float64(feeRate) / testFeeBase,
		TimeLockDelta: timeLockDelta,
		Scope: &lnrpc.PolicyUpdateRequest_ChanPoint{
			ChanPoint: chanPoint,
		},
		MaxHtlcMsat: maxHtlc,
	}

	ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
	if _, err := node.UpdateChannelPolicy(ctxt, updateFeeReq); err != nil {
		t.Fatalf("unable to update chan policy: %v", err)
	}

	// Wait for listener node to receive the channel update from node.
	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
	graphSub := subscribeGraphNotifications(ctxt, t, listenerNode)
	defer close(graphSub.quit)

	waitForChannelUpdate(
		t, graphSub,
		[]expectedChanUpdate{
			{node.PubKeyStr, expectedPolicy, chanPoint},
		},
	)
}
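
// A minimal usage sketch for the helper above (the fee and max-HTLC values
// are illustrative only): update Alice's side of chanPoint and block until
// Bob has seen the new policy.
//
//	updateChannelPolicy(
//		t, net.Alice, chanPoint, 1000, 100, 40, 500000000, net.Bob,
//	)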
@ -2,12 +2,19 @@ package itest

import (
	"context"
	"crypto/rand"
	"fmt"
	"testing"
	"time"

	"github.com/btcsuite/btcd/chaincfg/chainhash"
	"github.com/btcsuite/btcd/wire"
	"github.com/btcsuite/btcutil"
	"github.com/lightningnetwork/lnd/funding"
	"github.com/lightningnetwork/lnd/input"
	"github.com/lightningnetwork/lnd/labels"
	"github.com/lightningnetwork/lnd/lnrpc"
	"github.com/lightningnetwork/lnd/lnrpc/signrpc"
	"github.com/lightningnetwork/lnd/lntest"
	"github.com/lightningnetwork/lnd/lnwire"
	"github.com/stretchr/testify/require"
@ -495,3 +502,274 @@ func testExternalFundingChanPoint(net *lntest.NetworkHarness, t *harnessTest) {
	// It should now not appear in the pending channels anymore.
	assertNumOpenChannelsPending(ctxt, t, carol, dave, 0)
}

// testChannelFundingPersistence is intended to ensure that the Funding
// Manager persists the state of new channels prior to broadcasting the
// channel's funding transaction. This ensures that the daemon maintains an
// up-to-date representation of channels if the system is restarted or
// disconnected. testChannelFundingPersistence mirrors
// testBasicChannelFunding, but adds restarts and checks for the state of
// channels with unconfirmed funding transactions.
func testChannelFundingPersistence(net *lntest.NetworkHarness, t *harnessTest) {
	ctxb := context.Background()

	chanAmt := funding.MaxBtcFundingAmount
	pushAmt := btcutil.Amount(0)

	// As we need to create a channel that requires more than 1
	// confirmation before it's open, with the current set of defaults,
	// we'll need to create a new node instance.
	const numConfs = 5
	carolArgs := []string{fmt.Sprintf("--bitcoin.defaultchanconfs=%v", numConfs)}
	carol := net.NewNode(t.t, "Carol", carolArgs)

	// Clean up carol's node when the test finishes.
	defer shutdownAndAssert(net, t, carol)

	ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
	net.ConnectNodes(ctxt, t.t, net.Alice, carol)

	// Create a new channel that requires 5 confs before it's considered
	// open, then broadcast the funding transaction.
	ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout)
	pendingUpdate, err := net.OpenPendingChannel(ctxt, net.Alice, carol,
		chanAmt, pushAmt)
	if err != nil {
		t.Fatalf("unable to open channel: %v", err)
	}

	// At this point, the channel's funding transaction will have been
	// broadcast, but not confirmed. Alice and Carol's nodes should
	// reflect this when queried via RPC.
	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
	assertNumOpenChannelsPending(ctxt, t, net.Alice, carol, 1)

	// Restart both nodes to test that the appropriate state has been
	// persisted and that both nodes recover gracefully.
	if err := net.RestartNode(net.Alice, nil); err != nil {
		t.Fatalf("Node restart failed: %v", err)
	}
	if err := net.RestartNode(carol, nil); err != nil {
		t.Fatalf("Node restart failed: %v", err)
	}

	fundingTxID, err := chainhash.NewHash(pendingUpdate.Txid)
	if err != nil {
		t.Fatalf("unable to convert funding txid into chainhash.Hash:"+
			" %v", err)
	}
	fundingTxStr := fundingTxID.String()

	// Mine a block, then wait for Alice's node to notify us that the
	// channel has been opened. The funding transaction should be found
	// within the newly mined block.
	block := mineBlocks(t, net, 1, 1)[0]
	assertTxInBlock(t, block, fundingTxID)

	// Get the height that our transaction confirmed at.
	_, height, err := net.Miner.Client.GetBestBlock()
	require.NoError(t.t, err, "could not get best block")

	// Restart both nodes to test that the appropriate state has been
	// persisted and that both nodes recover gracefully.
	if err := net.RestartNode(net.Alice, nil); err != nil {
		t.Fatalf("Node restart failed: %v", err)
	}
	if err := net.RestartNode(carol, nil); err != nil {
		t.Fatalf("Node restart failed: %v", err)
	}

	// The following block ensures that after both nodes have restarted,
	// they have reconnected before the execution of the next test.
	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
	net.EnsureConnected(ctxt, t.t, net.Alice, carol)

	// Next, mine enough blocks such that the channel will open with a
	// single additional block mined.
	if _, err := net.Miner.Client.Generate(3); err != nil {
		t.Fatalf("unable to mine blocks: %v", err)
	}

	// Assert that our wallet has our opening transaction with a label
	// that does not have a channel ID set yet, because we have not
	// reached our required confirmations.
	tx := findTxAtHeight(ctxt, t, height, fundingTxStr, net.Alice)

	// At this stage, we expect the transaction to be labelled, but not
	// with our channel ID because our transaction has not yet reached
	// the required number of confirmations.
	label := labels.MakeLabel(labels.LabelTypeChannelOpen, nil)
	require.Equal(t.t, label, tx.Label, "open channel label wrong")
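
	// Once the funding transaction reaches the required number of
	// confirmations, the wallet regenerates this label with the short
	// channel ID included, which the check closure further below asserts.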

	// Both nodes should still show a single channel as pending.
	time.Sleep(time.Second * 1)
	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
	assertNumOpenChannelsPending(ctxt, t, net.Alice, carol, 1)

	// Finally, mine the last block which should mark the channel as open.
	if _, err := net.Miner.Client.Generate(1); err != nil {
		t.Fatalf("unable to mine blocks: %v", err)
	}

	// At this point, the channel should be fully opened and there should
	// be no pending channels remaining for either node.
	time.Sleep(time.Second * 1)
	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
	assertNumOpenChannelsPending(ctxt, t, net.Alice, carol, 0)

	// The channel should be listed in the peer information returned by
	// both peers.
	outPoint := wire.OutPoint{
		Hash:  *fundingTxID,
		Index: pendingUpdate.OutputIndex,
	}

	// Re-lookup our transaction in the block that it confirmed in.
	tx = findTxAtHeight(ctxt, t, height, fundingTxStr, net.Alice)

	// Create an additional check for our channel assertion that will
	// check that our label is as expected.
	check := func(channel *lnrpc.Channel) {
		shortChanID := lnwire.NewShortChanIDFromInt(
			channel.ChanId,
		)

		label := labels.MakeLabel(
			labels.LabelTypeChannelOpen, &shortChanID,
		)
		require.Equal(t.t, label, tx.Label,
			"open channel label not updated")
	}

	// Check both nodes to ensure that the channel is ready for operation.
	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
	err = net.AssertChannelExists(ctxt, net.Alice, &outPoint, check)
	if err != nil {
		t.Fatalf("unable to assert channel existence: %v", err)
	}
	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
	if err := net.AssertChannelExists(ctxt, carol, &outPoint); err != nil {
		t.Fatalf("unable to assert channel existence: %v", err)
	}

	// Finally, immediately close the channel. This function will also
	// block until the channel is closed and will additionally assert the
	// relevant channel closing post conditions.
	chanPoint := &lnrpc.ChannelPoint{
		FundingTxid: &lnrpc.ChannelPoint_FundingTxidBytes{
			FundingTxidBytes: pendingUpdate.Txid,
		},
		OutputIndex: pendingUpdate.OutputIndex,
	}
	ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout)
	closeChannelAndAssert(ctxt, t, net, net.Alice, chanPoint, false)
}

// deriveFundingShim creates a channel funding shim by deriving the necessary
// keys on both sides.
func deriveFundingShim(net *lntest.NetworkHarness, t *harnessTest,
	carol, dave *lntest.HarnessNode, chanSize btcutil.Amount,
	thawHeight uint32, keyIndex int32, publish bool) (*lnrpc.FundingShim,
	*lnrpc.ChannelPoint, *chainhash.Hash) {

	ctxb := context.Background()
	keyLoc := &signrpc.KeyLocator{
		KeyFamily: 9999,
		KeyIndex:  keyIndex,
	}
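	// KeyFamily 9999 appears to be an arbitrary family chosen by this
	// test to keep the derived multi-sig keys apart from the wallet's
	// default key families; any otherwise unused family would do.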
	carolFundingKey, err := carol.WalletKitClient.DeriveKey(ctxb, keyLoc)
	require.NoError(t.t, err)
	daveFundingKey, err := dave.WalletKitClient.DeriveKey(ctxb, keyLoc)
	require.NoError(t.t, err)

	// Now that we have the multi-sig keys for each party, we can manually
	// construct the funding transaction. We'll instruct the backend to
	// immediately create and broadcast a transaction paying out an exact
	// amount. Normally this would reside in the mempool, but we just
	// confirm it now for simplicity.
	_, fundingOutput, err := input.GenFundingPkScript(
		carolFundingKey.RawKeyBytes, daveFundingKey.RawKeyBytes,
		int64(chanSize),
	)
	require.NoError(t.t, err)

	var txid *chainhash.Hash
	targetOutputs := []*wire.TxOut{fundingOutput}
	if publish {
		txid, err = net.Miner.SendOutputsWithoutChange(
			targetOutputs, 5,
		)
		require.NoError(t.t, err)
	} else {
		tx, err := net.Miner.CreateTransaction(targetOutputs, 5, false)
		require.NoError(t.t, err)

		txHash := tx.TxHash()
		txid = &txHash
	}
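
	// When publish is false, the transaction is only constructed and
	// never broadcast, so callers receive a txid for a transaction that
	// does not exist on-chain, which is useful for exercising failure
	// paths.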

	// At this point, we can begin our external channel funding workflow.
	// We'll start by generating a pending channel ID externally that will
	// be used to track this new funding type.
	var pendingChanID [32]byte
	_, err = rand.Read(pendingChanID[:])
	require.NoError(t.t, err)

	// Now that we have the pending channel ID, Dave (our responder) will
	// register the intent to receive a new channel funding workflow using
	// the pending channel ID.
	chanPoint := &lnrpc.ChannelPoint{
		FundingTxid: &lnrpc.ChannelPoint_FundingTxidBytes{
			FundingTxidBytes: txid[:],
		},
	}
	chanPointShim := &lnrpc.ChanPointShim{
		Amt:       int64(chanSize),
		ChanPoint: chanPoint,
		LocalKey: &lnrpc.KeyDescriptor{
			RawKeyBytes: daveFundingKey.RawKeyBytes,
			KeyLoc: &lnrpc.KeyLocator{
				KeyFamily: daveFundingKey.KeyLoc.KeyFamily,
				KeyIndex:  daveFundingKey.KeyLoc.KeyIndex,
			},
		},
		RemoteKey:     carolFundingKey.RawKeyBytes,
		PendingChanId: pendingChanID[:],
		ThawHeight:    thawHeight,
	}
	fundingShim := &lnrpc.FundingShim{
		Shim: &lnrpc.FundingShim_ChanPointShim{
			ChanPointShim: chanPointShim,
		},
	}
	_, err = dave.FundingStateStep(ctxb, &lnrpc.FundingTransitionMsg{
		Trigger: &lnrpc.FundingTransitionMsg_ShimRegister{
			ShimRegister: fundingShim,
		},
	})
	require.NoError(t.t, err)

	// If we attempt to register the same shim (which has the same pending
	// chan ID), then we should get an error.
	_, err = dave.FundingStateStep(ctxb, &lnrpc.FundingTransitionMsg{
		Trigger: &lnrpc.FundingTransitionMsg_ShimRegister{
			ShimRegister: fundingShim,
		},
	})
	if err == nil {
		t.Fatalf("duplicate pending channel ID funding shim " +
			"registration should trigger an error")
	}

	// We'll take the chan point shim we just registered for Dave (the
	// responder), and swap the local/remote keys before we feed it in as
	// Carol's funding shim as the initiator.
	fundingShim.GetChanPointShim().LocalKey = &lnrpc.KeyDescriptor{
		RawKeyBytes: carolFundingKey.RawKeyBytes,
		KeyLoc: &lnrpc.KeyLocator{
			KeyFamily: carolFundingKey.KeyLoc.KeyFamily,
			KeyIndex:  carolFundingKey.KeyLoc.KeyIndex,
		},
	}
	fundingShim.GetChanPointShim().RemoteKey = daveFundingKey.RawKeyBytes

	return fundingShim, chanPoint, txid
}
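
// A minimal sketch of how a test might consume the helper above; the node
// names, amount and thaw height are illustrative, and the FundingShim field
// on lntest.OpenChannelParams is assumed to exist in this harness version:
//
//	fundingShim, chanPoint, _ := deriveFundingShim(
//		net, t, carol, dave, btcutil.Amount(1000000), 10, 1, true,
//	)
//	_ = openChannelAndAssert(
//		ctxt, t, net, carol, dave, lntest.OpenChannelParams{
//			Amt:         btcutil.Amount(1000000),
//			FundingShim: fundingShim,
//		},
//	)
//	_ = chanPoint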
1965
lntest/itest/lnd_misc_test.go
Normal file
File diff suppressed because it is too large
Load Diff
424
lntest/itest/lnd_open_channel_test.go
Normal file
@ -0,0 +1,424 @@
package itest

import (
	"context"
	"fmt"
	"time"

	"github.com/btcsuite/btcd/btcjson"
	"github.com/btcsuite/btcd/chaincfg/chainhash"
	"github.com/btcsuite/btcd/integration/rpctest"
	"github.com/btcsuite/btcd/rpcclient"
	"github.com/btcsuite/btcutil"
	"github.com/lightningnetwork/lnd/funding"
	"github.com/lightningnetwork/lnd/lnrpc"
	"github.com/lightningnetwork/lnd/lntest"
	"github.com/lightningnetwork/lnd/lntest/wait"
	"github.com/stretchr/testify/require"
)

// testOpenChannelAfterReorg tests that in the case where we have an open
// channel where the funding tx gets reorged out, the channel will no
// longer be present in the node's routing table.
func testOpenChannelAfterReorg(net *lntest.NetworkHarness, t *harnessTest) {
	// Skip test for neutrino, as we cannot disconnect the miner at will.
	// TODO(halseth): remove when either can disconnect at will, or restart
	// node with connection to new miner.
	if net.BackendCfg.Name() == lntest.NeutrinoBackendName {
		t.Skipf("skipping reorg test for neutrino backend")
	}

	var (
		ctxb = context.Background()
		temp = "temp"
	)

	// Set up a new miner that we can use to cause a reorg.
	tempLogDir := fmt.Sprintf("%s/.tempminerlogs", lntest.GetLogDir())
	logFilename := "output-open_channel_reorg-temp_miner.log"
	tempMiner, tempMinerCleanUp, err := lntest.NewMiner(
		tempLogDir, logFilename, harnessNetParams,
		&rpcclient.NotificationHandlers{}, lntest.GetBtcdBinary(),
	)
	require.NoError(t.t, err, "failed to create temp miner")
	defer func() {
		require.NoError(
			t.t, tempMinerCleanUp(),
			"failed to clean up temp miner",
		)
	}()

	// Set up the temp miner.
	require.NoError(
		t.t, tempMiner.SetUp(false, 0), "unable to set up mining node",
	)

	// We start by connecting the new miner to our original miner, such
	// that it will sync to our original chain.
	err = net.Miner.Client.Node(
		btcjson.NConnect, tempMiner.P2PAddress(), &temp,
	)
	if err != nil {
		t.Fatalf("unable to connect node: %v", err)
	}
	nodeSlice := []*rpctest.Harness{net.Miner, tempMiner}
	if err := rpctest.JoinNodes(nodeSlice, rpctest.Blocks); err != nil {
		t.Fatalf("unable to join node on blocks: %v", err)
	}

	// The two miners should be on the same blockheight.
	assertMinerBlockHeightDelta(t, net.Miner, tempMiner, 0)

	// We disconnect the two miners, such that we can mine two different
	// chains and can cause a reorg later.
	err = net.Miner.Client.Node(
		btcjson.NDisconnect, tempMiner.P2PAddress(), &temp,
	)
	if err != nil {
		t.Fatalf("unable to disconnect node: %v", err)
	}

	// Create a new channel that requires 1 conf before it's considered
	// open, then broadcast the funding transaction.
	chanAmt := funding.MaxBtcFundingAmount
	pushAmt := btcutil.Amount(0)
	ctxt, _ := context.WithTimeout(ctxb, channelOpenTimeout)
	pendingUpdate, err := net.OpenPendingChannel(ctxt, net.Alice, net.Bob,
		chanAmt, pushAmt)
	if err != nil {
		t.Fatalf("unable to open channel: %v", err)
	}

	// Wait for miner to have seen the funding tx. The temporary miner is
	// disconnected, and won't see the transaction.
	_, err = waitForTxInMempool(net.Miner.Client, minerMempoolTimeout)
	if err != nil {
		t.Fatalf("failed to find funding tx in mempool: %v", err)
	}

	// At this point, the channel's funding transaction will have been
	// broadcast, but not confirmed, and the channel should be pending.
	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
	assertNumOpenChannelsPending(ctxt, t, net.Alice, net.Bob, 1)

	fundingTxID, err := chainhash.NewHash(pendingUpdate.Txid)
	if err != nil {
		t.Fatalf("unable to convert funding txid into chainhash.Hash:"+
			" %v", err)
	}

	// We now cause a fork, by letting our original miner mine 10 blocks,
	// and our new miner mine 15. This will also confirm our pending
	// channel on the original miner's chain, which should be considered
	// open.
	block := mineBlocks(t, net, 10, 1)[0]
	assertTxInBlock(t, block, fundingTxID)
	if _, err := tempMiner.Client.Generate(15); err != nil {
		t.Fatalf("unable to generate blocks: %v", err)
	}

	// Ensure the chain lengths are what we expect, with the temp miner
	// being 5 blocks ahead.
	assertMinerBlockHeightDelta(t, net.Miner, tempMiner, 5)

	// Wait for Alice to sync to the original miner's chain.
	_, minerHeight, err := net.Miner.Client.GetBestBlock()
	if err != nil {
		t.Fatalf("unable to get current blockheight: %v", err)
	}
	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
	err = waitForNodeBlockHeight(ctxt, net.Alice, minerHeight)
	if err != nil {
		t.Fatalf("unable to sync to chain: %v", err)
	}

	chanPoint := &lnrpc.ChannelPoint{
		FundingTxid: &lnrpc.ChannelPoint_FundingTxidBytes{
			FundingTxidBytes: pendingUpdate.Txid,
		},
		OutputIndex: pendingUpdate.OutputIndex,
	}

	// Ensure channel is no longer pending.
	assertNumOpenChannelsPending(ctxt, t, net.Alice, net.Bob, 0)

	// Wait for Alice and Bob to recognize and advertise the new channel
	// generated above.
	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
	err = net.Alice.WaitForNetworkChannelOpen(ctxt, chanPoint)
	if err != nil {
		t.Fatalf("alice didn't advertise channel before "+
			"timeout: %v", err)
	}
	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
	err = net.Bob.WaitForNetworkChannelOpen(ctxt, chanPoint)
	if err != nil {
		t.Fatalf("bob didn't advertise channel before "+
			"timeout: %v", err)
	}

	// Alice should now have 1 edge in her graph.
	req := &lnrpc.ChannelGraphRequest{
		IncludeUnannounced: true,
	}
	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
	chanGraph, err := net.Alice.DescribeGraph(ctxt, req)
	if err != nil {
		t.Fatalf("unable to query for alice's routing table: %v", err)
	}

	numEdges := len(chanGraph.Edges)
	if numEdges != 1 {
		t.Fatalf("expected to find one edge in the graph, found %d",
			numEdges)
	}

	// Now we disconnect Alice's chain backend from the original miner,
	// and connect the two miners together. Since the temporary miner
	// knows about a longer chain, both miners should sync to that chain.
	err = net.BackendCfg.DisconnectMiner()
	if err != nil {
		t.Fatalf("unable to disconnect miner: %v", err)
	}

	// Connecting to the temporary miner should now cause our original
	// chain to be re-orged out.
	err = net.Miner.Client.Node(
		btcjson.NConnect, tempMiner.P2PAddress(), &temp,
	)
	if err != nil {
		t.Fatalf("unable to connect node: %v", err)
	}

	nodes := []*rpctest.Harness{tempMiner, net.Miner}
	if err := rpctest.JoinNodes(nodes, rpctest.Blocks); err != nil {
		t.Fatalf("unable to join node on blocks: %v", err)
	}

	// Once again they should be on the same chain.
	assertMinerBlockHeightDelta(t, net.Miner, tempMiner, 0)

	// Now we disconnect the two miners, and connect our original miner
	// to our chain backend once again.
	err = net.Miner.Client.Node(
		btcjson.NDisconnect, tempMiner.P2PAddress(), &temp,
	)
	if err != nil {
		t.Fatalf("unable to disconnect node: %v", err)
	}

	err = net.BackendCfg.ConnectMiner()
	if err != nil {
		t.Fatalf("unable to connect miner: %v", err)
	}

	// This should have caused a reorg, and Alice should sync to the
	// longer chain, where the funding transaction is not confirmed.
	_, tempMinerHeight, err := tempMiner.Client.GetBestBlock()
	if err != nil {
		t.Fatalf("unable to get current blockheight: %v", err)
	}
	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
	err = waitForNodeBlockHeight(ctxt, net.Alice, tempMinerHeight)
	if err != nil {
		t.Fatalf("unable to sync to chain: %v", err)
	}

	// Since the funding tx was reorged out, Alice should now have no
	// edges in her graph.
	req = &lnrpc.ChannelGraphRequest{
		IncludeUnannounced: true,
	}

	var predErr error
	err = wait.Predicate(func() bool {
		ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
		chanGraph, err = net.Alice.DescribeGraph(ctxt, req)
		if err != nil {
			predErr = fmt.Errorf("unable to query for alice's routing table: %v", err)
			return false
		}

		numEdges = len(chanGraph.Edges)
		if numEdges != 0 {
			predErr = fmt.Errorf("expected to find no edge in the graph, found %d",
				numEdges)
			return false
		}
		return true
	}, defaultTimeout)
	if err != nil {
		t.Fatalf(predErr.Error())
	}

	// Clean up by mining the funding tx again, then closing the channel.
	block = mineBlocks(t, net, 1, 1)[0]
	assertTxInBlock(t, block, fundingTxID)

	ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout)
	closeReorgedChannelAndAssert(ctxt, t, net, net.Alice, chanPoint, false)
}
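
// For reference, a minimal sketch of what an assertion like
// assertMinerBlockHeightDelta could look like; the real helper lives
// elsewhere in this package, so this is illustrative only:
//
//	func assertMinerBlockHeightDelta(t *harnessTest,
//		miner, tempMiner *rpctest.Harness, delta int32) {
//
//		err := wait.NoError(func() error {
//			_, tempMinerHeight, err := tempMiner.Client.GetBestBlock()
//			if err != nil {
//				return err
//			}
//			_, minerHeight, err := miner.Client.GetBestBlock()
//			if err != nil {
//				return err
//			}
//			if tempMinerHeight != minerHeight+delta {
//				return fmt.Errorf("expected delta %d, got %d",
//					delta, tempMinerHeight-minerHeight)
//			}
//			return nil
//		}, defaultTimeout)
//		require.NoError(t.t, err)
//	}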

// testBasicChannelCreationAndUpdates tests multiple channel openings and
// closings, and ensures that if a node is subscribed to channel updates
// they will be received correctly for both cooperative and force closed
// channels.
func testBasicChannelCreationAndUpdates(net *lntest.NetworkHarness, t *harnessTest) {
	ctxb := context.Background()
	const (
		numChannels = 2
		amount      = funding.MaxBtcFundingAmount
	)

	// Subscribe Bob and Alice to channel event notifications.
	bobChanSub := subscribeChannelNotifications(ctxb, t, net.Bob)
	defer close(bobChanSub.quit)

	aliceChanSub := subscribeChannelNotifications(ctxb, t, net.Alice)
	defer close(aliceChanSub.quit)

	// Open the channels between Alice and Bob, asserting that they have
	// been properly opened on-chain.
	chanPoints := make([]*lnrpc.ChannelPoint, numChannels)
	for i := 0; i < numChannels; i++ {
		ctxt, _ := context.WithTimeout(ctxb, channelOpenTimeout)
		chanPoints[i] = openChannelAndAssert(
			ctxt, t, net, net.Alice, net.Bob,
			lntest.OpenChannelParams{
				Amt: amount,
			},
		)
	}

	// Since each of the channels just became open, Bob and Alice should
	// each receive a pending open, an open, and an active notification
	// for each channel.
	var numChannelUpds int
	const totalNtfns = 3 * numChannels
	verifyOpenUpdatesReceived := func(sub channelSubscription) error {
		numChannelUpds = 0
		for numChannelUpds < totalNtfns {
			select {
			case update := <-sub.updateChan:
				switch update.Type {
				case lnrpc.ChannelEventUpdate_PENDING_OPEN_CHANNEL:
					if numChannelUpds%3 != 0 {
						return fmt.Errorf("expected " +
							"open or active " +
							"channel ntfn, got pending open " +
							"channel ntfn instead")
					}
				case lnrpc.ChannelEventUpdate_OPEN_CHANNEL:
					if numChannelUpds%3 != 1 {
						return fmt.Errorf("expected " +
							"pending open or active " +
							"channel ntfn, got open " +
							"channel ntfn instead")
					}
				case lnrpc.ChannelEventUpdate_ACTIVE_CHANNEL:
					if numChannelUpds%3 != 2 {
						return fmt.Errorf("expected " +
							"pending open or open " +
							"channel ntfn, got active " +
							"channel ntfn instead")
					}
				default:
					return fmt.Errorf("update type mismatch: "+
						"expected open or active channel "+
						"notification, got: %v",
						update.Type)
				}
				numChannelUpds++
			case <-time.After(time.Second * 10):
				return fmt.Errorf("timeout waiting for channel "+
					"notifications, only received %d/%d "+
					"chanupds", numChannelUpds,
					totalNtfns)
			}
		}

		return nil
	}

	if err := verifyOpenUpdatesReceived(bobChanSub); err != nil {
		t.Fatalf("error verifying open updates: %v", err)
	}
	if err := verifyOpenUpdatesReceived(aliceChanSub); err != nil {
		t.Fatalf("error verifying open updates: %v", err)
	}

	// Close the channels between Alice and Bob, asserting that they have
	// been properly closed on-chain.
	for i, chanPoint := range chanPoints {
		ctx, _ := context.WithTimeout(context.Background(), defaultTimeout)

		// Force close half of the channels.
		force := i%2 == 0
		closeChannelAndAssert(ctx, t, net, net.Alice, chanPoint, force)
		if force {
			cleanupForceClose(t, net, net.Alice, chanPoint)
		}
	}

	// verifyCloseUpdatesReceived is used to verify that Alice and Bob
	// receive the correct channel updates in order.
	verifyCloseUpdatesReceived := func(sub channelSubscription,
		forceType lnrpc.ChannelCloseSummary_ClosureType,
		closeInitiator lnrpc.Initiator) error {

		// Ensure one inactive and one closed notification is received
		// for each closed channel.
		numChannelUpds := 0
		for numChannelUpds < 2*numChannels {
			expectedCloseType := lnrpc.ChannelCloseSummary_COOPERATIVE_CLOSE

			// Every other channel should be force closed. If this
			// channel was force closed, set the expected close
			// type to the type passed in.
			force := (numChannelUpds/2)%2 == 0
			if force {
				expectedCloseType = forceType
			}
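
			// Note on the arithmetic above: each closed channel
			// emits two updates, so numChannelUpds/2 indexes the
			// channel, and channels at even indices were force
			// closed by the loop earlier in this test.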

			select {
			case chanUpdate := <-sub.updateChan:
				err := verifyCloseUpdate(
					chanUpdate, expectedCloseType,
					closeInitiator,
				)
				if err != nil {
					return err
				}

				numChannelUpds++
			case err := <-sub.errChan:
				return err
			case <-time.After(time.Second * 10):
				return fmt.Errorf("timeout waiting "+
					"for channel notifications, only "+
					"received %d/%d chanupds",
					numChannelUpds, 2*numChannels)
			}
		}

		return nil
	}

	// Verify Bob receives all closed channel notifications. He should
	// receive a remote force close notification for force closed
	// channels. All channels (cooperatively and force closed) should
	// have a remote close initiator because Alice closed the channels.
	if err := verifyCloseUpdatesReceived(bobChanSub,
		lnrpc.ChannelCloseSummary_REMOTE_FORCE_CLOSE,
		lnrpc.Initiator_INITIATOR_REMOTE); err != nil {
		t.Fatalf("error verifying close updates: %v", err)
	}

	// Verify Alice receives all closed channel notifications. She should
	// receive a local force close notification for force closed
	// channels. All channels (cooperatively and force closed) should
	// have a local close initiator because Alice closed the channels.
	if err := verifyCloseUpdatesReceived(aliceChanSub,
		lnrpc.ChannelCloseSummary_LOCAL_FORCE_CLOSE,
		lnrpc.Initiator_INITIATOR_LOCAL); err != nil {
		t.Fatalf("error verifying close updates: %v", err)
	}
}
821
lntest/itest/lnd_payment_test.go
Normal file
@ -0,0 +1,821 @@
package itest

import (
	"bytes"
	"context"
	"crypto/sha256"
	"encoding/hex"
	"reflect"
	"time"

	"github.com/btcsuite/btcutil"
	"github.com/lightningnetwork/lnd/input"
	"github.com/lightningnetwork/lnd/lnrpc"
	"github.com/lightningnetwork/lnd/lnrpc/routerrpc"
	"github.com/lightningnetwork/lnd/lntest"
	"github.com/lightningnetwork/lnd/lntest/wait"
)

func testListPayments(net *lntest.NetworkHarness, t *harnessTest) {
	ctxb := context.Background()

	// First start by deleting all payments that Alice knows of. This will
	// allow us to execute the test with a clean state for Alice.
	delPaymentsReq := &lnrpc.DeleteAllPaymentsRequest{}
	ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
	if _, err := net.Alice.DeleteAllPayments(ctxt, delPaymentsReq); err != nil {
		t.Fatalf("unable to delete payments: %v", err)
	}

	// Check that there are no payments before the test.
	reqInit := &lnrpc.ListPaymentsRequest{}
	ctxt, _ = context.WithTimeout(ctxt, defaultTimeout)
	paymentsRespInit, err := net.Alice.ListPayments(ctxt, reqInit)
	if err != nil {
		t.Fatalf("error when obtaining Alice payments: %v", err)
	}
	if len(paymentsRespInit.Payments) != 0 {
		t.Fatalf("incorrect number of payments, got %v, want %v",
			len(paymentsRespInit.Payments), 0)
	}

	// Open a channel with 100k satoshis between Alice and Bob with Alice
	// being the sole funder of the channel.
	chanAmt := btcutil.Amount(100000)
	ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout)
	chanPoint := openChannelAndAssert(
		ctxt, t, net, net.Alice, net.Bob,
		lntest.OpenChannelParams{
			Amt: chanAmt,
		},
	)

	// Now that the channel is open, create an invoice for Bob which
	// expects a payment of 1000 satoshis from Alice paid via a particular
	// preimage.
	const paymentAmt = 1000
	preimage := bytes.Repeat([]byte("B"), 32)
	invoice := &lnrpc.Invoice{
		Memo:      "testing",
		RPreimage: preimage,
		Value:     paymentAmt,
	}
	addInvoiceCtxt, _ := context.WithTimeout(ctxb, defaultTimeout)
	invoiceResp, err := net.Bob.AddInvoice(addInvoiceCtxt, invoice)
	if err != nil {
		t.Fatalf("unable to add invoice: %v", err)
	}

	// Wait for Alice to recognize and advertise the new channel generated
	// above.
	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
	if err = net.Alice.WaitForNetworkChannelOpen(ctxt, chanPoint); err != nil {
		t.Fatalf("alice didn't advertise channel before "+
			"timeout: %v", err)
	}
	if err = net.Bob.WaitForNetworkChannelOpen(ctxt, chanPoint); err != nil {
		t.Fatalf("bob didn't advertise channel before "+
			"timeout: %v", err)
	}

	// With the invoice for Bob added, send a payment from Alice paying
	// to the above generated invoice.
	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
	sendAndAssertSuccess(
		ctxt, t, net.Alice,
		&routerrpc.SendPaymentRequest{
			PaymentRequest: invoiceResp.PaymentRequest,
			TimeoutSeconds: 60,
			FeeLimitSat:    1000000,
		},
	)

	// Grab Alice's list of payments, she should show the existence of
	// exactly one payment.
	req := &lnrpc.ListPaymentsRequest{}
	ctxt, _ = context.WithTimeout(ctxt, defaultTimeout)
	paymentsResp, err := net.Alice.ListPayments(ctxt, req)
	if err != nil {
		t.Fatalf("error when obtaining Alice payments: %v", err)
	}
	if len(paymentsResp.Payments) != 1 {
		t.Fatalf("incorrect number of payments, got %v, want %v",
			len(paymentsResp.Payments), 1)
	}
	p := paymentsResp.Payments[0] // nolint:staticcheck
	path := p.Htlcs[len(p.Htlcs)-1].Route.Hops

	// Ensure that the stored path shows a direct payment to Bob with no
	// other nodes in-between.
	if len(path) != 1 || path[0].PubKey != net.Bob.PubKeyStr {
		t.Fatalf("incorrect path")
	}

	// The payment amount should also match our previous payment directly.
	if p.Value != paymentAmt { // nolint:staticcheck
		t.Fatalf("incorrect amount, got %v, want %v",
			p.Value, paymentAmt) // nolint:staticcheck
	}

	// The payment hash (or r-hash) should have been stored correctly.
	correctRHash := hex.EncodeToString(invoiceResp.RHash)
	if !reflect.DeepEqual(p.PaymentHash, correctRHash) {
		t.Fatalf("incorrect RHash, got %v, want %v",
			p.PaymentHash, correctRHash)
	}

	// As we made a single-hop direct payment, there should have been no
	// fee applied.
	if p.Fee != 0 { // nolint:staticcheck
		t.Fatalf("incorrect Fee, got %v, want %v", p.Fee, 0) // nolint:staticcheck
	}

	// Finally, verify that the payment request returned by the rpc
	// matches the invoice that we paid.
	if p.PaymentRequest != invoiceResp.PaymentRequest {
		t.Fatalf("incorrect payreq, got: %v, want: %v",
			p.PaymentRequest, invoiceResp.PaymentRequest)
	}

	// Delete all payments from Alice. DB should have no payments.
	delReq := &lnrpc.DeleteAllPaymentsRequest{}
	ctxt, _ = context.WithTimeout(ctxt, defaultTimeout)
	_, err = net.Alice.DeleteAllPayments(ctxt, delReq)
	if err != nil {
		t.Fatalf("Can't delete payments at the end: %v", err)
	}

	// Check that there are no payments after the test.
	listReq := &lnrpc.ListPaymentsRequest{}
	ctxt, _ = context.WithTimeout(ctxt, defaultTimeout)
	paymentsResp, err = net.Alice.ListPayments(ctxt, listReq)
	if err != nil {
		t.Fatalf("error when obtaining Alice payments: %v", err)
	}
	if len(paymentsResp.Payments) != 0 {
		t.Fatalf("incorrect number of payments, got %v, want %v",
			len(paymentsResp.Payments), 0)
	}

	ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout)
	closeChannelAndAssert(ctxt, t, net, net.Alice, chanPoint, false)
}

// testPaymentFollowingChannelOpen tests that the channel transition from
// 'pending' to 'open' state does not cause any inconsistencies within other
// subsystems trying to update the channel state in the db. We follow this
// transition with a payment that updates the commitment state and verify
// that the pending state is up to date.
func testPaymentFollowingChannelOpen(net *lntest.NetworkHarness, t *harnessTest) {
	ctxb := context.Background()

	const paymentAmt = btcutil.Amount(100)
	channelCapacity := paymentAmt * 1000

	// We first establish a channel between Alice and Bob.
	ctxt, cancel := context.WithTimeout(ctxb, channelOpenTimeout)
	defer cancel()
	pendingUpdate, err := net.OpenPendingChannel(
		ctxt, net.Alice, net.Bob, channelCapacity, 0,
	)
	if err != nil {
		t.Fatalf("unable to open channel: %v", err)
	}

	// At this point, the channel's funding transaction will have been
	// broadcast, but not confirmed. Alice and Bob's nodes
	// should reflect this when queried via RPC.
	ctxt, cancel = context.WithTimeout(ctxb, defaultTimeout)
	defer cancel()
	assertNumOpenChannelsPending(ctxt, t, net.Alice, net.Bob, 1)

	// We are restarting Bob's node to let the link be created for the
	// pending channel.
	if err := net.RestartNode(net.Bob, nil); err != nil {
		t.Fatalf("Bob restart failed: %v", err)
	}

	// We ensure that Bob reconnects to Alice.
	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
	net.EnsureConnected(ctxt, t.t, net.Bob, net.Alice)

	// We mine six blocks for the channel to be confirmed.
	_ = mineBlocks(t, net, 6, 1)[0]

	// We verify that the channel is open from both nodes' point of view.
	ctxt, cancel = context.WithTimeout(ctxb, defaultTimeout)
	defer cancel()
	assertNumOpenChannelsPending(ctxt, t, net.Alice, net.Bob, 0)

	// With the channel open, we'll create invoices for Bob that Alice
	// will pay to in order to advance the state of the channel.
	bobPayReqs, _, _, err := createPayReqs(
		net.Bob, paymentAmt, 1,
	)
	if err != nil {
		t.Fatalf("unable to create pay reqs: %v", err)
	}

	// Send payment to Bob so that a channel update to disk will be
	// executed.
	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
	sendAndAssertSuccess(
		ctxt, t, net.Alice, &routerrpc.SendPaymentRequest{
			PaymentRequest: bobPayReqs[0],
			TimeoutSeconds: 60,
			FeeLimitSat:    1000000,
		},
	)

	// At this point we want to make sure the channel is open and not
	// pending.
	ctxt, cancel = context.WithTimeout(ctxb, defaultTimeout)
	defer cancel()
	res, err := net.Bob.ListChannels(ctxt, &lnrpc.ListChannelsRequest{})
	if err != nil {
		t.Fatalf("unable to list bob channels: %v", err)
	}
	if len(res.Channels) == 0 {
		t.Fatalf("bob's list of channels is empty")
	}

	// Finally, immediately close the channel. This function will also
	// block until the channel is closed and will additionally assert the
	// relevant channel closing post conditions.
	chanPoint := &lnrpc.ChannelPoint{
		FundingTxid: &lnrpc.ChannelPoint_FundingTxidBytes{
			FundingTxidBytes: pendingUpdate.Txid,
		},
		OutputIndex: pendingUpdate.OutputIndex,
	}
	ctxt, cancel = context.WithTimeout(ctxb, channelCloseTimeout)
	defer cancel()
	closeChannelAndAssert(ctxt, t, net, net.Alice, chanPoint, false)
}

// testAsyncPayments tests the performance of the async payments.
func testAsyncPayments(net *lntest.NetworkHarness, t *harnessTest) {
	ctxb := context.Background()

	const (
		paymentAmt = 100
	)

	// First establish a channel with a capacity equal to the overall
	// amount of payments between Alice and Bob. At the end of the test,
	// Alice should have sent all money from her side to Bob.
	ctxt, _ := context.WithTimeout(ctxb, channelOpenTimeout)
	channelCapacity := btcutil.Amount(paymentAmt * 2000)
	chanPoint := openChannelAndAssert(
		ctxt, t, net, net.Alice, net.Bob,
		lntest.OpenChannelParams{
			Amt: channelCapacity,
		},
	)

	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
	info, err := getChanInfo(ctxt, net.Alice)
	if err != nil {
		t.Fatalf("unable to get alice channel info: %v", err)
	}

	// We'll create a number of invoices equal to the max number of HTLCs
	// that can be carried in one direction. The number on the commitment
	// will likely be lower, but we can't guarantee that any more HTLCs
	// will succeed due to the limited path diversity and inability of
	// the router to retry via another path.
	numInvoices := int(input.MaxHTLCNumber / 2)
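
	// input.MaxHTLCNumber bounds the HTLCs that can be active on a
	// commitment across both directions, hence the division by two to
	// arrive at a safe budget for a single direction.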

	bobAmt := int64(numInvoices * paymentAmt)
	aliceAmt := info.LocalBalance - bobAmt

	// With the channel open, we'll create invoices for Bob that Alice
	// will pay to in order to advance the state of the channel.
	bobPayReqs, _, _, err := createPayReqs(
		net.Bob, paymentAmt, numInvoices,
	)
	if err != nil {
		t.Fatalf("unable to create pay reqs: %v", err)
	}

	// Wait for Alice to receive the channel edge from the funding
	// manager.
	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
	err = net.Alice.WaitForNetworkChannelOpen(ctxt, chanPoint)
	if err != nil {
		t.Fatalf("alice didn't see the alice->bob channel before "+
			"timeout: %v", err)
	}

	// Simultaneously send payments from Alice to Bob using Bob's payment
	// hashes generated above.
	now := time.Now()
	errChan := make(chan error)
	statusChan := make(chan *lnrpc.Payment)
	for i := 0; i < numInvoices; i++ {
		payReq := bobPayReqs[i]
		go func() {
			ctxt, _ = context.WithTimeout(ctxb, lntest.AsyncBenchmarkTimeout)
			stream, err := net.Alice.RouterClient.SendPaymentV2(
				ctxt,
				&routerrpc.SendPaymentRequest{
					PaymentRequest: payReq,
					TimeoutSeconds: 60,
					FeeLimitMsat:   noFeeLimitMsat,
				},
			)
			if err != nil {
				errChan <- err
			}
			result, err := getPaymentResult(stream)
			if err != nil {
				errChan <- err
			}

			statusChan <- result
		}()
	}

	// Wait until all the payments have settled.
	for i := 0; i < numInvoices; i++ {
		select {
		case result := <-statusChan:
			if result.Status == lnrpc.Payment_SUCCEEDED {
				continue
			}

		case err := <-errChan:
			t.Fatalf("payment error: %v", err)
		}
	}

	// All payments have been sent, mark the finish time.
	timeTaken := time.Since(now)

	// Next query for Bob's and Alice's channel states, in order to
	// confirm that all payments have been successfully transmitted.

	// Wait for the revocation to be received so Alice no longer has
	// pending htlcs listed and has correct balances. This is needed due
	// to the fact that we now pipeline the settles.
	err = wait.Predicate(func() bool {
		ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
		aliceChan, err := getChanInfo(ctxt, net.Alice)
		if err != nil {
			return false
		}
		if len(aliceChan.PendingHtlcs) != 0 {
			return false
		}
		if aliceChan.RemoteBalance != bobAmt {
			return false
		}
		if aliceChan.LocalBalance != aliceAmt {
			return false
		}

		return true
	}, time.Second*5)
	if err != nil {
		t.Fatalf("failed to assert alice's pending htlcs and/or remote/local balance")
	}

	// Wait for Bob to receive revocation from Alice.
	time.Sleep(2 * time.Second)

	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
	bobChan, err := getChanInfo(ctxt, net.Bob)
	if err != nil {
		t.Fatalf("unable to get bob's channel info: %v", err)
	}
	if len(bobChan.PendingHtlcs) != 0 {
		t.Fatalf("bob's pending htlcs is incorrect, got %v, "+
			"expected %v", len(bobChan.PendingHtlcs), 0)
	}
	if bobChan.LocalBalance != bobAmt {
		t.Fatalf("bob's local balance is incorrect, got %v, expected"+
			" %v", bobChan.LocalBalance, bobAmt)
	}
	if bobChan.RemoteBalance != aliceAmt {
		t.Fatalf("bob's remote balance is incorrect, got %v, "+
			"expected %v", bobChan.RemoteBalance, aliceAmt)
	}

	t.Log("\tBenchmark info: Elapsed time: ", timeTaken)
	t.Log("\tBenchmark info: TPS: ", float64(numInvoices)/timeTaken.Seconds())

	// Finally, immediately close the channel. This function will also
	// block until the channel is closed and will additionally assert the
	// relevant channel closing post conditions.
	ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout)
	closeChannelAndAssert(ctxt, t, net, net.Alice, chanPoint, false)
}

// testBidirectionalAsyncPayments tests that nodes are able to send payments
// to each other in an async manner without blocking.
func testBidirectionalAsyncPayments(net *lntest.NetworkHarness, t *harnessTest) {
	ctxb := context.Background()

	const (
		paymentAmt = 1000
	)

	// First establish a channel with a capacity equal to the overall
	// amount of payments between Alice and Bob, pushing half of the
	// capacity to Bob so that both sides can send.
	ctxt, _ := context.WithTimeout(ctxb, channelOpenTimeout)
	chanPoint := openChannelAndAssert(
		ctxt, t, net, net.Alice, net.Bob,
		lntest.OpenChannelParams{
			Amt:     paymentAmt * 2000,
			PushAmt: paymentAmt * 1000,
		},
	)

	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
	info, err := getChanInfo(ctxt, net.Alice)
	if err != nil {
		t.Fatalf("unable to get alice channel info: %v", err)
	}

	// We'll create a number of invoices equal to the max number of HTLCs
	// that can be carried in one direction. The number on the commitment
	// will likely be lower, but we can't guarantee that any more HTLCs
	// will succeed due to the limited path diversity and inability of
	// the router to retry via another path.
	numInvoices := int(input.MaxHTLCNumber / 2)

	// Nodes should exchange the same amount of money, so the balances
	// should remain the same at the end.
	aliceAmt := info.LocalBalance
	bobAmt := info.RemoteBalance

	// With the channel open, we'll create invoices for Bob that Alice
	// will pay to in order to advance the state of the channel.
	bobPayReqs, _, _, err := createPayReqs(
		net.Bob, paymentAmt, numInvoices,
	)
	if err != nil {
		t.Fatalf("unable to create pay reqs: %v", err)
	}

	// With the channel open, we'll create invoices for Alice that Bob
	// will pay to in order to advance the state of the channel.
	alicePayReqs, _, _, err := createPayReqs(
		net.Alice, paymentAmt, numInvoices,
	)
	if err != nil {
		t.Fatalf("unable to create pay reqs: %v", err)
	}

	// Wait for Alice to receive the channel edge from the funding
	// manager.
	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
	if err = net.Alice.WaitForNetworkChannelOpen(ctxt, chanPoint); err != nil {
		t.Fatalf("alice didn't see the alice->bob channel before "+
			"timeout: %v", err)
	}
	if err = net.Bob.WaitForNetworkChannelOpen(ctxt, chanPoint); err != nil {
		t.Fatalf("bob didn't see the bob->alice channel before "+
			"timeout: %v", err)
	}

	// Reset mission control to prevent previous payment results from
	// interfering with this test. A new channel has been opened, but
	// mission control operates on node pairs.
	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
	_, err = net.Alice.RouterClient.ResetMissionControl(
		ctxt, &routerrpc.ResetMissionControlRequest{},
	)
	if err != nil {
		t.Fatalf("unable to reset mc for alice: %v", err)
	}

	// Send payments from Alice to Bob and from Bob to Alice in an async
	// manner.
	errChan := make(chan error)
	statusChan := make(chan *lnrpc.Payment)

	send := func(node *lntest.HarnessNode, payReq string) {
		go func() {
			ctxt, _ = context.WithTimeout(
				ctxb, lntest.AsyncBenchmarkTimeout,
			)
			stream, err := node.RouterClient.SendPaymentV2(
				ctxt,
				&routerrpc.SendPaymentRequest{
					PaymentRequest: payReq,
					TimeoutSeconds: 60,
					FeeLimitMsat:   noFeeLimitMsat,
				},
			)
			if err != nil {
				errChan <- err
			}
			result, err := getPaymentResult(stream)
			if err != nil {
				errChan <- err
			}

			statusChan <- result
		}()
	}

	for i := 0; i < numInvoices; i++ {
		send(net.Bob, alicePayReqs[i])
		send(net.Alice, bobPayReqs[i])
	}

	// Expect all payments to succeed.
	for i := 0; i < 2*numInvoices; i++ {
		select {
		case result := <-statusChan:
			if result.Status != lnrpc.Payment_SUCCEEDED {
				t.Fatalf("payment error: %v", result.Status)
			}

		case err := <-errChan:
			t.Fatalf("payment error: %v", err)
		}
	}

	// Wait for Alice and Bob to receive revocation messages, and update
	// states, i.e. balance info.
	time.Sleep(1 * time.Second)

	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
	aliceInfo, err := getChanInfo(ctxt, net.Alice)
	if err != nil {
		t.Fatalf("unable to get alice's channel info: %v", err)
	}
	if aliceInfo.RemoteBalance != bobAmt {
		t.Fatalf("alice's remote balance is incorrect, got %v, "+
			"expected %v", aliceInfo.RemoteBalance, bobAmt)
	}
	if aliceInfo.LocalBalance != aliceAmt {
		t.Fatalf("alice's local balance is incorrect, got %v, "+
			"expected %v", aliceInfo.LocalBalance, aliceAmt)
	}
	if len(aliceInfo.PendingHtlcs) != 0 {
		t.Fatalf("alice's pending htlcs is incorrect, got %v, "+
			"expected %v", len(aliceInfo.PendingHtlcs), 0)
	}

	// Next query for Bob's and Alice's channel states, in order to
	// confirm that all payments have been successfully transmitted.
	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
	bobInfo, err := getChanInfo(ctxt, net.Bob)
	if err != nil {
		t.Fatalf("unable to get bob's channel info: %v", err)
	}

	if bobInfo.LocalBalance != bobAmt {
		t.Fatalf("bob's local balance is incorrect, got %v, expected"+
			" %v", bobInfo.LocalBalance, bobAmt)
	}
	if bobInfo.RemoteBalance != aliceAmt {
		t.Fatalf("bob's remote balance is incorrect, got %v, "+
			"expected %v", bobInfo.RemoteBalance, aliceAmt)
	}
	if len(bobInfo.PendingHtlcs) != 0 {
		t.Fatalf("bob's pending htlcs is incorrect, got %v, "+
			"expected %v", len(bobInfo.PendingHtlcs), 0)
	}

	// Finally, immediately close the channel. This function will also
	// block until the channel is closed and will additionally assert the
	// relevant channel closing post conditions.
	ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout)
	closeChannelAndAssert(ctxt, t, net, net.Alice, chanPoint, false)
}

func testInvoiceSubscriptions(net *lntest.NetworkHarness, t *harnessTest) {
	ctxb := context.Background()

	const chanAmt = btcutil.Amount(500000)

	// Open a channel with 500k satoshis between Alice and Bob with Alice
	// being the sole funder of the channel.
	ctxt, _ := context.WithTimeout(ctxb, channelOpenTimeout)
	chanPoint := openChannelAndAssert(
		ctxt, t, net, net.Alice, net.Bob,
		lntest.OpenChannelParams{
			Amt: chanAmt,
		},
	)

	// Next create a new invoice for Bob requesting 1k satoshis.
	// TODO(roasbeef): make global list of invoices for each node to re-use
	// and avoid collisions
	const paymentAmt = 1000
	invoice := &lnrpc.Invoice{
		Memo:      "testing",
		RPreimage: makeFakePayHash(t),
		Value:     paymentAmt,
	}
	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
	invoiceResp, err := net.Bob.AddInvoice(ctxt, invoice)
	if err != nil {
		t.Fatalf("unable to add invoice: %v", err)
	}
	lastAddIndex := invoiceResp.AddIndex

	// Create a new invoice subscription client for Bob; the notification
	// should be dispatched shortly below.
	req := &lnrpc.InvoiceSubscription{}
	ctx, cancelInvoiceSubscription := context.WithCancel(ctxb)
	bobInvoiceSubscription, err := net.Bob.SubscribeInvoices(ctx, req)
	if err != nil {
		t.Fatalf("unable to subscribe to bob's invoice updates: %v", err)
	}

	var settleIndex uint64
	quit := make(chan struct{})
	updateSent := make(chan struct{})
	go func() {
		invoiceUpdate, err := bobInvoiceSubscription.Recv()
		select {
		case <-quit:
			// Received cancellation.
			return
		default:
		}

		if err != nil {
			t.Fatalf("unable to recv invoice update: %v", err)
		}

		// The invoice update should exactly match the invoice created
		// above, but should now be settled and have its settle date
		// set.
		if !invoiceUpdate.Settled { // nolint:staticcheck
			t.Fatalf("invoice not settled but should be")
		}
		if invoiceUpdate.SettleDate == 0 {
			t.Fatalf("invoice should have non zero settle date, but doesn't")
		}

		if !bytes.Equal(invoiceUpdate.RPreimage, invoice.RPreimage) {
			t.Fatalf("payment preimages don't match: expected %v, got %v",
				invoice.RPreimage, invoiceUpdate.RPreimage)
		}

		if invoiceUpdate.SettleIndex == 0 {
			t.Fatalf("invoice should have settle index")
		}

		settleIndex = invoiceUpdate.SettleIndex

		close(updateSent)
	}()
|
||||
|
||||
// Wait for the channel to be recognized by both Alice and Bob before
// continuing the rest of the test.
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
err = net.Alice.WaitForNetworkChannelOpen(ctxt, chanPoint)
if err != nil {
// TODO(roasbeef): will need to make num blocks to advertise a
// node param
close(quit)
t.Fatalf("channel not seen by alice before timeout: %v", err)
}

// With the assertion above set up, send a payment from Alice to Bob
// which should finalize and settle the invoice.
sendReq := &routerrpc.SendPaymentRequest{
PaymentRequest: invoiceResp.PaymentRequest,
TimeoutSeconds: 60,
FeeLimitMsat: noFeeLimitMsat,
}
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
stream, err := net.Alice.RouterClient.SendPaymentV2(ctxt, sendReq)
if err != nil {
close(quit)
t.Fatalf("unable to send payment: %v", err)
}
result, err := getPaymentResult(stream)
if err != nil {
close(quit)
t.Fatalf("cannot get payment result: %v", err)
}
if result.Status != lnrpc.Payment_SUCCEEDED {
close(quit)
t.Fatalf("payment did not succeed: %v", result.Status)
}

select {
case <-time.After(time.Second * 10):
close(quit)
t.Fatalf("update not sent after 10 seconds")
case <-updateSent: // Fall through on success
}

// With the base case working, we'll now cancel Bob's current
// subscription in order to exercise the backlog fill behavior.
cancelInvoiceSubscription()

// We'll now add 3 more invoices to Bob's invoice registry.
const numInvoices = 3
payReqs, _, newInvoices, err := createPayReqs(
net.Bob, paymentAmt, numInvoices,
)
if err != nil {
t.Fatalf("unable to create pay reqs: %v", err)
}

// Now that the set of invoices has been added, we'll re-register for
// streaming invoice notifications for Bob, this time specifying the
// add index of the last prior invoice.
req = &lnrpc.InvoiceSubscription{
AddIndex: lastAddIndex,
}
ctx, cancelInvoiceSubscription = context.WithCancel(ctxb)
bobInvoiceSubscription, err = net.Bob.SubscribeInvoices(ctx, req)
if err != nil {
t.Fatalf("unable to subscribe to bob's invoice updates: %v", err)
}

// Since we specified a value of the prior add index above, we should
// now immediately get the invoices we just added as we should get the
// backlog of notifications.
for i := 0; i < numInvoices; i++ {
invoiceUpdate, err := bobInvoiceSubscription.Recv()
if err != nil {
t.Fatalf("unable to receive subscription update: %v", err)
}

// We should now get the ith invoice we added, as they should
// be returned in order.
if invoiceUpdate.Settled { // nolint:staticcheck
t.Fatalf("should have only received add events")
}
originalInvoice := newInvoices[i]
rHash := sha256.Sum256(originalInvoice.RPreimage)
if !bytes.Equal(invoiceUpdate.RHash, rHash[:]) {
t.Fatalf("invoices have mismatched payment hashes: "+
"expected %x, got %x", rHash[:],
invoiceUpdate.RHash)
}
}

cancelInvoiceSubscription()

// We'll now have Bob settle out the remainder of these invoices so we
// can test that all settled invoices are properly notified.
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
err = completePaymentRequests(
ctxt, net.Alice, net.Alice.RouterClient, payReqs, true,
)
if err != nil {
t.Fatalf("unable to send payment: %v", err)
}

// With the set of invoices paid, we'll now cancel the old
// subscription, and create a new one for Bob, this time using the
// settle index to obtain the backlog of settled invoices.
req = &lnrpc.InvoiceSubscription{
SettleIndex: settleIndex,
}
ctx, cancelInvoiceSubscription = context.WithCancel(ctxb)
bobInvoiceSubscription, err = net.Bob.SubscribeInvoices(ctx, req)
if err != nil {
t.Fatalf("unable to subscribe to bob's invoice updates: %v", err)
}

defer cancelInvoiceSubscription()

// As we specified the index of the past settle index, we should now
// receive notifications for the three HTLCs that we just settled. As
// the order that the HTLCs will be settled in is partially randomized,
// we'll use a map to assert that the proper set has been settled.
settledInvoices := make(map[[32]byte]struct{})
for _, invoice := range newInvoices {
rHash := sha256.Sum256(invoice.RPreimage)
settledInvoices[rHash] = struct{}{}
}
for i := 0; i < numInvoices; i++ {
invoiceUpdate, err := bobInvoiceSubscription.Recv()
if err != nil {
t.Fatalf("unable to receive subscription update: %v", err)
}

// Each invoice we receive should be part of the set we just
// settled, though the exact order is not guaranteed.
if !invoiceUpdate.Settled { // nolint:staticcheck
t.Fatalf("should have only received settle events")
}

var rHash [32]byte
copy(rHash[:], invoiceUpdate.RHash)
if _, ok := settledInvoices[rHash]; !ok {
t.Fatalf("unknown invoice settled: %x", rHash)
}

delete(settledInvoices, rHash)
}

// At this point, all the invoices should be fully settled.
if len(settledInvoices) != 0 {
t.Fatalf("not all invoices settled")
}

ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout)
closeChannelAndAssert(ctxt, t, net, net.Alice, chanPoint, false)
}
331
lntest/itest/lnd_recovery_test.go
Normal file
@ -0,0 +1,331 @@
package itest

import (
"context"
"math"

"github.com/btcsuite/btcutil"
"github.com/lightningnetwork/lnd/lnrpc"
"github.com/lightningnetwork/lnd/lntest"
"github.com/lightningnetwork/lnd/lntest/wait"
)

// testGetRecoveryInfo checks whether lnd gives the right information about
// the wallet recovery process.
func testGetRecoveryInfo(net *lntest.NetworkHarness, t *harnessTest) {
ctxb := context.Background()

// First, create a new node with a strong passphrase and grab the
// mnemonic used for key derivation. This will bring up Carol with an
// empty wallet, and such that she is synced up.
password := []byte("The Magic Words are Squeamish Ossifrage")
carol, mnemonic, _, err := net.NewNodeWithSeed(
"Carol", nil, password, false,
)
if err != nil {
t.Fatalf("unable to create node with seed: %v", err)
}

shutdownAndAssert(net, t, carol)

checkInfo := func(expectedRecoveryMode, expectedRecoveryFinished bool,
expectedProgress float64, recoveryWindow int32) {

// Restore Carol, passing in the password, mnemonic, and
// desired recovery window.
node, err := net.RestoreNodeWithSeed(
"Carol", nil, password, mnemonic, recoveryWindow, nil,
)
if err != nil {
t.Fatalf("unable to restore node: %v", err)
}

// Wait for Carol to sync to the chain.
_, minerHeight, err := net.Miner.Client.GetBestBlock()
if err != nil {
t.Fatalf("unable to get current blockheight: %v", err)
}
ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
err = waitForNodeBlockHeight(ctxt, node, minerHeight)
if err != nil {
t.Fatalf("unable to sync to chain: %v", err)
}

// Query carol for her current wallet recovery progress.
var (
recoveryMode bool
recoveryFinished bool
progress float64
)

err = wait.Predicate(func() bool {
// Verify that recovery info gives the right response.
req := &lnrpc.GetRecoveryInfoRequest{}
ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
resp, err := node.GetRecoveryInfo(ctxt, req)
if err != nil {
t.Fatalf("unable to query recovery info: %v", err)
}

recoveryMode = resp.RecoveryMode
recoveryFinished = resp.RecoveryFinished
progress = resp.Progress

if recoveryMode != expectedRecoveryMode ||
recoveryFinished != expectedRecoveryFinished ||
progress != expectedProgress {
return false
}

return true
}, defaultTimeout)
if err != nil {
t.Fatalf("expected recovery mode to be %v, got %v, "+
"expected recovery finished to be %v, got %v, "+
"expected progress %v, got %v",
expectedRecoveryMode, recoveryMode,
expectedRecoveryFinished, recoveryFinished,
expectedProgress, progress,
)
}

// Lastly, shutdown this Carol so we can move on to the next
// restoration.
shutdownAndAssert(net, t, node)
}

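// For readability below, checkInfo's arguments are, in order: the
// expected recovery mode, the expected recovery-finished flag, the
// expected progress, and the recovery window to restore with.
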
// Restore Carol with a recovery window of 0. Since it's not in recovery
// mode, the recovery info will give a response with recoveryMode=false,
// recoveryFinished=false, and progress=0.
checkInfo(false, false, 0, 0)

// Change the recovery window to 1 to turn on recovery mode. Since the
// current chain height is the same as the birthday height, it should
// indicate the recovery process is finished.
checkInfo(true, true, 1, 1)

// We now go ahead 5 blocks. Because the wallet's syncing process is
// controlled by a goroutine in the background, it will catch up quickly.
// This brings the recovery progress back to 1.
mineBlocks(t, net, 5, 0)
checkInfo(true, true, 1, 1)
}

// testOnchainFundRecovery checks lnd's ability to rescan for onchain outputs
// when providing a valid aezeed that owns outputs on the chain. This test
// performs multiple restorations using the same seed and various recovery
// windows to ensure we detect funds properly.
func testOnchainFundRecovery(net *lntest.NetworkHarness, t *harnessTest) {
ctxb := context.Background()

// First, create a new node with a strong passphrase and grab the
// mnemonic used for key derivation. This will bring up Carol with an
// empty wallet, and such that she is synced up.
password := []byte("The Magic Words are Squeamish Ossifrage")
carol, mnemonic, _, err := net.NewNodeWithSeed(
"Carol", nil, password, false,
)
if err != nil {
t.Fatalf("unable to create node with seed: %v", err)
}
shutdownAndAssert(net, t, carol)

// Create a closure for testing the recovery of Carol's wallet. This
// method takes the expected value of Carol's balance when using the
// given recovery window. Additionally, the caller can specify an action
// to perform on the restored node before the node is shutdown.
restoreCheckBalance := func(expAmount int64, expectedNumUTXOs uint32,
recoveryWindow int32, fn func(*lntest.HarnessNode)) {

// Restore Carol, passing in the password, mnemonic, and
// desired recovery window.
node, err := net.RestoreNodeWithSeed(
"Carol", nil, password, mnemonic, recoveryWindow, nil,
)
if err != nil {
t.Fatalf("unable to restore node: %v", err)
}

// Query carol for her current wallet balance, and also check
// that we see the expected number of UTXOs.
var (
currBalance int64
currNumUTXOs uint32
)
err = wait.Predicate(func() bool {
req := &lnrpc.WalletBalanceRequest{}
ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
resp, err := node.WalletBalance(ctxt, req)
if err != nil {
t.Fatalf("unable to query wallet balance: %v",
err)
}
currBalance = resp.ConfirmedBalance

utxoReq := &lnrpc.ListUnspentRequest{
MaxConfs: math.MaxInt32,
}
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
utxoResp, err := node.ListUnspent(ctxt, utxoReq)
if err != nil {
t.Fatalf("unable to query utxos: %v", err)
}
currNumUTXOs = uint32(len(utxoResp.Utxos))

// Verify that Carol's balance and number of UTXOs
// matches what's expected.
if expAmount != currBalance {
return false
}
if currNumUTXOs != expectedNumUTXOs {
return false
}

return true
}, defaultTimeout)
if err != nil {
t.Fatalf("expected restored node to have %d satoshis, "+
"instead has %d satoshis, expected %d utxos "+
"instead has %d", expAmount, currBalance,
expectedNumUTXOs, currNumUTXOs)
}

// If the user provided a callback, execute the commands against
// the restored Carol.
if fn != nil {
fn(node)
}

// Lastly, shutdown this Carol so we can move on to the next
// restoration.
shutdownAndAssert(net, t, node)
}

// Create a closure-factory for building closures that can generate and
// skip a configurable number of addresses, before finally sending coins
// to the next generated address. The returned closure will apply the
// same behavior to both default P2WKH and NP2WKH scopes.
skipAndSend := func(nskip int) func(*lntest.HarnessNode) {
return func(node *lntest.HarnessNode) {
newP2WKHAddrReq := &lnrpc.NewAddressRequest{
Type: AddrTypeWitnessPubkeyHash,
}

newNP2WKHAddrReq := &lnrpc.NewAddressRequest{
Type: AddrTypeNestedPubkeyHash,
}

// Generate and skip the number of addresses requested.
for i := 0; i < nskip; i++ {
ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
_, err = node.NewAddress(ctxt, newP2WKHAddrReq)
if err != nil {
t.Fatalf("unable to generate new "+
"p2wkh address: %v", err)
}

ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
_, err = node.NewAddress(ctxt, newNP2WKHAddrReq)
if err != nil {
t.Fatalf("unable to generate new "+
"np2wkh address: %v", err)
}
}

// Send one BTC to the next P2WKH address.
ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
net.SendCoins(
ctxt, t.t, btcutil.SatoshiPerBitcoin, node,
)

// And another to the next NP2WKH address.
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
net.SendCoinsNP2WKH(
ctxt, t.t, btcutil.SatoshiPerBitcoin, node,
)
}
}

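// A note on the look-ahead arithmetic exercised below: skipAndSend(n)
// burns n addresses in each scope before sending, so the funded address
// sits n positions past the last used index. A recovery window of
// exactly n therefore stops one address short of the funds, while a
// window of n+1 or larger discovers them.
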
// Restore Carol with a recovery window of 0. Since no coins have been
// sent, her balance should be zero.
//
// After, one BTC is sent to both her first external P2WKH and NP2WKH
// addresses.
restoreCheckBalance(0, 0, 0, skipAndSend(0))

// Check that restoring without a look-ahead results in having no funds
// in the wallet, even though they exist on-chain.
restoreCheckBalance(0, 0, 0, nil)

// Now, check that using a look-ahead of 1 recovers the balance from
// the two transactions above. We should also now have 2 UTXOs in the
// wallet at the end of the recovery attempt.
//
// After, we will generate and skip 9 P2WKH and NP2WKH addresses, and
// send another BTC to the subsequent 10th address in each derivation
// path.
restoreCheckBalance(2*btcutil.SatoshiPerBitcoin, 2, 1, skipAndSend(9))

// Check that using a recovery window of 9 does not find the two most
// recent txns.
restoreCheckBalance(2*btcutil.SatoshiPerBitcoin, 2, 9, nil)

// Extending our recovery window to 10 should find the most recent
// transactions, leaving the wallet with 4 BTC total. We should also
// learn of the two additional UTXOs created above.
//
// After, we will skip 19 more addrs, sending to the 20th address past
// our last found address, and repeat the same checks.
restoreCheckBalance(4*btcutil.SatoshiPerBitcoin, 4, 10, skipAndSend(19))

// Check that recovering with a recovery window of 19 fails to find the
// most recent transactions.
restoreCheckBalance(4*btcutil.SatoshiPerBitcoin, 4, 19, nil)

// Ensure that using a recovery window of 20 succeeds with all UTXOs
// found and the final balance reflected; this is exercised by the
// restoreCheckBalance call below, after the change-address setup.

// After these checks are done, we'll want to make sure we can also
// recover change address outputs. This is mainly motivated by a now
// fixed bug in the wallet in which change addresses could at times be
// created outside of the default key scopes. Recovery only used to be
// performed on the default key scopes, so ideally this test case
// would've caught the bug earlier. Carol has received 6 BTC so far from
// the miner; we'll send 5 back so that all of her UTXOs get spent,
// which avoids fee discrepancies and ensures a change output is formed.
const minerAmt = 5 * btcutil.SatoshiPerBitcoin
const finalBalance = 6 * btcutil.SatoshiPerBitcoin
promptChangeAddr := func(node *lntest.HarnessNode) {
minerAddr, err := net.Miner.NewAddress()
if err != nil {
t.Fatalf("unable to create new miner address: %v", err)
}
ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
resp, err := node.SendCoins(ctxt, &lnrpc.SendCoinsRequest{
Addr: minerAddr.String(),
Amount: minerAmt,
})
if err != nil {
t.Fatalf("unable to send coins to miner: %v", err)
}
txid, err := waitForTxInMempool(
net.Miner.Client, minerMempoolTimeout,
)
if err != nil {
t.Fatalf("transaction not found in mempool: %v", err)
}
if resp.Txid != txid.String() {
t.Fatalf("txid mismatch: %v vs %v", resp.Txid,
txid.String())
}
block := mineBlocks(t, net, 1, 1)[0]
assertTxInBlock(t, block, txid)
}
restoreCheckBalance(finalBalance, 6, 20, promptChangeAddr)

// We should expect a static fee of 27750 satoshis for spending 6 inputs
// (3 P2WPKH, 3 NP2WPKH) to two P2WPKH outputs. Carol should therefore
// only have one UTXO present (the change output) of 6 - 5 - fee BTC.
const fee = 27750
restoreCheckBalance(finalBalance-minerAmt-fee, 1, 21, nil)
}
1331
lntest/itest/lnd_revocation_test.go
Normal file
File diff suppressed because it is too large
1192
lntest/itest/lnd_switch_test.go
Normal file
File diff suppressed because it is too large
11428
lntest/itest/lnd_test.go
File diff suppressed because it is too large
483
lntest/itest/utils.go
Normal file
@ -0,0 +1,483 @@
package itest

import (
"context"
"crypto/rand"
"fmt"
"io"
"time"

"github.com/btcsuite/btcd/rpcclient"
"github.com/btcsuite/btcd/wire"
"github.com/btcsuite/btcutil"
"github.com/go-errors/errors"
"github.com/lightningnetwork/lnd/input"
"github.com/lightningnetwork/lnd/lnrpc"
"github.com/lightningnetwork/lnd/lnrpc/routerrpc"
"github.com/lightningnetwork/lnd/lntest"
"github.com/lightningnetwork/lnd/lntest/wait"
"github.com/lightningnetwork/lnd/lnwallet"
"github.com/lightningnetwork/lnd/lnwallet/chainfee"
"github.com/lightningnetwork/lnd/lnwire"
"github.com/stretchr/testify/require"
)

// completePaymentRequests sends payments from a lightning node to complete all
// payment requests. If the awaitResponse parameter is true, this function
// does not return until all payments successfully complete without errors.
func completePaymentRequests(ctx context.Context, client lnrpc.LightningClient,
routerClient routerrpc.RouterClient, paymentRequests []string,
awaitResponse bool) error {

// We start by getting the current state of the client's channels. This
// is needed to ensure the payments actually have been committed before
// we return.
ctxt, _ := context.WithTimeout(ctx, defaultTimeout)
req := &lnrpc.ListChannelsRequest{}
listResp, err := client.ListChannels(ctxt, req)
if err != nil {
return err
}

// send sends a payment and returns an error if it doesn't succeed.
send := func(payReq string) error {
ctxc, cancel := context.WithCancel(ctx)
defer cancel()

payStream, err := routerClient.SendPaymentV2(
ctxc,
&routerrpc.SendPaymentRequest{
PaymentRequest: payReq,
TimeoutSeconds: 60,
FeeLimitMsat: noFeeLimitMsat,
},
)
if err != nil {
return err
}

resp, err := getPaymentResult(payStream)
if err != nil {
return err
}
if resp.Status != lnrpc.Payment_SUCCEEDED {
return errors.New(resp.FailureReason)
}

return nil
}

// Launch all payments simultaneously.
results := make(chan error)
for _, payReq := range paymentRequests {
payReqCopy := payReq
go func() {
err := send(payReqCopy)
if awaitResponse {
results <- err
}
}()
}

// If awaiting a response, verify that all payments succeeded.
if awaitResponse {
for range paymentRequests {
err := <-results
if err != nil {
return err
}
}
return nil
}

// We are not waiting for feedback in the form of a response, but we
// should still wait long enough for the server to receive and handle
// the send before cancelling the request. We wait until the number of
// updates to one of our channels has increased before we return.
err = wait.Predicate(func() bool {
ctxt, _ = context.WithTimeout(ctx, defaultTimeout)
newListResp, err := client.ListChannels(ctxt, req)
if err != nil {
return false
}

// If the number of open channels is now lower than before
// attempting the payments, it means one of the payments
// triggered a force closure (for example, due to an incorrect
// preimage). Return early since it's clear the payment was
// attempted.
if len(newListResp.Channels) < len(listResp.Channels) {
return true
}

for _, c1 := range listResp.Channels {
for _, c2 := range newListResp.Channels {
if c1.ChannelPoint != c2.ChannelPoint {
continue
}

// If this channel has an increased number of
// updates, we assume the payments are
// committed, and we can return.
if c2.NumUpdates > c1.NumUpdates {
return true
}
}
}

return false
}, defaultTimeout)
if err != nil {
return err
}

return nil
}

// makeFakePayHash creates a random preimage hash.
func makeFakePayHash(t *harnessTest) []byte {
randBuf := make([]byte, 32)

if _, err := rand.Read(randBuf); err != nil {
t.Fatalf("internal error, cannot generate random bytes: %v", err)
}

return randBuf
}

// createPayReqs is a helper method that will create a slice of payment
// requests for the given node.
func createPayReqs(node *lntest.HarnessNode, paymentAmt btcutil.Amount,
numInvoices int) ([]string, [][]byte, []*lnrpc.Invoice, error) {

payReqs := make([]string, numInvoices)
rHashes := make([][]byte, numInvoices)
invoices := make([]*lnrpc.Invoice, numInvoices)
for i := 0; i < numInvoices; i++ {
preimage := make([]byte, 32)
_, err := rand.Read(preimage)
if err != nil {
return nil, nil, nil, fmt.Errorf("unable to generate "+
"preimage: %v", err)
}
invoice := &lnrpc.Invoice{
Memo: "testing",
RPreimage: preimage,
Value: int64(paymentAmt),
}
ctxt, _ := context.WithTimeout(
context.Background(), defaultTimeout,
)
resp, err := node.AddInvoice(ctxt, invoice)
if err != nil {
return nil, nil, nil, fmt.Errorf("unable to add "+
"invoice: %v", err)
}

// Set the payment address in the invoice so the caller can
// properly use it.
invoice.PaymentAddr = resp.PaymentAddr

payReqs[i] = resp.PaymentRequest
rHashes[i] = resp.RHash
invoices[i] = invoice
}
return payReqs, rHashes, invoices, nil
}

// getChanInfo is a helper method for getting channel info for a node's sole
// channel.
func getChanInfo(ctx context.Context, node *lntest.HarnessNode) (
*lnrpc.Channel, error) {

req := &lnrpc.ListChannelsRequest{}
channelInfo, err := node.ListChannels(ctx, req)
if err != nil {
return nil, err
}
if len(channelInfo.Channels) != 1 {
return nil, fmt.Errorf("node should only have a single "+
"channel, instead it has %v", len(channelInfo.Channels))
}

return channelInfo.Channels[0], nil
}

// commitType is a simple enum used to run through the basic funding flow with
// different commitment formats.
type commitType byte

const (
// commitTypeLegacy is the old school commitment type.
commitTypeLegacy commitType = iota

// commitTypeTweakless is the commitment type where the remote key is
// static (non-tweaked).
commitTypeTweakless

// commitTypeAnchors is the kind of commitment that has extra outputs
// used for anchoring down the commitment using CPFP.
commitTypeAnchors
)

// String returns the name of the commitment type.
func (c commitType) String() string {
switch c {
case commitTypeLegacy:
return "legacy"
case commitTypeTweakless:
return "tweakless"
case commitTypeAnchors:
return "anchors"
default:
return "invalid"
}
}

// Args returns the command line flags to supply to enable this commitment type.
func (c commitType) Args() []string {
switch c {
case commitTypeLegacy:
return []string{"--protocol.legacy.committweak"}
case commitTypeTweakless:
return []string{}
case commitTypeAnchors:
return []string{"--protocol.anchors"}
}

return nil
}

// calcStaticFee calculates appropriate fees for commitment transactions. This
// function provides a simple way to allow test balance assertions to take fee
// calculations into account.
func (c commitType) calcStaticFee(numHTLCs int) btcutil.Amount {
const htlcWeight = input.HTLCWeight
var (
feePerKw = chainfee.SatPerKVByte(50000).FeePerKWeight()
commitWeight = input.CommitWeight
anchors = btcutil.Amount(0)
)

// The anchor commitment type is slightly heavier, and we must also add
// the value of the two anchors to the resulting fee the initiator
// pays. In addition the fee rate is capped at 10 sat/vbyte for anchor
// channels.
if c == commitTypeAnchors {
feePerKw = chainfee.SatPerKVByte(
lnwallet.DefaultAnchorsCommitMaxFeeRateSatPerVByte * 1000,
).FeePerKWeight()
commitWeight = input.AnchorCommitWeight
anchors = 2 * anchorSize
}

return feePerKw.FeeForWeight(int64(commitWeight+htlcWeight*numHTLCs)) +
anchors
}
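
// As a rough worked example of the above (assuming the weight constants
// from the input package at the time of writing, namely a base commit
// weight of 724 WU and 172 WU per HTLC): a fee rate of 50,000 sat/kvB
// converts to 12,500 sat/kw, so a legacy commitment carrying one HTLC
// pays 12,500 * (724 + 172) / 1,000 = 11,200 satoshis. The constants
// here are illustrative; the code above is authoritative.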

// channelCommitType retrieves the active channel commitment type for the
// given chan point.
func channelCommitType(node *lntest.HarnessNode,
chanPoint *lnrpc.ChannelPoint) (commitType, error) {

ctxb := context.Background()
ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)

req := &lnrpc.ListChannelsRequest{}
channels, err := node.ListChannels(ctxt, req)
if err != nil {
return 0, fmt.Errorf("listchannels failed: %v", err)
}

for _, c := range channels.Channels {
if c.ChannelPoint == txStr(chanPoint) {
switch c.CommitmentType {

// If the reported commitment type is anchors, we are
// dealing with the anchor type.
case lnrpc.CommitmentType_ANCHORS:
return commitTypeAnchors, nil

// StaticRemoteKey means it is tweakless.
case lnrpc.CommitmentType_STATIC_REMOTE_KEY:
return commitTypeTweakless, nil

// Otherwise legacy.
default:
return commitTypeLegacy, nil
}
}
}

return 0, fmt.Errorf("channel point %v not found", chanPoint)
}

// calculateMaxHtlc re-implements the RequiredRemoteChannelReserve of the
// funding manager's config, which corresponds to the maximum MaxHTLC value we
// allow users to set when updating a channel policy.
func calculateMaxHtlc(chanCap btcutil.Amount) uint64 {
reserve := lnwire.NewMSatFromSatoshis(chanCap / 100)
max := lnwire.NewMSatFromSatoshis(chanCap) - reserve
return uint64(max)
}
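
// For instance, with a hypothetical channel capacity of 500,000 sats,
// the reserve is 500,000 / 100 = 5,000 sats, so the maximum HTLC value
// works out to 495,000 sats, or 495,000,000 msat.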

// waitForNodeBlockHeight queries the node for its current block height until
// it reaches the passed height.
func waitForNodeBlockHeight(ctx context.Context, node *lntest.HarnessNode,
height int32) error {

var predErr error
err := wait.Predicate(func() bool {
ctxt, _ := context.WithTimeout(ctx, defaultTimeout)
info, err := node.GetInfo(ctxt, &lnrpc.GetInfoRequest{})
if err != nil {
predErr = err
return false
}

if int32(info.BlockHeight) != height {
predErr = fmt.Errorf("expected block height to "+
"be %v, was %v", height, info.BlockHeight)
return false
}
return true
}, defaultTimeout)
if err != nil {
return predErr
}
return nil
}

// getNTxsFromMempool polls until finding the desired number of transactions in
// the provided miner's mempool and returns the full transactions to the caller.
func getNTxsFromMempool(miner *rpcclient.Client, n int,
timeout time.Duration) ([]*wire.MsgTx, error) {

txids, err := waitForNTxsInMempool(miner, n, timeout)
if err != nil {
return nil, err
}

var txes []*wire.MsgTx
for _, txid := range txids {
tx, err := miner.GetRawTransaction(txid)
if err != nil {
return nil, err
}
txes = append(txes, tx.MsgTx())
}
return txes, nil
}

// getTxFee retrieves parent transactions and reconstructs the fee paid.
func getTxFee(miner *rpcclient.Client, tx *wire.MsgTx) (btcutil.Amount, error) {
var balance btcutil.Amount
for _, in := range tx.TxIn {
parentHash := in.PreviousOutPoint.Hash
rawTx, err := miner.GetRawTransaction(&parentHash)
if err != nil {
return 0, err
}
parent := rawTx.MsgTx()
balance += btcutil.Amount(
parent.TxOut[in.PreviousOutPoint.Index].Value,
)
}

for _, out := range tx.TxOut {
balance -= btcutil.Amount(out.Value)
}

return balance, nil
}

// channelSubscription houses the proxied update and error chans for a node's
// channel subscriptions.
type channelSubscription struct {
updateChan chan *lnrpc.ChannelEventUpdate
errChan chan error
quit chan struct{}
}

// subscribeChannelNotifications subscribes to channel updates and launches a
// goroutine that forwards these to the returned channel.
func subscribeChannelNotifications(ctxb context.Context, t *harnessTest,
node *lntest.HarnessNode) channelSubscription {

// We'll first start by establishing a notification client which will
// send us notifications upon channels becoming active, inactive or
// closed.
req := &lnrpc.ChannelEventSubscription{}
ctx, cancelFunc := context.WithCancel(ctxb)

chanUpdateClient, err := node.SubscribeChannelEvents(ctx, req)
if err != nil {
t.Fatalf("unable to create channel update client: %v", err)
}

// We'll launch a goroutine that will be responsible for proxying all
// notifications recv'd from the client into the channel below.
errChan := make(chan error, 1)
quit := make(chan struct{})
chanUpdates := make(chan *lnrpc.ChannelEventUpdate, 20)
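// The proxy goroutine below re-checks the quit channel after each
// blocking Recv, mirroring the invoice subscription pattern above: a
// stream torn down at shutdown exits cleanly rather than forwarding a
// spurious error to the test.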
go func() {
defer cancelFunc()
for {
select {
case <-quit:
return
default:
chanUpdate, err := chanUpdateClient.Recv()
select {
case <-quit:
return
default:
}

if err == io.EOF {
return
} else if err != nil {
select {
case errChan <- err:
case <-quit:
}
return
}

select {
case chanUpdates <- chanUpdate:
case <-quit:
return
}
}
}
}()

return channelSubscription{
updateChan: chanUpdates,
errChan: errChan,
quit: quit,
}
}

// findTxAtHeight gets all of the transactions that a node's wallet has a
// record of at the target height, and finds and returns the tx with the
// target txid, returning nil if it is not found.
func findTxAtHeight(ctx context.Context, t *harnessTest, height int32,
target string, node *lntest.HarnessNode) *lnrpc.Transaction {

txns, err := node.LightningClient.GetTransactions(
ctx, &lnrpc.GetTransactionsRequest{
StartHeight: height,
EndHeight: height,
},
)
require.NoError(t.t, err, "could not get transactions")

for _, tx := range txns.Transactions {
if tx.TxHash == target {
return tx
}
}

return nil
}