Merge pull request #4103 from halseth/itest-multi-hop-move

[itests] multi hop test move
This commit is contained in:
Johan T. Halseth 2020-03-24 09:37:53 +01:00 committed by GitHub
commit 8623bfac22
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
8 changed files with 1188 additions and 979 deletions
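The refactor splits the multi-hop HTLC itests into per-scenario test functions that share a single signature and are driven by one parent test, once per commitment type. The sketch below is not part of the diff; it only condenses the pattern visible in the files that follow (createThreeHopNetwork, shutdownAndAssert and commitType all appear there), and the name testMultiHopExampleScenario is hypothetical.

// Each moved test now implements this shared signature, receiving the
// Alice and Bob nodes and the commitment type from the driver instead of
// reading net.Alice/net.Bob directly.
func testMultiHopExampleScenario(net *lntest.NetworkHarness, t *harnessTest,
    alice, bob *lntest.HarnessNode, c commitType) {

    // The shared helper wires up Alice -> Bob -> Carol from the given
    // nodes and commit type; the boolean controls whether Carol runs in
    // hodl mode.
    aliceChanPoint, bobChanPoint, carol := createThreeHopNetwork(
        t, net, alice, bob, false, c,
    )
    defer shutdownAndAssert(net, t, carol)

    // A real scenario would exercise aliceChanPoint and bobChanPoint here.
    _, _ = aliceChanPoint, bobChanPoint
}

The driver, testMultiHopHtlcClaims, creates fresh Alice and Bob nodes for each commitment type and runs every scenario registered in its subTests table against them, as shown further down in this diff.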

View File

@@ -18,17 +18,19 @@ import (
)
// testMultiHopHtlcLocalChainClaim tests that in a multi-hop HTLC scenario, if
// we're forced to go to chain with an incoming HTLC, then when we find out the
// preimage via the witness beacon, we properly settle the HTLC on-chain in
// order to ensure we don't lose any funds.
func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest) {
// we force close a channel with an incoming HTLC, and later find out the
// preimage via the witness beacon, we properly settle the HTLC on-chain using
// the HTLC success transaction in order to ensure we don't lose any funds.
func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest,
alice, bob *lntest.HarnessNode, c commitType) {
ctxb := context.Background()
// First, we'll create a three hop network: Alice -> Bob -> Carol, with
// Carol refusing to actually settle or directly cancel any HTLC's
// herself.
aliceChanPoint, bobChanPoint, carol := createThreeHopNetwork(
t, net, false,
t, net, alice, bob, false, c,
)
// Clean up carol's node when the test finishes.
@@ -59,7 +61,7 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest)
ctx, cancel := context.WithCancel(ctxb)
defer cancel()
alicePayStream, err := net.Alice.SendPayment(ctx)
alicePayStream, err := alice.SendPayment(ctx)
if err != nil {
t.Fatalf("unable to create payment stream for alice: %v", err)
}
@@ -73,7 +75,7 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest)
// At this point, all 3 nodes should now have an active channel with
// the created HTLC pending on all of them.
var predErr error
nodes := []*lntest.HarnessNode{net.Alice, net.Bob, carol}
nodes := []*lntest.HarnessNode{alice, bob, carol}
err = wait.Predicate(func() bool {
predErr = assertActiveHtlcs(nodes, payHash[:])
if predErr != nil {
@@ -94,7 +96,7 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest)
// At this point, Bob decides that he wants to exit the channel
// immediately, so he force closes his commitment transaction.
ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout)
bobForceClose := closeChannelAndAssert(ctxt, t, net, net.Bob,
bobForceClose := closeChannelAndAssert(ctxt, t, net, bob,
aliceChanPoint, true)
// Alice will sweep her output immediately.
@@ -105,7 +107,7 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest)
}
// Suspend Bob to force Carol to go to chain.
restartBob, err := net.SuspendNode(net.Bob)
restartBob, err := net.SuspendNode(bob)
if err != nil {
t.Fatalf("unable to suspend bob: %v", err)
}
@@ -197,7 +199,7 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest)
// At this point we suspend Alice to make sure she'll handle the
// on-chain settle after a restart.
restartAlice, err := net.SuspendNode(net.Alice)
restartAlice, err := net.SuspendNode(alice)
if err != nil {
t.Fatalf("unable to suspend alice: %v", err)
}
@@ -239,7 +241,7 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest)
pendingChansRequest := &lnrpc.PendingChannelsRequest{}
err = wait.Predicate(func() bool {
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
pendingChanResp, err := net.Bob.PendingChannels(
pendingChanResp, err := bob.PendingChannels(
ctxt, pendingChansRequest,
)
if err != nil {
@@ -337,7 +339,7 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest)
err = wait.Predicate(func() bool {
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
pendingChanResp, err := net.Bob.PendingChannels(
pendingChanResp, err := bob.PendingChannels(
ctxt, pendingChansRequest,
)
if err != nil {
@@ -352,7 +354,7 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest)
}
req := &lnrpc.ListChannelsRequest{}
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
chanInfo, err := net.Bob.ListChannels(ctxt, req)
chanInfo, err := bob.ListChannels(ctxt, req)
if err != nil {
predErr = fmt.Errorf("unable to query for open "+
"channels: %v", err)
@@ -411,90 +413,9 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest)
// succeeded.
ctxt, _ = context.WithTimeout(ctxt, defaultTimeout)
err = checkPaymentStatus(
ctxt, net.Alice, preimage, lnrpc.Payment_SUCCEEDED,
ctxt, alice, preimage, lnrpc.Payment_SUCCEEDED,
)
if err != nil {
t.Fatalf(err.Error())
}
}
// waitForInvoiceAccepted waits until the specified invoice has moved to the
// accepted state on the given node.
func waitForInvoiceAccepted(t *harnessTest, node *lntest.HarnessNode,
payHash lntypes.Hash) {
ctx, cancel := context.WithTimeout(context.Background(), defaultTimeout)
defer cancel()
invoiceUpdates, err := node.SubscribeSingleInvoice(ctx,
&invoicesrpc.SubscribeSingleInvoiceRequest{
RHash: payHash[:],
},
)
if err != nil {
t.Fatalf("subscribe single invoice: %v", err)
}
for {
update, err := invoiceUpdates.Recv()
if err != nil {
t.Fatalf("invoice update err: %v", err)
}
if update.State == lnrpc.Invoice_ACCEPTED {
break
}
}
}
// checkPaymentStatus asserts that the given node lists a payment with the
// given preimage and that it has the expected status.
func checkPaymentStatus(ctxt context.Context, node *lntest.HarnessNode,
preimage lntypes.Preimage, status lnrpc.Payment_PaymentStatus) error {
req := &lnrpc.ListPaymentsRequest{
IncludeIncomplete: true,
}
paymentsResp, err := node.ListPayments(ctxt, req)
if err != nil {
return fmt.Errorf("error when obtaining Alice payments: %v",
err)
}
payHash := preimage.Hash()
var found bool
for _, p := range paymentsResp.Payments {
if p.PaymentHash != payHash.String() {
continue
}
found = true
if p.Status != status {
return fmt.Errorf("expected payment status "+
"%v, got %v", status, p.Status)
}
switch status {
// If this expected status is SUCCEEDED, we expect the final preimage.
case lnrpc.Payment_SUCCEEDED:
if p.PaymentPreimage != preimage.String() {
return fmt.Errorf("preimage doesn't match: %v vs %v",
p.PaymentPreimage, preimage.String())
}
// Otherwise we expect an all-zero preimage.
default:
if p.PaymentPreimage != (lntypes.Preimage{}).String() {
return fmt.Errorf("expected zero preimage, got %v",
p.PaymentPreimage)
}
}
}
if !found {
return fmt.Errorf("payment with payment hash %v not found "+
"in response", payHash)
}
return nil
}

View File

@@ -0,0 +1,253 @@
// +build rpctest
package itest
import (
"context"
"fmt"
"time"
"github.com/btcsuite/btcd/wire"
"github.com/btcsuite/btcutil"
"github.com/davecgh/go-spew/spew"
"github.com/lightningnetwork/lnd"
"github.com/lightningnetwork/lnd/lnrpc"
"github.com/lightningnetwork/lnd/lntest"
"github.com/lightningnetwork/lnd/lntest/wait"
)
// testMultiHopHtlcLocalTimeout tests that in a multi-hop HTLC scenario, if the
// outgoing HTLC is about to time out, then we'll go to chain in order to claim
// it using the HTLC timeout transaction. Any dust HTLC's should be immediately
// canceled backwards. Once the timeout has been reached, then we should sweep
// it on-chain, and cancel the HTLC backwards.
func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest,
alice, bob *lntest.HarnessNode, c commitType) {
ctxb := context.Background()
// First, we'll create a three hop network: Alice -> Bob -> Carol, with
// Carol refusing to actually settle or directly cancel any HTLC's
// herself.
aliceChanPoint, bobChanPoint, carol := createThreeHopNetwork(
t, net, alice, bob, true, c,
)
// Clean up carol's node when the test finishes.
defer shutdownAndAssert(net, t, carol)
time.Sleep(time.Second * 1)
// Now that our channels are set up, we'll send two HTLC's from Alice
// to Carol. The first HTLC will be universally considered "dust",
// while the second will be a proper fully valued HTLC.
const (
dustHtlcAmt = btcutil.Amount(100)
htlcAmt = btcutil.Amount(30000)
finalCltvDelta = 40
)
ctx, cancel := context.WithCancel(ctxb)
defer cancel()
alicePayStream, err := alice.SendPayment(ctx)
if err != nil {
t.Fatalf("unable to create payment stream for alice: %v", err)
}
// We'll create two random payment hashes unknown to carol, then send
// each of them by manually specifying the HTLC details.
carolPubKey := carol.PubKey[:]
dustPayHash := makeFakePayHash(t)
payHash := makeFakePayHash(t)
err = alicePayStream.Send(&lnrpc.SendRequest{
Dest: carolPubKey,
Amt: int64(dustHtlcAmt),
PaymentHash: dustPayHash,
FinalCltvDelta: finalCltvDelta,
})
if err != nil {
t.Fatalf("unable to send alice htlc: %v", err)
}
err = alicePayStream.Send(&lnrpc.SendRequest{
Dest: carolPubKey,
Amt: int64(htlcAmt),
PaymentHash: payHash,
FinalCltvDelta: finalCltvDelta,
})
if err != nil {
t.Fatalf("unable to send alice htlc: %v", err)
}
// Verify that all nodes in the path now have two HTLC's with the
// proper parameters.
var predErr error
nodes := []*lntest.HarnessNode{alice, bob, carol}
err = wait.Predicate(func() bool {
predErr = assertActiveHtlcs(nodes, dustPayHash, payHash)
if predErr != nil {
return false
}
return true
}, time.Second*15)
if err != nil {
t.Fatalf("htlc mismatch: %v", predErr)
}
// We'll now mine enough blocks to trigger Bob's broadcast of his
// commitment transaction due to the fact that the HTLC is about to
// timeout. With the default outgoing broadcast delta of zero, this will
// be the same height as the htlc expiry height.
numBlocks := padCLTV(
uint32(finalCltvDelta - lnd.DefaultOutgoingBroadcastDelta),
)
if _, err := net.Miner.Node.Generate(numBlocks); err != nil {
t.Fatalf("unable to generate blocks: %v", err)
}
// Bob's force close transaction should now be found in the mempool.
bobFundingTxid, err := lnd.GetChanPointFundingTxid(bobChanPoint)
if err != nil {
t.Fatalf("unable to get txid: %v", err)
}
closeTxid, err := waitForTxInMempool(net.Miner.Node, minerMempoolTimeout)
if err != nil {
t.Fatalf("unable to find closing txid: %v", err)
}
assertSpendingTxInMempool(
t, net.Miner.Node, minerMempoolTimeout, wire.OutPoint{
Hash: *bobFundingTxid,
Index: bobChanPoint.OutputIndex,
},
)
// Mine a block to confirm the closing transaction.
mineBlocks(t, net, 1, 1)
// At this point, Bob should have canceled backwards the dust HTLC
// that we sent earlier. This means Alice should now only have a single
// HTLC on her channel.
nodes = []*lntest.HarnessNode{alice}
err = wait.Predicate(func() bool {
predErr = assertActiveHtlcs(nodes, payHash)
if predErr != nil {
return false
}
return true
}, time.Second*15)
if err != nil {
t.Fatalf("htlc mismatch: %v", predErr)
}
// With the closing transaction confirmed, we should expect Bob's HTLC
// timeout transaction to be broadcast due to the expiry being reached.
htlcTimeout, err := waitForTxInMempool(net.Miner.Node, minerMempoolTimeout)
if err != nil {
t.Fatalf("unable to find bob's htlc timeout tx: %v", err)
}
// We'll mine the remaining blocks in order to generate the sweep
// transaction of Bob's commitment output.
mineBlocks(t, net, defaultCSV, 1)
assertSpendingTxInMempool(
t, net.Miner.Node, minerMempoolTimeout, wire.OutPoint{
Hash: *closeTxid,
Index: 1,
},
)
// Bob's pending channel report should show that he has a commitment
// output awaiting sweeping, and also that there's an outgoing HTLC
// output pending.
pendingChansRequest := &lnrpc.PendingChannelsRequest{}
ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
pendingChanResp, err := bob.PendingChannels(ctxt, pendingChansRequest)
if err != nil {
t.Fatalf("unable to query for pending channels: %v", err)
}
if len(pendingChanResp.PendingForceClosingChannels) == 0 {
t.Fatalf("bob should have pending for close chan but doesn't")
}
forceCloseChan := pendingChanResp.PendingForceClosingChannels[0]
if forceCloseChan.LimboBalance == 0 {
t.Fatalf("bob should have nonzero limbo balance instead "+
"has: %v", forceCloseChan.LimboBalance)
}
if len(forceCloseChan.PendingHtlcs) == 0 {
t.Fatalf("bob should have pending htlc but doesn't")
}
// Now we'll mine an additional block, which should confirm Bob's commit
// sweep. This block should also prompt Bob to broadcast his second
// layer sweep due to the CSV on the HTLC timeout output.
mineBlocks(t, net, 1, 1)
assertSpendingTxInMempool(
t, net.Miner.Node, minerMempoolTimeout, wire.OutPoint{
Hash: *htlcTimeout,
Index: 0,
},
)
// The block should have confirmed Bob's HTLC timeout transaction.
// Therefore, at this point, there should be no active HTLC's on the
// commitment transaction from Alice -> Bob.
nodes = []*lntest.HarnessNode{alice}
err = wait.Predicate(func() bool {
predErr = assertNumActiveHtlcs(nodes, 0)
if predErr != nil {
return false
}
return true
}, time.Second*15)
if err != nil {
t.Fatalf("alice's channel still has active htlc's: %v", predErr)
}
// At this point, Bob should show that the pending HTLC has advanced to
// the second stage and is to be swept.
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
pendingChanResp, err = bob.PendingChannels(ctxt, pendingChansRequest)
if err != nil {
t.Fatalf("unable to query for pending channels: %v", err)
}
forceCloseChan = pendingChanResp.PendingForceClosingChannels[0]
if forceCloseChan.PendingHtlcs[0].Stage != 2 {
t.Fatalf("bob's htlc should have advanced to the second stage: %v", err)
}
// Next, we'll mine a final block that should confirm the second-layer
// sweeping transaction.
if _, err := net.Miner.Node.Generate(1); err != nil {
t.Fatalf("unable to generate blocks: %v", err)
}
// Once this transaction has been confirmed, Bob should detect that he
// no longer has any pending channels.
err = wait.Predicate(func() bool {
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
pendingChanResp, err = bob.PendingChannels(ctxt, pendingChansRequest)
if err != nil {
predErr = fmt.Errorf("unable to query for pending "+
"channels: %v", err)
return false
}
if len(pendingChanResp.PendingForceClosingChannels) != 0 {
predErr = fmt.Errorf("bob still has pending "+
"channels but shouldn't: %v",
spew.Sdump(pendingChanResp))
return false
}
return true
}, time.Second*15)
if err != nil {
t.Fatalf(predErr.Error())
}
ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout)
closeChannelAndAssert(ctxt, t, net, alice, aliceChanPoint, false)
}

View File

@@ -20,17 +20,20 @@ import (
// testMultiHopReceiverChainClaim tests that in the multi-hop setting, if the
// receiver of an HTLC knows the preimage, but wasn't able to settle the HTLC
// off-chain, then it goes on chain to claim the HTLC. In this scenario, the
// node that sent the outgoing HTLC should extract the preimage from the sweep
// transaction, and finish settling the HTLC backwards into the route.
func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest) {
// off-chain, then it goes on chain to claim the HTLC using the HTLC success
// transaction. In this scenario, the node that sent the outgoing HTLC should
// extract the preimage from the sweep transaction, and finish settling the
// HTLC backwards into the route.
func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest,
alice, bob *lntest.HarnessNode, c commitType) {
ctxb := context.Background()
// First, we'll create a three hop network: Alice -> Bob -> Carol, with
// Carol refusing to actually settle or directly cancel any HTLC's
// herself.
aliceChanPoint, bobChanPoint, carol := createThreeHopNetwork(
t, net, false,
t, net, alice, bob, false, c,
)
// Clean up carol's node when the test finishes.
@@ -61,7 +64,7 @@ func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest)
ctx, cancel := context.WithCancel(ctxb)
defer cancel()
alicePayStream, err := net.Alice.SendPayment(ctx)
alicePayStream, err := alice.SendPayment(ctx)
if err != nil {
t.Fatalf("unable to create payment stream for alice: %v", err)
}
@@ -75,7 +78,7 @@ func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest)
// At this point, all 3 nodes should now have an active channel with
// the created HTLC pending on all of them.
var predErr error
nodes := []*lntest.HarnessNode{net.Alice, net.Bob, carol}
nodes := []*lntest.HarnessNode{alice, bob, carol}
err = wait.Predicate(func() bool {
predErr = assertActiveHtlcs(nodes, payHash[:])
if predErr != nil {
@@ -93,7 +96,7 @@ func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest)
// hop logic.
waitForInvoiceAccepted(t, carol, payHash)
restartBob, err := net.SuspendNode(net.Bob)
restartBob, err := net.SuspendNode(bob)
if err != nil {
t.Fatalf("unable to suspend bob: %v", err)
}
@@ -230,7 +233,7 @@ func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest)
// Once the second-level transaction confirmed, Bob should have
// extracted the preimage from the chain, and sent it back to Alice,
// clearing the HTLC off-chain.
nodes = []*lntest.HarnessNode{net.Alice}
nodes = []*lntest.HarnessNode{alice}
err = wait.Predicate(func() bool {
predErr = assertNumActiveHtlcs(nodes, 0)
if predErr != nil {
@@ -302,7 +305,7 @@ func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest)
// succeeded.
ctxt, _ = context.WithTimeout(ctxt, defaultTimeout)
err = checkPaymentStatus(
ctxt, net.Alice, preimage, lnrpc.Payment_SUCCEEDED,
ctxt, alice, preimage, lnrpc.Payment_SUCCEEDED,
)
if err != nil {
t.Fatalf(err.Error())
@@ -311,5 +314,5 @@ func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest)
// We'll close out the channel between Alice and Bob, then shutdown
// carol to conclude the test.
ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout)
closeChannelAndAssert(ctxt, t, net, net.Alice, aliceChanPoint, false)
closeChannelAndAssert(ctxt, t, net, alice, aliceChanPoint, false)
}

View File

@@ -20,15 +20,18 @@ import (
// testMultiHopHtlcRemoteChainClaim tests that in the multi-hop HTLC scenario,
// if the remote party goes to chain while we have an incoming HTLC, then when
// we find out the preimage via the witness beacon, we properly settle the
// HTLC on-chain in order to ensure that we don't lose any funds.
func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest) {
// HTLC directly on-chain using the preimage in order to ensure that we don't
// lose any funds.
func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest,
alice, bob *lntest.HarnessNode, c commitType) {
ctxb := context.Background()
// First, we'll create a three hop network: Alice -> Bob -> Carol, with
// Carol refusing to actually settle or directly cancel any HTLC's
// herself.
aliceChanPoint, bobChanPoint, carol := createThreeHopNetwork(
t, net, false,
t, net, alice, bob, false, c,
)
// Clean up carol's node when the test finishes.
@@ -58,7 +61,7 @@ func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest
ctx, cancel := context.WithCancel(ctxb)
defer cancel()
alicePayStream, err := net.Alice.SendPayment(ctx)
alicePayStream, err := alice.SendPayment(ctx)
if err != nil {
t.Fatalf("unable to create payment stream for alice: %v", err)
}
@@ -72,7 +75,7 @@ func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest
// At this point, all 3 nodes should now have an active channel with
// the created HTLC pending on all of them.
var predErr error
nodes := []*lntest.HarnessNode{net.Alice, net.Bob, carol}
nodes := []*lntest.HarnessNode{alice, bob, carol}
err = wait.Predicate(func() bool {
predErr = assertActiveHtlcs(nodes, payHash[:])
if predErr != nil {
@@ -94,12 +97,12 @@ func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest
// immediately force close the channel by broadcasting her commitment
// transaction.
ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout)
aliceForceClose := closeChannelAndAssert(ctxt, t, net, net.Alice,
aliceForceClose := closeChannelAndAssert(ctxt, t, net, alice,
aliceChanPoint, true)
// Wait for the channel to be marked pending force close.
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
err = waitForChannelPendingForceClose(ctxt, net.Alice, aliceChanPoint)
err = waitForChannelPendingForceClose(ctxt, alice, aliceChanPoint)
if err != nil {
t.Fatalf("channel not pending force close: %v", err)
}
@@ -118,7 +121,7 @@ func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest
}
// Suspend bob, so Carol is forced to go on chain.
restartBob, err := net.SuspendNode(net.Bob)
restartBob, err := net.SuspendNode(bob)
if err != nil {
t.Fatalf("unable to suspend bob: %v", err)
}
@@ -255,7 +258,7 @@ func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest
pendingChansRequest := &lnrpc.PendingChannelsRequest{}
err = wait.Predicate(func() bool {
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
pendingChanResp, err := net.Bob.PendingChannels(
pendingChanResp, err := bob.PendingChannels(
ctxt, pendingChansRequest,
)
if err != nil {
@@ -337,7 +340,7 @@ func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest
// succeeded.
ctxt, _ = context.WithTimeout(ctxt, defaultTimeout)
err = checkPaymentStatus(
ctxt, net.Alice, preimage, lnrpc.Payment_SUCCEEDED,
ctxt, alice, preimage, lnrpc.Payment_SUCCEEDED,
)
if err != nil {
t.Fatalf(err.Error())

View File

@@ -0,0 +1,280 @@
// +build rpctest
package itest
import (
"context"
"fmt"
"time"
"github.com/btcsuite/btcutil"
"github.com/davecgh/go-spew/spew"
"github.com/lightningnetwork/lnd/lnrpc"
"github.com/lightningnetwork/lnd/lntest"
"github.com/lightningnetwork/lnd/lntest/wait"
)
// testMultiHopLocalForceCloseOnChainHtlcTimeout tests that in a multi-hop HTLC
// scenario, if the node that extended the HTLC to the final node closes their
// commitment on-chain early, then it eventually recognizes this HTLC as one
// that's timed out. At this point, the node should timeout the HTLC using the
// HTLC timeout transaction, then cancel it backwards as normal.
func testMultiHopLocalForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness,
t *harnessTest, alice, bob *lntest.HarnessNode, c commitType) {
ctxb := context.Background()
// First, we'll create a three hop network: Alice -> Bob -> Carol, with
// Carol refusing to actually settle or directly cancel any HTLC's
// herself.
aliceChanPoint, bobChanPoint, carol := createThreeHopNetwork(
t, net, alice, bob, true, c,
)
// Clean up carol's node when the test finishes.
defer shutdownAndAssert(net, t, carol)
// With our channels set up, we'll then send a single HTLC from Alice
// to Carol. As Carol is in hodl mode, she won't settle this HTLC which
// opens up the base for our tests.
const (
finalCltvDelta = 40
htlcAmt = btcutil.Amount(30000)
)
ctx, cancel := context.WithCancel(ctxb)
defer cancel()
alicePayStream, err := alice.SendPayment(ctx)
if err != nil {
t.Fatalf("unable to create payment stream for alice: %v", err)
}
// We'll now send a single HTLC across our multi-hop network.
carolPubKey := carol.PubKey[:]
payHash := makeFakePayHash(t)
err = alicePayStream.Send(&lnrpc.SendRequest{
Dest: carolPubKey,
Amt: int64(htlcAmt),
PaymentHash: payHash,
FinalCltvDelta: finalCltvDelta,
})
if err != nil {
t.Fatalf("unable to send alice htlc: %v", err)
}
// Once the HTLC has cleared, all channels in our mini network should
// have it locked in.
var predErr error
nodes := []*lntest.HarnessNode{alice, bob, carol}
err = wait.Predicate(func() bool {
predErr = assertActiveHtlcs(nodes, payHash)
if predErr != nil {
return false
}
return true
}, time.Second*15)
if err != nil {
t.Fatalf("htlc mismatch: %v", err)
}
// Now that all parties have the HTLC locked in, we'll immediately
// force close the Bob -> Carol channel. This should trigger contract
// resolution mode for both of them.
ctxt, _ := context.WithTimeout(ctxb, channelCloseTimeout)
closeChannelAndAssert(ctxt, t, net, bob, bobChanPoint, true)
// At this point, Bob should have a pending force close channel as he
// just went to chain.
pendingChansRequest := &lnrpc.PendingChannelsRequest{}
err = wait.Predicate(func() bool {
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
pendingChanResp, err := bob.PendingChannels(ctxt,
pendingChansRequest)
if err != nil {
predErr = fmt.Errorf("unable to query for pending "+
"channels: %v", err)
return false
}
if len(pendingChanResp.PendingForceClosingChannels) == 0 {
predErr = fmt.Errorf("bob should have pending for " +
"close chan but doesn't")
return false
}
forceCloseChan := pendingChanResp.PendingForceClosingChannels[0]
if forceCloseChan.LimboBalance == 0 {
predErr = fmt.Errorf("bob should have nonzero limbo "+
"balance instead has: %v",
forceCloseChan.LimboBalance)
return false
}
return true
}, time.Second*15)
if err != nil {
t.Fatalf(predErr.Error())
}
// We'll mine defaultCSV blocks in order to generate the sweep transaction
// of Bob's funding output.
if _, err := net.Miner.Node.Generate(defaultCSV); err != nil {
t.Fatalf("unable to generate blocks: %v", err)
}
_, err = waitForTxInMempool(net.Miner.Node, minerMempoolTimeout)
if err != nil {
t.Fatalf("unable to find bob's funding output sweep tx: %v", err)
}
// We'll now mine enough blocks for the HTLC to expire. After this, Bob
// should hand off the now expired HTLC output to the utxo nursery.
numBlocks := padCLTV(uint32(finalCltvDelta - defaultCSV - 1))
if _, err := net.Miner.Node.Generate(numBlocks); err != nil {
t.Fatalf("unable to generate blocks: %v", err)
}
// Bob's pending channel report should show that he has a single HTLC
// that's now in stage one.
err = wait.Predicate(func() bool {
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
pendingChanResp, err := bob.PendingChannels(
ctxt, pendingChansRequest,
)
if err != nil {
predErr = fmt.Errorf("unable to query for pending "+
"channels: %v", err)
return false
}
if len(pendingChanResp.PendingForceClosingChannels) == 0 {
predErr = fmt.Errorf("bob should have pending force " +
"close chan but doesn't")
return false
}
forceCloseChan := pendingChanResp.PendingForceClosingChannels[0]
if len(forceCloseChan.PendingHtlcs) != 1 {
predErr = fmt.Errorf("bob should have pending htlc " +
"but doesn't")
return false
}
if forceCloseChan.PendingHtlcs[0].Stage != 1 {
predErr = fmt.Errorf("bob's htlc should have "+
"advanced to the first stage: %v", err)
return false
}
return true
}, time.Second*15)
if err != nil {
t.Fatalf("bob didn't hand off time-locked HTLC: %v", predErr)
}
// We should also now find a transaction in the mempool, as Bob should
// have broadcast his second layer timeout transaction.
timeoutTx, err := waitForTxInMempool(net.Miner.Node, minerMempoolTimeout)
if err != nil {
t.Fatalf("unable to find bob's htlc timeout tx: %v", err)
}
// Next, we'll mine an additional block. This should serve to confirm
// the second layer timeout transaction.
block := mineBlocks(t, net, 1, 1)[0]
assertTxInBlock(t, block, timeoutTx)
// With the second layer timeout transaction confirmed, Bob should have
// canceled backwards the HTLC that he extended to Carol.
nodes = []*lntest.HarnessNode{alice}
err = wait.Predicate(func() bool {
predErr = assertNumActiveHtlcs(nodes, 0)
if predErr != nil {
return false
}
return true
}, time.Second*15)
if err != nil {
t.Fatalf("alice's channel still has active htlc's: %v", predErr)
}
// Additionally, Bob should now show that HTLC as being advanced to the
// second stage.
err = wait.Predicate(func() bool {
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
pendingChanResp, err := bob.PendingChannels(
ctxt, pendingChansRequest,
)
if err != nil {
predErr = fmt.Errorf("unable to query for pending "+
"channels: %v", err)
return false
}
if len(pendingChanResp.PendingForceClosingChannels) == 0 {
predErr = fmt.Errorf("bob should have pending for " +
"close chan but doesn't")
return false
}
forceCloseChan := pendingChanResp.PendingForceClosingChannels[0]
if len(forceCloseChan.PendingHtlcs) != 1 {
predErr = fmt.Errorf("bob should have pending htlc " +
"but doesn't")
return false
}
if forceCloseChan.PendingHtlcs[0].Stage != 2 {
predErr = fmt.Errorf("bob's htlc should have "+
"advanced to the second stage: %v", err)
return false
}
return true
}, time.Second*15)
if err != nil {
t.Fatalf("bob didn't hand off time-locked HTLC: %v", predErr)
}
// We'll now mine 4 additional blocks. This should be enough for Bob's
// CSV timelock to expire and the sweeping transaction of the HTLC to be
// broadcast.
if _, err := net.Miner.Node.Generate(defaultCSV); err != nil {
t.Fatalf("unable to mine blocks: %v", err)
}
sweepTx, err := waitForTxInMempool(net.Miner.Node, minerMempoolTimeout)
if err != nil {
t.Fatalf("unable to find bob's htlc sweep tx: %v", err)
}
// We'll then mine a final block which should confirm this second layer
// sweep transaction.
block = mineBlocks(t, net, 1, 1)[0]
assertTxInBlock(t, block, sweepTx)
// At this point, Bob should no longer show any channels as pending
// close.
err = wait.Predicate(func() bool {
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
pendingChanResp, err := bob.PendingChannels(
ctxt, pendingChansRequest,
)
if err != nil {
predErr = fmt.Errorf("unable to query for pending "+
"channels: %v", err)
return false
}
if len(pendingChanResp.PendingForceClosingChannels) != 0 {
predErr = fmt.Errorf("bob still has pending channels "+
"but shouldn't: %v", spew.Sdump(pendingChanResp))
return false
}
return true
}, time.Second*15)
if err != nil {
t.Fatalf(predErr.Error())
}
ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout)
closeChannelAndAssert(ctxt, t, net, alice, aliceChanPoint, false)
}
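Several of the moved tests repeat the same polling block: query the force-closing node's PendingChannels until its single pending HTLC reports the expected stage. The helper below is only a condensed sketch of that recurring pattern; assertPendingHtlcStage is a hypothetical name and is not part of this diff.

// assertPendingHtlcStage polls the node's pending channel report until the
// first force-closing channel carries exactly one HTLC at the given stage,
// failing the test if that doesn't happen within the usual 15 second window.
func assertPendingHtlcStage(ctxb context.Context, t *harnessTest,
    node *lntest.HarnessNode, stage uint32) {

    req := &lnrpc.PendingChannelsRequest{}

    var predErr error
    err := wait.Predicate(func() bool {
        ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout)
        defer cancel()

        resp, err := node.PendingChannels(ctxt, req)
        if err != nil {
            predErr = fmt.Errorf("unable to query for pending "+
                "channels: %v", err)
            return false
        }
        if len(resp.PendingForceClosingChannels) == 0 {
            predErr = fmt.Errorf("node has no pending force " +
                "closing channels")
            return false
        }

        htlcs := resp.PendingForceClosingChannels[0].PendingHtlcs
        if len(htlcs) != 1 || htlcs[0].Stage != stage {
            predErr = fmt.Errorf("htlc not in stage %v: %v",
                stage, spew.Sdump(htlcs))
            return false
        }
        return true
    }, time.Second*15)
    if err != nil {
        t.Fatalf("htlc stage assertion failed: %v", predErr)
    }
}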

View File

@@ -0,0 +1,238 @@
// +build rpctest
package itest
import (
"context"
"fmt"
"time"
"github.com/btcsuite/btcutil"
"github.com/davecgh/go-spew/spew"
"github.com/lightningnetwork/lnd/lnrpc"
"github.com/lightningnetwork/lnd/lntest"
"github.com/lightningnetwork/lnd/lntest/wait"
)
// testMultiHopRemoteForceCloseOnChainHtlcTimeout tests that if we extend a
// multi-hop HTLC, and the final destination of the HTLC force closes the
// channel, then we properly timeout the HTLC directly on *their* commitment
// transaction once the timeout has expired. Once we sweep the transaction, we
// should also cancel back the initial HTLC.
func testMultiHopRemoteForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness,
t *harnessTest, alice, bob *lntest.HarnessNode, c commitType) {
ctxb := context.Background()
// First, we'll create a three hop network: Alice -> Bob -> Carol, with
// Carol refusing to actually settle or directly cancel any HTLC's
// herself.
aliceChanPoint, bobChanPoint, carol := createThreeHopNetwork(
t, net, alice, bob, true, c,
)
// Clean up carol's node when the test finishes.
defer shutdownAndAssert(net, t, carol)
// With our channels set up, we'll then send a single HTLC from Alice
// to Carol. As Carol is in hodl mode, she won't settle this HTLC which
// opens up the base for our tests.
const (
finalCltvDelta = 40
htlcAmt = btcutil.Amount(30000)
)
ctx, cancel := context.WithCancel(ctxb)
defer cancel()
alicePayStream, err := alice.SendPayment(ctx)
if err != nil {
t.Fatalf("unable to create payment stream for alice: %v", err)
}
// We'll now send a single HTLC across our multi-hop network.
carolPubKey := carol.PubKey[:]
payHash := makeFakePayHash(t)
err = alicePayStream.Send(&lnrpc.SendRequest{
Dest: carolPubKey,
Amt: int64(htlcAmt),
PaymentHash: payHash,
FinalCltvDelta: finalCltvDelta,
})
if err != nil {
t.Fatalf("unable to send alice htlc: %v", err)
}
// Once the HTLC has cleared, all the nodes in our mini network should
// show that the HTLC has been locked in.
var predErr error
nodes := []*lntest.HarnessNode{alice, bob, carol}
err = wait.Predicate(func() bool {
predErr = assertActiveHtlcs(nodes, payHash)
if predErr != nil {
return false
}
return true
}, time.Second*15)
if err != nil {
t.Fatalf("htlc mismatch: %v", predErr)
}
// At this point, we'll now instruct Carol to force close the
// channel. This will let us exercise that Bob is able to sweep the
// expired HTLC on Carol's version of the commitment transaction.
ctxt, _ := context.WithTimeout(ctxb, channelCloseTimeout)
closeChannelAndAssert(ctxt, t, net, carol, bobChanPoint, true)
// At this point, Bob should have a pending force close channel as
// Carol has gone directly to chain.
pendingChansRequest := &lnrpc.PendingChannelsRequest{}
err = wait.Predicate(func() bool {
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
pendingChanResp, err := bob.PendingChannels(
ctxt, pendingChansRequest,
)
if err != nil {
predErr = fmt.Errorf("unable to query for "+
"pending channels: %v", err)
return false
}
if len(pendingChanResp.PendingForceClosingChannels) == 0 {
predErr = fmt.Errorf("bob should have pending " +
"force close channels but doesn't")
return false
}
return true
}, time.Second*15)
if err != nil {
t.Fatalf(predErr.Error())
}
// Bob can sweep his output immediately.
_, err = waitForTxInMempool(net.Miner.Node, minerMempoolTimeout)
if err != nil {
t.Fatalf("unable to find bob's funding output sweep tx: %v",
err)
}
// Next, we'll mine enough blocks for the HTLC to expire. At this
// point, Bob should hand off the output to his internal utxo nursery,
// which will broadcast a sweep transaction.
numBlocks := padCLTV(finalCltvDelta - 1)
if _, err := net.Miner.Node.Generate(numBlocks); err != nil {
t.Fatalf("unable to generate blocks: %v", err)
}
// If we check Bob's pending channel report, it should show that he has
// a single HTLC that's now in the second stage, skipping the initial
// first stage since this is a direct HTLC.
err = wait.Predicate(func() bool {
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
pendingChanResp, err := bob.PendingChannels(
ctxt, pendingChansRequest,
)
if err != nil {
predErr = fmt.Errorf("unable to query for pending "+
"channels: %v", err)
return false
}
if len(pendingChanResp.PendingForceClosingChannels) == 0 {
predErr = fmt.Errorf("bob should have pending for " +
"close chan but doesn't")
return false
}
forceCloseChan := pendingChanResp.PendingForceClosingChannels[0]
if len(forceCloseChan.PendingHtlcs) != 1 {
predErr = fmt.Errorf("bob should have pending htlc " +
"but doesn't")
return false
}
if forceCloseChan.PendingHtlcs[0].Stage != 2 {
predErr = fmt.Errorf("bob's htlc should have "+
"advanced to the second stage: %v", err)
return false
}
return true
}, time.Second*15)
if err != nil {
t.Fatalf("bob didn't hand off time-locked HTLC: %v", predErr)
}
// Bob's sweeping transaction should now be found in the mempool at
// this point.
sweepTx, err := waitForTxInMempool(net.Miner.Node, minerMempoolTimeout)
if err != nil {
// If Bob's transaction isn't yet in the mempool, then due to
// internal message passing and the low period between blocks
// being mined, it may have been detected as a late
// registration. As a result, we'll mine another block and
// repeat the check. If it doesn't go through this time, then
// we'll fail.
// TODO(halseth): can we use waitForChannelPendingForceClose to
// avoid this hack?
if _, err := net.Miner.Node.Generate(1); err != nil {
t.Fatalf("unable to generate block: %v", err)
}
sweepTx, err = waitForTxInMempool(net.Miner.Node, minerMempoolTimeout)
if err != nil {
t.Fatalf("unable to find bob's sweeping transaction: "+
"%v", err)
}
}
// If we mine an additional block, then this should confirm Bob's
// transaction which sweeps the direct HTLC output.
block := mineBlocks(t, net, 1, 1)[0]
assertTxInBlock(t, block, sweepTx)
// Now that the sweeping transaction has been confirmed, Bob should
// cancel back that HTLC. As a result, Alice should not know of any
// active HTLC's.
nodes = []*lntest.HarnessNode{alice}
err = wait.Predicate(func() bool {
predErr = assertNumActiveHtlcs(nodes, 0)
if predErr != nil {
return false
}
return true
}, time.Second*15)
if err != nil {
t.Fatalf("alice's channel still has active htlc's: %v", predErr)
}
// Now we'll check Bob's pending channel report. Since this was Carol's
// commitment, he doesn't have to wait for any CSV delays. As a result,
// he should show no additional pending transactions.
err = wait.Predicate(func() bool {
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
pendingChanResp, err := bob.PendingChannels(
ctxt, pendingChansRequest,
)
if err != nil {
predErr = fmt.Errorf("unable to query for pending "+
"channels: %v", err)
return false
}
if len(pendingChanResp.PendingForceClosingChannels) != 0 {
predErr = fmt.Errorf("bob still has pending channels "+
"but shouldn't: %v", spew.Sdump(pendingChanResp))
return false
}
return true
}, time.Second*15)
if err != nil {
t.Fatalf(predErr.Error())
}
// We'll close out the test by closing the channel from Alice to Bob,
// and then shutting down the new node we created as it's no longer
// needed.
ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout)
closeChannelAndAssert(ctxt, t, net, alice, aliceChanPoint, false)
}

View File

@@ -0,0 +1,284 @@
// +build rpctest
package itest
import (
"context"
"fmt"
"testing"
"github.com/btcsuite/btcutil"
"github.com/lightningnetwork/lnd/lnrpc"
"github.com/lightningnetwork/lnd/lnrpc/invoicesrpc"
"github.com/lightningnetwork/lnd/lntest"
"github.com/lightningnetwork/lnd/lntypes"
)
func testMultiHopHtlcClaims(net *lntest.NetworkHarness, t *harnessTest) {
type testCase struct {
name string
test func(net *lntest.NetworkHarness, t *harnessTest, alice,
bob *lntest.HarnessNode, c commitType)
}
subTests := []testCase{
{
// bob: outgoing our commit timeout
// carol: incoming their commit watch and see timeout
name: "local force close immediate expiry",
test: testMultiHopHtlcLocalTimeout,
},
{
// bob: outgoing watch and see, they sweep on chain
// carol: incoming our commit, know preimage
name: "receiver chain claim",
test: testMultiHopReceiverChainClaim,
},
{
// bob: outgoing our commit watch and see timeout
// carol: incoming their commit watch and see timeout
name: "local force close on-chain htlc timeout",
test: testMultiHopLocalForceCloseOnChainHtlcTimeout,
},
{
// bob: outgoing their commit watch and see timeout
// carol: incoming our commit watch and see timeout
name: "remote force close on-chain htlc timeout",
test: testMultiHopRemoteForceCloseOnChainHtlcTimeout,
},
{
// bob: outgoing our commit watch and see, they sweep on chain
// bob: incoming our commit watch and learn preimage
// carol: incoming their commit know preimage
name: "local chain claim",
test: testMultiHopHtlcLocalChainClaim,
},
{
// bob: outgoing their commit watch and see, they sweep on chain
// bob: incoming their commit watch and learn preimage
// carol: incoming our commit know preimage
name: "remote chain claim",
test: testMultiHopHtlcRemoteChainClaim,
},
}
commitTypes := []commitType{
commitTypeLegacy,
}
for _, commitType := range commitTypes {
testName := fmt.Sprintf("committype=%v", commitType.String())
success := t.t.Run(testName, func(t *testing.T) {
ht := newHarnessTest(t, net)
args := commitType.Args()
alice, err := net.NewNode("Alice", args)
if err != nil {
t.Fatalf("unable to create new node: %v", err)
}
defer shutdownAndAssert(net, ht, alice)
bob, err := net.NewNode("Bob", args)
if err != nil {
t.Fatalf("unable to create new node: %v", err)
}
defer shutdownAndAssert(net, ht, bob)
ctxb := context.Background()
ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
if err := net.ConnectNodes(ctxt, alice, bob); err != nil {
t.Fatalf("unable to connect alice to bob: %v", err)
}
for _, subTest := range subTests {
subTest := subTest
success := ht.t.Run(subTest.name, func(t *testing.T) {
ht := newHarnessTest(t, net)
subTest.test(net, ht, alice, bob, commitType)
})
if !success {
return
}
}
})
if !success {
return
}
}
}
// waitForInvoiceAccepted waits until the specified invoice has moved to the
// accepted state on the given node.
func waitForInvoiceAccepted(t *harnessTest, node *lntest.HarnessNode,
payHash lntypes.Hash) {
ctx, cancel := context.WithTimeout(context.Background(), defaultTimeout)
defer cancel()
invoiceUpdates, err := node.SubscribeSingleInvoice(ctx,
&invoicesrpc.SubscribeSingleInvoiceRequest{
RHash: payHash[:],
},
)
if err != nil {
t.Fatalf("subscribe single invoice: %v", err)
}
for {
update, err := invoiceUpdates.Recv()
if err != nil {
t.Fatalf("invoice update err: %v", err)
}
if update.State == lnrpc.Invoice_ACCEPTED {
break
}
}
}
// checkPaymentStatus asserts that the given node lists a payment with the
// given preimage and that it has the expected status.
func checkPaymentStatus(ctxt context.Context, node *lntest.HarnessNode,
preimage lntypes.Preimage, status lnrpc.Payment_PaymentStatus) error {
req := &lnrpc.ListPaymentsRequest{
IncludeIncomplete: true,
}
paymentsResp, err := node.ListPayments(ctxt, req)
if err != nil {
return fmt.Errorf("error when obtaining Alice payments: %v",
err)
}
payHash := preimage.Hash()
var found bool
for _, p := range paymentsResp.Payments {
if p.PaymentHash != payHash.String() {
continue
}
found = true
if p.Status != status {
return fmt.Errorf("expected payment status "+
"%v, got %v", status, p.Status)
}
switch status {
// If this expected status is SUCCEEDED, we expect the final preimage.
case lnrpc.Payment_SUCCEEDED:
if p.PaymentPreimage != preimage.String() {
return fmt.Errorf("preimage doesn't match: %v vs %v",
p.PaymentPreimage, preimage.String())
}
// Otherwise we expect an all-zero preimage.
default:
if p.PaymentPreimage != (lntypes.Preimage{}).String() {
return fmt.Errorf("expected zero preimage, got %v",
p.PaymentPreimage)
}
}
}
if !found {
return fmt.Errorf("payment with payment hash %v not found "+
"in response", payHash)
}
return nil
}
func createThreeHopNetwork(t *harnessTest, net *lntest.NetworkHarness,
alice, bob *lntest.HarnessNode, carolHodl bool, c commitType) (
*lnrpc.ChannelPoint, *lnrpc.ChannelPoint, *lntest.HarnessNode) {
ctxb := context.Background()
ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
err := net.EnsureConnected(ctxt, alice, bob)
if err != nil {
t.Fatalf("unable to connect peers: %v", err)
}
ctxt, _ = context.WithTimeout(context.Background(), defaultTimeout)
err = net.SendCoins(ctxt, btcutil.SatoshiPerBitcoin, alice)
if err != nil {
t.Fatalf("unable to send coins to Alice: %v", err)
}
ctxt, _ = context.WithTimeout(context.Background(), defaultTimeout)
err = net.SendCoins(ctxt, btcutil.SatoshiPerBitcoin, bob)
if err != nil {
t.Fatalf("unable to send coins to Bob: %v", err)
}
// We'll start the test by creating a channel between Alice and Bob,
// which will act as the first leg for our multi-hop HTLC.
const chanAmt = 1000000
ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout)
aliceChanPoint := openChannelAndAssert(
ctxt, t, net, alice, bob,
lntest.OpenChannelParams{
Amt: chanAmt,
},
)
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
err = alice.WaitForNetworkChannelOpen(ctxt, aliceChanPoint)
if err != nil {
t.Fatalf("alice didn't report channel: %v", err)
}
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
err = bob.WaitForNetworkChannelOpen(ctxt, aliceChanPoint)
if err != nil {
t.Fatalf("bob didn't report channel: %v", err)
}
// Next, we'll create a new node "carol" and have Bob connect to her. If
// the carolHodl flag is set, we'll make carol always hold onto the
// HTLC. This way, Bob will be forced to go to chain to resolve the HTLC.
carolFlags := c.Args()
if carolHodl {
carolFlags = append(carolFlags, "--hodl.exit-settle")
}
carol, err := net.NewNode("Carol", carolFlags)
if err != nil {
t.Fatalf("unable to create new node: %v", err)
}
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
if err := net.ConnectNodes(ctxt, bob, carol); err != nil {
t.Fatalf("unable to connect bob to carol: %v", err)
}
// We'll then create a channel from Bob to Carol. After this channel is
// open, our topology looks like: A -> B -> C.
ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout)
bobChanPoint := openChannelAndAssert(
ctxt, t, net, bob, carol,
lntest.OpenChannelParams{
Amt: chanAmt,
},
)
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
err = bob.WaitForNetworkChannelOpen(ctxt, bobChanPoint)
if err != nil {
t.Fatalf("bob didn't report channel: %v", err)
}
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
err = carol.WaitForNetworkChannelOpen(ctxt, bobChanPoint)
if err != nil {
t.Fatalf("carol didn't report channel: %v", err)
}
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
err = alice.WaitForNetworkChannelOpen(ctxt, bobChanPoint)
if err != nil {
t.Fatalf("alice didn't report channel: %v", err)
}
return aliceChanPoint, bobChanPoint, carol
}
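With the shared signature and createThreeHopNetwork helper in place, adding another multi-hop scenario only requires appending an entry to the subTests table in testMultiHopHtlcClaims. A minimal sketch, reusing the hypothetical scenario named near the top of this page:

    subTests := []testCase{
        // ... existing entries ...
        {
            // bob/carol behaviour would be annotated here, as in the
            // entries above.
            name: "example scenario",
            test: testMultiHopExampleScenario,
        },
    }

Each entry is then run once per value in commitTypes, so enabling an additional commitment type there automatically exercises every registered scenario.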

File diff suppressed because it is too large