Merge pull request #5756 from yyforyongyu/itest-1-run-ctx
itest: refactor context management inside harness node and net
This commit is contained in:
commit c75c3bd847

4	Makefile
@@ -199,6 +199,10 @@ itest-parallel: build-itest db-instance
 	rm -rf lntest/itest/*.log lntest/itest/.logs-*; date
 	EXEC_SUFFIX=$(EXEC_SUFFIX) echo "$$(seq 0 $$(expr $(ITEST_PARALLELISM) - 1))" | xargs -P $(ITEST_PARALLELISM) -n 1 -I {} scripts/itest_part.sh {} $(NUM_ITEST_TRANCHES) $(TEST_FLAGS) $(ITEST_FLAGS)
 
+itest-clean:
+	@$(call print, "Cleaning old itest processes")
+	killall lnd-itest || echo "no running lnd-itest process found";
+
 unit: btcd
 	@$(call print, "Running unit tests.")
 	$(UNIT)
@@ -6,6 +6,12 @@
 for running lnd alongside a bitcoind service is now provided in
 `contrib/init/lnd.service`.
 
+## Code Health
+
+### Code cleanup, refactor, typo fixes
+
+* [Refactored itest to better manage contexts inside integration tests](https://github.com/lightningnetwork/lnd/pull/5756).
+
 # Contributors (Alphabetical Order)
 
 * Daniel McNally
@@ -39,7 +39,9 @@ const DefaultCSV = 4
 type NodeOption func(*NodeConfig)
 
 // NetworkHarness is an integration testing harness for the lightning network.
-// The harness by default is created with two active nodes on the network:
+// Building on top of HarnessNode, it is responsible for handling interactions
+// among different nodes. The harness by default is created with two active
+// nodes on the network:
 // Alice and Bob.
 type NetworkHarness struct {
 	netParams *chaincfg.Params
@@ -51,8 +53,8 @@ type NetworkHarness struct {
 	// compiled with all required itest flags.
 	lndBinary string
 
-	// Miner is a reference to a running full node that can be used to create
-	// new blocks on the network.
+	// Miner is a reference to a running full node that can be used to
+	// create new blocks on the network.
 	Miner *rpctest.Harness
 
 	// BackendCfg houses the information necessary to use a node as LND
@@ -79,19 +81,15 @@ type NetworkHarness struct {
 	// lnd.
 	feeService *feeService
 
-	quit chan struct{}
+	// runCtx is a context with cancel method. It's used to signal when the
+	// node needs to quit, and used as the parent context when spawning
+	// children contexts for RPC requests.
+	runCtx context.Context
+	cancel context.CancelFunc
 
 	mtx sync.Mutex
 }
 
-type DatabaseBackend int
-
-const (
-	BackendBbolt DatabaseBackend = iota
-	BackendEtcd
-	BackendPostgres
-)
-
 // NewNetworkHarness creates a new network test harness.
 // TODO(roasbeef): add option to use golang's build library to a binary of the
 // current repo. This will save developers from having to manually `go install`
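The runCtx/cancel pair added here is the core of the refactor: the harness owns one long-lived context, derives short-lived children from it for individual RPC calls, and cancels it exactly once on shutdown. A minimal, hypothetical sketch of that lifecycle (a stand-in `Harness` type and `doRPC` helper, not lnd's actual `NetworkHarness` API):

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// Harness is a stand-in for NetworkHarness: it owns one long-lived
// runCtx that is cancelled exactly once, in Stop.
type Harness struct {
	runCtx context.Context
	cancel context.CancelFunc
}

func NewHarness() *Harness {
	ctx, cancel := context.WithCancel(context.Background())
	return &Harness{runCtx: ctx, cancel: cancel}
}

// doRPC derives a short-lived child context from runCtx, so the call is
// bounded by both the per-call timeout and the harness lifetime.
func (h *Harness) doRPC(timeout time.Duration) error {
	ctxt, cancel := context.WithTimeout(h.runCtx, timeout)
	defer cancel()

	select {
	case <-ctxt.Done():
		return ctxt.Err()
	case <-time.After(10 * time.Millisecond): // pretend the RPC finished
		return nil
	}
}

// Stop cancels runCtx, which aborts every child context still in flight.
func (h *Harness) Stop() { h.cancel() }

func main() {
	h := NewHarness()
	fmt.Println(h.doRPC(time.Second)) // <nil>
	h.Stop()
	fmt.Println(h.doRPC(time.Second)) // context canceled
}
```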
@@ -101,6 +99,8 @@ func NewNetworkHarness(r *rpctest.Harness, b BackendConfig, lndBinary string,
 
 	feeService := startFeeService()
 
+	ctxt, cancel := context.WithCancel(context.Background())
+
 	n := NetworkHarness{
 		activeNodes: make(map[int]*HarnessNode),
 		nodesByPub:  make(map[string]*HarnessNode),
@@ -109,7 +109,8 @@ func NewNetworkHarness(r *rpctest.Harness, b BackendConfig, lndBinary string,
 		Miner:       r,
 		BackendCfg:  b,
 		feeService:  feeService,
-		quit:        make(chan struct{}),
+		runCtx:      ctxt,
+		cancel:      cancel,
 		lndBinary:   lndBinary,
 		dbBackend:   dbBackend,
 	}
@@ -138,17 +139,6 @@ func (n *NetworkHarness) ProcessErrors() <-chan error {
 	return n.lndErrorChan
 }
 
-// fakeLogger is a fake grpclog.Logger implementation. This is used to stop
-// grpc's logger from printing directly to stdout.
-type fakeLogger struct{}
-
-func (f *fakeLogger) Fatal(args ...interface{})                 {}
-func (f *fakeLogger) Fatalf(format string, args ...interface{}) {}
-func (f *fakeLogger) Fatalln(args ...interface{})               {}
-func (f *fakeLogger) Print(args ...interface{})                 {}
-func (f *fakeLogger) Printf(format string, args ...interface{}) {}
-func (f *fakeLogger) Println(args ...interface{})               {}
-
 // SetUp starts the initial seeder nodes within the test harness. The initial
 // node's wallets will be funded wallets with ten 1 BTC outputs each. Finally
 // rpc clients capable of communicating with the initial seeder nodes are
|
||||
|
||||
// Swap out grpc's default logger with out fake logger which drops the
|
||||
// statements on the floor.
|
||||
grpclog.SetLogger(&fakeLogger{})
|
||||
fakeLogger := grpclog.NewLoggerV2(io.Discard, io.Discard, io.Discard)
|
||||
grpclog.SetLoggerV2(fakeLogger)
|
||||
n.currentTestCase = testCase
|
||||
|
||||
// Start the initial seeder nodes within the test network, then connect
|
||||
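The change above swaps the deprecated `grpclog.Logger`/`SetLogger` API for the `LoggerV2` interface; routing all three severity writers to `io.Discard` silences gRPC's internal logging without defining a custom logger type. A small, hedged usage example of the same call in isolation (assumes the `google.golang.org/grpc` module is available):

```go
package main

import (
	"io"

	"google.golang.org/grpc/grpclog"
)

func main() {
	// Send gRPC's info, warning and error output to io.Discard so the
	// library itself logs nothing; fatal messages still terminate.
	grpclog.SetLoggerV2(grpclog.NewLoggerV2(io.Discard, io.Discard, io.Discard))
}
```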
@@ -184,7 +175,6 @@ func (n *NetworkHarness) SetUp(t *testing.T,
 	// First, make a connection between the two nodes. This will wait until
 	// both nodes are fully started since the Connect RPC is guarded behind
 	// the server.Started() flag that waits for all subsystems to be ready.
-	ctxb := context.Background()
 	n.ConnectNodes(t, n.Alice, n.Bob)
 
 	// Load up the wallets of the seeder nodes with 10 outputs of 1 BTC
@@ -195,7 +185,7 @@ func (n *NetworkHarness) SetUp(t *testing.T,
 	clients := []lnrpc.LightningClient{n.Alice, n.Bob}
 	for _, client := range clients {
 		for i := 0; i < 10; i++ {
-			resp, err := client.NewAddress(ctxb, addrReq)
+			resp, err := client.NewAddress(n.runCtx, addrReq)
 			if err != nil {
 				return err
 			}
@@ -226,12 +216,10 @@ func (n *NetworkHarness) SetUp(t *testing.T,
 	}
 
 	// Now we want to wait for the nodes to catch up.
-	ctxt, cancel := context.WithTimeout(ctxb, DefaultTimeout)
-	defer cancel()
-	if err := n.Alice.WaitForBlockchainSync(ctxt); err != nil {
+	if err := n.Alice.WaitForBlockchainSync(); err != nil {
 		return err
 	}
-	if err := n.Bob.WaitForBlockchainSync(ctxt); err != nil {
+	if err := n.Bob.WaitForBlockchainSync(); err != nil {
 		return err
 	}
 
@@ -245,11 +233,11 @@ out:
 	for {
 		select {
 		case <-balanceTicker.C:
-			aliceResp, err := n.Alice.WalletBalance(ctxb, balReq)
+			aliceResp, err := n.Alice.WalletBalance(n.runCtx, balReq)
 			if err != nil {
 				return err
 			}
-			bobResp, err := n.Bob.WalletBalance(ctxb, balReq)
+			bobResp, err := n.Bob.WalletBalance(n.runCtx, balReq)
 			if err != nil {
 				return err
 			}
@@ -280,7 +268,7 @@ func (n *NetworkHarness) TearDown() error {
 // Stop stops the test harness.
 func (n *NetworkHarness) Stop() {
 	close(n.lndErrorChan)
-	close(n.quit)
+	n.cancel()
 
 	n.feeService.stop()
 }
@@ -380,8 +368,6 @@ func (n *NetworkHarness) newNodeWithSeed(name string, extraArgs []string,
 		return nil, nil, nil, err
 	}
 
-	ctxb := context.Background()
-
 	// Create a request to generate a new aezeed. The new seed will have the
 	// same password as the internal wallet.
 	genSeedReq := &lnrpc.GenSeedRequest{
@@ -389,7 +375,7 @@ func (n *NetworkHarness) newNodeWithSeed(name string, extraArgs []string,
 		SeedEntropy: entropy,
 	}
 
-	ctxt, cancel := context.WithTimeout(ctxb, DefaultTimeout)
+	ctxt, cancel := context.WithTimeout(n.runCtx, DefaultTimeout)
 	defer cancel()
 
 	var genSeedResp *lnrpc.GenSeedResponse
@@ -411,7 +397,7 @@ func (n *NetworkHarness) newNodeWithSeed(name string, extraArgs []string,
 
 	// Pass the init request via rpc to finish unlocking the node. This will
 	// also initialize the macaroon-authenticated LightningClient.
-	response, err := node.Init(ctxb, initReq)
+	response, err := node.Init(initReq)
 	if err != nil {
 		return nil, nil, nil, err
 	}
@@ -437,8 +423,6 @@ func (n *NetworkHarness) NewNodeRemoteSigner(name string, extraArgs []string,
 		return nil, err
 	}
 
-	ctxb := context.Background()
-
 	// With the seed created, construct the init request to the node,
 	// including the newly generated seed.
 	initReq := &lnrpc.InitWalletRequest{
@@ -448,7 +432,7 @@ func (n *NetworkHarness) NewNodeRemoteSigner(name string, extraArgs []string,
 
 	// Pass the init request via rpc to finish unlocking the node. This will
 	// also initialize the macaroon-authenticated LightningClient.
-	_, err = node.Init(ctxb, initReq)
+	_, err = node.Init(initReq)
 	if err != nil {
 		return nil, err
 	}
@@ -486,7 +470,7 @@ func (n *NetworkHarness) RestoreNodeWithSeed(name string, extraArgs []string,
 		ChannelBackups: chanBackups,
 	}
 
-	_, err = node.Init(context.Background(), initReq)
+	_, err = node.Init(initReq)
 	if err != nil {
 		return nil, err
 	}
@@ -590,8 +574,7 @@ tryconnect:
 // been made, the method will block until the two nodes appear in each other's
 // peers list, or until the 15s timeout expires.
 func (n *NetworkHarness) EnsureConnected(t *testing.T, a, b *HarnessNode) {
-	ctxb := context.Background()
-	ctx, cancel := context.WithTimeout(ctxb, DefaultTimeout*2)
+	ctx, cancel := context.WithTimeout(n.runCtx, DefaultTimeout*2)
 	defer cancel()
 
 	// errConnectionRequested is used to signal that a connection was
@@ -728,8 +711,7 @@ func (n *NetworkHarness) ConnectNodesPerm(t *testing.T,
 func (n *NetworkHarness) connectNodes(t *testing.T, a, b *HarnessNode,
 	perm bool) {
 
-	ctxb := context.Background()
-	ctx, cancel := context.WithTimeout(ctxb, DefaultTimeout)
+	ctx, cancel := context.WithTimeout(n.runCtx, DefaultTimeout)
 	defer cancel()
 
 	bobInfo, err := b.GetInfo(ctx, &lnrpc.GetInfoRequest{})
@@ -780,8 +762,7 @@ func (n *NetworkHarness) connectNodes(t *testing.T, a, b *HarnessNode,
 // DisconnectNodes disconnects node a from node b by sending RPC message
 // from a node to b node
 func (n *NetworkHarness) DisconnectNodes(a, b *HarnessNode) error {
-	ctxb := context.Background()
-	ctx, cancel := context.WithTimeout(ctxb, DefaultTimeout)
+	ctx, cancel := context.WithTimeout(n.runCtx, DefaultTimeout)
 	defer cancel()
 
 	bobInfo, err := b.GetInfo(ctx, &lnrpc.GetInfoRequest{})
@@ -836,15 +817,13 @@ func (n *NetworkHarness) RestartNode(node *HarnessNode, callback func() error,
 		unlockReq.RecoveryWindow = 1000
 	}
 
-	if err := node.Unlock(context.Background(), unlockReq); err != nil {
+	if err := node.Unlock(unlockReq); err != nil {
 		return err
 	}
 
-	// Give the node some time to catch up with the chain before we continue
-	// with the tests.
-	ctxc, done := context.WithTimeout(context.Background(), DefaultTimeout)
-	defer done()
-	return node.WaitForBlockchainSync(ctxc)
+	// Give the node some time to catch up with the chain before we
+	// continue with the tests.
+	return node.WaitForBlockchainSync()
 }
 
 // RestartNodeNoUnlock attempts to restart a lightning node by shutting it down
@@ -973,23 +952,19 @@ func saveProfilesPage(node *HarnessNode) error {
 // waitForTxInMempool blocks until the target txid is seen in the mempool. If
 // the transaction isn't seen within the network before the passed timeout,
 // then an error is returned.
-func (n *NetworkHarness) waitForTxInMempool(ctx context.Context,
-	txid chainhash.Hash) error {
-
-	// Return immediately if harness has been torn down.
-	select {
-	case <-n.quit:
-		return fmt.Errorf("NetworkHarness has been torn down")
-	default:
-	}
-
+func (n *NetworkHarness) waitForTxInMempool(txid chainhash.Hash) error {
 	ticker := time.NewTicker(50 * time.Millisecond)
 	defer ticker.Stop()
 
+	ctxt, cancel := context.WithTimeout(n.runCtx, DefaultTimeout)
+	defer cancel()
+
 	var mempool []*chainhash.Hash
 	for {
 		select {
-		case <-ctx.Done():
+		case <-n.runCtx.Done():
 			return fmt.Errorf("NetworkHarness has been torn down")
+		case <-ctxt.Done():
 			return fmt.Errorf("wanted %v, found %v txs "+
 				"in mempool: %v", txid, len(mempool), mempool)
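The rewritten polling loop now distinguishes two ways of giving up: the harness-wide `runCtx` being cancelled (teardown) versus a per-call timeout derived from it. A stripped-down sketch of that shape with placeholder names (`waitForCondition` and its timeouts are invented for illustration, not lnd helpers):

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// waitForCondition polls until check() returns true, the per-call timeout
// expires, or the long-lived runCtx is cancelled.
func waitForCondition(runCtx context.Context, timeout time.Duration,
	check func() bool) error {

	ctxt, cancel := context.WithTimeout(runCtx, timeout)
	defer cancel()

	ticker := time.NewTicker(50 * time.Millisecond)
	defer ticker.Stop()

	for {
		select {
		case <-runCtx.Done():
			return fmt.Errorf("harness has been torn down")
		case <-ctxt.Done():
			return fmt.Errorf("condition not met within %v", timeout)
		case <-ticker.C:
			if check() {
				return nil
			}
		}
	}
}

func main() {
	runCtx, cancel := context.WithCancel(context.Background())
	defer cancel()

	start := time.Now()
	err := waitForCondition(runCtx, time.Second, func() bool {
		return time.Since(start) > 200*time.Millisecond
	})
	fmt.Println(err) // <nil>: the condition flipped before the timeout
}
```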
@@ -1055,20 +1030,14 @@ type OpenChannelParams struct {
 func (n *NetworkHarness) OpenChannel(srcNode, destNode *HarnessNode,
 	p OpenChannelParams) (lnrpc.Lightning_OpenChannelClient, error) {
 
-	ctxb := context.Background()
-	// The cancel is intentionally left out here because the returned
-	// item(open channel client) relies on the context being active. This
-	// will be fixed once we finish refactoring the NetworkHarness.
-	ctx, _ := context.WithTimeout(ctxb, ChannelOpenTimeout) // nolint: govet
-
 	// Wait until srcNode and destNode have the latest chain synced.
 	// Otherwise, we may run into a check within the funding manager that
 	// prevents any funding workflows from being kicked off if the chain
 	// isn't yet synced.
-	if err := srcNode.WaitForBlockchainSync(ctx); err != nil {
+	if err := srcNode.WaitForBlockchainSync(); err != nil {
 		return nil, fmt.Errorf("unable to sync srcNode chain: %v", err)
 	}
-	if err := destNode.WaitForBlockchainSync(ctx); err != nil {
+	if err := destNode.WaitForBlockchainSync(); err != nil {
 		return nil, fmt.Errorf("unable to sync destNode chain: %v", err)
 	}
 
@@ -1091,7 +1060,9 @@ func (n *NetworkHarness) OpenChannel(srcNode, destNode *HarnessNode,
 		CommitmentType: p.CommitmentType,
 	}
 
-	respStream, err := srcNode.OpenChannel(ctx, openReq)
+	// We need to use n.runCtx here to keep the response stream alive after
+	// the function is returned.
+	respStream, err := srcNode.OpenChannel(n.runCtx, openReq)
 	if err != nil {
 		return nil, fmt.Errorf("unable to open channel between "+
 			"alice and bob: %v", err)
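The comment introduced above is worth spelling out: a gRPC client stream dies as soon as its context does, so a helper that returns a stream must not hand it a context it cancels on the way out; the long-lived `runCtx` is used instead. A hedged illustration of the pitfall with a toy stream type (nothing here is lnd's real API):

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// stream stands in for a gRPC client stream: it stays usable only while
// its context is alive.
type stream struct{ ctx context.Context }

func (s *stream) Recv() error { return s.ctx.Err() }

func newStream(ctx context.Context) *stream { return &stream{ctx: ctx} }

// wrong derives a short-lived context and cancels it on return, so the
// caller gets back an already-dying stream.
func wrong() *stream {
	ctxt, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel() // fires before the caller ever uses the stream
	return newStream(ctxt)
}

// right hands the stream a long-lived context (the harness runCtx in the
// diff above), so it survives after this helper returns.
func right(runCtx context.Context) *stream {
	return newStream(runCtx)
}

func main() {
	runCtx, cancel := context.WithCancel(context.Background())
	defer cancel()

	fmt.Println(wrong().Recv())       // context canceled
	fmt.Println(right(runCtx).Recv()) // <nil>
}
```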
@@ -1100,17 +1071,18 @@ func (n *NetworkHarness) OpenChannel(srcNode, destNode *HarnessNode,
 	chanOpen := make(chan struct{})
 	errChan := make(chan error)
 	go func() {
-		// Consume the "channel pending" update. This waits until the node
-		// notifies us that the final message in the channel funding workflow
-		// has been sent to the remote node.
+		// Consume the "channel pending" update. This waits until the
+		// node notifies us that the final message in the channel
+		// funding workflow has been sent to the remote node.
 		resp, err := respStream.Recv()
 		if err != nil {
 			errChan <- err
 			return
 		}
-		if _, ok := resp.Update.(*lnrpc.OpenStatusUpdate_ChanPending); !ok {
-			errChan <- fmt.Errorf("expected channel pending update, "+
-				"instead got %v", resp)
+		_, ok := resp.Update.(*lnrpc.OpenStatusUpdate_ChanPending)
+		if !ok {
+			errChan <- fmt.Errorf("expected channel pending: "+
+				"update, instead got %v", resp)
 			return
 		}
 
@@ -1118,7 +1090,7 @@ func (n *NetworkHarness) OpenChannel(srcNode, destNode *HarnessNode,
 	}()
 
 	select {
-	case <-ctx.Done():
+	case <-time.After(ChannelOpenTimeout):
 		return nil, fmt.Errorf("timeout reached before chan pending "+
 			"update sent: %v", err)
 	case err := <-errChan:
@@ -1128,23 +1100,19 @@ func (n *NetworkHarness) OpenChannel(srcNode, destNode *HarnessNode,
 	}
 }
 
-// OpenPendingChannel attempts to open a channel between srcNode and destNode with the
-// passed channel funding parameters. If the passed context has a timeout, then
-// if the timeout is reached before the channel pending notification is
-// received, an error is returned.
+// OpenPendingChannel attempts to open a channel between srcNode and destNode
+// with the passed channel funding parameters. If the passed context has a
+// timeout, then if the timeout is reached before the channel pending
+// notification is received, an error is returned.
 func (n *NetworkHarness) OpenPendingChannel(srcNode, destNode *HarnessNode,
 	amt btcutil.Amount,
 	pushAmt btcutil.Amount) (*lnrpc.PendingUpdate, error) {
 
-	ctxb := context.Background()
-	ctx, cancel := context.WithTimeout(ctxb, ChannelOpenTimeout)
-	defer cancel()
-
 	// Wait until srcNode and destNode have blockchain synced
-	if err := srcNode.WaitForBlockchainSync(ctx); err != nil {
+	if err := srcNode.WaitForBlockchainSync(); err != nil {
 		return nil, fmt.Errorf("unable to sync srcNode chain: %v", err)
 	}
-	if err := destNode.WaitForBlockchainSync(ctx); err != nil {
+	if err := destNode.WaitForBlockchainSync(); err != nil {
 		return nil, fmt.Errorf("unable to sync destNode chain: %v", err)
 	}
 
@@ -1155,7 +1123,9 @@ func (n *NetworkHarness) OpenPendingChannel(srcNode, destNode *HarnessNode,
 		Private: false,
 	}
 
-	respStream, err := srcNode.OpenChannel(ctx, openReq)
+	// We need to use n.runCtx here to keep the response stream alive after
+	// the function is returned.
+	respStream, err := srcNode.OpenChannel(n.runCtx, openReq)
 	if err != nil {
 		return nil, fmt.Errorf("unable to open channel between "+
 			"alice and bob: %v", err)
@@ -1164,9 +1134,9 @@ func (n *NetworkHarness) OpenPendingChannel(srcNode, destNode *HarnessNode,
 	chanPending := make(chan *lnrpc.PendingUpdate)
 	errChan := make(chan error)
 	go func() {
-		// Consume the "channel pending" update. This waits until the node
-		// notifies us that the final message in the channel funding workflow
-		// has been sent to the remote node.
+		// Consume the "channel pending" update. This waits until the
+		// node notifies us that the final message in the channel
+		// funding workflow has been sent to the remote node.
 		resp, err := respStream.Recv()
 		if err != nil {
 			errChan <- err
@@ -1174,8 +1144,8 @@ func (n *NetworkHarness) OpenPendingChannel(srcNode, destNode *HarnessNode,
 		}
 		pendingResp, ok := resp.Update.(*lnrpc.OpenStatusUpdate_ChanPending)
 		if !ok {
-			errChan <- fmt.Errorf("expected channel pending update, "+
-				"instead got %v", resp)
+			errChan <- fmt.Errorf("expected channel pending "+
+				"update, instead got %v", resp)
 			return
 		}
 
@@ -1183,7 +1153,7 @@ func (n *NetworkHarness) OpenPendingChannel(srcNode, destNode *HarnessNode,
 	}()
 
 	select {
-	case <-ctx.Done():
+	case <-time.After(ChannelOpenTimeout):
 		return nil, fmt.Errorf("timeout reached before chan pending " +
 			"update sent")
 	case err := <-errChan:
@@ -1198,10 +1168,10 @@ func (n *NetworkHarness) OpenPendingChannel(srcNode, destNode *HarnessNode,
 // has a timeout, then if the timeout is reached before the channel has been
 // opened, then an error is returned.
 func (n *NetworkHarness) WaitForChannelOpen(
-	openChanStream lnrpc.Lightning_OpenChannelClient) (*lnrpc.ChannelPoint, error) {
+	openChanStream lnrpc.Lightning_OpenChannelClient) (
+	*lnrpc.ChannelPoint, error) {
 
-	ctxb := context.Background()
-	ctx, cancel := context.WithTimeout(ctxb, ChannelOpenTimeout)
+	ctx, cancel := context.WithTimeout(n.runCtx, ChannelOpenTimeout)
 	defer cancel()
 
 	errChan := make(chan error)
@@ -1238,14 +1208,14 @@ func (n *NetworkHarness) WaitForChannelOpen(
 // has a timeout, an error is returned if that timeout is reached before the
 // channel close is pending.
 func (n *NetworkHarness) CloseChannel(lnNode *HarnessNode,
-	cp *lnrpc.ChannelPoint,
-	force bool) (lnrpc.Lightning_CloseChannelClient, *chainhash.Hash, error) {
+	cp *lnrpc.ChannelPoint, force bool) (lnrpc.Lightning_CloseChannelClient,
+	*chainhash.Hash, error) {
 
-	ctxb := context.Background()
-	// The cancel is intentionally left out here because the returned
-	// item(close channel client) relies on the context being active. This
-	// will be fixed once we finish refactoring the NetworkHarness.
-	ctx, _ := context.WithTimeout(ctxb, ChannelCloseTimeout) // nolint: govet
+	ctxt, cancel := context.WithTimeout(n.runCtx, ChannelCloseTimeout)
+	defer cancel()
 
 	// Create a channel outpoint that we can use to compare to channels
 	// from the ListChannelsResponse.
@@ -1273,7 +1243,7 @@ func (n *NetworkHarness) CloseChannel(lnNode *HarnessNode,
 	// not.
 	filterChannel := func(node *HarnessNode,
 		op wire.OutPoint) (*lnrpc.Channel, error) {
-		listResp, err := node.ListChannels(ctx, listReq)
+		listResp, err := node.ListChannels(ctxt, listReq)
 		if err != nil {
 			return nil, err
 		}
@@ -1298,7 +1268,8 @@ func (n *NetworkHarness) CloseChannel(lnNode *HarnessNode,
 	}
 
 	// Next, we'll fetch the target channel in order to get the
-	// harness node that will be receiving the channel close request.
+	// harness node that will be receiving the channel close
+	// request.
 	targetChan, err := filterChannel(lnNode, chanPoint)
 	if err != nil {
 		return nil, nil, err
@@ -1315,7 +1286,9 @@ func (n *NetworkHarness) CloseChannel(lnNode *HarnessNode,
 			return nil, nil, fmt.Errorf("channel of closing " +
 				"node not active in time")
 		}
-		err = wait.Predicate(activeChanPredicate(receivingNode), timeout)
+		err = wait.Predicate(
+			activeChanPredicate(receivingNode), timeout,
+		)
 		if err != nil {
 			return nil, nil, fmt.Errorf("channel of receiving " +
 				"node not active in time")
@@ -1331,7 +1304,9 @@ func (n *NetworkHarness) CloseChannel(lnNode *HarnessNode,
 		closeReq := &lnrpc.CloseChannelRequest{
 			ChannelPoint: cp, Force: force,
 		}
-		closeRespStream, err = lnNode.CloseChannel(ctx, closeReq)
+		// We need to use n.runCtx to keep the client stream alive
+		// after the function has returned.
+		closeRespStream, err = lnNode.CloseChannel(n.runCtx, closeReq)
 		if err != nil {
 			return fmt.Errorf("unable to close channel: %v", err)
 		}
@@ -1357,7 +1332,7 @@ func (n *NetworkHarness) CloseChannel(lnNode *HarnessNode,
 			return fmt.Errorf("unable to decode closeTxid: "+
 				"%v", err)
 		}
-		if err := n.waitForTxInMempool(ctx, *closeTxid); err != nil {
+		if err := n.waitForTxInMempool(*closeTxid); err != nil {
 			return fmt.Errorf("error while waiting for "+
 				"broadcast tx: %v", err)
 		}
@@ -1375,11 +1350,8 @@
 // passed context has a timeout, then if the timeout is reached before the
 // notification is received then an error is returned.
 func (n *NetworkHarness) WaitForChannelClose(
-	closeChanStream lnrpc.Lightning_CloseChannelClient) (*chainhash.Hash, error) {
-
-	ctxb := context.Background()
-	ctx, cancel := context.WithTimeout(ctxb, ChannelCloseTimeout)
-	defer cancel()
+	closeChanStream lnrpc.Lightning_CloseChannelClient) (
+	*chainhash.Hash, error) {
 
 	errChan := make(chan error)
 	updateChan := make(chan *lnrpc.CloseStatusUpdate_ChanClose)
@@ -1403,7 +1375,7 @@
 	// Wait until either the deadline for the context expires, an error
 	// occurs, or the channel close update is received.
 	select {
-	case <-ctx.Done():
+	case <-time.After(ChannelCloseTimeout):
 		return nil, fmt.Errorf("timeout reached before update sent")
 	case err := <-errChan:
 		return nil, err
@@ -1421,8 +1393,7 @@
 func (n *NetworkHarness) AssertChannelExists(node *HarnessNode,
 	chanPoint *wire.OutPoint, checks ...func(*lnrpc.Channel)) error {
 
-	ctxb := context.Background()
-	ctx, cancel := context.WithTimeout(ctxb, ChannelCloseTimeout)
+	ctx, cancel := context.WithTimeout(n.runCtx, ChannelCloseTimeout)
 	defer cancel()
 
 	req := &lnrpc.ListChannelsRequest{}
@@ -1519,8 +1490,7 @@ func (n *NetworkHarness) SendCoinsNP2WKH(t *testing.T, amt btcutil.Amount,
 func (n *NetworkHarness) sendCoins(amt btcutil.Amount, target *HarnessNode,
 	addrType lnrpc.AddressType, confirmed bool) error {
 
-	ctxb := context.Background()
-	ctx, cancel := context.WithTimeout(ctxb, DefaultTimeout)
+	ctx, cancel := context.WithTimeout(n.runCtx, DefaultTimeout)
 	defer cancel()
 
 	balReq := &lnrpc.WalletBalanceRequest{}
@@ -1783,13 +1783,9 @@ func assertChannelPolicyUpdate(t *testing.T, node *lntest.HarnessNode,
 	advertisingNode string, policy *lnrpc.RoutingPolicy,
 	chanPoint *lnrpc.ChannelPoint, includeUnannounced bool) {
 
-	ctxb := context.Background()
-	ctxt, cancel := context.WithTimeout(ctxb, lntest.DefaultTimeout)
-	defer cancel()
-
 	require.NoError(
 		t, node.WaitForChannelPolicyUpdate(
-			ctxt, advertisingNode, policy,
+			advertisingNode, policy,
 			chanPoint, includeUnannounced,
 		), "error while waiting for channel update",
 	)
@@ -245,11 +245,9 @@ func testSendPaymentAMPInvoiceRepeat(net *lntest.NetworkHarness,
 			Amt: chanAmt,
 		},
 	)
-	ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
-	err = carol.WaitForNetworkChannelOpen(ctxt, chanPoint)
+	err = carol.WaitForNetworkChannelOpen(chanPoint)
 	require.NoError(t.t, err, "carol didn't report channel")
-	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
-	err = dave.WaitForNetworkChannelOpen(ctxt, chanPoint)
+	err = dave.WaitForNetworkChannelOpen(chanPoint)
 	require.NoError(t.t, err, "dave didn't report channel")
 
 	// Create an AMP invoice of a trivial amount, that we'll pay repeatedly
@@ -982,12 +982,11 @@ func testChanRestoreScenario(t *harnessTest, net *lntest.NetworkHarness,
 	)
 
 	// Wait for both sides to see the opened channel.
-	ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
-	err = dave.WaitForNetworkChannelOpen(ctxt, chanPoint)
+	err = dave.WaitForNetworkChannelOpen(chanPoint)
 	if err != nil {
 		t.Fatalf("dave didn't report channel: %v", err)
 	}
-	err = carol.WaitForNetworkChannelOpen(ctxt, chanPoint)
+	err = carol.WaitForNetworkChannelOpen(chanPoint)
 	if err != nil {
 		t.Fatalf("carol didn't report channel: %v", err)
 	}
@@ -1327,11 +1326,9 @@ func createLegacyRevocationChannel(net *lntest.NetworkHarness, t *harnessTest,
 	}
 
 	_ = mineBlocks(t, net, 6, 1)
-	ctxt, cancel = context.WithTimeout(ctxb, defaultTimeout)
-	defer cancel()
-	err = from.WaitForNetworkChannelOpen(ctxt, chanPoint)
+	err = from.WaitForNetworkChannelOpen(chanPoint)
 	require.NoError(t.t, err)
-	err = to.WaitForNetworkChannelOpen(ctxt, chanPoint)
+	err = to.WaitForNetworkChannelOpen(chanPoint)
 	require.NoError(t.t, err)
 }
 
@@ -18,8 +18,6 @@ import (
 // testChannelBalance creates a new channel between Alice and Bob, then checks
 // channel balance to be equal amount specified while creation of channel.
 func testChannelBalance(net *lntest.NetworkHarness, t *harnessTest) {
-	ctxb := context.Background()
-
 	// Open a channel with 0.16 BTC between Alice and Bob, ensuring the
 	// channel has been opened properly.
 	amount := funding.MaxBtcFundingAmount
@@ -61,14 +59,12 @@ func testChannelBalance(net *lntest.NetworkHarness, t *harnessTest) {
 	)
 
 	// Wait for both Alice and Bob to recognize this new channel.
-	ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
-	err := net.Alice.WaitForNetworkChannelOpen(ctxt, chanPoint)
+	err := net.Alice.WaitForNetworkChannelOpen(chanPoint)
 	if err != nil {
 		t.Fatalf("alice didn't advertise channel before "+
 			"timeout: %v", err)
 	}
-	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
-	err = net.Bob.WaitForNetworkChannelOpen(ctxt, chanPoint)
+	err = net.Bob.WaitForNetworkChannelOpen(chanPoint)
 	if err != nil {
 		t.Fatalf("bob didn't advertise channel before "+
 			"timeout: %v", err)
@@ -155,15 +151,13 @@ func testChannelUnsettledBalance(net *lntest.NetworkHarness, t *harnessTest) {
 
 	// Wait for Alice and Carol to receive the channel edge from the
 	// funding manager.
-	ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
-	err := net.Alice.WaitForNetworkChannelOpen(ctxt, chanPointAlice)
+	err := net.Alice.WaitForNetworkChannelOpen(chanPointAlice)
 	if err != nil {
 		t.Fatalf("alice didn't see the alice->carol channel before "+
 			"timeout: %v", err)
 	}
 
-	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
-	err = carol.WaitForNetworkChannelOpen(ctxt, chanPointAlice)
+	err = carol.WaitForNetworkChannelOpen(chanPointAlice)
 	if err != nil {
 		t.Fatalf("alice didn't see the alice->carol channel before "+
 			"timeout: %v", err)
@@ -191,7 +185,7 @@ func testChannelUnsettledBalance(net *lntest.NetworkHarness, t *harnessTest) {
 	errChan := make(chan error)
 	for i := 0; i < numInvoices; i++ {
 		go func() {
-			ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
+			ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
 			_, err := net.Alice.RouterClient.SendPaymentV2(ctxt,
 				&routerrpc.SendPaymentRequest{
 					Dest: carolPubKey,
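The switch from `=` to `:=` above is needed because the shared outer `ctxt` no longer exists; it also means each goroutine derives its own context instead of all of them writing to one shared variable. A rough sketch of that pattern, with a dummy `send` standing in for `SendPaymentV2`:

```go
package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

// send stands in for an RPC such as SendPaymentV2: it respects the
// caller's context and otherwise "succeeds" after a short delay.
func send(ctx context.Context) error {
	select {
	case <-ctx.Done():
		return ctx.Err()
	case <-time.After(10 * time.Millisecond):
		return nil
	}
}

func main() {
	ctxb := context.Background()

	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		i := i
		wg.Add(1)
		go func() {
			defer wg.Done()

			// Each goroutine declares its own ctxt with :=, rather
			// than assigning to a variable shared across goroutines.
			ctxt, cancel := context.WithTimeout(ctxb, time.Second)
			defer cancel()

			fmt.Println(i, send(ctxt))
		}()
	}
	wg.Wait()
}
```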
@@ -335,13 +335,12 @@ func channelForceClosureTest(net *lntest.NetworkHarness, t *harnessTest,
 
 	// Wait for Alice and Carol to receive the channel edge from the
 	// funding manager.
-	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
-	err = alice.WaitForNetworkChannelOpen(ctxt, chanPoint)
+	err = alice.WaitForNetworkChannelOpen(chanPoint)
 	if err != nil {
 		t.Fatalf("alice didn't see the alice->carol channel before "+
 			"timeout: %v", err)
 	}
-	err = carol.WaitForNetworkChannelOpen(ctxt, chanPoint)
+	err = carol.WaitForNetworkChannelOpen(chanPoint)
 	if err != nil {
 		t.Fatalf("alice didn't see the alice->carol channel before "+
 			"timeout: %v", err)
@@ -1430,8 +1429,7 @@ func testFailingChannel(net *lntest.NetworkHarness, t *harnessTest) {
 	carolPayReqs := []string{resp.PaymentRequest}
 
 	// Wait for Alice to receive the channel edge from the funding manager.
-	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
-	err = net.Alice.WaitForNetworkChannelOpen(ctxt, chanPoint)
+	err = net.Alice.WaitForNetworkChannelOpen(chanPoint)
 	if err != nil {
 		t.Fatalf("alice didn't see the alice->carol channel before "+
 			"timeout: %v", err)
@@ -58,12 +58,10 @@ func testUpdateChanStatus(net *lntest.NetworkHarness, t *harnessTest) {
 
 	// Wait for Alice and Bob to receive the channel edge from the
 	// funding manager.
-	ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout)
-	defer cancel()
-	err := alice.WaitForNetworkChannelOpen(ctxt, chanPoint)
+	err := alice.WaitForNetworkChannelOpen(chanPoint)
 	require.NoError(t.t, err, "alice didn't see the alice->bob channel")
 
-	err = bob.WaitForNetworkChannelOpen(ctxt, chanPoint)
+	err = bob.WaitForNetworkChannelOpen(chanPoint)
 	require.NoError(t.t, err, "bob didn't see the alice->bob channel")
 
 	// Launch a node for Carol which will connect to Alice and Bob in order
@@ -83,12 +81,9 @@ func testUpdateChanStatus(net *lntest.NetworkHarness, t *harnessTest) {
 	assertChannelUpdate := func(node *lntest.HarnessNode,
 		policy *lnrpc.RoutingPolicy) {
 
-		ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout)
-		defer cancel()
-
 		require.NoError(
 			t.t, carol.WaitForChannelPolicyUpdate(
-				ctxt, node.PubKeyStr, policy, chanPoint, false,
+				node.PubKeyStr, policy, chanPoint, false,
 			), "error while waiting for channel update",
 		)
 	}
@@ -85,12 +85,11 @@ func testUpdateChannelPolicy(net *lntest.NetworkHarness, t *harnessTest) {
 		)
 	}
 
-	ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
-	err := net.Alice.WaitForNetworkChannelOpen(ctxt, chanPoint)
+	err := net.Alice.WaitForNetworkChannelOpen(chanPoint)
 	if err != nil {
 		t.Fatalf("alice didn't report channel: %v", err)
 	}
-	err = net.Bob.WaitForNetworkChannelOpen(ctxt, chanPoint)
+	err = net.Bob.WaitForNetworkChannelOpen(chanPoint)
 	if err != nil {
 		t.Fatalf("bob didn't report channel: %v", err)
 	}
@@ -163,18 +162,15 @@ func testUpdateChannelPolicy(net *lntest.NetworkHarness, t *harnessTest) {
 		)
 	}
 
-	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
-	err = net.Alice.WaitForNetworkChannelOpen(ctxt, chanPoint2)
+	err = net.Alice.WaitForNetworkChannelOpen(chanPoint2)
 	if err != nil {
 		t.Fatalf("alice didn't report channel: %v", err)
 	}
-	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
-	err = net.Bob.WaitForNetworkChannelOpen(ctxt, chanPoint2)
+	err = net.Bob.WaitForNetworkChannelOpen(chanPoint2)
 	if err != nil {
 		t.Fatalf("bob didn't report channel: %v", err)
 	}
-	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
-	err = carol.WaitForNetworkChannelOpen(ctxt, chanPoint2)
+	err = carol.WaitForNetworkChannelOpen(chanPoint2)
 	if err != nil {
 		t.Fatalf("carol didn't report channel: %v", err)
 	}
@@ -187,7 +183,7 @@ func testUpdateChannelPolicy(net *lntest.NetworkHarness, t *harnessTest) {
 		Memo:  "testing",
 		Value: int64(payAmt),
 	}
-	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
+	ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
 	resp, err := carol.AddInvoice(ctxt, invoice)
 	if err != nil {
 		t.Fatalf("unable to add invoice: %v", err)
@@ -387,13 +383,11 @@ func testUpdateChannelPolicy(net *lntest.NetworkHarness, t *harnessTest) {
 	)
 	defer closeChannelAndAssert(t, net, net.Alice, chanPoint3, false)
 
-	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
-	err = net.Alice.WaitForNetworkChannelOpen(ctxt, chanPoint3)
+	err = net.Alice.WaitForNetworkChannelOpen(chanPoint3)
 	if err != nil {
 		t.Fatalf("alice didn't report channel: %v", err)
 	}
-	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
-	err = carol.WaitForNetworkChannelOpen(ctxt, chanPoint3)
+	err = carol.WaitForNetworkChannelOpen(chanPoint3)
 	if err != nil {
 		t.Fatalf("bob didn't report channel: %v", err)
 	}
@@ -505,8 +499,6 @@
 // flag set is sent once a channel has been either unilaterally or cooperatively
 // closed.
 func testSendUpdateDisableChannel(net *lntest.NetworkHarness, t *harnessTest) {
-	ctxb := context.Background()
-
 	const (
 		chanAmt = 100000
 	)
@@ -588,12 +580,9 @@ func testSendUpdateDisableChannel(net *lntest.NetworkHarness, t *harnessTest) {
 	assertPolicyUpdate := func(node *lntest.HarnessNode,
 		policy *lnrpc.RoutingPolicy, chanPoint *lnrpc.ChannelPoint) {
 
-		ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout)
-		defer cancel()
-
 		require.NoError(
 			t.t, dave.WaitForChannelPolicyUpdate(
-				ctxt, node.PubKeyStr, policy, chanPoint, false,
+				node.PubKeyStr, policy, chanPoint, false,
 			), "error while waiting for channel update",
 		)
 	}
@@ -161,8 +161,7 @@ func testEtcdFailoverCase(net *lntest.NetworkHarness, ht *harnessTest,
 
 	assertLeader(ht, observer, "Carol-2")
 
-	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
-	err = carol2.Unlock(ctxt, &lnrpc.UnlockWalletRequest{
+	err = carol2.Unlock(&lnrpc.UnlockWalletRequest{
 		WalletPassword: password,
 	})
 	if err != nil {
@@ -534,8 +534,6 @@ func (c *interceptorTestContext) closeChannels() {
 }
 
 func (c *interceptorTestContext) waitForChannels() {
-	ctxb := context.Background()
-
 	// Wait for all nodes to have seen all channels.
 	for _, chanPoint := range c.networkChans {
 		for _, node := range c.nodes {
@@ -547,8 +545,7 @@ func (c *interceptorTestContext) waitForChannels() {
 				Index: chanPoint.OutputIndex,
 			}
 
-			ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
-			err = node.WaitForNetworkChannelOpen(ctxt, chanPoint)
+			err = node.WaitForNetworkChannelOpen(chanPoint)
 			require.NoError(c.t.t, err, fmt.Sprintf("(%d): timeout "+
 				"waiting for channel(%s) open", node.NodeID,
 				point))
@@ -196,7 +196,6 @@ func basicChannelFundingTest(t *harnessTest, net *lntest.NetworkHarness,
 	// open or an error occurs in the funding process. A series of
 	// assertions will be executed to ensure the funding process completed
 	// successfully.
-	ctxb := context.Background()
 	chanPoint := openChannelAndAssert(
 		t, net, alice, bob,
 		lntest.OpenChannelParams{
@@ -207,12 +206,10 @@ func basicChannelFundingTest(t *harnessTest, net *lntest.NetworkHarness,
 		},
 	)
 
-	ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
-
-	err := alice.WaitForNetworkChannelOpen(ctxt, chanPoint)
+	err := alice.WaitForNetworkChannelOpen(chanPoint)
 	require.NoError(t.t, err, "alice didn't report channel")
 
-	err = bob.WaitForNetworkChannelOpen(ctxt, chanPoint)
+	err = bob.WaitForNetworkChannelOpen(chanPoint)
 	require.NoError(t.t, err, "bob didn't report channel")
 
 	cType, err := channelCommitType(alice, chanPoint)
@@ -816,13 +813,11 @@ func testBatchChanFunding(net *lntest.NetworkHarness, t *harnessTest) {
 
 	block := mineBlocks(t, net, 6, 1)[0]
 	assertTxInBlock(t, block, txHash)
-	ctxt, cancel = context.WithTimeout(ctxb, defaultTimeout)
-	defer cancel()
-	err = net.Alice.WaitForNetworkChannelOpen(ctxt, chanPoint1)
+	err = net.Alice.WaitForNetworkChannelOpen(chanPoint1)
 	require.NoError(t.t, err)
-	err = net.Alice.WaitForNetworkChannelOpen(ctxt, chanPoint2)
+	err = net.Alice.WaitForNetworkChannelOpen(chanPoint2)
 	require.NoError(t.t, err)
-	err = net.Alice.WaitForNetworkChannelOpen(ctxt, chanPoint3)
+	err = net.Alice.WaitForNetworkChannelOpen(chanPoint3)
 	require.NoError(t.t, err)
 
 	// With the channel open, ensure that it is counted towards Carol's
@@ -74,8 +74,8 @@ func testHoldInvoiceForceClose(net *lntest.NetworkHarness, t *harnessTest) {
 	require.Len(t.t, chans.Channels[0].PendingHtlcs, 1)
 	activeHtlc := chans.Channels[0].PendingHtlcs[0]
 
-	require.NoError(t.t, net.Alice.WaitForBlockchainSync(ctxb))
-	require.NoError(t.t, net.Bob.WaitForBlockchainSync(ctxb))
+	require.NoError(t.t, net.Alice.WaitForBlockchainSync())
+	require.NoError(t.t, net.Bob.WaitForBlockchainSync())
 
 	info, err := net.Alice.GetInfo(ctxb, &lnrpc.GetInfoRequest{})
 	require.NoError(t.t, err)
@@ -99,8 +99,8 @@ func testHoldInvoiceForceClose(net *lntest.NetworkHarness, t *harnessTest) {
 
 	mineBlocksSlow(t, net, blocksTillForce, 0)
 
-	require.NoError(t.t, net.Alice.WaitForBlockchainSync(ctxb))
-	require.NoError(t.t, net.Bob.WaitForBlockchainSync(ctxb))
+	require.NoError(t.t, net.Alice.WaitForBlockchainSync())
+	require.NoError(t.t, net.Bob.WaitForBlockchainSync())
 
 	// Our channel should not have been force closed, instead we expect our
 	// channel to still be open and our invoice to have been canceled before
@@ -48,15 +48,13 @@ func testHoldInvoicePersistence(net *lntest.NetworkHarness, t *harnessTest) {
 
 	// Wait for Alice and Carol to receive the channel edge from the
 	// funding manager.
-	ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
-	err := net.Alice.WaitForNetworkChannelOpen(ctxt, chanPointAlice)
+	err := net.Alice.WaitForNetworkChannelOpen(chanPointAlice)
 	if err != nil {
 		t.Fatalf("alice didn't see the alice->carol channel before "+
 			"timeout: %v", err)
 	}
 
-	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
-	err = carol.WaitForNetworkChannelOpen(ctxt, chanPointAlice)
+	err = carol.WaitForNetworkChannelOpen(chanPointAlice)
 	if err != nil {
 		t.Fatalf("carol didn't see the carol->alice channel before "+
 			"timeout: %v", err)
@@ -92,7 +90,7 @@ func testHoldInvoicePersistence(net *lntest.NetworkHarness, t *harnessTest) {
 		Hash:    payHash[:],
 		Private: true,
 	}
-	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
+	ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
 	resp, err := carol.AddHoldInvoice(ctxt, invoiceReq)
 	if err != nil {
 		t.Fatalf("unable to add invoice: %v", err)
@@ -165,7 +163,7 @@ func testHoldInvoicePersistence(net *lntest.NetworkHarness, t *harnessTest) {
 		req := &lnrpc.ListPaymentsRequest{
 			IncludeIncomplete: true,
 		}
-		ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
+		ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
 		paymentsResp, err := net.Alice.ListPayments(ctxt, req)
 		if err != nil {
 			return fmt.Errorf("error when obtaining payments: %v",
@@ -299,7 +297,7 @@ func testHoldInvoicePersistence(net *lntest.NetworkHarness, t *harnessTest) {
 	for i, preimage := range preimages {
 		var expectedState lnrpc.Invoice_InvoiceState
 
-		ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
+		ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
 		if i%2 == 0 {
 			settle := &invoicesrpc.SettleInvoiceMsg{
 				Preimage: preimage[:],
@@ -368,7 +366,7 @@ func testHoldInvoicePersistence(net *lntest.NetworkHarness, t *harnessTest) {
 	req := &lnrpc.ListPaymentsRequest{
 		IncludeIncomplete: true,
 	}
-	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
+	ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
 	paymentsResp, err := net.Alice.ListPayments(ctxt, req)
 	if err != nil {
 		t.Fatalf("error when obtaining Alice payments: %v", err)
@@ -665,8 +665,7 @@ func testStatelessInit(net *lntest.NetworkHarness, t *harnessTest) {
 		NewPassword:   newPw,
 		StatelessInit: true,
 	}
-	ctxb := context.Background()
-	response, err := carol.InitChangePassword(ctxb, changePwReq)
+	response, err := carol.InitChangePassword(changePwReq)
 	require.NoError(t.t, err)
 
 	// Again, make sure no macaroon files have been created by the node
@@ -34,12 +34,10 @@ func testMaxHtlcPathfind(net *lntest.NetworkHarness, t *harnessTest) {
 
 	// Wait for Alice and Bob to receive the channel edge from the
 	// funding manager.
-	ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
-	err := net.Alice.WaitForNetworkChannelOpen(ctxt, chanPoint)
+	err := net.Alice.WaitForNetworkChannelOpen(chanPoint)
 	require.NoError(t.t, err, "alice does not have open channel")
 
-	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
-	err = net.Bob.WaitForNetworkChannelOpen(ctxt, chanPoint)
+	err = net.Bob.WaitForNetworkChannelOpen(chanPoint)
 	require.NoError(t.t, err, "bob does not have open channel")
 
 	// Alice and bob should have one channel open with each other now.
@@ -87,7 +85,7 @@ func testMaxHtlcPathfind(net *lntest.NetworkHarness, t *harnessTest) {
 	// Now, we're going to try to send another payment from Bob -> Alice.
 	// We've hit our max remote htlcs, so we expect this payment to spin
 	// out dramatically with pathfinding.
-	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
+	ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
 	payment, err := net.Bob.RouterClient.SendPaymentV2(
 		ctxt, &routerrpc.SendPaymentRequest{
 			Amt: 1000,
@@ -236,23 +236,22 @@ func testSphinxReplayPersistence(net *lntest.NetworkHarness, t *harnessTest) {
 	}
 
 	// Wait for all channels to be recognized and advertized.
-	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
-	err = carol.WaitForNetworkChannelOpen(ctxt, chanPoint)
+	err = carol.WaitForNetworkChannelOpen(chanPoint)
 	if err != nil {
 		t.Fatalf("alice didn't advertise channel before "+
 			"timeout: %v", err)
 	}
-	err = dave.WaitForNetworkChannelOpen(ctxt, chanPoint)
+	err = dave.WaitForNetworkChannelOpen(chanPoint)
 	if err != nil {
 		t.Fatalf("bob didn't advertise channel before "+
 			"timeout: %v", err)
 	}
-	err = carol.WaitForNetworkChannelOpen(ctxt, chanPointFC)
+	err = carol.WaitForNetworkChannelOpen(chanPointFC)
 	if err != nil {
 		t.Fatalf("alice didn't advertise channel before "+
 			"timeout: %v", err)
 	}
-	err = fred.WaitForNetworkChannelOpen(ctxt, chanPointFC)
+	err = fred.WaitForNetworkChannelOpen(chanPointFC)
 	if err != nil {
 		t.Fatalf("bob didn't advertise channel before "+
 			"timeout: %v", err)
@@ -386,15 +385,13 @@ func testListChannels(net *lntest.NetworkHarness, t *harnessTest) {
 
 	// Wait for Alice and Bob to receive the channel edge from the
 	// funding manager.
-	ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
-	err := alice.WaitForNetworkChannelOpen(ctxt, chanPoint)
+	err := alice.WaitForNetworkChannelOpen(chanPoint)
 	if err != nil {
 		t.Fatalf("alice didn't see the alice->bob channel before "+
 			"timeout: %v", err)
 	}
 
-	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
-	err = bob.WaitForNetworkChannelOpen(ctxt, chanPoint)
+	err = bob.WaitForNetworkChannelOpen(chanPoint)
 	if err != nil {
 		t.Fatalf("bob didn't see the bob->alice channel before "+
 			"timeout: %v", err)
@@ -407,8 +404,7 @@ func testListChannels(net *lntest.NetworkHarness, t *harnessTest) {
 
 	// Get the ListChannel response from Alice.
 	listReq := &lnrpc.ListChannelsRequest{}
-	ctxb = context.Background()
-	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
+	ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
 	resp, err := alice.ListChannels(ctxt, listReq)
 	if err != nil {
 		t.Fatalf("unable to query for %s's channel list: %v",
|
||||
// max pending channel number was exceeded and that '--maxpendingchannels' flag
|
||||
// exists and works properly.
|
||||
func testMaxPendingChannels(net *lntest.NetworkHarness, t *harnessTest) {
|
||||
ctxb := context.Background()
|
||||
|
||||
maxPendingChannels := lncfg.DefaultMaxPendingChannels + 1
|
||||
amount := funding.MaxBtcFundingAmount
|
||||
|
||||
@ -557,8 +551,7 @@ func testMaxPendingChannels(net *lntest.NetworkHarness, t *harnessTest) {
|
||||
// Ensure that the funding transaction enters a block, and is
|
||||
// properly advertised by Alice.
|
||||
assertTxInBlock(t, block, fundingTxID)
|
||||
ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
|
||||
err = net.Alice.WaitForNetworkChannelOpen(ctxt, fundingChanPoint)
|
||||
err = net.Alice.WaitForNetworkChannelOpen(fundingChanPoint)
|
||||
if err != nil {
|
||||
t.Fatalf("channel not seen on network before "+
|
||||
"timeout: %v", err)
|
||||
@@ -878,8 +871,7 @@ func testDataLossProtection(net *lntest.NetworkHarness, t *harnessTest) {
 
 		// Wait for Carol to receive the channel edge from the funding
 		// manager.
-		ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
-		err = carol.WaitForNetworkChannelOpen(ctxt, chanPoint)
+		err = carol.WaitForNetworkChannelOpen(chanPoint)
 		if err != nil {
 			t.Fatalf("carol didn't see the carol->%s channel "+
 				"before timeout: %v", node.Name(), err)
@@ -975,7 +967,7 @@ func testDataLossProtection(net *lntest.NetworkHarness, t *harnessTest) {
 		}
 
 		balReq := &lnrpc.WalletBalanceRequest{}
-		ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
+		ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
 		balResp, err := node.WalletBalance(ctxt, balReq)
 		if err != nil {
 			t.Fatalf("unable to get dave's balance: %v", err)
@@ -1352,16 +1344,15 @@ func testAbandonChannel(net *lntest.NetworkHarness, t *harnessTest) {
 	chanPointStr := fmt.Sprintf("%v:%v", txid, chanPoint.OutputIndex)
 
 	// Wait for channel to be confirmed open.
-	ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
-	err = net.Alice.WaitForNetworkChannelOpen(ctxt, chanPoint)
+	err = net.Alice.WaitForNetworkChannelOpen(chanPoint)
 	require.NoError(t.t, err, "alice wait for network channel open")
-	err = net.Bob.WaitForNetworkChannelOpen(ctxt, chanPoint)
+	err = net.Bob.WaitForNetworkChannelOpen(chanPoint)
 	require.NoError(t.t, err, "bob wait for network channel open")
 
 	// Now that the channel is open, we'll obtain its channel ID real quick
 	// so we can use it to query the graph below.
 	listReq := &lnrpc.ListChannelsRequest{}
-	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
+	ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
 	aliceChannelList, err := net.Alice.ListChannels(ctxt, listReq)
 	require.NoError(t.t, err)
 	var chanID uint64
@@ -319,8 +319,6 @@ func (c *mppTestContext) shutdownNodes() {
 }
 
 func (c *mppTestContext) waitForChannels() {
-	ctxb := context.Background()
-
 	// Wait for all nodes to have seen all channels.
 	for _, chanPoint := range c.networkChans {
 		for _, node := range c.nodes {
@@ -333,8 +331,7 @@ func (c *mppTestContext) waitForChannels() {
 				Index: chanPoint.OutputIndex,
 			}
 
-			ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
-			err = node.WaitForNetworkChannelOpen(ctxt, chanPoint)
+			err = node.WaitForNetworkChannelOpen(chanPoint)
 			if err != nil {
 				c.t.Fatalf("(%v:%d): timeout waiting for "+
 					"channel(%s) open: %v",
@@ -29,8 +29,7 @@ func testHtlcErrorPropagation(net *lntest.NetworkHarness, t *harnessTest) {
 			Amt: chanAmt,
 		},
 	)
-	ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
-	if err := net.Alice.WaitForNetworkChannelOpen(ctxt, chanPointAlice); err != nil {
+	if err := net.Alice.WaitForNetworkChannelOpen(chanPointAlice); err != nil {
 		t.Fatalf("channel not seen by alice before timeout: %v", err)
 	}
 
@@ -115,7 +114,7 @@ out:
 	for {
 		select {
 		case <-checkTableTicker.C:
-			ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
+			ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
 			_, err := net.Alice.GetNodeInfo(ctxt, nodeInfoReq)
 			if err != nil && strings.Contains(err.Error(),
 				"unable to find") {
@@ -138,7 +137,7 @@ out:
 		Memo:  "kek99",
 		Value: payAmt,
 	}
-	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
+	ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
 	carolInvoice, err := carol.AddInvoice(ctxt, invoiceReq)
 	if err != nil {
 		t.Fatalf("unable to generate carol invoice: %v", err)
@@ -154,8 +153,7 @@ out:
 
 	// Before we send the payment, ensure that the announcement of the new
 	// channel has been processed by Alice.
-	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
-	if err := net.Alice.WaitForNetworkChannelOpen(ctxt, chanPointBob); err != nil {
+	if err := net.Alice.WaitForNetworkChannelOpen(chanPointBob); err != nil {
 		t.Fatalf("channel not seen by alice before timeout: %v", err)
 	}
 
@@ -107,8 +107,7 @@ func testMultiHopPayments(net *lntest.NetworkHarness, t *harnessTest) {
 			Index: chanPoint.OutputIndex,
 		}
 
-		ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
-		err = node.WaitForNetworkChannelOpen(ctxt, chanPoint)
+		err = node.WaitForNetworkChannelOpen(chanPoint)
 		if err != nil {
 			t.Fatalf("%s(%d): timeout waiting for "+
 				"channel(%s) open: %v", nodeNames[i],
@@ -130,13 +129,11 @@ func testMultiHopPayments(net *lntest.NetworkHarness, t *harnessTest) {
 
 	// We'll wait for all parties to recognize the new channels within the
 	// network.
-	ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
-	err = dave.WaitForNetworkChannelOpen(ctxt, chanPointDave)
+	err = dave.WaitForNetworkChannelOpen(chanPointDave)
 	if err != nil {
 		t.Fatalf("dave didn't advertise his channel: %v", err)
 	}
-	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
-	err = carol.WaitForNetworkChannelOpen(ctxt, chanPointCarol)
+	err = carol.WaitForNetworkChannelOpen(chanPointCarol)
 	if err != nil {
 		t.Fatalf("carol didn't advertise her channel in time: %v",
 			err)
@@ -210,8 +210,6 @@ func createThreeHopNetwork(t *harnessTest, net *lntest.NetworkHarness,
 	alice, bob *lntest.HarnessNode, carolHodl bool, c lnrpc.CommitmentType) (
 	*lnrpc.ChannelPoint, *lnrpc.ChannelPoint, *lntest.HarnessNode) {
 
-	ctxb := context.Background()
-
 	net.EnsureConnected(t.t, alice, bob)
 
 	// Make sure there are enough utxos for anchoring.
@@ -242,14 +240,12 @@ func createThreeHopNetwork(t *harnessTest, net *lntest.NetworkHarness,
 		},
 	)
 
-	ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
-	err := alice.WaitForNetworkChannelOpen(ctxt, aliceChanPoint)
+	err := alice.WaitForNetworkChannelOpen(aliceChanPoint)
 	if err != nil {
 		t.Fatalf("alice didn't report channel: %v", err)
 	}
 
-	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
-	err = bob.WaitForNetworkChannelOpen(ctxt, aliceChanPoint)
+	err = bob.WaitForNetworkChannelOpen(aliceChanPoint)
 	if err != nil {
 		t.Fatalf("bob didn't report channel: %v", err)
 	}
@@ -289,18 +285,15 @@ func createThreeHopNetwork(t *harnessTest, net *lntest.NetworkHarness,
 			FundingShim: bobFundingShim,
 		},
 	)
-	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
-	err = bob.WaitForNetworkChannelOpen(ctxt, bobChanPoint)
+	err = bob.WaitForNetworkChannelOpen(bobChanPoint)
 	if err != nil {
 		t.Fatalf("alice didn't report channel: %v", err)
 	}
-	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
-	err = carol.WaitForNetworkChannelOpen(ctxt, bobChanPoint)
+	err = carol.WaitForNetworkChannelOpen(bobChanPoint)
 	if err != nil {
 		t.Fatalf("bob didn't report channel: %v", err)
 	}
-	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
-	err = alice.WaitForNetworkChannelOpen(ctxt, bobChanPoint)
+	err = alice.WaitForNetworkChannelOpen(bobChanPoint)
 	if err != nil {
 		t.Fatalf("bob didn't report channel: %v", err)
 	}
@@ -237,12 +237,10 @@ func testAnchorReservedValue(net *lntest.NetworkHarness, t *harnessTest) {
 		aliceChanPoint1, aliceChanPoint2, aliceChanPoint3,
 	}
 	for _, chanPoint := range chanPoints {
-		ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
-		err = alice.WaitForNetworkChannelOpen(ctxt, chanPoint)
+		err = alice.WaitForNetworkChannelOpen(chanPoint)
 		require.NoError(t.t, err)
 
-		ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
-		err = bob.WaitForNetworkChannelOpen(ctxt, chanPoint)
+		err = bob.WaitForNetworkChannelOpen(chanPoint)
 		require.NoError(t.t, err)
 	}
 
@ -142,14 +142,12 @@ func testOpenChannelAfterReorg(net *lntest.NetworkHarness, t *harnessTest) {
|
||||
|
||||
// Wait for Alice and Bob to recognize and advertise the new channel
|
||||
// generated above.
|
||||
ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
|
||||
err = net.Alice.WaitForNetworkChannelOpen(ctxt, chanPoint)
|
||||
err = net.Alice.WaitForNetworkChannelOpen(chanPoint)
|
||||
if err != nil {
|
||||
t.Fatalf("alice didn't advertise channel before "+
|
||||
"timeout: %v", err)
|
||||
}
|
||||
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
|
||||
err = net.Bob.WaitForNetworkChannelOpen(ctxt, chanPoint)
|
||||
err = net.Bob.WaitForNetworkChannelOpen(chanPoint)
|
||||
if err != nil {
|
||||
t.Fatalf("bob didn't advertise channel before "+
|
||||
"timeout: %v", err)
|
||||
@ -159,7 +157,7 @@ func testOpenChannelAfterReorg(net *lntest.NetworkHarness, t *harnessTest) {
|
||||
req := &lnrpc.ChannelGraphRequest{
|
||||
IncludeUnannounced: true,
|
||||
}
|
||||
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
|
||||
ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
|
||||
chanGraph, err := net.Alice.DescribeGraph(ctxt, req)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to query for alice's routing table: %v", err)
|
||||
|
@ -69,12 +69,11 @@ func testListPayments(net *lntest.NetworkHarness, t *harnessTest) {
|
||||
|
||||
// Wait for Alice to recognize and advertise the new channel generated
|
||||
// above.
|
||||
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
|
||||
if err = net.Alice.WaitForNetworkChannelOpen(ctxt, chanPoint); err != nil {
|
||||
if err = net.Alice.WaitForNetworkChannelOpen(chanPoint); err != nil {
|
||||
t.Fatalf("alice didn't advertise channel before "+
|
||||
"timeout: %v", err)
|
||||
}
|
||||
if err = net.Bob.WaitForNetworkChannelOpen(ctxt, chanPoint); err != nil {
|
||||
if err = net.Bob.WaitForNetworkChannelOpen(chanPoint); err != nil {
|
||||
t.Fatalf("bob didn't advertise channel before "+
|
||||
"timeout: %v", err)
|
||||
}
|
||||
@ -292,8 +291,7 @@ func runAsyncPayments(net *lntest.NetworkHarness, t *harnessTest, alice,
|
||||
}
|
||||
|
||||
// Wait for Alice to receive the channel edge from the funding manager.
|
||||
ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
|
||||
err = alice.WaitForNetworkChannelOpen(ctxt, chanPoint)
|
||||
err = alice.WaitForNetworkChannelOpen(chanPoint)
|
||||
if err != nil {
|
||||
t.Fatalf("alice didn't see the alice->bob channel before "+
|
||||
"timeout: %v", err)
|
||||
@ -307,7 +305,7 @@ func runAsyncPayments(net *lntest.NetworkHarness, t *harnessTest, alice,
|
||||
for i := 0; i < numInvoices; i++ {
|
||||
payReq := bobPayReqs[i]
|
||||
go func() {
|
||||
ctxt, _ = context.WithTimeout(ctxb, lntest.AsyncBenchmarkTimeout)
|
||||
ctxt, _ := context.WithTimeout(ctxb, lntest.AsyncBenchmarkTimeout)
|
||||
stream, err := alice.RouterClient.SendPaymentV2(
|
||||
ctxt,
|
||||
&routerrpc.SendPaymentRequest{
|
||||
@ -465,12 +463,11 @@ func testBidirectionalAsyncPayments(net *lntest.NetworkHarness, t *harnessTest)
|
||||
}
|
||||
|
||||
// Wait for Alice to receive the channel edge from the funding manager.
|
||||
ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
|
||||
if err = net.Alice.WaitForNetworkChannelOpen(ctxt, chanPoint); err != nil {
|
||||
if err = net.Alice.WaitForNetworkChannelOpen(chanPoint); err != nil {
|
||||
t.Fatalf("alice didn't see the alice->bob channel before "+
|
||||
"timeout: %v", err)
|
||||
}
|
||||
if err = net.Bob.WaitForNetworkChannelOpen(ctxt, chanPoint); err != nil {
|
||||
if err = net.Bob.WaitForNetworkChannelOpen(chanPoint); err != nil {
|
||||
t.Fatalf("bob didn't see the bob->alice channel before "+
|
||||
"timeout: %v", err)
|
||||
}
|
||||
@ -478,7 +475,7 @@ func testBidirectionalAsyncPayments(net *lntest.NetworkHarness, t *harnessTest)
|
||||
// Reset mission control to prevent previous payment results from
|
||||
// interfering with this test. A new channel has been opened, but
|
||||
// mission control operates on node pairs.
|
||||
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
|
||||
ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
|
||||
_, err = net.Alice.RouterClient.ResetMissionControl(
|
||||
ctxt, &routerrpc.ResetMissionControlRequest{},
|
||||
)
|
||||
@ -680,8 +677,7 @@ func testInvoiceSubscriptions(net *lntest.NetworkHarness, t *harnessTest) {
|
||||
|
||||
// Wait for the channel to be recognized by both Alice and Bob before
|
||||
// continuing the rest of the test.
|
||||
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
|
||||
err = net.Alice.WaitForNetworkChannelOpen(ctxt, chanPoint)
|
||||
err = net.Alice.WaitForNetworkChannelOpen(chanPoint)
|
||||
if err != nil {
|
||||
// TODO(roasbeef): will need to make num blocks to advertise a
|
||||
// node param
|
||||
|
@ -215,11 +215,9 @@ func runPsbtChanFunding(net *lntest.NetworkHarness, t *harnessTest, carol,
|
||||
txHash := finalTx.TxHash()
|
||||
block := mineBlocks(t, net, 6, 1)[0]
|
||||
assertTxInBlock(t, block, &txHash)
|
||||
ctxt, cancel = context.WithTimeout(ctxb, defaultTimeout)
|
||||
defer cancel()
|
||||
err = carol.WaitForNetworkChannelOpen(ctxt, chanPoint)
|
||||
err = carol.WaitForNetworkChannelOpen(chanPoint)
|
||||
require.NoError(t.t, err)
|
||||
err = carol.WaitForNetworkChannelOpen(ctxt, chanPoint2)
|
||||
err = carol.WaitForNetworkChannelOpen(chanPoint2)
|
||||
require.NoError(t.t, err)
|
||||
|
||||
// With the channel open, ensure that it is counted towards Carol's
|
||||
@ -431,11 +429,9 @@ func testPsbtChanFundingExternal(net *lntest.NetworkHarness, t *harnessTest) {
|
||||
// for the new channel to be propagated through the network.
|
||||
block := mineBlocks(t, net, 6, 1)[0]
|
||||
assertTxInBlock(t, block, &txHash)
|
||||
ctxt, cancel = context.WithTimeout(ctxb, defaultTimeout)
|
||||
defer cancel()
|
||||
err = carol.WaitForNetworkChannelOpen(ctxt, chanPoint)
|
||||
err = carol.WaitForNetworkChannelOpen(chanPoint)
|
||||
require.NoError(t.t, err)
|
||||
err = carol.WaitForNetworkChannelOpen(ctxt, chanPoint2)
|
||||
err = carol.WaitForNetworkChannelOpen(chanPoint2)
|
||||
require.NoError(t.t, err)
|
||||
|
||||
// With the channel open, ensure that it is counted towards Carol's
|
||||
@ -483,11 +479,11 @@ func openChannelPsbt(ctx context.Context, srcNode, destNode *lntest.HarnessNode,
|
||||
// Otherwise, we may run into a check within the funding manager that
|
||||
// prevents any funding workflows from being kicked off if the chain
|
||||
// isn't yet synced.
|
||||
if err := srcNode.WaitForBlockchainSync(ctx); err != nil {
|
||||
if err := srcNode.WaitForBlockchainSync(); err != nil {
|
||||
return nil, nil, fmt.Errorf("unable to sync srcNode chain: %v",
|
||||
err)
|
||||
}
|
||||
if err := destNode.WaitForBlockchainSync(ctx); err != nil {
|
||||
if err := destNode.WaitForBlockchainSync(); err != nil {
|
||||
return nil, nil, fmt.Errorf("unable to sync destNode chain: %v",
|
||||
err)
|
||||
}
|
||||
|
@ -24,8 +24,6 @@ import (
|
||||
// retribution in the event that she fails immediately after detecting Bob's
|
||||
// breach txn in the mempool.
|
||||
func testRevokedCloseRetribution(net *lntest.NetworkHarness, t *harnessTest) {
|
||||
ctxb := context.Background()
|
||||
|
||||
const (
|
||||
chanAmt = funding.MaxBtcFundingAmount
|
||||
paymentAmt = 10000
|
||||
@ -71,8 +69,7 @@ func testRevokedCloseRetribution(net *lntest.NetworkHarness, t *harnessTest) {
|
||||
}
|
||||
|
||||
// Wait for Carol to receive the channel edge from the funding manager.
|
||||
ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
|
||||
err = carol.WaitForNetworkChannelOpen(ctxt, chanPoint)
|
||||
err = carol.WaitForNetworkChannelOpen(chanPoint)
|
||||
if err != nil {
|
||||
t.Fatalf("carol didn't see the carol->bob channel before "+
|
||||
"timeout: %v", err)
|
||||
@ -260,7 +257,6 @@ func testRevokedCloseRetribution(net *lntest.NetworkHarness, t *harnessTest) {
|
||||
// commitment output has zero-value.
|
||||
func testRevokedCloseRetributionZeroValueRemoteOutput(net *lntest.NetworkHarness,
|
||||
t *harnessTest) {
|
||||
ctxb := context.Background()
|
||||
|
||||
const (
|
||||
chanAmt = funding.MaxBtcFundingAmount
|
||||
@ -312,8 +308,7 @@ func testRevokedCloseRetributionZeroValueRemoteOutput(net *lntest.NetworkHarness
|
||||
}
|
||||
|
||||
// Wait for Dave to receive the channel edge from the funding manager.
|
||||
ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
|
||||
err = dave.WaitForNetworkChannelOpen(ctxt, chanPoint)
|
||||
err = dave.WaitForNetworkChannelOpen(chanPoint)
|
||||
if err != nil {
|
||||
t.Fatalf("dave didn't see the dave->carol channel before "+
|
||||
"timeout: %v", err)
|
||||
@ -475,7 +470,6 @@ func testRevokedCloseRetributionZeroValueRemoteOutput(net *lntest.NetworkHarness
|
||||
// remote party breaches before settling extended HTLCs.
|
||||
func testRevokedCloseRetributionRemoteHodl(net *lntest.NetworkHarness,
|
||||
t *harnessTest) {
|
||||
ctxb := context.Background()
|
||||
|
||||
const (
|
||||
chanAmt = funding.MaxBtcFundingAmount
|
||||
@ -559,8 +553,7 @@ func testRevokedCloseRetributionRemoteHodl(net *lntest.NetworkHarness,
|
||||
}
|
||||
|
||||
// Wait for Dave to receive the channel edge from the funding manager.
|
||||
ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
|
||||
err = dave.WaitForNetworkChannelOpen(ctxt, chanPoint)
|
||||
err = dave.WaitForNetworkChannelOpen(chanPoint)
|
||||
if err != nil {
|
||||
t.Fatalf("dave didn't see the dave->carol channel before "+
|
||||
"timeout: %v", err)
|
||||
@ -1017,8 +1010,7 @@ func testRevokedCloseRetributionAltruistWatchtowerCase(
|
||||
}
|
||||
|
||||
// Wait for Dave to receive the channel edge from the funding manager.
|
||||
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
|
||||
err = dave.WaitForNetworkChannelOpen(ctxt, chanPoint)
|
||||
err = dave.WaitForNetworkChannelOpen(chanPoint)
|
||||
if err != nil {
|
||||
t.Fatalf("dave didn't see the dave->carol channel before "+
|
||||
"timeout: %v", err)
|
||||
|
@ -138,8 +138,7 @@ func testSingleHopSendToRouteCase(net *lntest.NetworkHarness, t *harnessTest,
|
||||
Index: chanPoint.OutputIndex,
|
||||
}
|
||||
|
||||
ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
|
||||
err = node.WaitForNetworkChannelOpen(ctxt, chanPoint)
|
||||
err = node.WaitForNetworkChannelOpen(chanPoint)
|
||||
if err != nil {
|
||||
t.Fatalf("%s(%d): timeout waiting for "+
|
||||
"channel(%s) open: %v", node.Name(),
|
||||
@ -527,8 +526,7 @@ func runMultiHopSendToRoute(net *lntest.NetworkHarness, t *harnessTest,
|
||||
Index: chanPoint.OutputIndex,
|
||||
}
|
||||
|
||||
ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
|
||||
err = node.WaitForNetworkChannelOpen(ctxt, chanPoint)
|
||||
err = node.WaitForNetworkChannelOpen(chanPoint)
|
||||
if err != nil {
|
||||
t.Fatalf("%s(%d): timeout waiting for "+
|
||||
"channel(%s) open: %v", nodeNames[i],
|
||||
@ -567,8 +565,7 @@ func runMultiHopSendToRoute(net *lntest.NetworkHarness, t *harnessTest,
|
||||
|
||||
// We'll wait for all parties to recognize the new channels within the
|
||||
// network.
|
||||
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
|
||||
err = carol.WaitForNetworkChannelOpen(ctxt, chanPointBob)
|
||||
err = carol.WaitForNetworkChannelOpen(chanPointBob)
|
||||
if err != nil {
|
||||
t.Fatalf("bob didn't advertise his channel in time: %v", err)
|
||||
}
|
||||
@ -647,8 +644,7 @@ func testSendToRouteErrorPropagation(net *lntest.NetworkHarness, t *harnessTest)
|
||||
},
|
||||
)
|
||||
|
||||
ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
|
||||
err := net.Alice.WaitForNetworkChannelOpen(ctxt, chanPointAlice)
|
||||
err := net.Alice.WaitForNetworkChannelOpen(chanPointAlice)
|
||||
if err != nil {
|
||||
t.Fatalf("alice didn't advertise her channel: %v", err)
|
||||
}
|
||||
@ -678,8 +674,7 @@ func testSendToRouteErrorPropagation(net *lntest.NetworkHarness, t *harnessTest)
|
||||
Amt: chanAmt,
|
||||
},
|
||||
)
|
||||
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
|
||||
err = carol.WaitForNetworkChannelOpen(ctxt, chanPointCarol)
|
||||
err = carol.WaitForNetworkChannelOpen(chanPointCarol)
|
||||
if err != nil {
|
||||
t.Fatalf("carol didn't advertise her channel: %v", err)
|
||||
}
|
||||
@ -690,7 +685,7 @@ func testSendToRouteErrorPropagation(net *lntest.NetworkHarness, t *harnessTest)
|
||||
PubKey: charlie.PubKeyStr,
|
||||
Amt: int64(1),
|
||||
}
|
||||
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
|
||||
ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
|
||||
fakeRoute, err := carol.QueryRoutes(ctxt, fakeReq)
|
||||
if err != nil {
|
||||
t.Fatalf("unable get fake route: %v", err)
|
||||
@ -842,8 +837,7 @@ func testPrivateChannels(net *lntest.NetworkHarness, t *harnessTest) {
|
||||
Index: chanPoint.OutputIndex,
|
||||
}
|
||||
|
||||
ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
|
||||
err = node.WaitForNetworkChannelOpen(ctxt, chanPoint)
|
||||
err = node.WaitForNetworkChannelOpen(chanPoint)
|
||||
if err != nil {
|
||||
t.Fatalf("%s(%d): timeout waiting for "+
|
||||
"channel(%s) open: %v", nodeNames[i],
|
||||
@ -1142,8 +1136,7 @@ func testInvoiceRoutingHints(net *lntest.NetworkHarness, t *harnessTest) {
|
||||
chanPointEve,
|
||||
}
|
||||
for i, chanPoint := range aliceChans {
|
||||
ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
|
||||
err := net.Alice.WaitForNetworkChannelOpen(ctxt, chanPoint)
|
||||
err := net.Alice.WaitForNetworkChannelOpen(chanPoint)
|
||||
if err != nil {
|
||||
t.Fatalf("timed out waiting for channel open %s: %v",
|
||||
chanNames[i], err)
|
||||
@ -1263,14 +1256,12 @@ func testMultiHopOverPrivateChannels(net *lntest.NetworkHarness, t *harnessTest)
|
||||
},
|
||||
)
|
||||
|
||||
ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
|
||||
err := net.Alice.WaitForNetworkChannelOpen(ctxt, chanPointAlice)
|
||||
err := net.Alice.WaitForNetworkChannelOpen(chanPointAlice)
|
||||
if err != nil {
|
||||
t.Fatalf("alice didn't see the channel alice <-> bob before "+
|
||||
"timeout: %v", err)
|
||||
}
|
||||
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
|
||||
err = net.Bob.WaitForNetworkChannelOpen(ctxt, chanPointAlice)
|
||||
err = net.Bob.WaitForNetworkChannelOpen(chanPointAlice)
|
||||
if err != nil {
|
||||
t.Fatalf("bob didn't see the channel alice <-> bob before "+
|
||||
"timeout: %v", err)
|
||||
@ -1299,20 +1290,17 @@ func testMultiHopOverPrivateChannels(net *lntest.NetworkHarness, t *harnessTest)
|
||||
},
|
||||
)
|
||||
|
||||
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
|
||||
err = net.Bob.WaitForNetworkChannelOpen(ctxt, chanPointBob)
|
||||
err = net.Bob.WaitForNetworkChannelOpen(chanPointBob)
|
||||
if err != nil {
|
||||
t.Fatalf("bob didn't see the channel bob <-> carol before "+
|
||||
"timeout: %v", err)
|
||||
}
|
||||
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
|
||||
err = carol.WaitForNetworkChannelOpen(ctxt, chanPointBob)
|
||||
err = carol.WaitForNetworkChannelOpen(chanPointBob)
|
||||
if err != nil {
|
||||
t.Fatalf("carol didn't see the channel bob <-> carol before "+
|
||||
"timeout: %v", err)
|
||||
}
|
||||
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
|
||||
err = net.Alice.WaitForNetworkChannelOpen(ctxt, chanPointBob)
|
||||
err = net.Alice.WaitForNetworkChannelOpen(chanPointBob)
|
||||
if err != nil {
|
||||
t.Fatalf("alice didn't see the channel bob <-> carol before "+
|
||||
"timeout: %v", err)
|
||||
@ -1344,20 +1332,17 @@ func testMultiHopOverPrivateChannels(net *lntest.NetworkHarness, t *harnessTest)
|
||||
},
|
||||
)
|
||||
|
||||
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
|
||||
err = carol.WaitForNetworkChannelOpen(ctxt, chanPointCarol)
|
||||
err = carol.WaitForNetworkChannelOpen(chanPointCarol)
|
||||
if err != nil {
|
||||
t.Fatalf("carol didn't see the channel carol <-> dave before "+
|
||||
"timeout: %v", err)
|
||||
}
|
||||
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
|
||||
err = dave.WaitForNetworkChannelOpen(ctxt, chanPointCarol)
|
||||
err = dave.WaitForNetworkChannelOpen(chanPointCarol)
|
||||
if err != nil {
|
||||
t.Fatalf("dave didn't see the channel carol <-> dave before "+
|
||||
"timeout: %v", err)
|
||||
}
|
||||
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
|
||||
err = dave.WaitForNetworkChannelOpen(ctxt, chanPointBob)
|
||||
err = dave.WaitForNetworkChannelOpen(chanPointBob)
|
||||
if err != nil {
|
||||
t.Fatalf("dave didn't see the channel bob <-> carol before "+
|
||||
"timeout: %v", err)
|
||||
@ -1387,7 +1372,7 @@ func testMultiHopOverPrivateChannels(net *lntest.NetworkHarness, t *harnessTest)
|
||||
Private: true,
|
||||
}
|
||||
|
||||
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
|
||||
ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
|
||||
resp, err := dave.AddInvoice(ctxt, invoice)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to add invoice for dave: %v", err)
|
||||
@ -1506,8 +1491,7 @@ func testQueryRoutes(net *lntest.NetworkHarness, t *harnessTest) {
|
||||
Index: chanPoint.OutputIndex,
|
||||
}
|
||||
|
||||
ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
|
||||
err = node.WaitForNetworkChannelOpen(ctxt, chanPoint)
|
||||
err = node.WaitForNetworkChannelOpen(chanPoint)
|
||||
if err != nil {
|
||||
t.Fatalf("%s(%d): timeout waiting for "+
|
||||
"channel(%s) open: %v", nodeNames[i],
|
||||
@ -1802,8 +1786,7 @@ func testRouteFeeCutoff(net *lntest.NetworkHarness, t *harnessTest) {
|
||||
Index: chanPoint.OutputIndex,
|
||||
}
|
||||
|
||||
ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
|
||||
err = node.WaitForNetworkChannelOpen(ctxt, chanPoint)
|
||||
err = node.WaitForNetworkChannelOpen(chanPoint)
|
||||
if err != nil {
|
||||
t.Fatalf("%s(%d) timed out waiting for "+
|
||||
"channel(%s) open: %v", nodeNames[i],
|
||||
|
@ -48,13 +48,12 @@ func testSingleHopInvoice(net *lntest.NetworkHarness, t *harnessTest) {
|
||||
|
||||
// Wait for Alice to recognize and advertise the new channel generated
|
||||
// above.
|
||||
ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
|
||||
err = net.Alice.WaitForNetworkChannelOpen(ctxt, chanPoint)
|
||||
err = net.Alice.WaitForNetworkChannelOpen(chanPoint)
|
||||
if err != nil {
|
||||
t.Fatalf("alice didn't advertise channel before "+
|
||||
"timeout: %v", err)
|
||||
}
|
||||
err = net.Bob.WaitForNetworkChannelOpen(ctxt, chanPoint)
|
||||
err = net.Bob.WaitForNetworkChannelOpen(chanPoint)
|
||||
if err != nil {
|
||||
t.Fatalf("bob didn't advertise channel before "+
|
||||
"timeout: %v", err)
|
||||
@ -78,7 +77,7 @@ func testSingleHopInvoice(net *lntest.NetworkHarness, t *harnessTest) {
|
||||
payHash := &lnrpc.PaymentHash{
|
||||
RHash: invoiceResp.RHash,
|
||||
}
|
||||
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
|
||||
ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
|
||||
dbInvoice, err := net.Bob.LookupInvoice(ctxt, payHash)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to lookup invoice: %v", err)
|
||||
|
@ -118,8 +118,7 @@ func testSwitchCircuitPersistence(net *lntest.NetworkHarness, t *harnessTest) {
|
||||
Index: chanPoint.OutputIndex,
|
||||
}
|
||||
|
||||
ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
|
||||
err = node.WaitForNetworkChannelOpen(ctxt, chanPoint)
|
||||
err = node.WaitForNetworkChannelOpen(chanPoint)
|
||||
if err != nil {
|
||||
t.Fatalf("%s(%d): timeout waiting for "+
|
||||
"channel(%s) open: %v", nodeNames[i],
|
||||
@ -141,13 +140,11 @@ func testSwitchCircuitPersistence(net *lntest.NetworkHarness, t *harnessTest) {
|
||||
|
||||
// We'll wait for all parties to recognize the new channels within the
|
||||
// network.
|
||||
ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
|
||||
err = dave.WaitForNetworkChannelOpen(ctxt, chanPointDave)
|
||||
err = dave.WaitForNetworkChannelOpen(chanPointDave)
|
||||
if err != nil {
|
||||
t.Fatalf("dave didn't advertise his channel: %v", err)
|
||||
}
|
||||
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
|
||||
err = carol.WaitForNetworkChannelOpen(ctxt, chanPointCarol)
|
||||
err = carol.WaitForNetworkChannelOpen(chanPointCarol)
|
||||
if err != nil {
|
||||
t.Fatalf("carol didn't advertise her channel in time: %v",
|
||||
err)
|
||||
@ -251,7 +248,7 @@ func testSwitchCircuitPersistence(net *lntest.NetworkHarness, t *harnessTest) {
|
||||
Memo: "testing",
|
||||
Value: paymentAmt,
|
||||
}
|
||||
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
|
||||
ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
|
||||
resp, err := carol.AddInvoice(ctxt, finalInvoice)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to add invoice: %v", err)
|
||||
@ -396,8 +393,7 @@ func testSwitchOfflineDelivery(net *lntest.NetworkHarness, t *harnessTest) {
|
||||
Index: chanPoint.OutputIndex,
|
||||
}
|
||||
|
||||
ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
|
||||
err = node.WaitForNetworkChannelOpen(ctxt, chanPoint)
|
||||
err = node.WaitForNetworkChannelOpen(chanPoint)
|
||||
if err != nil {
|
||||
t.Fatalf("%s(%d): timeout waiting for "+
|
||||
"channel(%s) open: %v", nodeNames[i],
|
||||
@ -419,23 +415,19 @@ func testSwitchOfflineDelivery(net *lntest.NetworkHarness, t *harnessTest) {
|
||||
|
||||
// We'll wait for all parties to recognize the new channels within the
|
||||
// network.
|
||||
ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
|
||||
err = dave.WaitForNetworkChannelOpen(ctxt, chanPointDave)
|
||||
err = dave.WaitForNetworkChannelOpen(chanPointDave)
|
||||
if err != nil {
|
||||
t.Fatalf("dave didn't advertise his channel: %v", err)
|
||||
}
|
||||
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
|
||||
err = carol.WaitForNetworkChannelOpen(ctxt, chanPointCarol)
|
||||
err = carol.WaitForNetworkChannelOpen(chanPointCarol)
|
||||
if err != nil {
|
||||
t.Fatalf("carol didn't advertise her channel in time: %v",
|
||||
err)
|
||||
}
|
||||
|
||||
// Make sure all nodes are fully synced before we continue.
|
||||
ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout)
|
||||
defer cancel()
|
||||
for _, node := range nodes {
|
||||
err := node.WaitForBlockchainSync(ctxt)
|
||||
err := node.WaitForBlockchainSync()
|
||||
if err != nil {
|
||||
t.Fatalf("unable to wait for sync: %v", err)
|
||||
}
|
||||
@ -503,10 +495,8 @@ func testSwitchOfflineDelivery(net *lntest.NetworkHarness, t *harnessTest) {
|
||||
}
|
||||
|
||||
// Make sure all nodes are fully synced again.
|
||||
ctxt, cancel = context.WithTimeout(ctxb, defaultTimeout)
|
||||
defer cancel()
|
||||
for _, node := range nodes {
|
||||
err := node.WaitForBlockchainSync(ctxt)
|
||||
err := node.WaitForBlockchainSync()
|
||||
if err != nil {
|
||||
t.Fatalf("unable to wait for sync: %v", err)
|
||||
}
|
||||
@ -555,7 +545,7 @@ func testSwitchOfflineDelivery(net *lntest.NetworkHarness, t *harnessTest) {
|
||||
Memo: "testing",
|
||||
Value: paymentAmt,
|
||||
}
|
||||
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
|
||||
ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
|
||||
resp, err := carol.AddInvoice(ctxt, finalInvoice)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to add invoice: %v", err)
|
||||
@ -702,8 +692,7 @@ func testSwitchOfflineDeliveryPersistence(net *lntest.NetworkHarness, t *harness
|
||||
Index: chanPoint.OutputIndex,
|
||||
}
|
||||
|
||||
ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
|
||||
err = node.WaitForNetworkChannelOpen(ctxt, chanPoint)
|
||||
err = node.WaitForNetworkChannelOpen(chanPoint)
|
||||
if err != nil {
|
||||
t.Fatalf("%s(%d): timeout waiting for "+
|
||||
"channel(%s) open: %v", nodeNames[i],
|
||||
@ -725,13 +714,11 @@ func testSwitchOfflineDeliveryPersistence(net *lntest.NetworkHarness, t *harness
|
||||
|
||||
// We'll wait for all parties to recognize the new channels within the
|
||||
// network.
|
||||
ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
|
||||
err = dave.WaitForNetworkChannelOpen(ctxt, chanPointDave)
|
||||
err = dave.WaitForNetworkChannelOpen(chanPointDave)
|
||||
if err != nil {
|
||||
t.Fatalf("dave didn't advertise his channel: %v", err)
|
||||
}
|
||||
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
|
||||
err = carol.WaitForNetworkChannelOpen(ctxt, chanPointCarol)
|
||||
err = carol.WaitForNetworkChannelOpen(chanPointCarol)
|
||||
if err != nil {
|
||||
t.Fatalf("carol didn't advertise her channel in time: %v",
|
||||
err)
|
||||
@ -842,7 +829,7 @@ func testSwitchOfflineDeliveryPersistence(net *lntest.NetworkHarness, t *harness
|
||||
Memo: "testing",
|
||||
Value: paymentAmt,
|
||||
}
|
||||
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
|
||||
ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
|
||||
resp, err := carol.AddInvoice(ctxt, finalInvoice)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to add invoice: %v", err)
|
||||
@ -896,7 +883,6 @@ func testSwitchOfflineDeliveryPersistence(net *lntest.NetworkHarness, t *harness
|
||||
// 5. Carol <-- Dave <-- Alice X expect settle to propagate
|
||||
func testSwitchOfflineDeliveryOutgoingOffline(
|
||||
net *lntest.NetworkHarness, t *harnessTest) {
|
||||
ctxb := context.Background()
|
||||
|
||||
const chanAmt = btcutil.Amount(1000000)
|
||||
const pushAmt = btcutil.Amount(900000)
|
||||
@ -991,8 +977,7 @@ func testSwitchOfflineDeliveryOutgoingOffline(
|
||||
Index: chanPoint.OutputIndex,
|
||||
}
|
||||
|
||||
ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
|
||||
err = node.WaitForNetworkChannelOpen(ctxt, chanPoint)
|
||||
err = node.WaitForNetworkChannelOpen(chanPoint)
|
||||
if err != nil {
|
||||
t.Fatalf("%s(%d): timeout waiting for "+
|
||||
"channel(%s) open: %v", nodeNames[i],
|
||||
@ -1014,13 +999,11 @@ func testSwitchOfflineDeliveryOutgoingOffline(
|
||||
|
||||
// We'll wait for all parties to recognize the new channels within the
|
||||
// network.
|
||||
ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
|
||||
err = dave.WaitForNetworkChannelOpen(ctxt, chanPointDave)
|
||||
err = dave.WaitForNetworkChannelOpen(chanPointDave)
|
||||
if err != nil {
|
||||
t.Fatalf("dave didn't advertise his channel: %v", err)
|
||||
}
|
||||
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
|
||||
err = carol.WaitForNetworkChannelOpen(ctxt, chanPointCarol)
|
||||
err = carol.WaitForNetworkChannelOpen(chanPointCarol)
|
||||
if err != nil {
|
||||
t.Fatalf("carol didn't advertise her channel in time: %v",
|
||||
err)
|
||||
|
@ -496,11 +496,9 @@ func fundChanAndCloseFromImportedAccount(t *harnessTest, srcNode, destNode,
|
||||
},
|
||||
OutputIndex: upd.ChanPending.OutputIndex,
|
||||
}
|
||||
ctxt, cancel = context.WithTimeout(ctxb, defaultTimeout)
|
||||
defer cancel()
|
||||
err = srcNode.WaitForNetworkChannelOpen(ctxt, chanPoint)
|
||||
err = srcNode.WaitForNetworkChannelOpen(chanPoint)
|
||||
require.NoError(t.t, err)
|
||||
err = destNode.WaitForNetworkChannelOpen(ctxt, chanPoint)
|
||||
err = destNode.WaitForNetworkChannelOpen(chanPoint)
|
||||
require.NoError(t.t, err)
|
||||
|
||||
// Send a test payment to ensure the channel is operating as normal.
|
||||
|
308
lntest/node.go
@ -6,18 +6,16 @@ import (
|
||||
"crypto/rand"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"flag"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/btcsuite/btcd/chaincfg"
|
||||
@ -26,7 +24,6 @@ import (
|
||||
"github.com/btcsuite/btcd/rpcclient"
|
||||
"github.com/btcsuite/btcd/wire"
|
||||
"github.com/btcsuite/btcutil"
|
||||
"github.com/go-errors/errors"
|
||||
"github.com/jackc/pgx/v4/pgxpool"
|
||||
"github.com/lightningnetwork/lnd/chanbackup"
|
||||
"github.com/lightningnetwork/lnd/lnrpc"
|
||||
@ -39,16 +36,13 @@ import (
|
||||
"github.com/lightningnetwork/lnd/lntest/wait"
|
||||
"github.com/lightningnetwork/lnd/macaroons"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/status"
|
||||
"gopkg.in/macaroon.v2"
|
||||
)
|
||||
|
||||
const (
|
||||
// defaultNodePort is the start of the range for listening ports of
|
||||
// harness nodes. Ports are monotonically increasing starting from this
|
||||
// number and are determined by the results of nextAvailablePort().
|
||||
defaultNodePort = 5555
|
||||
|
||||
// logPubKeyBytes is the number of bytes of the node's PubKey that will
|
||||
// be appended to the log file name. The whole PubKey is too long and
|
||||
// not really necessary to quickly identify what node produced which
|
||||
@ -59,110 +53,24 @@ const (
|
||||
// release of announcements by AuthenticatedGossiper to the network.
|
||||
trickleDelay = 50
|
||||
|
||||
// listenerFormat is the format string that is used to generate local
|
||||
// listener addresses.
|
||||
listenerFormat = "127.0.0.1:%d"
|
||||
|
||||
// NeutrinoBackendName is the name of the neutrino backend.
|
||||
NeutrinoBackendName = "neutrino"
|
||||
|
||||
postgresDsn = "postgres://postgres:postgres@localhost:6432/%s?sslmode=disable"
|
||||
|
||||
// commitInterval specifies the maximum interval the graph database
|
||||
// will wait between attempting to flush a batch of modifications to
|
||||
// disk(db.batch-commit-interval).
|
||||
commitInterval = 10 * time.Millisecond
|
||||
)
|
||||
|
||||
var (
|
||||
// numActiveNodes is the number of active nodes within the test network.
|
||||
numActiveNodes = 0
|
||||
numActiveNodesMtx sync.Mutex
|
||||
|
||||
// lastPort is the last port determined to be free for use by a new
|
||||
// node. It should be used atomically.
|
||||
lastPort uint32 = defaultNodePort
|
||||
|
||||
// logOutput is a flag that can be set to append the output from the
|
||||
// seed nodes to log files.
|
||||
logOutput = flag.Bool("logoutput", false,
|
||||
"log output from node n to file output-n.log")
|
||||
|
||||
// logSubDir is the default directory where the logs are written to if
|
||||
// logOutput is true.
|
||||
logSubDir = flag.String("logdir", ".", "default dir to write logs to")
|
||||
|
||||
// goroutineDump is a flag that can be set to dump the active
|
||||
// goroutines of test nodes on failure.
|
||||
goroutineDump = flag.Bool("goroutinedump", false,
|
||||
"write goroutine dump from node n to file pprof-n.log")
|
||||
|
||||
// btcdExecutable is the full path to the btcd binary.
|
||||
btcdExecutable = flag.String(
|
||||
"btcdexec", "", "full path to btcd binary",
|
||||
)
|
||||
)
|
||||
|
||||
func postgresDatabaseDsn(dbName string) string {
|
||||
return fmt.Sprintf(postgresDsn, dbName)
|
||||
}
|
||||
|
||||
// NextAvailablePort returns the first port that is available for listening by
|
||||
// a new node. It panics if no port is found and the maximum available TCP port
|
||||
// is reached.
|
||||
func NextAvailablePort() int {
|
||||
port := atomic.AddUint32(&lastPort, 1)
|
||||
for port < 65535 {
|
||||
// If there are no errors while attempting to listen on this
|
||||
// port, close the socket and return it as available. While it
|
||||
// could be the case that some other process picks up this port
|
||||
// between the time the socket is closed and it's reopened in
|
||||
// the harness node, in practice in CI servers this seems much
|
||||
// less likely than simply some other process already being
|
||||
// bound at the start of the tests.
|
||||
addr := fmt.Sprintf(listenerFormat, port)
|
||||
l, err := net.Listen("tcp4", addr)
|
||||
if err == nil {
|
||||
err := l.Close()
|
||||
if err == nil {
|
||||
return int(port)
|
||||
}
|
||||
}
|
||||
port = atomic.AddUint32(&lastPort, 1)
|
||||
}
|
||||
|
||||
// No ports available? Must be a mistake.
|
||||
panic("no ports available for listening")
|
||||
}
|
||||
|
||||
// ApplyPortOffset adds the given offset to the lastPort variable, making it
|
||||
// possible to run the tests in parallel without colliding on the same ports.
|
||||
func ApplyPortOffset(offset uint32) {
|
||||
_ = atomic.AddUint32(&lastPort, offset)
|
||||
}
|
||||
|
||||
// GetLogDir returns the passed --logdir flag or the default value if it wasn't
|
||||
// set.
|
||||
func GetLogDir() string {
|
||||
if logSubDir != nil && *logSubDir != "" {
|
||||
return *logSubDir
|
||||
}
|
||||
return "."
|
||||
}
|
||||
|
||||
// GetBtcdBinary returns the full path to the binary of the custom built btcd
|
||||
// executable or an empty string if none is set.
|
||||
func GetBtcdBinary() string {
|
||||
if btcdExecutable != nil {
|
||||
return *btcdExecutable
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
|
||||
// GenerateBtcdListenerAddresses is a function that returns two listener
|
||||
// addresses with unique ports and should be used to overwrite rpctest's default
|
||||
// generator which is prone to use colliding ports.
|
||||
func GenerateBtcdListenerAddresses() (string, string) {
|
||||
return fmt.Sprintf(listenerFormat, NextAvailablePort()),
|
||||
fmt.Sprintf(listenerFormat, NextAvailablePort())
|
||||
}
|
||||
|
||||
// generateListeningPorts returns four ints representing ports to listen on
|
||||
// designated for the current lightning network test. This returns the next
|
||||
// available ports for the p2p, rpc, rest and profiling services.
|
||||
@ -286,7 +194,7 @@ func (cfg NodeConfig) genArgs() []string {
|
||||
args = append(args, "--nobootstrap")
|
||||
args = append(args, "--debuglevel=debug")
|
||||
args = append(args, "--bitcoin.defaultchanconfs=1")
|
||||
args = append(args, fmt.Sprintf("--db.batch-commit-interval=%v", 10*time.Millisecond))
|
||||
args = append(args, fmt.Sprintf("--db.batch-commit-interval=%v", commitInterval))
|
||||
args = append(args, fmt.Sprintf("--bitcoin.defaultremotedelay=%v", DefaultCSV))
|
||||
args = append(args, fmt.Sprintf("--rpclisten=%v", cfg.RPCAddr()))
|
||||
args = append(args, fmt.Sprintf("--restlisten=%v", cfg.RESTAddr()))
|
||||
@ -390,10 +298,8 @@ type HarnessNode struct {
pidFile string
logFile *os.File

// processExit is a channel that's closed once it's detected that the
// process this instance of HarnessNode is bound to has exited.
processExit chan struct{}

// chanWatchRequests receives a request for watching a particular event
// for a given channel.
chanWatchRequests chan *chanWatchRequest

// For each outpoint, we'll track an integer which denotes the number of
@ -410,8 +316,13 @@ type HarnessNode struct {
// node and the outpoint.
policyUpdates policyUpdateMap

quit chan struct{}
wg sync.WaitGroup
// runCtx is a context with cancel method. It's used to signal when the
// node needs to quit, and used as the parent context when spawning
// children contexts for RPC requests.
runCtx context.Context
cancel context.CancelFunc

wg sync.WaitGroup

lnrpc.LightningClient

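The quit channel above is replaced by a single runCtx/cancel pair that both signals shutdown and parents every per-request context. A minimal, self-contained sketch of that pattern (illustrative only, not lnd code; the node type and names below are invented for the example):

package main

import (
	"context"
	"fmt"
	"time"
)

// node mimics the HarnessNode idea: one long-lived runCtx plus its cancel
// function replace a quit channel and ad-hoc context.Background() calls.
type node struct {
	runCtx context.Context
	cancel context.CancelFunc
}

func newNode() *node {
	ctx, cancel := context.WithCancel(context.Background())
	return &node{runCtx: ctx, cancel: cancel}
}

// rpcCall derives a per-request context from runCtx, so it ends either when
// the request times out or when the node as a whole is shut down.
func (n *node) rpcCall(timeout time.Duration) error {
	ctx, cancel := context.WithTimeout(n.runCtx, timeout)
	defer cancel()

	select {
	case <-time.After(50 * time.Millisecond): // simulated work
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

func main() {
	n := newNode()
	fmt.Println("first call:", n.rpcCall(time.Second))

	// Shutting the node down cancels every context derived from runCtx.
	n.cancel()
	fmt.Println("after shutdown:", n.rpcCall(time.Second))
}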
@ -728,7 +639,10 @@ func renameFile(fromFileName, toFileName string) {
func (hn *HarnessNode) start(lndBinary string, lndError chan<- error,
wait bool) error {

hn.quit = make(chan struct{})
// Init the runCtx.
ctxt, cancel := context.WithCancel(context.Background())
hn.runCtx = ctxt
hn.cancel = cancel

args := hn.Cfg.genArgs()
hn.cmd = exec.Command(lndBinary, args...)
@ -826,19 +740,15 @@ func (hn *HarnessNode) start(lndBinary string, lndError chan<- error,
|
||||
|
||||
// Launch a new goroutine which that bubbles up any potential fatal
|
||||
// process errors to the goroutine running the tests.
|
||||
hn.processExit = make(chan struct{})
|
||||
hn.wg.Add(1)
|
||||
go func() {
|
||||
defer hn.wg.Done()
|
||||
|
||||
err := hn.cmd.Wait()
|
||||
if err != nil {
|
||||
lndError <- errors.Errorf("%v\n%v\n", err, errb.String())
|
||||
lndError <- fmt.Errorf("%v\n%v", err, errb.String())
|
||||
}
|
||||
|
||||
// Signal any onlookers that this process has exited.
|
||||
close(hn.processExit)
|
||||
|
||||
// Make sure log file is closed and renamed if necessary.
|
||||
finalizeLogfile()
|
||||
|
||||
@ -849,7 +759,12 @@ func (hn *HarnessNode) start(lndBinary string, lndError chan<- error,
|
||||
|
||||
// Write process ID to a file.
|
||||
if err := hn.writePidFile(); err != nil {
|
||||
hn.cmd.Process.Kill()
|
||||
err = fmt.Errorf("writePidFile err: %w", err)
|
||||
cmdErr := hn.cmd.Process.Kill()
|
||||
if cmdErr != nil {
|
||||
err = fmt.Errorf("kill process got err: %w: %v",
|
||||
cmdErr, err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
@ -859,12 +774,17 @@ func (hn *HarnessNode) start(lndBinary string, lndError chan<- error,
|
||||
return nil
|
||||
}
|
||||
|
||||
// Since Stop uses the LightningClient to stop the node, if we fail to get a
|
||||
// connected client, we have to kill the process.
|
||||
// Since Stop uses the LightningClient to stop the node, if we fail to
|
||||
// get a connected client, we have to kill the process.
|
||||
useMacaroons := !hn.Cfg.HasSeed
|
||||
conn, err := hn.ConnectRPC(useMacaroons)
|
||||
if err != nil {
|
||||
hn.cmd.Process.Kill()
|
||||
err = fmt.Errorf("ConnectRPC err: %w", err)
|
||||
cmdErr := hn.cmd.Process.Kill()
|
||||
if cmdErr != nil {
|
||||
err = fmt.Errorf("kill process got err: %w: %v",
|
||||
cmdErr, err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
@ -910,7 +830,7 @@ func (hn *HarnessNode) waitForState(conn grpc.ClientConnInterface,
|
||||
predicate func(state lnrpc.WalletState) bool) error {
|
||||
|
||||
stateClient := lnrpc.NewStateClient(conn)
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
ctx, cancel := context.WithCancel(hn.runCtx)
|
||||
defer cancel()
|
||||
|
||||
stateStream, err := stateClient.SubscribeState(
|
||||
@ -1004,13 +924,13 @@ func (hn *HarnessNode) initClientWhenReady(timeout time.Duration) error {
|
||||
|
||||
// Init initializes a harness node by passing the init request via rpc. After
|
||||
// the request is submitted, this method will block until a
|
||||
// macaroon-authenticated RPC connection can be established to the harness node.
|
||||
// Once established, the new connection is used to initialize the
|
||||
// macaroon-authenticated RPC connection can be established to the harness
|
||||
// node. Once established, the new connection is used to initialize the
|
||||
// LightningClient and subscribes the HarnessNode to topology changes.
|
||||
func (hn *HarnessNode) Init(ctx context.Context,
|
||||
func (hn *HarnessNode) Init(
|
||||
initReq *lnrpc.InitWalletRequest) (*lnrpc.InitWalletResponse, error) {
|
||||
|
||||
ctxt, cancel := context.WithTimeout(ctx, DefaultTimeout)
|
||||
ctxt, cancel := context.WithTimeout(hn.runCtx, DefaultTimeout)
|
||||
defer cancel()
|
||||
response, err := hn.InitWallet(ctxt, initReq)
|
||||
if err != nil {
|
||||
@ -1049,11 +969,11 @@ func (hn *HarnessNode) Init(ctx context.Context,
|
||||
// a macaroon-authenticated RPC connection can be established to the harness
|
||||
// node. Once established, the new connection is used to initialize the
|
||||
// LightningClient and subscribes the HarnessNode to topology changes.
|
||||
func (hn *HarnessNode) InitChangePassword(ctx context.Context,
|
||||
func (hn *HarnessNode) InitChangePassword(
|
||||
chngPwReq *lnrpc.ChangePasswordRequest) (*lnrpc.ChangePasswordResponse,
|
||||
error) {
|
||||
|
||||
ctxt, cancel := context.WithTimeout(ctx, DefaultTimeout)
|
||||
ctxt, cancel := context.WithTimeout(hn.runCtx, DefaultTimeout)
|
||||
defer cancel()
|
||||
response, err := hn.ChangePassword(ctxt, chngPwReq)
|
||||
if err != nil {
|
||||
@ -1091,10 +1011,9 @@ func (hn *HarnessNode) InitChangePassword(ctx context.Context,
|
||||
// should be called after the restart of a HarnessNode that was created with a
|
||||
// seed+password. Once this method returns, the HarnessNode will be ready to
|
||||
// accept normal gRPC requests and harness command.
|
||||
func (hn *HarnessNode) Unlock(ctx context.Context,
|
||||
unlockReq *lnrpc.UnlockWalletRequest) error {
|
||||
|
||||
ctxt, _ := context.WithTimeout(ctx, DefaultTimeout)
|
||||
func (hn *HarnessNode) Unlock(unlockReq *lnrpc.UnlockWalletRequest) error {
|
||||
ctxt, cancel := context.WithTimeout(hn.runCtx, DefaultTimeout)
|
||||
defer cancel()
|
||||
|
||||
// Otherwise, we'll need to unlock the node before it's able to start
|
||||
// up properly.
|
||||
@ -1110,8 +1029,7 @@ func (hn *HarnessNode) Unlock(ctx context.Context,
|
||||
// waitTillServerStarted makes a subscription to the server's state change and
|
||||
// blocks until the server is in state ServerActive.
|
||||
func (hn *HarnessNode) waitTillServerStarted() error {
|
||||
ctxb := context.Background()
|
||||
ctxt, cancel := context.WithTimeout(ctxb, NodeStartTimeout)
|
||||
ctxt, cancel := context.WithTimeout(hn.runCtx, NodeStartTimeout)
|
||||
defer cancel()
|
||||
|
||||
client, err := hn.StateClient.SubscribeState(
|
||||
@ -1277,7 +1195,7 @@ func (hn *HarnessNode) ConnectRPCWithMacaroon(mac *macaroon.Macaroon) (
|
||||
grpc.WithTransportCredentials(tlsCreds),
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), DefaultTimeout)
|
||||
ctx, cancel := context.WithTimeout(hn.runCtx, DefaultTimeout)
|
||||
defer cancel()
|
||||
|
||||
if mac == nil {
|
||||
@ -1331,7 +1249,7 @@ func (hn *HarnessNode) cleanup() error {
// Stop attempts to stop the active lnd process.
func (hn *HarnessNode) stop() error {
// Do nothing if the process is not running.
if hn.processExit == nil {
if hn.runCtx == nil {
return nil
}

@ -1341,10 +1259,9 @@ func (hn *HarnessNode) stop() error {
// Don't watch for error because sometimes the RPC connection gets
// closed before a response is returned.
req := lnrpc.StopRequest{}
ctx := context.Background()

err := wait.NoError(func() error {
_, err := hn.LightningClient.StopDaemon(ctx, &req)
_, err := hn.LightningClient.StopDaemon(hn.runCtx, &req)
switch {
case err == nil:
return nil
@ -1362,18 +1279,28 @@ func (hn *HarnessNode) stop() error {
}
}

// Wait for lnd process and other goroutines to exit.
select {
case <-hn.processExit:
case <-time.After(DefaultTimeout * 2):
return fmt.Errorf("process did not exit")
// Stop the runCtx and wait for goroutines to finish.
hn.cancel()

// Wait for lnd process to exit.
err := wait.NoError(func() error {
if hn.cmd.ProcessState == nil {
return fmt.Errorf("process did not exit")
}

if !hn.cmd.ProcessState.Exited() {
return fmt.Errorf("process did not exit")
}

// Wait for goroutines to be finished.
hn.wg.Wait()

return nil
}, DefaultTimeout*2)
if err != nil {
return err
}

close(hn.quit)
hn.wg.Wait()

hn.quit = nil
hn.processExit = nil
hn.LightningClient = nil
hn.WalletUnlockerClient = nil
hn.Watchtower = nil
@ -1381,13 +1308,25 @@ func (hn *HarnessNode) stop() error {

// Close any attempts at further grpc connections.
if hn.conn != nil {
err := hn.conn.Close()
if err != nil &&
!strings.Contains(err.Error(), "connection is closing") {
err := status.Code(hn.conn.Close())
switch err {
case codes.OK:
return nil

return fmt.Errorf("error attempting to stop grpc "+
"client: %v", err)
// When the context is canceled above, we might get the
// following error as the context is no longer active.
case codes.Canceled:
return nil

case codes.Unknown:
return fmt.Errorf("unknown error attempting to stop "+
"grpc client: %v", err)

default:
return fmt.Errorf("error attempting to stop "+
"grpc client: %v", err)
}

}

return nil
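The rewritten stop() classifies the error returned by hn.conn.Close() via its gRPC status code instead of matching on the error string. A rough, self-contained sketch of that classification (assumes the google.golang.org/grpc module; classifyCloseErr is an invented helper, not part of lnd):

package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// classifyCloseErr mirrors the idea in stop(): treat OK and Canceled as a
// clean close, and surface anything else as an error.
func classifyCloseErr(err error) error {
	switch status.Code(err) {
	case codes.OK, codes.Canceled:
		return nil
	default:
		return fmt.Errorf("error attempting to stop grpc client: %v", err)
	}
}

func main() {
	// status.Code(nil) reports codes.OK, so a nil close error passes through.
	fmt.Println(classifyCloseErr(nil))
	fmt.Println(classifyCloseErr(status.Error(codes.Canceled, "context canceled")))
	fmt.Println(classifyCloseErr(status.Error(codes.Unavailable, "transport closing")))
}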
@ -1464,12 +1403,12 @@ func getChanPointFundingTxid(chanPoint *lnrpc.ChannelPoint) ([]byte, error) {
return txid, nil
}

func checkChanPointInGraph(ctx context.Context,
node *HarnessNode, chanPoint wire.OutPoint) bool {
func (hn *HarnessNode) checkChanPointInGraph(chanPoint wire.OutPoint) bool {

ctxt, cancel := context.WithTimeout(ctx, DefaultTimeout)
ctxt, cancel := context.WithTimeout(hn.runCtx, DefaultTimeout)
defer cancel()
chanGraph, err := node.DescribeGraph(ctxt, &lnrpc.ChannelGraphRequest{})

chanGraph, err := hn.DescribeGraph(ctxt, &lnrpc.ChannelGraphRequest{})
if err != nil {
return false
}
@ -1534,7 +1473,7 @@ func (hn *HarnessNode) lightningNetworkWatcher() {
hn.handlePolicyUpdateWatchRequest(watchRequest)
}

case <-hn.quit:
case <-hn.runCtx.Done():
return
}
}
@ -1544,9 +1483,12 @@ func (hn *HarnessNode) lightningNetworkWatcher() {
// outpoint is seen as being fully advertised within the network. A channel is
// considered "fully advertised" once both of its directional edges has been
// advertised within the test Lightning Network.
func (hn *HarnessNode) WaitForNetworkChannelOpen(ctx context.Context,
func (hn *HarnessNode) WaitForNetworkChannelOpen(
chanPoint *lnrpc.ChannelPoint) error {

ctxt, cancel := context.WithTimeout(hn.runCtx, DefaultTimeout)
defer cancel()

eventChan := make(chan struct{})

op, err := MakeOutpoint(chanPoint)
@ -1564,7 +1506,7 @@ func (hn *HarnessNode) WaitForNetworkChannelOpen(ctx context.Context,
select {
case <-eventChan:
return nil
case <-ctx.Done():
case <-ctxt.Done():
return fmt.Errorf("channel:%s not opened before timeout: %s",
op, hn)
}
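With the timeout now derived internally from runCtx, the wait reduces to selecting on an event channel versus the derived context. A small standalone sketch of that select pattern (names are illustrative, not the lnd implementation):

package main

import (
	"context"
	"fmt"
	"time"
)

// waitForEvent blocks until eventChan fires or the context derived from
// parent times out, mirroring the shape of WaitForNetworkChannelOpen.
func waitForEvent(parent context.Context, eventChan <-chan struct{},
	timeout time.Duration) error {

	ctxt, cancel := context.WithTimeout(parent, timeout)
	defer cancel()

	select {
	case <-eventChan:
		return nil
	case <-ctxt.Done():
		return fmt.Errorf("event not seen before timeout: %w", ctxt.Err())
	}
}

func main() {
	runCtx, cancel := context.WithCancel(context.Background())
	defer cancel()

	eventChan := make(chan struct{})
	go func() {
		// Simulate the topology watcher reporting the channel as open.
		time.Sleep(100 * time.Millisecond)
		close(eventChan)
	}()

	fmt.Println(waitForEvent(runCtx, eventChan, time.Second))
}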
@ -1574,9 +1516,12 @@ func (hn *HarnessNode) WaitForNetworkChannelOpen(ctx context.Context,
|
||||
// outpoint is seen as closed within the network. A channel is considered
|
||||
// closed once a transaction spending the funding outpoint is seen within a
|
||||
// confirmed block.
|
||||
func (hn *HarnessNode) WaitForNetworkChannelClose(ctx context.Context,
|
||||
func (hn *HarnessNode) WaitForNetworkChannelClose(
|
||||
chanPoint *lnrpc.ChannelPoint) error {
|
||||
|
||||
ctxt, cancel := context.WithTimeout(hn.runCtx, DefaultTimeout)
|
||||
defer cancel()
|
||||
|
||||
eventChan := make(chan struct{})
|
||||
|
||||
op, err := MakeOutpoint(chanPoint)
|
||||
@ -1594,7 +1539,7 @@ func (hn *HarnessNode) WaitForNetworkChannelClose(ctx context.Context,
|
||||
select {
|
||||
case <-eventChan:
|
||||
return nil
|
||||
case <-ctx.Done():
|
||||
case <-ctxt.Done():
|
||||
return fmt.Errorf("channel:%s not closed before timeout: "+
|
||||
"%s", op, hn)
|
||||
}
|
||||
@ -1602,10 +1547,13 @@ func (hn *HarnessNode) WaitForNetworkChannelClose(ctx context.Context,
|
||||
|
||||
// WaitForChannelPolicyUpdate will block until a channel policy with the target
|
||||
// outpoint and advertisingNode is seen within the network.
|
||||
func (hn *HarnessNode) WaitForChannelPolicyUpdate(ctx context.Context,
|
||||
func (hn *HarnessNode) WaitForChannelPolicyUpdate(
|
||||
advertisingNode string, policy *lnrpc.RoutingPolicy,
|
||||
chanPoint *lnrpc.ChannelPoint, includeUnannounced bool) error {
|
||||
|
||||
ctxt, cancel := context.WithTimeout(hn.runCtx, DefaultTimeout)
|
||||
defer cancel()
|
||||
|
||||
eventChan := make(chan struct{})
|
||||
|
||||
op, err := MakeOutpoint(chanPoint)
|
||||
@ -1642,7 +1590,7 @@ func (hn *HarnessNode) WaitForChannelPolicyUpdate(ctx context.Context,
|
||||
case <-eventChan:
|
||||
return nil
|
||||
|
||||
case <-ctx.Done():
|
||||
case <-ctxt.Done():
|
||||
return fmt.Errorf("channel:%s policy not updated "+
|
||||
"before timeout: [%s:%v] %s", op,
|
||||
advertisingNode, policy, hn.String())
|
||||
@ -1654,12 +1602,15 @@ func (hn *HarnessNode) WaitForChannelPolicyUpdate(ctx context.Context,
// the blockchain. If the passed context object has a set timeout, it will
// continually poll until the timeout has elapsed. In the case that the chain
// isn't synced before the timeout is up, this function will return an error.
func (hn *HarnessNode) WaitForBlockchainSync(ctx context.Context) error {
func (hn *HarnessNode) WaitForBlockchainSync() error {
ctxt, cancel := context.WithTimeout(hn.runCtx, DefaultTimeout)
defer cancel()

ticker := time.NewTicker(time.Millisecond * 100)
defer ticker.Stop()

for {
resp, err := hn.GetInfo(ctx, &lnrpc.GetInfoRequest{})
resp, err := hn.GetInfo(ctxt, &lnrpc.GetInfoRequest{})
if err != nil {
return err
}
@ -1668,10 +1619,10 @@ func (hn *HarnessNode) WaitForBlockchainSync(ctx context.Context) error {
}

select {
case <-ctx.Done():
case <-ctxt.Done():
return fmt.Errorf("timeout while waiting for " +
"blockchain sync")
case <-hn.quit:
case <-hn.runCtx.Done():
return nil
case <-ticker.C:
}
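The sync loop above polls GetInfo on a ticker and stops as soon as the derived context or the node's runCtx is done. A hedged standalone sketch of that poll-until-synced-or-timeout shape (the isSynced probe below is a stand-in for the GetInfo call, not lnd code):

package main

import (
	"context"
	"fmt"
	"time"
)

// pollUntil runs probe every 100ms until it reports true, the timeout hits,
// or the parent context is cancelled, echoing WaitForBlockchainSync.
func pollUntil(parent context.Context, timeout time.Duration,
	probe func() bool) error {

	ctxt, cancel := context.WithTimeout(parent, timeout)
	defer cancel()

	ticker := time.NewTicker(100 * time.Millisecond)
	defer ticker.Stop()

	for {
		if probe() {
			return nil
		}

		select {
		case <-ctxt.Done():
			return fmt.Errorf("timeout while waiting for sync")
		case <-ticker.C:
		}
	}
}

func main() {
	start := time.Now()
	isSynced := func() bool { return time.Since(start) > 300*time.Millisecond }

	err := pollUntil(context.Background(), 2*time.Second, isSynced)
	fmt.Println("synced:", err == nil)
}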
@ -1680,13 +1631,14 @@ func (hn *HarnessNode) WaitForBlockchainSync(ctx context.Context) error {
|
||||
|
||||
// WaitForBalance waits until the node sees the expected confirmed/unconfirmed
|
||||
// balance within their wallet.
|
||||
func (hn *HarnessNode) WaitForBalance(expectedBalance btcutil.Amount, confirmed bool) error {
|
||||
ctx := context.Background()
|
||||
func (hn *HarnessNode) WaitForBalance(expectedBalance btcutil.Amount,
|
||||
confirmed bool) error {
|
||||
|
||||
req := &lnrpc.WalletBalanceRequest{}
|
||||
|
||||
var lastBalance btcutil.Amount
|
||||
doesBalanceMatch := func() bool {
|
||||
balance, err := hn.WalletBalance(ctx, req)
|
||||
balance, err := hn.WalletBalance(hn.runCtx, req)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
@ -1799,7 +1751,7 @@ func (hn *HarnessNode) handleOpenChannelWatchRequest(req *chanWatchRequest) {
|
||||
// node. This lets us handle the case where a node has already seen a
|
||||
// channel before a notification has been requested, causing us to miss
|
||||
// it.
|
||||
chanFound := checkChanPointInGraph(context.Background(), hn, targetChan)
|
||||
chanFound := hn.checkChanPointInGraph(targetChan)
|
||||
if chanFound {
|
||||
close(req.eventChan)
|
||||
return
|
||||
@ -1889,16 +1841,14 @@ func (hn *HarnessNode) newTopologyClient(
|
||||
func (hn *HarnessNode) receiveTopologyClientStream(
|
||||
receiver chan *lnrpc.GraphTopologyUpdate) error {
|
||||
|
||||
ctxb := context.Background()
|
||||
|
||||
// Create a topology client to receive graph updates.
|
||||
client, err := hn.newTopologyClient(ctxb)
|
||||
client, err := hn.newTopologyClient(hn.runCtx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("create topologyClient failed: %v", err)
|
||||
}
|
||||
|
||||
// We use the context to time out when retrying graph subscription.
|
||||
ctxt, cancel := context.WithTimeout(ctxb, DefaultTimeout)
|
||||
ctxt, cancel := context.WithTimeout(hn.runCtx, DefaultTimeout)
|
||||
defer cancel()
|
||||
|
||||
for {
|
||||
@ -1917,12 +1867,12 @@ func (hn *HarnessNode) receiveTopologyClientStream(
|
||||
return fmt.Errorf("graph subscription: " +
|
||||
"router not started before timeout")
|
||||
case <-time.After(wait.PollInterval):
|
||||
case <-hn.quit:
|
||||
case <-hn.runCtx.Done():
|
||||
return nil
|
||||
}
|
||||
|
||||
// Re-create the topology client.
|
||||
client, err = hn.newTopologyClient(ctxb)
|
||||
client, err = hn.newTopologyClient(hn.runCtx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("create topologyClient "+
|
||||
"failed: %v", err)
|
||||
@ -1934,6 +1884,10 @@ func (hn *HarnessNode) receiveTopologyClientStream(
|
||||
// End of subscription stream. Do nothing and quit.
|
||||
return nil
|
||||
|
||||
case strings.Contains(err.Error(), context.Canceled.Error()):
|
||||
// End of subscription stream. Do nothing and quit.
|
||||
return nil
|
||||
|
||||
default:
|
||||
// An expected error is returned, return and leave it
|
||||
// to be handled by the caller.
|
||||
@ -1943,7 +1897,7 @@ func (hn *HarnessNode) receiveTopologyClientStream(
|
||||
// Send the update or quit.
|
||||
select {
|
||||
case receiver <- update:
|
||||
case <-hn.quit:
|
||||
case <-hn.runCtx.Done():
|
||||
return nil
|
||||
}
|
||||
}
|
||||
@ -2014,9 +1968,7 @@ func (hn *HarnessNode) handlePolicyUpdateWatchRequest(req *chanWatchRequest) {
|
||||
// the format defined in type policyUpdateMap.
|
||||
func (hn *HarnessNode) getChannelPolicies(include bool) policyUpdateMap {
|
||||
|
||||
ctxt, cancel := context.WithTimeout(
|
||||
context.Background(), DefaultTimeout,
|
||||
)
|
||||
ctxt, cancel := context.WithTimeout(hn.runCtx, DefaultTimeout)
|
||||
defer cancel()
|
||||
|
||||
graph, err := hn.DescribeGraph(ctxt, &lnrpc.ChannelGraphRequest{
|
||||
|
116
lntest/test_common.go
Normal file
@ -0,0 +1,116 @@
package lntest

import (
"flag"
"fmt"
"net"
"sync/atomic"
)

const (
// defaultNodePort is the start of the range for listening ports of
// harness nodes. Ports are monotonically increasing starting from this
// number and are determined by the results of nextAvailablePort().
defaultNodePort = 5555

// listenerFormat is the format string that is used to generate local
// listener addresses.
listenerFormat = "127.0.0.1:%d"

// NeutrinoBackendName is the name of the neutrino backend.
NeutrinoBackendName = "neutrino"
)

type DatabaseBackend int

const (
BackendBbolt DatabaseBackend = iota
BackendEtcd
BackendPostgres
)

var (
// lastPort is the last port determined to be free for use by a new
// node. It should be used atomically.
lastPort uint32 = defaultNodePort

// logOutput is a flag that can be set to append the output from the
// seed nodes to log files.
logOutput = flag.Bool("logoutput", false,
"log output from node n to file output-n.log")

// logSubDir is the default directory where the logs are written to if
// logOutput is true.
logSubDir = flag.String("logdir", ".", "default dir to write logs to")

// goroutineDump is a flag that can be set to dump the active
// goroutines of test nodes on failure.
goroutineDump = flag.Bool("goroutinedump", false,
"write goroutine dump from node n to file pprof-n.log")

// btcdExecutable is the full path to the btcd binary.
btcdExecutable = flag.String(
"btcdexec", "", "full path to btcd binary",
)
)

// NextAvailablePort returns the first port that is available for listening by
// a new node. It panics if no port is found and the maximum available TCP port
// is reached.
func NextAvailablePort() int {
port := atomic.AddUint32(&lastPort, 1)
for port < 65535 {
// If there are no errors while attempting to listen on this
// port, close the socket and return it as available. While it
// could be the case that some other process picks up this port
// between the time the socket is closed and it's reopened in
// the harness node, in practice in CI servers this seems much
// less likely than simply some other process already being
// bound at the start of the tests.
addr := fmt.Sprintf(listenerFormat, port)
l, err := net.Listen("tcp4", addr)
if err == nil {
err := l.Close()
if err == nil {
return int(port)
}
}
port = atomic.AddUint32(&lastPort, 1)
}

// No ports available? Must be a mistake.
panic("no ports available for listening")
}

// ApplyPortOffset adds the given offset to the lastPort variable, making it
// possible to run the tests in parallel without colliding on the same ports.
func ApplyPortOffset(offset uint32) {
_ = atomic.AddUint32(&lastPort, offset)
}

// GetLogDir returns the passed --logdir flag or the default value if it wasn't
// set.
func GetLogDir() string {
if logSubDir != nil && *logSubDir != "" {
return *logSubDir
}
return "."
}

// GetBtcdBinary returns the full path to the binary of the custom built btcd
// executable or an empty string if none is set.
func GetBtcdBinary() string {
if btcdExecutable != nil {
return *btcdExecutable
}

return ""
}

// GenerateBtcdListenerAddresses is a function that returns two listener
// addresses with unique ports and should be used to overwrite rpctest's
// default generator which is prone to use colliding ports.
func GenerateBtcdListenerAddresses() (string, string) {
return fmt.Sprintf(listenerFormat, NextAvailablePort()),
fmt.Sprintf(listenerFormat, NextAvailablePort())
}
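A short usage sketch for the port helpers moved into lntest/test_common.go (hypothetical caller code, not from the lnd repo; the tranche offset value is an assumption): each parallel test tranche offsets the shared port counter once, then asks for fresh ports as needed.

package main

import (
	"fmt"

	"github.com/lightningnetwork/lnd/lntest"
)

func main() {
	// Give each parallel test tranche its own slice of the port space so
	// two processes never race for the same listener (the offset value
	// here is just an example).
	lntest.ApplyPortOffset(2 * 1000)

	// Ask for a free port, e.g. for a node's RPC listener.
	fmt.Printf("rpc listener: 127.0.0.1:%d\n", lntest.NextAvailablePort())

	// Two distinct listener addresses for a btcd harness instance.
	p2p, rpc := lntest.GenerateBtcdListenerAddresses()
	fmt.Println("btcd listeners:", p2p, rpc)
}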