test: eliminate sleep by tightly polling for existence of node
This commit eliminates a sleep in the testHtlcErrorPropagation test by instead polling for the existence of our target node in a tight loop. This is a small stop-gap until we get a “topology client” within the ChannelRouter. The approach is robust and can be reused in future tests until the notification client lands in the ChannelRouter.
parent 51e38d7544
commit 28aa092ac2

lnd_test.go: 34 changed lines (+28 −6)
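For context, the sleep-free pattern both new hunks below follow can be factored into a small reusable helper. The sketch that follows is illustrative only and not part of this patch (pollUntil is a hypothetical name): run a check on a short ticker until it succeeds, or give up once an overall timeout fires.

package testutil

import (
    "errors"
    "time"
)

// pollUntil runs check on every tick of interval until it returns nil,
// giving up once timeout has elapsed. It replaces a fixed sleep with a
// tight poll, so the caller proceeds as soon as the condition holds.
func pollUntil(check func() error, interval, timeout time.Duration) error {
    timeoutC := time.After(timeout)
    ticker := time.NewTicker(interval)
    // Stop the ticker so its resources are released when polling ends.
    defer ticker.Stop()

    for {
        select {
        case <-ticker.C:
            if err := check(); err == nil {
                return nil
            }
        case <-timeoutC:
            return errors.New("condition not met before timeout")
        }
    }
}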
@@ -402,13 +402,14 @@ func testChannelForceClosure(net *networkHarness, t *harnessTest) {
     var sweepingTXID *chainhash.Hash
     var mempool []*chainhash.Hash
     mempoolTimeout := time.After(3 * time.Second)
-    checkMempoolTick := time.Tick(100 * time.Millisecond)
+    checkMempoolTick := time.NewTicker(100 * time.Millisecond)
+    defer checkMempoolTick.Stop()
 mempoolPoll:
     for {
         select {
         case <-mempoolTimeout:
             t.Fatalf("sweep tx not found in mempool")
-        case <-checkMempoolTick:
+        case <-checkMempoolTick.C:
             mempool, err = net.Miner.Node.GetRawMempool()
             if err != nil {
                 t.Fatalf("unable to fetch node's mempool: %v", err)
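A note on the first hunk: time.Tick returns only a receive channel, so the Ticker behind it can never be stopped, and one allocated per test run is simply leaked. time.NewTicker returns the *time.Ticker itself, letting the loop release it with a deferred Stop. A minimal sketch contrasting the two forms:

package main

import (
    "fmt"
    "time"
)

func main() {
    // time.Tick hides the Ticker, so there is no way to stop it; this is
    // only appropriate for tickers that live for the whole program.
    leaky := time.Tick(100 * time.Millisecond)
    <-leaky
    fmt.Println("ticked (unstoppable ticker)")

    // time.NewTicker exposes the Ticker, so a bounded loop can clean up.
    ticker := time.NewTicker(100 * time.Millisecond)
    defer ticker.Stop()
    <-ticker.C
    fmt.Println("ticked (stoppable ticker)")
}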
@@ -1477,9 +1478,31 @@ func testHtlcErrorPropagation(net *networkHarness, t *harnessTest) {
     chanPointBob := openChannelAndAssert(t, net, ctxt, net.Bob, carol,
         chanAmt, 0)
 
-    // TODO(roasbeef): remove sleep once topology notification hooks are
-    // in.
-    time.Sleep(time.Second * 1)
+    // Ensure that Alice has Carol in her routing table before proceeding.
+    nodeInfoReq := &lnrpc.NodeInfoRequest{
+        PubKey: carol.PubKeyStr,
+    }
+    checkTableTimeout := time.After(time.Second * 10)
+    checkTableTicker := time.NewTicker(100 * time.Millisecond)
+    defer checkTableTicker.Stop()
+
+out:
+    for {
+        select {
+        case <-checkTableTicker.C:
+            _, err := net.Alice.GetNodeInfo(ctxb, nodeInfoReq)
+            if err != nil && strings.Contains(err.Error(),
+                "unable to find") {
+
+                continue
+            }
+
+            break out
+        case <-checkTableTimeout:
+            t.Fatalf("carol's node announcement didn't propagate within " +
+                "the timeout period")
+        }
+    }
 
     // With the channels, open we can now start to test our multi-hop error
     // scenarios. First, we'll generate an invoice from carol that we'll
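For comparison, the wait in the second hunk could be expressed through the hypothetical pollUntil helper sketched earlier. In this fragment, net, ctxb, carol, and t come from the surrounding test harness; note that this simplified form retries on any error, whereas the patch retries only when the error contains "unable to find":

nodeInfoReq := &lnrpc.NodeInfoRequest{PubKey: carol.PubKeyStr}
err := pollUntil(func() error {
    // Succeeds once Carol's announcement has reached Alice's graph.
    _, err := net.Alice.GetNodeInfo(ctxb, nodeInfoReq)
    return err
}, 100*time.Millisecond, 10*time.Second)
if err != nil {
    t.Fatalf("carol's node announcement didn't propagate: %v", err)
}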
@@ -1508,7 +1531,6 @@ func testHtlcErrorPropagation(net *networkHarness, t *harnessTest) {
         Dest: carol.PubKey[:],
         Amt: payAmt,
     }
-    time.Sleep(time.Millisecond * 500)
     if err := alicePayStream.Send(sendReq); err != nil {
         t.Fatalf("unable to send payment: %v", err)
     }