multi: move timeouts into package wait
This commit moves the definition of timeout values into package `wait`, in preparation for the upcoming merge of `lntemp` and `lntest`.
parent 0bc86a3b4b
commit 106fbeae85
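For context, the usage pattern the diff below converts to looks roughly like the following sketch. It is not code from this commit: the `assertEventually` and `waitForMempoolTx` helpers are made up for illustration, while `wait.NoError` and `wait.DefaultTimeout` are the real helper and constant referenced throughout the hunks.

```go
package example

import (
	"fmt"

	"github.com/lightningnetwork/lnd/lntest/wait"
)

// assertEventually polls the given condition until it succeeds or the shared
// default timeout expires. After this commit the timeout constants live in
// package wait, so callers no longer need to reach through lntest.
func assertEventually(condition func() error) error {
	// Previously this would have read lntest.DefaultTimeout.
	return wait.NoError(condition, wait.DefaultTimeout)
}

// waitForMempoolTx is a hypothetical caller showing the polling style used by
// the harness assertions touched in this diff.
func waitForMempoolTx(hasTx func() bool) error {
	return assertEventually(func() error {
		if !hasTx() {
			return fmt.Errorf("tx not yet in mempool")
		}
		return nil
	})
}
```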
@@ -12,7 +12,6 @@ import (
 	"github.com/lightningnetwork/lnd/lnrpc/routerrpc"
 	"github.com/lightningnetwork/lnd/lntemp"
 	"github.com/lightningnetwork/lnd/lntemp/node"
-	"github.com/lightningnetwork/lnd/lntest"
 	"github.com/lightningnetwork/lnd/lntest/wait"
 	"github.com/stretchr/testify/require"
 )
@@ -272,7 +271,7 @@ func runAsyncPayments(ht *lntemp.HarnessTest, alice, bob *node.HarnessNode) {
 	settled := make(chan struct{})
 	defer close(settled)
 
-	timeout := lntest.AsyncBenchmarkTimeout * 2
+	timeout := wait.AsyncBenchmarkTimeout * 2
 	for i := 0; i < numInvoices; i++ {
 		payReq := bobPayReqs[i]
 		go func() {
@@ -376,7 +375,7 @@ func testBidirectionalAsyncPayments(ht *lntemp.HarnessTest) {
 	settled := make(chan struct{})
 	defer close(settled)
 
-	timeout := lntest.AsyncBenchmarkTimeout * 4
+	timeout := wait.AsyncBenchmarkTimeout * 4
 	send := func(node *node.HarnessNode, payReq string) {
 		req := &routerrpc.SendPaymentRequest{
 			PaymentRequest: payReq,
@@ -38,9 +38,9 @@ var (
 const (
 	testFeeBase         = 1e+6
 	defaultCSV          = lntest.DefaultCSV
-	defaultTimeout      = lntest.DefaultTimeout
-	minerMempoolTimeout = lntest.MinerMempoolTimeout
-	channelCloseTimeout = lntest.ChannelCloseTimeout
+	defaultTimeout      = wait.DefaultTimeout
+	minerMempoolTimeout = wait.MinerMempoolTimeout
+	channelCloseTimeout = wait.ChannelCloseTimeout
 	itestLndBinary      = "../../lnd-itest"
 	anchorSize          = 330
 	noFeeLimitMsat      = math.MaxInt64
@@ -1686,7 +1686,7 @@ func (h *HarnessTest) OpenMultiChannelsAsync(
 		case cp := <-r.result:
 			channelPoints = append(channelPoints, cp)
 
-		case <-time.After(lntest.ChannelOpenTimeout):
+		case <-time.After(wait.ChannelOpenTimeout):
 			require.Failf(h, "timeout", "wait channel point "+
 				"timeout for channel %s=>%s", r.Local.Name(),
 				r.Remote.Name())
@@ -198,7 +198,7 @@ func (h *HarnessMiner) AssertNumTxsInMempool(n int) []*chainhash.Hash {
 
 		return fmt.Errorf("want %v, got %v in mempool: %v",
 			n, len(mem), mem)
-	}, lntest.MinerMempoolTimeout)
+	}, wait.MinerMempoolTimeout)
 	require.NoError(h, err, "assert tx in mempool timeout")
 
 	return mem
@@ -286,7 +286,7 @@ func (h *HarnessMiner) AssertTxInMempool(txid *chainhash.Hash) *wire.MsgTx {
 
 		return fmt.Errorf("txid %v not found in mempool: %v", txid,
 			mempool)
-	}, lntest.MinerMempoolTimeout)
+	}, wait.MinerMempoolTimeout)
 
 	require.NoError(h, err, "timeout checking mempool")
 	return msgTx
@@ -377,7 +377,7 @@ func (h *HarnessMiner) AssertOutpointInMempool(op wire.OutPoint) *wire.MsgTx {
 		}
 
 		return fmt.Errorf("outpoint %v not found in mempool", op)
-	}, lntest.MinerMempoolTimeout)
+	}, wait.MinerMempoolTimeout)
 
 	require.NoError(h, err, "timeout checking mempool")
 
@@ -9,6 +9,7 @@ import (
 	"github.com/lightningnetwork/lnd/chanbackup"
 	"github.com/lightningnetwork/lnd/kvdb/etcd"
 	"github.com/lightningnetwork/lnd/lntest"
+	"github.com/lightningnetwork/lnd/lntest/wait"
 )
 
 const (
@@ -228,7 +229,7 @@ func (cfg *BaseNodeConfig) GenArgs() []string {
 	case lntest.BackendSqlite:
 		args = append(args, "--db.backend=sqlite")
 		args = append(args, fmt.Sprintf("--db.sqlite.busytimeout=%v",
-			lntest.SqliteBusyTimeout))
+			wait.SqliteBusyTimeout))
 	}
 
 	if cfg.FeeURL != "" {
@@ -324,7 +324,7 @@ func (hn *HarnessNode) ConnectRPCWithMacaroon(mac *macaroon.Macaroon) (
 			hn.Cfg.TLSCertPath, "",
 		)
 		return err
-	}, DefaultTimeout)
+	}, wait.DefaultTimeout)
 	if err != nil {
 		return nil, fmt.Errorf("error reading TLS cert: %v", err)
 	}
@@ -334,7 +334,7 @@ func (hn *HarnessNode) ConnectRPCWithMacaroon(mac *macaroon.Macaroon) (
 		grpc.WithTransportCredentials(tlsCreds),
 	}
 
-	ctx, cancel := context.WithTimeout(hn.runCtx, DefaultTimeout)
+	ctx, cancel := context.WithTimeout(hn.runCtx, wait.DefaultTimeout)
 	defer cancel()
 
 	if mac == nil {
@@ -354,7 +354,7 @@ func (hn *HarnessNode) ConnectRPCWithMacaroon(mac *macaroon.Macaroon) (
 func (hn *HarnessNode) ConnectRPC() (*grpc.ClientConn, error) {
 	// If we should use a macaroon, always take the admin macaroon as a
 	// default.
-	mac, err := hn.ReadMacaroon(hn.Cfg.AdminMacPath, DefaultTimeout)
+	mac, err := hn.ReadMacaroon(hn.Cfg.AdminMacPath, wait.DefaultTimeout)
 	if err != nil {
 		return nil, err
 	}
@@ -540,7 +540,7 @@ func (hn *HarnessNode) waitTillServerState(
 
 	for {
 		select {
-		case <-time.After(lntest.NodeStartTimeout):
+		case <-time.After(wait.NodeStartTimeout):
 			return fmt.Errorf("timeout waiting for server state")
 		case err := <-errChan:
 			return fmt.Errorf("receive server state err: %v", err)
@@ -581,7 +581,7 @@ func (hn *HarnessNode) initLightningClient() error {
 				"got err: %v", err)
 		}
 
-	case <-time.After(DefaultTimeout):
+	case <-time.After(wait.DefaultTimeout):
 		return fmt.Errorf("timeout creating topology client stream")
 	}
 
@@ -652,7 +652,7 @@ func (hn *HarnessNode) waitForProcessExit() {
 		hn.printErrf("wait process exit got err: %v", err)
 		break
 
-	case <-time.After(DefaultTimeout * 2):
+	case <-time.After(wait.DefaultTimeout * 2):
 		hn.printErrf("timeout waiting for process to exit")
 	}
 
@@ -705,7 +705,7 @@ func (hn *HarnessNode) Stop() error {
 		default:
 			return nil
 		}
-	}, DefaultTimeout)
+	}, wait.DefaultTimeout)
 	if err != nil {
 		return err
 	}
@@ -720,7 +720,7 @@ func (hn *HarnessNode) Stop() error {
 	// If the goroutines fail to finish before timeout, we'll print
 	// the error to console and continue.
 	select {
-	case <-time.After(DefaultTimeout):
+	case <-time.After(wait.DefaultTimeout):
 		hn.printErrf("timeout on wait group")
 	case <-done:
 	}
@@ -12,7 +12,6 @@ import (
 	"github.com/btcsuite/btcd/wire"
 	"github.com/lightningnetwork/lnd/lnrpc"
 	"github.com/lightningnetwork/lnd/lntemp/rpc"
-	"github.com/lightningnetwork/lnd/lntest"
 	"github.com/lightningnetwork/lnd/lntest/wait"
 	"github.com/lightningnetwork/lnd/lnutils"
 )
@@ -31,9 +30,6 @@ const (
 	// watchPolicyUpdate specifies that this is a request to watch a policy
 	// update event.
 	watchPolicyUpdate
-
-	// TODO(yy): remove once temp tests is finished.
-	DefaultTimeout = lntest.DefaultTimeout
 )
 
 // chanWatchRequest is a request to the lightningNetworkWatcher to be notified
@@ -123,7 +119,7 @@ func (nw *nodeWatcher) WaitForNumChannelUpdates(op wire.OutPoint,
 			"want %d, got %d", expected, num)
 	}
 
-	return wait.NoError(checkNumUpdates, DefaultTimeout)
+	return wait.NoError(checkNumUpdates, wait.DefaultTimeout)
 }
 
 // WaitForNumNodeUpdates will block until a given number of node updates has
@@ -142,8 +138,7 @@ func (nw *nodeWatcher) WaitForNumNodeUpdates(pubkey string,
 		return fmt.Errorf("timeout waiting for num node updates, "+
 			"want %d, got %d", expected, num)
 	}
-
-	err := wait.NoError(checkNumUpdates, DefaultTimeout)
+	err := wait.NoError(checkNumUpdates, wait.DefaultTimeout)
 
 	return updates, err
 }
@@ -161,7 +156,7 @@ func (nw *nodeWatcher) WaitForChannelOpen(chanPoint *lnrpc.ChannelPoint) error {
 		chanWatchType: watchOpenChannel,
 	}
 
-	timer := time.After(DefaultTimeout)
+	timer := time.After(wait.DefaultTimeout)
 	select {
 	case <-eventChan:
 		return nil
@@ -192,7 +187,7 @@ func (nw *nodeWatcher) WaitForChannelClose(
 		chanWatchType: watchCloseChannel,
 	}
 
-	timer := time.After(DefaultTimeout)
+	timer := time.After(wait.DefaultTimeout)
 	select {
 	case <-eventChan:
 		closedChan, ok := nw.state.closedChans.Load(op)
@@ -218,7 +213,7 @@ func (nw *nodeWatcher) WaitForChannelPolicyUpdate(
 	op := nw.rpc.MakeOutpoint(chanPoint)
 
 	ticker := time.NewTicker(wait.PollInterval)
-	timer := time.After(DefaultTimeout)
+	timer := time.After(wait.DefaultTimeout)
 	defer ticker.Stop()
 
 	eventChan := make(chan struct{})
@@ -15,14 +15,13 @@ import (
 	"github.com/lightningnetwork/lnd/lnrpc/walletrpc"
 	"github.com/lightningnetwork/lnd/lnrpc/watchtowerrpc"
 	"github.com/lightningnetwork/lnd/lnrpc/wtclientrpc"
-	"github.com/lightningnetwork/lnd/lntest"
+	"github.com/lightningnetwork/lnd/lntest/wait"
 	"github.com/stretchr/testify/require"
 	"google.golang.org/grpc"
 )
 
 const (
-	// TODO(yy): remove once temp tests is finished.
-	DefaultTimeout = lntest.DefaultTimeout
+	DefaultTimeout = wait.DefaultTimeout
 )
 
 // HarnessRPC wraps all lnd's RPC clients into a single struct for easier
@@ -58,6 +58,11 @@ const (
 	// will wait between attempting to flush a batch of modifications to
 	// disk(db.batch-commit-interval).
 	commitInterval = 10 * time.Millisecond
+
+	DefaultTimeout      = wait.DefaultTimeout
+	NodeStartTimeout    = wait.NodeStartTimeout
+	ChannelOpenTimeout  = wait.ChannelOpenTimeout
+	ChannelCloseTimeout = wait.ChannelCloseTimeout
 )
 
 var (
@@ -282,7 +287,7 @@ func (cfg *BaseNodeConfig) GenArgs() []string {
 	case BackendSqlite:
 		args = append(args, "--db.backend=sqlite")
 		args = append(args, fmt.Sprintf("--db.sqlite.busytimeout=%v",
-			SqliteBusyTimeout))
+			wait.SqliteBusyTimeout))
 	}
 
 	if cfg.FeeURL != "" {
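The constants added to the `lntest` const block above act as forwarding aliases: existing `lntest.DefaultTimeout` call sites keep compiling while the canonical definitions now live in `wait`. A minimal sketch of what that buys a caller (illustrative only, not part of the commit):

```go
package example

import (
	"time"

	"github.com/lightningnetwork/lnd/lntest"
	"github.com/lightningnetwork/lnd/lntest/wait"
)

// After this commit both expressions name the same duration: lntest simply
// re-exports the value defined in the wait package, so legacy call sites
// keep working while new code imports wait directly.
var (
	legacyTimeout time.Duration = lntest.DefaultTimeout
	newTimeout    time.Duration = wait.DefaultTimeout
)
```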
@@ -1,7 +1,7 @@
 //go:build !darwin && !kvdb_etcd && !kvdb_postgres
 // +build !darwin,!kvdb_etcd,!kvdb_postgres
 
-package lntest
+package wait
 
 import "time"
 
@@ -1,7 +1,7 @@
 //go:build darwin && !kvdb_etcd && !kvdb_postgres
 // +build darwin,!kvdb_etcd,!kvdb_postgres
 
-package lntest
+package wait
 
 import "time"
 
@@ -1,7 +1,7 @@
 //go:build kvdb_etcd || kvdb_postgres
 // +build kvdb_etcd kvdb_postgres
 
-package lntest
+package wait
 
 import "time"
 
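The last three hunks only rename the package of the build-tagged timeout files from `lntest` to `wait`; the constant definitions themselves are not shown in this diff. As a rough sketch of one such variant (constant names taken from the hunks above, but the durations are illustrative assumptions, not values from the commit):

```go
//go:build !darwin && !kvdb_etcd && !kvdb_postgres
// +build !darwin,!kvdb_etcd,!kvdb_postgres

package wait

import "time"

// NOTE: the durations below are placeholders chosen for illustration; only
// the constant names are grounded in the diff above.
const (
	// DefaultTimeout is used by wait calls that have no custom timeout.
	DefaultTimeout = 30 * time.Second

	// MinerMempoolTimeout bounds how long we wait for a transaction to
	// appear in the miner's mempool.
	MinerMempoolTimeout = time.Minute

	// ChannelOpenTimeout bounds how long we wait for a channel to open.
	ChannelOpenTimeout = 30 * time.Second

	// ChannelCloseTimeout bounds how long we wait for a channel to close.
	ChannelCloseTimeout = 30 * time.Second

	// NodeStartTimeout bounds how long we wait for a node to fully start.
	NodeStartTimeout = 2 * time.Minute

	// AsyncBenchmarkTimeout bounds the async payments benchmark.
	AsyncBenchmarkTimeout = time.Minute

	// SqliteBusyTimeout is passed to lnd's --db.sqlite.busytimeout flag.
	SqliteBusyTimeout = 5 * time.Second
)
```

Splitting these constants across build-tagged files lets the longer timeouts needed under darwin or the etcd/postgres backends be swapped in at compile time without touching the call sites shown earlier.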