Mirror of https://github.com/lightningnetwork/lnd.git (synced 2025-04-12 22:19:06 +02:00)

Merge pull request #6864 from Juneezee/test/t.Cleanup

test: replace defer cleanup with `t.Cleanup`

Commit: 3b44afcd49
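The change is mechanical but repeated across many packages: test helpers that used to return a cleanup closure (which every caller had to remember to `defer`) now take a `*testing.T` (or `testing.TB`) and register their own teardown with `t.Cleanup`. A minimal sketch of the before/after shape — `store` and `newTestStore` are hypothetical names for illustration, not code from this diff:

package example

import "testing"

type store struct{}

func (s *store) Close() error { return nil }

// Before: the constructor returns a cleanup closure; forgetting
// `defer cleanup()` at a call site leaks the resource.
func newTestStoreOld() (*store, func(), error) {
	s := &store{}
	return s, func() { _ = s.Close() }, nil
}

// After: the constructor registers teardown itself. t.Cleanup functions
// run in last-in-first-out order once the test and its subtests finish,
// and they run even when the test aborts via t.Fatal.
func newTestStore(t *testing.T) *store {
	t.Helper()

	s := &store{}
	t.Cleanup(func() {
		if err := s.Close(); err != nil {
			t.Errorf("closing store: %v", err)
		}
	})

	return s
}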
@@ -155,7 +155,7 @@ type testContext struct {
 	sync.Mutex
 }
 
-func setup(t *testing.T, initialChans []LocalChannel) (*testContext, func()) {
+func setup(t *testing.T, initialChans []LocalChannel) *testContext {
 	t.Helper()
 
 	// First, we'll create all the dependencies that we'll need in order to
@@ -178,7 +178,7 @@ func setup(t *testing.T, initialChans []LocalChannel) (*testContext, func()) {
 	chanController := &mockChanController{
 		openChanSignals: make(chan openChanIntent, 10),
 	}
-	memGraph, _, _ := newMemChanGraph()
+	memGraph, _ := newMemChanGraph(t)
 
 	// We'll keep track of the funds available to the agent, to make sure
 	// it correctly uses this value when querying the ChannelBudget.
@@ -224,14 +224,14 @@ func setup(t *testing.T, initialChans []LocalChannel) (*testContext, func()) {
 		t.Fatalf("unable to start agent: %v", err)
 	}
 
-	cleanup := func() {
+	t.Cleanup(func() {
 		// We must close quit before agent.Stop(), to make sure
 		// ChannelBudget won't block preventing the agent from exiting.
 		close(quit)
 		agent.Stop()
-	}
+	})
 
-	return ctx, cleanup
+	return ctx
 }
 
 // respondMoreChans consumes the moreChanArgs element and responds to the agent
@@ -279,8 +279,7 @@ func respondNodeScores(t *testing.T, testCtx *testContext,
 func TestAgentChannelOpenSignal(t *testing.T) {
 	t.Parallel()
 
-	testCtx, cleanup := setup(t, nil)
-	defer cleanup()
+	testCtx := setup(t, nil)
 
 	// We'll send an initial "no" response to advance the agent past its
 	// initial check.
@@ -324,8 +323,7 @@ func TestAgentChannelOpenSignal(t *testing.T) {
 func TestAgentHeuristicUpdateSignal(t *testing.T) {
 	t.Parallel()
 
-	testCtx, cleanup := setup(t, nil)
-	defer cleanup()
+	testCtx := setup(t, nil)
 
 	pub, err := testCtx.graph.addRandNode()
 	require.NoError(t, err, "unable to generate key")
@@ -386,8 +384,7 @@ var _ ChannelController = (*mockFailingChanController)(nil)
 func TestAgentChannelFailureSignal(t *testing.T) {
 	t.Parallel()
 
-	testCtx, cleanup := setup(t, nil)
-	defer cleanup()
+	testCtx := setup(t, nil)
 
 	testCtx.chanController = &mockFailingChanController{}
 
@@ -436,8 +433,7 @@ func TestAgentChannelCloseSignal(t *testing.T) {
 		},
 	}
 
-	testCtx, cleanup := setup(t, initialChans)
-	defer cleanup()
+	testCtx := setup(t, initialChans)
 
 	// We'll send an initial "no" response to advance the agent past its
 	// initial check.
@@ -478,8 +474,7 @@ func TestAgentChannelCloseSignal(t *testing.T) {
 func TestAgentBalanceUpdate(t *testing.T) {
 	t.Parallel()
 
-	testCtx, cleanup := setup(t, nil)
-	defer cleanup()
+	testCtx := setup(t, nil)
 
 	// We'll send an initial "no" response to advance the agent past its
 	// initial check.
@@ -525,8 +520,7 @@ func TestAgentBalanceUpdate(t *testing.T) {
 func TestAgentImmediateAttach(t *testing.T) {
 	t.Parallel()
 
-	testCtx, cleanup := setup(t, nil)
-	defer cleanup()
+	testCtx := setup(t, nil)
 
 	const numChans = 5
 
@@ -591,8 +585,7 @@ func TestAgentImmediateAttach(t *testing.T) {
 func TestAgentPrivateChannels(t *testing.T) {
 	t.Parallel()
 
-	testCtx, cleanup := setup(t, nil)
-	defer cleanup()
+	testCtx := setup(t, nil)
 
 	// The chanController should be initialized such that all of its open
 	// channel requests are for private channels.
@@ -652,8 +645,7 @@ func TestAgentPrivateChannels(t *testing.T) {
 func TestAgentPendingChannelState(t *testing.T) {
 	t.Parallel()
 
-	testCtx, cleanup := setup(t, nil)
-	defer cleanup()
+	testCtx := setup(t, nil)
 
 	// We'll only return a single directive for a pre-chosen node.
 	nodeKey, err := testCtx.graph.addRandNode()
@@ -764,8 +756,7 @@ func TestAgentPendingChannelState(t *testing.T) {
 func TestAgentPendingOpenChannel(t *testing.T) {
 	t.Parallel()
 
-	testCtx, cleanup := setup(t, nil)
-	defer cleanup()
+	testCtx := setup(t, nil)
 
 	// We'll send an initial "no" response to advance the agent past its
 	// initial check.
@@ -796,8 +787,7 @@ func TestAgentPendingOpenChannel(t *testing.T) {
 func TestAgentOnNodeUpdates(t *testing.T) {
 	t.Parallel()
 
-	testCtx, cleanup := setup(t, nil)
-	defer cleanup()
+	testCtx := setup(t, nil)
 
 	// We'll send an initial "yes" response to advance the agent past its
 	// initial check. This will cause it to try to get directives from an
@@ -844,8 +834,7 @@ func TestAgentOnNodeUpdates(t *testing.T) {
 func TestAgentSkipPendingConns(t *testing.T) {
 	t.Parallel()
 
-	testCtx, cleanup := setup(t, nil)
-	defer cleanup()
+	testCtx := setup(t, nil)
 
 	connect := make(chan chan error)
 	testCtx.agent.cfg.ConnectToPeer = func(*btcec.PublicKey, []net.Addr) (bool, error) {
@@ -1025,8 +1014,7 @@ func TestAgentSkipPendingConns(t *testing.T) {
 func TestAgentQuitWhenPendingConns(t *testing.T) {
 	t.Parallel()
 
-	testCtx, cleanup := setup(t, nil)
-	defer cleanup()
+	testCtx := setup(t, nil)
 
 	connect := make(chan chan error)
 
@@ -1216,8 +1204,7 @@ func TestAgentChannelSizeAllocation(t *testing.T) {
 	// Total number of nodes in our mock graph.
 	const numNodes = 20
 
-	testCtx, cleanup := setup(t, nil)
-	defer cleanup()
+	testCtx := setup(t, nil)
 
 	nodeScores := make(map[NodeID]*NodeScore)
 	for i := 0; i < numNodes; i++ {
@@ -37,22 +37,18 @@ func TestBetweennessCentralityEmptyGraph(t *testing.T) {
 	)
 
 	for _, chanGraph := range chanGraphs {
-		graph, cleanup, err := chanGraph.genFunc()
 		success := t.Run(chanGraph.name, func(t1 *testing.T) {
-			require.NoError(t, err, "unable to create graph")
+			graph, err := chanGraph.genFunc(t1)
+			require.NoError(t1, err, "unable to create graph")
 
-			if cleanup != nil {
-				defer cleanup()
-			}
-
-			err := centralityMetric.Refresh(graph)
-			require.NoError(t, err)
+			err = centralityMetric.Refresh(graph)
+			require.NoError(t1, err)
 
 			centrality := centralityMetric.GetMetric(false)
-			require.Equal(t, 0, len(centrality))
+			require.Equal(t1, 0, len(centrality))
 
 			centrality = centralityMetric.GetMetric(true)
-			require.Equal(t, 0, len(centrality))
+			require.Equal(t1, 0, len(centrality))
 		})
 		if !success {
 			break
@@ -81,13 +77,9 @@ func TestBetweennessCentralityWithNonEmptyGraph(t *testing.T) {
 	for _, numWorkers := range workers {
 		for _, chanGraph := range chanGraphs {
 			numWorkers := numWorkers
-			graph, cleanup, err := chanGraph.genFunc()
+			graph, err := chanGraph.genFunc(t)
 			require.NoError(t, err, "unable to create graph")
 
-			if cleanup != nil {
-				defer cleanup()
-			}
-
 			testName := fmt.Sprintf(
 				"%v %d workers", chanGraph.name, numWorkers,
 			)
@@ -97,7 +89,7 @@ func TestBetweennessCentralityWithNonEmptyGraph(t *testing.T) {
 				numWorkers,
 			)
 			require.NoError(
-				t, err,
+				t1, err,
 				"construction must succeed with "+
 					"positive number of workers",
 			)
@@ -107,7 +99,7 @@ func TestBetweennessCentralityWithNonEmptyGraph(t *testing.T) {
 			)
 
 			err = metric.Refresh(graph)
-			require.NoError(t, err)
+			require.NoError(t1, err)
 
 			for _, expected := range tests {
 				expected := expected
@@ -115,7 +107,7 @@ func TestBetweennessCentralityWithNonEmptyGraph(t *testing.T) {
 					expected.normalize,
 				)
 
-				require.Equal(t,
+				require.Equal(t1,
 					centralityTestGraph.nodes,
 					len(centrality),
 				)
@@ -125,8 +117,8 @@ func TestBetweennessCentralityWithNonEmptyGraph(t *testing.T) {
 					graphNodes[i],
 				)
 				result, ok := centrality[nodeID]
-				require.True(t, ok)
-				require.Equal(t, c, result)
+				require.True(t1, ok)
+				require.Equal(t1, c, result)
 			}
 		}
 	})
@@ -2,9 +2,7 @@ package autopilot
 
 import (
 	"bytes"
-	"io/ioutil"
 	prand "math/rand"
-	"os"
 	"testing"
 	"time"
 
@@ -14,7 +12,7 @@ import (
 	"github.com/stretchr/testify/require"
 )
 
-type genGraphFunc func() (testGraph, func(), error)
+type genGraphFunc func(t *testing.T) (testGraph, error)
 
 type testGraph interface {
 	ChannelGraph
@@ -25,34 +23,25 @@ type testGraph interface {
 	addRandNode() (*btcec.PublicKey, error)
 }
 
-func newDiskChanGraph() (testGraph, func(), error) {
-	// First, create a temporary directory to be used for the duration of
-	// this test.
-	tempDirName, err := ioutil.TempDir("", "channeldb")
-	if err != nil {
-		return nil, nil, err
-	}
-
+func newDiskChanGraph(t *testing.T) (testGraph, error) {
 	// Next, create channeldb for the first time.
-	cdb, err := channeldb.Open(tempDirName)
+	cdb, err := channeldb.Open(t.TempDir())
 	if err != nil {
-		return nil, nil, err
-	}
-
-	cleanUp := func() {
-		cdb.Close()
-		os.RemoveAll(tempDirName)
+		return nil, err
 	}
+	t.Cleanup(func() {
+		require.NoError(t, cdb.Close())
+	})
 
 	return &databaseChannelGraph{
 		db: cdb.ChannelGraph(),
-	}, cleanUp, nil
+	}, nil
 }
 
 var _ testGraph = (*databaseChannelGraph)(nil)
 
-func newMemChanGraph() (testGraph, func(), error) {
-	return newMemChannelGraph(), nil, nil
+func newMemChanGraph(_ *testing.T) (testGraph, error) {
+	return newMemChannelGraph(), nil
 }
 
 var _ testGraph = (*memChannelGraph)(nil)
@@ -86,13 +75,10 @@ func TestPrefAttachmentSelectEmptyGraph(t *testing.T) {
 
 	for _, graph := range chanGraphs {
 		success := t.Run(graph.name, func(t1 *testing.T) {
-			graph, cleanup, err := graph.genFunc()
+			graph, err := graph.genFunc(t1)
 			if err != nil {
 				t1.Fatalf("unable to create graph: %v", err)
 			}
-			if cleanup != nil {
-				defer cleanup()
-			}
 
 			// With the necessary state initialized, we'll now
 			// attempt to get the score for this one node.
@@ -131,13 +117,10 @@ func TestPrefAttachmentSelectTwoVertexes(t *testing.T) {
 
 	for _, graph := range chanGraphs {
 		success := t.Run(graph.name, func(t1 *testing.T) {
-			graph, cleanup, err := graph.genFunc()
+			graph, err := graph.genFunc(t1)
 			if err != nil {
 				t1.Fatalf("unable to create graph: %v", err)
 			}
-			if cleanup != nil {
-				defer cleanup()
-			}
 
 			prefAttach := NewPrefAttachment()
 
@@ -231,13 +214,10 @@ func TestPrefAttachmentSelectGreedyAllocation(t *testing.T) {
 
 	for _, graph := range chanGraphs {
 		success := t.Run(graph.name, func(t1 *testing.T) {
-			graph, cleanup, err := graph.genFunc()
+			graph, err := graph.genFunc(t1)
 			if err != nil {
 				t1.Fatalf("unable to create graph: %v", err)
 			}
-			if cleanup != nil {
-				defer cleanup()
-			}
 
 			prefAttach := NewPrefAttachment()
 
@@ -363,13 +343,10 @@ func TestPrefAttachmentSelectSkipNodes(t *testing.T) {
 
 	for _, graph := range chanGraphs {
 		success := t.Run(graph.name, func(t1 *testing.T) {
-			graph, cleanup, err := graph.genFunc()
+			graph, err := graph.genFunc(t1)
 			if err != nil {
 				t1.Fatalf("unable to create graph: %v", err)
 			}
-			if cleanup != nil {
-				defer cleanup()
-			}
 
 			prefAttach := NewPrefAttachment()
 
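Alongside `t.Cleanup`, the hunk above for `newDiskChanGraph` also swaps `ioutil.TempDir` plus manual `os.RemoveAll` for `t.TempDir()`, which hands out a fresh directory that the testing package deletes automatically when the test ends. A hedged sketch of the combined idiom, using bbolt purely as an illustrative store (not part of this diff):

package example

import (
	"path/filepath"
	"testing"

	"github.com/stretchr/testify/require"
	bolt "go.etcd.io/bbolt"
)

// newDiskDB opens a throwaway database under t.TempDir(). No cleanup
// closure is returned: directory removal is handled by the testing
// package, and Close is registered via t.Cleanup.
func newDiskDB(t *testing.T) *bolt.DB {
	t.Helper()

	db, err := bolt.Open(filepath.Join(t.TempDir(), "test.db"), 0600, nil)
	require.NoError(t, err, "unable to open test db")
	t.Cleanup(func() {
		require.NoError(t, db.Close())
	})

	return db
}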
@@ -85,22 +85,19 @@ func TestTopCentrality(t *testing.T) {
 	for _, chanGraph := range chanGraphs {
 		chanGraph := chanGraph
 
-		success := t.Run(chanGraph.name, func(t *testing.T) {
-			t.Parallel()
+		success := t.Run(chanGraph.name, func(t1 *testing.T) {
+			t1.Parallel()
 
-			graph, cleanup, err := chanGraph.genFunc()
-			require.NoError(t, err, "unable to create graph")
-			if cleanup != nil {
-				defer cleanup()
-			}
+			graph, err := chanGraph.genFunc(t1)
+			require.NoError(t1, err, "unable to create graph")
 
 			// Build the test graph.
 			graphNodes := buildTestGraph(
-				t, graph, centralityTestGraph,
+				t1, graph, centralityTestGraph,
 			)
 
 			for _, chans := range channelsWith {
-				testTopCentrality(t, graph, graphNodes, chans)
+				testTopCentrality(t1, graph, graphNodes, chans)
 			}
 		})
 
@@ -14,9 +14,8 @@ func BenchmarkReadHeaderAndBody(t *testing.B) {
 	// Create a test connection, grabbing either side of the connection
 	// into local variables. If the initial crypto handshake fails, then
 	// we'll get a non-nil error here.
-	localConn, remoteConn, cleanUp, err := establishTestConnection()
+	localConn, remoteConn, err := establishTestConnection(t)
 	require.NoError(t, err, "unable to establish test connection: %v", err)
-	defer cleanUp()
 
 	rand.Seed(time.Now().Unix())
 
@@ -48,18 +48,20 @@ func makeListener() (*Listener, *lnwire.NetAddress, error) {
 	return listener, netAddr, nil
 }
 
-func establishTestConnection() (net.Conn, net.Conn, func(), error) {
+func establishTestConnection(t testing.TB) (net.Conn, net.Conn, error) {
 	listener, netAddr, err := makeListener()
 	if err != nil {
-		return nil, nil, nil, err
+		return nil, nil, err
 	}
-	defer listener.Close()
+	t.Cleanup(func() {
+		listener.Close()
+	})
 
 	// Now, generate the long-term private keys remote end of the connection
 	// within our test.
 	remotePriv, err := btcec.NewPrivateKey()
 	if err != nil {
-		return nil, nil, nil, err
+		return nil, nil, err
 	}
 	remoteKeyECDH := &keychain.PrivKeyECDH{PrivKey: remotePriv}
 
@@ -83,29 +85,28 @@ func establishTestConnection() (net.Conn, net.Conn, func(), error) {
 
 	remote := <-remoteConnChan
 	if remote.err != nil {
-		return nil, nil, nil, err
+		return nil, nil, err
 	}
 
 	local := <-localConnChan
 	if local.err != nil {
-		return nil, nil, nil, err
+		return nil, nil, err
 	}
 
-	cleanUp := func() {
+	t.Cleanup(func() {
 		local.conn.Close()
 		remote.conn.Close()
-	}
+	})
 
-	return local.conn, remote.conn, cleanUp, nil
+	return local.conn, remote.conn, nil
 }
 
 func TestConnectionCorrectness(t *testing.T) {
 	// Create a test connection, grabbing either side of the connection
 	// into local variables. If the initial crypto handshake fails, then
 	// we'll get a non-nil error here.
-	localConn, remoteConn, cleanUp, err := establishTestConnection()
+	localConn, remoteConn, err := establishTestConnection(t)
 	require.NoError(t, err, "unable to establish test connection")
-	defer cleanUp()
 
 	// Test out some message full-message reads.
 	for i := 0; i < 10; i++ {
@@ -257,9 +258,8 @@ func TestWriteMessageChunking(t *testing.T) {
 	// Create a test connection, grabbing either side of the connection
 	// into local variables. If the initial crypto handshake fails, then
 	// we'll get a non-nil error here.
-	localConn, remoteConn, cleanUp, err := establishTestConnection()
+	localConn, remoteConn, err := establishTestConnection(t)
 	require.NoError(t, err, "unable to establish test connection")
-	defer cleanUp()
 
 	// Attempt to write a message which is over 3x the max allowed payload
 	// size.
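Note the parameter type above: `establishTestConnection` takes `testing.TB`, the interface implemented by both `*testing.T` and `*testing.B`, so the same helper serves `TestConnectionCorrectness` and `BenchmarkReadHeaderAndBody`. `Cleanup` is part of the `testing.TB` interface (since Go 1.14), which is what makes this work. A small sketch of the pattern with an in-memory pipe (hypothetical helper, not from this diff):

package example

import (
	"net"
	"testing"
)

// newConnPair returns both ends of an in-memory connection and tears
// them down automatically; testing.TB lets tests and benchmarks share it.
func newConnPair(t testing.TB) (net.Conn, net.Conn) {
	t.Helper()

	local, remote := net.Pipe()
	t.Cleanup(func() {
		local.Close()
		remote.Close()
	})

	return local, remote
}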
@@ -65,6 +65,9 @@ func setUpNotifier(t *testing.T, bitcoindConn *chain.BitcoindConn,
 	if err := notifier.Start(); err != nil {
 		t.Fatalf("unable to start notifier: %v", err)
 	}
+	t.Cleanup(func() {
+		require.NoError(t, notifier.Stop())
+	})
 
 	return notifier
 }
@@ -107,15 +110,13 @@ func TestHistoricalConfDetailsTxIndex(t *testing.T) {
 }
 
 func testHistoricalConfDetailsTxIndex(t *testing.T, rpcPolling bool) {
-	miner, tearDown := chainntnfs.NewMiner(
+	miner := chainntnfs.NewMiner(
 		t, []string{"--txindex"}, true, 25,
 	)
-	defer tearDown()
 
-	bitcoindConn, cleanUp := chainntnfs.NewBitcoindBackend(
+	bitcoindConn := chainntnfs.NewBitcoindBackend(
 		t, miner.P2PAddress(), true, rpcPolling,
 	)
-	defer cleanUp()
 
 	hintCache := initHintCache(t)
 	blockCache := blockcache.NewBlockCache(10000)
@@ -123,7 +124,6 @@ func testHistoricalConfDetailsTxIndex(t *testing.T, rpcPolling bool) {
 	notifier := setUpNotifier(
 		t, bitcoindConn, hintCache, hintCache, blockCache,
 	)
-	defer notifier.Stop()
 
 	syncNotifierWithMiner(t, notifier, miner)
 
@@ -198,21 +198,16 @@ func TestHistoricalConfDetailsNoTxIndex(t *testing.T) {
 }
 
 func testHistoricalConfDetailsNoTxIndex(t *testing.T, rpcpolling bool) {
-	miner, tearDown := chainntnfs.NewMiner(t, nil, true, 25)
-	defer tearDown()
+	miner := chainntnfs.NewMiner(t, nil, true, 25)
 
-	bitcoindConn, cleanUp := chainntnfs.NewBitcoindBackend(
+	bitcoindConn := chainntnfs.NewBitcoindBackend(
 		t, miner.P2PAddress(), false, rpcpolling,
 	)
-	defer cleanUp()
 
 	hintCache := initHintCache(t)
 	blockCache := blockcache.NewBlockCache(10000)
 
-	notifier := setUpNotifier(
-		t, bitcoindConn, hintCache, hintCache, blockCache,
-	)
-	defer notifier.Stop()
+	notifier := setUpNotifier(t, bitcoindConn, hintCache, hintCache, blockCache)
 
 	// Since the node has its txindex disabled, we fall back to scanning the
 	// chain manually. A transaction unknown to the network should not be
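Moving the `notifier.Stop()` registration into the shared `setUpNotifier` helper, as the hunks above do, means no call site can leak a running notifier, and asserting on the `Stop` error surfaces failures that a bare `defer notifier.Stop()` silently discarded. A minimal sketch of that design choice — `notifier` and `setUp` here are illustrative stand-ins, not this diff's types:

package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

type notifier struct{}

func (n *notifier) Start() error { return nil }
func (n *notifier) Stop() error  { return nil }

// setUp starts the component and owns its teardown; callers just use the
// returned value and never see a cleanup closure.
func setUp(t *testing.T) *notifier {
	t.Helper()

	n := &notifier{}
	require.NoError(t, n.Start(), "unable to start notifier")
	t.Cleanup(func() {
		require.NoError(t, n.Stop())
	})

	return n
}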
@@ -61,6 +61,9 @@ func setUpNotifier(t *testing.T, h *rpctest.Harness) *BtcdNotifier {
 	if err := notifier.Start(); err != nil {
 		t.Fatalf("unable to start notifier: %v", err)
 	}
+	t.Cleanup(func() {
+		require.NoError(t, notifier.Stop())
+	})
 
 	return notifier
 }
@@ -70,13 +73,11 @@ func setUpNotifier(t *testing.T, h *rpctest.Harness) *BtcdNotifier {
 func TestHistoricalConfDetailsTxIndex(t *testing.T) {
 	t.Parallel()
 
-	harness, tearDown := chainntnfs.NewMiner(
+	harness := chainntnfs.NewMiner(
 		t, []string{"--txindex"}, true, 25,
 	)
-	defer tearDown()
 
 	notifier := setUpNotifier(t, harness)
-	defer notifier.Stop()
 
 	// A transaction unknown to the node should not be found within the
 	// txindex even if it is enabled, so we should not proceed with any
@@ -144,11 +145,9 @@ func TestHistoricalConfDetailsTxIndex(t *testing.T) {
 func TestHistoricalConfDetailsNoTxIndex(t *testing.T) {
 	t.Parallel()
 
-	harness, tearDown := chainntnfs.NewMiner(t, nil, true, 25)
-	defer tearDown()
+	harness := chainntnfs.NewMiner(t, nil, true, 25)
 
 	notifier := setUpNotifier(t, harness)
-	defer notifier.Stop()
 
 	// Since the node has its txindex disabled, we fall back to scanning the
 	// chain manually. A transaction unknown to the network should not be
@@ -1816,8 +1816,7 @@ func TestInterfaces(t *testing.T, targetBackEnd string) {
 	// dedicated miner to generate blocks, cause re-orgs, etc. We'll set up
 	// this node with a chain length of 125, so we have plenty of BTC to
 	// play around with.
-	miner, tearDown := chainntnfs.NewMiner(t, nil, true, 25)
-	defer tearDown()
+	miner := chainntnfs.NewMiner(t, nil, true, 25)
 
 	rpcConfig := miner.RPCConfig()
 	p2pAddr := miner.P2PAddress()
@@ -1850,14 +1849,13 @@ func TestInterfaces(t *testing.T, targetBackEnd string) {
 	blockCache := blockcache.NewBlockCache(10000)
 
 	var (
-		cleanUp     func()
 		newNotifier func() (chainntnfs.TestChainNotifier, error)
 	)
 
 	switch notifierType {
 	case "bitcoind":
 		var bitcoindConn *chain.BitcoindConn
-		bitcoindConn, cleanUp = chainntnfs.NewBitcoindBackend(
+		bitcoindConn = chainntnfs.NewBitcoindBackend(
 			t, p2pAddr, true, false,
 		)
 		newNotifier = func() (chainntnfs.TestChainNotifier, error) {
@@ -1869,7 +1867,7 @@ func TestInterfaces(t *testing.T, targetBackEnd string) {
 
 	case "bitcoind-rpc-polling":
 		var bitcoindConn *chain.BitcoindConn
-		bitcoindConn, cleanUp = chainntnfs.NewBitcoindBackend(
+		bitcoindConn = chainntnfs.NewBitcoindBackend(
 			t, p2pAddr, true, true,
 		)
 		newNotifier = func() (chainntnfs.TestChainNotifier, error) {
@@ -1889,9 +1887,7 @@ func TestInterfaces(t *testing.T, targetBackEnd string) {
 
 	case "neutrino":
 		var spvNode *neutrino.ChainService
-		spvNode, cleanUp = chainntnfs.NewNeutrinoBackend(
-			t, p2pAddr,
-		)
+		spvNode = chainntnfs.NewNeutrinoBackend(t, p2pAddr)
 		newNotifier = func() (chainntnfs.TestChainNotifier, error) {
 			return neutrinonotify.New(
 				spvNode, hintCache, hintCache,
@@ -1964,9 +1960,5 @@ func TestInterfaces(t *testing.T, targetBackEnd string) {
 				break
 			}
 		}
-
-		if cleanUp != nil {
-			cleanUp()
-		}
 	}
 }
@@ -165,7 +165,7 @@ func CreateSpendTx(t *testing.T, prevOutPoint *wire.OutPoint,
 // NewMiner spawns testing harness backed by a btcd node that can serve as a
 // miner.
 func NewMiner(t *testing.T, extraArgs []string, createChain bool,
-	spendableOutputs uint32) (*rpctest.Harness, func()) {
+	spendableOutputs uint32) *rpctest.Harness {
 
 	t.Helper()
 
@@ -175,12 +175,15 @@ func NewMiner(t *testing.T, extraArgs []string, createChain bool,
 
 	node, err := rpctest.New(NetParams, nil, extraArgs, "")
 	require.NoError(t, err, "unable to create backend node")
+	t.Cleanup(func() {
+		require.NoError(t, node.TearDown())
+	})
 
 	if err := node.SetUp(createChain, spendableOutputs); err != nil {
-		node.TearDown()
 		t.Fatalf("unable to set up backend node: %v", err)
 	}
 
-	return node, func() { node.TearDown() }
+	return node
 }
 
 // NewBitcoindBackend spawns a new bitcoind node that connects to a miner at the
@@ -190,7 +193,7 @@ func NewMiner(t *testing.T, extraArgs []string, createChain bool,
 // used for block and tx notifications or if its ZMQ interface should be used.
 // A connection to the newly spawned bitcoind node is returned.
 func NewBitcoindBackend(t *testing.T, minerAddr string, txindex,
-	rpcpolling bool) (*chain.BitcoindConn, func()) {
+	rpcpolling bool) *chain.BitcoindConn {
 
 	t.Helper()
 
@@ -219,6 +222,10 @@ func NewBitcoindBackend(t *testing.T, minerAddr string, txindex,
 	if err := bitcoind.Start(); err != nil {
 		t.Fatalf("unable to start bitcoind: %v", err)
 	}
+	t.Cleanup(func() {
+		_ = bitcoind.Process.Kill()
+		_ = bitcoind.Wait()
+	})
 
 	// Wait for the bitcoind instance to start up.
 	host := fmt.Sprintf("127.0.0.1:%d", rpcPort)
@@ -257,21 +264,16 @@ func NewBitcoindBackend(t *testing.T, minerAddr string, txindex,
 		return conn.Start()
 	}, 10*time.Second)
 	if err != nil {
-		bitcoind.Process.Kill()
-		bitcoind.Wait()
 		t.Fatalf("unable to establish connection to bitcoind: %v", err)
 	}
+	t.Cleanup(conn.Stop)
 
-	return conn, func() {
-		conn.Stop()
-		bitcoind.Process.Kill()
-		bitcoind.Wait()
-	}
+	return conn
 }
 
 // NewNeutrinoBackend spawns a new neutrino node that connects to a miner at
 // the specified address.
-func NewNeutrinoBackend(t *testing.T, minerAddr string) (*neutrino.ChainService, func()) {
+func NewNeutrinoBackend(t *testing.T, minerAddr string) *neutrino.ChainService {
 	t.Helper()
 
 	spvDir := t.TempDir()
@@ -283,6 +285,9 @@ func NewNeutrinoBackend(t *testing.T, minerAddr string) (*neutrino.ChainService,
 	if err != nil {
 		t.Fatalf("unable to create walletdb: %v", err)
 	}
+	t.Cleanup(func() {
+		spvDatabase.Close()
+	})
 
 	// Create an instance of neutrino connected to the running btcd
 	// instance.
@@ -294,7 +299,6 @@ func NewNeutrinoBackend(t *testing.T, minerAddr string) (*neutrino.ChainService,
 	}
 	spvNode, err := neutrino.NewChainService(spvConfig)
 	if err != nil {
-		spvDatabase.Close()
 		t.Fatalf("unable to create neutrino: %v", err)
 	}
 
@@ -304,9 +308,9 @@ func NewNeutrinoBackend(t *testing.T, minerAddr string) (*neutrino.ChainService,
 	for !spvNode.IsCurrent() {
 		time.Sleep(time.Millisecond * 100)
 	}
 
-	return spvNode, func() {
+	t.Cleanup(func() {
 		spvNode.Stop()
 		spvDatabase.Close()
-	}
+	})
+
+	return spvNode
 }
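One detail worth noting in `NewMiner` above: `t.Cleanup(node.TearDown)` is registered immediately after `rpctest.New` succeeds, before `node.SetUp` runs, which is why the explicit `node.TearDown()` disappears from the error path — if `SetUp` aborts via `t.Fatalf`, the registered cleanup still fires. And because cleanups run last-in-first-out, later registrations (like `conn.Stop` in `NewBitcoindBackend`) run before earlier ones (killing the bitcoind process), preserving the teardown order of the old closures. A toy illustration of the LIFO ordering:

package example

import "testing"

func TestCleanupOrder(t *testing.T) {
	t.Cleanup(func() { t.Log("runs last: kill backing process") })
	t.Cleanup(func() { t.Log("runs second: stop connection") })
	t.Cleanup(func() { t.Log("runs first: close client") })
}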
@@ -95,11 +95,6 @@ func TestUpdateAndSwap(t *testing.T) {
 		},
 	}
 	for i, testCase := range testCases {
-		// Ensure that all created files are removed at the end of the
-		// test case.
-		defer os.Remove(testCase.fileName)
-		defer os.Remove(testCase.tempFileName)
-
 		backupFile := NewMultiFile(testCase.fileName)
 
 		// To start with, we'll make a random byte slice that'll pose
@@ -112,10 +107,11 @@ func TestUpdateAndSwap(t *testing.T) {
 		// If the old temporary file is meant to exist, then we'll
 		// create it now as an empty file.
 		if testCase.oldTempExists {
-			_, err := os.Create(testCase.tempFileName)
+			f, err := os.Create(testCase.tempFileName)
 			if err != nil {
 				t.Fatalf("unable to create temp file: %v", err)
 			}
+			require.NoError(t, f.Close())
 
 			// TODO(roasbeef): mock out fs calls?
 		}
@@ -346,9 +346,8 @@ func createTestChannelState(t *testing.T, cdb *ChannelStateDB) *OpenChannel {
 func TestOpenChannelPutGetDelete(t *testing.T) {
 	t.Parallel()
 
-	fullDB, cleanUp, err := MakeTestDB()
+	fullDB, err := MakeTestDB(t)
 	require.NoError(t, err, "unable to make test database")
-	defer cleanUp()
 
 	cdb := fullDB.ChannelStateDB()
 
@@ -487,11 +486,10 @@ func TestOptionalShutdown(t *testing.T) {
 		test := test
 
 		t.Run(test.name, func(t *testing.T) {
-			fullDB, cleanUp, err := MakeTestDB()
+			fullDB, err := MakeTestDB(t)
 			if err != nil {
 				t.Fatalf("unable to make test database: %v", err)
 			}
-			defer cleanUp()
 
 			cdb := fullDB.ChannelStateDB()
 
@@ -572,9 +570,8 @@ func assertRevocationLogEntryEqual(t *testing.T, c *ChannelCommitment,
 func TestChannelStateTransition(t *testing.T) {
 	t.Parallel()
 
-	fullDB, cleanUp, err := MakeTestDB()
+	fullDB, err := MakeTestDB(t)
 	require.NoError(t, err, "unable to make test database")
-	defer cleanUp()
 
 	cdb := fullDB.ChannelStateDB()
 
@@ -889,9 +886,8 @@ func TestChannelStateTransition(t *testing.T) {
 func TestFetchPendingChannels(t *testing.T) {
 	t.Parallel()
 
-	fullDB, cleanUp, err := MakeTestDB()
+	fullDB, err := MakeTestDB(t)
 	require.NoError(t, err, "unable to make test database")
-	defer cleanUp()
 
 	cdb := fullDB.ChannelStateDB()
 
@@ -960,9 +956,8 @@ func TestFetchPendingChannels(t *testing.T) {
 func TestFetchClosedChannels(t *testing.T) {
 	t.Parallel()
 
-	fullDB, cleanUp, err := MakeTestDB()
+	fullDB, err := MakeTestDB(t)
 	require.NoError(t, err, "unable to make test database")
-	defer cleanUp()
 
 	cdb := fullDB.ChannelStateDB()
 
@@ -1041,9 +1036,8 @@ func TestFetchWaitingCloseChannels(t *testing.T) {
 	// We'll start by creating two channels within our test database. One of
 	// them will have their funding transaction confirmed on-chain, while
 	// the other one will remain unconfirmed.
-	fullDB, cleanUp, err := MakeTestDB()
+	fullDB, err := MakeTestDB(t)
 	require.NoError(t, err, "unable to make test database")
-	defer cleanUp()
 
 	cdb := fullDB.ChannelStateDB()
 
@@ -1154,9 +1148,8 @@ func TestFetchWaitingCloseChannels(t *testing.T) {
 func TestRefresh(t *testing.T) {
 	t.Parallel()
 
-	fullDB, cleanUp, err := MakeTestDB()
+	fullDB, err := MakeTestDB(t)
 	require.NoError(t, err, "unable to make test database")
-	defer cleanUp()
 
 	cdb := fullDB.ChannelStateDB()
 
@@ -1298,12 +1291,11 @@ func TestCloseInitiator(t *testing.T) {
 		t.Run(test.name, func(t *testing.T) {
 			t.Parallel()
 
-			fullDB, cleanUp, err := MakeTestDB()
+			fullDB, err := MakeTestDB(t)
 			if err != nil {
 				t.Fatalf("unable to make test database: %v",
 					err)
 			}
-			defer cleanUp()
 
 			cdb := fullDB.ChannelStateDB()
 
@@ -1345,12 +1337,11 @@ func TestCloseInitiator(t *testing.T) {
 // TestCloseChannelStatus tests setting of a channel status on the historical
 // channel on channel close.
 func TestCloseChannelStatus(t *testing.T) {
-	fullDB, cleanUp, err := MakeTestDB()
+	fullDB, err := MakeTestDB(t)
 	if err != nil {
 		t.Fatalf("unable to make test database: %v",
 			err)
 	}
-	defer cleanUp()
 
 	cdb := fullDB.ChannelStateDB()
 
@@ -4,9 +4,9 @@ import (
 	"bytes"
 	"encoding/binary"
 	"fmt"
-	"io/ioutil"
 	"net"
-	"os"
+	"testing"
 
 	"github.com/btcsuite/btcd/btcec/v2"
 	"github.com/btcsuite/btcd/wire"
@@ -1655,33 +1655,28 @@ func (c *ChannelStateDB) FetchHistoricalChannel(outPoint *wire.OutPoint) (
 // MakeTestDB creates a new instance of the ChannelDB for testing purposes.
-// A callback which cleans up the created temporary directories is also
-// returned and intended to be executed after the test completes.
-func MakeTestDB(modifiers ...OptionModifier) (*DB, func(), error) {
+func MakeTestDB(t *testing.T, modifiers ...OptionModifier) (*DB, error) {
 	// First, create a temporary directory to be used for the duration of
 	// this test.
-	tempDirName, err := ioutil.TempDir("", "channeldb")
-	if err != nil {
-		return nil, nil, err
-	}
+	tempDirName := t.TempDir()
 
 	// Next, create channeldb for the first time.
 	backend, backendCleanup, err := kvdb.GetTestBackend(tempDirName, "cdb")
 	if err != nil {
 		backendCleanup()
-		return nil, nil, err
+		return nil, err
 	}
 
 	cdb, err := CreateWithBackend(backend, modifiers...)
 	if err != nil {
 		backendCleanup()
-		os.RemoveAll(tempDirName)
-		return nil, nil, err
+		return nil, err
 	}
 
-	cleanUp := func() {
+	t.Cleanup(func() {
 		cdb.Close()
 		backendCleanup()
-		os.RemoveAll(tempDirName)
-	}
+	})
 
-	return cdb, cleanUp, nil
+	return cdb, nil
 }
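With `MakeTestDB` taking the test handle directly, every call site in the hunks that follow shrinks from a three-value to a two-value assignment and drops its `defer cleanUp()`. A typical migrated call site would look like this — a sketch based on the pattern in this diff, not a test that exists in the tree:

package channeldb

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func TestExample(t *testing.T) {
	t.Parallel()

	fullDB, err := MakeTestDB(t)
	require.NoError(t, err, "unable to make test database")

	// Exercise the database; teardown is already registered on t,
	// so no defer cleanUp() is needed.
	cdb := fullDB.ChannelStateDB()
	_ = cdb
}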
@@ -36,7 +36,7 @@ func TestOpenWithCreate(t *testing.T) {
 	dbPath := filepath.Join(tempDirName, "cdb")
 	backend, cleanup, err := kvdb.GetTestBackend(dbPath, "cdb")
 	require.NoError(t, err, "unable to get test db backend")
-	defer cleanup()
+	t.Cleanup(cleanup)
 
 	cdb, err := CreateWithBackend(backend)
 	require.NoError(t, err, "unable to create channeldb")
@@ -72,7 +72,7 @@ func TestWipe(t *testing.T) {
 	dbPath := filepath.Join(tempDirName, "cdb")
 	backend, cleanup, err := kvdb.GetTestBackend(dbPath, "cdb")
 	require.NoError(t, err, "unable to get test db backend")
-	defer cleanup()
+	t.Cleanup(cleanup)
 
 	fullDB, err := CreateWithBackend(backend)
 	require.NoError(t, err, "unable to create channeldb")
@@ -101,9 +101,8 @@ func TestFetchClosedChannelForID(t *testing.T) {
 
 	const numChans = 101
 
-	fullDB, cleanUp, err := MakeTestDB()
+	fullDB, err := MakeTestDB(t)
 	require.NoError(t, err, "unable to make test database")
-	defer cleanUp()
 
 	cdb := fullDB.ChannelStateDB()
 
@@ -172,9 +171,8 @@ func TestFetchClosedChannelForID(t *testing.T) {
 func TestAddrsForNode(t *testing.T) {
 	t.Parallel()
 
-	fullDB, cleanUp, err := MakeTestDB()
+	fullDB, err := MakeTestDB(t)
 	require.NoError(t, err, "unable to make test database")
-	defer cleanUp()
 
 	graph := fullDB.ChannelGraph()
 
@@ -226,9 +224,8 @@ func TestAddrsForNode(t *testing.T) {
 func TestFetchChannel(t *testing.T) {
 	t.Parallel()
 
-	fullDB, cleanUp, err := MakeTestDB()
+	fullDB, err := MakeTestDB(t)
 	require.NoError(t, err, "unable to make test database")
-	defer cleanUp()
 
 	cdb := fullDB.ChannelStateDB()
 
@@ -324,9 +321,8 @@ func genRandomChannelShell() (*ChannelShell, error) {
 func TestRestoreChannelShells(t *testing.T) {
 	t.Parallel()
 
-	fullDB, cleanUp, err := MakeTestDB()
+	fullDB, err := MakeTestDB(t)
 	require.NoError(t, err, "unable to make test database")
-	defer cleanUp()
 
 	cdb := fullDB.ChannelStateDB()
 
@@ -414,9 +410,8 @@ func TestRestoreChannelShells(t *testing.T) {
 func TestAbandonChannel(t *testing.T) {
 	t.Parallel()
 
-	fullDB, cleanUp, err := MakeTestDB()
+	fullDB, err := MakeTestDB(t)
 	require.NoError(t, err, "unable to make test database")
-	defer cleanUp()
 
 	cdb := fullDB.ChannelStateDB()
 
@@ -581,12 +576,11 @@ func TestFetchChannels(t *testing.T) {
 		t.Run(test.name, func(t *testing.T) {
 			t.Parallel()
 
-			fullDB, cleanUp, err := MakeTestDB()
+			fullDB, err := MakeTestDB(t)
 			if err != nil {
 				t.Fatalf("unable to make test "+
 					"database: %v", err)
 			}
-			defer cleanUp()
 
 			cdb := fullDB.ChannelStateDB()
 
@@ -652,9 +646,8 @@ func TestFetchChannels(t *testing.T) {
 
 // TestFetchHistoricalChannel tests lookup of historical channels.
 func TestFetchHistoricalChannel(t *testing.T) {
-	fullDB, cleanUp, err := MakeTestDB()
+	fullDB, err := MakeTestDB(t)
 	require.NoError(t, err, "unable to make test database")
-	defer cleanUp()
 
 	cdb := fullDB.ChannelStateDB()
 
@@ -20,9 +20,8 @@ func TestForwardingLogBasicStorageAndQuery(t *testing.T) {
 	// First, we'll set up a test database, and use that to instantiate the
 	// forwarding event log that we'll be using for the duration of the
 	// test.
-	db, cleanUp, err := MakeTestDB()
+	db, err := MakeTestDB(t)
 	require.NoError(t, err, "unable to make test db")
-	defer cleanUp()
 
 	log := ForwardingLog{
 		db: db,
@@ -89,9 +88,8 @@ func TestForwardingLogQueryOptions(t *testing.T) {
 	// First, we'll set up a test database, and use that to instantiate the
 	// forwarding event log that we'll be using for the duration of the
 	// test.
-	db, cleanUp, err := MakeTestDB()
+	db, err := MakeTestDB(t)
 	require.NoError(t, err, "unable to make test db")
-	defer cleanUp()
 
 	log := ForwardingLog{
 		db: db,
@@ -189,9 +187,8 @@ func TestForwardingLogQueryLimit(t *testing.T) {
 	// First, we'll set up a test database, and use that to instantiate the
 	// forwarding event log that we'll be using for the duration of the
 	// test.
-	db, cleanUp, err := MakeTestDB()
+	db, err := MakeTestDB(t)
 	require.NoError(t, err, "unable to make test db")
-	defer cleanUp()
 
 	log := ForwardingLog{
 		db: db,
@@ -301,9 +298,8 @@ func TestForwardingLogStoreEvent(t *testing.T) {
 	// First, we'll set up a test database, and use that to instantiate the
 	// forwarding event log that we'll be using for the duration of the
 	// test.
-	db, cleanUp, err := MakeTestDB()
+	db, err := MakeTestDB(t)
 	require.NoError(t, err, "unable to make test db")
-	defer cleanUp()
 
 	log := ForwardingLog{
 		db: db,
@@ -149,8 +149,7 @@ func TestInvoiceWorkflow(t *testing.T) {
 }
 
 func testInvoiceWorkflow(t *testing.T, test invWorkflowTest) {
-	db, cleanUp, err := MakeTestDB()
-	defer cleanUp()
+	db, err := MakeTestDB(t)
 	require.NoError(t, err, "unable to make test db")
 
 	// Create a fake invoice which we'll use several times in the tests
@@ -293,8 +292,7 @@ func testInvoiceWorkflow(t *testing.T, test invWorkflowTest) {
 // TestAddDuplicatePayAddr asserts that the payment addresses of inserted
 // invoices are unique.
 func TestAddDuplicatePayAddr(t *testing.T) {
-	db, cleanUp, err := MakeTestDB()
-	defer cleanUp()
+	db, err := MakeTestDB(t)
 	require.NoError(t, err)
 
 	// Create two invoices with the same payment addr.
@@ -320,8 +318,7 @@ func TestAddDuplicatePayAddr(t *testing.T) {
 // addresses to be inserted if they are blank to support JIT legacy keysend
 // invoices.
 func TestAddDuplicateKeysendPayAddr(t *testing.T) {
-	db, cleanUp, err := MakeTestDB()
-	defer cleanUp()
+	db, err := MakeTestDB(t)
 	require.NoError(t, err)
 
 	// Create two invoices with the same _blank_ payment addr.
@@ -363,8 +360,7 @@ func TestAddDuplicateKeysendPayAddr(t *testing.T) {
 // ensures that the HTLC's payment hash always matches the payment hash in the
 // returned invoice.
 func TestFailInvoiceLookupMPPPayAddrOnly(t *testing.T) {
-	db, cleanUp, err := MakeTestDB()
-	defer cleanUp()
+	db, err := MakeTestDB(t)
 	require.NoError(t, err)
 
 	// Create and insert a random invoice.
@@ -391,8 +387,7 @@ func TestFailInvoiceLookupMPPPayAddrOnly(t *testing.T) {
 // TestInvRefEquivocation asserts that retrieving or updating an invoice using
 // an equivocating InvoiceRef results in ErrInvRefEquivocation.
 func TestInvRefEquivocation(t *testing.T) {
-	db, cleanUp, err := MakeTestDB()
-	defer cleanUp()
+	db, err := MakeTestDB(t)
 	require.NoError(t, err)
 
 	// Add two random invoices.
@@ -431,8 +426,7 @@ func TestInvRefEquivocation(t *testing.T) {
 func TestInvoiceCancelSingleHtlc(t *testing.T) {
 	t.Parallel()
 
-	db, cleanUp, err := MakeTestDB()
-	defer cleanUp()
+	db, err := MakeTestDB(t)
 	require.NoError(t, err, "unable to make test db")
 
 	preimage := lntypes.Preimage{1}
@@ -499,8 +493,7 @@ func TestInvoiceCancelSingleHtlc(t *testing.T) {
 func TestInvoiceCancelSingleHtlcAMP(t *testing.T) {
 	t.Parallel()
 
-	db, cleanUp, err := MakeTestDB(OptionClock(testClock))
-	defer cleanUp()
+	db, err := MakeTestDB(t, OptionClock(testClock))
 	require.NoError(t, err, "unable to make test db: %v", err)
 
 	// We'll start out by creating an invoice and writing it to the DB.
@@ -656,8 +649,7 @@ func TestInvoiceCancelSingleHtlcAMP(t *testing.T) {
 func TestInvoiceAddTimeSeries(t *testing.T) {
 	t.Parallel()
 
-	db, cleanUp, err := MakeTestDB(OptionClock(testClock))
-	defer cleanUp()
+	db, err := MakeTestDB(t, OptionClock(testClock))
 	require.NoError(t, err, "unable to make test db")
 
 	_, err = db.InvoicesAddedSince(0)
@@ -812,8 +804,7 @@ func TestSettleIndexAmpPayments(t *testing.T) {
 	t.Parallel()
 
 	testClock := clock.NewTestClock(testNow)
-	db, cleanUp, err := MakeTestDB(OptionClock(testClock))
-	defer cleanUp()
+	db, err := MakeTestDB(t, OptionClock(testClock))
 	require.Nil(t, err)
 
 	// First, we'll make a sample invoice that'll be paid to several times
@@ -969,8 +960,7 @@ func TestSettleIndexAmpPayments(t *testing.T) {
 func TestScanInvoices(t *testing.T) {
 	t.Parallel()
 
-	db, cleanup, err := MakeTestDB()
-	defer cleanup()
+	db, err := MakeTestDB(t)
 	require.NoError(t, err, "unable to make test db")
 
 	var invoices map[lntypes.Hash]*Invoice
@@ -1028,8 +1018,7 @@ func TestScanInvoices(t *testing.T) {
 func TestDuplicateSettleInvoice(t *testing.T) {
 	t.Parallel()
 
-	db, cleanUp, err := MakeTestDB(OptionClock(testClock))
-	defer cleanUp()
+	db, err := MakeTestDB(t, OptionClock(testClock))
 	require.NoError(t, err, "unable to make test db")
 
 	// We'll start out by creating an invoice and writing it to the DB.
@@ -1087,8 +1076,7 @@ func TestDuplicateSettleInvoice(t *testing.T) {
 func TestQueryInvoices(t *testing.T) {
 	t.Parallel()
 
-	db, cleanUp, err := MakeTestDB(OptionClock(testClock))
-	defer cleanUp()
+	db, err := MakeTestDB(t, OptionClock(testClock))
 	require.NoError(t, err, "unable to make test db")
 
 	// To begin the test, we'll add 50 invoices to the database. We'll
@@ -1400,8 +1388,7 @@ func getUpdateInvoice(amt lnwire.MilliSatoshi) InvoiceUpdateCallback {
 func TestCustomRecords(t *testing.T) {
 	t.Parallel()
 
-	db, cleanUp, err := MakeTestDB()
-	defer cleanUp()
+	db, err := MakeTestDB(t)
 	require.NoError(t, err, "unable to make test db")
 
 	preimage := lntypes.Preimage{1}
@@ -1470,8 +1457,7 @@ func TestInvoiceHtlcAMPFields(t *testing.T) {
 }
 
 func testInvoiceHtlcAMPFields(t *testing.T, isAMP bool) {
-	db, cleanUp, err := MakeTestDB()
-	defer cleanUp()
+	db, err := MakeTestDB(t)
 	require.Nil(t, err)
 
 	testInvoice, err := randInvoice(1000)
@@ -1652,8 +1638,7 @@ func TestHTLCSet(t *testing.T) {
 // TestAddInvoiceWithHTLCs asserts that you can't insert an invoice that already
 // has HTLCs.
 func TestAddInvoiceWithHTLCs(t *testing.T) {
-	db, cleanUp, err := MakeTestDB()
-	defer cleanUp()
+	db, err := MakeTestDB(t)
 	require.Nil(t, err)
 
 	testInvoice, err := randInvoice(1000)
@@ -1672,8 +1657,7 @@ func TestAddInvoiceWithHTLCs(t *testing.T) {
 // that invoices with duplicate set ids are disallowed.
 func TestSetIDIndex(t *testing.T) {
 	testClock := clock.NewTestClock(testNow)
-	db, cleanUp, err := MakeTestDB(OptionClock(testClock))
-	defer cleanUp()
+	db, err := MakeTestDB(t, OptionClock(testClock))
 	require.Nil(t, err)
 
 	// We'll start out by creating an invoice and writing it to the DB.
@@ -1983,8 +1967,7 @@ func getUpdateInvoiceAMPSettle(setID *[32]byte,
 func TestUnexpectedInvoicePreimage(t *testing.T) {
 	t.Parallel()
 
-	db, cleanup, err := MakeTestDB()
-	defer cleanup()
+	db, err := MakeTestDB(t)
 	require.NoError(t, err, "unable to make test db")
 
 	invoice, err := randInvoice(lnwire.MilliSatoshi(100))
@@ -2040,8 +2023,7 @@ func TestUpdateHTLCPreimages(t *testing.T) {
 }
 
 func testUpdateHTLCPreimages(t *testing.T, test updateHTLCPreimageTestCase) {
-	db, cleanup, err := MakeTestDB()
-	defer cleanup()
+	db, err := MakeTestDB(t)
 	require.NoError(t, err, "unable to make test db")
 
 	// We'll start out by creating an invoice and writing it to the DB.
@@ -2772,8 +2754,7 @@ func testUpdateHTLC(t *testing.T, test updateHTLCTest) {
 func TestDeleteInvoices(t *testing.T) {
 	t.Parallel()
 
-	db, cleanup, err := MakeTestDB()
-	defer cleanup()
+	db, err := MakeTestDB(t)
 	require.NoError(t, err, "unable to make test db")
 
 	// Add some invoices to the test db.
@@ -2856,9 +2837,8 @@ func TestDeleteInvoices(t *testing.T) {
 func TestAddInvoiceInvalidFeatureDeps(t *testing.T) {
 	t.Parallel()
 
-	db, cleanup, err := MakeTestDB()
+	db, err := MakeTestDB(t)
 	require.NoError(t, err, "unable to make test db")
-	defer cleanup()
 
 	invoice, err := randInvoice(500)
 	require.NoError(t, err)
@@ -15,8 +15,7 @@ import (
 func applyMigration(t *testing.T, beforeMigration, afterMigration func(d *DB),
 	migrationFunc migration, shouldFail bool, dryRun bool) {
 
-	cdb, cleanUp, err := MakeTestDB()
-	defer cleanUp()
+	cdb, err := MakeTestDB(t)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -86,8 +85,7 @@ func applyMigration(t *testing.T, beforeMigration, afterMigration func(d *DB),
 func TestVersionFetchPut(t *testing.T) {
 	t.Parallel()
 
-	db, cleanUp, err := MakeTestDB()
-	defer cleanUp()
+	db, err := MakeTestDB(t)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -450,7 +448,7 @@ func TestMigrationReversion(t *testing.T) {
 
 	backend, cleanup, err = kvdb.GetTestBackend(tempDirName, "cdb")
 	require.NoError(t, err, "unable to get test db backend")
-	defer cleanup()
+	t.Cleanup(cleanup)
 
 	_, err = CreateWithBackend(backend)
 	if err != ErrDBReversion {
@@ -498,8 +496,7 @@ func TestMigrationDryRun(t *testing.T) {
 func TestOptionalMeta(t *testing.T) {
 	t.Parallel()
 
-	db, cleanUp, err := MakeTestDB()
-	defer cleanUp()
+	db, err := MakeTestDB(t)
 	require.NoError(t, err)
 
 	// Test read an empty optional meta.
@@ -527,8 +524,7 @@ func TestOptionalMeta(t *testing.T) {
 func TestApplyOptionalVersions(t *testing.T) {
 	t.Parallel()
 
-	db, cleanUp, err := MakeTestDB()
-	defer cleanUp()
+	db, err := MakeTestDB(t)
 	require.NoError(t, err)
 
 	// Overwrite the migration function so we can count how many times the
@@ -581,8 +577,7 @@ func TestApplyOptionalVersions(t *testing.T) {
 func TestFetchMeta(t *testing.T) {
 	t.Parallel()
 
-	db, cleanUp, err := MakeTestDB()
-	defer cleanUp()
+	db, err := MakeTestDB(t)
 	require.NoError(t, err)
 
 	meta := &Meta{}
@@ -601,8 +596,7 @@ func TestFetchMeta(t *testing.T) {
 func TestMarkerAndTombstone(t *testing.T) {
 	t.Parallel()
 
-	db, cleanUp, err := MakeTestDB()
-	defer cleanUp()
+	db, err := MakeTestDB(t)
 	require.NoError(t, err)
 
 	// Test that a generic marker is not present in a fresh DB.
@@ -14,9 +14,8 @@ import (
 func TestLinkNodeEncodeDecode(t *testing.T) {
 	t.Parallel()
 
-	fullDB, cleanUp, err := MakeTestDB()
+	fullDB, err := MakeTestDB(t)
 	require.NoError(t, err, "unable to make test database")
-	defer cleanUp()
 
 	cdb := fullDB.ChannelStateDB()
 
@@ -103,9 +102,8 @@ func TestLinkNodeEncodeDecode(t *testing.T) {
 func TestDeleteLinkNode(t *testing.T) {
 	t.Parallel()
 
-	fullDB, cleanUp, err := MakeTestDB()
+	fullDB, err := MakeTestDB(t)
 	require.NoError(t, err, "unable to make test database")
-	defer cleanUp()
 
 	cdb := fullDB.ChannelStateDB()
 
@@ -54,8 +54,7 @@ func genInfo() (*PaymentCreationInfo, *HTLCAttemptInfo,
 func TestPaymentControlSwitchFail(t *testing.T) {
 	t.Parallel()
 
-	db, cleanup, err := MakeTestDB()
-	defer cleanup()
+	db, err := MakeTestDB(t)
 	require.NoError(t, err, "unable to init db")
 
 	pControl := NewPaymentControl(db)
@@ -185,9 +184,7 @@ func TestPaymentControlSwitchFail(t *testing.T) {
 func TestPaymentControlSwitchDoubleSend(t *testing.T) {
 	t.Parallel()
 
-	db, cleanup, err := MakeTestDB()
-	defer cleanup()
-
+	db, err := MakeTestDB(t)
 	require.NoError(t, err, "unable to init db")
 
 	pControl := NewPaymentControl(db)
@@ -258,9 +255,7 @@ func TestPaymentControlSwitchDoubleSend(t *testing.T) {
 func TestPaymentControlSuccessesWithoutInFlight(t *testing.T) {
 	t.Parallel()
 
-	db, cleanup, err := MakeTestDB()
-	defer cleanup()
-
+	db, err := MakeTestDB(t)
 	require.NoError(t, err, "unable to init db")
 
 	pControl := NewPaymentControl(db)
@@ -287,9 +282,7 @@ func TestPaymentControlSuccessesWithoutInFlight(t *testing.T) {
 func TestPaymentControlFailsWithoutInFlight(t *testing.T) {
 	t.Parallel()
 
-	db, cleanup, err := MakeTestDB()
-	defer cleanup()
-
+	db, err := MakeTestDB(t)
 	require.NoError(t, err, "unable to init db")
 
 	pControl := NewPaymentControl(db)
@@ -311,9 +304,7 @@ func TestPaymentControlFailsWithoutInFlight(t *testing.T) {
 func TestPaymentControlDeleteNonInFlight(t *testing.T) {
 	t.Parallel()
 
-	db, cleanup, err := MakeTestDB()
-	defer cleanup()
-
+	db, err := MakeTestDB(t)
 	require.NoError(t, err, "unable to init db")
 
 	// Create a sequence number for duplicate payments that will not collide
@@ -520,8 +511,7 @@ func TestPaymentControlDeleteNonInFlight(t *testing.T) {
 func TestPaymentControlDeletePayments(t *testing.T) {
 	t.Parallel()
 
-	db, cleanup, err := MakeTestDB()
-	defer cleanup()
+	db, err := MakeTestDB(t)
 	require.NoError(t, err, "unable to init db")
 
 	pControl := NewPaymentControl(db)
@@ -574,8 +564,7 @@ func TestPaymentControlDeletePayments(t *testing.T) {
 func TestPaymentControlDeleteSinglePayment(t *testing.T) {
 	t.Parallel()
 
-	db, cleanup, err := MakeTestDB()
-	defer cleanup()
+	db, err := MakeTestDB(t)
 	require.NoError(t, err, "unable to init db")
 
 	pControl := NewPaymentControl(db)
@@ -678,9 +667,7 @@ func TestPaymentControlMultiShard(t *testing.T) {
 	}
 
 	runSubTest := func(t *testing.T, test testCase) {
-		db, cleanup, err := MakeTestDB()
-		defer cleanup()
-
+		db, err := MakeTestDB(t)
 		if err != nil {
 			t.Fatalf("unable to init db: %v", err)
 		}
@@ -924,9 +911,7 @@ func TestPaymentControlMultiShard(t *testing.T) {
 func TestPaymentControlMPPRecordValidation(t *testing.T) {
 	t.Parallel()
 
-	db, cleanup, err := MakeTestDB()
-	defer cleanup()
-
+	db, err := MakeTestDB(t)
 	require.NoError(t, err, "unable to init db")
 
 	pControl := NewPaymentControl(db)
@@ -1017,8 +1002,7 @@ func TestDeleteFailedAttempts(t *testing.T) {
 }
 
 func testDeleteFailedAttempts(t *testing.T, keepFailedPaymentAttempts bool) {
-	db, cleanup, err := MakeTestDB()
-	defer cleanup()
+	db, err := MakeTestDB(t)
 
 	require.NoError(t, err, "unable to init db")
 	db.keepFailedPaymentAttempts = keepFailedPaymentAttempts
@@ -398,11 +398,10 @@ func TestQueryPayments(t *testing.T) {
 		t.Run(tt.name, func(t *testing.T) {
 			t.Parallel()
 
-			db, cleanup, err := MakeTestDB()
+			db, err := MakeTestDB(t)
 			if err != nil {
 				t.Fatalf("unable to init db: %v", err)
 			}
-			defer cleanup()
 
 			// Make a preliminary query to make sure it's ok to
 			// query when we have no payments.
@@ -514,11 +513,9 @@ func TestQueryPayments(t *testing.T) {
 // case where a specific duplicate is not found and the duplicates bucket is not
 // present when we expect it to be.
 func TestFetchPaymentWithSequenceNumber(t *testing.T) {
-	db, cleanup, err := MakeTestDB()
+	db, err := MakeTestDB(t)
 	require.NoError(t, err)
 
-	defer cleanup()
-
 	pControl := NewPaymentControl(db)
 
 	// Generate a test payment which does not have duplicates.
@@ -10,9 +10,8 @@ import (
 
 // TestFlapCount tests lookup and writing of flap count to disk.
 func TestFlapCount(t *testing.T) {
-	db, cleanup, err := MakeTestDB()
+	db, err := MakeTestDB(t)
 	require.NoError(t, err)
-	defer cleanup()
 
 	// Try to read flap count for a peer that we have no records for.
 	_, err = db.ReadFlapCount(testPub)
@@ -48,9 +48,8 @@ func TestPersistReport(t *testing.T) {
 		test := test
 
 		t.Run(test.name, func(t *testing.T) {
-			db, cleanup, err := MakeTestDB()
+			db, err := MakeTestDB(t)
 			require.NoError(t, err)
-			defer cleanup()
 
 			channelOutpoint := testChanPoint1
 
@@ -85,9 +84,8 @@ func TestPersistReport(t *testing.T) {
 // channel, testing that the appropriate error is returned based on the state
 // of the existing bucket.
 func TestFetchChannelReadBucket(t *testing.T) {
-	db, cleanup, err := MakeTestDB()
+	db, err := MakeTestDB(t)
 	require.NoError(t, err)
-	defer cleanup()
 
 	channelOutpoint := testChanPoint1
 
@@ -197,9 +195,8 @@ func TestFetchChannelWriteBucket(t *testing.T) {
 		test := test
 
 		t.Run(test.name, func(t *testing.T) {
-			db, cleanup, err := MakeTestDB()
+			db, err := MakeTestDB(t)
 			require.NoError(t, err)
-			defer cleanup()
 
 			// Update our db to the starting state we expect.
 			err = kvdb.Update(db, test.setup, func() {})
@@ -291,9 +291,8 @@ func TestDerializeRevocationLog(t *testing.T) {
 func TestFetchLogBucket(t *testing.T) {
 	t.Parallel()
 
-	fullDB, cleanUp, err := MakeTestDB()
+	fullDB, err := MakeTestDB(t)
 	require.NoError(t, err)
-	defer cleanUp()
 
 	backend := fullDB.ChannelStateDB().backend
 
@@ -326,9 +325,8 @@ func TestFetchLogBucket(t *testing.T) {
 func TestDeleteLogBucket(t *testing.T) {
 	t.Parallel()
 
-	fullDB, cleanUp, err := MakeTestDB()
+	fullDB, err := MakeTestDB(t)
 	require.NoError(t, err)
-	defer cleanUp()
 
 	backend := fullDB.ChannelStateDB().backend
 
@@ -423,9 +421,8 @@ func TestPutRevocationLog(t *testing.T) {
 	for _, tc := range testCases {
 		tc := tc
 
-		fullDB, cleanUp, err := MakeTestDB()
+		fullDB, err := MakeTestDB(t)
 		require.NoError(t, err)
-		defer cleanUp()
 
 		backend := fullDB.ChannelStateDB().backend
 
@@ -523,9 +520,8 @@ func TestFetchRevocationLogCompatible(t *testing.T) {
 	for _, tc := range testCases {
 		tc := tc
 
-		fullDB, cleanUp, err := MakeTestDB()
+		fullDB, err := MakeTestDB(t)
 		require.NoError(t, err)
-		defer cleanUp()
 
 		backend := fullDB.ChannelStateDB().backend
 
@@ -15,9 +15,8 @@ import (
 func TestWaitingProofStore(t *testing.T) {
 	t.Parallel()
 
-	db, cleanup, err := MakeTestDB()
+	db, err := MakeTestDB(t)
 	require.NoError(t, err, "failed to make test database")
-	defer cleanup()
 
 	proof1 := NewWaitingProof(true, &lnwire.AnnounceSignatures{
 		NodeSignature: wireSig,
@@ -13,9 +13,8 @@ import (
func TestWitnessCacheSha256Retrieval(t *testing.T) {
t.Parallel()

cdb, cleanUp, err := MakeTestDB()
cdb, err := MakeTestDB(t)
require.NoError(t, err, "unable to make test database")
defer cleanUp()

wCache := cdb.NewWitnessCache()

@@ -54,9 +53,8 @@ func TestWitnessCacheSha256Retrieval(t *testing.T) {
func TestWitnessCacheSha256Deletion(t *testing.T) {
t.Parallel()

cdb, cleanUp, err := MakeTestDB()
cdb, err := MakeTestDB(t)
require.NoError(t, err, "unable to make test database")
defer cleanUp()

wCache := cdb.NewWitnessCache()

@@ -101,9 +99,8 @@ func TestWitnessCacheSha256Deletion(t *testing.T) {
func TestWitnessCacheUnknownWitness(t *testing.T) {
t.Parallel()

cdb, cleanUp, err := MakeTestDB()
cdb, err := MakeTestDB(t)
require.NoError(t, err, "unable to make test database")
defer cleanUp()

wCache := cdb.NewWitnessCache()

@@ -118,9 +115,8 @@ func TestWitnessCacheUnknownWitness(t *testing.T) {
// TestAddSha256Witnesses tests that insertion using AddSha256Witnesses behaves
// identically to the insertion via the generalized interface.
func TestAddSha256Witnesses(t *testing.T) {
cdb, cleanUp, err := MakeTestDB()
cdb, err := MakeTestDB(t)
require.NoError(t, err, "unable to make test database")
defer cleanUp()

wCache := cdb.NewWitnessCache()
@@ -44,7 +44,7 @@ func TestEtcdElector(t *testing.T) {

etcdCfg, cleanup, err := etcd.NewEmbeddedEtcdInstance(tmpDir, 0, 0, "")
require.NoError(t, err)
defer cleanup()
t.Cleanup(cleanup)

ctx, cancel := context.WithCancel(context.Background())
defer cancel()
@@ -956,18 +956,18 @@ restartCheck:

func initBreachedState(t *testing.T) (*BreachArbiter,
*lnwallet.LightningChannel, *lnwallet.LightningChannel,
*lnwallet.LocalForceCloseSummary, chan *ContractBreachEvent,
func(), func()) {
*lnwallet.LocalForceCloseSummary, chan *ContractBreachEvent) {

// Create a pair of channels using a notifier that allows us to signal
// a spend of the funding transaction. Alice's channel will be the one
// observing a breach.
alice, bob, cleanUpChans, err := createInitChannels(t, 1)
alice, bob, err := createInitChannels(t, 1)
require.NoError(t, err, "unable to create test channels")

// Instantiate a breach arbiter to handle the breach of alice's channel.
contractBreaches := make(chan *ContractBreachEvent)

brar, cleanUpArb, err := createTestArbiter(
brar, err := createTestArbiter(
t, contractBreaches, alice.State().Db.GetParentDB(),
)
require.NoError(t, err, "unable to initialize test breach arbiter")

@@ -1003,8 +1003,7 @@ func initBreachedState(t *testing.T) (*BreachArbiter,
t.Fatalf("Can't update the channel state: %v", err)
}

return brar, alice, bob, bobClose, contractBreaches, cleanUpChans,
cleanUpArb
return brar, alice, bob, bobClose, contractBreaches
}

// TestBreachHandoffSuccess tests that a channel's close observer properly

@@ -1012,10 +1011,7 @@ func initBreachedState(t *testing.T) (*BreachArbiter,
// breach close. This test verifies correctness in the event that the handoff
// experiences no interruptions.
func TestBreachHandoffSuccess(t *testing.T) {
brar, alice, _, bobClose, contractBreaches,
cleanUpChans, cleanUpArb := initBreachedState(t)
defer cleanUpChans()
defer cleanUpArb()
brar, alice, _, bobClose, contractBreaches := initBreachedState(t)

chanPoint := alice.ChanPoint

@@ -1093,10 +1089,7 @@ func TestBreachHandoffSuccess(t *testing.T) {
// arbiter fails to write the information to disk, and that a subsequent attempt
// at the handoff succeeds.
func TestBreachHandoffFail(t *testing.T) {
brar, alice, _, bobClose, contractBreaches,
cleanUpChans, cleanUpArb := initBreachedState(t)
defer cleanUpChans()
defer cleanUpArb()
brar, alice, _, bobClose, contractBreaches := initBreachedState(t)

// Before alerting Alice of the breach, instruct our failing retribution
// store to fail the next database operation, which we expect to write

@@ -1140,11 +1133,10 @@ func TestBreachHandoffFail(t *testing.T) {
assertNoArbiterBreach(t, brar, chanPoint)
assertNotPendingClosed(t, alice)

brar, cleanUpArb, err := createTestArbiter(
brar, err := createTestArbiter(
t, contractBreaches, alice.State().Db.GetParentDB(),
)
require.NoError(t, err, "unable to initialize test breach arbiter")
defer cleanUpArb()

// Signal a spend of the funding transaction and wait for the close
// observer to exit. This time we are allowing the handoff to succeed.
@@ -1183,9 +1175,7 @@ func TestBreachHandoffFail(t *testing.T) {
// TestBreachCreateJusticeTx tests that we create three different variants of
// the justice tx.
func TestBreachCreateJusticeTx(t *testing.T) {
brar, _, _, _, _, cleanUpChans, cleanUpArb := initBreachedState(t)
defer cleanUpChans()
defer cleanUpArb()
brar, _, _, _, _ := initBreachedState(t)

// In this test we just want to check that the correct inputs are added
// to the justice tx, not that we create a valid spend, so we just set

@@ -1564,10 +1554,7 @@ func TestBreachSpends(t *testing.T) {
}

func testBreachSpends(t *testing.T, test breachTest) {
brar, alice, _, bobClose, contractBreaches,
cleanUpChans, cleanUpArb := initBreachedState(t)
defer cleanUpChans()
defer cleanUpArb()
brar, alice, _, bobClose, contractBreaches := initBreachedState(t)

var (
height = bobClose.ChanSnapshot.CommitHeight

@@ -1783,10 +1770,7 @@ func testBreachSpends(t *testing.T, test breachTest) {
// "split" the justice tx in case the first justice tx doesn't confirm within
// a reasonable time.
func TestBreachDelayedJusticeConfirmation(t *testing.T) {
brar, alice, _, bobClose, contractBreaches,
cleanUpChans, cleanUpArb := initBreachedState(t)
defer cleanUpChans()
defer cleanUpArb()
brar, alice, _, bobClose, contractBreaches := initBreachedState(t)

var (
height = bobClose.ChanSnapshot.CommitHeight

@@ -2123,7 +2107,7 @@ func assertNotPendingClosed(t *testing.T, c *lnwallet.LightningChannel) {
// createTestArbiter instantiates a breach arbiter with a failing retribution
// store, so that controlled failures can be tested.
func createTestArbiter(t *testing.T, contractBreaches chan *ContractBreachEvent,
db *channeldb.DB) (*BreachArbiter, func(), error) {
db *channeldb.DB) (*BreachArbiter, error) {

// Create a failing retribution store, that wraps a normal one.
store := newFailingRetributionStore(func() RetributionStorer {

@@ -2148,21 +2132,21 @@ func createTestArbiter(t *testing.T, contractBreaches chan *ContractBreachEvent,
})

if err := ba.Start(); err != nil {
return nil, nil, err
return nil, err
}
t.Cleanup(func() {
require.NoError(t, ba.Stop())
})

// The caller is responsible for closing the database.
cleanUp := func() {
ba.Stop()
}

return ba, cleanUp, nil
return ba, nil
}
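One property worth keeping in mind when reading initBreachedState: createInitChannels registers the database and sig-pool teardowns before createTestArbiter registers the arbiter's Stop, and t.Cleanup runs callbacks in last-in, first-out order, so the arbiter is stopped before the databases it depends on are closed. A tiny self-contained test demonstrating the ordering (names hypothetical):

```go
package cleanuporder

import "testing"

// TestCleanupOrder shows that t.Cleanup callbacks run last-in,
// first-out, like a stack of defers scoped to the whole test.
func TestCleanupOrder(t *testing.T) {
	var order []string

	// Registered first, runs last: by then the component must
	// already have been stopped.
	t.Cleanup(func() {
		order = append(order, "db.Close")
		if len(order) != 2 || order[0] != "component.Stop" {
			t.Errorf("unexpected cleanup order: %v", order)
		}
	})

	// Registered last, runs first.
	t.Cleanup(func() {
		order = append(order, "component.Stop")
	})
}
```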
// createInitChannels creates two initialized test channels funded with 10 BTC,
// with 5 BTC allocated to each side. Within the channel, Alice is the
// initiator.
func createInitChannels(t *testing.T, revocationWindow int) (*lnwallet.LightningChannel, *lnwallet.LightningChannel, func(), error) {
func createInitChannels(t *testing.T, revocationWindow int) (
*lnwallet.LightningChannel, *lnwallet.LightningChannel, error) {

aliceKeyPriv, aliceKeyPub := btcec.PrivKeyFromBytes(
channels.AlicesPrivKey,
)

@@ -2172,7 +2156,7 @@ func createInitChannels(t *testing.T, revocationWindow int) (*lnwallet.Lightning

channelCapacity, err := btcutil.NewAmount(10)
if err != nil {
return nil, nil, nil, err
return nil, nil, err
}

channelBal := channelCapacity / 2

@@ -2240,23 +2224,23 @@ func createInitChannels(t *testing.T, revocationWindow int) (*lnwallet.Lightning

bobRoot, err := chainhash.NewHash(bobKeyPriv.Serialize())
if err != nil {
return nil, nil, nil, err
return nil, nil, err
}
bobPreimageProducer := shachain.NewRevocationProducer(*bobRoot)
bobFirstRevoke, err := bobPreimageProducer.AtIndex(0)
if err != nil {
return nil, nil, nil, err
return nil, nil, err
}
bobCommitPoint := input.ComputeCommitmentPoint(bobFirstRevoke[:])

aliceRoot, err := chainhash.NewHash(aliceKeyPriv.Serialize())
if err != nil {
return nil, nil, nil, err
return nil, nil, err
}
alicePreimageProducer := shachain.NewRevocationProducer(*aliceRoot)
aliceFirstRevoke, err := alicePreimageProducer.AtIndex(0)
if err != nil {
return nil, nil, nil, err
return nil, nil, err
}
aliceCommitPoint := input.ComputeCommitmentPoint(aliceFirstRevoke[:])

@@ -2266,23 +2250,29 @@ func createInitChannels(t *testing.T, revocationWindow int) (*lnwallet.Lightning
false, 0,
)
if err != nil {
return nil, nil, nil, err
return nil, nil, err
}

dbAlice, err := channeldb.Open(t.TempDir())
if err != nil {
return nil, nil, nil, err
return nil, nil, err
}
t.Cleanup(func() {
require.NoError(t, dbAlice.Close())
})

dbBob, err := channeldb.Open(t.TempDir())
if err != nil {
return nil, nil, nil, err
return nil, nil, err
}
t.Cleanup(func() {
require.NoError(t, dbBob.Close())
})

estimator := chainfee.NewStaticEstimator(12500, 0)
feePerKw, err := estimator.EstimateFeePerKW(1)
if err != nil {
return nil, nil, nil, err
return nil, nil, err
}

commitFee := feePerKw.FeeForWeight(input.CommitWeight)
@@ -2309,7 +2299,7 @@ func createInitChannels(t *testing.T, revocationWindow int) (*lnwallet.Lightning

var chanIDBytes [8]byte
if _, err := io.ReadFull(crand.Reader, chanIDBytes[:]); err != nil {
return nil, nil, nil, err
return nil, nil, err
}

shortChanID := lnwire.NewShortChanIDFromInt(

@@ -2360,25 +2350,31 @@ func createInitChannels(t *testing.T, revocationWindow int) (*lnwallet.Lightning
aliceSigner, aliceChannelState, alicePool,
)
if err != nil {
return nil, nil, nil, err
return nil, nil, err
}
alicePool.Start()
t.Cleanup(func() {
require.NoError(t, alicePool.Stop())
})

bobPool := lnwallet.NewSigPool(1, bobSigner)
channelBob, err := lnwallet.NewLightningChannel(
bobSigner, bobChannelState, bobPool,
)
if err != nil {
return nil, nil, nil, err
return nil, nil, err
}
bobPool.Start()
t.Cleanup(func() {
require.NoError(t, bobPool.Stop())
})

addr := &net.TCPAddr{
IP: net.ParseIP("127.0.0.1"),
Port: 18556,
}
if err := channelAlice.State().SyncPending(addr, 101); err != nil {
return nil, nil, nil, err
return nil, nil, err
}

addr = &net.TCPAddr{

@@ -2386,22 +2382,17 @@ func createInitChannels(t *testing.T, revocationWindow int) (*lnwallet.Lightning
Port: 18555,
}
if err := channelBob.State().SyncPending(addr, 101); err != nil {
return nil, nil, nil, err
}

cleanUpFunc := func() {
dbBob.Close()
dbAlice.Close()
return nil, nil, err
}

// Now that the channels are open, simulate the start of a session by
// having Alice and Bob extend their revocation windows to each other.
err = initRevocationWindows(channelAlice, channelBob, revocationWindow)
if err != nil {
return nil, nil, nil, err
return nil, nil, err
}

return channelAlice, channelBob, cleanUpFunc, nil
return channelAlice, channelBob, nil
}

// initRevocationWindows simulates a new channel being opened within the p2p
@@ -24,19 +24,20 @@ func TestChainArbitratorRepublishCloses(t *testing.T) {
if err != nil {
t.Fatal(err)
}
defer db.Close()
t.Cleanup(func() {
require.NoError(t, db.Close())
})

// Create 10 test channels and sync them to the database.
const numChans = 10
var channels []*channeldb.OpenChannel
for i := 0; i < numChans; i++ {
lChannel, _, cleanup, err := lnwallet.CreateTestChannels(
channeldb.SingleFunderTweaklessBit,
lChannel, _, err := lnwallet.CreateTestChannels(
t, channeldb.SingleFunderTweaklessBit,
)
if err != nil {
t.Fatal(err)
}
defer cleanup()

channel := lChannel.State()

@@ -94,11 +95,9 @@ func TestChainArbitratorRepublishCloses(t *testing.T) {
if err := chainArb.Start(); err != nil {
t.Fatal(err)
}
defer func() {
if err := chainArb.Stop(); err != nil {
t.Fatal(err)
}
}()
t.Cleanup(func() {
require.NoError(t, chainArb.Stop())
})

// Half of the channels should have had their closing tx re-published.
if len(published) != numChans/2 {

@@ -137,15 +136,16 @@ func TestResolveContract(t *testing.T) {

db, err := channeldb.Open(t.TempDir())
require.NoError(t, err, "unable to open db")
defer db.Close()
t.Cleanup(func() {
require.NoError(t, db.Close())
})

// With the DB created, we'll make a new channel, and mark it as
// pending open within the database.
newChannel, _, cleanup, err := lnwallet.CreateTestChannels(
channeldb.SingleFunderTweaklessBit,
newChannel, _, err := lnwallet.CreateTestChannels(
t, channeldb.SingleFunderTweaklessBit,
)
require.NoError(t, err, "unable to make new test channel")
defer cleanup()
channel := newChannel.State()
channel.Db = db.ChannelStateDB()
addr := &net.TCPAddr{

@@ -177,11 +177,9 @@ func TestResolveContract(t *testing.T) {
if err := chainArb.Start(); err != nil {
t.Fatal(err)
}
defer func() {
if err := chainArb.Stop(); err != nil {
t.Fatal(err)
}
}()
t.Cleanup(func() {
require.NoError(t, chainArb.Stop())
})

channelArb := chainArb.activeChannels[channel.FundingOutpoint]
@@ -25,11 +25,10 @@ func TestChainWatcherRemoteUnilateralClose(t *testing.T) {

// First, we'll create two channels which already have established a
// commitment contract between themselves.
aliceChannel, bobChannel, cleanUp, err := lnwallet.CreateTestChannels(
channeldb.SingleFunderTweaklessBit,
aliceChannel, bobChannel, err := lnwallet.CreateTestChannels(
t, channeldb.SingleFunderTweaklessBit,
)
require.NoError(t, err, "unable to create test channels")
defer cleanUp()

// With the channels created, we'll now create a chain watcher instance
// which will be watching for any closes of Alice's channel.

@@ -110,11 +109,10 @@ func TestChainWatcherRemoteUnilateralClosePendingCommit(t *testing.T) {

// First, we'll create two channels which already have established a
// commitment contract between themselves.
aliceChannel, bobChannel, cleanUp, err := lnwallet.CreateTestChannels(
channeldb.SingleFunderTweaklessBit,
aliceChannel, bobChannel, err := lnwallet.CreateTestChannels(
t, channeldb.SingleFunderTweaklessBit,
)
require.NoError(t, err, "unable to create test channels")
defer cleanUp()

// With the channels created, we'll now create a chain watcher instance
// which will be watching for any closes of Alice's channel.

@@ -255,13 +253,12 @@ func TestChainWatcherDataLossProtect(t *testing.T) {
dlpScenario := func(t *testing.T, testCase dlpTestCase) bool {
// First, we'll create two channels which already have
// established a commitment contract between themselves.
aliceChannel, bobChannel, cleanUp, err := lnwallet.CreateTestChannels(
channeldb.SingleFunderBit,
aliceChannel, bobChannel, err := lnwallet.CreateTestChannels(
t, channeldb.SingleFunderBit,
)
if err != nil {
t.Fatalf("unable to create test channels: %v", err)
}
defer cleanUp()

// Based on the number of random updates for this state, make a
// new HTLC to add to the commitment, and then lock in a state

@@ -430,13 +427,12 @@ func TestChainWatcherLocalForceCloseDetect(t *testing.T) {

// First, we'll create two channels which already have
// established a commitment contract between themselves.
aliceChannel, bobChannel, cleanUp, err := lnwallet.CreateTestChannels(
channeldb.SingleFunderBit,
aliceChannel, bobChannel, err := lnwallet.CreateTestChannels(
t, channeldb.SingleFunderBit,
)
if err != nil {
t.Fatalf("unable to create test channels: %v", err)
}
defer cleanUp()

// We'll execute a number of state transitions based on the
// randomly selected number from testing/quick. We do this to
@@ -460,11 +460,9 @@ func TestChannelArbitratorCooperativeClose(t *testing.T) {
if err := chanArbCtx.chanArb.Start(nil); err != nil {
t.Fatalf("unable to start ChannelArbitrator: %v", err)
}
defer func() {
if err := chanArbCtx.chanArb.Stop(); err != nil {
t.Fatalf("unable to stop chan arb: %v", err)
}
}()
t.Cleanup(func() {
require.NoError(t, chanArbCtx.chanArb.Stop())
})

// It should start out in the default state.
chanArbCtx.AssertState(StateDefault)

@@ -681,11 +679,9 @@ func TestChannelArbitratorBreachClose(t *testing.T) {
if err := chanArb.Start(nil); err != nil {
t.Fatalf("unable to start ChannelArbitrator: %v", err)
}
defer func() {
if err := chanArb.Stop(); err != nil {
t.Fatal(err)
}
}()
t.Cleanup(func() {
require.NoError(t, chanArb.Stop())
})

// It should start out in the default state.
chanArbCtx.AssertState(StateDefault)

@@ -1990,11 +1986,9 @@ func TestChannelArbitratorPendingExpiredHTLC(t *testing.T) {
if err := chanArb.Start(nil); err != nil {
t.Fatalf("unable to start ChannelArbitrator: %v", err)
}
defer func() {
if err := chanArb.Stop(); err != nil {
t.Fatalf("unable to stop chan arb: %v", err)
}
}()
t.Cleanup(func() {
require.NoError(t, chanArb.Stop())
})

// Now that our channel arb has started, we'll set up
// its contract signals channel so we can send it
@@ -2098,14 +2092,13 @@ func TestRemoteCloseInitiator(t *testing.T) {
t.Parallel()

// First, create alice's channel.
alice, _, cleanUp, err := lnwallet.CreateTestChannels(
channeldb.SingleFunderTweaklessBit,
alice, _, err := lnwallet.CreateTestChannels(
t, channeldb.SingleFunderTweaklessBit,
)
if err != nil {
t.Fatalf("unable to create test channels: %v",
err)
}
defer cleanUp()

// Create a mock log which will not block the test's
// expected number of transitions, and has

@@ -2148,11 +2141,9 @@ func TestRemoteCloseInitiator(t *testing.T) {
t.Fatalf("unable to start "+
"ChannelArbitrator: %v", err)
}
defer func() {
if err := chanArb.Stop(); err != nil {
t.Fatal(err)
}
}()
t.Cleanup(func() {
require.NoError(t, chanArb.Stop())
})

// It should start out in the default state.
chanArbCtx.AssertState(StateDefault)

@@ -2501,11 +2492,9 @@ func TestChannelArbitratorAnchors(t *testing.T) {
if err := chanArb.Start(nil); err != nil {
t.Fatalf("unable to start ChannelArbitrator: %v", err)
}
defer func() {
if err := chanArb.Stop(); err != nil {
t.Fatal(err)
}
}()
t.Cleanup(func() {
require.NoError(t, chanArb.Stop())
})

signals := &ContractSignals{
ShortChanID: lnwire.ShortChannelID{},
@@ -170,7 +170,7 @@ var _ UtxoSweeper = &mockSweeper{}
// unencumbered by a time lock.
func TestCommitSweepResolverNoDelay(t *testing.T) {
t.Parallel()
defer timeout(t)()
defer timeout()()

res := lnwallet.CommitOutputResolution{
SelfOutputSignDesc: input.SignDescriptor{

@@ -227,7 +227,7 @@ func TestCommitSweepResolverNoDelay(t *testing.T) {
// that is encumbered by a time lock. sweepErr indicates whether the local node
// fails to sweep the output.
func testCommitSweepResolverDelay(t *testing.T, sweepErr error) {
defer timeout(t)()
defer timeout()()

const sweepProcessInterval = 100 * time.Millisecond
amt := int64(100)
@@ -36,7 +36,7 @@ var (
// for which the preimage is already known initially.
func TestHtlcIncomingResolverFwdPreimageKnown(t *testing.T) {
t.Parallel()
defer timeout(t)()
defer timeout()()

ctx := newIncomingResolverTestContext(t, false)
ctx.witnessBeacon.lookupPreimage[testResHash] = testResPreimage

@@ -49,7 +49,7 @@ func TestHtlcIncomingResolverFwdPreimageKnown(t *testing.T) {
// started.
func TestHtlcIncomingResolverFwdContestedSuccess(t *testing.T) {
t.Parallel()
defer timeout(t)()
defer timeout()()

ctx := newIncomingResolverTestContext(t, false)
ctx.resolve()

@@ -65,7 +65,7 @@ func TestHtlcIncomingResolverFwdContestedSuccess(t *testing.T) {
// htlc that times out after the resolver has been started.
func TestHtlcIncomingResolverFwdContestedTimeout(t *testing.T) {
t.Parallel()
defer timeout(t)()
defer timeout()()

ctx := newIncomingResolverTestContext(t, false)

@@ -104,7 +104,7 @@ func TestHtlcIncomingResolverFwdContestedTimeout(t *testing.T) {
// has already expired when the resolver starts.
func TestHtlcIncomingResolverFwdTimeout(t *testing.T) {
t.Parallel()
defer timeout(t)()
defer timeout()()

ctx := newIncomingResolverTestContext(t, true)
ctx.witnessBeacon.lookupPreimage[testResHash] = testResPreimage

@@ -117,7 +117,7 @@ func TestHtlcIncomingResolverFwdTimeout(t *testing.T) {
// which the invoice has already been settled when the resolver starts.
func TestHtlcIncomingResolverExitSettle(t *testing.T) {
t.Parallel()
defer timeout(t)()
defer timeout()()

ctx := newIncomingResolverTestContext(t, true)
ctx.registry.notifyResolution = invoices.NewSettleResolution(
@@ -149,7 +149,7 @@ func TestHtlcIncomingResolverExitSettle(t *testing.T) {
// an invoice that is already canceled when the resolver starts.
func TestHtlcIncomingResolverExitCancel(t *testing.T) {
t.Parallel()
defer timeout(t)()
defer timeout()()

ctx := newIncomingResolverTestContext(t, true)
ctx.registry.notifyResolution = invoices.NewFailResolution(

@@ -165,7 +165,7 @@ func TestHtlcIncomingResolverExitCancel(t *testing.T) {
// for a hodl invoice that is settled after the resolver has started.
func TestHtlcIncomingResolverExitSettleHodl(t *testing.T) {
t.Parallel()
defer timeout(t)()
defer timeout()()

ctx := newIncomingResolverTestContext(t, true)
ctx.resolve()

@@ -183,7 +183,7 @@ func TestHtlcIncomingResolverExitSettleHodl(t *testing.T) {
// for a hodl invoice that times out.
func TestHtlcIncomingResolverExitTimeoutHodl(t *testing.T) {
t.Parallel()
defer timeout(t)()
defer timeout()()

ctx := newIncomingResolverTestContext(t, true)

@@ -220,7 +220,7 @@ func TestHtlcIncomingResolverExitTimeoutHodl(t *testing.T) {
// for a hodl invoice that is canceled after the resolver has started.
func TestHtlcIncomingResolverExitCancelHodl(t *testing.T) {
t.Parallel()
defer timeout(t)()
defer timeout()()

ctx := newIncomingResolverTestContext(t, true)
@@ -23,7 +23,7 @@ const (
// timed out.
func TestHtlcOutgoingResolverTimeout(t *testing.T) {
t.Parallel()
defer timeout(t)()
defer timeout()()

// Setup the resolver with our test resolution.
ctx := newOutgoingResolverTestContext(t)

@@ -44,7 +44,7 @@ func TestHtlcOutgoingResolverTimeout(t *testing.T) {
// is claimed by the remote party.
func TestHtlcOutgoingResolverRemoteClaim(t *testing.T) {
t.Parallel()
defer timeout(t)()
defer timeout()()

// Setup the resolver with our test resolution and start the resolution
// process.
@@ -477,7 +477,7 @@ type checkpoint struct {
func testHtlcSuccess(t *testing.T, resolution lnwallet.IncomingHtlcResolution,
checkpoints []checkpoint) {

defer timeout(t)()
defer timeout()()

// We first run the resolver from start to finish, ensuring it gets
// checkpointed at every expected stage. We store the checkpointed data

@@ -521,7 +521,7 @@ func testHtlcSuccess(t *testing.T, resolution lnwallet.IncomingHtlcResolution,
func runFromCheckpoint(t *testing.T, ctx *htlcResolverTestContext,
expectedCheckpoints []checkpoint) [][]byte {

defer timeout(t)()
defer timeout()()

var checkpointedState [][]byte

@@ -1286,7 +1286,7 @@ func TestHtlcTimeoutSecondStageSweeperRemoteSpend(t *testing.T) {
func testHtlcTimeout(t *testing.T, resolution lnwallet.OutgoingHtlcResolution,
checkpoints []checkpoint) {

defer timeout(t)()
defer timeout()()

// We first run the resolver from start to finish, ensuring it gets
// checkpointed at every expected stage. We store the checkpointed data
@@ -53,9 +53,8 @@ func initIncubateTests() {
// TestNurseryStoreInit verifies basic properties of the nursery store before
// any modifying calls are made.
func TestNurseryStoreInit(t *testing.T) {
cdb, cleanUp, err := channeldb.MakeTestDB()
cdb, err := channeldb.MakeTestDB(t)
require.NoError(t, err, "unable to open channel db")
defer cleanUp()

ns, err := NewNurseryStore(&chainHash, cdb)
require.NoError(t, err, "unable to open nursery store")

@@ -69,9 +68,8 @@ func TestNurseryStoreInit(t *testing.T) {
// outputs through the nursery store, verifying the properties of the
// intermediate states.
func TestNurseryStoreIncubate(t *testing.T) {
cdb, cleanUp, err := channeldb.MakeTestDB()
cdb, err := channeldb.MakeTestDB(t)
require.NoError(t, err, "unable to open channel db")
defer cleanUp()

ns, err := NewNurseryStore(&chainHash, cdb)
require.NoError(t, err, "unable to open nursery store")

@@ -306,9 +304,8 @@ func TestNurseryStoreIncubate(t *testing.T) {
// populated entries from the height index as it is purged, and that the last
// purged height is set appropriately.
func TestNurseryStoreGraduate(t *testing.T) {
cdb, cleanUp, err := channeldb.MakeTestDB()
cdb, err := channeldb.MakeTestDB(t)
require.NoError(t, err, "unable to open channel db")
defer cleanUp()

ns, err := NewNurseryStore(&chainHash, cdb)
require.NoError(t, err, "unable to open nursery store")
@@ -13,7 +13,7 @@ import (
)

// timeout implements a test level timeout.
func timeout(t *testing.T) func() {
func timeout() func() {
done := make(chan struct{})
go func() {
select {
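The hunk above only shows the head of the helper; a plausible completion, for readers wondering why the *testing.T parameter could be dropped, is a watchdog that panics instead of calling t.Fatal (which must not be called from a goroutine other than the test's own). The 30-second deadline below is an assumption, not necessarily lnd's value:

```go
package contractcourttest

import (
	"os"
	"runtime/pprof"
	"time"
)

// timeout arms a watchdog goroutine and returns a stop function for the
// deferred call site (`defer timeout()()`). If the test hangs past the
// deadline, the goroutine dump helps locate the stuck code path.
func timeout() func() {
	done := make(chan struct{})
	go func() {
		select {
		case <-time.After(30 * time.Second):
			// Dump all goroutines, then abort the hung test.
			_ = pprof.Lookup("goroutine").WriteTo(os.Stdout, 1)
			panic("test timeout")
		case <-done:
		}
	}()

	return func() {
		close(done)
	}
}
```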
@@ -409,7 +409,6 @@ type nurseryTestContext struct {
sweeper *mockSweeperFull
timeoutChan chan chan time.Time
t *testing.T
dbCleanup func()
}

func createNurseryTestContext(t *testing.T,

@@ -419,7 +418,7 @@ func createNurseryTestContext(t *testing.T,
// alternative, mocking nurseryStore, is not chosen because there is
// still considerable logic in the store.

cdb, cleanup, err := channeldb.MakeTestDB()
cdb, err := channeldb.MakeTestDB(t)
require.NoError(t, err, "unable to open channeldb")

store, err := NewNurseryStore(&chainhash.Hash{}, cdb)

@@ -480,7 +479,6 @@ func createNurseryTestContext(t *testing.T,
sweeper: sweeper,
timeoutChan: timeoutChan,
t: t,
dbCleanup: cleanup,
}

ctx.receiveTx = func() wire.MsgTx {

@@ -528,8 +526,6 @@ func (ctx *nurseryTestContext) notifyEpoch(height int32) {
}

func (ctx *nurseryTestContext) finish() {
defer ctx.dbCleanup()

// Add a final restart point in this state
ctx.restart()
@@ -106,6 +106,9 @@ crash](https://github.com/lightningnetwork/lnd/pull/7019).
* [Create a helper function to wait for peer to come
online](https://github.com/lightningnetwork/lnd/pull/6931).

* [test: replace defer cleanup with
`t.Cleanup`](https://github.com/lightningnetwork/lnd/pull/6864)
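For readers of the release notes, the conversion this entry refers to is mechanical; a representative before/after, with a hypothetical `newTestStore` helper standing in for the many constructors the PR touches. A key benefit is that t.Cleanup callbacks run after the test and all its subtests complete, and can be registered from inside setup helpers:

```go
package example

import (
	"os"
	"testing"

	"github.com/stretchr/testify/require"
)

// newTestStore follows the post-PR style: it takes *testing.T and
// registers its own teardown instead of returning a cleanup function.
func newTestStore(t *testing.T) (*os.File, error) {
	f, err := os.CreateTemp(t.TempDir(), "store")
	if err != nil {
		return nil, err
	}
	t.Cleanup(func() {
		require.NoError(t, f.Close())
	})
	return f, nil
}

func TestStore(t *testing.T) {
	// Pre-PR call sites read:
	//   store, cleanup, err := newTestStore()
	//   require.NoError(t, err)
	//   defer cleanup()
	// Post-PR, teardown is implicit in the constructor:
	store, err := newTestStore(t)
	require.NoError(t, err)
	_ = store
}
```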
### Tooling and documentation

* [The `golangci-lint` tool was updated to
@@ -1340,7 +1340,9 @@ func TestFundingManagerNormalWorkflow(t *testing.T) {
t.Parallel()

alice, bob := setupFundingManagers(t)
defer tearDownFundingManagers(t, alice, bob)
t.Cleanup(func() {
tearDownFundingManagers(t, alice, bob)
})

// We will consume the channel updates as we go, so no buffering is needed.
updateChan := make(chan *lnrpc.OpenStatusUpdate)
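A small detail these funding-manager hunks make visible: t.Cleanup accepts a plain func(), so a teardown that takes arguments is wrapped in a closure, while a no-argument function or method value can be passed directly (compare `t.Cleanup(cleanup)` in the etcd hunk and `t.Cleanup(ctx.mailbox.Stop)` further down). A minimal illustration with hypothetical names:

```go
package example

import "testing"

// tearDownPeers stands in for tearDownFundingManagers: a teardown
// that needs arguments and therefore a wrapping closure.
func tearDownPeers(t *testing.T, peers ...string) {}

func TestCleanupSignatures(t *testing.T) {
	stop := func() {} // e.g. a Stop method value

	// A func() matches t.Cleanup's parameter type directly.
	t.Cleanup(stop)

	// A parameterized teardown is captured in a closure.
	t.Cleanup(func() {
		tearDownPeers(t, "alice", "bob")
	})
}
```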
@@ -1443,7 +1445,9 @@ func testLocalCSVLimit(t *testing.T, aliceMaxCSV, bobRequiredCSV uint16) {
t.Parallel()

alice, bob := setupFundingManagers(t)
defer tearDownFundingManagers(t, alice, bob)
t.Cleanup(func() {
tearDownFundingManagers(t, alice, bob)
})

// Set a maximum local delay in alice's config to aliceMaxCSV and overwrite
// bob's required remote delay function to return bobRequiredCSV.

@@ -1583,7 +1587,9 @@ func TestFundingManagerRestartBehavior(t *testing.T) {
t.Parallel()

alice, bob := setupFundingManagers(t)
defer tearDownFundingManagers(t, alice, bob)
t.Cleanup(func() {
tearDownFundingManagers(t, alice, bob)
})

// Run through the process of opening the channel, up until the funding
// transaction is broadcasted.

@@ -1736,7 +1742,9 @@ func TestFundingManagerOfflinePeer(t *testing.T) {
t.Parallel()

alice, bob := setupFundingManagers(t)
defer tearDownFundingManagers(t, alice, bob)
t.Cleanup(func() {
tearDownFundingManagers(t, alice, bob)
})

// Run through the process of opening the channel, up until the funding
// transaction is broadcasted.

@@ -1894,7 +1902,9 @@ func TestFundingManagerPeerTimeoutAfterInitFunding(t *testing.T) {
t.Parallel()

alice, bob := setupFundingManagers(t)
defer tearDownFundingManagers(t, alice, bob)
t.Cleanup(func() {
tearDownFundingManagers(t, alice, bob)
})

// We will consume the channel updates as we go, so no buffering is needed.
updateChan := make(chan *lnrpc.OpenStatusUpdate)

@@ -1957,7 +1967,9 @@ func TestFundingManagerPeerTimeoutAfterFundingOpen(t *testing.T) {
t.Parallel()

alice, bob := setupFundingManagers(t)
defer tearDownFundingManagers(t, alice, bob)
t.Cleanup(func() {
tearDownFundingManagers(t, alice, bob)
})

// We will consume the channel updates as we go, so no buffering is needed.
updateChan := make(chan *lnrpc.OpenStatusUpdate)

@@ -2029,7 +2041,9 @@ func TestFundingManagerPeerTimeoutAfterFundingAccept(t *testing.T) {
t.Parallel()

alice, bob := setupFundingManagers(t)
defer tearDownFundingManagers(t, alice, bob)
t.Cleanup(func() {
tearDownFundingManagers(t, alice, bob)
})

// We will consume the channel updates as we go, so no buffering is needed.
updateChan := make(chan *lnrpc.OpenStatusUpdate)

@@ -2106,7 +2120,9 @@ func TestFundingManagerFundingTimeout(t *testing.T) {
t.Parallel()

alice, bob := setupFundingManagers(t)
defer tearDownFundingManagers(t, alice, bob)
t.Cleanup(func() {
tearDownFundingManagers(t, alice, bob)
})

// We will consume the channel updates as we go, so no buffering is needed.
updateChan := make(chan *lnrpc.OpenStatusUpdate)
@@ -2150,7 +2166,9 @@ func TestFundingManagerFundingNotTimeoutInitiator(t *testing.T) {
t.Parallel()

alice, bob := setupFundingManagers(t)
defer tearDownFundingManagers(t, alice, bob)
t.Cleanup(func() {
tearDownFundingManagers(t, alice, bob)
})

// We will consume the channel updates as we go, so no buffering is needed.
updateChan := make(chan *lnrpc.OpenStatusUpdate)

@@ -2217,7 +2235,9 @@ func TestFundingManagerReceiveFundingLockedTwice(t *testing.T) {
t.Parallel()

alice, bob := setupFundingManagers(t)
defer tearDownFundingManagers(t, alice, bob)
t.Cleanup(func() {
tearDownFundingManagers(t, alice, bob)
})

// We will consume the channel updates as we go, so no buffering is needed.
updateChan := make(chan *lnrpc.OpenStatusUpdate)

@@ -2324,7 +2344,9 @@ func TestFundingManagerRestartAfterChanAnn(t *testing.T) {
t.Parallel()

alice, bob := setupFundingManagers(t)
defer tearDownFundingManagers(t, alice, bob)
t.Cleanup(func() {
tearDownFundingManagers(t, alice, bob)
})

// We will consume the channel updates as we go, so no buffering is needed.
updateChan := make(chan *lnrpc.OpenStatusUpdate)

@@ -2416,7 +2438,9 @@ func TestFundingManagerRestartAfterReceivingFundingLocked(t *testing.T) {
t.Parallel()

alice, bob := setupFundingManagers(t)
defer tearDownFundingManagers(t, alice, bob)
t.Cleanup(func() {
tearDownFundingManagers(t, alice, bob)
})

// We will consume the channel updates as we go, so no buffering is needed.
updateChan := make(chan *lnrpc.OpenStatusUpdate)

@@ -2504,7 +2528,9 @@ func TestFundingManagerPrivateChannel(t *testing.T) {
t.Parallel()

alice, bob := setupFundingManagers(t)
defer tearDownFundingManagers(t, alice, bob)
t.Cleanup(func() {
tearDownFundingManagers(t, alice, bob)
})

// We will consume the channel updates as we go, so no buffering is needed.
updateChan := make(chan *lnrpc.OpenStatusUpdate)

@@ -2621,7 +2647,9 @@ func TestFundingManagerPrivateRestart(t *testing.T) {
t.Parallel()

alice, bob := setupFundingManagers(t)
defer tearDownFundingManagers(t, alice, bob)
t.Cleanup(func() {
tearDownFundingManagers(t, alice, bob)
})

// We will consume the channel updates as we go, so no buffering is needed.
updateChan := make(chan *lnrpc.OpenStatusUpdate)

@@ -2758,7 +2786,9 @@ func TestFundingManagerCustomChannelParameters(t *testing.T) {
t.Parallel()

alice, bob := setupFundingManagers(t)
defer tearDownFundingManagers(t, alice, bob)
t.Cleanup(func() {
tearDownFundingManagers(t, alice, bob)
})

// These are the custom parameters we'll use.
const csvDelay = 67
@@ -3143,7 +3173,9 @@ func TestFundingManagerMaxPendingChannels(t *testing.T) {
cfg.MaxPendingChannels = maxPending
},
)
defer tearDownFundingManagers(t, alice, bob)
t.Cleanup(func() {
tearDownFundingManagers(t, alice, bob)
})

// Create InitFundingMsg structs for maxPending+1 channels.
var initReqs []*InitFundingMsg

@@ -3316,7 +3348,9 @@ func TestFundingManagerRejectPush(t *testing.T) {
cfg.RejectPush = true
},
)
defer tearDownFundingManagers(t, alice, bob)
t.Cleanup(func() {
tearDownFundingManagers(t, alice, bob)
})

// Create a funding request and start the workflow.
updateChan := make(chan *lnrpc.OpenStatusUpdate)

@@ -3374,7 +3408,9 @@ func TestFundingManagerMaxConfs(t *testing.T) {
t.Parallel()

alice, bob := setupFundingManagers(t)
defer tearDownFundingManagers(t, alice, bob)
t.Cleanup(func() {
tearDownFundingManagers(t, alice, bob)
})

// Create a funding request and start the workflow.
updateChan := make(chan *lnrpc.OpenStatusUpdate)

@@ -3494,7 +3530,9 @@ func TestFundingManagerFundAll(t *testing.T) {

for _, test := range tests {
alice, bob := setupFundingManagers(t)
defer tearDownFundingManagers(t, alice, bob)
t.Cleanup(func() {
tearDownFundingManagers(t, alice, bob)
})

alice.fundingMgr.cfg.Wallet.WalletController.(*mock.WalletController).Utxos = allCoins

@@ -3878,7 +3916,9 @@ func TestFundingManagerUpfrontShutdown(t *testing.T) {

func testUpfrontFailure(t *testing.T, pkscript []byte, expectErr bool) {
alice, bob := setupFundingManagers(t)
defer tearDownFundingManagers(t, alice, bob)
t.Cleanup(func() {
tearDownFundingManagers(t, alice, bob)
})

errChan := make(chan error, 1)
updateChan := make(chan *lnrpc.OpenStatusUpdate)

@@ -3944,7 +3984,9 @@ func TestFundingManagerZeroConf(t *testing.T) {
t.Parallel()

alice, bob := setupFundingManagers(t)
defer tearDownFundingManagers(t, alice, bob)
t.Cleanup(func() {
tearDownFundingManagers(t, alice, bob)
})

// Alice and Bob will have the same set of feature bits in our test.
featureBits := []lnwire.FeatureBit{
File diff suppressed because it is too large
@@ -27,7 +27,6 @@ func TestMailBoxCouriers(t *testing.T) {
// First, we'll create a new instance of the current default mailbox
// type.
ctx := newMailboxContext(t, time.Now(), testExpiry)
defer ctx.mailbox.Stop()

// We'll be adding 10 messages of both types to the mailbox.
const numPackets = 10

@@ -215,6 +214,7 @@ func newMailboxContext(t *testing.T, startTime time.Time,
expiry: expiry,
})
ctx.mailbox.Start()
t.Cleanup(ctx.mailbox.Stop)

return ctx
}
@@ -311,7 +311,6 @@ func TestMailBoxFailAdd(t *testing.T) {
thirdBatchExpiry = thirdBatchStart.Add(expiry)
)
ctx := newMailboxContext(t, firstBatchStart, expiry)
defer ctx.mailbox.Stop()

failAdds := func(adds []*htlcPacket) {
for _, add := range adds {

@@ -377,7 +376,6 @@ func TestMailBoxPacketPrioritization(t *testing.T) {
// First, we'll create a new instance of the current default mailbox
// type.
ctx := newMailboxContext(t, time.Now(), testExpiry)
defer ctx.mailbox.Stop()

const numPackets = 5

@@ -476,7 +474,6 @@ func TestMailBoxAddExpiry(t *testing.T) {
)

ctx := newMailboxContext(t, firstBatchStart, expiry)
defer ctx.mailbox.Stop()

// Each batch will consist of 10 messages.
const numBatchPackets = 10

@@ -503,7 +500,6 @@ func TestMailBoxDuplicateAddPacket(t *testing.T) {

ctx := newMailboxContext(t, time.Now(), testExpiry)
ctx.mailbox.Start()
defer ctx.mailbox.Stop()

addTwice := func(t *testing.T, pkt *htlcPacket) {
// The first add should succeed.

@@ -553,7 +549,6 @@ func testMailBoxDust(t *testing.T, chantype channeldb.ChannelType) {
t.Parallel()

ctx := newMailboxContext(t, time.Now(), testExpiry)
defer ctx.mailbox.Stop()

_, _, aliceID, bobID := genIDs()
@@ -2798,11 +2798,10 @@ func TestLocalPaymentNoForwardingEvents(t *testing.T) {
// First, we'll create our traditional three hop network. We'll only be
// interacting with and asserting the state of the first end point for
// this test.
channels, cleanUp, _, err := createClusterChannels(
btcutil.SatoshiPerBitcoin*3,
btcutil.SatoshiPerBitcoin*5)
channels, _, err := createClusterChannels(
t, btcutil.SatoshiPerBitcoin*3, btcutil.SatoshiPerBitcoin*5,
)
require.NoError(t, err, "unable to create channel")
defer cleanUp()

n := newThreeHopNetwork(t, channels.aliceToBob, channels.bobToAlice,
channels.bobToCarol, channels.carolToBob, testStartingHeight)

@@ -2856,11 +2855,10 @@ func TestMultiHopPaymentForwardingEvents(t *testing.T) {
t.Parallel()

// First, we'll create our traditional three hop network.
channels, cleanUp, _, err := createClusterChannels(
btcutil.SatoshiPerBitcoin*3,
btcutil.SatoshiPerBitcoin*5)
channels, _, err := createClusterChannels(
t, btcutil.SatoshiPerBitcoin*3, btcutil.SatoshiPerBitcoin*5,
)
require.NoError(t, err, "unable to create channel")
defer cleanUp()

n := newThreeHopNetwork(t, channels.aliceToBob, channels.bobToAlice,
channels.bobToCarol, channels.carolToBob, testStartingHeight)

@@ -3011,11 +3009,10 @@ func TestUpdateFailMalformedHTLCErrorConversion(t *testing.T) {
t.Parallel()

// First, we'll create our traditional three hop network.
channels, cleanUp, _, err := createClusterChannels(
btcutil.SatoshiPerBitcoin*3, btcutil.SatoshiPerBitcoin*5,
channels, _, err := createClusterChannels(
t, btcutil.SatoshiPerBitcoin*3, btcutil.SatoshiPerBitcoin*5,
)
require.NoError(t, err, "unable to create channel")
defer cleanUp()

n := newThreeHopNetwork(
t, channels.aliceToBob, channels.bobToAlice,
@@ -3411,11 +3408,10 @@ func testHtcNotifier(t *testing.T, testOpts []serverOption, iterations int,

// First, we'll create our traditional three hop
// network.
channels, cleanUp, _, err := createClusterChannels(
btcutil.SatoshiPerBitcoin*3,
btcutil.SatoshiPerBitcoin*5)
channels, _, err := createClusterChannels(
t, btcutil.SatoshiPerBitcoin*3, btcutil.SatoshiPerBitcoin*5,
)
require.NoError(t, err, "unable to create channel")
defer cleanUp()

// Mock time so that all events are reported with a static timestamp.
now := time.Now()

@@ -3429,31 +3425,31 @@ func testHtcNotifier(t *testing.T, testOpts []serverOption, iterations int,
if err := aliceNotifier.Start(); err != nil {
t.Fatalf("could not start alice notifier")
}
defer func() {
t.Cleanup(func() {
if err := aliceNotifier.Stop(); err != nil {
t.Fatalf("failed to stop alice notifier: %v", err)
}
}()
})

bobNotifier := NewHtlcNotifier(mockTime)
if err := bobNotifier.Start(); err != nil {
t.Fatalf("could not start bob notifier")
}
defer func() {
t.Cleanup(func() {
if err := bobNotifier.Stop(); err != nil {
t.Fatalf("failed to stop bob notifier: %v", err)
}
}()
})

carolNotifier := NewHtlcNotifier(mockTime)
if err := carolNotifier.Start(); err != nil {
t.Fatalf("could not start carol notifier")
}
defer func() {
t.Cleanup(func() {
if err := carolNotifier.Stop(); err != nil {
t.Fatalf("failed to stop carol notifier: %v", err)
}
}()
})

// Create a notifier server option which will set our htlc notifiers
// for the three hop network.

@@ -3475,7 +3471,7 @@ func testHtcNotifier(t *testing.T, testOpts []serverOption, iterations int,
t.Fatalf("unable to start three hop "+
"network: %v", err)
}
defer n.stop()
t.Cleanup(n.stop)

// Before we forward anything, subscribe to htlc events
// from each notifier.

@@ -3484,21 +3480,21 @@ func testHtcNotifier(t *testing.T, testOpts []serverOption, iterations int,
t.Fatalf("could not subscribe to alice's"+
" events: %v", err)
}
defer aliceEvents.Cancel()
t.Cleanup(aliceEvents.Cancel)

bobEvents, err := bobNotifier.SubscribeHtlcEvents()
if err != nil {
t.Fatalf("could not subscribe to bob's"+
" events: %v", err)
}
defer bobEvents.Cancel()
t.Cleanup(bobEvents.Cancel)

carolEvents, err := carolNotifier.SubscribeHtlcEvents()
if err != nil {
t.Fatalf("could not subscribe to carol's"+
" events: %v", err)
}
defer carolEvents.Cancel()
t.Cleanup(carolEvents.Cancel)

// Send multiple payments, as specified by the test to test incrementing
// of htlc ids.
@@ -4118,11 +4114,10 @@ func TestSwitchDustForwarding(t *testing.T) {
// - Bob has a dust limit of 800sats with Alice
// - Bob has a dust limit of 200sats with Carol
// - Carol has a dust limit of 800sats with Bob
channels, cleanUp, _, err := createClusterChannels(
btcutil.SatoshiPerBitcoin, btcutil.SatoshiPerBitcoin,
channels, _, err := createClusterChannels(
t, btcutil.SatoshiPerBitcoin, btcutil.SatoshiPerBitcoin,
)
require.NoError(t, err)
defer cleanUp()

n := newThreeHopNetwork(
t, channels.aliceToBob, channels.bobToAlice,

@@ -5114,10 +5109,9 @@ func testSwitchHandlePacketForward(t *testing.T, zeroConf, private,
t.Parallel()

// Create a link for Alice that we'll add to the switch.
aliceLink, _, _, _, cleanUp, _, err :=
newSingleLinkTestHarness(btcutil.SatoshiPerBitcoin, 0)
aliceLink, _, _, _, _, err :=
newSingleLinkTestHarness(t, btcutil.SatoshiPerBitcoin, 0)
require.NoError(t, err)
defer cleanUp()

s, err := initSwitchWithTempDB(t, testStartingHeight)
if err != nil {
@@ -7,7 +7,6 @@ import (
"encoding/binary"
"encoding/hex"
"fmt"
"io/ioutil"
"net"
"os"
"runtime"

@@ -124,10 +123,10 @@ type testLightningChannel struct {
// representations.
//
// TODO(roasbeef): need to factor out, similar func re-used in many parts of codebase
func createTestChannel(alicePrivKey, bobPrivKey []byte,
func createTestChannel(t *testing.T, alicePrivKey, bobPrivKey []byte,
aliceAmount, bobAmount, aliceReserve, bobReserve btcutil.Amount,
chanID lnwire.ShortChannelID) (*testLightningChannel,
*testLightningChannel, func(), error) {
*testLightningChannel, error) {

aliceKeyPriv, aliceKeyPub := btcec.PrivKeyFromBytes(alicePrivKey)
bobKeyPriv, bobKeyPub := btcec.PrivKeyFromBytes(bobPrivKey)

@@ -160,7 +159,7 @@ func createTestChannel(alicePrivKey, bobPrivKey []byte,
var hash [sha256.Size]byte
randomSeed, err := generateRandomBytes(sha256.Size)
if err != nil {
return nil, nil, nil, err
return nil, nil, err
}
copy(hash[:], randomSeed)

@@ -209,23 +208,23 @@ func createTestChannel(alicePrivKey, bobPrivKey []byte,

bobRoot, err := chainhash.NewHash(bobKeyPriv.Serialize())
if err != nil {
return nil, nil, nil, err
return nil, nil, err
}
bobPreimageProducer := shachain.NewRevocationProducer(*bobRoot)
bobFirstRevoke, err := bobPreimageProducer.AtIndex(0)
if err != nil {
return nil, nil, nil, err
return nil, nil, err
}
bobCommitPoint := input.ComputeCommitmentPoint(bobFirstRevoke[:])

aliceRoot, err := chainhash.NewHash(aliceKeyPriv.Serialize())
if err != nil {
return nil, nil, nil, err
return nil, nil, err
}
alicePreimageProducer := shachain.NewRevocationProducer(*aliceRoot)
aliceFirstRevoke, err := alicePreimageProducer.AtIndex(0)
if err != nil {
return nil, nil, nil, err
return nil, nil, err
}
aliceCommitPoint := input.ComputeCommitmentPoint(aliceFirstRevoke[:])
@@ -235,33 +234,29 @@ func createTestChannel(alicePrivKey, bobPrivKey []byte,
isAliceInitiator, 0,
)
if err != nil {
return nil, nil, nil, err
return nil, nil, err
}

alicePath, err := ioutil.TempDir("", "alicedb")
dbAlice, err := channeldb.Open(t.TempDir())
if err != nil {
return nil, nil, nil, err
return nil, nil, err
}
t.Cleanup(func() {
require.NoError(t, dbAlice.Close())
})

dbAlice, err := channeldb.Open(alicePath)
dbBob, err := channeldb.Open(t.TempDir())
if err != nil {
return nil, nil, nil, err
}

bobPath, err := ioutil.TempDir("", "bobdb")
if err != nil {
return nil, nil, nil, err
}

dbBob, err := channeldb.Open(bobPath)
if err != nil {
return nil, nil, nil, err
return nil, nil, err
}
t.Cleanup(func() {
require.NoError(t, dbBob.Close())
})

estimator := chainfee.NewStaticEstimator(6000, 0)
feePerKw, err := estimator.EstimateFeePerKW(1)
if err != nil {
return nil, nil, nil, err
return nil, nil, err
}
commitFee := feePerKw.FeeForWeight(724)

@@ -333,18 +328,11 @@ func createTestChannel(alicePrivKey, bobPrivKey []byte,
}

if err := aliceChannelState.SyncPending(bobAddr, broadcastHeight); err != nil {
return nil, nil, nil, err
return nil, nil, err
}

if err := bobChannelState.SyncPending(aliceAddr, broadcastHeight); err != nil {
return nil, nil, nil, err
}

cleanUpFunc := func() {
dbAlice.Close()
dbBob.Close()
os.RemoveAll(bobPath)
os.RemoveAll(alicePath)
return nil, nil, err
}

aliceSigner := &mock.SingleSigner{Privkey: aliceKeyPriv}
@@ -355,7 +343,7 @@ func createTestChannel(alicePrivKey, bobPrivKey []byte,
aliceSigner, aliceChannelState, alicePool,
)
if err != nil {
return nil, nil, nil, err
return nil, nil, err
}
alicePool.Start()

@@ -364,7 +352,7 @@ func createTestChannel(alicePrivKey, bobPrivKey []byte,
bobSigner, bobChannelState, bobPool,
)
if err != nil {
return nil, nil, nil, err
return nil, nil, err
}
bobPool.Start()

@@ -372,18 +360,18 @@ func createTestChannel(alicePrivKey, bobPrivKey []byte,
// having Alice and Bob extend their revocation windows to each other.
aliceNextRevoke, err := channelAlice.NextRevocationKey()
if err != nil {
return nil, nil, nil, err
return nil, nil, err
}
if err := channelBob.InitNextRevocation(aliceNextRevoke); err != nil {
return nil, nil, nil, err
return nil, nil, err
}

bobNextRevoke, err := channelBob.NextRevocationKey()
if err != nil {
return nil, nil, nil, err
return nil, nil, err
}
if err := channelAlice.InitNextRevocation(bobNextRevoke); err != nil {
return nil, nil, nil, err
return nil, nil, err
}

restoreAlice := func() (*lnwallet.LightningChannel, error) {

@@ -487,8 +475,7 @@ func createTestChannel(alicePrivKey, bobPrivKey []byte,
restore: restoreBob,
}

return testLightningChannelAlice, testLightningChannelBob, cleanUpFunc,
nil
return testLightningChannelAlice, testLightningChannelBob, nil
}
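These hunks also retire the hand-rolled temp directories: ioutil.TempDir plus os.RemoveAll inside cleanUpFunc give way to t.TempDir, which both creates a unique directory and schedules its removal. A side-by-side sketch, with a hypothetical openDB standing in for channeldb.Open (os.MkdirTemp is the modern spelling of the deprecated ioutil.TempDir):

```go
package example

import (
	"os"
	"path/filepath"
	"testing"
)

// openDB is a stand-in for channeldb.Open in this sketch.
func openDB(dir string) (*os.File, error) {
	return os.Create(filepath.Join(dir, "channel.db"))
}

func TestTempDirStyles(t *testing.T) {
	// Old style: create a directory, track its path, and remove it in
	// a cleanup function the caller must remember to invoke.
	oldPath, err := os.MkdirTemp("", "alicedb")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(oldPath)

	// New style: t.TempDir creates the directory and removes it
	// automatically when the test ends, so no path bookkeeping or
	// cleanup func needs to be threaded back to the caller.
	db, err := openDB(t.TempDir())
	if err != nil {
		t.Fatal(err)
	}
	t.Cleanup(func() {
		if err := db.Close(); err != nil {
			t.Errorf("close: %v", err)
		}
	})
}
```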
// getChanID retrieves the channel point from an lnwire message.

@@ -872,34 +859,28 @@ type clusterChannels struct {

// createClusterChannels creates lightning channels which are needed for a
// network cluster to be initialized.
func createClusterChannels(aliceToBob, bobToCarol btcutil.Amount) (
*clusterChannels, func(), func() (*clusterChannels, error), error) {
func createClusterChannels(t *testing.T, aliceToBob, bobToCarol btcutil.Amount) (
*clusterChannels, func() (*clusterChannels, error), error) {

_, _, firstChanID, secondChanID := genIDs()

// Create lightning channels between Alice<->Bob and Bob<->Carol
aliceChannel, firstBobChannel, cleanAliceBob, err :=
createTestChannel(alicePrivKey, bobPrivKey, aliceToBob,
aliceToBob, 0, 0, firstChanID)
aliceChannel, firstBobChannel, err := createTestChannel(t, alicePrivKey,
bobPrivKey, aliceToBob, aliceToBob, 0, 0, firstChanID,
)
if err != nil {
return nil, nil, nil, errors.Errorf("unable to create "+
return nil, nil, errors.Errorf("unable to create "+
"alice<->bob channel: %v", err)
}

secondBobChannel, carolChannel, cleanBobCarol, err :=
createTestChannel(bobPrivKey, carolPrivKey, bobToCarol,
bobToCarol, 0, 0, secondChanID)
secondBobChannel, carolChannel, err := createTestChannel(t, bobPrivKey,
carolPrivKey, bobToCarol, bobToCarol, 0, 0, secondChanID,
)
if err != nil {
cleanAliceBob()
return nil, nil, nil, errors.Errorf("unable to create "+
return nil, nil, errors.Errorf("unable to create "+
"bob<->carol channel: %v", err)
}

cleanUp := func() {
cleanAliceBob()
cleanBobCarol()
}

restoreFromDb := func() (*clusterChannels, error) {

a2b, err := aliceChannel.restore()

@@ -935,7 +916,7 @@ func createClusterChannels(aliceToBob, bobToCarol btcutil.Amount) (
bobToAlice: firstBobChannel.channel,
bobToCarol: secondBobChannel.channel,
carolToBob: carolChannel.channel,
}, cleanUp, restoreFromDb, nil
}, restoreFromDb, nil
}
// newThreeHopNetwork function creates the following topology and returns the
|
||||
@ -1066,22 +1047,22 @@ func serverOptionRejectHtlc(alice, bob, carol bool) serverOption {
|
||||
|
||||
// createTwoClusterChannels creates lightning channels which are needed for
|
||||
// a 2 hop network cluster to be initialized.
|
||||
func createTwoClusterChannels(aliceToBob, bobToCarol btcutil.Amount) (
|
||||
*testLightningChannel, *testLightningChannel,
|
||||
func(), error) {
|
||||
func createTwoClusterChannels(t *testing.T, aliceToBob,
|
||||
bobToCarol btcutil.Amount) (*testLightningChannel,
|
||||
*testLightningChannel, error) {
|
||||
|
||||
_, _, firstChanID, _ := genIDs()
|
||||
|
||||
// Create lightning channels between Alice<->Bob and Bob<->Carol
|
||||
alice, bob, cleanAliceBob, err :=
|
||||
createTestChannel(alicePrivKey, bobPrivKey, aliceToBob,
|
||||
aliceToBob, 0, 0, firstChanID)
|
||||
alice, bob, err := createTestChannel(t, alicePrivKey, bobPrivKey,
|
||||
aliceToBob, aliceToBob, 0, 0, firstChanID,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, nil, nil, errors.Errorf("unable to create "+
|
||||
return nil, nil, errors.Errorf("unable to create "+
|
||||
"alice<->bob channel: %v", err)
|
||||
}
|
||||
|
||||
return alice, bob, cleanAliceBob, nil
|
||||
return alice, bob, nil
|
||||
}
|
||||
|
||||
// hopNetwork is the base struct for two and three hop networks
|
||||
@ -1211,8 +1192,8 @@ type twoHopNetwork struct {
|
||||
bobChannelLink *channelLink
|
||||
}
|
||||
|
||||
// newTwoHopNetwork function creates the following topology and returns the
|
||||
// control object to manage this cluster:
|
||||
// newTwoHopNetwork function creates and starts the following topology and
|
||||
// returns the control object to manage this cluster:
|
||||
//
|
||||
// alice bob
|
||||
// server - <-connection-> - server
|
||||
@ -1265,7 +1246,7 @@ func newTwoHopNetwork(t testing.TB,
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
return &twoHopNetwork{
|
||||
n := &twoHopNetwork{
|
||||
aliceServer: aliceServer,
|
||||
aliceChannelLink: aliceChannelLink.(*channelLink),
|
||||
|
||||
@ -1274,6 +1255,11 @@ func newTwoHopNetwork(t testing.TB,
|
||||
|
||||
hopNetwork: *hopNetwork,
|
||||
}
|
||||
|
||||
require.NoError(t, n.start())
|
||||
t.Cleanup(n.stop)
|
||||
|
||||
return n
|
||||
}
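
newTwoHopNetwork now also starts the cluster and schedules its shutdown, so tests no longer call start/stop themselves. A sketch of that constructor pattern, with a hypothetical network type standing in for twoHopNetwork:

package example

import "testing"

type network struct{}

func (n *network) start() error { return nil }
func (n *network) stop()        {}

// newNetwork mirrors the constructor above: it both starts the cluster
// and schedules its shutdown, so a test just calls newNetwork(t) and
// uses the result.
func newNetwork(t testing.TB) *network {
	n := &network{}
	if err := n.start(); err != nil {
		t.Fatal(err)
	}
	t.Cleanup(n.stop)
	return n
}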

// start starts the two hop network alice,bob servers.
@ -1398,7 +1384,7 @@ func waitLinksEligible(links map[string]*channelLink) error {
}

// timeout implements a test level timeout.
func timeout(t *testing.T) func() {
func timeout() func() {
done := make(chan struct{})
go func() {
select {

@ -19,7 +19,6 @@ import (
// TestSettleInvoice tests settling of an invoice and related notifications.
func TestSettleInvoice(t *testing.T) {
ctx := newTestContext(t)
defer ctx.cleanup()

allSubscriptions, err := ctx.registry.SubscribeNotifications(0, 0)
require.Nil(t, err)
@ -190,7 +189,6 @@ func TestSettleInvoice(t *testing.T) {

func testCancelInvoice(t *testing.T, gc bool) {
ctx := newTestContext(t)
defer ctx.cleanup()

// If set to true, then also delete the invoice from the DB after
// cancellation.
@ -528,11 +526,9 @@ func TestCancelHoldInvoice(t *testing.T) {
if err != nil {
t.Fatal(err)
}
defer func() {
if err := registry.Stop(); err != nil {
t.Fatalf("failed to stop invoice registry: %v", err)
}
}()
t.Cleanup(func() {
require.NoError(t, registry.Stop())
})

// Add the invoice.
_, err = registry.AddInvoice(testHodlInvoice, testInvoicePaymentHash)
@ -590,7 +586,6 @@ func TestCancelHoldInvoice(t *testing.T) {
// forwarded htlc hashes as well.
func TestUnknownInvoice(t *testing.T) {
ctx := newTestContext(t)
defer ctx.cleanup()

// Notify arrival of a new htlc paying to this invoice. This should
// succeed.
@ -624,7 +619,6 @@ func testKeySend(t *testing.T, keySendEnabled bool) {
defer timeout()()

ctx := newTestContext(t)
defer ctx.cleanup()

ctx.registry.cfg.AcceptKeySend = keySendEnabled

@ -751,7 +745,6 @@ func testHoldKeysend(t *testing.T, timeoutKeysend bool) {
const holdDuration = time.Minute

ctx := newTestContext(t)
defer ctx.cleanup()

ctx.registry.cfg.AcceptKeySend = true
ctx.registry.cfg.KeysendHoldTime = holdDuration
@ -840,7 +833,6 @@ func TestMppPayment(t *testing.T) {
defer timeout()()

ctx := newTestContext(t)
defer ctx.cleanup()

// Add the invoice.
_, err := ctx.registry.AddInvoice(testInvoice, testInvoicePaymentHash)
@ -1135,7 +1127,6 @@ func testHeightExpiryWithRegistry(t *testing.T, numParts int, settle bool) {
defer timeout()()

ctx := newTestContext(t)
defer ctx.cleanup()

require.Greater(t, numParts, 0, "test requires at least one part")

@ -1242,7 +1233,6 @@ func TestMultipleSetHeightExpiry(t *testing.T) {
defer timeout()()

ctx := newTestContext(t)
defer ctx.cleanup()

// Add a hold invoice.
invoice := *testInvoice
@ -1331,7 +1321,6 @@ func TestSettleInvoicePaymentAddrRequired(t *testing.T) {
t.Parallel()

ctx := newTestContext(t)
defer ctx.cleanup()

allSubscriptions, err := ctx.registry.SubscribeNotifications(0, 0)
require.Nil(t, err)
@ -1407,7 +1396,6 @@ func TestSettleInvoicePaymentAddrRequiredOptionalGrace(t *testing.T) {
t.Parallel()

ctx := newTestContext(t)
defer ctx.cleanup()

allSubscriptions, err := ctx.registry.SubscribeNotifications(0, 0)
require.Nil(t, err)
@ -1505,7 +1493,6 @@ func TestAMPWithoutMPPPayload(t *testing.T) {
defer timeout()()

ctx := newTestContext(t)
defer ctx.cleanup()

ctx.registry.cfg.AcceptAMP = true

@ -1591,7 +1578,6 @@ func testSpontaneousAmpPayment(
defer timeout()()

ctx := newTestContext(t)
defer ctx.cleanup()

ctx.registry.cfg.AcceptAMP = ampEnabled

@ -183,8 +183,7 @@ type testContext struct {
notifier *mockChainNotifier
clock *clock.TestClock

cleanup func()
t *testing.T
t *testing.T
}

func newTestContext(t *testing.T) *testContext {
@ -213,6 +212,9 @@ func newTestContext(t *testing.T) *testContext {
if err != nil {
t.Fatal(err)
}
t.Cleanup(func() {
require.NoError(t, registry.Stop())
})

ctx := testContext{
cdb: cdb,
@ -220,11 +222,6 @@ func newTestContext(t *testing.T) *testContext {
notifier: notifier,
clock: clock,
t: t,
cleanup: func() {
if err = registry.Stop(); err != nil {
t.Fatalf("failed to stop invoice registry: %v", err)
}
},
}

return &ctx
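
Note the teardown style: the old cleanup closure reported a failed Stop with t.Fatalf, while the new t.Cleanup callback uses require.NoError, which marks the test failed if teardown errors. A small helper capturing the idiom (the stopper interface is hypothetical):

package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

type stopper interface{ Stop() error }

// registerStop schedules s.Stop and fails the test if teardown errors,
// matching the require.NoError style used in the new cleanups above.
func registerStop(t *testing.T, s stopper) {
	t.Cleanup(func() {
		require.NoError(t, s.Stop())
	})
}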

@ -18,7 +18,6 @@ func TestDump(t *testing.T) {
t.Parallel()

f := NewEtcdTestFixture(t)
defer f.Cleanup()

db, err := newEtcdBackend(context.TODO(), f.BackendConfig())
require.NoError(t, err)
@ -53,7 +52,6 @@ func TestAbortContext(t *testing.T) {
t.Parallel()

f := NewEtcdTestFixture(t)
defer f.Cleanup()

ctx, cancel := context.WithCancel(context.Background())

@ -21,10 +21,9 @@ const (

// EtcdTestFixture holds internal state of the etcd test fixture.
type EtcdTestFixture struct {
t *testing.T
cli *clientv3.Client
config *Config
cleanup func()
t *testing.T
cli *clientv3.Client
config *Config
}

// NewTestEtcdInstance creates an embedded etcd instance for testing, listening
@ -47,6 +46,7 @@ func NewEtcdTestFixture(t *testing.T) *EtcdTestFixture {
tmpDir := t.TempDir()

config, etcdCleanup := NewTestEtcdInstance(t, tmpDir)
t.Cleanup(etcdCleanup)

cli, err := clientv3.New(clientv3.Config{
Endpoints: []string{config.Host},
@ -63,10 +63,9 @@ func NewEtcdTestFixture(t *testing.T) *EtcdTestFixture {
cli.Lease = namespace.NewLease(cli.Lease, defaultNamespace)

return &EtcdTestFixture{
t: t,
cli: cli,
config: config,
cleanup: etcdCleanup,
t: t,
cli: cli,
config: config,
}
}

@ -133,9 +132,3 @@ func (f *EtcdTestFixture) Dump() map[string]string {
func (f *EtcdTestFixture) BackendConfig() Config {
return *f.config
}

// Cleanup should be called at test fixture teardown to stop the embedded
// etcd instance and remove all temp db files from the filesystem.
func (f *EtcdTestFixture) Cleanup() {
f.cleanup()
}
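
With the cleanup registered inside NewEtcdTestFixture, the fixture struct no longer needs to carry a cleanup field or expose the Cleanup method removed above. Roughly, the shape is (names hypothetical):

package example

import "testing"

type fixture struct {
	t *testing.T
	// Note: no cleanup field; teardown is owned by the testing framework.
}

// newFixture registers the backend's shutdown at construction time, the
// same way NewEtcdTestFixture now registers etcdCleanup.
func newFixture(t *testing.T, shutdown func()) *fixture {
	t.Cleanup(shutdown)
	return &fixture{t: t}
}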

@ -15,7 +15,6 @@ func TestChangeDuringManualTx(t *testing.T) {
t.Parallel()

f := NewEtcdTestFixture(t)
defer f.Cleanup()

db, err := newEtcdBackend(context.TODO(), f.BackendConfig())
require.NoError(t, err)
@ -44,7 +43,6 @@ func TestChangeDuringUpdate(t *testing.T) {
t.Parallel()

f := NewEtcdTestFixture(t)
defer f.Cleanup()

db, err := newEtcdBackend(context.TODO(), f.BackendConfig())
require.NoError(t, err)

@ -26,11 +26,10 @@ func TestPutToEmpty(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())

txQueue := NewCommitQueue(ctx)
defer func() {
t.Cleanup(func() {
cancel()
f.Cleanup()
txQueue.Stop()
}()
})

db, err := newEtcdBackend(ctx, f.BackendConfig())
require.NoError(t, err)
@ -54,11 +53,10 @@ func TestGetPutDel(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())

txQueue := NewCommitQueue(ctx)
defer func() {
t.Cleanup(func() {
cancel()
f.Cleanup()
txQueue.Stop()
}()
})

testKeyValues := []KV{
{"a", "1"},
@ -156,11 +154,10 @@ func testFirstLastNextPrev(t *testing.T, prefetchKeys []string,
ctx, cancel := context.WithCancel(context.Background())

txQueue := NewCommitQueue(ctx)
defer func() {
t.Cleanup(func() {
cancel()
f.Cleanup()
txQueue.Stop()
}()
})

testKeyValues := []KV{
{"kb", "1"},
@ -331,11 +328,10 @@ func TestCommitError(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())

txQueue := NewCommitQueue(ctx)
defer func() {
t.Cleanup(func() {
cancel()
f.Cleanup()
txQueue.Stop()
}()
})

db, err := newEtcdBackend(ctx, f.BackendConfig())
require.NoError(t, err)
@ -381,11 +377,10 @@ func TestManualTxError(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())

txQueue := NewCommitQueue(ctx)
defer func() {
t.Cleanup(func() {
cancel()
f.Cleanup()
txQueue.Stop()
}()
})

db, err := newEtcdBackend(ctx, f.BackendConfig())
require.NoError(t, err)

@ -14,7 +14,6 @@ import (
// etcd database driver.
func TestWalletDBInterface(t *testing.T) {
f := NewEtcdTestFixture(t)
defer f.Cleanup()
cfg := f.BackendConfig()
walletdbtest.TestInterface(t, dbType, context.TODO(), &cfg)
}

(File diff suppressed because it is too large.)
@ -5,10 +5,9 @@ import (
"encoding/binary"
"encoding/hex"
"io"
"io/ioutil"
prand "math/rand"
"net"
"os"
"testing"

"github.com/btcsuite/btcd/btcec/v2"
"github.com/btcsuite/btcd/btcutil"
@ -20,6 +19,7 @@ import (
"github.com/lightningnetwork/lnd/lnwallet/chainfee"
"github.com/lightningnetwork/lnd/lnwire"
"github.com/lightningnetwork/lnd/shachain"
"github.com/stretchr/testify/require"
)

var (
@ -103,17 +103,15 @@ var (
// CreateTestChannels creates two fully populated channels to be used within
// testing fixtures. The channels will be returned as if the funding process
// has just completed. The channel itself is funded with 10 BTC, with 5 BTC
// allocated to each side. Within the channel, Alice is the initiator. The
// function also returns a "cleanup" function that is meant to be called once
// the test has been finalized. The clean up function will remove all temporary
// files created. If tweaklessCommits is true, then the commits within the
// channels will use the new format, otherwise the legacy format.
func CreateTestChannels(chanType channeldb.ChannelType) (
*LightningChannel, *LightningChannel, func(), error) {
// allocated to each side. Within the channel, Alice is the initiator. If
// tweaklessCommits is true, then the commits within the channels will use the
// new format, otherwise the legacy format.
func CreateTestChannels(t *testing.T, chanType channeldb.ChannelType) (
*LightningChannel, *LightningChannel, error) {

channelCapacity, err := btcutil.NewAmount(testChannelCapacity)
if err != nil {
return nil, nil, nil, err
return nil, nil, err
}

channelBal := channelCapacity / 2
@ -202,23 +200,23 @@ func CreateTestChannels(chanType channeldb.ChannelType) (

bobRoot, err := chainhash.NewHash(bobKeys[0].Serialize())
if err != nil {
return nil, nil, nil, err
return nil, nil, err
}
bobPreimageProducer := shachain.NewRevocationProducer(*bobRoot)
bobFirstRevoke, err := bobPreimageProducer.AtIndex(0)
if err != nil {
return nil, nil, nil, err
return nil, nil, err
}
bobCommitPoint := input.ComputeCommitmentPoint(bobFirstRevoke[:])

aliceRoot, err := chainhash.NewHash(aliceKeys[0].Serialize())
if err != nil {
return nil, nil, nil, err
return nil, nil, err
}
alicePreimageProducer := shachain.NewRevocationProducer(*aliceRoot)
aliceFirstRevoke, err := alicePreimageProducer.AtIndex(0)
if err != nil {
return nil, nil, nil, err
return nil, nil, err
}
aliceCommitPoint := input.ComputeCommitmentPoint(aliceFirstRevoke[:])

@ -227,33 +225,29 @@ func CreateTestChannels(chanType channeldb.ChannelType) (
bobCommitPoint, *fundingTxIn, chanType, isAliceInitiator, 0,
)
if err != nil {
return nil, nil, nil, err
return nil, nil, err
}

alicePath, err := ioutil.TempDir("", "alicedb")
dbAlice, err := channeldb.Open(t.TempDir())
if err != nil {
return nil, nil, nil, err
return nil, nil, err
}
t.Cleanup(func() {
require.NoError(t, dbAlice.Close())
})

dbAlice, err := channeldb.Open(alicePath)
dbBob, err := channeldb.Open(t.TempDir())
if err != nil {
return nil, nil, nil, err
}

bobPath, err := ioutil.TempDir("", "bobdb")
if err != nil {
return nil, nil, nil, err
}

dbBob, err := channeldb.Open(bobPath)
if err != nil {
return nil, nil, nil, err
return nil, nil, err
}
t.Cleanup(func() {
require.NoError(t, dbBob.Close())
})

estimator := chainfee.NewStaticEstimator(6000, 0)
feePerKw, err := estimator.EstimateFeePerKW(1)
if err != nil {
return nil, nil, nil, err
return nil, nil, err
}
commitFee := calcStaticFee(chanType, 0)
var anchorAmt btcutil.Amount
@ -305,7 +299,7 @@ func CreateTestChannels(chanType channeldb.ChannelType) (

var chanIDBytes [8]byte
if _, err := io.ReadFull(rand.Reader, chanIDBytes[:]); err != nil {
return nil, nil, nil, err
return nil, nil, err
}

shortChanID := lnwire.NewShortChanIDFromInt(
@ -358,9 +352,12 @@ func CreateTestChannels(chanType channeldb.ChannelType) (
aliceSigner, aliceChannelState, alicePool,
)
if err != nil {
return nil, nil, nil, err
return nil, nil, err
}
alicePool.Start()
t.Cleanup(func() {
require.NoError(t, alicePool.Stop())
})

obfuscator := createStateHintObfuscator(aliceChannelState)

@ -369,21 +366,24 @@ func CreateTestChannels(chanType channeldb.ChannelType) (
bobSigner, bobChannelState, bobPool,
)
if err != nil {
return nil, nil, nil, err
return nil, nil, err
}
bobPool.Start()
t.Cleanup(func() {
require.NoError(t, bobPool.Stop())
})

err = SetStateNumHint(
aliceCommitTx, 0, obfuscator,
)
if err != nil {
return nil, nil, nil, err
return nil, nil, err
}
err = SetStateNumHint(
bobCommitTx, 0, obfuscator,
)
if err != nil {
return nil, nil, nil, err
return nil, nil, err
}

addr := &net.TCPAddr{
@ -391,7 +391,7 @@ func CreateTestChannels(chanType channeldb.ChannelType) (
Port: 18556,
}
if err := channelAlice.channelState.SyncPending(addr, 101); err != nil {
return nil, nil, nil, err
return nil, nil, err
}

addr = &net.TCPAddr{
@ -400,25 +400,17 @@ func CreateTestChannels(chanType channeldb.ChannelType) (
}

if err := channelBob.channelState.SyncPending(addr, 101); err != nil {
return nil, nil, nil, err
}

cleanUpFunc := func() {
os.RemoveAll(bobPath)
os.RemoveAll(alicePath)

alicePool.Stop()
bobPool.Stop()
return nil, nil, err
}

// Now that the channels are open, simulate the start of a session by
// having Alice and Bob extend their revocation windows to each other.
err = initRevocationWindows(channelAlice, channelBob)
if err != nil {
return nil, nil, nil, err
return nil, nil, err
}

return channelAlice, channelBob, cleanUpFunc, nil
return channelAlice, channelBob, nil
}
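
The temp-database handling above also switches from ioutil.TempDir plus os.RemoveAll in a cleanup closure to t.TempDir(), which creates a per-test directory and deletes it automatically. A minimal sketch of the combined idiom (openTestDB is illustrative; os.Create stands in for a real database open):

package example

import (
	"os"
	"path/filepath"
	"testing"
)

// openTestDB shows the t.TempDir idiom used above: the directory is
// created per test and removed by the framework, so no os.RemoveAll
// bookkeeping survives in the helper.
func openTestDB(t *testing.T) (*os.File, error) {
	path := filepath.Join(t.TempDir(), "test.db")
	f, err := os.Create(path)
	if err != nil {
		return nil, err
	}
	t.Cleanup(func() { f.Close() })
	return f, nil
}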

// initRevocationWindows simulates a new channel being opened within the p2p

@ -37,14 +37,17 @@ func setupTestRootKeyStorage(t *testing.T) kvdb.Backend {
kvdb.DefaultDBTimeout,
)
require.NoError(t, err, "Error opening store DB")
t.Cleanup(func() {
require.NoError(t, db.Close())
})

store, err := macaroons.NewRootKeyStorage(db)
if err != nil {
db.Close()
t.Fatalf("Error creating root key store: %v", err)
}
defer store.Close()
require.NoError(t, err, "Error creating root key store")

err = store.CreateUnlock(&defaultPw)
require.NoError(t, store.Close())
require.NoError(t, err, "error creating unlock")

return db
}

@ -43,11 +43,10 @@ func TestPeerChannelClosureAcceptFeeResponder(t *testing.T) {

mockSwitch := &mockMessageSwitch{}

alicePeer, bobChan, cleanUp, err := createTestPeer(
notifier, broadcastTxChan, noUpdate, mockSwitch,
alicePeer, bobChan, err := createTestPeer(
t, notifier, broadcastTxChan, noUpdate, mockSwitch,
)
require.NoError(t, err, "unable to create test channels")
defer cleanUp()

chanID := lnwire.NewChanIDFromOutPoint(bobChan.ChannelPoint())

@ -147,11 +146,10 @@ func TestPeerChannelClosureAcceptFeeInitiator(t *testing.T) {

mockSwitch := &mockMessageSwitch{}

alicePeer, bobChan, cleanUp, err := createTestPeer(
notifier, broadcastTxChan, noUpdate, mockSwitch,
alicePeer, bobChan, err := createTestPeer(
t, notifier, broadcastTxChan, noUpdate, mockSwitch,
)
require.NoError(t, err, "unable to create test channels")
defer cleanUp()

chanID := lnwire.NewChanIDFromOutPoint(bobChan.ChannelPoint())
mockLink := newMockUpdateHandler(chanID)
@ -270,11 +268,10 @@ func TestPeerChannelClosureFeeNegotiationsResponder(t *testing.T) {

mockSwitch := &mockMessageSwitch{}

alicePeer, bobChan, cleanUp, err := createTestPeer(
notifier, broadcastTxChan, noUpdate, mockSwitch,
alicePeer, bobChan, err := createTestPeer(
t, notifier, broadcastTxChan, noUpdate, mockSwitch,
)
require.NoError(t, err, "unable to create test channels")
defer cleanUp()

chanID := lnwire.NewChanIDFromOutPoint(bobChan.ChannelPoint())

@ -456,11 +453,10 @@ func TestPeerChannelClosureFeeNegotiationsInitiator(t *testing.T) {

mockSwitch := &mockMessageSwitch{}

alicePeer, bobChan, cleanUp, err := createTestPeer(
notifier, broadcastTxChan, noUpdate, mockSwitch,
alicePeer, bobChan, err := createTestPeer(
t, notifier, broadcastTxChan, noUpdate, mockSwitch,
)
require.NoError(t, err, "unable to create test channels")
defer cleanUp()

chanID := lnwire.NewChanIDFromOutPoint(bobChan.ChannelPoint())
mockLink := newMockUpdateHandler(chanID)
@ -784,14 +780,13 @@ func TestCustomShutdownScript(t *testing.T) {
mockSwitch := &mockMessageSwitch{}

// Open a channel.
alicePeer, bobChan, cleanUp, err := createTestPeer(
notifier, broadcastTxChan, test.update,
alicePeer, bobChan, err := createTestPeer(
t, notifier, broadcastTxChan, test.update,
mockSwitch,
)
if err != nil {
t.Fatalf("unable to create test channels: %v", err)
}
defer cleanUp()

chanPoint := bobChan.ChannelPoint()
chanID := lnwire.NewChanIDFromOutPoint(chanPoint)

@ -5,10 +5,8 @@ import (
crand "crypto/rand"
"encoding/binary"
"io"
"io/ioutil"
"math/rand"
"net"
"os"
"testing"
"time"

@ -56,10 +54,10 @@ var noUpdate = func(a, b *channeldb.OpenChannel) {}
// one of the nodes, together with the channel seen from both nodes. It takes
// an updateChan function which can be used to modify the default values on
// the channel states for each peer.
func createTestPeer(notifier chainntnfs.ChainNotifier,
func createTestPeer(t *testing.T, notifier chainntnfs.ChainNotifier,
publTx chan *wire.MsgTx, updateChan func(a, b *channeldb.OpenChannel),
mockSwitch *mockMessageSwitch) (
*Brontide, *lnwallet.LightningChannel, func(), error) {
*Brontide, *lnwallet.LightningChannel, error) {

nodeKeyLocator := keychain.KeyLocator{
Family: keychain.KeyFamilyNodeKey,
@ -141,23 +139,23 @@ func createTestPeer(notifier chainntnfs.ChainNotifier,

bobRoot, err := chainhash.NewHash(bobKeyPriv.Serialize())
if err != nil {
return nil, nil, nil, err
return nil, nil, err
}
bobPreimageProducer := shachain.NewRevocationProducer(*bobRoot)
bobFirstRevoke, err := bobPreimageProducer.AtIndex(0)
if err != nil {
return nil, nil, nil, err
return nil, nil, err
}
bobCommitPoint := input.ComputeCommitmentPoint(bobFirstRevoke[:])

aliceRoot, err := chainhash.NewHash(aliceKeyPriv.Serialize())
if err != nil {
return nil, nil, nil, err
return nil, nil, err
}
alicePreimageProducer := shachain.NewRevocationProducer(*aliceRoot)
aliceFirstRevoke, err := alicePreimageProducer.AtIndex(0)
if err != nil {
return nil, nil, nil, err
return nil, nil, err
}
aliceCommitPoint := input.ComputeCommitmentPoint(aliceFirstRevoke[:])

@ -167,33 +165,29 @@ func createTestPeer(notifier chainntnfs.ChainNotifier,
isAliceInitiator, 0,
)
if err != nil {
return nil, nil, nil, err
return nil, nil, err
}

alicePath, err := ioutil.TempDir("", "alicedb")
dbAlice, err := channeldb.Open(t.TempDir())
if err != nil {
return nil, nil, nil, err
return nil, nil, err
}
t.Cleanup(func() {
require.NoError(t, dbAlice.Close())
})

dbAlice, err := channeldb.Open(alicePath)
dbBob, err := channeldb.Open(t.TempDir())
if err != nil {
return nil, nil, nil, err
}

bobPath, err := ioutil.TempDir("", "bobdb")
if err != nil {
return nil, nil, nil, err
}

dbBob, err := channeldb.Open(bobPath)
if err != nil {
return nil, nil, nil, err
return nil, nil, err
}
t.Cleanup(func() {
require.NoError(t, dbBob.Close())
})

estimator := chainfee.NewStaticEstimator(12500, 0)
feePerKw, err := estimator.EstimateFeePerKW(1)
if err != nil {
return nil, nil, nil, err
return nil, nil, err
}

// TODO(roasbeef): need to factor in commit fee?
@ -218,7 +212,7 @@ func createTestPeer(notifier chainntnfs.ChainNotifier,

var chanIDBytes [8]byte
if _, err := io.ReadFull(crand.Reader, chanIDBytes[:]); err != nil {
return nil, nil, nil, err
return nil, nil, err
}

shortChanID := lnwire.NewShortChanIDFromInt(
@ -269,7 +263,7 @@ func createTestPeer(notifier chainntnfs.ChainNotifier,
}

if err := aliceChannelState.SyncPending(aliceAddr, 0); err != nil {
return nil, nil, nil, err
return nil, nil, err
}

bobAddr := &net.TCPAddr{
@ -278,12 +272,7 @@ func createTestPeer(notifier chainntnfs.ChainNotifier,
}

if err := bobChannelState.SyncPending(bobAddr, 0); err != nil {
return nil, nil, nil, err
}

cleanUpFunc := func() {
os.RemoveAll(bobPath)
os.RemoveAll(alicePath)
return nil, nil, err
}

aliceSigner := &mock.SingleSigner{Privkey: aliceKeyPriv}
@ -294,18 +283,24 @@ func createTestPeer(notifier chainntnfs.ChainNotifier,
aliceSigner, aliceChannelState, alicePool,
)
if err != nil {
return nil, nil, nil, err
return nil, nil, err
}
_ = alicePool.Start()
t.Cleanup(func() {
require.NoError(t, alicePool.Stop())
})

bobPool := lnwallet.NewSigPool(1, bobSigner)
channelBob, err := lnwallet.NewLightningChannel(
bobSigner, bobChannelState, bobPool,
)
if err != nil {
return nil, nil, nil, err
return nil, nil, err
}
_ = bobPool.Start()
t.Cleanup(func() {
require.NoError(t, bobPool.Stop())
})

chainIO := &mock.ChainIO{
BestHeight: broadcastHeight,
@ -344,15 +339,15 @@ func createTestPeer(notifier chainntnfs.ChainNotifier,
},
})
if err != nil {
return nil, nil, nil, err
return nil, nil, err
}
if err = chanStatusMgr.Start(); err != nil {
return nil, nil, nil, err
return nil, nil, err
}

errBuffer, err := queue.NewCircularBuffer(ErrorBufferSize)
if err != nil {
return nil, nil, nil, err
return nil, nil, err
}

var pubKey [33]byte
@ -392,7 +387,7 @@ func createTestPeer(notifier chainntnfs.ChainNotifier,
alicePeer.wg.Add(1)
go alicePeer.channelManager()

return alicePeer, channelBob, cleanUpFunc, nil
return alicePeer, channelBob, nil
}
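
One property the new createTestPeer relies on: because each resource registers its cleanup the moment it is acquired, a failure while acquiring a later resource can simply return without closing earlier ones by hand. A sketch of that guarantee, with plain files standing in for the channel databases:

package example

import (
	"os"
	"testing"
)

// acquireTwo sketches the property used above: each resource registers
// its own cleanup as soon as it exists, so a failure acquiring the
// second resource cannot leak the first.
func acquireTwo(t *testing.T) (*os.File, *os.File, error) {
	a, err := os.CreateTemp(t.TempDir(), "a")
	if err != nil {
		return nil, nil, err
	}
	t.Cleanup(func() { a.Close() })

	b, err := os.CreateTemp(t.TempDir(), "b")
	if err != nil {
		// No manual a.Close() needed here; t.Cleanup handles it.
		return nil, nil, err
	}
	t.Cleanup(func() { b.Close() })

	return a, b, nil
}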

// mockMessageSwitch is a mock implementation of the messageSwitch interface

@ -71,7 +71,9 @@ func testWorkerPool(t *testing.T, test workerPoolTest) {

p := test.newPool()
startGeneric(t, p)
defer stopGeneric(t, p)
t.Cleanup(func() {
stopGeneric(t, p)
})

submitNonblockingGeneric(t, p, test.numWorkers)
})
@ -81,7 +83,9 @@ func testWorkerPool(t *testing.T, test workerPoolTest) {

p := test.newPool()
startGeneric(t, p)
defer stopGeneric(t, p)
t.Cleanup(func() {
stopGeneric(t, p)
})

submitBlockingGeneric(t, p, test.numWorkers)
})
@ -91,7 +95,9 @@ func testWorkerPool(t *testing.T, test workerPoolTest) {

p := test.newPool()
startGeneric(t, p)
defer stopGeneric(t, p)
t.Cleanup(func() {
stopGeneric(t, p)
})

submitPartialBlockingGeneric(t, p, test.numWorkers)
})
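
t.Cleanup callbacks run after the test and all of its subtests complete, in last-in-first-out order, and they may be registered from helper functions, so setup helpers can own their own teardown. Taken one step further than the diff above takes it, a sketch with a hypothetical pool type:

package example

import "testing"

type pool struct{}

func (p *pool) start() {}
func (p *pool) stop()  {}

// startPool lets the helper own both setup and teardown; with defer,
// every caller would have to remember to stop the pool itself.
func startPool(t *testing.T) *pool {
	p := &pool{}
	p.start()
	t.Cleanup(p.stop)
	return p
}

func TestPool(t *testing.T) {
	t.Run("case", func(t *testing.T) {
		p := startPool(t) // stopped automatically after this subtest
		_ = p
	})
}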

@ -14,7 +14,7 @@ func testQueueAddDrain(t *testing.T, size, numStart, numStop, numAdd, numDrain i
queue.Start()
}
for i := 0; i < numStop; i++ {
defer queue.Stop()
t.Cleanup(queue.Stop)
}

// Pushes should never block for long.
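
As the loop above shows, t.Cleanup may be registered any number of times; the callbacks run after the test body finishes, most recently registered first. For instance:

package example

import "testing"

func TestCleanupOrder(t *testing.T) {
	for i := 0; i < 3; i++ {
		i := i
		t.Cleanup(func() {
			// Runs after the test body, in LIFO order: 2, 1, 0.
			t.Logf("cleanup %d", i)
		})
	}
}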

@ -392,8 +392,7 @@ func (m *mockChainView) Stop() error {
func TestEdgeUpdateNotification(t *testing.T) {
t.Parallel()

ctx, cleanUp := createTestCtxSingleNode(t, 0)
defer cleanUp()
ctx := createTestCtxSingleNode(t, 0)

// First we'll create the utxo for the channel to be "closed"
const chanValue = 10000
@ -571,8 +570,7 @@ func TestNodeUpdateNotification(t *testing.T) {
t.Parallel()

const startingBlockHeight = 101
ctx, cleanUp := createTestCtxSingleNode(t, startingBlockHeight)
defer cleanUp()
ctx := createTestCtxSingleNode(t, startingBlockHeight)

// We only accept node announcements from nodes having a known channel,
// so create one now.
@ -753,8 +751,7 @@ func TestNotificationCancellation(t *testing.T) {
t.Parallel()

const startingBlockHeight = 101
ctx, cleanUp := createTestCtxSingleNode(t, startingBlockHeight)
defer cleanUp()
ctx := createTestCtxSingleNode(t, startingBlockHeight)

// Create a new client to receive notifications.
ntfnClient, err := ctx.router.SubscribeTopology()
@ -834,8 +831,7 @@ func TestChannelCloseNotification(t *testing.T) {
t.Parallel()

const startingBlockHeight = 101
ctx, cleanUp := createTestCtxSingleNode(t, startingBlockHeight)
defer cleanUp()
ctx := createTestCtxSingleNode(t, startingBlockHeight)

// First we'll create the utxo for the channel to be "closed"
const chanValue = 10000

@ -2219,10 +2219,7 @@ func TestPathFindSpecExample(t *testing.T) {
// we'll pass that in to ensure that the router uses 100 as the current
// height.
const startingHeight = 100
ctx, cleanUp := createTestCtxFromFile(
t, startingHeight, specExampleFilePath,
)
defer cleanUp()
ctx := createTestCtxFromFile(t, startingHeight, specExampleFilePath)

// We'll first exercise the scenario of a direct payment from Bob to
// Carol, so we set "B" as the source node so path finding starts from

@ -511,11 +511,9 @@ func testPaymentLifecycle(t *testing.T, test paymentLifecycleTestCase,
}

router, sendResult, getPaymentResult := setupRouter()
defer func() {
if err := router.Stop(); err != nil {
t.Fatal(err)
}
}()
t.Cleanup(func() {
require.NoError(t, router.Stop())
})

// Craft a LightningPayment struct.
var preImage lntypes.Preimage

@ -96,7 +96,7 @@ func (c *testCtx) RestartRouter(t *testing.T) {

func createTestCtxFromGraphInstance(t *testing.T,
startingHeight uint32, graphInstance *testGraphInstance,
strictPruning bool) (*testCtx, func()) {
strictPruning bool) *testCtx {

return createTestCtxFromGraphInstanceAssumeValid(
t, startingHeight, graphInstance, false, strictPruning,
@ -105,7 +105,7 @@ func createTestCtxFromGraphInstance(t *testing.T,

func createTestCtxFromGraphInstanceAssumeValid(t *testing.T,
startingHeight uint32, graphInstance *testGraphInstance,
assumeValid bool, strictPruning bool) (*testCtx, func()) {
assumeValid bool, strictPruning bool) *testCtx {

// We'll initialize an instance of the channel router with mock
// versions of the chain and channel notifier. As we don't need to test
@ -186,15 +186,15 @@ func createTestCtxFromGraphInstanceAssumeValid(t *testing.T,
notifier: notifier,
}

cleanUp := func() {
t.Cleanup(func() {
ctx.router.Stop()
}
})

return ctx, cleanUp
return ctx
}

func createTestCtxSingleNode(t *testing.T,
startingHeight uint32) (*testCtx, func()) {
startingHeight uint32) *testCtx {

graph, graphBackend, err := makeTestGraph(t, true)
require.NoError(t, err, "failed to make test graph")
@ -217,7 +217,7 @@ func createTestCtxSingleNode(t *testing.T,
}

func createTestCtxFromFile(t *testing.T,
startingHeight uint32, testGraph string) (*testCtx, func()) {
startingHeight uint32, testGraph string) *testCtx {

// We'll attempt to locate and parse out the file
// that encodes the graph that our tests should be run against.
@ -251,10 +251,7 @@ func TestFindRoutesWithFeeLimit(t *testing.T) {
t.Parallel()

const startingBlockHeight = 101
ctx, cleanUp := createTestCtxFromFile(
t, startingBlockHeight, basicGraphFilePath,
)
defer cleanUp()
ctx := createTestCtxFromFile(t, startingBlockHeight, basicGraphFilePath)

// This test will attempt to find routes from roasbeef to sophon for 100
// satoshis with a fee limit of 10 satoshis. There are two routes from
@ -301,10 +298,7 @@ func TestSendPaymentRouteFailureFallback(t *testing.T) {
t.Parallel()

const startingBlockHeight = 101
ctx, cleanUp := createTestCtxFromFile(
t, startingBlockHeight, basicGraphFilePath,
)
defer cleanUp()
ctx := createTestCtxFromFile(t, startingBlockHeight, basicGraphFilePath)

// Craft a LightningPayment struct that'll send a payment from roasbeef
// to luo ji for 1000 satoshis, with a maximum of 1000 satoshis in fees.
@ -395,10 +389,9 @@ func TestChannelUpdateValidation(t *testing.T) {
require.NoError(t, err, "unable to create graph")

const startingBlockHeight = 101
ctx, cleanUp := createTestCtxFromGraphInstance(
ctx := createTestCtxFromGraphInstance(
t, startingBlockHeight, testGraph, true,
)
defer cleanUp()

// Assert that the initially configured fee is retrieved correctly.
_, policy, _, err := ctx.router.GetChannelByID(
@ -503,10 +496,7 @@ func TestSendPaymentErrorRepeatedFeeInsufficient(t *testing.T) {
t.Parallel()

const startingBlockHeight = 101
ctx, cleanUp := createTestCtxFromFile(
t, startingBlockHeight, basicGraphFilePath,
)
defer cleanUp()
ctx := createTestCtxFromFile(t, startingBlockHeight, basicGraphFilePath)

// Get the channel ID.
roasbeefSongokuChanID := ctx.getChannelIDFromAlias(
@ -613,10 +603,7 @@ func TestSendPaymentErrorFeeInsufficientPrivateEdge(t *testing.T) {
t.Parallel()

const startingBlockHeight = 101
ctx, cleanUp := createTestCtxFromFile(
t, startingBlockHeight, basicGraphFilePath,
)
defer cleanUp()
ctx := createTestCtxFromFile(t, startingBlockHeight, basicGraphFilePath)

// Get the channel ID.
roasbeefSongoku := lnwire.NewShortChanIDFromInt(
@ -746,10 +733,7 @@ func TestSendPaymentPrivateEdgeUpdateFeeExceedsLimit(t *testing.T) {
t.Parallel()

const startingBlockHeight = 101
ctx, cleanUp := createTestCtxFromFile(
t, startingBlockHeight, basicGraphFilePath,
)
defer cleanUp()
ctx := createTestCtxFromFile(t, startingBlockHeight, basicGraphFilePath)

// Get the channel ID.
roasbeefSongoku := lnwire.NewShortChanIDFromInt(
@ -868,10 +852,9 @@ func TestSendPaymentErrorNonFinalTimeLockErrors(t *testing.T) {
t.Parallel()

const startingBlockHeight = 101
ctx, cleanUp := createTestCtxFromFile(
ctx := createTestCtxFromFile(
t, startingBlockHeight, basicGraphFilePath,
)
defer cleanUp()

// Craft a LightningPayment struct that'll send a payment from roasbeef
// to sophon for 1k satoshis.
@ -992,10 +975,7 @@ func TestSendPaymentErrorPathPruning(t *testing.T) {
t.Parallel()

const startingBlockHeight = 101
ctx, cleanUp := createTestCtxFromFile(
t, startingBlockHeight, basicGraphFilePath,
)
defer cleanUp()
ctx := createTestCtxFromFile(t, startingBlockHeight, basicGraphFilePath)

// Craft a LightningPayment struct that'll send a payment from roasbeef
// to luo ji for 1000 satoshis, with a maximum of 1000 satoshis in fees.
@ -1153,8 +1133,7 @@ func TestSendPaymentErrorPathPruning(t *testing.T) {
func TestAddProof(t *testing.T) {
t.Parallel()

ctx, cleanup := createTestCtxSingleNode(t, 0)
defer cleanup()
ctx := createTestCtxSingleNode(t, 0)

// Before creating our edge, we'll create two new nodes within the
// network that the channel will connect.
@ -1212,10 +1191,7 @@ func TestIgnoreNodeAnnouncement(t *testing.T) {
t.Parallel()

const startingBlockHeight = 101
ctx, cleanUp := createTestCtxFromFile(
t, startingBlockHeight, basicGraphFilePath,
)
defer cleanUp()
ctx := createTestCtxFromFile(t, startingBlockHeight, basicGraphFilePath)

pub := priv1.PubKey()
node := &channeldb.LightningNode{
@ -1249,10 +1225,9 @@ func TestIgnoreChannelEdgePolicyForUnknownChannel(t *testing.T) {
)
require.NoError(t, err, "unable to create graph")

ctx, cleanUp := createTestCtxFromGraphInstance(
ctx := createTestCtxFromGraphInstance(
t, startingBlockHeight, testGraph, false,
)
defer cleanUp()

var pub1 [33]byte
copy(pub1[:], priv1.PubKey().SerializeCompressed())
@ -1317,10 +1292,7 @@ func TestAddEdgeUnknownVertexes(t *testing.T) {
t.Parallel()

const startingBlockHeight = 101
ctx, cleanUp := createTestCtxFromFile(
t, startingBlockHeight, basicGraphFilePath,
)
defer cleanUp()
ctx := createTestCtxFromFile(t, startingBlockHeight, basicGraphFilePath)

var pub1 [33]byte
copy(pub1[:], priv1.PubKey().SerializeCompressed())
@ -1577,8 +1549,7 @@ func TestWakeUpOnStaleBranch(t *testing.T) {
t.Parallel()

const startingBlockHeight = 101
ctx, cleanUp := createTestCtxSingleNode(t, startingBlockHeight)
defer cleanUp()
ctx := createTestCtxSingleNode(t, startingBlockHeight)

const chanValue = 10000

@ -1787,8 +1758,7 @@ func TestDisconnectedBlocks(t *testing.T) {
t.Parallel()

const startingBlockHeight = 101
ctx, cleanUp := createTestCtxSingleNode(t, startingBlockHeight)
defer cleanUp()
ctx := createTestCtxSingleNode(t, startingBlockHeight)

const chanValue = 10000

@ -1985,8 +1955,7 @@ func TestRouterChansClosedOfflinePruneGraph(t *testing.T) {
t.Parallel()

const startingBlockHeight = 101
ctx, cleanUp := createTestCtxSingleNode(t, startingBlockHeight)
defer cleanUp()
ctx := createTestCtxSingleNode(t, startingBlockHeight)

const chanValue = 10000

@ -2224,10 +2193,9 @@ func TestPruneChannelGraphStaleEdges(t *testing.T) {
}

const startingHeight = 100
ctx, cleanUp := createTestCtxFromGraphInstance(
ctx := createTestCtxFromGraphInstance(
t, startingHeight, testGraph, strictPruning,
)
defer cleanUp()

// All of the channels should exist before pruning them.
assertChannelsPruned(t, ctx.graph, testChannels)
@ -2353,10 +2321,9 @@ func testPruneChannelGraphDoubleDisabled(t *testing.T, assumeValid bool) {
require.NoError(t, err, "unable to create test graph")

const startingHeight = 100
ctx, cleanUp := createTestCtxFromGraphInstanceAssumeValid(
ctx := createTestCtxFromGraphInstanceAssumeValid(
t, startingHeight, testGraph, assumeValid, false,
)
defer cleanUp()

// All the channels should exist within the graph before pruning them
// when not using AssumeChannelValid, otherwise we should have pruned
@ -2394,10 +2361,7 @@ func TestFindPathFeeWeighting(t *testing.T) {
t.Parallel()

const startingBlockHeight = 101
ctx, cleanUp := createTestCtxFromFile(
t, startingBlockHeight, basicGraphFilePath,
)
defer cleanUp()
ctx := createTestCtxFromFile(t, startingBlockHeight, basicGraphFilePath)

var preImage [32]byte
copy(preImage[:], bytes.Repeat([]byte{9}, 32))
@ -2436,8 +2400,7 @@ func TestIsStaleNode(t *testing.T) {
t.Parallel()

const startingBlockHeight = 101
ctx, cleanUp := createTestCtxSingleNode(t, startingBlockHeight)
defer cleanUp()
ctx := createTestCtxSingleNode(t, startingBlockHeight)

// Before we can insert a node in to the database, we need to create a
// channel that it's linked to.
@ -2513,8 +2476,7 @@ func TestIsKnownEdge(t *testing.T) {
t.Parallel()

const startingBlockHeight = 101
ctx, cleanUp := createTestCtxSingleNode(t, startingBlockHeight)
defer cleanUp()
ctx := createTestCtxSingleNode(t, startingBlockHeight)

// First, we'll create a new channel edge (just the info) and insert it
// into the database.
@ -2560,10 +2522,7 @@ func TestIsStaleEdgePolicy(t *testing.T) {
t.Parallel()

const startingBlockHeight = 101
ctx, cleanUp := createTestCtxFromFile(
t, startingBlockHeight, basicGraphFilePath,
)
defer cleanUp()
ctx := createTestCtxFromFile(t, startingBlockHeight, basicGraphFilePath)

// First, we'll create a new channel edge (just the info) and insert it
// into the database.
@ -2710,10 +2669,9 @@ func TestUnknownErrorSource(t *testing.T) {
require.NoError(t, err, "unable to create graph")

const startingBlockHeight = 101
ctx, cleanUp := createTestCtxFromGraphInstance(
ctx := createTestCtxFromGraphInstance(
t, startingBlockHeight, testGraph, false,
)
defer cleanUp()

// Create a payment to node c.
var payHash lntypes.Hash
@ -2841,10 +2799,9 @@ func TestSendToRouteStructuredError(t *testing.T) {
require.NoError(t, err, "unable to create graph")

const startingBlockHeight = 101
ctx, cleanUp := createTestCtxFromGraphInstance(
ctx := createTestCtxFromGraphInstance(
t, startingBlockHeight, testGraph, false,
)
defer cleanUp()

// Set up an init channel for the control tower, such that we can make
// sure the payment is initiated correctly.
@ -2957,10 +2914,9 @@ func TestSendToRouteMaxHops(t *testing.T) {

const startingBlockHeight = 101

ctx, cleanUp := createTestCtxFromGraphInstance(
ctx := createTestCtxFromGraphInstance(
t, startingBlockHeight, testGraph, false,
)
defer cleanUp()

// Create a 30 hop route that exceeds the maximum hop limit.
const payAmt = lnwire.MilliSatoshi(10000)
@ -3063,10 +3019,9 @@ func TestBuildRoute(t *testing.T) {

const startingBlockHeight = 101

ctx, cleanUp := createTestCtxFromGraphInstance(
ctx := createTestCtxFromGraphInstance(
t, startingBlockHeight, testGraph, false,
)
defer cleanUp()

checkHops := func(rt *route.Route, expected []uint64,
payAddr [32]byte) {
@ -3247,8 +3202,7 @@ func assertChanChainRejection(t *testing.T, ctx *testCtx,
func TestChannelOnChainRejectionZombie(t *testing.T) {
t.Parallel()

ctx, cleanup := createTestCtxSingleNode(t, 0)
defer cleanup()
ctx := createTestCtxSingleNode(t, 0)

// To start, we'll make an edge for the channel, but we won't add the
// funding transaction to the mock blockchain, which should cause the
@ -3368,9 +3322,9 @@ func TestSendMPPaymentSucceed(t *testing.T) {

// Make sure the router can start and stop without error.
require.NoError(t, router.Start(), "router failed to start")
defer func() {
t.Cleanup(func() {
require.NoError(t, router.Stop(), "router failed to stop")
}()
})

// Once the router is started, check that the mocked methods are called
// as expected.
@ -3535,9 +3489,9 @@ func TestSendMPPaymentSucceedOnExtraShards(t *testing.T) {

// Make sure the router can start and stop without error.
require.NoError(t, router.Start(), "router failed to start")
defer func() {
t.Cleanup(func() {
require.NoError(t, router.Stop(), "router failed to stop")
}()
})

// Once the router is started, check that the mocked methods are called
// as expected.
@ -3747,9 +3701,9 @@ func TestSendMPPaymentFailed(t *testing.T) {

// Make sure the router can start and stop without error.
require.NoError(t, router.Start(), "router failed to start")
defer func() {
t.Cleanup(func() {
require.NoError(t, router.Stop(), "router failed to stop")
}()
})

// Once the router is started, check that the mocked methods are called
// as expected.
@ -3951,9 +3905,9 @@ func TestSendMPPaymentFailedWithShardsInFlight(t *testing.T) {

// Make sure the router can start and stop without error.
require.NoError(t, router.Start(), "router failed to start")
defer func() {
t.Cleanup(func() {
require.NoError(t, router.Stop(), "router failed to stop")
}()
})

// Once the router is started, check that the mocked methods are called
// as expected.
@ -4107,8 +4061,7 @@ func TestBlockDifferenceFix(t *testing.T) {
initialBlockHeight := uint32(0)

// Starting height here is set to 0, which is behind where we want to be.
ctx, cleanup := createTestCtxSingleNode(t, initialBlockHeight)
defer cleanup()
ctx := createTestCtxSingleNode(t, initialBlockHeight)

// Add initial block to our mini blockchain.
block := &wire.MsgBlock{

@ -75,7 +75,7 @@ func TestTLSAutoRegeneration(t *testing.T) {
if err != nil {
t.Fatalf("couldn't retrieve TLS config")
}
defer cleanUp()
t.Cleanup(cleanUp)

// Grab the certificate to test that getTLSConfig did its job correctly
// and generated a new cert.

@ -15,15 +15,10 @@ func TestStore(t *testing.T) {
t.Run("bolt", func(t *testing.T) {

// Create new store.
cdb, cleanUp, err := channeldb.MakeTestDB()
cdb, err := channeldb.MakeTestDB(t)
if err != nil {
t.Fatalf("unable to open channel db: %v", err)
}
defer cleanUp()

if err != nil {
t.Fatal(err)
}

testStore(t, func() (SweeperStore, error) {
var chain chainhash.Hash

@ -25,7 +25,7 @@ func TestCheckOnionServiceSucceed(t *testing.T) {

// Create mock server and client connection.
proxy := createTestProxy(t)
defer proxy.cleanUp()
t.Cleanup(proxy.cleanUp)
server := proxy.serverConn

// Assign a fake service ID to the controller.
@ -47,7 +47,7 @@ func TestCheckOnionServiceFailOnServiceIDNotMatch(t *testing.T) {

// Create mock server and client connection.
proxy := createTestProxy(t)
defer proxy.cleanUp()
t.Cleanup(proxy.cleanUp)
server := proxy.serverConn

// Assign a fake service ID to the controller.
@ -69,7 +69,7 @@ func TestCheckOnionServiceSucceedOnMultipleServices(t *testing.T) {

// Create mock server and client connection.
proxy := createTestProxy(t)
defer proxy.cleanUp()
t.Cleanup(proxy.cleanUp)
server := proxy.serverConn

// Assign a fake service ID to the controller.
@ -92,7 +92,7 @@ func TestCheckOnionServiceFailOnClosedConnection(t *testing.T) {

// Create mock server and client connection.
proxy := createTestProxy(t)
defer proxy.cleanUp()
t.Cleanup(proxy.cleanUp)
server := proxy.serverConn

// Assign a fake service ID to the controller.

@ -153,7 +153,7 @@ func createTestProxy(t *testing.T) *testProxy {
func TestReadResponse(t *testing.T) {
// Create mock server and client connection.
proxy := createTestProxy(t)
defer proxy.cleanUp()
t.Cleanup(proxy.cleanUp)
server := proxy.serverConn

// Create a dummy tor controller.
@ -300,7 +300,7 @@ func TestReconnectTCMustBeRunning(t *testing.T) {
func TestReconnectSucceed(t *testing.T) {
// Create mock server and client connection.
proxy := createTestProxy(t)
defer proxy.cleanUp()
t.Cleanup(proxy.cleanUp)

// Create a tor controller and mark the controller as started.
c := &Controller{

@ -1528,8 +1528,10 @@ func TestClient(t *testing.T) {
t.Parallel()

h := newHarness(t, tc.cfg)
defer h.server.Stop()
defer h.client.ForceQuit()
t.Cleanup(func() {
require.NoError(t, h.server.Stop())
h.client.ForceQuit()
})

tc.fn(h)
})

@ -68,6 +68,9 @@ func initServer(t *testing.T, db wtserver.DB,
if err = s.Start(); err != nil {
t.Fatalf("unable to start server: %v", err)
}
t.Cleanup(func() {
require.NoError(t, s.Stop())
})

return s
}
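
Because initServer now schedules s.Stop itself, the per-test `defer s.Stop()` lines in the hunks below become redundant and are dropped. The helper shape, sketched with a hypothetical server type:

package example

import "testing"

type server struct{}

func (s *server) Start() error { return nil }
func (s *server) Stop() error  { return nil }

// startServer follows the initServer pattern above: start the server
// and schedule Stop once, here, instead of at every call site.
func startServer(t *testing.T) *server {
	s := &server{}
	if err := s.Start(); err != nil {
		t.Fatalf("unable to start server: %v", err)
	}
	t.Cleanup(func() {
		if err := s.Stop(); err != nil {
			t.Errorf("stop server: %v", err)
		}
	})
	return s
}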
|
||||
@ -83,7 +86,6 @@ func TestServerOnlyAcceptOnePeer(t *testing.T) {
|
||||
const timeoutDuration = 500 * time.Millisecond
|
||||
|
||||
s := initServer(t, nil, timeoutDuration)
|
||||
defer s.Stop()
|
||||
|
||||
localPub := randPubKey(t)
|
||||
|
||||
@ -284,7 +286,6 @@ func testServerCreateSession(t *testing.T, i int, test createSessionTestCase) {
|
||||
const timeoutDuration = 500 * time.Millisecond
|
||||
|
||||
s := initServer(t, nil, timeoutDuration)
|
||||
defer s.Stop()
|
||||
|
||||
localPub := randPubKey(t)
|
||||
|
||||
@ -639,7 +640,6 @@ func testServerStateUpdates(t *testing.T, test stateUpdateTestCase) {
|
||||
const timeoutDuration = 100 * time.Millisecond
|
||||
|
||||
s := initServer(t, nil, timeoutDuration)
|
||||
defer s.Stop()
|
||||
|
||||
localPub := randPubKey(t)
|
||||
|
||||
@ -747,7 +747,6 @@ func TestServerDeleteSession(t *testing.T) {
|
||||
const timeoutDuration = 100 * time.Millisecond
|
||||
|
||||
s := initServer(t, db, timeoutDuration)
|
||||
defer s.Stop()
|
||||
|
||||
// Create a session for peer2 so that the server's db isn't completely
|
||||
// empty.
|
||||
|
@ -37,8 +37,7 @@ func TestWitnessBeaconIntercept(t *testing.T) {
|
||||
[]byte{2},
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
defer subscription.CancelSubscription()
|
||||
t.Cleanup(subscription.CancelSubscription)
|
||||
|
||||
require.NoError(t, interceptedFwd.Settle(preimage))
|
||||
|
||||