lntemp: complete HarnessNode with setup functions

This commit adds more methods in `HarnessNode` and a `SetupHarness` helper to easily set up a new itest.

parent a3fa4ba10a
commit 645257a55f
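As a usage note (not part of the diff below), here is a minimal sketch of how an itest entry point might use the new helper. The itest package name, test function name, binary path, and db backend value are placeholders; SetupHarness, SetupStandbyNodes, and Stop are introduced in this commit.

package itest

import (
	"testing"

	"github.com/lightningnetwork/lnd/lntemp"
)

func TestLightningNetworkDaemonTemp(t *testing.T) {
	// Build the harness: lnd file dirs, miner, chain backend, and the
	// fee service are all prepared here.
	ht := lntemp.SetupHarness(t, "./lnd-itest", "bbolt")
	defer ht.Stop()

	// Fund and connect the standby nodes Alice and Bob.
	ht.SetupStandbyNodes()

	// Individual test cases would run here, typically in child harnesses
	// created via ht.Subtest (see the sketch after Subtest below).
}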
@@ -2,10 +2,19 @@ package lntemp

import (
	"context"
	"fmt"
	"testing"

	"github.com/btcsuite/btcd/btcutil"
	"github.com/btcsuite/btcd/txscript"
	"github.com/btcsuite/btcd/wire"
	"github.com/go-errors/errors"
	"github.com/lightningnetwork/lnd/lnrpc"
	"github.com/lightningnetwork/lnd/lntemp/node"
	"github.com/lightningnetwork/lnd/lntest"
	"github.com/lightningnetwork/lnd/lntest/wait"
	"github.com/lightningnetwork/lnd/lnwallet/chainfee"
	"github.com/stretchr/testify/require"
)

// TestCase defines a test case that's been used in the integration test.
@@ -59,6 +68,10 @@ type HarnessTest struct {
	// stopChainBackend points to the cleanup function returned by the
	// chainBackend.
	stopChainBackend func()

	// cleaned specifies whether the cleanup has been applied for the
	// current HarnessTest.
	cleaned bool
}

// NewHarnessTest creates a new instance of a harnessTest from a regular
@@ -80,3 +93,376 @@ func NewHarnessTest(t *testing.T, lndBinary string,
		lndErrorChan: make(chan error, 10),
	}
}

// Start will assemble the chain backend and the miner for the HarnessTest. It
// also starts the fee service and watches for lnd process errors.
func (h *HarnessTest) Start(chain node.BackendConfig, miner *HarnessMiner) {
	// Spawn a new goroutine to watch for any fatal errors that any of the
	// running lnd processes encounter. If an error occurs, then the test
	// case should naturally fail as a result and we log the server error
	// here to help debug.
	go func() {
		select {
		case err, more := <-h.lndErrorChan:
			if !more {
				return
			}
			h.Logf("lnd finished with error (stderr):\n%v", err)

		case <-h.runCtx.Done():
			return
		}
	}()

	// Start the fee service.
	h.feeService = startFeeService(h.T)

	// Assemble the node manager with chainBackend and feeServiceURL.
	h.manager.chainBackend = chain
	h.manager.feeServiceURL = h.feeService.url

	// Assemble the miner.
	h.Miner = miner
}

// ChainBackendName returns the chain backend name used in the test.
func (h *HarnessTest) ChainBackendName() string {
	return h.manager.chainBackend.Name()
}

// SetupStandbyNodes starts the initial seeder nodes within the test harness.
// The initial nodes' wallets will be funded with ten outputs of 10 BTC each.
func (h *HarnessTest) SetupStandbyNodes() {
	h.Log("Setting up standby nodes Alice and Bob...")
	defer h.Log("Finished the setup, now running tests...")

	lndArgs := []string{
		"--default-remote-max-htlcs=483",
		"--dust-threshold=5000000",
	}
	// Start the initial seeder nodes within the test network, then connect
	// their respective RPC clients.
	h.Alice = h.NewNode("Alice", lndArgs)
	h.Bob = h.NewNode("Bob", lndArgs)

	// First, make a connection between the two nodes. This will wait until
	// both nodes are fully started since the Connect RPC is guarded behind
	// the server.Started() flag that waits for all subsystems to be ready.
	h.ConnectNodes(h.Alice, h.Bob)

	addrReq := &lnrpc.NewAddressRequest{
		Type: lnrpc.AddressType_WITNESS_PUBKEY_HASH,
	}

	// Load up the wallets of the seeder nodes with 10 outputs of 10 BTC
	// each.
	nodes := []*node.HarnessNode{h.Alice, h.Bob}
	for _, hn := range nodes {
		h.manager.standbyNodes[hn.PubKeyStr] = hn
		for i := 0; i < 10; i++ {
			resp := hn.RPC.NewAddress(addrReq)

			addr, err := btcutil.DecodeAddress(
				resp.Address, h.Miner.ActiveNet,
			)
			require.NoError(h, err)

			addrScript, err := txscript.PayToAddrScript(addr)
			require.NoError(h, err)

			output := &wire.TxOut{
				PkScript: addrScript,
				Value:    10 * btcutil.SatoshiPerBitcoin,
			}
			_, err = h.Miner.SendOutputs(
				[]*wire.TxOut{output}, 7500,
			)
			require.NoError(h, err, "send output failed")
		}
	}

	// We generate several blocks in order to give the outputs created
	// above a good number of confirmations.
	h.Miner.MineBlocks(2)

	// Now we want to wait for the nodes to catch up.
	h.WaitForBlockchainSync(h.Alice)
	h.WaitForBlockchainSync(h.Bob)

	// Now block until both wallets have fully synced up.
	expectedBalance := int64(btcutil.SatoshiPerBitcoin * 100)
	err := wait.NoError(func() error {
		aliceResp := h.Alice.RPC.WalletBalance()
		bobResp := h.Bob.RPC.WalletBalance()

		if aliceResp.ConfirmedBalance != expectedBalance {
			return fmt.Errorf("expected 100 BTC, instead "+
				"alice has %d", aliceResp.ConfirmedBalance)
		}

		if bobResp.ConfirmedBalance != expectedBalance {
			return fmt.Errorf("expected 100 BTC, instead "+
				"bob has %d", bobResp.ConfirmedBalance)
		}

		return nil
	}, DefaultTimeout)
	require.NoError(h, err, "timeout checking balance for node")
}

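For reference, the balance check above works out as 10 outputs of 10 BTC per node, and btcutil.SatoshiPerBitcoin is 1e8, so expectedBalance is 10,000,000,000 satoshis (100 BTC). A minimal standalone sketch of that arithmetic (not part of this commit):

package main

import (
	"fmt"

	"github.com/btcsuite/btcd/btcutil"
)

func main() {
	// Each standby node is funded with 10 outputs of 10 BTC each.
	const numOutputs = 10
	const btcPerOutput = 10

	expected := int64(btcutil.SatoshiPerBitcoin * numOutputs * btcPerOutput)
	fmt.Println(expected) // 10000000000 satoshis, i.e. 100 BTC.
}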
// Stop stops the test harness.
func (h *HarnessTest) Stop() {
	// Do nothing if it's not started.
	if h.runCtx == nil {
		h.Log("HarnessTest is not started")
		return
	}

	// Stop all running nodes.
	for _, node := range h.manager.activeNodes {
		h.Shutdown(node)
	}

	close(h.lndErrorChan)

	// Stop the fee service.
	h.feeService.stop()

	// Stop the chainBackend.
	h.stopChainBackend()

	// Stop the miner.
	h.Miner.Stop()
}

// RunTestCase executes a harness test case. Any errors or panics will be
// represented as fatal.
func (h *HarnessTest) RunTestCase(testCase *TestCase) {
	defer func() {
		if err := recover(); err != nil {
			description := errors.Wrap(err, 2).ErrorStack()
			h.Fatalf("Failed: (%v) panic with: \n%v",
				testCase.Name, description)
		}
	}()

	testCase.TestFunc(h)
}

// resetStandbyNodes resets all standby nodes by attaching the new testing.T
// and restarting them with the original config.
func (h *HarnessTest) resetStandbyNodes(t *testing.T) {
	for _, hn := range h.manager.standbyNodes {
		// Inherit the testing.T.
		h.T = t

		// Reset the config so the node will be using the default
		// config for the coming test. This will also inherit the
		// test's running context.
		h.RestartNodeWithExtraArgs(hn, hn.Cfg.OriginalExtraArgs)
	}
}

// Subtest creates a child HarnessTest, which inherits the harness net and
// standby nodes created by the parent test. It returns a cleanup function
// which resets all the standby nodes' configs back to their original state
// and creates snapshots of each node's internal state.
func (h *HarnessTest) Subtest(t *testing.T) (*HarnessTest, func()) {
	st := &HarnessTest{
		T:            t,
		manager:      h.manager,
		Miner:        h.Miner,
		standbyNodes: h.standbyNodes,
		feeService:   h.feeService,
		lndErrorChan: make(chan error, 10),
	}

	// Inherit context from the main test.
	st.runCtx, st.cancel = context.WithCancel(h.runCtx)

	// Reset the standby nodes.
	st.resetStandbyNodes(t)

	cleanup := func() {
		// Don't bother running the cleanups if the test has failed.
		if st.Failed() {
			st.Log("test failed, skipped cleanup")
			return
		}

		// Don't run cleanup if it's already done. This can happen if
		// we have multiple levels of inheritance from the parent
		// harness test. For instance, a `Subtest(st)`.
		if st.cleaned {
			st.Log("test already cleaned, skipped cleanup")
			return
		}

		// We require the mempool to be cleaned from the test.
		require.Empty(st, st.Miner.GetRawMempool(), "mempool not "+
			"cleaned, please mine blocks to clean them all.")

		// When we finish the test, reset the nodes' configs and take
		// a snapshot of each of the nodes' internal states.
		for _, node := range st.manager.standbyNodes {
			st.cleanupStandbyNode(node)
		}

		// If any non-standby nodes are found running, shut them down.
		st.shutdownNonStandbyNodes()

		// Assert that the mempool is cleaned.
		st.Miner.AssertNumTxsInMempool(0)

		// Finally, cancel the run context. We have to do it here
		// because we need to keep the context alive for the above
		// assertions used in cleanup.
		st.cancel()

		// We now want to mark the parent harness as cleaned to avoid
		// running cleanup again since its internal state has been
		// cleaned up by its child harness tests.
		h.cleaned = true
	}

	return st, cleanup
}

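As a usage note (not part of this commit), a hedged sketch of how a test driver might combine Subtest with SetTestName and RunTestCase defined in this file. The itest package name and the runTestCases helper are hypothetical; the harness methods come from the code above.

package itest

import (
	"testing"

	"github.com/lightningnetwork/lnd/lntemp"
)

// runTestCases is a hypothetical driver: each case runs in its own child
// harness so the standby nodes are reset and validated between cases.
func runTestCases(t *testing.T, ht *lntemp.HarnessTest,
	cases []*lntemp.TestCase) {

	for _, tc := range cases {
		tc := tc
		t.Run(tc.Name, func(t *testing.T) {
			// Create a child harness that inherits the miner, fee
			// service, and standby nodes from the parent.
			st, cleanup := ht.Subtest(t)
			defer cleanup()

			// Name the case so the nodes write to fresh log
			// files, then execute the case with panic recovery.
			st.SetTestName(tc.Name)
			st.RunTestCase(tc)
		})
	}
}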
// shutdownNonStandbyNodes will shut down any non-standby nodes.
func (h *HarnessTest) shutdownNonStandbyNodes() {
	for pks, node := range h.manager.activeNodes {
		// If it's a standby node, skip.
		_, ok := h.manager.standbyNodes[pks]
		if ok {
			continue
		}

		// The process may not be in a state to always shut down
		// immediately, so we'll retry up to a hard limit to ensure we
		// eventually shut down.
		err := wait.NoError(func() error {
			return h.manager.shutdownNode(node)
		}, DefaultTimeout)
		require.NoErrorf(h, err, "unable to shutdown %s", node.Name())
	}
}

// cleanupStandbyNode is a function that should be called with defer whenever
// a subtest is created. It will reset the standby node's configs, snapshot
// the states, and validate the node has a clean state.
func (h *HarnessTest) cleanupStandbyNode(hn *node.HarnessNode) {
	// Remove connections made from this test.
	h.removeConnectionns(hn)

	// Delete all payments made from this test.
	hn.RPC.DeleteAllPayments()

	// Update the node's internal state.
	hn.UpdateState()

	// Finally, check the node is in a clean state for the following tests.
	h.validateNodeState(hn)
}

// removeConnectionns will remove all connections made on the standby nodes
// except the connections between Alice and Bob.
func (h *HarnessTest) removeConnectionns(hn *node.HarnessNode) {
	resp := hn.RPC.ListPeers()
	for _, peer := range resp.Peers {
		// Skip disconnecting Alice and Bob.
		switch peer.PubKey {
		case h.Alice.PubKeyStr:
			continue
		case h.Bob.PubKeyStr:
			continue
		}

		hn.RPC.DisconnectPeer(peer.PubKey)
	}
}

// SetTestName sets the test case name.
func (h *HarnessTest) SetTestName(name string) {
	h.manager.currentTestCase = name

	// Overwrite the old log filename so we can create new log files.
	for _, node := range h.manager.standbyNodes {
		node.Cfg.LogFilenamePrefix = name
	}
}

// NewNode creates a new node and asserts its creation. The node is guaranteed
// to have finished its initialization and all its subservers are started.
func (h *HarnessTest) NewNode(name string,
	extraArgs []string) *node.HarnessNode {

	node, err := h.manager.newNode(h.T, name, extraArgs, false, nil, false)
	require.NoErrorf(h, err, "unable to create new node for %s", name)

	return node
}

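A hedged sketch (not part of this commit) of how a test case body might use NewNode for a throwaway node next to the standby nodes. The itest package, the exampleWithCarol name, and Carol herself are hypothetical; NewNode, ConnectNodes, Shutdown, and the Alice field come from this harness.

package itest

import "github.com/lightningnetwork/lnd/lntemp"

// exampleWithCarol spins up a hypothetical extra node for a single test case.
func exampleWithCarol(h *lntemp.HarnessTest) {
	// Create a throwaway node; nil means no extra command line args.
	carol := h.NewNode("Carol", nil)

	// Non-standby nodes are shut down by the Subtest cleanup, but an
	// explicit shutdown inside the case also works.
	defer h.Shutdown(carol)

	// Connect Carol to the standby node Alice.
	h.ConnectNodes(h.Alice, carol)
}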
// Shutdown shuts down the given node and asserts that no errors occur.
func (h *HarnessTest) Shutdown(node *node.HarnessNode) {
	// The process may not be in a state to always shut down immediately,
	// so we'll retry up to a hard limit to ensure we eventually shut down.
	err := wait.NoError(func() error {
		return h.manager.shutdownNode(node)
	}, DefaultTimeout)
	require.NoErrorf(h, err, "unable to shutdown %v", node.Name())
}

// RestartNode restarts a given node and asserts that no errors occur.
func (h *HarnessTest) RestartNode(hn *node.HarnessNode,
	chanBackups ...*lnrpc.ChanBackupSnapshot) {

	err := h.manager.restartNode(hn, nil, chanBackups...)
	require.NoErrorf(h, err, "failed to restart node %s", hn.Name())

	// Give the node some time to catch up with the chain before we
	// continue with the tests.
	h.WaitForBlockchainSync(hn)
}

// RestartNodeWithExtraArgs updates the node's config and restarts it.
func (h *HarnessTest) RestartNodeWithExtraArgs(hn *node.HarnessNode,
	extraArgs []string) {

	hn.SetExtraArgs(extraArgs)
	h.RestartNode(hn, nil)
}

// SetFeeEstimate sets a fee rate to be returned from the fee estimator.
func (h *HarnessTest) SetFeeEstimate(fee chainfee.SatPerKWeight) {
	h.feeService.setFee(fee)
}

// validateNodeState checks that the node doesn't have any uncleaned states
// which will affect its following tests.
func (h *HarnessTest) validateNodeState(hn *node.HarnessNode) {
	errStr := func(subject string) string {
		return fmt.Sprintf("%s: found %s channels, please close "+
			"them properly", hn.Name(), subject)
	}
	// If the node still has open channels, it's most likely that the
	// current test didn't close them properly.
	require.Zerof(h, hn.State.OpenChannel.Active, errStr("active"))
	require.Zerof(h, hn.State.OpenChannel.Public, errStr("public"))
	require.Zerof(h, hn.State.OpenChannel.Private, errStr("private"))
	require.Zerof(h, hn.State.OpenChannel.Pending, errStr("pending open"))

	// The number of pending force close channels should be zero.
	require.Zerof(h, hn.State.CloseChannel.PendingForceClose,
		errStr("pending force"))

	// The number of waiting close channels should be zero.
	require.Zerof(h, hn.State.CloseChannel.WaitingClose,
		errStr("waiting close"))

	// The number of payments should be zero.
	// TODO(yy): no need to check since it's deleted in the cleanup? Or
	// check it in a wait?
	require.Zerof(h, hn.State.Payment.Total, "%s: found "+
		"uncleaned payments, please delete all of them properly",
		hn.Name())
}

lntemp/harness_setup.go (new file, 115 lines)

@@ -0,0 +1,115 @@
package lntemp

import (
	"context"
	"os"
	"testing"

	"github.com/btcsuite/btcd/integration/rpctest"
	"github.com/lightningnetwork/lnd/lntest"
	"github.com/stretchr/testify/require"
)

// SetupHarness creates a new HarnessTest with a series of setups such that
// the instance is ready for usage. The setups are:
// 1. create the directories to hold lnd files.
// 2. start a btcd miner.
// 3. start a chain backend (btcd, bitcoind, or neutrino).
// 4. connect the miner and the chain backend.
// 5. start the HarnessTest.
func SetupHarness(t *testing.T, binaryPath, dbBackendName string) *HarnessTest {
	t.Log("Setting up HarnessTest...")

	// Parse testing flags that influence our test execution.
	logDir := lntest.GetLogDir()
	require.NoError(t, os.MkdirAll(logDir, 0700), "create log dir failed")

	// Parse the database backend.
	dbBackend := prepareDbBackend(t, dbBackendName)

	// Create a new HarnessTest.
	ht := NewHarnessTest(t, binaryPath, dbBackend)

	// Init the miner.
	t.Log("Prepare the miner and mine blocks to activate segwit...")
	miner := prepareMiner(ht.runCtx, ht.T)

	// Start a chain backend.
	chainBackend, cleanUp := prepareChainBackend(t, miner.P2PAddress())
	ht.stopChainBackend = cleanUp

	// Connect our chainBackend to our miner.
	t.Log("Connecting the miner with the chain backend...")
	require.NoError(t, chainBackend.ConnectMiner(), "connect miner")

	// Start the HarnessTest with the chainBackend and miner.
	ht.Start(chainBackend, miner)

	return ht
}

// prepareMiner creates an instance of btcd's rpctest.Harness that will act
// as the miner for all tests. This will be used to fund the wallets of the
// nodes within the test network and to drive blockchain related events
// within the network. Revert the default setting of accepting non-standard
// transactions on simnet to reject them. Transactions on the lightning
// network should always be standard to get better guarantees of getting
// included into blocks.
func prepareMiner(ctxt context.Context, t *testing.T) *HarnessMiner {
	miner := NewMiner(ctxt, t)

	// Before we start anything, we want to overwrite some of the
	// connection settings to make the tests more robust. We might need to
	// restart the miner while there are already blocks present, which
	// will take a bit longer than the 1 second the default settings
	// amount to. Doubling both values will give us retries up to 4
	// seconds.
	miner.MaxConnRetries = rpctest.DefaultMaxConnectionRetries * 2
	miner.ConnectionRetryTimeout = rpctest.DefaultConnectionRetryTimeout * 2

	// Set up miner and connect chain backend to it.
	require.NoError(t, miner.SetUp(true, 50))
	require.NoError(t, miner.Client.NotifyNewTransactions(false))

	// Next mine enough blocks in order for segwit and the CSV package
	// soft-fork to activate on SimNet.
	numBlocks := harnessNetParams.MinerConfirmationWindow * 2
	miner.GenerateBlocks(numBlocks)

	return miner
}

// prepareChainBackend creates a new chain backend.
func prepareChainBackend(t *testing.T,
	minerAddr string) (lntest.BackendConfig, func()) {

	chainBackend, cleanUp, err := lntest.NewBackend(
		minerAddr, harnessNetParams,
	)
	require.NoError(t, err, "new backend")

	return chainBackend, func() {
		require.NoError(t, cleanUp(), "cleanup")
	}
}

// prepareDbBackend parses a DatabaseBackend based on the name given.
func prepareDbBackend(t *testing.T,
	dbBackendName string) lntest.DatabaseBackend {

	var dbBackend lntest.DatabaseBackend
	switch dbBackendName {
	case "bbolt":
		dbBackend = lntest.BackendBbolt

	case "etcd":
		dbBackend = lntest.BackendEtcd

	case "postgres":
		dbBackend = lntest.BackendPostgres

	default:
		require.Fail(t, "unknown db backend")
	}

	return dbBackend
}
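As a usage note (not part of this commit): the accepted values for dbBackendName are "bbolt", "etcd", and "postgres"; anything else fails the test inside prepareDbBackend. A hedged sketch from a hypothetical caller, where the itest package name, helper name, and binary path are placeholders:

package itest

import (
	"testing"

	"github.com/lightningnetwork/lnd/lntemp"
)

// setupWithPostgres is a hypothetical helper showing a non-default backend.
func setupWithPostgres(t *testing.T) *lntemp.HarnessTest {
	// "./lnd-itest" is a placeholder path to the compiled lnd binary.
	return lntemp.SetupHarness(t, "./lnd-itest", "postgres")
}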