mirror of https://github.com/lightningnetwork/lnd.git
synced 2025-04-08 20:28:04 +02:00

Merge pull request #6759 from yyforyongyu/1-new-itest

itest: start a scaffolding testing framework

This commit is contained in commit c65abf8b05
109
.github/workflows/main.yml
vendored
@@ -373,6 +373,115 @@ jobs:
          path: logs-itest-windows.zip
          retention-days: 5

  ########################
  # run new integration tests
  ########################
  new-integration-test:
    name: run new itests
    runs-on: ubuntu-latest
    strategy:
      # Allow other tests in the matrix to continue if one fails.
      fail-fast: false
      matrix:
        include:
          - name: btcd
            args: backend=btcd
          - name: bitcoind
            args: backend=bitcoind
          - name: bitcoind-notxindex
            args: backend="bitcoind notxindex"
          - name: bitcoind-rpcpolling
            args: backend="bitcoind rpcpolling"
          - name: bitcoind-etcd
            args: backend=bitcoind dbbackend=etcd
          - name: bitcoind-postgres
            args: backend=bitcoind dbbackend=postgres
          - name: neutrino
            args: backend=neutrino
    steps:
      - name: git checkout
        uses: actions/checkout@v2

      - name: go cache
        uses: actions/cache@v1
        with:
          path: /home/runner/work/go
          key: lnd-${{ runner.os }}-go-${{ env.GO_VERSION }}-${{ github.job }}-${{ hashFiles('**/go.sum') }}
          restore-keys: |
            lnd-${{ runner.os }}-go-${{ env.GO_VERSION }}-${{ github.job }}-${{ hashFiles('**/go.sum') }}
            lnd-${{ runner.os }}-go-${{ env.GO_VERSION }}-${{ github.job }}-
            lnd-${{ runner.os }}-go-${{ env.GO_VERSION }}-
            lnd-${{ runner.os }}-go-

      - name: setup go ${{ env.GO_VERSION }}
        uses: actions/setup-go@v2
        with:
          go-version: '${{ env.GO_VERSION }}'

      - name: install bitcoind
        run: ./scripts/install_bitcoind.sh

      - name: run new ${{ matrix.name }}
        run: make itest-parallel temptest=true ${{ matrix.args }}

      - name: Zip log files on failure
        if: ${{ failure() }}
        timeout-minutes: 1 # timeout after 1 minute
        run: 7z a logs-itest-${{ matrix.name }}.zip lntest/itest/**/*.log

      - name: Upload log files on failure
        uses: actions/upload-artifact@v2.2.4
        if: ${{ failure() }}
        with:
          name: logs-itest-${{ matrix.name }}
          path: logs-itest-${{ matrix.name }}.zip
          retention-days: 5

  ########################
  # run new windows integration test
  ########################
  new-windows-integration-test:
    name: run new windows itest
    runs-on: windows-latest
    env:
      GOCACHE: ${{ github.workspace }}/go/pkg/build
      GOPATH: ${{ github.workspace }}/go
    steps:
      - name: git checkout
        uses: actions/checkout@v2

      - name: go cache
        uses: actions/cache@v1
        with:
          path: ${{ env.GOPATH }}
          key: lnd-${{ runner.os }}-go-${{ env.GO_VERSION }}-${{ github.job }}-${{ hashFiles('**/go.sum') }}
          restore-keys: |
            lnd-${{ runner.os }}-go-${{ env.GO_VERSION }}-${{ github.job }}-${{ hashFiles('**/go.sum') }}
            lnd-${{ runner.os }}-go-${{ env.GO_VERSION }}-${{ github.job }}-
            lnd-${{ runner.os }}-go-${{ env.GO_VERSION }}-
            lnd-${{ runner.os }}-go-

      - name: setup go ${{ env.GO_VERSION }}
        uses: actions/setup-go@v2
        with:
          go-version: '${{ env.GO_VERSION }}'

      - name: run new itest
        run: make itest-parallel temptest=true windows=1 tranches=2 parallel=2

      - name: Zip log files on failure
        if: ${{ failure() }}
        run: 7z a logs-itest-windows.zip lntest/itest/**/*.log

      - name: Upload log files on failure
        uses: actions/upload-artifact@v2
        if: ${{ failure() }}
        with:
          name: logs-itest-windows
          path: logs-itest-windows.zip
          retention-days: 5


  ########################
  # check pinned dependencies
  ########################
@@ -147,6 +147,15 @@ https://github.com/lightningnetwork/lnd/pull/6963/)
easily](https://github.com/lightningnetwork/lnd/pull/5561), in preparation for
adding a data migration functionality to `lndinit`.

### Integration test

The `lntest` package has been
[refactored](https://github.com/lightningnetwork/lnd/pull/6759) to provide a
better testing suite for writing integration tests. A newly defined structure
is implemented; please refer to the
[README](https://github.com/lightningnetwork/lnd/tree/master/lntemp) for more
details.

# Contributors (Alphabetical Order)

* Carla Kirk-Cohen
@@ -165,3 +174,4 @@ https://github.com/lightningnetwork/lnd/pull/6963/)
* Oliver Gugger
* Priyansh Rastogi
* Roei Erez
* Yong Yu
96
lntemp/README.md
Normal file
@@ -0,0 +1,96 @@
# `lntest`

`lntest` is a package which holds the components used for `lnd`'s integration
tests. It is responsible for managing `lnd` nodes, chain backends and miners,
advancing nodes' states and providing assertions.

### Quick Start

A simple example of running the integration test:

```go
func TestFoo(t *testing.T) {
	// Get the binary path and set up the harness test.
	//
	// TODO: define the binary path to lnd and the name of the database
	// backend.
	harnessTest := lntemp.SetupHarness(t, binary, *dbBackendFlag)
	defer harnessTest.Stop()

	// Set up standby nodes, Alice and Bob, which will be alive and shared
	// among all the test cases.
	harnessTest.SetupStandbyNodes()

	// Run the subset of the test cases selected in this tranche.
	//
	// TODO: define your own testCases.
	for _, tc := range testCases {
		tc := tc

		t.Run(tc.Name, func(st *testing.T) {
			// Create a separate harness test for the testcase to
			// avoid overwriting the external harness test that is
			// tied to the parent test.
			ht, cleanup := harnessTest.Subtest(st)
			defer cleanup()

			// Run the test case.
			ht.RunTestCase(tc)
		})
	}
}
```
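
The `testCases` slice left as a TODO above could, as a hypothetical sketch, be
defined as shown below. The `TestCase` struct with its `Name` and `TestFunc`
fields comes from this package; the test function names are illustrative only.

```go
// testCases lists the integration tests to run in this tranche. Any function
// with the signature func(ht *lntemp.HarnessTest) can serve as a TestFunc;
// testOpenChannel and testCoopCloseChannel are placeholder names.
var testCases = []*lntemp.TestCase{
	{
		Name:     "open channel",
		TestFunc: testOpenChannel,
	},
	{
		Name:     "coop close channel",
		TestFunc: testCoopCloseChannel,
	},
}
```
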
### Package Structure

This package has four major components, `HarnessTest`, `HarnessMiner`,
`node.HarnessNode` and `rpc.HarnessRPC`, with the following architecture:

```
+----------------------------------------------------------+
|                                                          |
|                       HarnessTest                        |
|                                                          |
| +----------------+  +----------------+  +--------------+ |
| |  HarnessNode   |  |  HarnessNode   |  | HarnessMiner | |
| |                |  |                |  +--------------+ |
| | +------------+ |  | +------------+ |                   |
| | | HarnessRPC | |  | | HarnessRPC | |  +--------------+ |
| | +------------+ |  | +------------+ |  | HarnessMiner | |
| +----------------+  +----------------+  +--------------+ |
+----------------------------------------------------------+
```

- `HarnessRPC` holds all the RPC clients and adds a layer over all the RPC
  methods to assert that no error happened at the RPC level.

- `HarnessNode` builds on top of the `HarnessRPC`. It is responsible for
  managing the `lnd` node, including starting and stopping the `lnd` process,
  authenticating the gRPC connection, subscribing to topology updates
  (`NodeWatcher`) and maintaining an internal state (`NodeState`).

- `HarnessMiner` builds on top of `btcd`'s `rpctest.Harness` and is
  responsible for managing blocks and the mempool.

- `HarnessTest` builds on top of `testing.T` and can be viewed as the
  assertion machine. It provides multiple ways to initialize a node, such as
  with/without a seed, backups, etc. It also handles interactions between
  nodes, like connecting nodes and opening/closing channels, so it's easier
  to acquire or validate desired test states such as a node's balance or the
  mempool condition. A short sketch showing how these pieces fit together
  follows this list.

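As a rough sketch of how the layers fit together inside a single test case,
the following hypothetical function uses the standby nodes, the harness-level
helpers (`EnsureConnected`, `OpenChannel`, `CloseChannel`) and the per-node
`RPC` layer. The scenario itself is illustrative and assumes the usual
`lnrpc` and `lntemp` imports; it is not part of the package.

```go
// testOpenCloseChannel is a hypothetical test case: HarnessTest drives the
// flow and makes the assertions, each HarnessNode represents one lnd
// instance, and raw RPC calls go through hn.RPC, which fails the test on any
// RPC-level error.
func testOpenCloseChannel(ht *lntemp.HarnessTest) {
	alice, bob := ht.Alice, ht.Bob

	// Make sure the standby nodes are connected before opening a channel.
	ht.EnsureConnected(alice, bob)

	// Open a channel from Alice to Bob; the harness mines blocks and
	// asserts the channel is announced and listed on both sides.
	chanPoint := ht.OpenChannel(alice, bob, lntemp.OpenChannelParams{
		Amt: 1_000_000,
	})

	// The HarnessRPC layer wraps the lnrpc clients, so errors don't need
	// to be checked here.
	alice.RPC.ListChannels(&lnrpc.ListChannelsRequest{})

	// Close the channel cooperatively; the harness asserts the closing tx
	// confirms and the channel disappears from both nodes' views.
	ht.CloseChannel(alice, chanPoint, false)
}
```
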
### Standby Nodes

Standby nodes are `HarnessNode`s created when initializing the integration
test and they stay alive across all the test cases. Creating a new node is
not without cost: as the block height increases, it takes significantly
longer to initialize a new node and wait for it to be synced. Standby nodes,
however, don't have this problem as they are digesting blocks all the time.
Thus it's encouraged to use standby nodes wherever possible.

Currently there are two standby nodes, Alice and Bob. Their internal states
are recorded and taken into account when `HarnessTest` makes assertions. When
making a new test case using `Subtest`, there's a cleanup function which
further validates that the current test case has left no dangling uncleaned
states, such as transactions left in the mempool, open channels, etc.

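To illustrate the cleanup contract described above, a subtest built with
`Subtest` might look like the hypothetical sketch below: anything the test
creates against the standby nodes (channels, mempool transactions) has to be
resolved before the returned cleanup runs, otherwise the cleanup's assertions
fail the test. The surrounding `TestFoo`, `t` and `harnessTest` are the
placeholders from the Quick Start example.

```go
// A hypothetical subtest using the standby nodes.
t.Run("standby nodes example", func(st *testing.T) {
	ht, cleanup := harnessTest.Subtest(st)
	defer cleanup()

	// Reuse the always-on standby nodes instead of creating new ones.
	chanPoint := ht.OpenChannel(ht.Alice, ht.Bob, lntemp.OpenChannelParams{
		Amt: 1_000_000,
	})

	// ... exercise the channel here ...

	// Leave no dangling state: close the channel so the cleanup's
	// open-channel and mempool checks pass.
	ht.CloseChannel(ht.Alice, chanPoint, false)
})
```
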
140
lntemp/fee_service.go
Normal file
@@ -0,0 +1,140 @@
|
||||
package lntemp
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
"github.com/lightningnetwork/lnd/lntest"
|
||||
"github.com/lightningnetwork/lnd/lnwallet/chainfee"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// WebFeeService defines an interface that's used to provide fee estimation
|
||||
// service used in the integration tests. It must provide a URL so that an lnd
|
||||
// node can be started with the flag `--feeurl` and use the customized fee
|
||||
// estimator.
|
||||
type WebFeeService interface {
|
||||
// Start starts the service.
|
||||
Start() error
|
||||
|
||||
// Stop stops the service.
|
||||
Stop() error
|
||||
|
||||
// URL returns the service's endpoint.
|
||||
URL() string
|
||||
|
||||
// SetFeeRate sets the estimated fee rate for a given confirmation
|
||||
// target.
|
||||
SetFeeRate(feeRate chainfee.SatPerKWeight, conf uint32)
|
||||
}
|
||||
|
||||
const (
|
||||
// feeServiceTarget is the confirmation target for which a fee estimate
|
||||
// is returned. Requests for higher confirmation targets will fall back
|
||||
// to this.
|
||||
feeServiceTarget = 1
|
||||
|
||||
// DefaultFeeRateSatPerKw specifies the default fee rate used in the
|
||||
// tests.
|
||||
DefaultFeeRateSatPerKw = 12500
|
||||
)
|
||||
|
||||
// FeeService runs a web service that provides fee estimation information.
|
||||
type FeeService struct {
|
||||
*testing.T
|
||||
|
||||
feeRateMap map[uint32]uint32
|
||||
url string
|
||||
|
||||
srv *http.Server
|
||||
wg sync.WaitGroup
|
||||
lock sync.Mutex
|
||||
}
|
||||
|
||||
// Compile-time check for the WebFeeService interface.
|
||||
var _ WebFeeService = (*FeeService)(nil)
|
||||
|
||||
// NewFeeService creates a new fee estimation service for the itests.
|
||||
func NewFeeService(t *testing.T) *FeeService {
|
||||
port := lntest.NextAvailablePort()
|
||||
f := FeeService{
|
||||
T: t,
|
||||
url: fmt.Sprintf(
|
||||
"http://localhost:%v/fee-estimates.json", port,
|
||||
),
|
||||
}
|
||||
|
||||
// Initialize default fee estimate.
|
||||
f.feeRateMap = map[uint32]uint32{
|
||||
feeServiceTarget: DefaultFeeRateSatPerKw,
|
||||
}
|
||||
|
||||
listenAddr := fmt.Sprintf(":%v", port)
|
||||
mux := http.NewServeMux()
|
||||
mux.HandleFunc("/fee-estimates.json", f.handleRequest)
|
||||
|
||||
f.srv = &http.Server{
|
||||
Addr: listenAddr,
|
||||
Handler: mux,
|
||||
}
|
||||
return &f
|
||||
}
|
||||
|
||||
// Start starts the web server.
|
||||
func (f *FeeService) Start() error {
|
||||
f.wg.Add(1)
|
||||
go func() {
|
||||
defer f.wg.Done()
|
||||
|
||||
if err := f.srv.ListenAndServe(); err != http.ErrServerClosed {
|
||||
require.NoErrorf(f, err, "cannot start fee api")
|
||||
}
|
||||
}()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// handleRequest handles a client request for fee estimates.
|
||||
func (f *FeeService) handleRequest(w http.ResponseWriter, r *http.Request) {
|
||||
f.lock.Lock()
|
||||
defer f.lock.Unlock()
|
||||
|
||||
bytes, err := json.Marshal(
|
||||
struct {
|
||||
Fees map[uint32]uint32 `json:"fee_by_block_target"`
|
||||
}{
|
||||
Fees: f.feeRateMap,
|
||||
},
|
||||
)
|
||||
require.NoErrorf(f, err, "cannot serialize estimates")
|
||||
|
||||
_, err = io.WriteString(w, string(bytes))
|
||||
require.NoError(f, err, "cannot send estimates")
|
||||
}
|
||||
|
||||
// Stop stops the web server.
|
||||
func (f *FeeService) Stop() error {
|
||||
err := f.srv.Shutdown(context.Background())
|
||||
require.NoError(f, err, "cannot stop fee api")
|
||||
|
||||
f.wg.Wait()
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetFeeRate sets a fee for the given confirmation target.
|
||||
func (f *FeeService) SetFeeRate(fee chainfee.SatPerKWeight, conf uint32) {
|
||||
f.lock.Lock()
|
||||
defer f.lock.Unlock()
|
||||
|
||||
f.feeRateMap[conf] = uint32(fee.FeePerKVByte())
|
||||
}
|
||||
|
||||
// URL returns the service endpoint.
|
||||
func (f *FeeService) URL() string {
|
||||
return f.url
|
||||
}
|
678
lntemp/harness.go
Normal file
@@ -0,0 +1,678 @@
|
||||
package lntemp
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/btcsuite/btcd/btcutil"
|
||||
"github.com/btcsuite/btcd/chaincfg/chainhash"
|
||||
"github.com/btcsuite/btcd/txscript"
|
||||
"github.com/btcsuite/btcd/wire"
|
||||
"github.com/go-errors/errors"
|
||||
"github.com/lightningnetwork/lnd/lnrpc"
|
||||
"github.com/lightningnetwork/lnd/lntemp/node"
|
||||
"github.com/lightningnetwork/lnd/lntemp/rpc"
|
||||
"github.com/lightningnetwork/lnd/lntest"
|
||||
"github.com/lightningnetwork/lnd/lntest/wait"
|
||||
"github.com/lightningnetwork/lnd/lnwallet/chainfee"
|
||||
"github.com/lightningnetwork/lnd/lnwire"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// TestCase defines a test case that's been used in the integration test.
|
||||
type TestCase struct {
|
||||
// Name specifies the test name.
|
||||
Name string
|
||||
|
||||
// TestFunc is the test case wrapped in a function.
|
||||
TestFunc func(t *HarnessTest)
|
||||
}
|
||||
|
||||
// standbyNodes are a list of nodes which are created during the initialization
|
||||
// of the test and used across all test cases.
|
||||
type standbyNodes struct {
|
||||
// Alice and Bob are the initial seeder nodes that are automatically
|
||||
// created to be the initial participants of the test network.
|
||||
Alice *node.HarnessNode
|
||||
Bob *node.HarnessNode
|
||||
}
|
||||
|
||||
// HarnessTest builds on top of a testing.T with enhanced error detection. It
|
||||
// is responsible for managing the interactions among different nodes, and
|
||||
// providing easy-to-use assertions.
|
||||
type HarnessTest struct {
|
||||
*testing.T
|
||||
|
||||
// Embed the standbyNodes so we can easily access them via `ht.Alice`.
|
||||
standbyNodes
|
||||
|
||||
// Miner is a reference to a running full node that can be used to
|
||||
// create new blocks on the network.
|
||||
Miner *HarnessMiner
|
||||
|
||||
// manager handles the start and stop of a given node.
|
||||
manager *nodeManager
|
||||
|
||||
// feeService is a web service that provides external fee estimates to
|
||||
// lnd.
|
||||
feeService WebFeeService
|
||||
|
||||
// Channel for transmitting stderr output from failed lightning node
|
||||
// to main process.
|
||||
lndErrorChan chan error
|
||||
|
||||
// runCtx is a context with cancel method. It's used to signal when the
|
||||
// node needs to quit, and used as the parent context when spawning
|
||||
// children contexts for RPC requests.
|
||||
runCtx context.Context
|
||||
cancel context.CancelFunc
|
||||
|
||||
// stopChainBackend points to the cleanup function returned by the
|
||||
// chainBackend.
|
||||
stopChainBackend func()
|
||||
|
||||
// cleaned specifies whether the cleanup has been applied for the
|
||||
// current HarnessTest.
|
||||
cleaned bool
|
||||
}
|
||||
|
||||
// NewHarnessTest creates a new instance of a HarnessTest from a regular
|
||||
// testing.T instance.
|
||||
func NewHarnessTest(t *testing.T, lndBinary string, feeService WebFeeService,
|
||||
dbBackend lntest.DatabaseBackend) *HarnessTest {
|
||||
|
||||
// Create the run context.
|
||||
ctxt, cancel := context.WithCancel(context.Background())
|
||||
|
||||
manager := newNodeManager(lndBinary, dbBackend)
|
||||
return &HarnessTest{
|
||||
T: t,
|
||||
manager: manager,
|
||||
feeService: feeService,
|
||||
runCtx: ctxt,
|
||||
cancel: cancel,
|
||||
// We need to use buffered channel here as we don't want to
|
||||
// block sending errors.
|
||||
lndErrorChan: make(chan error, 10),
|
||||
}
|
||||
}
|
||||
|
||||
// Start will assemble the chain backend and the miner for the HarnessTest. It
|
||||
// also starts the fee service and watches lnd process error.
|
||||
func (h *HarnessTest) Start(chain node.BackendConfig, miner *HarnessMiner) {
|
||||
// Spawn a new goroutine to watch for any fatal errors that any of the
|
||||
// running lnd processes encounter. If an error occurs, then the test
|
||||
// case should naturally fail as a result and we log the server error here
|
||||
// to help debug.
|
||||
go func() {
|
||||
select {
|
||||
case err, more := <-h.lndErrorChan:
|
||||
if !more {
|
||||
return
|
||||
}
|
||||
h.Logf("lnd finished with error (stderr):\n%v", err)
|
||||
|
||||
case <-h.runCtx.Done():
|
||||
return
|
||||
}
|
||||
}()
|
||||
|
||||
// Start the fee service.
|
||||
err := h.feeService.Start()
|
||||
require.NoError(h, err, "failed to start fee service")
|
||||
|
||||
// Assemble the node manager with chainBackend and feeServiceURL.
|
||||
h.manager.chainBackend = chain
|
||||
h.manager.feeServiceURL = h.feeService.URL()
|
||||
|
||||
// Assemble the miner.
|
||||
h.Miner = miner
|
||||
}
|
||||
|
||||
// ChainBackendName returns the chain backend name used in the test.
|
||||
func (h *HarnessTest) ChainBackendName() string {
|
||||
return h.manager.chainBackend.Name()
|
||||
}
|
||||
|
||||
// SetupStandbyNodes starts the initial seeder nodes within the test harness.
|
||||
// The initial nodes' wallets will be funded with ten outputs of 10 BTC each.
|
||||
func (h *HarnessTest) SetupStandbyNodes() {
|
||||
h.Log("Setting up standby nodes Alice and Bob...")
|
||||
defer h.Log("Finshed the setup, now running tests...")
|
||||
|
||||
lndArgs := []string{
|
||||
"--default-remote-max-htlcs=483",
|
||||
"--dust-threshold=5000000",
|
||||
}
|
||||
// Start the initial seeder nodes within the test network, then connect
|
||||
// their respective RPC clients.
|
||||
h.Alice = h.NewNode("Alice", lndArgs)
|
||||
h.Bob = h.NewNode("Bob", lndArgs)
|
||||
|
||||
// First, make a connection between the two nodes. This will wait until
|
||||
// both nodes are fully started since the Connect RPC is guarded behind
|
||||
// the server.Started() flag that waits for all subsystems to be ready.
|
||||
h.ConnectNodes(h.Alice, h.Bob)
|
||||
|
||||
addrReq := &lnrpc.NewAddressRequest{
|
||||
Type: lnrpc.AddressType_WITNESS_PUBKEY_HASH,
|
||||
}
|
||||
|
||||
// Load up the wallets of the seeder nodes with 10 outputs of 10 BTC
|
||||
// each.
|
||||
nodes := []*node.HarnessNode{h.Alice, h.Bob}
|
||||
for _, hn := range nodes {
|
||||
h.manager.standbyNodes[hn.PubKeyStr] = hn
|
||||
for i := 0; i < 10; i++ {
|
||||
resp := hn.RPC.NewAddress(addrReq)
|
||||
|
||||
addr, err := btcutil.DecodeAddress(
|
||||
resp.Address, h.Miner.ActiveNet,
|
||||
)
|
||||
require.NoError(h, err)
|
||||
|
||||
addrScript, err := txscript.PayToAddrScript(addr)
|
||||
require.NoError(h, err)
|
||||
|
||||
output := &wire.TxOut{
|
||||
PkScript: addrScript,
|
||||
Value: 10 * btcutil.SatoshiPerBitcoin,
|
||||
}
|
||||
_, err = h.Miner.SendOutputs(
|
||||
[]*wire.TxOut{output}, 7500,
|
||||
)
|
||||
require.NoError(h, err, "send output failed")
|
||||
}
|
||||
}
|
||||
|
||||
// We generate several blocks in order to give the outputs created
|
||||
// above a good number of confirmations.
|
||||
h.Miner.MineBlocks(2)
|
||||
|
||||
// Now we want to wait for the nodes to catch up.
|
||||
h.WaitForBlockchainSync(h.Alice)
|
||||
h.WaitForBlockchainSync(h.Bob)
|
||||
|
||||
// Now block until both wallets have fully synced up.
|
||||
expectedBalance := int64(btcutil.SatoshiPerBitcoin * 100)
|
||||
err := wait.NoError(func() error {
|
||||
aliceResp := h.Alice.RPC.WalletBalance()
|
||||
bobResp := h.Bob.RPC.WalletBalance()
|
||||
|
||||
if aliceResp.ConfirmedBalance != expectedBalance {
|
||||
return fmt.Errorf("expected 10 BTC, instead "+
|
||||
"alice has %d", aliceResp.ConfirmedBalance)
|
||||
}
|
||||
|
||||
if bobResp.ConfirmedBalance != expectedBalance {
|
||||
return fmt.Errorf("expected 10 BTC, instead "+
|
||||
"bob has %d", bobResp.ConfirmedBalance)
|
||||
}
|
||||
|
||||
return nil
|
||||
}, DefaultTimeout)
|
||||
require.NoError(h, err, "timeout checking balance for node")
|
||||
}
|
||||
|
||||
// Stop stops the test harness.
|
||||
func (h *HarnessTest) Stop() {
|
||||
// Do nothing if it's not started.
|
||||
if h.runCtx == nil {
|
||||
h.Log("HarnessTest is not started")
|
||||
return
|
||||
}
|
||||
|
||||
// Stop all running nodes.
|
||||
for _, node := range h.manager.activeNodes {
|
||||
h.Shutdown(node)
|
||||
}
|
||||
|
||||
close(h.lndErrorChan)
|
||||
|
||||
// Stop the fee service.
|
||||
err := h.feeService.Stop()
|
||||
require.NoError(h, err, "failed to stop fee service")
|
||||
|
||||
// Stop the chainBackend.
|
||||
h.stopChainBackend()
|
||||
|
||||
// Stop the miner.
|
||||
h.Miner.Stop()
|
||||
}
|
||||
|
||||
// RunTestCase executes a harness test case. Any errors or panics will be
|
||||
// represented as fatal.
|
||||
func (h *HarnessTest) RunTestCase(testCase *TestCase) {
|
||||
defer func() {
|
||||
if err := recover(); err != nil {
|
||||
description := errors.Wrap(err, 2).ErrorStack()
|
||||
h.Fatalf("Failed: (%v) panic with: \n%v",
|
||||
testCase.Name, description)
|
||||
}
|
||||
}()
|
||||
|
||||
testCase.TestFunc(h)
|
||||
}
|
||||
|
||||
// resetStandbyNodes resets all standby nodes by attaching the new testing.T
|
||||
// and restarting them with the original config.
|
||||
func (h *HarnessTest) resetStandbyNodes(t *testing.T) {
|
||||
for _, hn := range h.manager.standbyNodes {
|
||||
// Inherit the testing.T.
|
||||
h.T = t
|
||||
|
||||
// Reset the config so the node will be using the default
|
||||
// config for the coming test. This will also inherit the
|
||||
// test's running context.
|
||||
h.RestartNodeWithExtraArgs(hn, hn.Cfg.OriginalExtraArgs)
|
||||
}
|
||||
}
|
||||
|
||||
// Subtest creates a child HarnessTest, which inherits the harness net and
|
||||
// standby nodes created by the parent test. It will return a cleanup function
|
||||
// which resets all the standby nodes' configs back to their original state and
|
||||
// creates snapshots of each node's internal state.
|
||||
func (h *HarnessTest) Subtest(t *testing.T) (*HarnessTest, func()) {
|
||||
st := &HarnessTest{
|
||||
T: t,
|
||||
manager: h.manager,
|
||||
Miner: h.Miner,
|
||||
standbyNodes: h.standbyNodes,
|
||||
feeService: h.feeService,
|
||||
lndErrorChan: make(chan error, 10),
|
||||
}
|
||||
|
||||
// Inherit context from the main test.
|
||||
st.runCtx, st.cancel = context.WithCancel(h.runCtx)
|
||||
|
||||
// Reset the standby nodes.
|
||||
st.resetStandbyNodes(t)
|
||||
|
||||
cleanup := func() {
|
||||
// Don't bother running the cleanups if the test has failed.
|
||||
if st.Failed() {
|
||||
st.Log("test failed, skipped cleanup")
|
||||
return
|
||||
}
|
||||
|
||||
// Don't run cleanup if it's already done. This can happen if
|
||||
// we have multiple levels of inheritance from the parent harness
|
||||
// test. For instance, a `Subtest(st)`.
|
||||
if st.cleaned {
|
||||
st.Log("test already cleaned, skipped cleanup")
|
||||
return
|
||||
}
|
||||
|
||||
// We require the mempool to be cleaned from the test.
|
||||
require.Empty(st, st.Miner.GetRawMempool(), "mempool not "+
|
||||
"cleaned, please mine blocks to clean them all.")
|
||||
|
||||
// When we finish the test, reset the nodes' configs and take a
|
||||
// snapshot of each of the nodes' internal states.
|
||||
for _, node := range st.manager.standbyNodes {
|
||||
st.cleanupStandbyNode(node)
|
||||
}
|
||||
|
||||
// If any non-standby nodes are found running, shut them down.
|
||||
st.shutdownNonStandbyNodes()
|
||||
|
||||
// Assert that mempool is cleaned
|
||||
st.Miner.AssertNumTxsInMempool(0)
|
||||
|
||||
// Finally, cancel the run context. We have to do it here
|
||||
// because we need to keep the context alive for the above
|
||||
// assertions used in cleanup.
|
||||
st.cancel()
|
||||
|
||||
// We now want to mark the parent harness as cleaned to avoid
|
||||
// running cleanup again since its internal state has been
|
||||
// cleaned up by its child harness tests.
|
||||
h.cleaned = true
|
||||
}
|
||||
|
||||
return st, cleanup
|
||||
}
|
||||
|
||||
// shutdownNonStandbyNodes will shutdown any non-standby nodes.
|
||||
func (h *HarnessTest) shutdownNonStandbyNodes() {
|
||||
for pks, node := range h.manager.activeNodes {
|
||||
// If it's a standby node, skip.
|
||||
_, ok := h.manager.standbyNodes[pks]
|
||||
if ok {
|
||||
continue
|
||||
}
|
||||
|
||||
// The process may not be in a state to always shutdown
|
||||
// immediately, so we'll retry up to a hard limit to ensure we
|
||||
// eventually shutdown.
|
||||
err := wait.NoError(func() error {
|
||||
return h.manager.shutdownNode(node)
|
||||
}, DefaultTimeout)
|
||||
require.NoErrorf(h, err, "unable to shutdown %s", node.Name())
|
||||
}
|
||||
}
|
||||
|
||||
// cleanupStandbyNode is a function that should be called with defer whenever a
|
||||
// subtest is created. It will reset the standby nodes configs, snapshot the
|
||||
// states, and validate the node has a clean state.
|
||||
func (h *HarnessTest) cleanupStandbyNode(hn *node.HarnessNode) {
|
||||
// Remove connections made from this test.
|
||||
h.removeConnectionns(hn)
|
||||
|
||||
// Delete all payments made from this test.
|
||||
hn.RPC.DeleteAllPayments()
|
||||
|
||||
// Update the node's internal state.
|
||||
hn.UpdateState()
|
||||
|
||||
// Finally, check the node is in a clean state for the following tests.
|
||||
h.validateNodeState(hn)
|
||||
}
|
||||
|
||||
// removeConnectionns will remove all connections made on the standby nodes
|
||||
// except the connections between Alice and Bob.
|
||||
func (h *HarnessTest) removeConnectionns(hn *node.HarnessNode) {
|
||||
resp := hn.RPC.ListPeers()
|
||||
for _, peer := range resp.Peers {
|
||||
// Skip disconnecting Alice and Bob.
|
||||
switch peer.PubKey {
|
||||
case h.Alice.PubKeyStr:
|
||||
continue
|
||||
case h.Bob.PubKeyStr:
|
||||
continue
|
||||
}
|
||||
|
||||
hn.RPC.DisconnectPeer(peer.PubKey)
|
||||
}
|
||||
}
|
||||
|
||||
// SetTestName sets the test case name.
|
||||
func (h *HarnessTest) SetTestName(name string) {
|
||||
h.manager.currentTestCase = name
|
||||
|
||||
// Overwrite the old log filename so we can create new log files.
|
||||
for _, node := range h.manager.standbyNodes {
|
||||
node.Cfg.LogFilenamePrefix = name
|
||||
}
|
||||
}
|
||||
|
||||
// NewNode creates a new node and asserts its creation. The node is guaranteed
|
||||
// to have finished its initialization and all its subservers are started.
|
||||
func (h *HarnessTest) NewNode(name string,
|
||||
extraArgs []string) *node.HarnessNode {
|
||||
|
||||
node, err := h.manager.newNode(h.T, name, extraArgs, false, nil, false)
|
||||
require.NoErrorf(h, err, "unable to create new node for %s", name)
|
||||
|
||||
return node
|
||||
}
|
||||
|
||||
// Shutdown shuts down the given node and asserts that no errors occur.
|
||||
func (h *HarnessTest) Shutdown(node *node.HarnessNode) {
|
||||
// The process may not be in a state to always shutdown immediately, so
|
||||
// we'll retry up to a hard limit to ensure we eventually shutdown.
|
||||
err := wait.NoError(func() error {
|
||||
return h.manager.shutdownNode(node)
|
||||
}, DefaultTimeout)
|
||||
require.NoErrorf(h, err, "unable to shutdown %v", node.Name())
|
||||
}
|
||||
|
||||
// RestartNode restarts a given node and asserts that no errors occur.
|
||||
func (h *HarnessTest) RestartNode(hn *node.HarnessNode,
|
||||
chanBackups ...*lnrpc.ChanBackupSnapshot) {
|
||||
|
||||
err := h.manager.restartNode(hn, nil, chanBackups...)
|
||||
require.NoErrorf(h, err, "failed to restart node %s", hn.Name())
|
||||
|
||||
// Give the node some time to catch up with the chain before we
|
||||
// continue with the tests.
|
||||
h.WaitForBlockchainSync(hn)
|
||||
}
|
||||
|
||||
// RestartNodeWithExtraArgs updates the node's config and restarts it.
|
||||
func (h *HarnessTest) RestartNodeWithExtraArgs(hn *node.HarnessNode,
|
||||
extraArgs []string) {
|
||||
|
||||
hn.SetExtraArgs(extraArgs)
|
||||
h.RestartNode(hn, nil)
|
||||
}
|
||||
|
||||
// SetFeeEstimate sets a fee rate to be returned from fee estimator.
|
||||
//
|
||||
// NOTE: this method will set the fee rate for a conf target of 1, which is the
|
||||
// fallback fee rate for a `WebAPIEstimator` if a higher conf target's fee rate
|
||||
// is not set. This means if the fee rate for conf target 6 is set, the fee
|
||||
// estimator will use that value instead.
|
||||
func (h *HarnessTest) SetFeeEstimate(fee chainfee.SatPerKWeight) {
|
||||
h.feeService.SetFeeRate(fee, 1)
|
||||
}
|
||||
|
||||
// validateNodeState checks that the node doesn't have any uncleaned states
|
||||
// which will affect its following tests.
|
||||
func (h *HarnessTest) validateNodeState(hn *node.HarnessNode) {
|
||||
errStr := func(subject string) string {
|
||||
return fmt.Sprintf("%s: found %s channels, please close "+
|
||||
"them properly", hn.Name(), subject)
|
||||
}
|
||||
// If the node still has open channels, it's most likely that the
|
||||
// current test didn't close it properly.
|
||||
require.Zerof(h, hn.State.OpenChannel.Active, errStr("active"))
|
||||
require.Zerof(h, hn.State.OpenChannel.Public, errStr("public"))
|
||||
require.Zerof(h, hn.State.OpenChannel.Private, errStr("private"))
|
||||
require.Zerof(h, hn.State.OpenChannel.Pending, errStr("pending open"))
|
||||
|
||||
// The number of pending force close channels should be zero.
|
||||
require.Zerof(h, hn.State.CloseChannel.PendingForceClose,
|
||||
errStr("pending force"))
|
||||
|
||||
// The number of waiting close channels should be zero.
|
||||
require.Zerof(h, hn.State.CloseChannel.WaitingClose,
|
||||
errStr("waiting close"))
|
||||
|
||||
// The number of payments should be zero.
|
||||
// TODO(yy): no need to check since it's deleted in the cleanup? Or
|
||||
// check it in a wait?
|
||||
require.Zerof(h, hn.State.Payment.Total, "%s: found "+
|
||||
"uncleaned payments, please delete all of them properly",
|
||||
hn.Name())
|
||||
}
|
||||
|
||||
// GetChanPointFundingTxid takes a channel point and converts it into a chain
|
||||
// hash.
|
||||
func (h *HarnessTest) GetChanPointFundingTxid(
|
||||
cp *lnrpc.ChannelPoint) *chainhash.Hash {
|
||||
|
||||
txid, err := lnrpc.GetChanPointFundingTxid(cp)
|
||||
require.NoError(h, err, "unable to get txid")
|
||||
|
||||
return txid
|
||||
}
|
||||
|
||||
// OutPointFromChannelPoint creates an outpoint from a given channel point.
|
||||
func (h *HarnessTest) OutPointFromChannelPoint(
|
||||
cp *lnrpc.ChannelPoint) wire.OutPoint {
|
||||
|
||||
txid := h.GetChanPointFundingTxid(cp)
|
||||
return wire.OutPoint{
|
||||
Hash: *txid,
|
||||
Index: cp.OutputIndex,
|
||||
}
|
||||
}
|
||||
|
||||
// OpenChannelParams houses the params to specify when opening a new channel.
|
||||
type OpenChannelParams struct {
|
||||
// Amt is the local amount being put into the channel.
|
||||
Amt btcutil.Amount
|
||||
|
||||
// PushAmt is the amount that should be pushed to the remote when the
|
||||
// channel is opened.
|
||||
PushAmt btcutil.Amount
|
||||
|
||||
// Private is a boolean indicating whether the opened channel should be
|
||||
// private.
|
||||
Private bool
|
||||
|
||||
// SpendUnconfirmed is a boolean indicating whether we can utilize
|
||||
// unconfirmed outputs to fund the channel.
|
||||
SpendUnconfirmed bool
|
||||
|
||||
// MinHtlc is the htlc_minimum_msat value set when opening the channel.
|
||||
MinHtlc lnwire.MilliSatoshi
|
||||
|
||||
// RemoteMaxHtlcs is the remote_max_htlcs value set when opening the
|
||||
// channel, restricting the number of concurrent HTLCs the remote party
|
||||
// can add to a commitment.
|
||||
RemoteMaxHtlcs uint16
|
||||
|
||||
// FundingShim is an optional funding shim that the caller can specify
|
||||
// in order to modify the channel funding workflow.
|
||||
FundingShim *lnrpc.FundingShim
|
||||
|
||||
// SatPerVByte is the amount of satoshis to spend in chain fees per
|
||||
// virtual byte of the transaction.
|
||||
SatPerVByte btcutil.Amount
|
||||
|
||||
// CommitmentType is the commitment type that should be used for the
|
||||
// channel to be opened.
|
||||
CommitmentType lnrpc.CommitmentType
|
||||
}
|
||||
|
||||
// openChannel attempts to open a channel between srcNode and destNode with the
|
||||
// passed channel funding parameters. Once the `OpenChannel` is called, it will
|
||||
// consume the first event it receives from the open channel client and asserts
|
||||
// it's a channel pending event.
|
||||
func (h *HarnessTest) openChannel(srcNode, destNode *node.HarnessNode,
|
||||
p OpenChannelParams) rpc.OpenChanClient {
|
||||
|
||||
// Specify the minimal confirmations of the UTXOs used for channel
|
||||
// funding.
|
||||
minConfs := int32(1)
|
||||
if p.SpendUnconfirmed {
|
||||
minConfs = 0
|
||||
}
|
||||
|
||||
// Prepare the request and open the channel.
|
||||
openReq := &lnrpc.OpenChannelRequest{
|
||||
NodePubkey: destNode.PubKey[:],
|
||||
LocalFundingAmount: int64(p.Amt),
|
||||
PushSat: int64(p.PushAmt),
|
||||
Private: p.Private,
|
||||
MinConfs: minConfs,
|
||||
SpendUnconfirmed: p.SpendUnconfirmed,
|
||||
MinHtlcMsat: int64(p.MinHtlc),
|
||||
RemoteMaxHtlcs: uint32(p.RemoteMaxHtlcs),
|
||||
FundingShim: p.FundingShim,
|
||||
SatPerByte: int64(p.SatPerVByte),
|
||||
CommitmentType: p.CommitmentType,
|
||||
}
|
||||
respStream := srcNode.RPC.OpenChannel(openReq)
|
||||
|
||||
// Consume the "channel pending" update. This waits until the node
|
||||
// notifies us that the final message in the channel funding workflow
|
||||
// has been sent to the remote node.
|
||||
resp := h.ReceiveOpenChannelUpdate(respStream)
|
||||
|
||||
// Check that the update is channel pending.
|
||||
_, ok := resp.Update.(*lnrpc.OpenStatusUpdate_ChanPending)
|
||||
require.Truef(h, ok, "expected channel pending: update, instead got %v",
|
||||
resp)
|
||||
|
||||
return respStream
|
||||
}
|
||||
|
||||
// OpenChannel attempts to open a channel with the specified parameters
|
||||
// extended from Alice to Bob. Additionally, the following items are asserted,
|
||||
// - 6 blocks will be mined so the channel will be announced if it's public.
|
||||
// - the funding transaction should be found in the first block.
|
||||
// - both nodes should see the channel edge update in their network graph.
|
||||
// - both nodes can report the status of the new channel from ListChannels.
|
||||
func (h *HarnessTest) OpenChannel(alice, bob *node.HarnessNode,
|
||||
p OpenChannelParams) *lnrpc.ChannelPoint {
|
||||
|
||||
// Wait until srcNode and destNode have the latest chain synced.
|
||||
// Otherwise, we may run into a check within the funding manager that
|
||||
// prevents any funding workflows from being kicked off if the chain
|
||||
// isn't yet synced.
|
||||
h.WaitForBlockchainSync(alice)
|
||||
h.WaitForBlockchainSync(bob)
|
||||
|
||||
chanOpenUpdate := h.openChannel(alice, bob, p)
|
||||
|
||||
// Mine 6 blocks, then wait for Alice's node to notify us that the
|
||||
// channel has been opened. The funding transaction should be found
|
||||
// within the first newly mined block. We mine 6 blocks so that in the
|
||||
// case that the channel is public, it is announced to the network.
|
||||
block := h.Miner.MineBlocksAndAssertNumTxes(6, 1)[0]
|
||||
|
||||
// Wait for the channel open event.
|
||||
fundingChanPoint := h.WaitForChannelOpenEvent(chanOpenUpdate)
|
||||
|
||||
// Check that the funding tx is found in the first block.
|
||||
fundingTxID := h.GetChanPointFundingTxid(fundingChanPoint)
|
||||
h.Miner.AssertTxInBlock(block, fundingTxID)
|
||||
|
||||
// Check that both alice and bob have seen the channel from their
|
||||
// network topology.
|
||||
h.AssertTopologyChannelOpen(alice, fundingChanPoint)
|
||||
h.AssertTopologyChannelOpen(bob, fundingChanPoint)
|
||||
|
||||
// Check that the channel can be seen in their ListChannels.
|
||||
h.AssertChannelExists(alice, fundingChanPoint)
|
||||
h.AssertChannelExists(bob, fundingChanPoint)
|
||||
|
||||
// Finally, check the blocks are synced.
|
||||
h.WaitForBlockchainSync(alice)
|
||||
h.WaitForBlockchainSync(bob)
|
||||
|
||||
return fundingChanPoint
|
||||
}
|
||||
|
||||
// closeChannel attempts to close the channel indicated by the passed channel
|
||||
// point, initiated by the passed node. Once the CloseChannel rpc is called, it
|
||||
// will consume one event and assert it's a close pending event. In addition,
|
||||
// it will check that the closing tx can be found in the mempool.
|
||||
func (h *HarnessTest) closeChannel(hn *node.HarnessNode, cp *lnrpc.ChannelPoint,
|
||||
force bool) (rpc.CloseChanClient, *chainhash.Hash) {
|
||||
|
||||
// Calls the rpc to close the channel.
|
||||
closeReq := &lnrpc.CloseChannelRequest{
|
||||
ChannelPoint: cp,
|
||||
Force: force,
|
||||
}
|
||||
stream := hn.RPC.CloseChannel(closeReq)
|
||||
|
||||
// Consume the "channel close" update in order to wait for the closing
|
||||
// transaction to be broadcast, then wait for the closing tx to be seen
|
||||
// within the network.
|
||||
event := h.ReceiveCloseChannelUpdate(stream)
|
||||
pendingClose, ok := event.Update.(*lnrpc.CloseStatusUpdate_ClosePending)
|
||||
require.Truef(h, ok, "expected channel close update, instead got %v",
|
||||
pendingClose)
|
||||
|
||||
closeTxid, err := chainhash.NewHash(pendingClose.ClosePending.Txid)
|
||||
require.NoErrorf(h, err, "unable to decode closeTxid: %v",
|
||||
pendingClose.ClosePending.Txid)
|
||||
|
||||
// Assert the closing tx is in the mempool.
|
||||
h.Miner.AssertTxInMempool(closeTxid)
|
||||
|
||||
return stream, closeTxid
|
||||
}
|
||||
|
||||
// CloseChannel attempts to close a non-anchored channel identified by the
|
||||
// passed channel point owned by the passed harness node. The following items
|
||||
// are asserted,
|
||||
// 1. a close pending event is sent from the close channel client.
|
||||
// 2. the closing tx is found in the mempool.
|
||||
// 3. the node reports the channel being waiting to close.
|
||||
// 4. a block is mined and the closing tx should be found in it.
|
||||
// 5. the node reports zero waiting close channels.
|
||||
// 6. the node receives a topology update regarding the channel close.
|
||||
func (h *HarnessTest) CloseChannel(hn *node.HarnessNode,
|
||||
cp *lnrpc.ChannelPoint, force bool) *chainhash.Hash {
|
||||
|
||||
stream, _ := h.closeChannel(hn, cp, force)
|
||||
|
||||
return h.assertChannelClosed(hn, cp, false, stream)
|
||||
}
|
500
lntemp/harness_assertion.go
Normal file
@@ -0,0 +1,500 @@
|
||||
package lntemp
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/btcsuite/btcd/chaincfg/chainhash"
|
||||
"github.com/lightningnetwork/lnd/channeldb"
|
||||
"github.com/lightningnetwork/lnd/lnrpc"
|
||||
"github.com/lightningnetwork/lnd/lntemp/node"
|
||||
"github.com/lightningnetwork/lnd/lntemp/rpc"
|
||||
"github.com/lightningnetwork/lnd/lntest/wait"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// WaitForBlockchainSync waits until the node is synced to chain.
|
||||
func (h *HarnessTest) WaitForBlockchainSync(hn *node.HarnessNode) {
|
||||
err := wait.NoError(func() error {
|
||||
resp := hn.RPC.GetInfo()
|
||||
if resp.SyncedToChain {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("%s is not synced to chain", hn.Name())
|
||||
}, DefaultTimeout)
|
||||
|
||||
require.NoError(h, err, "timeout waiting for blockchain sync")
|
||||
}
|
||||
|
||||
// AssertPeerConnected asserts that the given node b is connected to a.
|
||||
func (h *HarnessTest) AssertPeerConnected(a, b *node.HarnessNode) {
|
||||
err := wait.NoError(func() error {
|
||||
// We require the RPC call to succeed and won't wait for
|
||||
// it, as a failure here is unexpected behavior.
|
||||
resp := a.RPC.ListPeers()
|
||||
|
||||
// If node B is seen in the ListPeers response from node A,
|
||||
// then we can return true as the connection has been fully
|
||||
// established.
|
||||
for _, peer := range resp.Peers {
|
||||
if peer.PubKey == b.PubKeyStr {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
return fmt.Errorf("%s not found in %s's ListPeers",
|
||||
b.Name(), a.Name())
|
||||
}, DefaultTimeout)
|
||||
|
||||
require.NoError(h, err, "unable to connect %s to %s, got error: "+
|
||||
"peers not connected within %v seconds",
|
||||
a.Name(), b.Name(), DefaultTimeout)
|
||||
}
|
||||
|
||||
// ConnectNodes creates a connection between the two nodes and asserts the
|
||||
// connection succeeds.
|
||||
func (h *HarnessTest) ConnectNodes(a, b *node.HarnessNode) {
|
||||
bobInfo := b.RPC.GetInfo()
|
||||
|
||||
req := &lnrpc.ConnectPeerRequest{
|
||||
Addr: &lnrpc.LightningAddress{
|
||||
Pubkey: bobInfo.IdentityPubkey,
|
||||
Host: b.Cfg.P2PAddr(),
|
||||
},
|
||||
}
|
||||
a.RPC.ConnectPeer(req)
|
||||
h.AssertPeerConnected(a, b)
|
||||
}
|
||||
|
||||
// DisconnectNodes disconnects the given two nodes and asserts the
|
||||
// disconnection succeeds. The request is made from node a and sent to node
|
||||
// b.
|
||||
func (h *HarnessTest) DisconnectNodes(a, b *node.HarnessNode) {
|
||||
bobInfo := b.RPC.GetInfo()
|
||||
a.RPC.DisconnectPeer(bobInfo.IdentityPubkey)
|
||||
}
|
||||
|
||||
// EnsureConnected will try to connect to two nodes, returning no error if they
|
||||
// are already connected. If the nodes were not connected previously, this will
|
||||
// behave the same as ConnectNodes. If a pending connection request has already
|
||||
// been made, the method will block until the two nodes appear in each other's
|
||||
// peers list, or until the DefaultTimeout expires.
|
||||
func (h *HarnessTest) EnsureConnected(a, b *node.HarnessNode) {
|
||||
// errConnectionRequested is used to signal that a connection was
|
||||
// requested successfully, which is distinct from already being
|
||||
// connected to the peer.
|
||||
errConnectionRequested := "connection request in progress"
|
||||
|
||||
tryConnect := func(a, b *node.HarnessNode) error {
|
||||
bInfo := b.RPC.GetInfo()
|
||||
|
||||
req := &lnrpc.ConnectPeerRequest{
|
||||
Addr: &lnrpc.LightningAddress{
|
||||
Pubkey: bInfo.IdentityPubkey,
|
||||
Host: b.Cfg.P2PAddr(),
|
||||
},
|
||||
}
|
||||
|
||||
ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
|
||||
defer cancel()
|
||||
|
||||
_, err := a.RPC.LN.ConnectPeer(ctxt, req)
|
||||
|
||||
// Request was successful.
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// If the connection is in process, we return no error.
|
||||
if strings.Contains(err.Error(), errConnectionRequested) {
|
||||
return nil
|
||||
}
|
||||
|
||||
// If the two are already connected, we return early with no
|
||||
// error.
|
||||
if strings.Contains(err.Error(), "already connected to peer") {
|
||||
return nil
|
||||
}
|
||||
|
||||
// We may get a connection refused error if we happen to be in
|
||||
// the middle of a previous node disconnection, e.g., a restart
|
||||
// from one of the nodes.
|
||||
if strings.Contains(err.Error(), "connection refused") {
|
||||
return nil
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// Return any critical errors returned by either alice or bob.
|
||||
require.NoError(h, tryConnect(a, b), "connection failed between %s "+
|
||||
"and %s", a.Cfg.Name, b.Cfg.Name)
|
||||
|
||||
// When Alice and Bob each makes a connection to the other side at the
|
||||
// same time, it's likely neither connection could succeed. Bob's
|
||||
// connection will be canceled by Alice since she has an outbound
|
||||
// connection to Bob already, and same happens to Alice's. Thus the two
|
||||
// connections cancel each other out.
|
||||
// TODO(yy): move this back when the above issue is fixed.
|
||||
// require.NoError(h, tryConnect(b, a), "connection failed between %s "+
|
||||
// "and %s", a.Cfg.Name, b.Cfg.Name)
|
||||
|
||||
// Otherwise one or both requested a connection, so we wait for the
|
||||
// peers lists to reflect the connection.
|
||||
h.AssertPeerConnected(a, b)
|
||||
h.AssertPeerConnected(b, a)
|
||||
}
|
||||
|
||||
// AssertNumEdges checks that an expected number of edges can be found in the
|
||||
// node specified.
|
||||
func (h *HarnessTest) AssertNumEdges(hn *node.HarnessNode,
|
||||
expected int, includeUnannounced bool) []*lnrpc.ChannelEdge {
|
||||
|
||||
var edges []*lnrpc.ChannelEdge
|
||||
|
||||
old := hn.State.Edge.Public
|
||||
if includeUnannounced {
|
||||
old = hn.State.Edge.Total
|
||||
}
|
||||
|
||||
err := wait.NoError(func() error {
|
||||
req := &lnrpc.ChannelGraphRequest{
|
||||
IncludeUnannounced: includeUnannounced,
|
||||
}
|
||||
chanGraph := hn.RPC.DescribeGraph(req)
|
||||
total := len(chanGraph.Edges)
|
||||
|
||||
if total-old == expected {
|
||||
if expected != 0 {
|
||||
// NOTE: assume edges come in ascending order
|
||||
// that the old edges are at the front of the
|
||||
// slice.
|
||||
edges = chanGraph.Edges[old:]
|
||||
}
|
||||
return nil
|
||||
}
|
||||
return errNumNotMatched(hn.Name(), "num of channel edges",
|
||||
expected, total-old, total, old)
|
||||
}, DefaultTimeout)
|
||||
|
||||
require.NoError(h, err, "timeout while checking for edges")
|
||||
|
||||
return edges
|
||||
}
|
||||
|
||||
// ReceiveOpenChannelUpdate waits until a message is received on the stream or
|
||||
// the timeout is reached.
|
||||
func (h *HarnessTest) ReceiveOpenChannelUpdate(
|
||||
stream rpc.OpenChanClient) *lnrpc.OpenStatusUpdate {
|
||||
|
||||
chanMsg := make(chan *lnrpc.OpenStatusUpdate)
|
||||
errChan := make(chan error)
|
||||
go func() {
|
||||
// Consume one message. This will block until the message is
|
||||
// received.
|
||||
resp, err := stream.Recv()
|
||||
if err != nil {
|
||||
errChan <- err
|
||||
return
|
||||
}
|
||||
chanMsg <- resp
|
||||
}()
|
||||
|
||||
select {
|
||||
case <-time.After(DefaultTimeout):
|
||||
require.Fail(h, "timeout", "timeout waiting for open channel "+
|
||||
"update sent")
|
||||
|
||||
case err := <-errChan:
|
||||
require.Failf(h, "open channel stream",
|
||||
"received err from open channel stream: %v", err)
|
||||
|
||||
case updateMsg := <-chanMsg:
|
||||
return updateMsg
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// WaitForChannelOpenEvent waits for a notification that a channel is open by
|
||||
// consuming a message from the passed open channel stream.
|
||||
func (h HarnessTest) WaitForChannelOpenEvent(
|
||||
stream rpc.OpenChanClient) *lnrpc.ChannelPoint {
|
||||
|
||||
// Consume one event.
|
||||
event := h.ReceiveOpenChannelUpdate(stream)
|
||||
|
||||
resp, ok := event.Update.(*lnrpc.OpenStatusUpdate_ChanOpen)
|
||||
require.Truef(h, ok, "expected channel open update, instead got %v",
|
||||
resp)
|
||||
|
||||
return resp.ChanOpen.ChannelPoint
|
||||
}
|
||||
|
||||
// AssertTopologyChannelOpen asserts that a given channel outpoint is seen by
|
||||
// the passed node's network topology.
|
||||
func (h *HarnessTest) AssertTopologyChannelOpen(hn *node.HarnessNode,
|
||||
chanPoint *lnrpc.ChannelPoint) {
|
||||
|
||||
err := hn.Watcher.WaitForChannelOpen(chanPoint)
|
||||
require.NoErrorf(h, err, "%s didn't report channel", hn.Name())
|
||||
}
|
||||
|
||||
// AssertChannelExists asserts that an active channel identified by the
|
||||
// specified channel point exists from the point-of-view of the node.
|
||||
func (h *HarnessTest) AssertChannelExists(hn *node.HarnessNode,
|
||||
cp *lnrpc.ChannelPoint) *lnrpc.Channel {
|
||||
|
||||
var (
|
||||
channel *lnrpc.Channel
|
||||
err error
|
||||
)
|
||||
|
||||
err = wait.NoError(func() error {
|
||||
channel, err = h.findChannel(hn, cp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Check whether the channel is active, exit early if it is.
|
||||
if channel.Active {
|
||||
return nil
|
||||
}
|
||||
|
||||
return fmt.Errorf("channel point not active")
|
||||
}, DefaultTimeout)
|
||||
|
||||
require.NoErrorf(h, err, "%s: timeout checking for channel point: %v",
|
||||
hn.Name(), cp)
|
||||
|
||||
return channel
|
||||
}
|
||||
|
||||
// findChannel tries to find a target channel in the node using the given
|
||||
// channel point.
|
||||
func (h *HarnessTest) findChannel(hn *node.HarnessNode,
|
||||
chanPoint *lnrpc.ChannelPoint) (*lnrpc.Channel, error) {
|
||||
|
||||
// Get the funding point.
|
||||
fp := h.OutPointFromChannelPoint(chanPoint)
|
||||
|
||||
req := &lnrpc.ListChannelsRequest{}
|
||||
channelInfo := hn.RPC.ListChannels(req)
|
||||
|
||||
// Find the target channel.
|
||||
for _, channel := range channelInfo.Channels {
|
||||
if channel.ChannelPoint == fp.String() {
|
||||
return channel, nil
|
||||
}
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("channel not found using %s", chanPoint)
|
||||
}
|
||||
|
||||
// ReceiveCloseChannelUpdate waits until a message is received on the subscribe
|
||||
// channel close stream or the timeout is reached.
|
||||
func (h *HarnessTest) ReceiveCloseChannelUpdate(
|
||||
stream rpc.CloseChanClient) *lnrpc.CloseStatusUpdate {
|
||||
|
||||
chanMsg := make(chan *lnrpc.CloseStatusUpdate)
|
||||
errChan := make(chan error)
|
||||
go func() {
|
||||
// Consume one message. This will block until the message is
|
||||
// received.
|
||||
resp, err := stream.Recv()
|
||||
if err != nil {
|
||||
errChan <- err
|
||||
return
|
||||
}
|
||||
chanMsg <- resp
|
||||
}()
|
||||
|
||||
select {
|
||||
case <-time.After(DefaultTimeout):
|
||||
require.Fail(h, "timeout", "timeout waiting for close channel "+
|
||||
"update sent")
|
||||
|
||||
case err := <-errChan:
|
||||
require.Failf(h, "close channel stream",
|
||||
"received err from close channel stream: %v", err)
|
||||
|
||||
case updateMsg := <-chanMsg:
|
||||
return updateMsg
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type WaitingCloseChannel *lnrpc.PendingChannelsResponse_WaitingCloseChannel
|
||||
|
||||
// AssertChannelWaitingClose asserts that the given channel found in the node
|
||||
// is waiting close. Returns the WaitingCloseChannel if found.
|
||||
func (h *HarnessTest) AssertChannelWaitingClose(hn *node.HarnessNode,
|
||||
chanPoint *lnrpc.ChannelPoint) WaitingCloseChannel {
|
||||
|
||||
var target WaitingCloseChannel
|
||||
|
||||
op := h.OutPointFromChannelPoint(chanPoint)
|
||||
|
||||
err := wait.NoError(func() error {
|
||||
resp := hn.RPC.PendingChannels()
|
||||
|
||||
for _, waitingClose := range resp.WaitingCloseChannels {
|
||||
if waitingClose.Channel.ChannelPoint == op.String() {
|
||||
target = waitingClose
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
return fmt.Errorf("%v: channel %s not found in waiting close",
|
||||
hn.Name(), op)
|
||||
}, DefaultTimeout)
|
||||
require.NoError(h, err, "assert channel waiting close timed out")
|
||||
|
||||
return target
|
||||
}
|
||||
|
||||
// AssertTopologyChannelClosed asserts a given channel is closed by checking
|
||||
// the graph topology subscription of the specified node. Returns the closed
|
||||
// channel update if found.
|
||||
func (h *HarnessTest) AssertTopologyChannelClosed(hn *node.HarnessNode,
|
||||
chanPoint *lnrpc.ChannelPoint) *lnrpc.ClosedChannelUpdate {
|
||||
|
||||
closedChan, err := hn.Watcher.WaitForChannelClose(chanPoint)
|
||||
require.NoError(h, err, "failed to wait for channel close")
|
||||
|
||||
return closedChan
|
||||
}
|
||||
|
||||
// WaitForChannelCloseEvent waits for a notification that a channel is closed
|
||||
// by consuming a message from the passed close channel stream. Returns the
|
||||
// closing txid if found.
|
||||
func (h HarnessTest) WaitForChannelCloseEvent(
|
||||
stream rpc.CloseChanClient) *chainhash.Hash {
|
||||
|
||||
// Consume one event.
|
||||
event := h.ReceiveCloseChannelUpdate(stream)
|
||||
|
||||
resp, ok := event.Update.(*lnrpc.CloseStatusUpdate_ChanClose)
|
||||
require.Truef(h, ok, "expected channel open update, instead got %v",
|
||||
resp)
|
||||
|
||||
txid, err := chainhash.NewHash(resp.ChanClose.ClosingTxid)
|
||||
require.NoErrorf(h, err, "wrong format found in closing txid: %v",
|
||||
resp.ChanClose.ClosingTxid)
|
||||
|
||||
return txid
|
||||
}
|
||||
|
||||
// AssertNumWaitingClose checks that a PendingChannels response from the node
|
||||
// reports the expected number of waiting close channels.
|
||||
func (h *HarnessTest) AssertNumWaitingClose(hn *node.HarnessNode, num int) {
|
||||
oldWaiting := hn.State.CloseChannel.WaitingClose
|
||||
|
||||
err := wait.NoError(func() error {
|
||||
resp := hn.RPC.PendingChannels()
|
||||
total := len(resp.WaitingCloseChannels)
|
||||
|
||||
got := total - oldWaiting
|
||||
if got == num {
|
||||
return nil
|
||||
}
|
||||
|
||||
return errNumNotMatched(hn.Name(), "waiting close channels",
|
||||
num, got, total, oldWaiting)
|
||||
}, DefaultTimeout)
|
||||
|
||||
require.NoErrorf(h, err, "%s: assert waiting close timeout",
|
||||
hn.Name())
|
||||
}
|
||||
|
||||
// AssertNumPendingForceClose checks that a PendingChannels response from the
|
||||
// node reports the expected number of pending force close channels.
|
||||
func (h *HarnessTest) AssertNumPendingForceClose(hn *node.HarnessNode,
|
||||
num int) {
|
||||
|
||||
oldForce := hn.State.CloseChannel.PendingForceClose
|
||||
|
||||
err := wait.NoError(func() error {
|
||||
resp := hn.RPC.PendingChannels()
|
||||
total := len(resp.PendingForceClosingChannels)
|
||||
|
||||
got := total - oldForce
|
||||
if got == num {
|
||||
return nil
|
||||
}
|
||||
|
||||
return errNumNotMatched(hn.Name(), "pending force close "+
|
||||
"channels", num, got, total, oldForce)
|
||||
}, DefaultTimeout)
|
||||
|
||||
require.NoErrorf(h, err, "%s: assert pending force close timeout",
|
||||
hn.Name())
|
||||
}
|
||||
|
||||
// assertChannelClosed asserts that the channel is properly cleaned up after
|
||||
// initiating a cooperative or local close.
|
||||
func (h *HarnessTest) assertChannelClosed(hn *node.HarnessNode,
|
||||
cp *lnrpc.ChannelPoint, anchors bool,
|
||||
stream rpc.CloseChanClient) *chainhash.Hash {
|
||||
|
||||
// Assert the channel is waiting close.
|
||||
resp := h.AssertChannelWaitingClose(hn, cp)
|
||||
|
||||
// Assert that the channel is in coop broadcasted.
|
||||
require.Contains(h, resp.Channel.ChanStatusFlags,
|
||||
channeldb.ChanStatusCoopBroadcasted.String(),
|
||||
"channel not coop broadcasted")
|
||||
|
||||
// We'll now, generate a single block, wait for the final close status
|
||||
// update, then ensure that the closing transaction was included in the
|
||||
// block. If there are anchors, we also expect an anchor sweep.
|
||||
expectedTxes := 1
|
||||
if anchors {
|
||||
expectedTxes = 2
|
||||
}
|
||||
block := h.Miner.MineBlocksAndAssertNumTxes(1, expectedTxes)[0]
|
||||
|
||||
// Consume one close event and assert the closing txid can be found in
|
||||
// the block.
|
||||
closingTxid := h.WaitForChannelCloseEvent(stream)
|
||||
h.Miner.AssertTxInBlock(block, closingTxid)
|
||||
|
||||
// We should see zero waiting close channels now.
|
||||
h.AssertNumWaitingClose(hn, 0)
|
||||
|
||||
// Finally, check that the node's topology graph has seen this channel
|
||||
// closed.
|
||||
h.AssertTopologyChannelClosed(hn, cp)
|
||||
|
||||
return closingTxid
|
||||
}
|
||||
|
||||
// AssertChannelPolicyUpdate checks that the required policy update has
|
||||
// happened on the given node.
|
||||
func (h *HarnessTest) AssertChannelPolicyUpdate(hn *node.HarnessNode,
|
||||
advertisingNode *node.HarnessNode, policy *lnrpc.RoutingPolicy,
|
||||
chanPoint *lnrpc.ChannelPoint, includeUnannounced bool) {
|
||||
|
||||
require.NoError(
|
||||
h, hn.Watcher.WaitForChannelPolicyUpdate(
|
||||
advertisingNode, policy,
|
||||
chanPoint, includeUnannounced,
|
||||
), "%s: error while waiting for channel update", hn.Name(),
|
||||
)
|
||||
}
|
||||
|
||||
// WaitForGraphSync waits until the node is synced to graph or times out.
|
||||
func (h *HarnessTest) WaitForGraphSync(hn *node.HarnessNode) {
|
||||
err := wait.NoError(func() error {
|
||||
resp := hn.RPC.GetInfo()
|
||||
if resp.SyncedToGraph {
|
||||
return nil
|
||||
}
|
||||
|
||||
return fmt.Errorf("node not synced to graph")
|
||||
}, DefaultTimeout)
|
||||
require.NoError(h, err, "%s: timeout while sync to graph", hn.Name())
|
||||
}
|
268 lntemp/harness_miner.go Normal file
@@ -0,0 +1,268 @@
|
||||
package lntemp
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/btcsuite/btcd/btcutil"
|
||||
"github.com/btcsuite/btcd/chaincfg"
|
||||
"github.com/btcsuite/btcd/chaincfg/chainhash"
|
||||
"github.com/btcsuite/btcd/integration/rpctest"
|
||||
"github.com/btcsuite/btcd/rpcclient"
|
||||
"github.com/btcsuite/btcd/wire"
|
||||
"github.com/lightningnetwork/lnd/lntest"
|
||||
"github.com/lightningnetwork/lnd/lntest/wait"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
const (
|
||||
// minerLogFilename is the default log filename for the miner node.
|
||||
minerLogFilename = "output_btcd_miner.log"
|
||||
|
||||
// minerLogDir is the default log dir for the miner node.
|
||||
minerLogDir = ".minerlogs"
|
||||
)
|
||||
|
||||
var harnessNetParams = &chaincfg.RegressionNetParams
|
||||
|
||||
type HarnessMiner struct {
|
||||
*testing.T
|
||||
*rpctest.Harness
|
||||
|
||||
// runCtx is a context with cancel method. It's used to signal when the
// node needs to quit, and used as the parent context when spawning
// children contexts.
runCtx context.Context
|
||||
cancel context.CancelFunc
|
||||
|
||||
// logPath is the directory path of the miner's logs.
|
||||
logPath string
|
||||
|
||||
// logFilename is the saved log filename of the miner node.
|
||||
logFilename string
|
||||
}
|
||||
|
||||
// NewMiner creates a new miner using btcd backend with the default log file
|
||||
// dir and name.
|
||||
func NewMiner(ctxt context.Context, t *testing.T) *HarnessMiner {
|
||||
return newMiner(ctxt, t, minerLogDir, minerLogFilename)
|
||||
}
|
||||
|
||||
// newMiner creates a new miner using btcd's rpctest.
|
||||
func newMiner(ctxb context.Context, t *testing.T, minerDirName,
|
||||
logFilename string) *HarnessMiner {
|
||||
|
||||
handler := &rpcclient.NotificationHandlers{}
|
||||
btcdBinary := lntest.GetBtcdBinary()
|
||||
baseLogPath := fmt.Sprintf("%s/%s", lntest.GetLogDir(), minerDirName)
|
||||
|
||||
args := []string{
|
||||
"--rejectnonstd",
|
||||
"--txindex",
|
||||
"--nowinservice",
|
||||
"--nobanning",
|
||||
"--debuglevel=debug",
|
||||
"--logdir=" + baseLogPath,
|
||||
"--trickleinterval=100ms",
|
||||
// Don't disconnect if a reply takes too long.
|
||||
"--nostalldetect",
|
||||
}
|
||||
|
||||
miner, err := rpctest.New(harnessNetParams, handler, args, btcdBinary)
|
||||
require.NoError(t, err, "unable to create mining node")
|
||||
|
||||
ctxt, cancel := context.WithCancel(ctxb)
|
||||
return &HarnessMiner{
|
||||
T: t,
|
||||
Harness: miner,
|
||||
runCtx: ctxt,
|
||||
cancel: cancel,
|
||||
logPath: baseLogPath,
|
||||
logFilename: logFilename,
|
||||
}
|
||||
}
|
||||
|
||||
// saveLogs copies the node's logs and saves them to the file specified by
// h.logFilename.
|
||||
func (h *HarnessMiner) saveLogs() {
|
||||
// After shutting down the miner, we'll make a copy of the log files
|
||||
// before deleting the temporary log dir.
|
||||
path := fmt.Sprintf("%s/%s", h.logPath, harnessNetParams.Name)
|
||||
files, err := ioutil.ReadDir(path)
|
||||
require.NoError(h, err, "unable to read log directory")
|
||||
|
||||
for _, file := range files {
|
||||
newFilename := strings.Replace(
|
||||
file.Name(), "btcd.log", h.logFilename, 1,
|
||||
)
|
||||
copyPath := fmt.Sprintf("%s/../%s", h.logPath, newFilename)
|
||||
|
||||
logFile := fmt.Sprintf("%s/%s", path, file.Name())
|
||||
err := CopyFile(filepath.Clean(copyPath), logFile)
|
||||
require.NoError(h, err, "unable to copy file")
|
||||
}
|
||||
|
||||
err = os.RemoveAll(h.logPath)
|
||||
require.NoErrorf(h, err, "cannot remove dir %s", h.logPath)
|
||||
}
|
||||
|
||||
// Stop shuts down the miner and saves its logs.
|
||||
func (h *HarnessMiner) Stop() {
|
||||
h.cancel()
|
||||
require.NoError(h, h.TearDown(), "tear down miner got error")
|
||||
h.saveLogs()
|
||||
}
|
||||
|
||||
// GetBestBlock makes an RPC request to the miner and asserts the call
// succeeds.
|
||||
func (h *HarnessMiner) GetBestBlock() (*chainhash.Hash, int32) {
|
||||
blockHash, height, err := h.Client.GetBestBlock()
|
||||
require.NoError(h, err, "failed to GetBestBlock")
|
||||
return blockHash, height
|
||||
}
|
||||
|
||||
// GetRawMempool makes an RPC call to the miner's GetRawMempool and
// asserts the call succeeds.
|
||||
func (h *HarnessMiner) GetRawMempool() []*chainhash.Hash {
|
||||
mempool, err := h.Client.GetRawMempool()
|
||||
require.NoError(h, err, "unable to get mempool")
|
||||
return mempool
|
||||
}
|
||||
|
||||
// GenerateBlocks mines 'num' blocks and returns their hashes.
|
||||
func (h *HarnessMiner) GenerateBlocks(num uint32) []*chainhash.Hash {
|
||||
blockHashes, err := h.Client.Generate(num)
|
||||
require.NoError(h, err, "unable to generate blocks")
|
||||
require.Len(h, blockHashes, int(num), "wrong num of blocks generated")
|
||||
|
||||
return blockHashes
|
||||
}
|
||||
|
||||
// GetBlock gets a block using its block hash.
|
||||
func (h *HarnessMiner) GetBlock(blockHash *chainhash.Hash) *wire.MsgBlock {
|
||||
block, err := h.Client.GetBlock(blockHash)
|
||||
require.NoError(h, err, "unable to get block")
|
||||
return block
|
||||
}
|
||||
|
||||
// MineBlocks mines 'num' blocks and checks that the blocks are present in
// the node's blockchain.
|
||||
func (h *HarnessMiner) MineBlocks(num uint32) []*wire.MsgBlock {
|
||||
blocks := make([]*wire.MsgBlock, num)
|
||||
|
||||
blockHashes := h.GenerateBlocks(num)
|
||||
|
||||
for i, blockHash := range blockHashes {
|
||||
block := h.GetBlock(blockHash)
|
||||
blocks[i] = block
|
||||
}
|
||||
|
||||
return blocks
|
||||
}
|
||||
|
||||
// AssertNumTxsInMempool polls until finding the desired number of
// transactions in the provided miner's mempool. It will fail the test if
// this number is not met after the given timeout.
|
||||
func (h *HarnessMiner) AssertNumTxsInMempool(n int) []*chainhash.Hash {
|
||||
var (
|
||||
mem []*chainhash.Hash
|
||||
err error
|
||||
)
|
||||
|
||||
err = wait.NoError(func() error {
|
||||
// We require the RPC call to succeed and won't retry it, as a
// failure here is unexpected behavior.
|
||||
mem = h.GetRawMempool()
|
||||
if len(mem) == n {
|
||||
return nil
|
||||
}
|
||||
|
||||
return fmt.Errorf("want %v, got %v in mempool: %v",
|
||||
n, len(mem), mem)
|
||||
}, lntest.MinerMempoolTimeout)
|
||||
require.NoError(h, err, "assert tx in mempool timeout")
|
||||
|
||||
return mem
|
||||
}
|
||||
|
||||
// AssertTxInBlock asserts that a given txid can be found in the passed block.
|
||||
func (h *HarnessMiner) AssertTxInBlock(block *wire.MsgBlock,
|
||||
txid *chainhash.Hash) {
|
||||
|
||||
blockTxes := make([]chainhash.Hash, 0)
|
||||
|
||||
for _, tx := range block.Transactions {
|
||||
sha := tx.TxHash()
|
||||
blockTxes = append(blockTxes, sha)
|
||||
|
||||
if bytes.Equal(txid[:], sha[:]) {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
require.Failf(h, "tx was not included in block", "tx:%v, block has:%v",
|
||||
txid, blockTxes)
|
||||
}
|
||||
|
||||
// MineBlocksAndAssertNumTxes mines 'num' blocks and checks that the blocks
// are present in the node's blockchain. numTxs should be set to the number
// of transactions (excluding the coinbase) we expect to be included in the
// first mined block.
|
||||
func (h *HarnessMiner) MineBlocksAndAssertNumTxes(num uint32,
|
||||
numTxs int) []*wire.MsgBlock {
|
||||
|
||||
// If we expect transactions to be included in the blocks we'll mine,
|
||||
// we wait here until they are seen in the miner's mempool.
|
||||
txids := h.AssertNumTxsInMempool(numTxs)
|
||||
|
||||
// Mine blocks.
|
||||
blocks := h.MineBlocks(num)
|
||||
|
||||
// Finally, assert that all the transactions were included in the first
|
||||
// block.
|
||||
for _, txid := range txids {
|
||||
h.AssertTxInBlock(blocks[0], txid)
|
||||
}
|
||||
|
||||
return blocks
|
||||
}
|
||||
|
||||
// GetRawTransaction makes an RPC call to the miner's GetRawTransaction and
// asserts the call succeeds.
|
||||
func (h *HarnessMiner) GetRawTransaction(txid *chainhash.Hash) *btcutil.Tx {
|
||||
tx, err := h.Client.GetRawTransaction(txid)
|
||||
require.NoErrorf(h, err, "failed to get raw tx: %v", txid)
|
||||
return tx
|
||||
}
|
||||
|
||||
// AssertTxInMempool asserts a given transaction can be found in the mempool.
|
||||
func (h *HarnessMiner) AssertTxInMempool(txid *chainhash.Hash) *wire.MsgTx {
|
||||
var msgTx *wire.MsgTx
|
||||
|
||||
err := wait.NoError(func() error {
|
||||
// We require the RPC call to succeed and won't retry it, as a
// failure here is unexpected behavior.
|
||||
mempool := h.GetRawMempool()
|
||||
|
||||
if len(mempool) == 0 {
|
||||
return fmt.Errorf("empty mempool")
|
||||
}
|
||||
|
||||
for _, memTx := range mempool {
|
||||
// Check the values are equal.
|
||||
if *memTx == *txid {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
return fmt.Errorf("txid %v not found in mempool: %v", txid,
|
||||
mempool)
|
||||
}, lntest.MinerMempoolTimeout)
|
||||
|
||||
require.NoError(h, err, "timeout checking mempool")
|
||||
return msgTx
|
||||
}
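
// The following is an editor-added usage sketch and is not part of the
// original commit. It shows how a test in this package might confirm a
// transaction using the miner helpers above; the txid is assumed to come
// from an earlier RPC call made by the test.
func exampleConfirmTx(miner *HarnessMiner, txid *chainhash.Hash) {
	// The transaction should first show up in the miner's mempool.
	miner.AssertTxInMempool(txid)

	// Mine a single block and expect exactly one non-coinbase tx in it.
	block := miner.MineBlocksAndAssertNumTxes(1, 1)[0]

	// The mined block must contain the transaction.
	miner.AssertTxInBlock(block, txid)
}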
|
210 lntemp/harness_node_manager.go Normal file
@@ -0,0 +1,210 @@
|
||||
package lntemp
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
|
||||
"github.com/lightningnetwork/lnd/lnrpc"
|
||||
"github.com/lightningnetwork/lnd/lntemp/node"
|
||||
"github.com/lightningnetwork/lnd/lntest"
|
||||
"github.com/lightningnetwork/lnd/lntest/wait"
|
||||
)
|
||||
|
||||
// nodeManager is responsible for handling the start and stop of a given
// node. It also keeps track of the running nodes.
|
||||
type nodeManager struct {
|
||||
sync.Mutex
|
||||
|
||||
// chainBackend houses the information necessary to use a node as LND
|
||||
// chain backend, such as rpc configuration, P2P information etc.
|
||||
chainBackend node.BackendConfig
|
||||
|
||||
// currentTestCase holds the name for the currently run test case.
|
||||
currentTestCase string
|
||||
|
||||
// lndBinary is the full path to the lnd binary that was specifically
|
||||
// compiled with all required itest flags.
|
||||
lndBinary string
|
||||
|
||||
// dbBackend sets the database backend to use.
|
||||
dbBackend lntest.DatabaseBackend
|
||||
|
||||
// activeNodes is a map of all running nodes, format:
|
||||
// {pubkey: *HarnessNode}.
|
||||
activeNodes map[string]*node.HarnessNode
|
||||
|
||||
// standbyNodes is a map of all the standby nodes, format:
|
||||
// {pubkey: *HarnessNode}.
|
||||
standbyNodes map[string]*node.HarnessNode
|
||||
|
||||
// nodeCounter is a monotonically increasing counter that's used as the
|
||||
// node's unique ID.
|
||||
nodeCounter uint32
|
||||
|
||||
// feeServiceURL is the url of the fee service.
|
||||
feeServiceURL string
|
||||
}
|
||||
|
||||
// newNodeManager creates a new node manager instance.
|
||||
func newNodeManager(lndBinary string,
|
||||
dbBackend lntest.DatabaseBackend) *nodeManager {
|
||||
|
||||
return &nodeManager{
|
||||
lndBinary: lndBinary,
|
||||
dbBackend: dbBackend,
|
||||
activeNodes: make(map[string]*node.HarnessNode),
|
||||
standbyNodes: make(map[string]*node.HarnessNode),
|
||||
}
|
||||
}
|
||||
|
||||
// nextNodeID generates a unique sequence to be used as the node's ID.
|
||||
func (nm *nodeManager) nextNodeID() uint32 {
|
||||
nodeID := atomic.AddUint32(&nm.nodeCounter, 1)
|
||||
return nodeID - 1
|
||||
}
|
||||
|
||||
// newNode initializes a new HarnessNode, supporting the ability to initialize
|
||||
// a wallet with or without a seed. If useSeed is false, the returned harness
|
||||
// node can be used immediately. Otherwise, the node will require an additional
|
||||
// initialization phase where the wallet is either created or restored.
|
||||
func (nm *nodeManager) newNode(t *testing.T, name string, extraArgs []string,
|
||||
useSeed bool, password []byte, cmdOnly bool,
|
||||
opts ...node.Option) (*node.HarnessNode, error) {
|
||||
|
||||
cfg := &node.BaseNodeConfig{
|
||||
Name: name,
|
||||
LogFilenamePrefix: nm.currentTestCase,
|
||||
Password: password,
|
||||
BackendCfg: nm.chainBackend,
|
||||
ExtraArgs: extraArgs,
|
||||
FeeURL: nm.feeServiceURL,
|
||||
DbBackend: nm.dbBackend,
|
||||
NodeID: nm.nextNodeID(),
|
||||
LndBinary: nm.lndBinary,
|
||||
NetParams: harnessNetParams,
|
||||
}
|
||||
for _, opt := range opts {
|
||||
opt(cfg)
|
||||
}
|
||||
|
||||
node, err := node.NewHarnessNode(t, cfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Put node in activeNodes to ensure Shutdown is called even if start
|
||||
// returns an error.
|
||||
defer nm.registerNode(node)
|
||||
|
||||
switch {
|
||||
// If the node uses seed to start, we'll need to create the wallet and
|
||||
// unlock the wallet later.
|
||||
case useSeed:
|
||||
err = node.StartWithSeed()
|
||||
|
||||
// Start the node only with the lnd process without creating the grpc
|
||||
// connection, which is used in testing etcd leader selection.
|
||||
case cmdOnly:
|
||||
err = node.StartLndCmd()
|
||||
|
||||
// By default, we'll create a node with wallet being unlocked.
|
||||
default:
|
||||
err = node.Start()
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to start: %w", err)
|
||||
}
|
||||
|
||||
return node, nil
|
||||
}
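
// Editor-added sketch, not part of the original commit: creating a plain node
// through the manager. With useSeed and cmdOnly both false, the returned node
// has its wallet unlocked and its RPC clients ready once newNode returns.
func exampleNewNode(t *testing.T, nm *nodeManager) (*node.HarnessNode, error) {
	return nm.newNode(t, "alice", nil, false, nil, false)
}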
|
||||
|
||||
// registerNode records a new HarnessNode in the manager's map of active
// nodes. The node is registered before it's fully started so that it can
// still be shut down if the start fails.
|
||||
func (nm *nodeManager) registerNode(node *node.HarnessNode) {
|
||||
nm.Lock()
|
||||
nm.activeNodes[node.PubKeyStr] = node
|
||||
nm.Unlock()
|
||||
}
|
||||
|
||||
// shutdownNode stops an active lnd process and returns when the process has
// exited and any temporary directories have been cleaned up.
|
||||
func (nm *nodeManager) shutdownNode(node *node.HarnessNode) error {
|
||||
if err := node.Shutdown(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
delete(nm.activeNodes, node.PubKeyStr)
|
||||
return nil
|
||||
}
|
||||
|
||||
// restartNode attempts to restart a lightning node by shutting it down
|
||||
// cleanly, then restarting the process. This function is fully blocking. Upon
|
||||
// restart, the RPC connection to the node will be re-attempted, continuing iff
|
||||
// the connection attempt is successful. If the callback parameter is non-nil,
|
||||
// then the function will be executed after the node shuts down, but *before*
|
||||
// the process has been started up again.
|
||||
//
|
||||
// This method can be useful when testing edge cases such as a node
// broadcasting an invalidated prior state, persistent state recovery,
// simulating node
|
||||
// crashes, etc. Additionally, each time the node is restarted, the caller can
|
||||
// pass a set of SCBs to pass in via the Unlock method allowing them to restore
|
||||
// channels during restart.
|
||||
func (nm *nodeManager) restartNode(node *node.HarnessNode,
|
||||
callback func() error, chanBackups ...*lnrpc.ChanBackupSnapshot) error {
|
||||
|
||||
err := nm.restartNodeNoUnlock(node, callback)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// If the node doesn't have a password set, then we can exit here as we
|
||||
// don't need to unlock it.
|
||||
if len(node.Cfg.Password) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Otherwise, we'll unlock the wallet, then complete the final steps
|
||||
// for the node initialization process.
|
||||
unlockReq := &lnrpc.UnlockWalletRequest{
|
||||
WalletPassword: node.Cfg.Password,
|
||||
}
|
||||
if len(chanBackups) != 0 {
|
||||
unlockReq.ChannelBackups = chanBackups[0]
|
||||
unlockReq.RecoveryWindow = 1000
|
||||
}
|
||||
|
||||
err = wait.NoError(func() error {
|
||||
return node.Unlock(unlockReq)
|
||||
}, DefaultTimeout)
|
||||
if err != nil {
|
||||
return fmt.Errorf("%s: failed to unlock: %w", node.Name(), err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
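
// Editor-added sketch, not part of the original commit: restarting a node and
// handing it a channel backup to restore, mirroring the chanBackups variadic
// above. The snapshot is a placeholder that a real test would obtain from an
// earlier channel backup export.
func exampleRestartWithSCB(nm *nodeManager, hn *node.HarnessNode,
	snapshot *lnrpc.ChanBackupSnapshot) error {

	// No callback is needed between shutdown and restart here.
	return nm.restartNode(hn, nil, snapshot)
}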
|
||||
|
||||
// restartNodeNoUnlock attempts to restart a lightning node by shutting it down
|
||||
// cleanly, then restarting the process. In case the node was setup with a
|
||||
// seed, it will be left in the unlocked state. This function is fully
|
||||
// blocking. If the callback parameter is non-nil, then the function will be
|
||||
// executed after the node shuts down, but *before* the process has been
|
||||
// started up again.
|
||||
func (nm *nodeManager) restartNodeNoUnlock(node *node.HarnessNode,
|
||||
callback func() error) error {
|
||||
|
||||
if err := node.Stop(); err != nil {
|
||||
return fmt.Errorf("restart node got error: %w", err)
|
||||
}
|
||||
|
||||
if callback != nil {
|
||||
if err := callback(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return node.Start()
|
||||
}
|
117 lntemp/harness_setup.go Normal file
@@ -0,0 +1,117 @@
|
||||
package lntemp
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/btcsuite/btcd/integration/rpctest"
|
||||
"github.com/lightningnetwork/lnd/lntest"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// SetupHarness creates a new HarnessTest with a series of setup steps such
// that the instance is ready for usage. The steps are:
|
||||
// 1. create the directories to hold lnd files.
|
||||
// 2. start a btcd miner.
|
||||
// 3. start a chain backend (btcd, bitcoind, or neutrino).
|
||||
// 4. connect the miner and the chain backend.
|
||||
// 5. start the HarnessTest.
|
||||
func SetupHarness(t *testing.T, binaryPath, dbBackendName string,
|
||||
feeService WebFeeService) *HarnessTest {
|
||||
|
||||
t.Log("Setting up HarnessTest...")
|
||||
|
||||
// Parse testing flags that influence our test execution.
|
||||
logDir := lntest.GetLogDir()
|
||||
require.NoError(t, os.MkdirAll(logDir, 0700), "create log dir failed")
|
||||
|
||||
// Parse database backend
|
||||
dbBackend := prepareDbBackend(t, dbBackendName)
|
||||
|
||||
// Create a new HarnessTest.
|
||||
ht := NewHarnessTest(t, binaryPath, feeService, dbBackend)
|
||||
|
||||
// Init the miner.
|
||||
t.Log("Prepare the miner and mine blocks to activate segwit...")
|
||||
miner := prepareMiner(ht.runCtx, ht.T)
|
||||
|
||||
// Start a chain backend.
|
||||
chainBackend, cleanUp := prepareChainBackend(t, miner.P2PAddress())
|
||||
ht.stopChainBackend = cleanUp
|
||||
|
||||
// Connect our chainBackend to our miner.
|
||||
t.Log("Connecting the miner with the chain backend...")
|
||||
require.NoError(t, chainBackend.ConnectMiner(), "connect miner")
|
||||
|
||||
// Start the HarnessTest with the chainBackend and miner.
|
||||
ht.Start(chainBackend, miner)
|
||||
|
||||
return ht
|
||||
}
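
// Editor-added sketch, not part of the original commit: a minimal example of
// how an itest entry point could build the harness. The binary path and the
// db backend name are placeholder values; the WebFeeService implementation is
// provided elsewhere in this package.
func exampleSetup(t *testing.T, feeService WebFeeService) *HarnessTest {
	return SetupHarness(t, "./lnd-itest", "bbolt", feeService)
}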
|
||||
|
||||
// prepareMiner creates an instance of the btcd's rpctest.Harness that will act
|
||||
// as the miner for all tests. This will be used to fund the wallets of the
|
||||
// nodes within the test network and to drive blockchain related events within
|
||||
// the network. Revert the default setting of accepting non-standard
|
||||
// transactions on simnet to reject them. Transactions on the lightning network
|
||||
// should always be standard to get better guarantees of getting included
// into blocks.
|
||||
func prepareMiner(ctxt context.Context, t *testing.T) *HarnessMiner {
|
||||
miner := NewMiner(ctxt, t)
|
||||
|
||||
// Before we start anything, we want to overwrite some of the
|
||||
// connection settings to make the tests more robust. We might need to
|
||||
// restart the miner while there are already blocks present, which will
|
||||
// take a bit longer than the 1 second the default settings amount to.
|
||||
// Doubling both values will give us retries up to 4 seconds.
|
||||
miner.MaxConnRetries = rpctest.DefaultMaxConnectionRetries * 2
|
||||
miner.ConnectionRetryTimeout = rpctest.DefaultConnectionRetryTimeout * 2
|
||||
|
||||
// Set up miner and connect chain backend to it.
|
||||
require.NoError(t, miner.SetUp(true, 50))
|
||||
require.NoError(t, miner.Client.NotifyNewTransactions(false))
|
||||
|
||||
// Next, mine enough blocks in order for segwit and the CSV package
// soft-fork to activate on the regression test network.
|
||||
numBlocks := harnessNetParams.MinerConfirmationWindow * 2
|
||||
miner.GenerateBlocks(numBlocks)
|
||||
|
||||
return miner
|
||||
}
|
||||
|
||||
// prepareChainBackend creates a new chain backend.
|
||||
func prepareChainBackend(t *testing.T,
|
||||
minerAddr string) (lntest.BackendConfig, func()) {
|
||||
|
||||
chainBackend, cleanUp, err := lntest.NewBackend(
|
||||
minerAddr, harnessNetParams,
|
||||
)
|
||||
require.NoError(t, err, "new backend")
|
||||
|
||||
return chainBackend, func() {
|
||||
require.NoError(t, cleanUp(), "cleanup")
|
||||
}
|
||||
}
|
||||
|
||||
// prepareDbBackend parses a DatabaseBackend based on the name given.
|
||||
func prepareDbBackend(t *testing.T,
|
||||
dbBackendName string) lntest.DatabaseBackend {
|
||||
|
||||
var dbBackend lntest.DatabaseBackend
|
||||
switch dbBackendName {
|
||||
case "bbolt":
|
||||
dbBackend = lntest.BackendBbolt
|
||||
|
||||
case "etcd":
|
||||
dbBackend = lntest.BackendEtcd
|
||||
|
||||
case "postgres":
|
||||
dbBackend = lntest.BackendPostgres
|
||||
|
||||
default:
|
||||
require.Fail(t, "unknown db backend")
|
||||
}
|
||||
|
||||
return dbBackend
|
||||
}
|
229 lntemp/node/config.go Normal file
@@ -0,0 +1,229 @@
|
||||
package node
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"path"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/btcsuite/btcd/chaincfg"
|
||||
"github.com/lightningnetwork/lnd/chanbackup"
|
||||
"github.com/lightningnetwork/lnd/lntest"
|
||||
)
|
||||
|
||||
const (
|
||||
// ListenerFormat is the format string that is used to generate local
|
||||
// listener addresses.
|
||||
ListenerFormat = "127.0.0.1:%d"
|
||||
)
|
||||
|
||||
// Option is a function for updating a node's configuration.
|
||||
type Option func(*BaseNodeConfig)
|
||||
|
||||
// BackendConfig is an interface that abstracts away the specific chain backend
|
||||
// node implementation.
|
||||
type BackendConfig interface {
|
||||
// GenArgs returns the arguments needed to be passed to LND at startup
|
||||
// for using this node as a chain backend.
|
||||
GenArgs() []string
|
||||
|
||||
// ConnectMiner is called to establish a connection to the test miner.
|
||||
ConnectMiner() error
|
||||
|
||||
// DisconnectMiner is called to disconnect the miner.
|
||||
DisconnectMiner() error
|
||||
|
||||
// Name returns the name of the backend type.
|
||||
Name() string
|
||||
|
||||
// Credentials returns the rpc username, password and host for the
|
||||
// backend.
|
||||
Credentials() (string, string, string, error)
|
||||
}
|
||||
|
||||
// BaseNodeConfig is the base node configuration.
|
||||
type BaseNodeConfig struct {
|
||||
Name string
|
||||
|
||||
// LogFilenamePrefix is used to prefix node log files. Can be used to
|
||||
// store the current test case for simpler postmortem debugging.
|
||||
LogFilenamePrefix string
|
||||
|
||||
NetParams *chaincfg.Params
|
||||
BackendCfg BackendConfig
|
||||
BaseDir string
|
||||
ExtraArgs []string
|
||||
OriginalExtraArgs []string
|
||||
|
||||
DataDir string
|
||||
LogDir string
|
||||
TLSCertPath string
|
||||
TLSKeyPath string
|
||||
AdminMacPath string
|
||||
ReadMacPath string
|
||||
InvoiceMacPath string
|
||||
|
||||
HasSeed bool
|
||||
Password []byte
|
||||
|
||||
P2PPort int
|
||||
RPCPort int
|
||||
RESTPort int
|
||||
ProfilePort int
|
||||
|
||||
FeeURL string
|
||||
|
||||
DbBackend lntest.DatabaseBackend
|
||||
PostgresDsn string
|
||||
|
||||
// NodeID is a unique ID used to identify the node.
|
||||
NodeID uint32
|
||||
|
||||
// LndBinary is the full path to the lnd binary that was specifically
|
||||
// compiled with all required itest flags.
|
||||
LndBinary string
|
||||
|
||||
// backupDbDir is the path where a database backup is stored, if any.
|
||||
backupDbDir string
|
||||
|
||||
// postgresDbName is the name of the postgres database where lnd data
|
||||
// is stored in.
|
||||
postgresDbName string
|
||||
}
|
||||
|
||||
func (cfg BaseNodeConfig) P2PAddr() string {
|
||||
return fmt.Sprintf(ListenerFormat, cfg.P2PPort)
|
||||
}
|
||||
|
||||
func (cfg BaseNodeConfig) RPCAddr() string {
|
||||
return fmt.Sprintf(ListenerFormat, cfg.RPCPort)
|
||||
}
|
||||
|
||||
func (cfg BaseNodeConfig) RESTAddr() string {
|
||||
return fmt.Sprintf(ListenerFormat, cfg.RESTPort)
|
||||
}
|
||||
|
||||
// DBDir returns the holding directory path of the graph database.
|
||||
func (cfg BaseNodeConfig) DBDir() string {
|
||||
return filepath.Join(cfg.DataDir, "graph", cfg.NetParams.Name)
|
||||
}
|
||||
|
||||
func (cfg BaseNodeConfig) DBPath() string {
|
||||
return filepath.Join(cfg.DBDir(), "channel.db")
|
||||
}
|
||||
|
||||
func (cfg BaseNodeConfig) ChanBackupPath() string {
|
||||
return filepath.Join(
|
||||
cfg.DataDir, "chain", "bitcoin",
|
||||
fmt.Sprintf(
|
||||
"%v/%v", cfg.NetParams.Name,
|
||||
chanbackup.DefaultBackupFileName,
|
||||
),
|
||||
)
|
||||
}
|
||||
|
||||
// GenerateListeningPorts generates the listening ports designated for the
// current lightning network test, assigning any port that is still unset.
|
||||
func (cfg *BaseNodeConfig) GenerateListeningPorts() {
|
||||
if cfg.P2PPort == 0 {
|
||||
cfg.P2PPort = lntest.NextAvailablePort()
|
||||
}
|
||||
if cfg.RPCPort == 0 {
|
||||
cfg.RPCPort = lntest.NextAvailablePort()
|
||||
}
|
||||
if cfg.RESTPort == 0 {
|
||||
cfg.RESTPort = lntest.NextAvailablePort()
|
||||
}
|
||||
if cfg.ProfilePort == 0 {
|
||||
cfg.ProfilePort = lntest.NextAvailablePort()
|
||||
}
|
||||
}
|
||||
|
||||
// BaseConfig returns the base node configuration struct.
|
||||
func (cfg *BaseNodeConfig) BaseConfig() *BaseNodeConfig {
|
||||
return cfg
|
||||
}
|
||||
|
||||
// GenArgs generates a slice of command line arguments from the lightning node
|
||||
// config struct.
|
||||
func (cfg *BaseNodeConfig) GenArgs() []string {
|
||||
var args []string
|
||||
|
||||
switch cfg.NetParams {
|
||||
case &chaincfg.TestNet3Params:
|
||||
args = append(args, "--bitcoin.testnet")
|
||||
case &chaincfg.SimNetParams:
|
||||
args = append(args, "--bitcoin.simnet")
|
||||
case &chaincfg.RegressionNetParams:
|
||||
args = append(args, "--bitcoin.regtest")
|
||||
}
|
||||
|
||||
backendArgs := cfg.BackendCfg.GenArgs()
|
||||
args = append(args, backendArgs...)
|
||||
|
||||
nodeArgs := []string{
|
||||
"--bitcoin.active",
|
||||
"--nobootstrap",
|
||||
"--debuglevel=debug",
|
||||
"--bitcoin.defaultchanconfs=1",
|
||||
"--accept-keysend",
|
||||
fmt.Sprintf("--db.batch-commit-interval=%v", commitInterval),
|
||||
fmt.Sprintf("--bitcoin.defaultremotedelay=%v",
|
||||
lntest.DefaultCSV),
|
||||
fmt.Sprintf("--rpclisten=%v", cfg.RPCAddr()),
|
||||
fmt.Sprintf("--restlisten=%v", cfg.RESTAddr()),
|
||||
fmt.Sprintf("--restcors=https://%v", cfg.RESTAddr()),
|
||||
fmt.Sprintf("--listen=%v", cfg.P2PAddr()),
|
||||
fmt.Sprintf("--externalip=%v", cfg.P2PAddr()),
|
||||
fmt.Sprintf("--lnddir=%v", cfg.BaseDir),
|
||||
fmt.Sprintf("--adminmacaroonpath=%v", cfg.AdminMacPath),
|
||||
fmt.Sprintf("--readonlymacaroonpath=%v", cfg.ReadMacPath),
|
||||
fmt.Sprintf("--invoicemacaroonpath=%v", cfg.InvoiceMacPath),
|
||||
fmt.Sprintf("--trickledelay=%v", trickleDelay),
|
||||
fmt.Sprintf("--profile=%d", cfg.ProfilePort),
|
||||
fmt.Sprintf("--caches.rpc-graph-cache-duration=%d", 0),
|
||||
}
|
||||
args = append(args, nodeArgs...)
|
||||
|
||||
if !cfg.HasSeed {
|
||||
args = append(args, "--noseedbackup")
|
||||
}
|
||||
|
||||
switch cfg.DbBackend {
|
||||
case lntest.BackendEtcd:
|
||||
args = append(args, "--db.backend=etcd")
|
||||
args = append(args, "--db.etcd.embedded")
|
||||
args = append(
|
||||
args, fmt.Sprintf(
|
||||
"--db.etcd.embedded_client_port=%v",
|
||||
lntest.NextAvailablePort(),
|
||||
),
|
||||
)
|
||||
args = append(
|
||||
args, fmt.Sprintf(
|
||||
"--db.etcd.embedded_peer_port=%v",
|
||||
lntest.NextAvailablePort(),
|
||||
),
|
||||
)
|
||||
args = append(
|
||||
args, fmt.Sprintf(
|
||||
"--db.etcd.embedded_log_file=%v",
|
||||
path.Join(cfg.LogDir, "etcd.log"),
|
||||
),
|
||||
)
|
||||
|
||||
case lntest.BackendPostgres:
|
||||
args = append(args, "--db.backend=postgres")
|
||||
args = append(args, "--db.postgres.dsn="+cfg.PostgresDsn)
|
||||
}
|
||||
|
||||
if cfg.FeeURL != "" {
|
||||
args = append(args, "--feeurl="+cfg.FeeURL)
|
||||
}
|
||||
|
||||
// Put the extra args at the end so they can override the defaults above.
|
||||
if cfg.ExtraArgs != nil {
|
||||
args = append(args, cfg.ExtraArgs...)
|
||||
}
|
||||
|
||||
return args
|
||||
}
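
// Editor-added sketch, not part of the original commit: how the config above
// can be turned into a command line. The backend value and base dir are
// placeholders that the harness supplies in real tests.
func exampleGenArgs(backend BackendConfig) []string {
	cfg := &BaseNodeConfig{
		Name:       "alice",
		NetParams:  &chaincfg.RegressionNetParams,
		BackendCfg: backend,
		BaseDir:    "/tmp/lndtest-node-alice",
	}

	// Pick free listening ports for p2p, rpc, rest and profiling.
	cfg.GenerateListeningPorts()

	// Produces flags such as --bitcoin.regtest and
	// --rpclisten=127.0.0.1:<port>.
	return cfg.GenArgs()
}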
|
862 lntemp/node/harness_node.go Normal file
@@ -0,0 +1,862 @@
|
||||
package node
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/jackc/pgx/v4/pgxpool"
|
||||
"github.com/lightningnetwork/lnd/lnrpc"
|
||||
"github.com/lightningnetwork/lnd/lntemp/rpc"
|
||||
"github.com/lightningnetwork/lnd/lntest"
|
||||
"github.com/lightningnetwork/lnd/lntest/wait"
|
||||
"github.com/lightningnetwork/lnd/macaroons"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/status"
|
||||
"gopkg.in/macaroon.v2"
|
||||
)
|
||||
|
||||
const (
|
||||
// logPubKeyBytes is the number of bytes of the node's PubKey that will
|
||||
// be appended to the log file name. The whole PubKey is too long and
|
||||
// not really necessary to quickly identify what node produced which
|
||||
// log file.
|
||||
logPubKeyBytes = 4
|
||||
|
||||
// trickleDelay is the amount of time in milliseconds between each
|
||||
// release of announcements by AuthenticatedGossiper to the network.
|
||||
trickleDelay = 50
|
||||
|
||||
postgresDsn = "postgres://postgres:postgres@localhost:" +
|
||||
"6432/%s?sslmode=disable"
|
||||
|
||||
// commitInterval specifies the maximum interval the graph database
|
||||
// will wait between attempting to flush a batch of modifications to
|
||||
// disk (db.batch-commit-interval).
|
||||
commitInterval = 10 * time.Millisecond
|
||||
)
|
||||
|
||||
// HarnessNode represents an instance of lnd running within our test network
|
||||
// harness. It's responsible for managing the lnd process, grpc connection, and
|
||||
// wallet auth. A HarnessNode is built upon its rpc clients, represented in
|
||||
// `HarnessRPC`. It also has a `State` which holds its internal state, and a
|
||||
// `Watcher` that keeps track of its topology updates.
|
||||
type HarnessNode struct {
|
||||
*testing.T
|
||||
|
||||
// Cfg holds the config values for the node.
|
||||
Cfg *BaseNodeConfig
|
||||
|
||||
// RPC holds a list of RPC clients.
|
||||
RPC *rpc.HarnessRPC
|
||||
|
||||
// State records the current state of the node.
|
||||
State *State
|
||||
|
||||
// Watcher watches the node's topology updates.
|
||||
Watcher *nodeWatcher
|
||||
|
||||
// PubKey is the serialized compressed identity public key of the node.
|
||||
// This field will only be populated once the node itself has been
|
||||
// started via the start() method.
|
||||
PubKey [33]byte
|
||||
PubKeyStr string
|
||||
|
||||
// conn is the underlying connection to the grpc endpoint of the node.
|
||||
conn *grpc.ClientConn
|
||||
|
||||
// runCtx is a context with cancel method. It's used to signal when the
|
||||
// node needs to quit, and used as the parent context when spawning
|
||||
// children contexts for RPC requests.
|
||||
runCtx context.Context
|
||||
cancel context.CancelFunc
|
||||
|
||||
// filename is the log file's name.
|
||||
filename string
|
||||
|
||||
cmd *exec.Cmd
|
||||
logFile *os.File
|
||||
}
|
||||
|
||||
// NewHarnessNode creates a new test lightning node instance from the passed
|
||||
// config.
|
||||
func NewHarnessNode(t *testing.T, cfg *BaseNodeConfig) (*HarnessNode, error) {
|
||||
if cfg.BaseDir == "" {
|
||||
var err error
|
||||
cfg.BaseDir, err = ioutil.TempDir("", "lndtest-node")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
cfg.DataDir = filepath.Join(cfg.BaseDir, "data")
|
||||
cfg.LogDir = filepath.Join(cfg.BaseDir, "logs")
|
||||
cfg.TLSCertPath = filepath.Join(cfg.BaseDir, "tls.cert")
|
||||
cfg.TLSKeyPath = filepath.Join(cfg.BaseDir, "tls.key")
|
||||
|
||||
networkDir := filepath.Join(
|
||||
cfg.DataDir, "chain", "bitcoin", cfg.NetParams.Name,
|
||||
)
|
||||
cfg.AdminMacPath = filepath.Join(networkDir, "admin.macaroon")
|
||||
cfg.ReadMacPath = filepath.Join(networkDir, "readonly.macaroon")
|
||||
cfg.InvoiceMacPath = filepath.Join(networkDir, "invoice.macaroon")
|
||||
|
||||
cfg.GenerateListeningPorts()
|
||||
|
||||
// Create temporary database.
|
||||
var dbName string
|
||||
if cfg.DbBackend == lntest.BackendPostgres {
|
||||
var err error
|
||||
dbName, err = createTempPgDb()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cfg.PostgresDsn = postgresDatabaseDsn(dbName)
|
||||
}
|
||||
|
||||
cfg.OriginalExtraArgs = cfg.ExtraArgs
|
||||
cfg.postgresDbName = dbName
|
||||
|
||||
return &HarnessNode{
|
||||
T: t,
|
||||
Cfg: cfg,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// InitRPCClients initializes a list of RPC clients for the node.
|
||||
func (hn *HarnessNode) InitRPCClients(c *grpc.ClientConn) {
|
||||
hn.conn = c
|
||||
|
||||
// Init all the rpc clients.
|
||||
hn.RPC = rpc.NewHarnessRPC(hn.runCtx, hn.T, c, hn.Name())
|
||||
|
||||
// Init the node's internal state.
|
||||
hn.State = newState(hn.RPC)
|
||||
|
||||
// Init the topology watcher.
|
||||
hn.Watcher = newNodeWatcher(hn.RPC, hn.State)
|
||||
}
|
||||
|
||||
// Name returns the name of this node set during initialization.
|
||||
func (hn *HarnessNode) Name() string {
|
||||
return hn.Cfg.Name
|
||||
}
|
||||
|
||||
// UpdateState updates the node's internal state.
|
||||
func (hn *HarnessNode) UpdateState() {
|
||||
hn.State.updateState()
|
||||
}
|
||||
|
||||
// String gives the internal state of the node which is useful for debugging.
|
||||
func (hn *HarnessNode) String() string {
|
||||
type nodeCfg struct {
|
||||
LogFilenamePrefix string
|
||||
ExtraArgs []string
|
||||
HasSeed bool
|
||||
P2PPort int
|
||||
RPCPort int
|
||||
RESTPort int
|
||||
ProfilePort int
|
||||
AcceptKeySend bool
|
||||
FeeURL string
|
||||
}
|
||||
|
||||
nodeState := struct {
|
||||
NodeID uint32
|
||||
Name string
|
||||
PubKey string
|
||||
State *State
|
||||
NodeCfg nodeCfg
|
||||
}{
|
||||
NodeID: hn.Cfg.NodeID,
|
||||
Name: hn.Cfg.Name,
|
||||
PubKey: hn.PubKeyStr,
|
||||
State: hn.State,
|
||||
NodeCfg: nodeCfg{
|
||||
LogFilenamePrefix: hn.Cfg.LogFilenamePrefix,
|
||||
ExtraArgs: hn.Cfg.ExtraArgs,
|
||||
P2PPort: hn.Cfg.P2PPort,
|
||||
RPCPort: hn.Cfg.RPCPort,
|
||||
RESTPort: hn.Cfg.RESTPort,
|
||||
},
|
||||
}
|
||||
|
||||
stateBytes, err := json.MarshalIndent(nodeState, "", "\t")
|
||||
if err != nil {
|
||||
return fmt.Sprintf("\n encode node state with err: %v", err)
|
||||
}
|
||||
|
||||
return fmt.Sprintf("\nnode state: %s", stateBytes)
|
||||
}
|
||||
|
||||
// WaitUntilStarted waits until the wallet state flips from "WAITING_TO_START".
|
||||
func (hn *HarnessNode) WaitUntilStarted() error {
|
||||
return hn.waitTillServerState(func(s lnrpc.WalletState) bool {
|
||||
return s != lnrpc.WalletState_WAITING_TO_START
|
||||
})
|
||||
}
|
||||
|
||||
// WaitUntilServerActive waits until the lnd daemon is fully started.
|
||||
func (hn *HarnessNode) WaitUntilServerActive() error {
|
||||
return hn.waitTillServerState(func(s lnrpc.WalletState) bool {
|
||||
return s == lnrpc.WalletState_SERVER_ACTIVE
|
||||
})
|
||||
}
|
||||
|
||||
// Unlock attempts to unlock the wallet of the target HarnessNode. This method
|
||||
// should be called after the restart of a HarnessNode that was created with a
|
||||
// seed+password. Once this method returns, the HarnessNode will be ready to
|
||||
// accept normal gRPC requests and harness commands.
|
||||
func (hn *HarnessNode) Unlock(unlockReq *lnrpc.UnlockWalletRequest) error {
|
||||
// Otherwise, we'll need to unlock the node before it's able to start
|
||||
// up properly.
|
||||
hn.RPC.UnlockWallet(unlockReq)
|
||||
|
||||
// Now that the wallet has been unlocked, we'll wait for the RPC client
|
||||
// to be ready, then establish the normal gRPC connection.
|
||||
return hn.InitNode(nil)
|
||||
}
|
||||
|
||||
// AddToLogf adds a line of choice to the node's logfile. This is useful
|
||||
// to interleave test output with output from the node.
|
||||
func (hn *HarnessNode) AddToLogf(format string, a ...interface{}) {
|
||||
// If this node was not set up with a log file, just return early.
|
||||
if hn.logFile == nil {
|
||||
return
|
||||
}
|
||||
|
||||
desc := fmt.Sprintf("itest: %s\n", fmt.Sprintf(format, a...))
|
||||
if _, err := hn.logFile.WriteString(desc); err != nil {
|
||||
hn.printErrf("write to log err: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// ReadMacaroon waits a given duration for the macaroon file to be created. If
|
||||
// the file is readable within the timeout, its content is de-serialized as a
|
||||
// macaroon and returned.
|
||||
func (hn *HarnessNode) ReadMacaroon(macPath string, timeout time.Duration) (
|
||||
*macaroon.Macaroon, error) {
|
||||
|
||||
// Wait until macaroon file is created and has valid content before
|
||||
// using it.
|
||||
var mac *macaroon.Macaroon
|
||||
err := wait.NoError(func() error {
|
||||
macBytes, err := ioutil.ReadFile(macPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error reading macaroon file: %v",
|
||||
err)
|
||||
}
|
||||
|
||||
newMac := &macaroon.Macaroon{}
|
||||
if err = newMac.UnmarshalBinary(macBytes); err != nil {
|
||||
return fmt.Errorf("error unmarshalling macaroon "+
|
||||
"file: %v", err)
|
||||
}
|
||||
mac = newMac
|
||||
|
||||
return nil
|
||||
}, timeout)
|
||||
|
||||
return mac, err
|
||||
}
|
||||
|
||||
// ConnectRPCWithMacaroon uses the TLS certificate and given macaroon to
|
||||
// create a gRPC client connection.
|
||||
func (hn *HarnessNode) ConnectRPCWithMacaroon(mac *macaroon.Macaroon) (
|
||||
*grpc.ClientConn, error) {
|
||||
|
||||
// Wait until TLS certificate is created and has valid content before
|
||||
// using it, up to 30 sec.
|
||||
var tlsCreds credentials.TransportCredentials
|
||||
err := wait.NoError(func() error {
|
||||
var err error
|
||||
tlsCreds, err = credentials.NewClientTLSFromFile(
|
||||
hn.Cfg.TLSCertPath, "",
|
||||
)
|
||||
return err
|
||||
}, DefaultTimeout)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error reading TLS cert: %v", err)
|
||||
}
|
||||
|
||||
opts := []grpc.DialOption{
|
||||
grpc.WithBlock(),
|
||||
grpc.WithTransportCredentials(tlsCreds),
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(hn.runCtx, DefaultTimeout)
|
||||
defer cancel()
|
||||
|
||||
if mac == nil {
|
||||
return grpc.DialContext(ctx, hn.Cfg.RPCAddr(), opts...)
|
||||
}
|
||||
macCred, err := macaroons.NewMacaroonCredential(mac)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error cloning mac: %v", err)
|
||||
}
|
||||
opts = append(opts, grpc.WithPerRPCCredentials(macCred))
|
||||
|
||||
return grpc.DialContext(ctx, hn.Cfg.RPCAddr(), opts...)
|
||||
}
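
// Editor-added sketch, not part of the original commit: dialing the node with
// its read-only macaroon instead of the admin macaroon used by ConnectRPC
// below.
func (hn *HarnessNode) exampleConnectReadOnly() (*grpc.ClientConn, error) {
	mac, err := hn.ReadMacaroon(hn.Cfg.ReadMacPath, DefaultTimeout)
	if err != nil {
		return nil, err
	}

	return hn.ConnectRPCWithMacaroon(mac)
}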
|
||||
|
||||
// ConnectRPC uses the TLS certificate and admin macaroon files written by the
|
||||
// lnd node to create a gRPC client connection.
|
||||
func (hn *HarnessNode) ConnectRPC() (*grpc.ClientConn, error) {
|
||||
// If we should use a macaroon, always take the admin macaroon as a
|
||||
// default.
|
||||
mac, err := hn.ReadMacaroon(hn.Cfg.AdminMacPath, DefaultTimeout)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return hn.ConnectRPCWithMacaroon(mac)
|
||||
}
|
||||
|
||||
// SetExtraArgs assigns the ExtraArgs field for the node's configuration. The
|
||||
// changes will take effect on restart.
|
||||
func (hn *HarnessNode) SetExtraArgs(extraArgs []string) {
|
||||
hn.Cfg.ExtraArgs = extraArgs
|
||||
}
|
||||
|
||||
// StartLndCmd handles the startup of lnd, creating log files, and possibly
// killing the process when needed.
|
||||
func (hn *HarnessNode) StartLndCmd() error {
|
||||
// Init the runCtx.
|
||||
hn.runCtx, hn.cancel = context.WithCancel(context.Background())
|
||||
|
||||
args := hn.Cfg.GenArgs()
|
||||
hn.cmd = exec.Command(hn.Cfg.LndBinary, args...) //nolint:gosec
|
||||
|
||||
// Redirect stderr output to buffer
|
||||
var errb bytes.Buffer
|
||||
hn.cmd.Stderr = &errb
|
||||
|
||||
// If the logoutput flag is passed, redirect output from the nodes to
|
||||
// log files.
|
||||
if *lntest.LogOutput {
|
||||
err := addLogFile(hn)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Start the process.
|
||||
if err := hn.cmd.Start(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// StartWithSeed starts the lnd process, creates the grpc connection without
// macaroon auth, and waits until the server is reported as waiting to
// start.
|
||||
//
|
||||
// NOTE: caller needs to take extra step to create and unlock the wallet.
|
||||
func (hn *HarnessNode) StartWithSeed() error {
|
||||
// Start lnd process and prepare logs.
|
||||
if err := hn.StartLndCmd(); err != nil {
|
||||
return fmt.Errorf("start lnd error: %w", err)
|
||||
}
|
||||
|
||||
// Create an unauthed connection.
|
||||
conn, err := hn.ConnectRPCWithMacaroon(nil)
|
||||
if err != nil {
|
||||
return fmt.Errorf("ConnectRPCWithMacaroon err: %w", err)
|
||||
}
|
||||
|
||||
// Since the conn is not authed, only the `WalletUnlocker` and `State`
|
||||
// clients can be inited from this conn.
|
||||
hn.conn = conn
|
||||
hn.RPC = rpc.NewHarnessRPC(hn.runCtx, hn.T, conn, hn.Name())
|
||||
|
||||
// Wait till the server is starting.
|
||||
return hn.WaitUntilStarted()
|
||||
}
|
||||
|
||||
// Start starts the lnd process, creates the grpc connection, and waits
// until the server is fully started.
|
||||
func (hn *HarnessNode) Start() error {
|
||||
// Start lnd process and prepare logs.
|
||||
if err := hn.StartLndCmd(); err != nil {
|
||||
return fmt.Errorf("start lnd error: %w", err)
|
||||
}
|
||||
|
||||
// Since Stop uses the LightningClient to stop the node, if we fail to
|
||||
// get a connected client, we have to kill the process.
|
||||
conn, err := hn.ConnectRPC()
|
||||
if err != nil {
|
||||
err = fmt.Errorf("ConnectRPC err: %w", err)
|
||||
cmdErr := hn.kill()
|
||||
if cmdErr != nil {
|
||||
err = fmt.Errorf("kill process got err: %w: %v",
|
||||
cmdErr, err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Init all the RPC clients.
|
||||
hn.InitRPCClients(conn)
|
||||
|
||||
// Wait till the server is starting.
|
||||
if err := hn.WaitUntilStarted(); err != nil {
|
||||
return fmt.Errorf("waiting for start got: %v", err)
|
||||
}
|
||||
|
||||
// Subscribe for topology updates.
|
||||
return hn.initLightningClient()
|
||||
}
|
||||
|
||||
// InitNode waits until the main gRPC server is detected as active, then
// completes the normal HarnessNode gRPC connection creation. A non-nil
|
||||
// `macBytes` indicates the node is initialized stateless, otherwise it will
|
||||
// use the admin macaroon.
|
||||
func (hn *HarnessNode) InitNode(macBytes []byte) error {
|
||||
var (
|
||||
conn *grpc.ClientConn
|
||||
err error
|
||||
)
|
||||
|
||||
// If the node has been initialized stateless, we need to pass the
|
||||
// macaroon to the client.
|
||||
if macBytes != nil {
|
||||
adminMac := &macaroon.Macaroon{}
|
||||
err := adminMac.UnmarshalBinary(macBytes)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unmarshal failed: %w", err)
|
||||
}
|
||||
conn, err = hn.ConnectRPCWithMacaroon(adminMac)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
// Normal initialization, we expect a macaroon to be in the
|
||||
// file system.
|
||||
conn, err = hn.ConnectRPC()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Init all the RPC clients.
|
||||
hn.InitRPCClients(conn)
|
||||
|
||||
return hn.initLightningClient()
|
||||
}
|
||||
|
||||
// waitTillServerState subscribes to the server's state changes and blocks
// until the server is in the targeted state.
|
||||
func (hn *HarnessNode) waitTillServerState(
|
||||
predicate func(state lnrpc.WalletState) bool) error {
|
||||
|
||||
client := hn.RPC.SubscribeState()
|
||||
|
||||
errChan := make(chan error, 1)
|
||||
done := make(chan struct{})
|
||||
go func() {
|
||||
for {
|
||||
resp, err := client.Recv()
|
||||
if err != nil {
|
||||
errChan <- err
|
||||
return
|
||||
}
|
||||
|
||||
if predicate(resp.State) {
|
||||
close(done)
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-time.After(lntest.NodeStartTimeout):
|
||||
return fmt.Errorf("timeout waiting for server state")
|
||||
case err := <-errChan:
|
||||
return fmt.Errorf("receive server state err: %v", err)
|
||||
|
||||
case <-done:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
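
// Editor-added sketch, not part of the original commit: waitTillServerState
// with a custom predicate, here waiting until the main RPC server is active,
// which happens before the SERVER_ACTIVE state used above.
func (hn *HarnessNode) exampleWaitRPCActive() error {
	return hn.waitTillServerState(func(s lnrpc.WalletState) bool {
		return s == lnrpc.WalletState_RPC_ACTIVE
	})
}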
|
||||
|
||||
// initLightningClient blocks until the lnd server is fully started and
|
||||
// subscribes the harness node to graph topology updates. This method also
|
||||
// spawns a lightning network watcher for this node, which watches for topology
|
||||
// changes.
|
||||
func (hn *HarnessNode) initLightningClient() error {
|
||||
// Wait until the server is fully started.
|
||||
if err := hn.WaitUntilServerActive(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Set the harness node's pubkey to what the node claims in GetInfo.
|
||||
// The RPC must have been started at this point.
|
||||
if err := hn.attachPubKey(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Launch the watcher that will hook into graph related topology change
|
||||
// from the PoV of this node.
|
||||
started := make(chan error, 1)
|
||||
go hn.Watcher.topologyWatcher(hn.runCtx, started)
|
||||
|
||||
select {
|
||||
// First time reading the channel indicates the topology client is
|
||||
// started.
|
||||
case err := <-started:
|
||||
if err != nil {
|
||||
return fmt.Errorf("create topology client stream "+
|
||||
"got err: %v", err)
|
||||
}
|
||||
|
||||
case <-time.After(DefaultTimeout):
|
||||
return fmt.Errorf("timeout creating topology client stream")
|
||||
}
|
||||
|
||||
// Catch topology client stream error inside a goroutine.
|
||||
go func() {
|
||||
select {
|
||||
case err := <-started:
|
||||
hn.printErrf("topology client: %v", err)
|
||||
|
||||
case <-hn.runCtx.Done():
|
||||
}
|
||||
}()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// attachPubKey queries an unlocked node to retrieve its public key.
|
||||
func (hn *HarnessNode) attachPubKey() error {
|
||||
// Obtain the lnid of this node for quick identification purposes.
|
||||
info := hn.RPC.GetInfo()
|
||||
hn.PubKeyStr = info.IdentityPubkey
|
||||
|
||||
pubkey, err := hex.DecodeString(info.IdentityPubkey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
copy(hn.PubKey[:], pubkey)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// cleanup cleans up all the temporary files created by the node's process.
|
||||
func (hn *HarnessNode) cleanup() error {
|
||||
if hn.Cfg.backupDbDir != "" {
|
||||
err := os.RemoveAll(hn.Cfg.backupDbDir)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to remove backup dir: %v",
|
||||
err)
|
||||
}
|
||||
}
|
||||
|
||||
return os.RemoveAll(hn.Cfg.BaseDir)
|
||||
}
|
||||
|
||||
// waitForProcessExit launches a new goroutine that bubbles up any potential
// fatal process errors to the goroutine running the tests.
|
||||
func (hn *HarnessNode) waitForProcessExit() {
|
||||
errChan := make(chan error, 1)
|
||||
go func() {
|
||||
err := hn.cmd.Wait()
|
||||
errChan <- err
|
||||
}()
|
||||
|
||||
select {
|
||||
case err := <-errChan:
|
||||
if err == nil {
|
||||
break
|
||||
}
|
||||
|
||||
// If the process has already been canceled, we can exit early
|
||||
// as the logs have already been saved.
|
||||
if strings.Contains(err.Error(), "Wait was already called") {
|
||||
return
|
||||
}
|
||||
|
||||
// Otherwise, we print the error, break the select and save
|
||||
// logs.
|
||||
hn.printErrf("wait process exit got err: %v", err)
|
||||
break
|
||||
|
||||
case <-time.After(DefaultTimeout * 2):
|
||||
hn.printErrf("timeout waiting for process to exit")
|
||||
}
|
||||
|
||||
// Make sure log file is closed and renamed if necessary.
|
||||
finalizeLogfile(hn)
|
||||
|
||||
// Rename the etcd.log file if the node was running on embedded
|
||||
// etcd.
|
||||
finalizeEtcdLog(hn)
|
||||
}
|
||||
|
||||
// Stop attempts to stop the active lnd process.
|
||||
func (hn *HarnessNode) Stop() error {
|
||||
// Do nothing if the process is not running.
|
||||
if hn.runCtx == nil {
|
||||
hn.printErrf("found nil run context")
|
||||
return nil
|
||||
}
|
||||
|
||||
// Stop the runCtx.
|
||||
hn.cancel()
|
||||
|
||||
// Wait for lnd process to exit in the end.
|
||||
defer hn.waitForProcessExit()
|
||||
|
||||
// If we ever reach the state where `Watcher` is initialized, it
// means the node has an authed connection and all its RPC clients
// are ready for use. Thus we will try to stop it via the RPC.
|
||||
if hn.Watcher != nil {
|
||||
// Don't watch for error because sometimes the RPC connection
|
||||
// gets closed before a response is returned.
|
||||
req := lnrpc.StopRequest{}
|
||||
|
||||
ctxt, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
err := wait.NoError(func() error {
|
||||
_, err := hn.RPC.LN.StopDaemon(ctxt, &req)
|
||||
|
||||
switch {
|
||||
case err == nil:
|
||||
return nil
|
||||
|
||||
// Try again if a recovery/rescan is in progress.
|
||||
case strings.Contains(
|
||||
err.Error(), "recovery in progress",
|
||||
):
|
||||
return err
|
||||
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}, DefaultTimeout)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Wait for goroutines to be finished.
|
||||
done := make(chan struct{})
|
||||
go func() {
|
||||
hn.Watcher.wg.Wait()
|
||||
close(done)
|
||||
}()
|
||||
|
||||
// If the goroutines fail to finish before timeout, we'll print
|
||||
// the error to console and continue.
|
||||
select {
|
||||
case <-time.After(DefaultTimeout):
|
||||
hn.printErrf("timeout on wait group")
|
||||
case <-done:
|
||||
}
|
||||
} else {
|
||||
// If the rpc clients are not initialized, we kill the process
// manually.
|
||||
hn.printErrf("found nil RPC clients")
|
||||
if err := hn.kill(); err != nil {
|
||||
return fmt.Errorf("killing process got: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Close any attempts at further grpc connections.
|
||||
if hn.conn != nil {
|
||||
err := status.Code(hn.conn.Close())
|
||||
switch err {
|
||||
case codes.OK:
|
||||
return nil
|
||||
|
||||
// When the context is canceled above, we might get the
|
||||
// following error as the context is no longer active.
|
||||
case codes.Canceled:
|
||||
return nil
|
||||
|
||||
case codes.Unknown:
|
||||
return fmt.Errorf("unknown error attempting to stop "+
|
||||
"grpc client: %v", err)
|
||||
|
||||
default:
|
||||
return fmt.Errorf("error attempting to stop "+
|
||||
"grpc client: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Shutdown stops the active lnd process and cleans up any temporary
|
||||
// directories created along the way.
|
||||
func (hn *HarnessNode) Shutdown() error {
|
||||
if err := hn.Stop(); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := hn.cleanup(); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// kill kills the lnd process.
|
||||
func (hn *HarnessNode) kill() error {
|
||||
return hn.cmd.Process.Kill()
|
||||
}
|
||||
|
||||
// printErrf prints an error to the console.
|
||||
func (hn *HarnessNode) printErrf(format string, a ...interface{}) {
|
||||
fmt.Printf("itest error from [%s:%s]: %s\n", // nolint:forbidigo
|
||||
hn.Cfg.LogFilenamePrefix, hn.Cfg.Name,
|
||||
fmt.Sprintf(format, a...))
|
||||
}
|
||||
|
||||
func postgresDatabaseDsn(dbName string) string {
|
||||
return fmt.Sprintf(postgresDsn, dbName)
|
||||
}
|
||||
|
||||
// createTempPgDb creates a temp postgres database.
|
||||
func createTempPgDb() (string, error) {
|
||||
// Create random database name.
|
||||
randBytes := make([]byte, 8)
|
||||
_, err := rand.Read(randBytes)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
dbName := "itest_" + hex.EncodeToString(randBytes)
|
||||
|
||||
// Create database.
|
||||
err = executePgQuery("CREATE DATABASE " + dbName)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return dbName, nil
|
||||
}
|
||||
|
||||
// executePgQuery executes a SQL statement in a postgres db.
|
||||
func executePgQuery(query string) error {
|
||||
pool, err := pgxpool.Connect(
|
||||
context.Background(),
|
||||
postgresDatabaseDsn("postgres"),
|
||||
)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to connect to database: %v", err)
|
||||
}
|
||||
defer pool.Close()
|
||||
|
||||
_, err = pool.Exec(context.Background(), query)
|
||||
return err
|
||||
}
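
// Editor-added sketch, not part of the original commit: how the postgres
// helpers above fit together when a node uses the postgres db backend. The
// resulting DSN follows the postgresDsn format defined at the top of this
// file.
func examplePostgresSetup() (string, error) {
	// Create a uniquely named temp database for this node.
	dbName, err := createTempPgDb()
	if err != nil {
		return "", err
	}

	// e.g. postgres://postgres:postgres@localhost:6432/itest_<hex>?sslmode=disable
	return postgresDatabaseDsn(dbName), nil
}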
|
||||
|
||||
// renameFile is a helper to rename (log) files created during integration
|
||||
// tests.
|
||||
func renameFile(fromFileName, toFileName string) {
|
||||
err := os.Rename(fromFileName, toFileName)
|
||||
if err != nil {
|
||||
fmt.Printf("could not rename %s to %s: %v\n", // nolint:forbidigo
|
||||
fromFileName, toFileName, err)
|
||||
}
|
||||
}
|
||||
|
||||
// getFinalizedLogFilePrefix returns the finalized log filename prefix.
|
||||
func getFinalizedLogFilePrefix(hn *HarnessNode) string {
|
||||
pubKeyHex := hex.EncodeToString(
|
||||
hn.PubKey[:logPubKeyBytes],
|
||||
)
|
||||
|
||||
return fmt.Sprintf("%s/%d-%s-%s-%s",
|
||||
lntest.GetLogDir(), hn.Cfg.NodeID,
|
||||
hn.Cfg.LogFilenamePrefix,
|
||||
hn.Cfg.Name, pubKeyHex)
|
||||
}
|
||||
|
||||
// finalizeLogfile closes the node's log file and, if log output is enabled,
// renames it to its finalized name.
|
||||
func finalizeLogfile(hn *HarnessNode) {
|
||||
// Exit early if there's no log file.
|
||||
if hn.logFile == nil {
|
||||
return
|
||||
}
|
||||
|
||||
hn.logFile.Close()
|
||||
|
||||
// If logoutput flag is not set, return early.
|
||||
if !*lntest.LogOutput {
|
||||
return
|
||||
}
|
||||
|
||||
newFileName := fmt.Sprintf("%v.log",
|
||||
getFinalizedLogFilePrefix(hn),
|
||||
)
|
||||
renameFile(hn.filename, newFileName)
|
||||
}
|
||||
|
||||
// finalizeEtcdLog saves the etcd log files when test ends.
|
||||
func finalizeEtcdLog(hn *HarnessNode) {
|
||||
// Exit early if this is not etcd backend.
|
||||
if hn.Cfg.DbBackend != lntest.BackendEtcd {
|
||||
return
|
||||
}
|
||||
|
||||
etcdLogFileName := fmt.Sprintf("%s/etcd.log", hn.Cfg.LogDir)
|
||||
newEtcdLogFileName := fmt.Sprintf("%v-etcd.log",
|
||||
getFinalizedLogFilePrefix(hn),
|
||||
)
|
||||
|
||||
renameFile(etcdLogFileName, newEtcdLogFileName)
|
||||
}
|
||||
|
||||
// addLogFile creates log files used by this node.
|
||||
func addLogFile(hn *HarnessNode) error {
|
||||
var fileName string
|
||||
|
||||
dir := lntest.GetLogDir()
|
||||
fileName = fmt.Sprintf("%s/%d-%s-%s-%s.log", dir, hn.Cfg.NodeID,
|
||||
hn.Cfg.LogFilenamePrefix, hn.Cfg.Name,
|
||||
hex.EncodeToString(hn.PubKey[:logPubKeyBytes]))
|
||||
|
||||
// If the node's PubKey is not yet initialized, create a temporary file
|
||||
// name. Later, after the PubKey has been initialized, the file can be
|
||||
// moved to its final name with the PubKey included.
|
||||
if bytes.Equal(hn.PubKey[:4], []byte{0, 0, 0, 0}) {
|
||||
fileName = fmt.Sprintf("%s/%d-%s-%s-tmp__.log", dir,
|
||||
hn.Cfg.NodeID, hn.Cfg.LogFilenamePrefix,
|
||||
hn.Cfg.Name)
|
||||
}
|
||||
|
||||
// Create file if not exists, otherwise append.
|
||||
file, err := os.OpenFile(fileName,
|
||||
os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0666)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Pass node's stderr to both errb and the file.
|
||||
w := io.MultiWriter(hn.cmd.Stderr, file)
|
||||
hn.cmd.Stderr = w
|
||||
|
||||
// Pass the node's stdout only to the file.
|
||||
hn.cmd.Stdout = file
|
||||
|
||||
// Let the node keep a reference to this file, such that we can add to
|
||||
// it if necessary.
|
||||
hn.logFile = file
|
||||
|
||||
hn.filename = fileName
|
||||
|
||||
return nil
|
||||
}
|
360
lntemp/node/state.go
Normal file
@ -0,0 +1,360 @@
|
||||
package node
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"math"
|
||||
"time"
|
||||
|
||||
"github.com/btcsuite/btcd/wire"
|
||||
"github.com/lightningnetwork/lnd/lnrpc"
|
||||
"github.com/lightningnetwork/lnd/lnrpc/walletrpc"
|
||||
"github.com/lightningnetwork/lnd/lntemp/rpc"
|
||||
)
|
||||
|
||||
type (
|
||||
// PolicyUpdate defines a type to store channel policy updates for a
|
||||
// given advertisingNode. It has the format,
|
||||
// {"advertisingNode": [policy1, policy2, ...]}.
|
||||
PolicyUpdate map[string][]*PolicyUpdateInfo
|
||||
|
||||
// policyUpdateMap defines a type to store channel policy updates. It
|
||||
// has the format,
|
||||
// {
|
||||
// "chanPoint1": {
|
||||
// "advertisingNode1": [
|
||||
// policy1, policy2, ...
|
||||
// ],
|
||||
// "advertisingNode2": [
|
||||
// policy1, policy2, ...
|
||||
// ]
|
||||
// },
|
||||
// "chanPoint2": ...
|
||||
// }.
|
||||
policyUpdateMap map[string]map[string][]*lnrpc.RoutingPolicy
|
||||
)
|
||||
|
||||
// PolicyUpdateInfo stores the RoutingPolicy plus the connecting node info.
|
||||
type PolicyUpdateInfo struct {
|
||||
*lnrpc.RoutingPolicy
|
||||
|
||||
// ConnectingNode specifies the node that is connected with the
|
||||
// advertising node.
|
||||
ConnectingNode string `json:"connecting_node"`
|
||||
|
||||
// Timestamp records the time the policy update is made.
|
||||
Timestamp time.Time `json:"timestamp"`
|
||||
}
|
||||
|
||||
// OpenChannelUpdate stores the open channel updates.
|
||||
type OpenChannelUpdate struct {
|
||||
// AdvertisingNode specifies the node that advertised this update.
|
||||
AdvertisingNode string `json:"advertising_node"`
|
||||
|
||||
// ConnectingNode specifies the node that is connected with the
|
||||
// advertising node.
|
||||
ConnectingNode string `json:"connecting_node"`
|
||||
|
||||
// Timestamp records the time the policy update is made.
|
||||
Timestamp time.Time `json:"timestamp"`
|
||||
}
|
||||
|
||||
// openChannelCount stores the total number of channel related counts.
|
||||
type openChannelCount struct {
|
||||
Active int
|
||||
Inactive int
|
||||
Pending int
|
||||
Public int
|
||||
Private int
|
||||
NumUpdates uint64
|
||||
}
|
||||
|
||||
// closedChannelCount stores the total number of closed, waiting and pending
|
||||
// force close channels.
|
||||
type closedChannelCount struct {
|
||||
PendingForceClose int
|
||||
WaitingClose int
|
||||
Closed int
|
||||
}
|
||||
|
||||
// utxoCount counts the total confirmed and unconfirmed UTXOs.
|
||||
type utxoCount struct {
|
||||
Confirmed int
|
||||
Unconfirmed int
|
||||
}
|
||||
|
||||
// edgeCount counts the total and public edges.
|
||||
type edgeCount struct {
|
||||
Total int
|
||||
Public int
|
||||
}
|
||||
|
||||
// paymentCount counts the complete (settled/failed) and incomplete payments.
|
||||
type paymentCount struct {
|
||||
Total int
|
||||
Completed int
|
||||
LastIndexOffset uint64
|
||||
}
|
||||
|
||||
// invoiceCount counts the complete (settled/canceled) and incomplete invoices.
|
||||
type invoiceCount struct {
|
||||
Total int
|
||||
Completed int
|
||||
LastIndexOffset uint64
|
||||
}
|
||||
|
||||
// balanceCount provides a summary over balances related to channels.
|
||||
type balanceCount struct {
|
||||
LocalBalance *lnrpc.Amount
|
||||
RemoteBalance *lnrpc.Amount
|
||||
UnsettledLocalBalance *lnrpc.Amount
|
||||
UnsettledRemoteBalance *lnrpc.Amount
|
||||
PendingOpenLocalBalance *lnrpc.Amount
|
||||
PendingOpenRemoteBalance *lnrpc.Amount
|
||||
|
||||
// Deprecated fields.
|
||||
Balance int64
|
||||
PendingOpenBalance int64
|
||||
}
|
||||
|
||||
// walletBalance provides a summary of balances related to the node's wallet.
|
||||
type walletBalance struct {
|
||||
TotalBalance int64
|
||||
ConfirmedBalance int64
|
||||
UnconfirmedBalance int64
|
||||
AccountBalance map[string]*lnrpc.WalletAccountBalance
|
||||
}
|
||||
|
||||
// State records the current state for a given node. It provides a simple count
|
||||
// over the node so that the test can track its state. For a channel-specific
|
||||
// state check, use a dedicated function to query the channel as each channel is
|
||||
// meant to be unique.
|
||||
type State struct {
|
||||
// rpc is the RPC clients used for the current node.
|
||||
rpc *rpc.HarnessRPC
|
||||
|
||||
// OpenChannel gives the summary of open channel related counts.
|
||||
OpenChannel openChannelCount
|
||||
|
||||
// CloseChannel gives the summary of close channel related counts.
|
||||
CloseChannel closedChannelCount
|
||||
|
||||
// Balance gives the summary of the channel balance.
|
||||
Balance balanceCount
|
||||
|
||||
// Wallet gives the summary of the wallet balance.
|
||||
Wallet walletBalance
|
||||
|
||||
// HTLC counts the total active HTLCs.
|
||||
HTLC int
|
||||
|
||||
// Edge counts the total private/public edges.
|
||||
Edge edgeCount
|
||||
|
||||
// ChannelUpdate counts the total channel updates seen from the graph
|
||||
// subscription.
|
||||
ChannelUpdate int
|
||||
|
||||
// NodeUpdate counts the total node announcements seen from the graph
|
||||
// subscription.
|
||||
NodeUpdate int
|
||||
|
||||
// UTXO counts the total active UTXOs.
|
||||
UTXO utxoCount
|
||||
|
||||
// Payment counts the total payment of the node.
|
||||
Payment paymentCount
|
||||
|
||||
// Invoice counts the total invoices made by the node.
|
||||
Invoice invoiceCount
|
||||
|
||||
// openChans records each opened channel and how many times it has
|
||||
// heard the announcements from its graph subscription.
|
||||
openChans *SyncMap[wire.OutPoint, []*OpenChannelUpdate]
|
||||
|
||||
// closedChans records each closed channel and its close channel update
|
||||
// message received from its graph subscription.
|
||||
closedChans *SyncMap[wire.OutPoint, *lnrpc.ClosedChannelUpdate]
|
||||
|
||||
// numChanUpdates records the number of channel updates seen by each
|
||||
// channel.
|
||||
numChanUpdates *SyncMap[wire.OutPoint, int]
|
||||
|
||||
// nodeUpdates records the node announcements seen by each node.
|
||||
nodeUpdates *SyncMap[string, []*lnrpc.NodeUpdate]
|
||||
|
||||
// policyUpdates defines a type to store channel policy updates. It has
|
||||
// the format,
|
||||
// {
|
||||
// "chanPoint1": {
|
||||
// "advertisingNode1": [
|
||||
// policy1, policy2, ...
|
||||
// ],
|
||||
// "advertisingNode2": [
|
||||
// policy1, policy2, ...
|
||||
// ]
|
||||
// },
|
||||
// "chanPoint2": ...
|
||||
// }
|
||||
policyUpdates *SyncMap[wire.OutPoint, PolicyUpdate]
|
||||
}
|
||||
|
||||
// newState initializes a new state with every field being set to its zero
|
||||
// value.
|
||||
func newState(rpc *rpc.HarnessRPC) *State {
|
||||
return &State{
|
||||
rpc: rpc,
|
||||
openChans: &SyncMap[wire.OutPoint, []*OpenChannelUpdate]{},
|
||||
closedChans: &SyncMap[
|
||||
wire.OutPoint, *lnrpc.ClosedChannelUpdate,
|
||||
]{},
|
||||
numChanUpdates: &SyncMap[wire.OutPoint, int]{},
|
||||
nodeUpdates: &SyncMap[string, []*lnrpc.NodeUpdate]{},
|
||||
policyUpdates: &SyncMap[wire.OutPoint, PolicyUpdate]{},
|
||||
}
|
||||
}
|
||||
|
||||
// updateChannelStats gives the stats on open channel related fields.
|
||||
func (s *State) updateChannelStats() {
|
||||
req := &lnrpc.ListChannelsRequest{}
|
||||
resp := s.rpc.ListChannels(req)
|
||||
|
||||
for _, channel := range resp.Channels {
|
||||
if channel.Active {
|
||||
s.OpenChannel.Active++
|
||||
} else {
|
||||
s.OpenChannel.Inactive++
|
||||
}
|
||||
|
||||
if channel.Private {
|
||||
s.OpenChannel.Private++
|
||||
} else {
|
||||
s.OpenChannel.Public++
|
||||
}
|
||||
s.OpenChannel.NumUpdates += channel.NumUpdates
|
||||
s.HTLC += len(channel.PendingHtlcs)
|
||||
}
|
||||
}
|
||||
|
||||
// updateCloseChannelStats gives the stats on close channel related fields.
|
||||
func (s *State) updateCloseChannelStats() {
|
||||
resp := s.rpc.PendingChannels()
|
||||
s.CloseChannel.PendingForceClose += len(
|
||||
resp.PendingForceClosingChannels,
|
||||
)
|
||||
s.CloseChannel.WaitingClose += len(resp.WaitingCloseChannels)
|
||||
|
||||
closeReq := &lnrpc.ClosedChannelsRequest{}
|
||||
closed := s.rpc.ClosedChannels(closeReq)
|
||||
|
||||
s.CloseChannel.Closed += len(closed.Channels)
|
||||
s.OpenChannel.Pending += len(resp.PendingOpenChannels)
|
||||
}
|
||||
|
||||
// updatePaymentStats counts the total payments made.
|
||||
func (s *State) updatePaymentStats() {
|
||||
req := &lnrpc.ListPaymentsRequest{
|
||||
IndexOffset: s.Payment.LastIndexOffset,
|
||||
}
|
||||
resp := s.rpc.ListPayments(req)
|
||||
|
||||
s.Payment.LastIndexOffset = resp.LastIndexOffset
|
||||
for _, payment := range resp.Payments {
|
||||
if payment.Status == lnrpc.Payment_FAILED ||
|
||||
payment.Status == lnrpc.Payment_SUCCEEDED {
|
||||
|
||||
s.Payment.Completed++
|
||||
}
|
||||
}
|
||||
|
||||
s.Payment.Total += len(resp.Payments)
|
||||
}
|
||||
|
||||
// updateInvoiceStats counts the total invoices made.
|
||||
func (s *State) updateInvoiceStats() {
|
||||
req := &lnrpc.ListInvoiceRequest{
|
||||
NumMaxInvoices: math.MaxUint64,
|
||||
IndexOffset: s.Invoice.LastIndexOffset,
|
||||
}
|
||||
resp := s.rpc.ListInvoices(req)
|
||||
|
||||
s.Invoice.LastIndexOffset = resp.LastIndexOffset
|
||||
for _, invoice := range resp.Invoices {
|
||||
if invoice.State == lnrpc.Invoice_SETTLED ||
|
||||
invoice.State == lnrpc.Invoice_CANCELED {
|
||||
|
||||
s.Invoice.Completed++
|
||||
}
|
||||
}
|
||||
|
||||
s.Invoice.Total += len(resp.Invoices)
|
||||
}
|
||||
|
||||
// updateUTXOStats counts the node's confirmed and unconfirmed UTXOs.
|
||||
func (s *State) updateUTXOStats() {
|
||||
req := &walletrpc.ListUnspentRequest{}
|
||||
resp := s.rpc.ListUnspent(req)
|
||||
|
||||
for _, utxo := range resp.Utxos {
|
||||
if utxo.Confirmations > 0 {
|
||||
s.UTXO.Confirmed++
|
||||
} else {
|
||||
s.UTXO.Unconfirmed++
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// updateEdgeStats counts the total edges.
|
||||
func (s *State) updateEdgeStats() {
|
||||
req := &lnrpc.ChannelGraphRequest{IncludeUnannounced: true}
|
||||
resp := s.rpc.DescribeGraph(req)
|
||||
s.Edge.Total = len(resp.Edges)
|
||||
|
||||
req = &lnrpc.ChannelGraphRequest{IncludeUnannounced: false}
|
||||
resp = s.rpc.DescribeGraph(req)
|
||||
s.Edge.Public = len(resp.Edges)
|
||||
}
|
||||
|
||||
// updateChannelBalance creates stats for the node's channel balance.
|
||||
func (s *State) updateChannelBalance() {
|
||||
resp := s.rpc.ChannelBalance()
|
||||
|
||||
s.Balance.LocalBalance = resp.LocalBalance
|
||||
s.Balance.RemoteBalance = resp.RemoteBalance
|
||||
s.Balance.UnsettledLocalBalance = resp.UnsettledLocalBalance
|
||||
s.Balance.UnsettledRemoteBalance = resp.UnsettledRemoteBalance
|
||||
s.Balance.PendingOpenLocalBalance = resp.PendingOpenLocalBalance
|
||||
s.Balance.PendingOpenRemoteBalance = resp.PendingOpenRemoteBalance
|
||||
}
|
||||
|
||||
// updateWalletBalance creates stats for the node's wallet balance.
|
||||
func (s *State) updateWalletBalance() {
|
||||
resp := s.rpc.WalletBalance()
|
||||
|
||||
s.Wallet.TotalBalance = resp.TotalBalance
|
||||
s.Wallet.ConfirmedBalance = resp.ConfirmedBalance
|
||||
s.Wallet.UnconfirmedBalance = resp.UnconfirmedBalance
|
||||
s.Wallet.AccountBalance = resp.AccountBalance
|
||||
}
|
||||
|
||||
// updateState updates the internal state of the node.
|
||||
func (s *State) updateState() {
|
||||
s.updateChannelStats()
|
||||
s.updateCloseChannelStats()
|
||||
s.updatePaymentStats()
|
||||
s.updateInvoiceStats()
|
||||
s.updateUTXOStats()
|
||||
s.updateEdgeStats()
|
||||
s.updateChannelBalance()
|
||||
s.updateWalletBalance()
|
||||
}
|
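As a hedged usage sketch (the helper name is hypothetical and would live in the same node package since updateState is unexported; only updateState and the String method below come from this diff), a test could refresh the counters once and dump them when debugging a failure:

// dumpNodeState is an illustrative helper: refresh the node's counters and
// log the JSON-encoded state produced by State.String.
func dumpNodeState(t *testing.T, s *State) {
	s.updateState()
	t.Logf("node state: %s", s)
}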
||||
|
||||
// String encodes the node's state for debugging.
|
||||
func (s *State) String() string {
|
||||
stateBytes, err := json.MarshalIndent(s, "", "\t")
|
||||
if err != nil {
|
||||
return fmt.Sprintf("\n encode node state with err: %v", err)
|
||||
}
|
||||
|
||||
return fmt.Sprintf("\n%s", stateBytes)
|
||||
}
|
53
lntemp/node/sync_map.go
Normal file
@ -0,0 +1,53 @@
|
||||
package node
|
||||
|
||||
import "sync"
|
||||
|
||||
// SyncMap wraps a sync.Map with type parameters such that it's easier to
|
||||
// access the items stored in the map since no type assertion is needed. It
|
||||
// also requires an explicit type definition when declaring and initializing
// the variables, which helps us understand what's stored in a given map.
|
||||
type SyncMap[K comparable, V any] struct {
|
||||
sync.Map
|
||||
}
|
||||
|
||||
// Store puts an item in the map.
|
||||
func (m *SyncMap[K, V]) Store(key K, value V) {
|
||||
m.Map.Store(key, value)
|
||||
}
|
||||
|
||||
// Load queries an item from the map using the specified key. If the item
|
||||
// cannot be found, an empty value and false will be returned. If the stored
|
||||
// item fails the type assertion, a nil value and false will be returned.
|
||||
func (m *SyncMap[K, V]) Load(key K) (V, bool) {
|
||||
result, ok := m.Map.Load(key)
|
||||
if !ok {
|
||||
return *new(V), false // nolint: gocritic
|
||||
}
|
||||
|
||||
item, ok := result.(V)
|
||||
return item, ok
|
||||
}
|
||||
|
||||
// Delete removes an item from the map specified by the key.
|
||||
func (m *SyncMap[K, V]) Delete(key K) {
|
||||
m.Map.Delete(key)
|
||||
}
|
||||
|
||||
// LoadAndDelete queries an item and deletes it from the map using the
|
||||
// specified key.
|
||||
func (m *SyncMap[K, V]) LoadAndDelete(key K) (V, bool) {
|
||||
result, loaded := m.Map.LoadAndDelete(key)
|
||||
if !loaded {
|
||||
return *new(V), loaded // nolint: gocritic
|
||||
}
|
||||
|
||||
item, ok := result.(V)
|
||||
return item, ok
|
||||
}
|
||||
|
||||
// Range iterates the map.
|
||||
func (m *SyncMap[K, V]) Range(visitor func(K, V) bool) {
|
||||
m.Map.Range(func(k any, v any) bool {
|
||||
return visitor(k.(K), v.(V))
|
||||
})
|
||||
}
|
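A small, self-contained sketch of why the typed wrapper is convenient; the program below reproduces just the Store/Load methods so it compiles on its own, and the keys and values are hypothetical:

package main

import (
	"fmt"
	"sync"
)

// SyncMap mirrors the wrapper above, reproduced here only so the sketch is
// self-contained.
type SyncMap[K comparable, V any] struct {
	sync.Map
}

// Store puts an item in the map.
func (m *SyncMap[K, V]) Store(key K, value V) {
	m.Map.Store(key, value)
}

// Load queries an item and reports whether it was found.
func (m *SyncMap[K, V]) Load(key K) (V, bool) {
	result, ok := m.Map.Load(key)
	if !ok {
		return *new(V), false
	}

	item, ok := result.(V)
	return item, ok
}

func main() {
	// The type parameters document what's stored, and no type assertion
	// is needed at the call site.
	updates := &SyncMap[string, int]{}
	updates.Store("chanPoint1", 2)

	if n, ok := updates.Load("chanPoint1"); ok {
		fmt.Println("updates seen:", n) // prints: updates seen: 2
	}
}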
685
lntemp/node/watcher.go
Normal file
@ -0,0 +1,685 @@
|
||||
package node
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/btcsuite/btcd/wire"
|
||||
"github.com/lightningnetwork/lnd/lnrpc"
|
||||
"github.com/lightningnetwork/lnd/lntemp/rpc"
|
||||
"github.com/lightningnetwork/lnd/lntest"
|
||||
"github.com/lightningnetwork/lnd/lntest/wait"
|
||||
)
|
||||
|
||||
type chanWatchType uint8
|
||||
|
||||
const (
|
||||
// watchOpenChannel specifies that this is a request to watch an open
|
||||
// channel event.
|
||||
watchOpenChannel chanWatchType = iota
|
||||
|
||||
// watchCloseChannel specifies that this is a request to watch a close
|
||||
// channel event.
|
||||
watchCloseChannel
|
||||
|
||||
// watchPolicyUpdate specifies that this is a request to watch a policy
|
||||
// update event.
|
||||
watchPolicyUpdate
|
||||
|
||||
// TODO(yy): remove once the temp tests are finished.
|
||||
DefaultTimeout = lntest.DefaultTimeout
|
||||
)
|
||||
|
||||
// chanWatchRequest is a request to the lightningNetworkWatcher to be notified
|
||||
// once it's detected within the test Lightning Network that a channel has
|
||||
// either been added or closed.
|
||||
type chanWatchRequest struct {
|
||||
chanPoint wire.OutPoint
|
||||
|
||||
chanWatchType chanWatchType
|
||||
|
||||
eventChan chan struct{}
|
||||
|
||||
advertisingNode string
|
||||
policy *lnrpc.RoutingPolicy
|
||||
includeUnannounced bool
|
||||
}
|
||||
|
||||
// nodeWatcher is a topology watcher for a HarnessNode. It keeps track of all
|
||||
// the topology updates seen in a given node, including NodeUpdate,
|
||||
// ChannelEdgeUpdate, and ClosedChannelUpdate.
|
||||
type nodeWatcher struct {
|
||||
// rpc is the RPC clients used for the current node.
|
||||
rpc *rpc.HarnessRPC
|
||||
|
||||
// state is the node's current state.
|
||||
state *State
|
||||
|
||||
// chanWatchRequests receives a request for watching a particular event
|
||||
// for a given channel.
|
||||
chanWatchRequests chan *chanWatchRequest
|
||||
|
||||
// For each outpoint, we'll track an integer which denotes the number
|
||||
// of edges seen for that channel within the network. When this number
|
||||
// reaches 2, then it means that both edge advertisements have
|
||||
// propagated through the network.
|
||||
openChanWatchers *SyncMap[wire.OutPoint, []chan struct{}]
|
||||
closeChanWatchers *SyncMap[wire.OutPoint, []chan struct{}]
|
||||
|
||||
wg sync.WaitGroup
|
||||
}
|
||||
|
||||
func newNodeWatcher(rpc *rpc.HarnessRPC, state *State) *nodeWatcher {
|
||||
return &nodeWatcher{
|
||||
rpc: rpc,
|
||||
state: state,
|
||||
chanWatchRequests: make(chan *chanWatchRequest, 100),
|
||||
openChanWatchers: &SyncMap[wire.OutPoint, []chan struct{}]{},
|
||||
closeChanWatchers: &SyncMap[wire.OutPoint, []chan struct{}]{},
|
||||
}
|
||||
}
|
||||
|
||||
// GetNumChannelUpdates reads the num of channel updates inside a lock and
|
||||
// returns the value.
|
||||
func (nw *nodeWatcher) GetNumChannelUpdates(op wire.OutPoint) int {
|
||||
result, _ := nw.state.numChanUpdates.Load(op)
|
||||
return result
|
||||
}
|
||||
|
||||
// GetPolicyUpdates returns the node's policyUpdates state.
|
||||
func (nw *nodeWatcher) GetPolicyUpdates(op wire.OutPoint) PolicyUpdate {
|
||||
result, _ := nw.state.policyUpdates.Load(op)
|
||||
return result
|
||||
}
|
||||
|
||||
// GetNodeUpdates reads the node updates inside a lock and returns the value.
|
||||
func (nw *nodeWatcher) GetNodeUpdates(pubkey string) []*lnrpc.NodeUpdate {
|
||||
result, _ := nw.state.nodeUpdates.Load(pubkey)
|
||||
return result
|
||||
}
|
||||
|
||||
// WaitForNumChannelUpdates will block until a given number of updates has been
|
||||
// seen in the node's network topology.
|
||||
func (nw *nodeWatcher) WaitForNumChannelUpdates(op wire.OutPoint,
|
||||
expected int) error {
|
||||
|
||||
checkNumUpdates := func() error {
|
||||
num := nw.GetNumChannelUpdates(op)
|
||||
if num >= expected {
|
||||
return nil
|
||||
}
|
||||
|
||||
return fmt.Errorf("timeout waiting for num channel updates, "+
|
||||
"want %d, got %d", expected, num)
|
||||
}
|
||||
|
||||
return wait.NoError(checkNumUpdates, DefaultTimeout)
|
||||
}
|
||||
|
||||
// WaitForNumNodeUpdates will block until a given number of node updates has
|
||||
// been seen in the node's network topology.
|
||||
func (nw *nodeWatcher) WaitForNumNodeUpdates(pubkey string,
|
||||
expected int) error {
|
||||
|
||||
checkNumUpdates := func() error {
|
||||
num := len(nw.GetNodeUpdates(pubkey))
|
||||
if num >= expected {
|
||||
return nil
|
||||
}
|
||||
|
||||
return fmt.Errorf("timeout waiting for num node updates, "+
|
||||
"want %d, got %d", expected, num)
|
||||
}
|
||||
|
||||
return wait.NoError(checkNumUpdates, DefaultTimeout)
|
||||
}
|
||||
|
||||
// WaitForChannelOpen will block until a channel with the target outpoint is
|
||||
// seen as being fully advertised within the network. A channel is considered
|
||||
// "fully advertised" once both of its directional edges has been advertised in
|
||||
// the node's network topology.
|
||||
func (nw *nodeWatcher) WaitForChannelOpen(chanPoint *lnrpc.ChannelPoint) error {
|
||||
op := nw.rpc.MakeOutpoint(chanPoint)
|
||||
eventChan := make(chan struct{})
|
||||
nw.chanWatchRequests <- &chanWatchRequest{
|
||||
chanPoint: op,
|
||||
eventChan: eventChan,
|
||||
chanWatchType: watchOpenChannel,
|
||||
}
|
||||
|
||||
timer := time.After(DefaultTimeout)
|
||||
select {
|
||||
case <-eventChan:
|
||||
return nil
|
||||
|
||||
case <-timer:
|
||||
updates, err := syncMapToJSON(&nw.state.openChans.Map)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return fmt.Errorf("channel:%s not heard before timeout: "+
|
||||
"node has heard: %s", op, updates)
|
||||
}
|
||||
}
|
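A hedged sketch of how the wait helper is meant to be composed in a test (the wrapper function is hypothetical; WaitForChannelOpen and the lnrpc types come from this diff):

// waitBothSeeChannel is an illustrative helper: block until both watchers
// report the channel as fully advertised, or fail with the first error.
func waitBothSeeChannel(alice, bob *nodeWatcher,
	chanPoint *lnrpc.ChannelPoint) error {

	if err := alice.WaitForChannelOpen(chanPoint); err != nil {
		return fmt.Errorf("alice: %w", err)
	}

	if err := bob.WaitForChannelOpen(chanPoint); err != nil {
		return fmt.Errorf("bob: %w", err)
	}

	return nil
}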
||||
|
||||
// WaitForChannelClose will block until a channel with the target outpoint is
|
||||
// seen as closed within the node's network topology. A channel is considered
|
||||
// closed once a transaction spending the funding outpoint is seen within a
|
||||
// confirmed block.
|
||||
func (nw *nodeWatcher) WaitForChannelClose(
|
||||
chanPoint *lnrpc.ChannelPoint) (*lnrpc.ClosedChannelUpdate, error) {
|
||||
|
||||
op := nw.rpc.MakeOutpoint(chanPoint)
|
||||
eventChan := make(chan struct{})
|
||||
nw.chanWatchRequests <- &chanWatchRequest{
|
||||
chanPoint: op,
|
||||
eventChan: eventChan,
|
||||
chanWatchType: watchCloseChannel,
|
||||
}
|
||||
|
||||
timer := time.After(DefaultTimeout)
|
||||
select {
|
||||
case <-eventChan:
|
||||
closedChan, ok := nw.state.closedChans.Load(op)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("channel:%s expected to find "+
|
||||
"a closed channel in node's state:%s", op,
|
||||
nw.state)
|
||||
}
|
||||
return closedChan, nil
|
||||
|
||||
case <-timer:
|
||||
return nil, fmt.Errorf("channel:%s not closed before timeout: "+
|
||||
"%s", op, nw.state)
|
||||
}
|
||||
}
|
||||
|
||||
// WaitForChannelPolicyUpdate will block until a channel policy with the target
|
||||
// outpoint and advertisingNode is seen within the network.
|
||||
func (nw *nodeWatcher) WaitForChannelPolicyUpdate(
|
||||
advertisingNode *HarnessNode, policy *lnrpc.RoutingPolicy,
|
||||
chanPoint *lnrpc.ChannelPoint, includeUnannounced bool) error {
|
||||
|
||||
op := nw.rpc.MakeOutpoint(chanPoint)
|
||||
|
||||
ticker := time.NewTicker(wait.PollInterval)
|
||||
timer := time.After(DefaultTimeout)
|
||||
defer ticker.Stop()
|
||||
|
||||
eventChan := make(chan struct{})
|
||||
for {
|
||||
select {
|
||||
// Send a watch request every second.
|
||||
case <-ticker.C:
|
||||
// Did the event chan close in the meantime? We want to
|
||||
// avoid a "close of closed channel" panic since we're
|
||||
// re-using the same event chan for multiple requests.
|
||||
select {
|
||||
case <-eventChan:
|
||||
return nil
|
||||
default:
|
||||
}
|
||||
|
||||
nw.chanWatchRequests <- &chanWatchRequest{
|
||||
chanPoint: op,
|
||||
eventChan: eventChan,
|
||||
chanWatchType: watchPolicyUpdate,
|
||||
policy: policy,
|
||||
advertisingNode: advertisingNode.PubKeyStr,
|
||||
includeUnannounced: includeUnannounced,
|
||||
}
|
||||
|
||||
case <-eventChan:
|
||||
return nil
|
||||
|
||||
case <-timer:
|
||||
expected, err := json.MarshalIndent(policy, "", "\t")
|
||||
if err != nil {
|
||||
return fmt.Errorf("encode policy err: %v", err)
|
||||
}
|
||||
policies, err := syncMapToJSON(
|
||||
&nw.state.policyUpdates.Map,
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return fmt.Errorf("policy not updated before timeout:"+
|
||||
"\nchannel: %v \nadvertisingNode: %s:%v"+
|
||||
"\nwant policy:%s\nhave updates:%s", op,
|
||||
advertisingNode.Name(),
|
||||
advertisingNode.PubKeyStr, expected, policies)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// syncMapToJSON is a helper function that creates json bytes from the sync.Map
|
||||
// used in the node. It expects the sync.Map keys to be of type wire.OutPoint.
|
||||
func syncMapToJSON(state *sync.Map) ([]byte, error) {
|
||||
m := map[string]interface{}{}
|
||||
state.Range(func(k, v interface{}) bool {
|
||||
op := k.(wire.OutPoint)
|
||||
m[op.String()] = v
|
||||
return true
|
||||
})
|
||||
policies, err := json.MarshalIndent(m, "", "\t")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("encode polices err: %v", err)
|
||||
}
|
||||
|
||||
return policies, nil
|
||||
}
|
||||
|
||||
// topologyWatcher is a goroutine which is able to dispatch notifications once
|
||||
// it has been observed that a target channel has been closed or opened within
|
||||
// the network. In order to dispatch these notifications, the
|
||||
// GraphTopologySubscription client exposed as part of the gRPC interface is
|
||||
// used.
|
||||
//
|
||||
// NOTE: must be run as a goroutine.
|
||||
func (nw *nodeWatcher) topologyWatcher(ctxb context.Context,
|
||||
started chan error) {
|
||||
|
||||
graphUpdates := make(chan *lnrpc.GraphTopologyUpdate)
|
||||
|
||||
client, err := nw.newTopologyClient(ctxb)
|
||||
started <- err
|
||||
|
||||
// Exit if there's an error.
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Start a goroutine to receive graph updates.
|
||||
nw.wg.Add(1)
|
||||
go func() {
|
||||
defer nw.wg.Done()
|
||||
|
||||
// With the client being created, we now start receiving the
|
||||
// updates.
|
||||
err = nw.receiveTopologyClientStream(ctxb, client, graphUpdates)
|
||||
if err != nil {
|
||||
started <- fmt.Errorf("receiveTopologyClientStream "+
|
||||
"got err: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
for {
|
||||
select {
|
||||
// A new graph update has just been received, so we'll examine
|
||||
// the current set of registered clients to see if we can
|
||||
// dispatch any requests.
|
||||
case graphUpdate := <-graphUpdates:
|
||||
nw.handleChannelEdgeUpdates(graphUpdate.ChannelUpdates)
|
||||
nw.handleClosedChannelUpdate(graphUpdate.ClosedChans)
|
||||
nw.handleNodeUpdates(graphUpdate.NodeUpdates)
|
||||
|
||||
// A new watch request has just arrived. We'll either be able
|
||||
// to dispatch immediately, or need to add the client for
|
||||
// processing later.
|
||||
case watchRequest := <-nw.chanWatchRequests:
|
||||
switch watchRequest.chanWatchType {
|
||||
case watchOpenChannel:
|
||||
// TODO(roasbeef): add update type also, checks
|
||||
// for multiple of 2
|
||||
nw.handleOpenChannelWatchRequest(watchRequest)
|
||||
|
||||
case watchCloseChannel:
|
||||
nw.handleCloseChannelWatchRequest(watchRequest)
|
||||
|
||||
case watchPolicyUpdate:
|
||||
nw.handlePolicyUpdateWatchRequest(watchRequest)
|
||||
}
|
||||
|
||||
case <-ctxb.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (nw *nodeWatcher) handleNodeUpdates(updates []*lnrpc.NodeUpdate) {
|
||||
for _, nodeUpdate := range updates {
|
||||
nw.updateNodeStateNodeUpdates(nodeUpdate)
|
||||
}
|
||||
}
|
||||
|
||||
// handleChannelEdgeUpdates takes a series of channel edge updates, extracts
|
||||
// the outpoints, and saves them to harness node's internal state.
|
||||
func (nw *nodeWatcher) handleChannelEdgeUpdates(
|
||||
updates []*lnrpc.ChannelEdgeUpdate) {
|
||||
|
||||
// For each new channel, we'll increment the number of edges seen by
|
||||
// one.
|
||||
for _, newChan := range updates {
|
||||
op := nw.rpc.MakeOutpoint(newChan.ChanPoint)
|
||||
|
||||
// Update the num of channel updates.
|
||||
nw.updateNodeStateNumChanUpdates(op)
|
||||
|
||||
// Update the open channels.
|
||||
nw.updateNodeStateOpenChannel(op, newChan)
|
||||
|
||||
// Check whether there's a routing policy update. If so, save
|
||||
// it to the node state.
|
||||
if newChan.RoutingPolicy != nil {
|
||||
nw.updateNodeStatePolicy(op, newChan)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// updateNodeStateNumChanUpdates updates the internal state of the node
|
||||
// regarding the number of channel updates seen.
|
||||
func (nw *nodeWatcher) updateNodeStateNumChanUpdates(op wire.OutPoint) {
|
||||
oldNum, _ := nw.state.numChanUpdates.Load(op)
|
||||
nw.state.numChanUpdates.Store(op, oldNum+1)
|
||||
}
|
||||
|
||||
// updateNodeStateNodeUpdates updates the internal state of the node regarding
|
||||
// the node updates seen.
|
||||
func (nw *nodeWatcher) updateNodeStateNodeUpdates(update *lnrpc.NodeUpdate) {
|
||||
oldUpdates, _ := nw.state.nodeUpdates.Load(update.IdentityKey)
|
||||
nw.state.nodeUpdates.Store(
|
||||
update.IdentityKey, append(oldUpdates, update),
|
||||
)
|
||||
}
|
||||
|
||||
// updateNodeStateOpenChannel updates the internal state of the node regarding
|
||||
// the open channels.
|
||||
func (nw *nodeWatcher) updateNodeStateOpenChannel(op wire.OutPoint,
|
||||
newChan *lnrpc.ChannelEdgeUpdate) {
|
||||
|
||||
// Load the old updates the node has heard so far.
|
||||
updates, _ := nw.state.openChans.Load(op)
|
||||
|
||||
// Create a new update based on this newChan.
|
||||
newUpdate := &OpenChannelUpdate{
|
||||
AdvertisingNode: newChan.AdvertisingNode,
|
||||
ConnectingNode: newChan.ConnectingNode,
|
||||
Timestamp: time.Now(),
|
||||
}
|
||||
|
||||
// Update the node's state.
|
||||
updates = append(updates, newUpdate)
|
||||
nw.state.openChans.Store(op, updates)
|
||||
|
||||
// For this new channel, if the number of edges seen is less
|
||||
// than two, then the channel hasn't been fully announced yet.
|
||||
if len(updates) < 2 {
|
||||
return
|
||||
}
|
||||
|
||||
// Otherwise, we'll notify all the registered watchers and
|
||||
// remove the dispatched watchers.
|
||||
watcherResult, loaded := nw.openChanWatchers.LoadAndDelete(op)
|
||||
if !loaded {
|
||||
return
|
||||
}
|
||||
|
||||
for _, eventChan := range watcherResult {
|
||||
close(eventChan)
|
||||
}
|
||||
}
|
||||
|
||||
// updateNodeStatePolicy updates the internal state of the node regarding the
|
||||
// policy updates.
|
||||
func (nw *nodeWatcher) updateNodeStatePolicy(op wire.OutPoint,
|
||||
newChan *lnrpc.ChannelEdgeUpdate) {
|
||||
|
||||
// Load the existing policy map, or init an empty one if the channel point
// cannot be found in the node's policyUpdates.
|
||||
policies, ok := nw.state.policyUpdates.Load(op)
|
||||
if !ok {
|
||||
policies = make(PolicyUpdate)
|
||||
}
|
||||
|
||||
node := newChan.AdvertisingNode
|
||||
|
||||
// Append the policy to the slice and update the node's state.
|
||||
newPolicy := PolicyUpdateInfo{
|
||||
newChan.RoutingPolicy, newChan.ConnectingNode, time.Now(),
|
||||
}
|
||||
policies[node] = append(policies[node], &newPolicy)
|
||||
nw.state.policyUpdates.Store(op, policies)
|
||||
}
|
||||
|
||||
// handleOpenChannelWatchRequest processes a watch open channel request by
|
||||
// checking the number of the edges seen for a given channel point. If the
|
||||
// number is no less than 2 then the channel is considered open. Otherwise, we
|
||||
// will attempt to find it in its channel graph. If neither can be found, the
|
||||
// request is added to a watch request list that will be handled by
|
||||
// handleChannelEdgeUpdates.
|
||||
func (nw *nodeWatcher) handleOpenChannelWatchRequest(req *chanWatchRequest) {
|
||||
targetChan := req.chanPoint
|
||||
|
||||
// If this is an open request, then it can be dispatched if the number
|
||||
// of edges seen for the channel is at least two.
|
||||
result, _ := nw.state.openChans.Load(targetChan)
|
||||
if len(result) >= 2 {
|
||||
close(req.eventChan)
|
||||
return
|
||||
}
|
||||
|
||||
// Otherwise, we'll add this to the list of open channel watchers for
|
||||
// this out point.
|
||||
watchers, _ := nw.openChanWatchers.Load(targetChan)
|
||||
nw.openChanWatchers.Store(
|
||||
targetChan, append(watchers, req.eventChan),
|
||||
)
|
||||
}
|
||||
|
||||
// handleClosedChannelUpdate takes a series of closed channel updates, extracts
|
||||
// the outpoints, saves them to harness node's internal state, and notifies all
|
||||
// registered clients.
|
||||
func (nw *nodeWatcher) handleClosedChannelUpdate(
|
||||
updates []*lnrpc.ClosedChannelUpdate) {
|
||||
|
||||
// For each channel closed, we'll mark that we've detected a channel
|
||||
// closure while lnd was pruning the channel graph.
|
||||
for _, closedChan := range updates {
|
||||
op := nw.rpc.MakeOutpoint(closedChan.ChanPoint)
|
||||
|
||||
nw.state.closedChans.Store(op, closedChan)
|
||||
|
||||
// As the channel has been closed, we'll notify all registered
|
||||
// watchers.
|
||||
watchers, loaded := nw.closeChanWatchers.LoadAndDelete(op)
|
||||
if !loaded {
|
||||
continue
|
||||
}
|
||||
|
||||
for _, eventChan := range watchers {
|
||||
close(eventChan)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// handleCloseChannelWatchRequest processes a watch close channel request by
|
||||
// checking whether the given channel point can be found in the node's internal
|
||||
// state. If not, the request is added to a watch request list that will be
// handled by handleClosedChannelUpdate.
|
||||
func (nw *nodeWatcher) handleCloseChannelWatchRequest(req *chanWatchRequest) {
|
||||
targetChan := req.chanPoint
|
||||
|
||||
// If this is a close request, then it can be immediately dispatched if
|
||||
// we've already seen a channel closure for this channel.
|
||||
if _, ok := nw.state.closedChans.Load(targetChan); ok {
|
||||
close(req.eventChan)
|
||||
return
|
||||
}
|
||||
|
||||
// Otherwise, we'll add this to the list of close channel watchers for
|
||||
// this out point.
|
||||
oldWatchers, _ := nw.closeChanWatchers.Load(targetChan)
|
||||
nw.closeChanWatchers.Store(
|
||||
targetChan, append(oldWatchers, req.eventChan),
|
||||
)
|
||||
}
|
||||
|
||||
// handlePolicyUpdateWatchRequest checks whether the expected policy can be
// found either in the node's internal state or its DescribeGraph response.
// If found, it will signal the request by closing the event channel.
// Otherwise it does nothing and returns.
|
||||
func (nw *nodeWatcher) handlePolicyUpdateWatchRequest(req *chanWatchRequest) {
|
||||
op := req.chanPoint
|
||||
|
||||
var policies []*PolicyUpdateInfo
|
||||
|
||||
// Get a list of known policies for this chanPoint+advertisingNode
|
||||
// combination. Start searching in the node state first.
|
||||
policyMap, ok := nw.state.policyUpdates.Load(op)
|
||||
if ok {
|
||||
policies, ok = policyMap[req.advertisingNode]
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
} else {
|
||||
// If it cannot be found in the node state, try searching it
|
||||
// from the node's DescribeGraph.
|
||||
policyMap := nw.getChannelPolicies(req.includeUnannounced)
|
||||
result, ok := policyMap[op.String()][req.advertisingNode]
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
for _, policy := range result {
|
||||
// Use an empty ConnectingNode to mark it as being loaded from
// DescribeGraph.
|
||||
policies = append(
|
||||
policies, &PolicyUpdateInfo{
|
||||
policy, "", time.Now(),
|
||||
},
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
// Check if the latest policy is matched.
|
||||
policy := policies[len(policies)-1]
|
||||
if checkChannelPolicy(policy.RoutingPolicy, req.policy) == nil {
|
||||
close(req.eventChan)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
type topologyClient lnrpc.Lightning_SubscribeChannelGraphClient
|
||||
|
||||
// newTopologyClient creates a topology client.
|
||||
func (nw *nodeWatcher) newTopologyClient(
|
||||
ctx context.Context) (topologyClient, error) {
|
||||
|
||||
req := &lnrpc.GraphTopologySubscription{}
|
||||
client, err := nw.rpc.LN.SubscribeChannelGraph(ctx, req)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%s: unable to create topology client: "+
|
||||
"%v (%s)", nw.rpc.Name, err, time.Now().String())
|
||||
}
|
||||
|
||||
return client, nil
|
||||
}
|
||||
|
||||
// receiveTopologyClientStream takes a topologyClient and receives graph
|
||||
// updates.
|
||||
//
|
||||
// NOTE: must be run as a goroutine.
|
||||
func (nw *nodeWatcher) receiveTopologyClientStream(ctxb context.Context,
|
||||
client topologyClient,
|
||||
receiver chan *lnrpc.GraphTopologyUpdate) error {
|
||||
|
||||
for {
|
||||
update, err := client.Recv()
|
||||
|
||||
switch {
|
||||
case err == nil:
|
||||
// Good case. We will send the update to the receiver.
|
||||
|
||||
case strings.Contains(err.Error(), "EOF"):
|
||||
// End of subscription stream. Do nothing and quit.
|
||||
return nil
|
||||
|
||||
case strings.Contains(err.Error(), context.Canceled.Error()):
|
||||
// End of subscription stream. Do nothing and quit.
|
||||
return nil
|
||||
|
||||
default:
|
||||
// An unexpected error is returned, so return and leave it
|
||||
// to be handled by the caller.
|
||||
return fmt.Errorf("graph subscription err: %w", err)
|
||||
}
|
||||
|
||||
// Send the update or quit.
|
||||
select {
|
||||
case receiver <- update:
|
||||
case <-ctxb.Done():
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// getChannelPolicies queries the channel graph and formats the policies into
|
||||
// the format defined in type policyUpdateMap.
|
||||
func (nw *nodeWatcher) getChannelPolicies(include bool) policyUpdateMap {
|
||||
req := &lnrpc.ChannelGraphRequest{IncludeUnannounced: include}
|
||||
graph := nw.rpc.DescribeGraph(req)
|
||||
|
||||
policyUpdates := policyUpdateMap{}
|
||||
|
||||
for _, e := range graph.Edges {
|
||||
policies := policyUpdates[e.ChanPoint]
|
||||
|
||||
// If the map[op] is nil, we need to initialize the map first.
|
||||
if policies == nil {
|
||||
policies = make(map[string][]*lnrpc.RoutingPolicy)
|
||||
}
|
||||
|
||||
if e.Node1Policy != nil {
|
||||
policies[e.Node1Pub] = append(
|
||||
policies[e.Node1Pub], e.Node1Policy,
|
||||
)
|
||||
}
|
||||
|
||||
if e.Node2Policy != nil {
|
||||
policies[e.Node2Pub] = append(
|
||||
policies[e.Node2Pub], e.Node2Policy,
|
||||
)
|
||||
}
|
||||
|
||||
policyUpdates[e.ChanPoint] = policies
|
||||
}
|
||||
|
||||
return policyUpdates
|
||||
}
|
||||
|
||||
// checkChannelPolicy checks that the policy matches the expected one.
|
||||
func checkChannelPolicy(policy, expectedPolicy *lnrpc.RoutingPolicy) error {
|
||||
if policy.FeeBaseMsat != expectedPolicy.FeeBaseMsat {
|
||||
return fmt.Errorf("expected base fee %v, got %v",
|
||||
expectedPolicy.FeeBaseMsat, policy.FeeBaseMsat)
|
||||
}
|
||||
if policy.FeeRateMilliMsat != expectedPolicy.FeeRateMilliMsat {
|
||||
return fmt.Errorf("expected fee rate %v, got %v",
|
||||
expectedPolicy.FeeRateMilliMsat,
|
||||
policy.FeeRateMilliMsat)
|
||||
}
|
||||
if policy.TimeLockDelta != expectedPolicy.TimeLockDelta {
|
||||
return fmt.Errorf("expected time lock delta %v, got %v",
|
||||
expectedPolicy.TimeLockDelta,
|
||||
policy.TimeLockDelta)
|
||||
}
|
||||
if policy.MinHtlc != expectedPolicy.MinHtlc {
|
||||
return fmt.Errorf("expected min htlc %v, got %v",
|
||||
expectedPolicy.MinHtlc, policy.MinHtlc)
|
||||
}
|
||||
if policy.MaxHtlcMsat != expectedPolicy.MaxHtlcMsat {
|
||||
return fmt.Errorf("expected max htlc %v, got %v",
|
||||
expectedPolicy.MaxHtlcMsat, policy.MaxHtlcMsat)
|
||||
}
|
||||
if policy.Disabled != expectedPolicy.Disabled {
|
||||
return errors.New("edge should be disabled but isn't")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
5
lntemp/rpc/chain_notifier.go
Normal file
@ -0,0 +1,5 @@
|
||||
package rpc
|
||||
|
||||
// =====================
|
||||
// ChainClient related RPCs.
|
||||
// =====================
|
94
lntemp/rpc/harness_rpc.go
Normal file
@ -0,0 +1,94 @@
|
||||
package rpc
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/btcsuite/btcd/wire"
|
||||
"github.com/lightningnetwork/lnd/lnrpc"
|
||||
"github.com/lightningnetwork/lnd/lnrpc/chainrpc"
|
||||
"github.com/lightningnetwork/lnd/lnrpc/invoicesrpc"
|
||||
"github.com/lightningnetwork/lnd/lnrpc/peersrpc"
|
||||
"github.com/lightningnetwork/lnd/lnrpc/routerrpc"
|
||||
"github.com/lightningnetwork/lnd/lnrpc/signrpc"
|
||||
"github.com/lightningnetwork/lnd/lnrpc/walletrpc"
|
||||
"github.com/lightningnetwork/lnd/lnrpc/watchtowerrpc"
|
||||
"github.com/lightningnetwork/lnd/lnrpc/wtclientrpc"
|
||||
"github.com/lightningnetwork/lnd/lntest"
|
||||
"github.com/stretchr/testify/require"
|
||||
"google.golang.org/grpc"
|
||||
)
|
||||
|
||||
const (
|
||||
// TODO(yy): remove once the temp tests are finished.
|
||||
DefaultTimeout = lntest.DefaultTimeout
|
||||
)
|
||||
|
||||
// HarnessRPC wraps all lnd's RPC clients into a single struct for easier
|
||||
// access.
|
||||
type HarnessRPC struct {
|
||||
*testing.T
|
||||
|
||||
LN lnrpc.LightningClient
|
||||
WalletUnlocker lnrpc.WalletUnlockerClient
|
||||
Invoice invoicesrpc.InvoicesClient
|
||||
Signer signrpc.SignerClient
|
||||
Router routerrpc.RouterClient
|
||||
WalletKit walletrpc.WalletKitClient
|
||||
Watchtower watchtowerrpc.WatchtowerClient
|
||||
WatchtowerClient wtclientrpc.WatchtowerClientClient
|
||||
State lnrpc.StateClient
|
||||
ChainClient chainrpc.ChainNotifierClient
|
||||
Peer peersrpc.PeersClient
|
||||
|
||||
// Name is the HarnessNode's name.
|
||||
Name string
|
||||
|
||||
// runCtx is a context with cancel method. It's used to signal when the
|
||||
// node needs to quit, and used as the parent context when spawning
|
||||
// children contexts for RPC requests.
|
||||
runCtx context.Context
|
||||
cancel context.CancelFunc
|
||||
}
|
||||
|
||||
// NewHarnessRPC creates a new HarnessRPC with its own context inherited from
// the passed context.
|
||||
func NewHarnessRPC(ctxt context.Context, t *testing.T, c *grpc.ClientConn,
|
||||
name string) *HarnessRPC {
|
||||
|
||||
h := &HarnessRPC{
|
||||
T: t,
|
||||
LN: lnrpc.NewLightningClient(c),
|
||||
Invoice: invoicesrpc.NewInvoicesClient(c),
|
||||
Router: routerrpc.NewRouterClient(c),
|
||||
WalletKit: walletrpc.NewWalletKitClient(c),
|
||||
WalletUnlocker: lnrpc.NewWalletUnlockerClient(c),
|
||||
Watchtower: watchtowerrpc.NewWatchtowerClient(c),
|
||||
WatchtowerClient: wtclientrpc.NewWatchtowerClientClient(c),
|
||||
Signer: signrpc.NewSignerClient(c),
|
||||
State: lnrpc.NewStateClient(c),
|
||||
ChainClient: chainrpc.NewChainNotifierClient(c),
|
||||
Peer: peersrpc.NewPeersClient(c),
|
||||
Name: name,
|
||||
}
|
||||
|
||||
// Inherit parent context.
|
||||
h.runCtx, h.cancel = context.WithCancel(ctxt)
|
||||
return h
|
||||
}
|
||||
|
||||
// MakeOutpoint returns the outpoint of the channel's funding transaction.
|
||||
func (h *HarnessRPC) MakeOutpoint(cp *lnrpc.ChannelPoint) wire.OutPoint {
|
||||
fundingTxID, err := lnrpc.GetChanPointFundingTxid(cp)
|
||||
require.NoErrorf(h, err, "failed to make chanPoint", h.Name)
|
||||
|
||||
return wire.OutPoint{
|
||||
Hash: *fundingTxID,
|
||||
Index: cp.OutputIndex,
|
||||
}
|
||||
}
|
||||
|
||||
// NoError is a helper method to format the error message used in calling RPCs.
|
||||
func (h *HarnessRPC) NoError(err error, operation string) {
|
||||
require.NoErrorf(h, err, "%s: failed to call %s", h.Name, operation)
|
||||
}
|
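A brief sketch of the intended call pattern (the gRPC connection setup is assumed to exist elsewhere and the helper is hypothetical; NewHarnessRPC comes from this file and GetInfo from lntemp/rpc/lnd.go below):

// printAlias is a hypothetical helper: wrap an existing connection and call
// one of the asserting RPC wrappers. Any RPC failure fails the test through
// the require call inside NoError.
func printAlias(t *testing.T, conn *grpc.ClientConn) {
	ctxt, cancel := context.WithCancel(context.Background())
	defer cancel()

	alice := NewHarnessRPC(ctxt, t, conn, "Alice")

	// GetInfo asserts internally, so no error handling is needed here.
	info := alice.GetInfo()
	t.Logf("node alias: %s", info.Alias)
}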
5
lntemp/rpc/invoices.go
Normal file
@ -0,0 +1,5 @@
|
||||
package rpc
|
||||
|
||||
// =====================
|
||||
// InvoiceClient related RPCs.
|
||||
// =====================
|
216
lntemp/rpc/lnd.go
Normal file
@ -0,0 +1,216 @@
|
||||
package rpc
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/lightningnetwork/lnd/lnrpc"
|
||||
)
|
||||
|
||||
// =====================
|
||||
// LightningClient related RPCs.
|
||||
// =====================
|
||||
|
||||
// NewAddress makes a RPC call to NewAddress and asserts.
|
||||
func (h *HarnessRPC) NewAddress(
|
||||
req *lnrpc.NewAddressRequest) *lnrpc.NewAddressResponse {
|
||||
|
||||
ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
|
||||
defer cancel()
|
||||
|
||||
resp, err := h.LN.NewAddress(ctxt, req)
|
||||
h.NoError(err, "NewAddress")
|
||||
|
||||
return resp
|
||||
}
|
||||
|
||||
// WalletBalance makes a RPC call to WalletBalance and asserts.
|
||||
func (h *HarnessRPC) WalletBalance() *lnrpc.WalletBalanceResponse {
|
||||
ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
|
||||
defer cancel()
|
||||
|
||||
req := &lnrpc.WalletBalanceRequest{}
|
||||
resp, err := h.LN.WalletBalance(ctxt, req)
|
||||
h.NoError(err, "WalletBalance")
|
||||
|
||||
return resp
|
||||
}
|
||||
|
||||
// ListPeers makes a RPC call to the node's ListPeers and asserts.
|
||||
func (h *HarnessRPC) ListPeers() *lnrpc.ListPeersResponse {
|
||||
ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
|
||||
defer cancel()
|
||||
|
||||
resp, err := h.LN.ListPeers(ctxt, &lnrpc.ListPeersRequest{})
|
||||
h.NoError(err, "ListPeers")
|
||||
|
||||
return resp
|
||||
}
|
||||
|
||||
// DisconnectPeer calls the DisconnectPeer RPC on a given node with a specified
|
||||
// public key string and asserts there's no error.
|
||||
func (h *HarnessRPC) DisconnectPeer(
|
||||
pubkey string) *lnrpc.DisconnectPeerResponse {
|
||||
|
||||
ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
|
||||
defer cancel()
|
||||
|
||||
req := &lnrpc.DisconnectPeerRequest{PubKey: pubkey}
|
||||
|
||||
resp, err := h.LN.DisconnectPeer(ctxt, req)
|
||||
h.NoError(err, "DisconnectPeer")
|
||||
|
||||
return resp
|
||||
}
|
||||
|
||||
// DeleteAllPayments makes a RPC call to the node's DeleteAllPayments and
|
||||
// asserts.
|
||||
func (h *HarnessRPC) DeleteAllPayments() {
|
||||
ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
|
||||
defer cancel()
|
||||
|
||||
req := &lnrpc.DeleteAllPaymentsRequest{}
|
||||
_, err := h.LN.DeleteAllPayments(ctxt, req)
|
||||
h.NoError(err, "DeleteAllPayments")
|
||||
}
|
||||
|
||||
// GetInfo calls the GetInfo RPC on a given node and asserts there's no error.
|
||||
func (h *HarnessRPC) GetInfo() *lnrpc.GetInfoResponse {
|
||||
ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
|
||||
defer cancel()
|
||||
|
||||
info, err := h.LN.GetInfo(ctxt, &lnrpc.GetInfoRequest{})
|
||||
h.NoError(err, "GetInfo")
|
||||
|
||||
return info
|
||||
}
|
||||
|
||||
// ConnectPeer makes a RPC call to ConnectPeer and asserts there's no error.
|
||||
func (h *HarnessRPC) ConnectPeer(
|
||||
req *lnrpc.ConnectPeerRequest) *lnrpc.ConnectPeerResponse {
|
||||
|
||||
ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
|
||||
defer cancel()
|
||||
|
||||
resp, err := h.LN.ConnectPeer(ctxt, req)
|
||||
h.NoError(err, "ConnectPeer")
|
||||
|
||||
return resp
|
||||
}
|
||||
|
||||
// ListChannels lists the channels for the given node and asserts it's
|
||||
// successful.
|
||||
func (h *HarnessRPC) ListChannels(
|
||||
req *lnrpc.ListChannelsRequest) *lnrpc.ListChannelsResponse {
|
||||
|
||||
ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
|
||||
defer cancel()
|
||||
|
||||
resp, err := h.LN.ListChannels(ctxt, req)
|
||||
h.NoError(err, "ListChannels")
|
||||
|
||||
return resp
|
||||
}
|
||||
|
||||
// PendingChannels makes a RPC request to PendingChannels and asserts there's
|
||||
// no error.
|
||||
func (h *HarnessRPC) PendingChannels() *lnrpc.PendingChannelsResponse {
|
||||
ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
|
||||
defer cancel()
|
||||
|
||||
pendingChansRequest := &lnrpc.PendingChannelsRequest{}
|
||||
resp, err := h.LN.PendingChannels(ctxt, pendingChansRequest)
|
||||
h.NoError(err, "PendingChannels")
|
||||
|
||||
return resp
|
||||
}
|
||||
|
||||
// ClosedChannels makes a RPC call to node's ClosedChannels and asserts.
|
||||
func (h *HarnessRPC) ClosedChannels(
|
||||
req *lnrpc.ClosedChannelsRequest) *lnrpc.ClosedChannelsResponse {
|
||||
|
||||
ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
|
||||
defer cancel()
|
||||
|
||||
resp, err := h.LN.ClosedChannels(ctxt, req)
|
||||
h.NoError(err, "ClosedChannels")
|
||||
|
||||
return resp
|
||||
}
|
||||
|
||||
// ListPayments lists the node's payments and asserts.
|
||||
func (h *HarnessRPC) ListPayments(
|
||||
req *lnrpc.ListPaymentsRequest) *lnrpc.ListPaymentsResponse {
|
||||
|
||||
ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
|
||||
defer cancel()
|
||||
|
||||
resp, err := h.LN.ListPayments(ctxt, req)
|
||||
h.NoError(err, "ListPayments")
|
||||
|
||||
return resp
|
||||
}
|
||||
|
||||
// ListInvoices lists the node's invoices using the request and asserts.
|
||||
func (h *HarnessRPC) ListInvoices(
|
||||
req *lnrpc.ListInvoiceRequest) *lnrpc.ListInvoiceResponse {
|
||||
|
||||
ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
|
||||
defer cancel()
|
||||
|
||||
resp, err := h.LN.ListInvoices(ctxt, req)
|
||||
h.NoError(err, "ListInvoice")
|
||||
|
||||
return resp
|
||||
}
|
||||
|
||||
// DescribeGraph makes a RPC call to the node's DescribeGraph and asserts. The
// request indicates whether we want to include unannounced edges or not.
|
||||
func (h *HarnessRPC) DescribeGraph(
|
||||
req *lnrpc.ChannelGraphRequest) *lnrpc.ChannelGraph {
|
||||
|
||||
ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
|
||||
defer cancel()
|
||||
|
||||
resp, err := h.LN.DescribeGraph(ctxt, req)
|
||||
h.NoError(err, "DescribeGraph")
|
||||
|
||||
return resp
|
||||
}
|
||||
|
||||
// ChannelBalance gets the channel balance and asserts.
|
||||
func (h *HarnessRPC) ChannelBalance() *lnrpc.ChannelBalanceResponse {
|
||||
ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
|
||||
defer cancel()
|
||||
|
||||
req := &lnrpc.ChannelBalanceRequest{}
|
||||
resp, err := h.LN.ChannelBalance(ctxt, req)
|
||||
h.NoError(err, "ChannelBalance")
|
||||
|
||||
return resp
|
||||
}
|
||||
|
||||
type OpenChanClient lnrpc.Lightning_OpenChannelClient
|
||||
|
||||
// OpenChannel makes a rpc call to LightningClient and returns the open channel
|
||||
// client.
|
||||
func (h *HarnessRPC) OpenChannel(req *lnrpc.OpenChannelRequest) OpenChanClient {
|
||||
stream, err := h.LN.OpenChannel(h.runCtx, req)
|
||||
h.NoError(err, "OpenChannel")
|
||||
|
||||
return stream
|
||||
}
|
||||
|
||||
type CloseChanClient lnrpc.Lightning_CloseChannelClient
|
||||
|
||||
// CloseChannel makes a rpc call to LightningClient and returns the close
|
||||
// channel client.
|
||||
func (h *HarnessRPC) CloseChannel(
|
||||
req *lnrpc.CloseChannelRequest) CloseChanClient {
|
||||
|
||||
// Use runCtx here instead of a timeout context to keep the client
|
||||
// alive for the entire test case.
|
||||
stream, err := h.LN.CloseChannel(h.runCtx, req)
|
||||
h.NoError(err, "CloseChannel")
|
||||
|
||||
return stream
|
||||
}
|
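Because OpenChannel returns the raw stream, the caller drives it. A hedged sketch (the helper and its stopping condition are assumptions; OpenChanClient and HarnessRPC.NoError come from this diff, and GetChanPending is the generated lnrpc getter):

// waitChanPending is an illustrative helper: read updates from the open
// channel stream until the channel is reported as pending.
func waitChanPending(h *HarnessRPC,
	req *lnrpc.OpenChannelRequest) *lnrpc.PendingUpdate {

	stream := h.OpenChannel(req)
	for {
		update, err := stream.Recv()
		h.NoError(err, "OpenChannel stream recv")

		if pending := update.GetChanPending(); pending != nil {
			return pending
		}
	}
}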
5
lntemp/rpc/peers.go
Normal file
@ -0,0 +1,5 @@
|
||||
package rpc
|
||||
|
||||
// =====================
|
||||
// PeerClient related RPCs.
|
||||
// =====================
|
25
lntemp/rpc/router.go
Normal file
@ -0,0 +1,25 @@
|
||||
package rpc
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/lightningnetwork/lnd/lnrpc/routerrpc"
|
||||
)
|
||||
|
||||
// =====================
|
||||
// RouterClient related RPCs.
|
||||
// =====================
|
||||
|
||||
// UpdateChanStatus makes a UpdateChanStatus RPC call to node's RouterClient
|
||||
// and asserts.
|
||||
func (h *HarnessRPC) UpdateChanStatus(
|
||||
req *routerrpc.UpdateChanStatusRequest) *routerrpc.UpdateChanStatusResponse {
|
||||
|
||||
ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
|
||||
defer cancel()
|
||||
|
||||
resp, err := h.Router.UpdateChanStatus(ctxt, req)
|
||||
h.NoError(err, "UpdateChanStatus")
|
||||
|
||||
return resp
|
||||
}
|
5
lntemp/rpc/signer.go
Normal file
@ -0,0 +1,5 @@
|
||||
package rpc
|
||||
|
||||
// =====================
|
||||
// Signer related RPCs.
|
||||
// =====================
|
19
lntemp/rpc/state.go
Normal file
@ -0,0 +1,19 @@
|
||||
package rpc
|
||||
|
||||
import (
|
||||
"github.com/lightningnetwork/lnd/lnrpc"
|
||||
)
|
||||
|
||||
// =====================
|
||||
// StateClient related RPCs.
|
||||
// =====================
|
||||
|
||||
// SubscribeState makes a rpc call to StateClient and asserts there's no error.
|
||||
func (h *HarnessRPC) SubscribeState() lnrpc.State_SubscribeStateClient {
|
||||
client, err := h.State.SubscribeState(
|
||||
h.runCtx, &lnrpc.SubscribeStateRequest{},
|
||||
)
|
||||
h.NoError(err, "SubscribeState")
|
||||
|
||||
return client
|
||||
}
|
24
lntemp/rpc/wallet_kit.go
Normal file
@ -0,0 +1,24 @@
|
||||
package rpc
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/lightningnetwork/lnd/lnrpc/walletrpc"
|
||||
)
|
||||
|
||||
// =====================
|
||||
// WalletKitClient related RPCs.
|
||||
// =====================
|
||||
|
||||
// ListUnspent makes a RPC call to the node's ListUnspent and asserts.
|
||||
func (h *HarnessRPC) ListUnspent(
|
||||
req *walletrpc.ListUnspentRequest) *walletrpc.ListUnspentResponse {
|
||||
|
||||
ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
|
||||
defer cancel()
|
||||
|
||||
resp, err := h.WalletKit.ListUnspent(ctxt, req)
|
||||
h.NoError(err, "ListUnspent")
|
||||
|
||||
return resp
|
||||
}
|
25
lntemp/rpc/wallet_unlocker.go
Normal file
@ -0,0 +1,25 @@
|
||||
package rpc
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/lightningnetwork/lnd/lnrpc"
|
||||
)
|
||||
|
||||
// =====================
|
||||
// WalletUnlockerClient related RPCs.
|
||||
// =====================
|
||||
|
||||
// UnlockWallet makes a RPC request to WalletUnlocker and asserts there's no
|
||||
// error.
|
||||
func (h *HarnessRPC) UnlockWallet(
|
||||
req *lnrpc.UnlockWalletRequest) *lnrpc.UnlockWalletResponse {
|
||||
|
||||
ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
|
||||
defer cancel()
|
||||
|
||||
resp, err := h.WalletUnlocker.UnlockWallet(ctxt, req)
|
||||
h.NoError(err, "UnlockWallet")
|
||||
|
||||
return resp
|
||||
}
|
5
lntemp/rpc/watchtower.go
Normal file
@ -0,0 +1,5 @@
|
||||
package rpc
|
||||
|
||||
// =====================
|
||||
// WatchtowerClient and WatchtowerClientClient related RPCs.
|
||||
// =====================
|
46
lntemp/utils.go
Normal file
@ -0,0 +1,46 @@
|
||||
package lntemp
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
|
||||
"github.com/lightningnetwork/lnd/lntest"
|
||||
)
|
||||
|
||||
const (
|
||||
// NeutrinoBackendName is the name of the neutrino backend.
|
||||
NeutrinoBackendName = "neutrino"
|
||||
|
||||
// TODO(yy): delete.
|
||||
DefaultTimeout = lntest.DefaultTimeout
|
||||
)
|
||||
|
||||
// CopyFile copies the file src to dest.
|
||||
func CopyFile(dest, src string) error {
|
||||
s, err := os.Open(src)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer s.Close()
|
||||
|
||||
d, err := os.Create(dest)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err := io.Copy(d, s); err != nil {
|
||||
d.Close()
|
||||
return err
|
||||
}
|
||||
|
||||
return d.Close()
|
||||
}
|
||||
|
||||
// errNumNotMatched is a helper method to return a nicely formatted error.
|
||||
func errNumNotMatched(name string, subject string,
|
||||
want, got, total, old int) error {
|
||||
|
||||
return fmt.Errorf("%s: assert %s failed: want %d, got: %d, total: "+
|
||||
"%d, previously had: %d", name, subject, want, got, total, old)
|
||||
}
|
@ -609,7 +609,7 @@ func (hn *HarnessNode) startLnd(lndBinary string, lndError chan<- error) error {
|
||||
fileName string
|
||||
err error
|
||||
)
|
||||
if *logOutput {
|
||||
if *LogOutput {
|
||||
fileName, err = addLogFile(hn)
|
||||
if err != nil {
|
||||
return err
|
||||
@ -1843,7 +1843,7 @@ func finalizeLogfile(hn *HarnessNode, fileName string) {
|
||||
hn.logFile.Close()
|
||||
|
||||
// If logoutput flag is not set, return early.
|
||||
if !*logOutput {
|
||||
if !*LogOutput {
|
||||
return
|
||||
}
|
||||
|
||||
|
9
lntest/itest/list_off_test.go
Normal file
@ -0,0 +1,9 @@
|
||||
//go:build !rpctest
|
||||
// +build !rpctest
|
||||
|
||||
package itest
|
||||
|
||||
import "github.com/lightningnetwork/lnd/lntemp"
|
||||
|
||||
// TODO(yy): remove the temp.
|
||||
var allTestCasesTemp = []*lntemp.TestCase{}
|
14
lntest/itest/list_on_test.go
Normal file
@ -0,0 +1,14 @@
|
||||
//go:build rpctest
|
||||
// +build rpctest
|
||||
|
||||
package itest
|
||||
|
||||
import "github.com/lightningnetwork/lnd/lntemp"
|
||||
|
||||
// TODO(yy): remove the temp.
|
||||
var allTestCasesTemp = []*lntemp.TestCase{
|
||||
{
|
||||
Name: "update channel status",
|
||||
TestFunc: testUpdateChanStatus,
|
||||
},
|
||||
}
@ -15,6 +15,8 @@ import (
    "github.com/lightningnetwork/lnd/lnrpc"
    "github.com/lightningnetwork/lnd/lnrpc/peersrpc"
    "github.com/lightningnetwork/lnd/lnrpc/routerrpc"
    "github.com/lightningnetwork/lnd/lntemp"
    "github.com/lightningnetwork/lnd/lntemp/node"
    "github.com/lightningnetwork/lnd/lntest"
    "github.com/lightningnetwork/lnd/lntest/wait"
    "github.com/stretchr/testify/require"
@ -23,120 +25,60 @@ import (
// testUpdateChanStatus checks that calls to the UpdateChanStatus RPC update
// the channel graph as expected, and that channel state is properly updated
// in the presence of interleaved node disconnects / reconnects.
func testUpdateChanStatus(net *lntest.NetworkHarness, t *harnessTest) {
    ctxb := context.Background()

//
// NOTE: this test can be flaky as we are testing the chan-enable-timeout and
// chan-disable-timeout flags here.
//
// For chan-enable-timeout, setting this value too small will cause an enabled
// channel to never be considered active by our channel status manager. Upon
// reconnection, our Brontide will send a request to enable this channel after
// the "chan-enable-timeout" has passed. The request is handled by the channel
// status manager, which will check the channel's eligibility to forward links
// by asking htlcswitch/link. Meanwhile, the htlcswitch/link won't mark the
// link as eligible unless it has finished its initialization, which takes some
// time. Thus, if the Brontide sends a request too early it will get a false
// report saying the channel link is not eligible because that link hasn't
// finished its initialization.
//
// For chan-disable-timeout, setting this value too small will cause an already
// enabled channel to be marked as disabled. For instance, if some operations
// take more than 5 seconds to finish, the channel will be marked as disabled,
// thus a following operation will fail if it relies on the channel being
// enabled.
func testUpdateChanStatus(ht *lntemp.HarnessTest) {
    // Create two fresh nodes and open a channel between them.
    alice := net.NewNode(t.t, "Alice", []string{
        "--minbackoff=10s",
        "--chan-enable-timeout=1.5s",
        "--chan-disable-timeout=3s",
        "--chan-status-sample-interval=.5s",
    })
    defer shutdownAndAssert(net, t, alice)

    bob := net.NewNode(t.t, "Bob", []string{
        "--minbackoff=10s",
        "--chan-enable-timeout=1.5s",
        "--chan-disable-timeout=3s",
        "--chan-status-sample-interval=.5s",
    })
    defer shutdownAndAssert(net, t, bob)

    // Connect Alice to Bob.
    net.ConnectNodes(t.t, alice, bob)

    // Give Alice some coins so she can fund a channel.
    net.SendCoins(t.t, btcutil.SatoshiPerBitcoin, alice)
    alice, bob := ht.Alice, ht.Bob
    args := []string{
        "--minbackoff=60s",
        "--chan-enable-timeout=3s",
        "--chan-disable-timeout=6s",
        "--chan-status-sample-interval=0.5s",
    }
    ht.RestartNodeWithExtraArgs(alice, args)
    ht.RestartNodeWithExtraArgs(bob, args)
    ht.EnsureConnected(alice, bob)

    // Open a channel with 100k satoshis between Alice and Bob with Alice
    // being the sole funder of the channel.
    chanAmt := btcutil.Amount(100000)
    chanPoint := openChannelAndAssert(
        t, net, alice, bob, lntest.OpenChannelParams{
            Amt: chanAmt,
        },
    chanPoint := ht.OpenChannel(
        alice, bob, lntemp.OpenChannelParams{Amt: chanAmt},
    )
    defer ht.CloseChannel(alice, chanPoint, false)

    // Wait for Alice and Bob to receive the channel edge from the
    // funding manager.
    err := alice.WaitForNetworkChannelOpen(chanPoint)
    require.NoError(t.t, err, "alice didn't see the alice->bob channel")
    // assertEdgeDisabled ensures that Alice has the correct Disabled state
    // for given channel from her DescribeGraph.
    assertEdgeDisabled := func(disabled bool) {
        outPoint := ht.OutPointFromChannelPoint(chanPoint)

    err = bob.WaitForNetworkChannelOpen(chanPoint)
    require.NoError(t.t, err, "bob didn't see the alice->bob channel")

    // Launch a node for Carol which will connect to Alice and Bob in order
    // to receive graph updates. This will ensure that the channel updates
    // are propagated throughout the network.
    carol := net.NewNode(t.t, "Carol", nil)
    defer shutdownAndAssert(net, t, carol)

    // Connect both Alice and Bob to the new node Carol, so she can sync her
    // graph.
    net.ConnectNodes(t.t, alice, carol)
    net.ConnectNodes(t.t, bob, carol)
    waitForGraphSync(t, carol)

    // assertChannelUpdate checks that the required policy update has
    // happened on the given node.
    assertChannelUpdate := func(node *lntest.HarnessNode,
        policy *lnrpc.RoutingPolicy) {

        require.NoError(
            t.t, carol.WaitForChannelPolicyUpdate(
                node.PubKeyStr, policy, chanPoint, false,
            ), "error while waiting for channel update",
        )
    }

    // sendReq sends an UpdateChanStatus request to the given node.
    sendReq := func(node *lntest.HarnessNode, chanPoint *lnrpc.ChannelPoint,
        action routerrpc.ChanStatusAction) {

        req := &routerrpc.UpdateChanStatusRequest{
            ChanPoint: chanPoint,
            Action:    action,
        }
        ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout)
        defer cancel()

        _, err = node.RouterClient.UpdateChanStatus(ctxt, req)
        require.NoErrorf(t.t, err, "UpdateChanStatus")
    }

    // assertEdgeDisabled ensures that a given node has the correct
    // Disabled state for a channel.
    assertEdgeDisabled := func(node *lntest.HarnessNode,
        chanPoint *lnrpc.ChannelPoint, disabled bool) {

        outPoint, err := lntest.MakeOutpoint(chanPoint)
        require.NoError(t.t, err)

        err = wait.NoError(func() error {
            req := &lnrpc.ChannelGraphRequest{
                IncludeUnannounced: true,
            }
            ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout)
            defer cancel()

            chanGraph, err := node.DescribeGraph(ctxt, req)
            if err != nil {
                return fmt.Errorf("unable to query node %v's "+
                    "graph: %v", node, err)
            }
            numEdges := len(chanGraph.Edges)
            if numEdges != 1 {
                return fmt.Errorf("expected to find 1 edge in "+
                    "the graph, found %d", numEdges)
            }
            edge := chanGraph.Edges[0]
        err := wait.NoError(func() error {
            edge := ht.AssertNumEdges(alice, 1, true)[0]
            if edge.ChanPoint != outPoint.String() {
                return fmt.Errorf("expected chan_point %v, "+
                    "got %v", outPoint, edge.ChanPoint)
            }
            var policy *lnrpc.RoutingPolicy
            if node.PubKeyStr == edge.Node1Pub {
            if alice.PubKeyStr == edge.Node1Pub {
                policy = edge.Node1Policy
            } else {
                policy = edge.Node2Policy
@ -144,18 +86,61 @@ func testUpdateChanStatus(net *lntest.NetworkHarness, t *harnessTest) {
            if disabled != policy.Disabled {
                return fmt.Errorf("expected policy.Disabled "+
                    "to be %v, but policy was %v", disabled,
                    policy)
                    policy.Disabled)
            }

            return nil
        }, defaultTimeout)
        require.NoError(t.t, err)
        require.NoError(ht, err, "assert edge disabled timeout")
    }

    // aliceSendReq sends an UpdateChanStatus request to Alice.
    aliceSendReq := func(action routerrpc.ChanStatusAction) {
        req := &routerrpc.UpdateChanStatusRequest{
            ChanPoint: chanPoint,
            Action:    action,
        }
        alice.RPC.UpdateChanStatus(req)
    }

    // Initially, the channel between Alice and Bob should not be disabled.
    //
    // NOTE: This check should happen right after the channel opening as
    // we've used a short timeout value for `--chan-disable-timeout`. If we
    // wait longer than that we might get a flake saying the channel is
    // disabled.
    assertEdgeDisabled(false)

    // Launch a node for Carol which will connect to Alice and Bob in order
    // to receive graph updates. This will ensure that the channel updates
    // are propagated throughout the network.
    carol := ht.NewNode("Carol", nil)

    // assertChannelUpdate checks that the required policy update has been
    // heard in Carol's network.
    assertChannelUpdate := func(node *node.HarnessNode,
        policy *lnrpc.RoutingPolicy) {

        ht.AssertChannelPolicyUpdate(
            carol, node, policy, chanPoint, false,
        )
    }

    // Connect both Alice and Bob to the new node Carol, so she can sync
    // her graph.
    ht.ConnectNodes(alice, carol)
    ht.ConnectNodes(bob, carol)
    ht.WaitForGraphSync(carol)

    // If the above waitForGraphSync takes more than 4 seconds, the channel
    // Alice=>Bob will be marked as disabled now. Thus we connect Alice and
    // Bob again to make sure the channel is alive.
    ht.EnsureConnected(alice, bob)

    // When updating the state of the channel between Alice and Bob, we
    // should expect to see channel updates with the default routing
    // policy. The value of "Disabled" will depend on the specific
    // scenario being tested.
    // policy. The value of "Disabled" will depend on the specific scenario
    // being tested.
    expectedPolicy := &lnrpc.RoutingPolicy{
        FeeBaseMsat:      int64(chainreg.DefaultBitcoinBaseFeeMSat),
        FeeRateMilliMsat: int64(chainreg.DefaultBitcoinFeeRate),
@ -164,83 +149,75 @@ func testUpdateChanStatus(net *lntest.NetworkHarness, t *harnessTest) {
        MaxHtlcMsat:      calculateMaxHtlc(chanAmt),
    }

    // Initially, the channel between Alice and Bob should not be
    // disabled.
    assertEdgeDisabled(alice, chanPoint, false)

    // Manually disable the channel and ensure that a "Disabled = true"
    // update is propagated.
    sendReq(alice, chanPoint, routerrpc.ChanStatusAction_DISABLE)
    aliceSendReq(routerrpc.ChanStatusAction_DISABLE)
    expectedPolicy.Disabled = true
    assertChannelUpdate(alice, expectedPolicy)

    // Re-enable the channel and ensure that a "Disabled = false" update
    // is propagated.
    sendReq(alice, chanPoint, routerrpc.ChanStatusAction_ENABLE)
    // Re-enable the channel and ensure that a "Disabled = false" update is
    // propagated.
    aliceSendReq(routerrpc.ChanStatusAction_ENABLE)
    expectedPolicy.Disabled = false
    assertChannelUpdate(alice, expectedPolicy)

    // Manually enabling a channel should NOT prevent subsequent
    // disconnections from automatically disabling the channel again
    // (we don't want to clutter the network with channels that are
    // falsely advertised as enabled when they don't work).
    require.NoError(t.t, net.DisconnectNodes(alice, bob))
    // disconnections from automatically disabling the channel again (we
    // don't want to clutter the network with channels that are falsely
    // advertised as enabled when they don't work).
    ht.DisconnectNodes(alice, bob)
    expectedPolicy.Disabled = true
    assertChannelUpdate(alice, expectedPolicy)
    assertChannelUpdate(bob, expectedPolicy)

    // Reconnecting the nodes should propagate a "Disabled = false" update.
    net.EnsureConnected(t.t, alice, bob)
    ht.EnsureConnected(alice, bob)
    expectedPolicy.Disabled = false
    assertChannelUpdate(alice, expectedPolicy)
    assertChannelUpdate(bob, expectedPolicy)

    // Manually disabling the channel should prevent a subsequent
    // disconnect / reconnect from re-enabling the channel on
    // Alice's end. Note the asymmetry between manual enable and
    // manual disable!
    sendReq(alice, chanPoint, routerrpc.ChanStatusAction_DISABLE)
    // disconnect/reconnect from re-enabling the channel on Alice's end.
    // Note the asymmetry between manual enable and manual disable!
    aliceSendReq(routerrpc.ChanStatusAction_DISABLE)

    // Alice sends out the "Disabled = true" update in response to
    // the ChanStatusAction_DISABLE request.
    // Alice sends out the "Disabled = true" update in response to the
    // ChanStatusAction_DISABLE request.
    expectedPolicy.Disabled = true
    assertChannelUpdate(alice, expectedPolicy)

    require.NoError(t.t, net.DisconnectNodes(alice, bob))
    ht.DisconnectNodes(alice, bob)

    // Bob sends a "Disabled = true" update upon detecting the
    // disconnect.
    // Bob sends a "Disabled = true" update upon detecting the disconnect.
    expectedPolicy.Disabled = true
    assertChannelUpdate(bob, expectedPolicy)

    // Bob sends a "Disabled = false" update upon detecting the
    // reconnect.
    net.EnsureConnected(t.t, alice, bob)
    // Bob sends a "Disabled = false" update upon detecting the reconnect.
    ht.EnsureConnected(alice, bob)
    expectedPolicy.Disabled = false
    assertChannelUpdate(bob, expectedPolicy)

    // However, since we manually disabled the channel on Alice's end,
    // the policy on Alice's end should still be "Disabled = true". Again,
    // note the asymmetry between manual enable and manual disable!
    assertEdgeDisabled(alice, chanPoint, true)
    // However, since we manually disabled the channel on Alice's end, the
    // policy on Alice's end should still be "Disabled = true". Again, note
    // the asymmetry between manual enable and manual disable!
    assertEdgeDisabled(true)

    require.NoError(t.t, net.DisconnectNodes(alice, bob))
    ht.DisconnectNodes(alice, bob)

    // Bob sends a "Disabled = true" update upon detecting the
    // disconnect.
    // Bob sends a "Disabled = true" update upon detecting the disconnect.
    expectedPolicy.Disabled = true
    assertChannelUpdate(bob, expectedPolicy)

    // After restoring automatic channel state management on Alice's end,
    // BOTH Alice and Bob should set the channel state back to "enabled"
    // on reconnect.
    sendReq(alice, chanPoint, routerrpc.ChanStatusAction_AUTO)
    net.EnsureConnected(t.t, alice, bob)
    // BOTH Alice and Bob should set the channel state back to "enabled" on
    // reconnect.
    aliceSendReq(routerrpc.ChanStatusAction_AUTO)
    ht.EnsureConnected(alice, bob)

    expectedPolicy.Disabled = false
    assertChannelUpdate(alice, expectedPolicy)
    assertChannelUpdate(bob, expectedPolicy)
    assertEdgeDisabled(alice, chanPoint, false)
    assertEdgeDisabled(false)
}

// testUnannouncedChannels checks unannounced channels are not returned by
@ -13,44 +13,19 @@ import (
    "github.com/stretchr/testify/require"
)

const (
    // defaultSplitTranches is the default number of tranches we split the
    // test cases into.
    defaultSplitTranches uint = 1

    // defaultRunTranche is the default index of the test cases tranche that
    // we run.
    defaultRunTranche uint = 0
)

var (
    // testCasesSplitParts is the number of tranches the test cases should
    // be split into. By default this is set to 1, so no splitting happens.
    // If this value is increased, then the -runtranche flag must be
    // specified as well to indicate which part should be run in the current
    // invocation.
    testCasesSplitTranches = flag.Uint(
        "splittranches", defaultSplitTranches, "split the test cases "+
            "in this many tranches and run the tranche at "+
            "0-based index specified by the -runtranche flag",
    )

    // testCasesRunTranche is the 0-based index of the split test cases
    // tranche to run in the current invocation.
    testCasesRunTranche = flag.Uint(
        "runtranche", defaultRunTranche, "run the tranche of the "+
            "split test cases with the given (0-based) index",
    )

    // dbBackendFlag specifies the backend to use.
    dbBackendFlag = flag.String("dbbackend", "bbolt", "Database backend "+
        "(bbolt, etcd, postgres)")
    // tempTest is a flag used to mark whether we should run the old or the
    // new test cases. Used here so we can transition smoothly during our
    // new itest construction.
    //
    // TODO(yy): remove temp flag.
    tempTest = flag.Bool("temptest", false, "run the new tests (temp)")
)

// getTestCaseSplitTranche returns the sub slice of the test cases that should
// be run as the current split tranche as well as the index and slice offset of
// the tranche.
func getTestCaseSplitTranche() ([]*testCase, uint, uint) {
func getTestCaseSplitTrancheOld() ([]*testCase, uint, uint) {
    numTranches := defaultSplitTranches
    if testCasesSplitTranches != nil {
        numTranches = *testCasesSplitTranches
@ -83,6 +58,10 @@ func getTestCaseSplitTranche() ([]*testCase, uint, uint) {
// TestLightningNetworkDaemon performs a series of integration tests amongst a
// programmatically driven network of lnd nodes.
func TestLightningNetworkDaemon(t *testing.T) {
    if *tempTest {
        t.Skip("Running new tests, old tests are skipped")
    }

    // If no tests are registered, then we can exit early.
    if len(allTestCases) == 0 {
        t.Skip("integration tests not selected with flag 'rpctest'")
@ -91,7 +70,7 @@ func TestLightningNetworkDaemon(t *testing.T) {
    // Parse testing flags that influence our test execution.
    logDir := lntest.GetLogDir()
    require.NoError(t, os.MkdirAll(logDir, 0700))
    testCases, trancheIndex, trancheOffset := getTestCaseSplitTranche()
    testCases, trancheIndex, trancheOffset := getTestCaseSplitTrancheOld()
    lntest.ApplyPortOffset(uint32(trancheIndex) * 1000)

    // Before we start any node, we need to make sure that any btcd node
@ -88,10 +88,6 @@ var allTestCases = []*testCase{
        name: "list channels",
        test: testListChannels,
    },
    {
        name: "update channel status",
        test: testUpdateChanStatus,
    },
    {
        name: "test update node announcement rpc",
        test: testUpdateNodeAnnouncement,
201 lntest/itest/temp_lnd_test.go Normal file
@ -0,0 +1,201 @@
package itest

import (
    "flag"
    "fmt"
    "io"
    "os"
    "path/filepath"
    "runtime"
    "strings"
    "testing"
    "time"

    "github.com/btcsuite/btcd/integration/rpctest"
    "github.com/lightningnetwork/lnd/lntemp"
    "github.com/lightningnetwork/lnd/lntest"
    "github.com/stretchr/testify/require"
    "google.golang.org/grpc/grpclog"
)

const (
    // defaultSplitTranches is the default number of tranches we split the
    // test cases into.
    defaultSplitTranches uint = 1

    // defaultRunTranche is the default index of the test cases tranche that
    // we run.
    defaultRunTranche uint = 0
)

var (
    // testCasesSplitParts is the number of tranches the test cases should
    // be split into. By default this is set to 1, so no splitting happens.
    // If this value is increased, then the -runtranche flag must be
    // specified as well to indicate which part should be run in the current
    // invocation.
    testCasesSplitTranches = flag.Uint(
        "splittranches", defaultSplitTranches, "split the test cases "+
            "in this many tranches and run the tranche at "+
            "0-based index specified by the -runtranche flag",
    )

    // testCasesRunTranche is the 0-based index of the split test cases
    // tranche to run in the current invocation.
    testCasesRunTranche = flag.Uint(
        "runtranche", defaultRunTranche, "run the tranche of the "+
            "split test cases with the given (0-based) index",
    )

    // dbBackendFlag specifies the backend to use.
    dbBackendFlag = flag.String("dbbackend", "bbolt", "Database backend "+
        "(bbolt, etcd, postgres)")
)

// TestLightningNetworkDaemonTemp performs a series of integration tests
// amongst a programmatically driven network of lnd nodes.
func TestLightningNetworkDaemonTemp(t *testing.T) {
    if !*tempTest {
        t.Skip("Running old tests, new tests are skipped")
    }

    // If no tests are registered, then we can exit early.
    if len(allTestCasesTemp) == 0 {
        t.Skip("integration tests not selected with flag 'rpctest'")
    }

    // Get the test cases to be run in this tranche.
    testCases, trancheIndex, trancheOffset := getTestCaseSplitTranche()
    lntest.ApplyPortOffset(uint32(trancheIndex) * 1000)

    // Create a simple fee service.
    feeService := lntemp.NewFeeService(t)

    // Get the binary path and setup the harness test.
    binary := getLndBinary(t)
    harnessTest := lntemp.SetupHarness(
        t, binary, *dbBackendFlag, feeService,
    )
    defer harnessTest.Stop()

    // Setup standby nodes, Alice and Bob, which will be alive and shared
    // among all the test cases.
    harnessTest.SetupStandbyNodes()

    // Run the subset of the test cases selected in this tranche.
    for idx, testCase := range testCases {
        testCase := testCase
        name := fmt.Sprintf("tranche%02d/%02d-of-%d/%s/%s",
            trancheIndex, trancheOffset+uint(idx)+1,
            len(allTestCases), harnessTest.ChainBackendName(),
            testCase.Name)

        success := t.Run(name, func(t1 *testing.T) {
            // Create a separate harness test for the testcase to
            // avoid overwriting the external harness test that is
            // tied to the parent test.
            ht, cleanup := harnessTest.Subtest(t1)
            defer cleanup()

            cleanTestCaseName := strings.ReplaceAll(
                testCase.Name, " ", "_",
            )
            ht.SetTestName(cleanTestCaseName)

            logLine := fmt.Sprintf(
                "STARTING ============ %v ============\n",
                testCase.Name,
            )

            ht.Alice.AddToLogf(logLine)
            ht.Bob.AddToLogf(logLine)

            ht.EnsureConnected(ht.Alice, ht.Bob)

            ht.RunTestCase(testCase)
        })

        // Stop at the first failure. Mimic behavior of original test
        // framework.
        if !success {
            // Log failure time to help relate the lnd logs to the
            // failure.
            t.Logf("Failure time: %v", time.Now().Format(
                "2006-01-02 15:04:05.000",
            ))
            break
        }
    }

    _, height := harnessTest.Miner.GetBestBlock()
    t.Logf("=========> tests finished for tranche: %v, tested %d "+
        "cases, end height: %d\n", trancheIndex, len(testCases), height)
}

// getTestCaseSplitTranche returns the sub slice of the test cases that should
// be run as the current split tranche as well as the index and slice offset of
// the tranche.
func getTestCaseSplitTranche() ([]*lntemp.TestCase, uint, uint) {
    numTranches := defaultSplitTranches
    if testCasesSplitTranches != nil {
        numTranches = *testCasesSplitTranches
    }
    runTranche := defaultRunTranche
    if testCasesRunTranche != nil {
        runTranche = *testCasesRunTranche
    }

    // There's a special flake-hunt mode where we run the same test multiple
    // times in parallel. In that case the tranche index is equal to the
    // thread ID, but we need to actually run all tests for the regex
    // selection to work.
    threadID := runTranche
    if numTranches == 1 {
        runTranche = 0
    }

    numCases := uint(len(allTestCasesTemp))
    testsPerTranche := numCases / numTranches
    trancheOffset := runTranche * testsPerTranche
    trancheEnd := trancheOffset + testsPerTranche
    if trancheEnd > numCases || runTranche == numTranches-1 {
        trancheEnd = numCases
    }

    return allTestCasesTemp[trancheOffset:trancheEnd], threadID,
        trancheOffset
}
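The tranche split above is plain integer division with a catch-all last tranche. A small, self-contained worked example (all numbers are illustrative, not taken from the test suite):

package main

import "fmt"

// Illustrative only: with 10 test cases split into 4 tranches,
// testsPerTranche = 10/4 = 2, so tranches 0, 1 and 2 get cases [0,2),
// [2,4) and [4,6), while the last tranche picks up the remainder [6,10).
func main() {
    numCases, numTranches := uint(10), uint(4)
    testsPerTranche := numCases / numTranches

    for runTranche := uint(0); runTranche < numTranches; runTranche++ {
        start := runTranche * testsPerTranche
        end := start + testsPerTranche
        if end > numCases || runTranche == numTranches-1 {
            end = numCases
        }
        fmt.Printf("tranche %d runs cases [%d, %d)\n", runTranche, start, end)
    }
}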

func getLndBinary(t *testing.T) string {
    binary := itestLndBinary
    lndExec := ""
    if lndExecutable != nil && *lndExecutable != "" {
        lndExec = *lndExecutable
    }
    if lndExec == "" && runtime.GOOS == "windows" {
        // Windows (even in a bash-like environment like git bash as on
        // Travis) doesn't seem to like relative paths to exe files...
        currentDir, err := os.Getwd()
        require.NoError(t, err, "unable to get working directory")

        targetPath := filepath.Join(currentDir, "../../lnd-itest.exe")
        binary, err = filepath.Abs(targetPath)
        require.NoError(t, err, "unable to get absolute path")
    } else if lndExec != "" {
        binary = lndExec
    }

    return binary
}

func init() {
    // Before we start any node, we need to make sure that any btcd node
    // that is started through the RPC harness uses a unique port as well
    // to avoid any port collisions.
    rpctest.ListenAddressGenerator = lntest.GenerateBtcdListenerAddresses

    // Swap out grpc's default logger with our fake logger which drops the
    // statements on the floor.
    fakeLogger := grpclog.NewLoggerV2(io.Discard, io.Discard, io.Discard)
    grpclog.SetLoggerV2(fakeLogger)
}
@ -42,7 +42,9 @@ var (

    // logOutput is a flag that can be set to append the output from the
    // seed nodes to log files.
    logOutput = flag.Bool("logoutput", false,
    //
    // TODO(yy): remove the export.
    LogOutput = flag.Bool("logoutput", false,
        "log output from node n to file output-n.log")

    // logSubDir is the default directory where the logs are written to if
@ -15,17 +15,26 @@ const PollInterval = 200 * time.Millisecond
// some property is upheld within a particular time frame.
func Predicate(pred func() bool, timeout time.Duration) error {
    exitTimer := time.After(timeout)
    result := make(chan bool, 1)

    for {
        <-time.After(PollInterval)

        go func() {
            result <- pred()
        }()

        // Each time we call the pred(), we expect a result to be
        // returned otherwise it will timeout.
        select {
        case <-exitTimer:
            return fmt.Errorf("predicate not satisfied after time out")
        default:
        }
            return fmt.Errorf("predicate not satisfied after " +
                "time out")

        if pred() {
            return nil
        case succeed := <-result:
            if succeed {
                return nil
            }
        }
    }
}
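The rewrite moves the pred() call into a goroutine so a slow predicate can no longer delay the timeout check; callers are unchanged. A minimal usage sketch, assuming the standard imports (testing, lntest, lntest/wait, require) and a hypothetical helper name:

// assertEventually is a hypothetical helper showing the typical call
// pattern: poll a boolean condition until it holds or the timeout fires.
func assertEventually(t *testing.T, cond func() bool) {
    t.Helper()

    err := wait.Predicate(cond, lntest.DefaultTimeout)
    require.NoError(t, err, "condition never became true")
}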
@ -9,6 +9,11 @@ NUM_ITEST_TRANCHES = 4
ITEST_PARALLELISM = $(NUM_ITEST_TRANCHES)
POSTGRES_START_DELAY = 5

# Build temp tests only. TODO(yy): remove.
ifneq ($(temptest),)
ITEST_FLAGS += -temptest=$(temptest)
endif

# If rpc option is set also add all extra RPC tags to DEV_TAGS
ifneq ($(with-rpc),)
DEV_TAGS += $(RPC_TAGS)
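As a usage note on the temptest block above: any value passed to make as temptest=<v> is appended to ITEST_FLAGS as -temptest=<v>, which flips the tempTest Go flag so that TestLightningNetworkDaemonTemp runs and the old TestLightningNetworkDaemon is skipped. Something like make itest temptest=true would exercise this path, though the exact itest target name is an assumption about the rest of the Makefile rather than part of this change.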