Merge pull request #6824 from yyforyongyu/5-new-itest

itest: continued itest refactor and fix - IV
This commit is contained in:
Oliver Gugger 2023-01-18 22:14:29 +01:00 committed by GitHub
commit 48c8c1bf48
35 changed files with 4029 additions and 4946 deletions


@ -369,7 +369,8 @@ PRs([6776](https://github.com/lightningnetwork/lnd/pull/6776),
[7172](https://github.com/lightningnetwork/lnd/pull/7172),
[7242](https://github.com/lightningnetwork/lnd/pull/7242),
[7245](https://github.com/lightningnetwork/lnd/pull/7245),
[6823](https://github.com/lightningnetwork/lnd/pull/6823),
[6824](https://github.com/lightningnetwork/lnd/pull/6824)) have been made to
refactor the itest for code health and maintenance.
# Contributors (Alphabetical Order)


@ -4,7 +4,6 @@ import (
"context"
"encoding/hex"
"fmt"
"sync"
"testing"
"time"
@ -676,6 +675,32 @@ func (h *HarnessTest) NewNodeWithSeedEtcd(name string, etcdCfg *etcd.Config,
return h.newNodeWithSeed(name, extraArgs, req, statelessInit)
}
// NewNodeRemoteSigner creates a new remote signer node and asserts its
// creation.
func (h *HarnessTest) NewNodeRemoteSigner(name string, extraArgs []string,
password []byte, watchOnly *lnrpc.WatchOnly) *node.HarnessNode {
hn, err := h.manager.newNode(h.T, name, extraArgs, password, true)
require.NoErrorf(h, err, "unable to create new node for %s", name)
err = hn.StartWithNoAuth(h.runCtx)
require.NoError(h, err, "failed to start node %s", name)
// Construct the init request to the node, including the watch-only
// key settings.
initReq := &lnrpc.InitWalletRequest{
WalletPassword: password,
WatchOnly: watchOnly,
}
// Pass the init request via rpc to finish unlocking the node. This
// will also initialize the macaroon-authenticated LightningClient.
_, err = h.manager.initWalletAndNode(hn, initReq)
require.NoErrorf(h, err, "failed to init node %s", name)
return hn
}
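// A minimal usage sketch (helper name and flags illustrative, not part of
// this diff): a watch-only node backed by an existing signer node, whose
// account data is assumed to have been exported via the signer's
// ListAccounts RPC.
func exampleRemoteSigner(ht *HarnessTest, password []byte,
	accounts []*lnrpc.WatchOnlyAccount) *node.HarnessNode {

	// The watch-only node holds no private keys itself; signing is
	// delegated to the signer configured via extra args (connection
	// details elided here).
	watchOnly := &lnrpc.WatchOnly{
		MasterKeyBirthdayTimestamp: 0,
		Accounts:                   accounts,
	}

	return ht.NewNodeRemoteSigner(
		"WatchOnly", []string{"--remotesigner.enable"}, password,
		watchOnly,
	)
}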
// KillNode kills the node (but won't wait for the node process to stop).
func (h *HarnessTest) KillNode(hn *node.HarnessNode) {
require.NoErrorf(h, hn.Kill(), "%s: kill got error", hn.Name())
@ -797,6 +822,26 @@ type OpenChannelParams struct {
// ScidAlias denotes whether the channel will negotiate the
// option-scid-alias channel type.
ScidAlias bool
// BaseFee is the channel base fee applied during the channel
// announcement phase.
BaseFee uint64
// FeeRate is the channel fee rate in ppm applied during the channel
// announcement phase.
FeeRate uint64
// UseBaseFee, if set, instructs the downstream logic to apply the
// user-specified channel base fee to the channel update announcement.
// If set to false it avoids applying a base fee of 0 and instead
// activates the default configured base fee.
UseBaseFee bool
// UseFeeRate, if set, instructs the downstream logic to apply the
// user-specified channel fee rate to the channel update announcement.
// If set to false it avoids applying a fee rate of 0 and instead
// activates the default configured fee rate.
UseFeeRate bool
}
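// A sketch of the new fee fields in action (values illustrative): with both
// Use flags set, the channel announcement carries the custom fees instead of
// the node's configured defaults.
func exampleOpenWithFees(ht *HarnessTest,
	alice, bob *node.HarnessNode) *lnrpc.ChannelPoint {

	return ht.OpenChannel(alice, bob, OpenChannelParams{
		Amt:        btcutil.Amount(300_000),
		BaseFee:    1_500, // msat
		FeeRate:    2_000, // ppm
		UseBaseFee: true,
		UseFeeRate: true,
	})
}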
// prepareOpenChannel waits for both nodes to be synced to chain and returns an
@ -833,6 +878,10 @@ func (h *HarnessTest) prepareOpenChannel(srcNode, destNode *node.HarnessNode,
CommitmentType: p.CommitmentType,
ZeroConf: p.ZeroConf,
ScidAlias: p.ScidAlias,
BaseFee: p.BaseFee,
FeeRate: p.FeeRate,
UseBaseFee: p.UseBaseFee,
UseFeeRate: p.UseFeeRate,
}
}
@ -886,24 +935,76 @@ func (h *HarnessTest) OpenChannelAssertStream(srcNode,
}
// OpenChannel attempts to open a channel with the specified parameters
// extended from Alice to Bob. Additionally, for public channels, it will mine
// extra blocks so they are announced to the network. Specifically, the
// following items are asserted,
// - for a non-zero-conf channel, 1 block will be mined to confirm the funding
// tx.
// - both nodes should see the channel edge update in their network graph.
// - both nodes can report the status of the new channel from ListChannels.
// - extra blocks are mined if it's a public channel.
func (h *HarnessTest) OpenChannel(alice, bob *node.HarnessNode,
p OpenChannelParams) *lnrpc.ChannelPoint {
// First, open the channel without announcing it.
cp := h.OpenChannelNoAnnounce(alice, bob, p)
// If this is a private channel, there's no need to mine extra blocks
// since it will never be announced to the network.
if p.Private {
return cp
}
// Mine extra blocks to announce the channel.
if p.ZeroConf {
// For a zero-conf channel, no blocks have been mined so we
// need to mine 6 blocks.
//
// Mine 1 block to confirm the funding transaction.
h.MineBlocksAndAssertNumTxes(numBlocksOpenChannel, 1)
} else {
// For a regular channel, 1 block has already been mined to
// confirm the funding transaction, so we mine 5 blocks.
h.MineBlocks(numBlocksOpenChannel - 1)
}
return cp
}
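// For contrast, a sketch of a private open (helper name illustrative): since
// the channel is never announced, OpenChannel returns right after the
// confirmation assertions without mining the extra announcement blocks.
func exampleOpenPrivate(ht *HarnessTest,
	alice, bob *node.HarnessNode) *lnrpc.ChannelPoint {

	return ht.OpenChannel(alice, bob, OpenChannelParams{
		Amt:     btcutil.Amount(100_000),
		Private: true,
	})
}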
// OpenChannelNoAnnounce attempts to open a channel with the specified
// parameters extended from Alice to Bob without mining the necessary blocks to
// announce the channel. Additionally, the following items are asserted,
// - for a non-zero-conf channel, 1 block will be mined to confirm the funding
// tx.
// - both nodes should see the channel edge update in their network graph.
// - both nodes can report the status of the new channel from ListChannels.
func (h *HarnessTest) OpenChannelNoAnnounce(alice, bob *node.HarnessNode,
p OpenChannelParams) *lnrpc.ChannelPoint {
chanOpenUpdate := h.OpenChannelAssertStream(alice, bob, p)
// Open a zero conf channel.
if p.ZeroConf {
return h.openChannelZeroConf(alice, bob, chanOpenUpdate)
}
// Open a non-zero conf channel.
return h.openChannel(alice, bob, chanOpenUpdate)
}
// openChannel attempts to open a channel with the specified parameters
// extended from Alice to Bob. Additionally, the following items are asserted,
// - 1 block is mined and the funding transaction should be found in it.
// - both nodes should see the channel edge update in their network graph.
// - both nodes can report the status of the new channel from ListChannels.
func (h *HarnessTest) openChannel(alice, bob *node.HarnessNode,
stream rpc.OpenChanClient) *lnrpc.ChannelPoint {
// Mine 1 block to confirm the funding transaction.
block := h.MineBlocksAndAssertNumTxes(1, 1)[0]
// Wait for the channel open event.
fundingChanPoint := h.WaitForChannelOpenEvent(stream)
// Check that the funding tx is found in the first block.
fundingTxID := h.GetChanPointFundingTxid(fundingChanPoint)
@ -918,9 +1019,27 @@ func (h *HarnessTest) OpenChannel(alice, bob *node.HarnessNode,
h.AssertChannelExists(alice, fundingChanPoint)
h.AssertChannelExists(bob, fundingChanPoint)
// Finally, check the blocks are synced.
h.WaitForBlockchainSync(alice)
h.WaitForBlockchainSync(bob)
return fundingChanPoint
}
// openChannelZeroConf attempts to open a channel with the specified parameters
// extended from Alice to Bob. Additionally, the following items are asserted,
// - both nodes should see the channel edge update in their network graph.
// - both nodes can report the status of the new channel from ListChannels.
func (h *HarnessTest) openChannelZeroConf(alice, bob *node.HarnessNode,
stream rpc.OpenChanClient) *lnrpc.ChannelPoint {
// Wait for the channel open event.
fundingChanPoint := h.WaitForChannelOpenEvent(stream)
// Check that both alice and bob have seen the channel from their
// network topology.
h.AssertTopologyChannelOpen(alice, fundingChanPoint)
h.AssertTopologyChannelOpen(bob, fundingChanPoint)
// Finally, check that the channel can be seen in their ListChannels.
h.AssertChannelExists(alice, fundingChanPoint)
h.AssertChannelExists(bob, fundingChanPoint)
return fundingChanPoint
}
@ -1145,36 +1264,54 @@ func (h *HarnessTest) FundCoinsP2TR(amt btcutil.Amount,
h.fundCoins(amt, target, lnrpc.AddressType_TAPROOT_PUBKEY, true)
}
// completePaymentRequestsAssertStatus sends payments from a node to complete
// all payment requests. This function does not return until all payments
// have reached the specified status.
func (h *HarnessTest) completePaymentRequestsAssertStatus(hn *node.HarnessNode,
paymentRequests []string, status lnrpc.Payment_PaymentStatus) {
// Create a buffered chan to signal the results.
results := make(chan rpc.PaymentClient, len(paymentRequests))
// send sends a payment and signals the result stream on the channel.
send := func(payReq string) {
req := &routerrpc.SendPaymentRequest{
PaymentRequest: payReq,
TimeoutSeconds: defaultPaymentTimeout,
FeeLimitMsat: noFeeLimitMsat,
}
stream := hn.RPC.SendPayment(req)
// Signal that the payment has been sent.
results <- stream
}
// Launch all payments simultaneously.
for _, payReq := range paymentRequests {
payReqCopy := payReq
go send(payReqCopy)
}
// Wait for all payments to report the expected status.
timer := time.After(DefaultTimeout)
select {
case stream := <-results:
h.AssertPaymentStatusFromStream(stream, status)
case <-timer:
require.Fail(h, "timeout", "waiting payment results timeout")
}
}
// CompletePaymentRequests sends payments from a node to complete all payment
// requests. This function does not return until all payments successfully
// complete without errors.
func (h *HarnessTest) CompletePaymentRequests(hn *node.HarnessNode,
paymentRequests []string) {
h.completePaymentRequestsAssertStatus(
hn, paymentRequests, lnrpc.Payment_SUCCEEDED,
)
}
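// A usage sketch (names illustrative): create two invoices on Bob and settle
// both from Alice; the call blocks until both payments report SUCCEEDED.
func examplePayInvoices(ht *HarnessTest, alice, bob *node.HarnessNode) {
	inv1 := bob.RPC.AddInvoice(&lnrpc.Invoice{Value: 1_000})
	inv2 := bob.RPC.AddInvoice(&lnrpc.Invoice{Value: 1_000})

	ht.CompletePaymentRequests(alice, []string{
		inv1.PaymentRequest, inv2.PaymentRequest,
	})
}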
// CompletePaymentRequestsNoWait sends payments from a node to complete all
@ -1188,21 +1325,10 @@ func (h *HarnessTest) CompletePaymentRequestsNoWait(hn *node.HarnessNode,
// we return.
oldResp := h.GetChannelByChanPoint(hn, chanPoint)
// Send payments and assert they are in-flight.
h.completePaymentRequestsAssertStatus(
hn, paymentRequests, lnrpc.Payment_IN_FLIGHT,
)
// We are not waiting for feedback in the form of a response, but we
// should still wait long enough for the server to receive and handle
@ -1797,3 +1923,92 @@ func (h *HarnessTest) QueryRoutesAndRetry(hn *node.HarnessNode,
return routes
}
// ReceiveHtlcInterceptor waits until a message is received on the htlc
// interceptor stream or the timeout is reached.
func (h *HarnessTest) ReceiveHtlcInterceptor(
stream rpc.InterceptorClient) *routerrpc.ForwardHtlcInterceptRequest {
chanMsg := make(chan *routerrpc.ForwardHtlcInterceptRequest)
errChan := make(chan error)
go func() {
// Consume one message. This will block until the message is
// received.
resp, err := stream.Recv()
if err != nil {
errChan <- err
return
}
chanMsg <- resp
}()
select {
case <-time.After(DefaultTimeout):
require.Fail(h, "timeout", "timeout intercepting htlc")
case err := <-errChan:
require.Failf(h, "err from stream",
"received err from stream: %v", err)
case updateMsg := <-chanMsg:
return updateMsg
}
return nil
}
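// A usage sketch (assumed flow): register an interceptor on the forwarding
// node, wait for a packet, then resume it unchanged.
func exampleResumeHtlc(ht *HarnessTest, bob *node.HarnessNode) {
	interceptor, cancel := bob.RPC.HtlcInterceptor()
	defer cancel()

	// Blocks until the next forwarded HTLC hits the interceptor.
	packet := ht.ReceiveHtlcInterceptor(interceptor)

	// Let the HTLC through without modification.
	err := interceptor.Send(&routerrpc.ForwardHtlcInterceptResponse{
		IncomingCircuitKey: packet.IncomingCircuitKey,
		Action:             routerrpc.ResolveHoldForwardAction_RESUME,
	})
	require.NoError(ht, err)
}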
// ReceiveChannelEvent waits until a message is received from the
// ChannelEventsClient stream or the timeout is reached.
func (h *HarnessTest) ReceiveChannelEvent(
stream rpc.ChannelEventsClient) *lnrpc.ChannelEventUpdate {
chanMsg := make(chan *lnrpc.ChannelEventUpdate)
errChan := make(chan error)
go func() {
// Consume one message. This will block until the message is
// received.
resp, err := stream.Recv()
if err != nil {
errChan <- err
return
}
chanMsg <- resp
}()
select {
case <-time.After(DefaultTimeout):
require.Fail(h, "timeout", "timeout waiting for channel event")
case err := <-errChan:
require.Failf(h, "err from stream",
"received err from stream: %v", err)
case updateMsg := <-chanMsg:
return updateMsg
}
return nil
}
// GetOutputIndex returns the output index of the given address in the given
// transaction.
func (h *HarnessTest) GetOutputIndex(txid *chainhash.Hash, addr string) int {
// We'll then extract the raw transaction from the mempool in order to
// determine the index of the p2tr output.
tx := h.Miner.GetRawTransaction(txid)
p2trOutputIndex := -1
for i, txOut := range tx.MsgTx().TxOut {
_, addrs, _, err := txscript.ExtractPkScriptAddrs(
txOut.PkScript, h.Miner.ActiveNet,
)
require.NoError(h, err)
if addrs[0].String() == addr {
p2trOutputIndex = i
}
}
require.Greater(h, p2trOutputIndex, -1)
return p2trOutputIndex
}
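// A small sketch of how the index is typically consumed (helper name
// illustrative): building an outpoint that references the output paying to
// addr.
func exampleOutPoint(ht *HarnessTest, txid *chainhash.Hash,
	addr string) *lnrpc.OutPoint {

	idx := ht.GetOutputIndex(txid, addr)

	return &lnrpc.OutPoint{
		TxidBytes:   txid[:],
		OutputIndex: uint32(idx),
	}
}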


@ -1,12 +1,14 @@
package lntemp
import (
"bytes"
"context"
"crypto/rand"
"encoding/hex"
"encoding/json"
"fmt"
"math"
"sort"
"strings"
"time"
@ -127,6 +129,9 @@ func (h *HarnessTest) ConnectNodesPerm(a, b *node.HarnessNode) {
func (h *HarnessTest) DisconnectNodes(a, b *node.HarnessNode) {
bobInfo := b.RPC.GetInfo()
a.RPC.DisconnectPeer(bobInfo.IdentityPubkey)
// Assert disconnected.
h.AssertPeerNotConnected(a, b)
}
// EnsureConnected will try to connect to two nodes, returning no error if they
@ -729,6 +734,51 @@ func (h *HarnessTest) AssertNumUTXOs(hn *node.HarnessNode,
return h.AssertNumUTXOsWithConf(hn, num, math.MaxInt32, 0)
}
// getUTXOs returns the UTXOs for the given node's account within the
// specified confirmation range.
func (h *HarnessTest) getUTXOs(hn *node.HarnessNode, account string,
max, min int32) []*lnrpc.Utxo {
var unconfirmed bool
if max == 0 {
unconfirmed = true
}
req := &walletrpc.ListUnspentRequest{
Account: account,
MaxConfs: max,
MinConfs: min,
UnconfirmedOnly: unconfirmed,
}
resp := hn.RPC.ListUnspent(req)
return resp.Utxos
}
// GetUTXOs returns all the UTXOs for the given node's account, including
// confirmed and unconfirmed.
func (h *HarnessTest) GetUTXOs(hn *node.HarnessNode,
account string) []*lnrpc.Utxo {
return h.getUTXOs(hn, account, math.MaxInt32, 0)
}
// GetUTXOsConfirmed returns the confirmed UTXOs for the given node's account.
func (h *HarnessTest) GetUTXOsConfirmed(hn *node.HarnessNode,
account string) []*lnrpc.Utxo {
return h.getUTXOs(hn, account, math.MaxInt32, 1)
}
// GetUTXOsUnconfirmed returns the unconfirmed UTXOs for the given node's
// account.
func (h *HarnessTest) GetUTXOsUnconfirmed(hn *node.HarnessNode,
account string) []*lnrpc.Utxo {
return h.getUTXOs(hn, account, 0, 0)
}
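// The confirmation windows above, spelled out (account name assumed to be
// lnd's default wallet account): every UTXO is either confirmed or
// unconfirmed, so the two filtered views partition the full set.
func exampleUTXOViews(ht *HarnessTest, hn *node.HarnessNode) {
	const account = "default"

	all := ht.GetUTXOs(hn, account)
	confirmed := ht.GetUTXOsConfirmed(hn, account)
	unconfirmed := ht.GetUTXOsUnconfirmed(hn, account)

	require.Equal(ht, len(all), len(confirmed)+len(unconfirmed))
}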
// WaitForBalanceConfirmed waits until the node sees the expected confirmed
// balance in its wallet.
func (h *HarnessTest) WaitForBalanceConfirmed(hn *node.HarnessNode,
@ -878,6 +928,16 @@ func (h *HarnessTest) AssertPaymentStatusFromStream(stream rpc.PaymentClient,
return h.assertPaymentStatusWithTimeout(stream, status, DefaultTimeout)
}
// AssertPaymentSucceedWithTimeout asserts that a payment succeeds within
// the specified timeout.
func (h *HarnessTest) AssertPaymentSucceedWithTimeout(stream rpc.PaymentClient,
timeout time.Duration) *lnrpc.Payment {
return h.assertPaymentStatusWithTimeout(
stream, lnrpc.Payment_SUCCEEDED, timeout,
)
}
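// A usage sketch: give a potentially slow multi-shard payment twice the
// default window to settle (request fields as used elsewhere in this
// harness).
func examplePayWithTimeout(ht *HarnessTest, alice *node.HarnessNode,
	payReq string) *lnrpc.Payment {

	stream := alice.RPC.SendPayment(&routerrpc.SendPaymentRequest{
		PaymentRequest: payReq,
		TimeoutSeconds: 60,
		FeeLimitMsat:   noFeeLimitMsat,
	})

	return ht.AssertPaymentSucceedWithTimeout(stream, 2*DefaultTimeout)
}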
// assertPaymentStatusWithTimeout takes a client stream and asserts the
// payment reaches the desired status before the specified timeout. The
// payment is returned once the status is reached.
@ -889,7 +949,9 @@ func (h *HarnessTest) assertPaymentStatusWithTimeout(stream rpc.PaymentClient,
err := wait.NoError(func() error {
// Consume one message. This will raise an error if the message
// is not received within the given timeout.
payment, err := h.receivePaymentUpdateWithTimeout(
stream, timeout,
)
if err != nil {
return fmt.Errorf("received error from payment "+
"stream: %s", err)
@ -918,8 +980,17 @@ func (h *HarnessTest) assertPaymentStatusWithTimeout(stream rpc.PaymentClient,
func (h *HarnessTest) ReceivePaymentUpdate(
stream rpc.PaymentClient) (*lnrpc.Payment, error) {
return h.receivePaymentUpdateWithTimeout(stream, DefaultTimeout)
}
// receivePaymentUpdateWithTimeout waits until a message is received on the
// payment client stream or the timeout is reached.
func (h *HarnessTest) receivePaymentUpdateWithTimeout(stream rpc.PaymentClient,
timeout time.Duration) (*lnrpc.Payment, error) {
chanMsg := make(chan *lnrpc.Payment, 1)
errChan := make(chan error, 1)
go func() {
// Consume one message. This will block until the message is
// received.
@ -933,7 +1004,7 @@ func (h *HarnessTest) ReceivePaymentUpdate(
}()
select {
case <-time.After(timeout):
require.Fail(h, "timeout", "timeout waiting for payment update")
return nil, nil
@ -2074,3 +2145,145 @@ func (h *HarnessTest) ReceiveSendToRouteUpdate(
return updateMsg, nil
}
}
// AssertInvoiceEqual asserts that two lnrpc.Invoices are equivalent. A custom
// comparison function is defined for these tests, since proto messages returned
// from unary and streaming RPCs (as of protobuf 1.23.0 and grpc 1.29.1) aren't
// consistent with the private fields set on the messages. As a result, we
// avoid using require.Equal and test only the actual data members.
func (h *HarnessTest) AssertInvoiceEqual(a, b *lnrpc.Invoice) {
// Ensure the HTLCs are sorted properly before attempting to compare.
sort.Slice(a.Htlcs, func(i, j int) bool {
return a.Htlcs[i].ChanId < a.Htlcs[j].ChanId
})
sort.Slice(b.Htlcs, func(i, j int) bool {
return b.Htlcs[i].ChanId < b.Htlcs[j].ChanId
})
require.Equal(h, a.Memo, b.Memo)
require.Equal(h, a.RPreimage, b.RPreimage)
require.Equal(h, a.RHash, b.RHash)
require.Equal(h, a.Value, b.Value)
require.Equal(h, a.ValueMsat, b.ValueMsat)
require.Equal(h, a.CreationDate, b.CreationDate)
require.Equal(h, a.SettleDate, b.SettleDate)
require.Equal(h, a.PaymentRequest, b.PaymentRequest)
require.Equal(h, a.DescriptionHash, b.DescriptionHash)
require.Equal(h, a.Expiry, b.Expiry)
require.Equal(h, a.FallbackAddr, b.FallbackAddr)
require.Equal(h, a.CltvExpiry, b.CltvExpiry)
require.Equal(h, a.RouteHints, b.RouteHints)
require.Equal(h, a.Private, b.Private)
require.Equal(h, a.AddIndex, b.AddIndex)
require.Equal(h, a.SettleIndex, b.SettleIndex)
require.Equal(h, a.AmtPaidSat, b.AmtPaidSat)
require.Equal(h, a.AmtPaidMsat, b.AmtPaidMsat)
require.Equal(h, a.State, b.State)
require.Equal(h, a.Features, b.Features)
require.Equal(h, a.IsKeysend, b.IsKeysend)
require.Equal(h, a.PaymentAddr, b.PaymentAddr)
require.Equal(h, a.IsAmp, b.IsAmp)
require.Equal(h, len(a.Htlcs), len(b.Htlcs))
for i := range a.Htlcs {
htlcA, htlcB := a.Htlcs[i], b.Htlcs[i]
require.Equal(h, htlcA.ChanId, htlcB.ChanId)
require.Equal(h, htlcA.HtlcIndex, htlcB.HtlcIndex)
require.Equal(h, htlcA.AmtMsat, htlcB.AmtMsat)
require.Equal(h, htlcA.AcceptHeight, htlcB.AcceptHeight)
require.Equal(h, htlcA.AcceptTime, htlcB.AcceptTime)
require.Equal(h, htlcA.ResolveTime, htlcB.ResolveTime)
require.Equal(h, htlcA.ExpiryHeight, htlcB.ExpiryHeight)
require.Equal(h, htlcA.State, htlcB.State)
require.Equal(h, htlcA.CustomRecords, htlcB.CustomRecords)
require.Equal(h, htlcA.MppTotalAmtMsat, htlcB.MppTotalAmtMsat)
require.Equal(h, htlcA.Amp, htlcB.Amp)
}
}
// AssertUTXOInWallet asserts that a given UTXO can be found in the node's
// wallet.
func (h *HarnessTest) AssertUTXOInWallet(hn *node.HarnessNode,
op *lnrpc.OutPoint, account string) {
err := wait.NoError(func() error {
utxos := h.GetUTXOs(hn, account)
err := fmt.Errorf("tx with hash %x not found", op.TxidBytes)
for _, utxo := range utxos {
if !bytes.Equal(utxo.Outpoint.TxidBytes, op.TxidBytes) {
continue
}
err = fmt.Errorf("tx with output index %v not found",
op.OutputIndex)
if utxo.Outpoint.OutputIndex != op.OutputIndex {
continue
}
return nil
}
return err
}, DefaultTimeout)
require.NoErrorf(h, err, "outpoint %v not found in %s's wallet",
op, hn.Name())
}
// AssertWalletAccountBalance asserts that the unconfirmed and confirmed
// balance for the given account is satisfied by the WalletBalance and
// ListUnspent RPCs. The unconfirmed balance is not checked for neutrino nodes.
func (h *HarnessTest) AssertWalletAccountBalance(hn *node.HarnessNode,
account string, confirmedBalance, unconfirmedBalance int64) {
err := wait.NoError(func() error {
balanceResp := hn.RPC.WalletBalance()
require.Contains(h, balanceResp.AccountBalance, account)
accountBalance := balanceResp.AccountBalance[account]
// Check confirmed balance.
if accountBalance.ConfirmedBalance != confirmedBalance {
return fmt.Errorf("expected confirmed balance %v, "+
"got %v", confirmedBalance,
accountBalance.ConfirmedBalance)
}
utxos := h.GetUTXOsConfirmed(hn, account)
var totalConfirmedVal int64
for _, utxo := range utxos {
totalConfirmedVal += utxo.AmountSat
}
if totalConfirmedVal != confirmedBalance {
return fmt.Errorf("expected total confirmed utxo "+
"balance %v, got %v", confirmedBalance,
totalConfirmedVal)
}
// Skip unconfirmed balance checks for neutrino nodes.
if h.IsNeutrinoBackend() {
return nil
}
// Check unconfirmed balance.
if accountBalance.UnconfirmedBalance != unconfirmedBalance {
return fmt.Errorf("expected unconfirmed balance %v, "+
"got %v", unconfirmedBalance,
accountBalance.UnconfirmedBalance)
}
utxos = h.GetUTXOsUnconfirmed(hn, account)
var totalUnconfirmedVal int64
for _, utxo := range utxos {
totalUnconfirmedVal += utxo.AmountSat
}
if totalUnconfirmedVal != unconfirmedBalance {
return fmt.Errorf("expected total unconfirmed utxo "+
"balance %v, got %v", unconfirmedBalance,
totalUnconfirmedVal)
}
return nil
}, DefaultTimeout)
require.NoError(h, err, "timeout checking wallet account balance")
}


@ -11,6 +11,7 @@ import (
"testing"
"time"
"github.com/btcsuite/btcd/btcjson"
"github.com/btcsuite/btcd/btcutil"
"github.com/btcsuite/btcd/chaincfg"
"github.com/btcsuite/btcd/chaincfg/chainhash"
@ -253,6 +254,16 @@ func (h *HarnessMiner) GetRawTransaction(txid *chainhash.Hash) *btcutil.Tx {
return tx
}
// GetRawTransactionVerbose makes an RPC call to the miner's
// GetRawTransactionVerbose and asserts.
func (h *HarnessMiner) GetRawTransactionVerbose(
txid *chainhash.Hash) *btcjson.TxRawResult {
tx, err := h.Client.GetRawTransactionVerbose(txid)
require.NoErrorf(h, err, "failed to get raw tx verbose: %v", txid)
return tx
}
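// A usage sketch: the verbose result exposes fields the plain form does not,
// e.g. the confirmation count of a mined transaction.
func exampleConfirmations(h *HarnessMiner, txid *chainhash.Hash) uint64 {
	rawTx := h.GetRawTransactionVerbose(txid)

	return rawTx.Confirmations
}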
// AssertTxInMempool asserts a given transaction can be found in the mempool.
func (h *HarnessMiner) AssertTxInMempool(txid *chainhash.Hash) *wire.MsgTx {
var msgTx *wire.MsgTx


@ -135,15 +135,24 @@ func NewHarnessNode(t *testing.T, cfg *BaseNodeConfig) (*HarnessNode, error) {
}, nil
}
// Initialize creates a list of new RPC clients using the passed connection,
// initializes the node's internal state and creates a topology watcher.
func (hn *HarnessNode) Initialize(c *grpc.ClientConn) {
hn.conn = c
// Init all the rpc clients.
hn.RPC = rpc.NewHarnessRPC(hn.runCtx, hn.T, c, hn.Name())
// Init the node's state.
//
// If we already have a state, it means we are restarting the node and
// we will only reset its internal states. Otherwise we'll create a new
// state.
if hn.State != nil {
hn.State.resetEphermalStates(hn.RPC)
} else {
hn.State = newState(hn.RPC)
}
// Init the topology watcher.
hn.Watcher = newNodeWatcher(hn.RPC, hn.State)
@ -164,11 +173,11 @@ func (hn *HarnessNode) String() string {
type nodeCfg struct {
LogFilenamePrefix string
ExtraArgs []string
SkipUnlock bool
Password []byte
P2PPort int
RPCPort int
RESTPort int
ProfilePort int
AcceptKeySend bool
FeeURL string
}
@ -185,6 +194,8 @@ func (hn *HarnessNode) String() string {
PubKey: hn.PubKeyStr,
State: hn.State,
NodeCfg: nodeCfg{
SkipUnlock: hn.Cfg.SkipUnlock,
Password: hn.Cfg.Password,
LogFilenamePrefix: hn.Cfg.LogFilenamePrefix,
ExtraArgs: hn.Cfg.ExtraArgs,
P2PPort: hn.Cfg.P2PPort,
@ -434,8 +445,9 @@ func (hn *HarnessNode) Start(ctxt context.Context) error {
return err
}
// Init the node by creating the RPC clients, initializing node's
// internal state and watcher.
hn.Initialize(conn)
// Wait till the server is starting.
if err := hn.WaitUntilStarted(); err != nil {
@ -477,8 +489,9 @@ func (hn *HarnessNode) InitNode(macBytes []byte) error {
}
}
// Init the node by creating the RPC clients, initializing node's
// internal state and watcher.
hn.Initialize(conn)
// Wait till the server is starting.
if err := hn.WaitUntilStarted(); err != nil {


@ -241,6 +241,14 @@ func (s *State) updatePaymentStats() {
}
resp := s.rpc.ListPayments(req)
// Exit early when there's no payment.
//
// NOTE: we need to exit early here because when there's no payment the
// `LastIndexOffset` will be zero.
if len(resp.Payments) == 0 {
return
}
s.Payment.LastIndexOffset = resp.LastIndexOffset
for _, payment := range resp.Payments {
if payment.Status == lnrpc.Payment_FAILED ||
@ -261,6 +269,14 @@ func (s *State) updateInvoiceStats() {
}
resp := s.rpc.ListInvoices(req)
// Exit early when there's no invoice.
//
// NOTE: we need to exit early here because when there's no invoice the
// `LastIndexOffset` will be zero.
if len(resp.Invoices) == 0 {
return
}
s.Invoice.LastIndexOffset = resp.LastIndexOffset
for _, invoice := range resp.Invoices {
if invoice.State == lnrpc.Invoice_SETTLED ||
@ -328,3 +344,17 @@ func (s *State) String() string {
return fmt.Sprintf("\n%s", stateBytes)
}
// resetEphermalStates resets the current state with a new HarnessRPC and
// empties the private fields which track state only valid for the last test.
func (s *State) resetEphermalStates(rpc *rpc.HarnessRPC) {
s.rpc = rpc
// Reset ephemeral states which are used to record info from finished
// tests.
s.openChans = &SyncMap[wire.OutPoint, []*OpenChannelUpdate]{}
s.closedChans = &SyncMap[wire.OutPoint, *lnrpc.ClosedChannelUpdate]{}
s.numChanUpdates = &SyncMap[wire.OutPoint, int]{}
s.nodeUpdates = &SyncMap[string, []*lnrpc.NodeUpdate]{}
s.policyUpdates = &SyncMap[wire.OutPoint, PolicyUpdate]{}
}


@ -1,5 +1,44 @@
package rpc
import (
"github.com/lightningnetwork/lnd/lnrpc/chainrpc"
)
// =====================
// ChainClient related RPCs.
// =====================
type ConfNtfnClient chainrpc.ChainNotifier_RegisterConfirmationsNtfnClient
// RegisterConfirmationsNtfn creates a notification client to watch a given
// transaction being confirmed.
func (h *HarnessRPC) RegisterConfirmationsNtfn(
req *chainrpc.ConfRequest) ConfNtfnClient {
// RegisterConfirmationsNtfn needs to have the context alive for the
// entire test case as the returned client will be used to send and
// receive event streams. Thus we use runCtx here instead of a timeout
// context.
client, err := h.ChainClient.RegisterConfirmationsNtfn(
h.runCtx, req,
)
h.NoError(err, "RegisterConfirmationsNtfn")
return client
}
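// A usage sketch (caller-side, names illustrative): block until the watched
// script receives one confirmation.
func exampleWaitForConf(h *HarnessRPC, txid, pkScript []byte,
	heightHint uint32) {

	client := h.RegisterConfirmationsNtfn(&chainrpc.ConfRequest{
		Txid:       txid,
		Script:     pkScript,
		NumConfs:   1,
		HeightHint: heightHint,
	})

	// Recv blocks until the confirmation event arrives on the stream.
	_, err := client.Recv()
	h.NoError(err, "RegisterConfirmationsNtfn Recv")
}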
type SpendClient chainrpc.ChainNotifier_RegisterSpendNtfnClient
// RegisterSpendNtfn creates a notification client to watch a given
// transaction being spent.
func (h *HarnessRPC) RegisterSpendNtfn(req *chainrpc.SpendRequest) SpendClient {
// RegisterSpendNtfn needs to have the context alive for the entire
// test case as the returned client will be used to send and receive
// event streams. Thus we use runCtx here instead of a timeout context.
client, err := h.ChainClient.RegisterSpendNtfn(
h.runCtx, req,
)
h.NoError(err, "RegisterSpendNtfn")
return client
}


@ -631,3 +631,54 @@ func (h *HarnessRPC) RegisterRPCMiddleware() (MiddlewareClient,
return stream, cancel
}
type ChannelEventsClient lnrpc.Lightning_SubscribeChannelEventsClient
// SubscribeChannelEvents creates a subscription client for channel events and
// asserts its creation.
func (h *HarnessRPC) SubscribeChannelEvents() ChannelEventsClient {
req := &lnrpc.ChannelEventSubscription{}
// SubscribeChannelEvents needs to have the context alive for the
// entire test case as the returned client will be used to send and
// receive event streams. Thus we use runCtx here instead of a timeout
// context.
client, err := h.LN.SubscribeChannelEvents(h.runCtx, req)
h.NoError(err, "SubscribeChannelEvents")
return client
}
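// A usage sketch: subscribe before triggering a channel open so the first
// event, a PENDING_OPEN_CHANNEL update, is not missed.
func exampleChannelEvents(h *HarnessRPC) *lnrpc.ChannelEventUpdate {
	events := h.SubscribeChannelEvents()

	// ... the test opens a channel here ...

	update, err := events.Recv()
	h.NoError(err, "SubscribeChannelEvents Recv")

	return update
}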
type CustomMessageClient lnrpc.Lightning_SubscribeCustomMessagesClient
// SubscribeCustomMessages creates a subscription client for custom messages.
func (h *HarnessRPC) SubscribeCustomMessages() (CustomMessageClient,
context.CancelFunc) {
ctxt, cancel := context.WithCancel(h.runCtx)
req := &lnrpc.SubscribeCustomMessagesRequest{}
// SubscribeCustomMessages needs to have the context alive for the
// entire test case as the returned client will be used to send and
// receive event streams. Thus we use a cancellable context derived
// from runCtx instead of a timeout context.
stream, err := h.LN.SubscribeCustomMessages(ctxt, req)
h.NoError(err, "SubscribeCustomMessages")
return stream, cancel
}
// SendCustomMessage makes an RPC call to the node's SendCustomMessage and
// returns the response.
func (h *HarnessRPC) SendCustomMessage(
req *lnrpc.SendCustomMessageRequest) *lnrpc.SendCustomMessageResponse {
ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
defer cancel()
resp, err := h.LN.SendCustomMessage(ctxt, req)
h.NoError(err, "SendCustomMessage")
return resp
}
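// A usage sketch (peer setup assumed): Bob subscribes first so Alice's
// message is not missed. Message types below 32768 are reserved for the
// protocol, so custom messages use the range above it.
func exampleCustomMessage(alice, bob *HarnessRPC,
	bobPubKey []byte) *lnrpc.CustomMessage {

	sub, cancel := bob.SubscribeCustomMessages()
	defer cancel()

	alice.SendCustomMessage(&lnrpc.SendCustomMessageRequest{
		Peer: bobPubKey,
		Type: 32768,
		Data: []byte("ping"),
	})

	msg, err := sub.Recv()
	alice.NoError(err, "SubscribeCustomMessages Recv")

	return msg
}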


@ -150,3 +150,43 @@ func (h *HarnessRPC) XImportMissionControlAssertErr(
_, err := h.Router.XImportMissionControl(ctxt, req)
require.Error(h, err, "expect an error from x import mission control")
}
// BuildRoute makes an RPC call to the node's RouterClient and asserts.
func (h *HarnessRPC) BuildRoute(
req *routerrpc.BuildRouteRequest) *routerrpc.BuildRouteResponse {
ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
defer cancel()
resp, err := h.Router.BuildRoute(ctxt, req)
h.NoError(err, "BuildRoute")
return resp
}
type InterceptorClient routerrpc.Router_HtlcInterceptorClient
// HtlcInterceptor makes an RPC call to the node's RouterClient and asserts.
func (h *HarnessRPC) HtlcInterceptor() (InterceptorClient, context.CancelFunc) {
// HtlcInterceptor needs to have the context alive for the entire test
// case as the returned client will be used to send and receive event
// streams. Thus we use a cancellable context here instead of a
// timeout context.
ctxt, cancel := context.WithCancel(h.runCtx)
resp, err := h.Router.HtlcInterceptor(ctxt)
h.NoError(err, "HtlcInterceptor")
return resp, cancel
}
type TrackPaymentsClient routerrpc.Router_TrackPaymentsClient
// TrackPayments makes an RPC call to the node's RouterClient and asserts.
func (h *HarnessRPC) TrackPayments(
req *routerrpc.TrackPaymentsRequest) TrackPaymentsClient {
resp, err := h.Router.TrackPayments(h.runCtx, req)
h.NoError(err, "TrackPayments")
return resp
}
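// A usage sketch: track all outgoing payments, including in-flight updates,
// and read the first update from the stream.
func exampleTrackPayments(h *HarnessRPC) *lnrpc.Payment {
	tracker := h.TrackPayments(&routerrpc.TrackPaymentsRequest{
		NoInflightUpdates: false,
	})

	// ... the test sends a payment here ...

	payment, err := tracker.Recv()
	h.NoError(err, "TrackPayments Recv")

	return payment
}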

View File

@ -4,13 +4,39 @@ import (
"context"
"github.com/lightningnetwork/lnd/lnrpc/signrpc"
"github.com/stretchr/testify/require"
)
// =====================
// Signer related RPCs.
// =====================
// DeriveSharedKey makes an RPC call to the node's SignerClient and asserts.
func (h *HarnessRPC) DeriveSharedKey(
req *signrpc.SharedKeyRequest) *signrpc.SharedKeyResponse {
ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
defer cancel()
resp, err := h.Signer.DeriveSharedKey(ctxt, req)
h.NoError(err, "DeriveSharedKey")
return resp
}
// DeriveSharedKeyErr makes an RPC call to the node's SignerClient and asserts
// there is an error.
func (h *HarnessRPC) DeriveSharedKeyErr(req *signrpc.SharedKeyRequest) error {
ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
defer cancel()
_, err := h.Signer.DeriveSharedKey(ctxt, req)
require.Error(h, err, "expected error from calling DeriveSharedKey")
return err
}
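// A usage sketch: with no key locator set in the request, lnd performs ECDH
// between its node identity key and the given public key; the result is the
// SHA256 of the shared point, hence always 32 bytes.
func exampleSharedKey(h *HarnessRPC, peerPubKey []byte) []byte {
	resp := h.DeriveSharedKey(&signrpc.SharedKeyRequest{
		EphemeralPubkey: peerPubKey,
	})

	return resp.SharedKey
}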
// SignOutputRaw makes an RPC call to the node's SignerClient and asserts.
func (h *HarnessRPC) SignOutputRaw(req *signrpc.SignReq) *signrpc.SignResp {
ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
defer cancel()
@ -20,3 +46,152 @@ func (h *HarnessRPC) SignOutputRaw(req *signrpc.SignReq) *signrpc.SignResp {
return resp
}
// SignOutputRawErr makes an RPC call to the node's SignerClient and asserts an
// error is returned.
func (h *HarnessRPC) SignOutputRawErr(req *signrpc.SignReq) error {
ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
defer cancel()
_, err := h.Signer.SignOutputRaw(ctxt, req)
require.Error(h, err, "expect to fail to sign raw output")
return err
}
// MuSig2CreateSession makes an RPC call to the node's SignerClient and asserts.
func (h *HarnessRPC) MuSig2CreateSession(
req *signrpc.MuSig2SessionRequest) *signrpc.MuSig2SessionResponse {
ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
defer cancel()
resp, err := h.Signer.MuSig2CreateSession(ctxt, req)
h.NoError(err, "MuSig2CreateSession")
return resp
}
// MuSig2CombineKeys makes an RPC call to the node's SignerClient and asserts.
//
//nolint:lll
func (h *HarnessRPC) MuSig2CombineKeys(
req *signrpc.MuSig2CombineKeysRequest) *signrpc.MuSig2CombineKeysResponse {
ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
defer cancel()
resp, err := h.Signer.MuSig2CombineKeys(ctxt, req)
h.NoError(err, "MuSig2CombineKeys")
return resp
}
// MuSig2RegisterNonces makes an RPC call to the node's SignerClient and asserts.
//
//nolint:lll
func (h *HarnessRPC) MuSig2RegisterNonces(
req *signrpc.MuSig2RegisterNoncesRequest) *signrpc.MuSig2RegisterNoncesResponse {
ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
defer cancel()
resp, err := h.Signer.MuSig2RegisterNonces(ctxt, req)
h.NoError(err, "MuSig2RegisterNonces")
return resp
}
// MuSig2Sign makes an RPC call to the node's SignerClient and asserts.
func (h *HarnessRPC) MuSig2Sign(
req *signrpc.MuSig2SignRequest) *signrpc.MuSig2SignResponse {
ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
defer cancel()
resp, err := h.Signer.MuSig2Sign(ctxt, req)
h.NoError(err, "MuSig2Sign")
return resp
}
// MuSig2SignErr makes an RPC call to the node's SignerClient and asserts an
// error is returned.
func (h *HarnessRPC) MuSig2SignErr(req *signrpc.MuSig2SignRequest) error {
ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
defer cancel()
_, err := h.Signer.MuSig2Sign(ctxt, req)
require.Error(h, err, "expect an error")
return err
}
// MuSig2CombineSig makes an RPC call to the node's SignerClient and asserts.
func (h *HarnessRPC) MuSig2CombineSig(
r *signrpc.MuSig2CombineSigRequest) *signrpc.MuSig2CombineSigResponse {
ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
defer cancel()
resp, err := h.Signer.MuSig2CombineSig(ctxt, r)
h.NoError(err, "MuSig2CombineSig")
return resp
}
// MuSig2Cleanup makes an RPC call to the node's SignerClient and asserts.
func (h *HarnessRPC) MuSig2Cleanup(
req *signrpc.MuSig2CleanupRequest) *signrpc.MuSig2CleanupResponse {
ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
defer cancel()
resp, err := h.Signer.MuSig2Cleanup(ctxt, req)
h.NoError(err, "MuSig2Cleanup")
return resp
}
// SignMessageSigner makes an RPC call to the node's SignerClient and asserts.
//
// NOTE: there's already `SignMessage` in `h.LN`.
func (h *HarnessRPC) SignMessageSigner(
req *signrpc.SignMessageReq) *signrpc.SignMessageResp {
ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
defer cancel()
resp, err := h.Signer.SignMessage(ctxt, req)
h.NoError(err, "SignMessage")
return resp
}
// VerifyMessageSigner makes an RPC call to the node's SignerClient and asserts.
//
// NOTE: there's already `VerifyMessage` in `h.LN`.
func (h *HarnessRPC) VerifyMessageSigner(
req *signrpc.VerifyMessageReq) *signrpc.VerifyMessageResp {
ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
defer cancel()
resp, err := h.Signer.VerifyMessage(ctxt, req)
h.NoError(err, "VerifyMessage")
return resp
}
// ComputeInputScript makes an RPC call to the node's SignerClient and asserts.
func (h *HarnessRPC) ComputeInputScript(
req *signrpc.SignReq) *signrpc.InputScriptResp {
ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
defer cancel()
resp, err := h.Signer.ComputeInputScript(ctxt, req)
h.NoError(err, "ComputeInputScript")
return resp
}


@ -163,3 +163,85 @@ func (h *HarnessRPC) PublishTransaction(
return resp
}
// BumpFee makes an RPC call to the node's WalletKitClient and asserts.
func (h *HarnessRPC) BumpFee(
req *walletrpc.BumpFeeRequest) *walletrpc.BumpFeeResponse {
ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
defer cancel()
resp, err := h.WalletKit.BumpFee(ctxt, req)
h.NoError(err, "BumpFee")
return resp
}
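// A usage sketch (values illustrative): ask the node to fee-bump the given
// wallet output so it confirms within roughly six blocks.
func exampleBumpFee(h *HarnessRPC, op *lnrpc.OutPoint) {
	h.BumpFee(&walletrpc.BumpFeeRequest{
		Outpoint:   op,
		TargetConf: 6,
	})
}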
// ListAccounts makes an RPC call to the node's WalletKitClient and asserts.
func (h *HarnessRPC) ListAccounts(
req *walletrpc.ListAccountsRequest) *walletrpc.ListAccountsResponse {
ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
defer cancel()
resp, err := h.WalletKit.ListAccounts(ctxt, req)
h.NoError(err, "ListAccounts")
return resp
}
// ImportAccount makes an RPC call to the node's WalletKitClient and asserts.
func (h *HarnessRPC) ImportAccount(
req *walletrpc.ImportAccountRequest) *walletrpc.ImportAccountResponse {
ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
defer cancel()
resp, err := h.WalletKit.ImportAccount(ctxt, req)
h.NoError(err, "ImportAccount")
return resp
}
// ImportPublicKey makes an RPC call to the node's WalletKitClient and asserts.
//
//nolint:lll
func (h *HarnessRPC) ImportPublicKey(
req *walletrpc.ImportPublicKeyRequest) *walletrpc.ImportPublicKeyResponse {
ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
defer cancel()
resp, err := h.WalletKit.ImportPublicKey(ctxt, req)
h.NoError(err, "ImportPublicKey")
return resp
}
// SignPsbt makes an RPC call to the node's WalletKitClient and asserts.
func (h *HarnessRPC) SignPsbt(
req *walletrpc.SignPsbtRequest) *walletrpc.SignPsbtResponse {
ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
defer cancel()
resp, err := h.WalletKit.SignPsbt(ctxt, req)
h.NoError(err, "SignPsbt")
return resp
}
// ImportTapscript makes an RPC call to the node's WalletKitClient and asserts.
//
//nolint:lll
func (h *HarnessRPC) ImportTapscript(
req *walletrpc.ImportTapscriptRequest) *walletrpc.ImportTapscriptResponse {
ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
defer cancel()
resp, err := h.WalletKit.ImportTapscript(ctxt, req)
h.NoError(err, "ImportTapscript")
return resp
}


@ -949,60 +949,6 @@ func assertNumPendingChannels(t *harnessTest, node *lntest.HarnessNode,
require.NoErrorf(t.t, err, "got err: %v", predErr)
}
// verifyCloseUpdate is used to verify that a closed channel update is of the
// expected type.
func verifyCloseUpdate(chanUpdate *lnrpc.ChannelEventUpdate,
closeType lnrpc.ChannelCloseSummary_ClosureType,
closeInitiator lnrpc.Initiator) error {
// We should receive one inactive and one closed notification
// for each channel.
switch update := chanUpdate.Channel.(type) {
case *lnrpc.ChannelEventUpdate_InactiveChannel:
if chanUpdate.Type != lnrpc.ChannelEventUpdate_INACTIVE_CHANNEL {
return fmt.Errorf("update type mismatch: expected %v, got %v",
lnrpc.ChannelEventUpdate_INACTIVE_CHANNEL,
chanUpdate.Type)
}
case *lnrpc.ChannelEventUpdate_ClosedChannel:
if chanUpdate.Type !=
lnrpc.ChannelEventUpdate_CLOSED_CHANNEL {
return fmt.Errorf("update type mismatch: expected %v, got %v",
lnrpc.ChannelEventUpdate_CLOSED_CHANNEL,
chanUpdate.Type)
}
if update.ClosedChannel.CloseType != closeType {
return fmt.Errorf("channel closure type "+
"mismatch: expected %v, got %v",
closeType,
update.ClosedChannel.CloseType)
}
if update.ClosedChannel.CloseInitiator != closeInitiator {
return fmt.Errorf("expected close intiator: %v, got: %v",
closeInitiator,
update.ClosedChannel.CloseInitiator)
}
case *lnrpc.ChannelEventUpdate_FullyResolvedChannel:
if chanUpdate.Type != lnrpc.ChannelEventUpdate_FULLY_RESOLVED_CHANNEL {
return fmt.Errorf("update type mismatch: expected %v, got %v",
lnrpc.ChannelEventUpdate_FULLY_RESOLVED_CHANNEL,
chanUpdate.Type)
}
default:
return fmt.Errorf("channel update channel of wrong type, "+
"expected closed channel, got %T",
update)
}
return nil
}
// assertNodeNumChannels polls the provided node's list channels rpc until it
// reaches the desired number of total channels.
func assertNodeNumChannels(t *harnessTest, node *lntest.HarnessNode,


@ -275,6 +275,10 @@ var allTestCasesTemp = []*lntemp.TestCase{
Name: "open channel reorg test",
TestFunc: testOpenChannelAfterReorg,
},
{
Name: "psbt channel funding",
TestFunc: testPsbtChanFunding,
},
{
Name: "psbt channel funding external",
TestFunc: testPsbtChanFundingExternal,
@ -283,6 +287,10 @@ var allTestCasesTemp = []*lntemp.TestCase{
Name: "psbt channel funding single step",
TestFunc: testPsbtChanFundingSingleStep,
},
{
Name: "sign psbt",
TestFunc: testSignPsbt,
},
{
Name: "resolution handoff",
TestFunc: testResHandoff,
@ -369,4 +377,128 @@ var allTestCasesTemp = []*lntemp.TestCase{
Name: "wipe forwarding packages",
TestFunc: testWipeForwardingPackages,
},
{
Name: "switch circuit persistence",
TestFunc: testSwitchCircuitPersistence,
},
{
Name: "switch offline delivery",
TestFunc: testSwitchOfflineDelivery,
},
{
Name: "switch offline delivery persistence",
TestFunc: testSwitchOfflineDeliveryPersistence,
},
{
Name: "switch offline delivery outgoing offline",
TestFunc: testSwitchOfflineDeliveryOutgoingOffline,
},
{
Name: "sendtoroute multi path payment",
TestFunc: testSendToRouteMultiPath,
},
{
Name: "send multi path payment",
TestFunc: testSendMultiPathPayment,
},
{
Name: "sendpayment amp invoice",
TestFunc: testSendPaymentAMPInvoice,
},
{
Name: "sendpayment amp invoice repeat",
TestFunc: testSendPaymentAMPInvoiceRepeat,
},
{
Name: "send payment amp",
TestFunc: testSendPaymentAMP,
},
{
Name: "sendtoroute amp",
TestFunc: testSendToRouteAMP,
},
{
Name: "forward interceptor dedup htlcs",
TestFunc: testForwardInterceptorDedupHtlc,
},
{
Name: "forward interceptor",
TestFunc: testForwardInterceptorBasic,
},
{
Name: "zero conf channel open",
TestFunc: testZeroConfChannelOpen,
},
{
Name: "option scid alias",
TestFunc: testOptionScidAlias,
},
{
Name: "scid alias channel update",
TestFunc: testUpdateChannelPolicyScidAlias,
},
{
Name: "scid alias upgrade",
TestFunc: testOptionScidUpgrade,
},
{
Name: "nonstd sweep",
TestFunc: testNonstdSweep,
},
{
Name: "multiple channel creation and update subscription",
TestFunc: testBasicChannelCreationAndUpdates,
},
{
Name: "derive shared key",
TestFunc: testDeriveSharedKey,
},
{
Name: "sign output raw",
TestFunc: testSignOutputRaw,
},
{
Name: "sign verify message",
TestFunc: testSignVerifyMessage,
},
{
Name: "cpfp",
TestFunc: testCPFP,
},
{
Name: "taproot",
TestFunc: testTaproot,
},
{
Name: "wallet import account",
TestFunc: testWalletImportAccount,
},
{
Name: "wallet import pubkey",
TestFunc: testWalletImportPubKey,
},
{
Name: "async payments benchmark",
TestFunc: testAsyncPayments,
},
{
Name: "remote signer",
TestFunc: testRemoteSigner,
},
{
Name: "taproot coop close",
TestFunc: testTaprootCoopClose,
},
{
Name: "trackpayments",
TestFunc: testTrackPayments,
},
{
Name: "open channel fee policy",
TestFunc: testOpenChannelUpdateFeePolicy,
},
{
Name: "custom message",
TestFunc: testCustomMessage,
},
}


@ -1,10 +1,7 @@
package itest
import (
"context"
"crypto/rand"
"encoding/hex"
"sort"
"testing"
"time"
@ -14,40 +11,41 @@ import (
"github.com/lightningnetwork/lnd/lnrpc"
"github.com/lightningnetwork/lnd/lnrpc/invoicesrpc"
"github.com/lightningnetwork/lnd/lnrpc/routerrpc"
"github.com/lightningnetwork/lnd/lntest"
"github.com/lightningnetwork/lnd/lntemp"
"github.com/lightningnetwork/lnd/lntemp/node"
"github.com/lightningnetwork/lnd/lntypes"
"github.com/stretchr/testify/require"
)
// testSendPaymentAMPInvoice tests that we can send an AMP payment to a
// specified AMP invoice using SendPaymentV2.
func testSendPaymentAMPInvoice(ht *lntemp.HarnessTest) {
succeed := ht.Run("native payaddr", func(t *testing.T) {
tt := ht.Subtest(t)
testSendPaymentAMPInvoiceCase(tt, false)
})
// Abort the test if failed.
if !succeed {
return
}
ht.Run("external payaddr", func(t *testing.T) {
tt := ht.Subtest(t)
testSendPaymentAMPInvoiceCase(tt, true)
})
}
func testSendPaymentAMPInvoiceCase(ht *lntemp.HarnessTest,
useExternalPayAddr bool) {
mts := newMppTestScenario(ht)
// Subscribe to bob's invoices. Do this early in the test to make sure
// that the subscription has actually been completed when we add an
// invoice. Otherwise the notification will be missed.
req := &lnrpc.InvoiceSubscription{}
bobInvoiceSubscription := mts.bob.RPC.SubscribeInvoices(req)
const paymentAmt = btcutil.Amount(300000)
@ -61,49 +59,45 @@ func testSendPaymentAMPInvoiceCase(net *lntest.NetworkHarness, t *harnessTest,
// \ /
// \__ Dave ____/
//
mppReq := &mppOpenChannelRequest{
amtAliceCarol: 235000,
amtAliceDave: 135000,
amtCarolBob: 135000,
amtCarolEve: 135000,
amtDaveBob: 135000,
amtEveBob: 135000,
}
mts.openChannels(mppReq)
chanPointAliceDave := mts.channelPoints[1]
chanPointDaveBob := mts.channelPoints[4]
invoice := &lnrpc.Invoice{
Value: int64(paymentAmt),
IsAmp: true,
}
addInvoiceResp := mts.bob.RPC.AddInvoice(invoice)
// Ensure we get a notification of the invoice being added by Bob.
rpcInvoice := ht.ReceiveInvoiceUpdate(bobInvoiceSubscription)
require.False(ht, rpcInvoice.Settled)
require.Equal(ht, lnrpc.Invoice_OPEN, rpcInvoice.State)
require.Equal(ht, int64(0), rpcInvoice.AmtPaidSat)
require.Equal(ht, int64(0), rpcInvoice.AmtPaidMsat)
require.Equal(ht, 0, len(rpcInvoice.Htlcs))
// Increase Dave's fee to make the test deterministic. Otherwise it
// would be unpredictable whether pathfinding would go through Carol
// or Dave for the first shard.
expectedPolicy := mts.updateDaveGlobalPolicy()
// Make sure Alice has heard it for both Dave's channels.
ht.AssertChannelPolicyUpdate(
mts.alice, mts.dave, expectedPolicy, chanPointAliceDave, false,
)
ht.AssertChannelPolicyUpdate(
mts.alice, mts.dave, expectedPolicy, chanPointDaveBob, false,
)
// Generate an external payment address when attempting to pseudo-reuse
// an AMP invoice. When using an external payment address, we'll also
@ -116,19 +110,16 @@ func testSendPaymentAMPInvoiceCase(net *lntest.NetworkHarness, t *harnessTest,
)
if useExternalPayAddr {
expNumInvoices = 2
externalPayAddr = ht.Random32Bytes()
}
sendReq := &routerrpc.SendPaymentRequest{
PaymentRequest: addInvoiceResp.PaymentRequest,
PaymentAddr: externalPayAddr,
TimeoutSeconds: 60,
FeeLimitMsat: noFeeLimitMsat,
}
payment := ht.SendPaymentAssertSettled(mts.alice, sendReq)
// Check that Alice split the payment in at least three shards. Because
// the hand-off of the htlc to the link is asynchronous (via a mailbox),
@ -145,232 +136,201 @@ func testSendPaymentAMPInvoiceCase(net *lntest.NetworkHarness, t *harnessTest,
}
const minExpectedShards = 3
require.GreaterOrEqual(ht, succeeded, minExpectedShards,
"expected num of shards not reached")
// When an external payment address is supplied, we'll get an extra
// notification for the JIT inserted invoice, since it differs from the
// original.
if useExternalPayAddr {
ht.ReceiveInvoiceUpdate(bobInvoiceSubscription)
}
// There should now be a settle event for the invoice.
rpcInvoice = ht.ReceiveInvoiceUpdate(bobInvoiceSubscription)
// Also fetch Bob's invoice from ListInvoices and assert it is equal to
// the one received via the subscription.
invoices := ht.AssertNumInvoices(mts.bob, expNumInvoices)
ht.AssertInvoiceEqual(rpcInvoice, invoices[expNumInvoices-1])
// Assert that the invoice is settled for the total payment amount and
// has the correct payment address.
require.True(ht, rpcInvoice.Settled)
require.Equal(ht, lnrpc.Invoice_SETTLED, rpcInvoice.State)
require.Equal(ht, int64(paymentAmt), rpcInvoice.AmtPaidSat)
require.Equal(ht, int64(paymentAmt*1000), rpcInvoice.AmtPaidMsat)
// Finally, assert that the same set id is recorded for each htlc, and
// that the preimage hash pair is valid.
var setID []byte
require.Equal(ht, succeeded, len(rpcInvoice.Htlcs))
for _, htlc := range rpcInvoice.Htlcs {
require.NotNil(ht, htlc.Amp)
if setID == nil {
setID = make([]byte, 32)
copy(setID, htlc.Amp.SetId)
}
require.Equal(ht, setID, htlc.Amp.SetId)
// Parse the child hash and child preimage, and assert they are
// well-formed.
childHash, err := lntypes.MakeHash(htlc.Amp.Hash)
require.NoError(ht, err)
childPreimage, err := lntypes.MakePreimage(htlc.Amp.Preimage)
require.NoError(ht, err)
// Assert that the preimage actually matches the hashes.
validPreimage := childPreimage.Matches(childHash)
require.True(ht, validPreimage)
}
// The set ID we extract above should be shown in the final settled
// state.
ampState := rpcInvoice.AmpInvoiceState[hex.EncodeToString(setID)]
require.Equal(ht, lnrpc.InvoiceHTLCState_SETTLED, ampState.State)
// Finally, close all channels.
mts.closeChannels()
}
// testSendPaymentAMPInvoiceRepeat tests that it's possible to pay an AMP
// invoice multiple times by having the client generate a new setID each time.
func testSendPaymentAMPInvoiceRepeat(ht *lntemp.HarnessTest) {
// In this basic test, we'll only need two nodes as we want to
// primarily test the recurring payment feature. So we'll re-use the
carol := ht.NewNode("Carol", nil)
// Send Carol enough coins to be able to open a channel to Dave.
ht.FundCoins(btcutil.SatoshiPerBitcoin, carol)
dave := ht.NewNode("Dave", nil)
// Set up an invoice subscription so we can be notified when Dave
// receives his repeated payments.
req := &lnrpc.InvoiceSubscription{}
invSubscription := dave.RPC.SubscribeInvoices(req)
// Before we start the test, we'll ensure both sides are connected so
// the funding flow can properly be executed.
ht.EnsureConnected(carol, dave)
// Establish a channel between Carol and Dave.
chanAmt := btcutil.Amount(100_000)
ht.OpenChannel(
carol, dave, lntemp.OpenChannelParams{Amt: chanAmt},
)
// Create an AMP invoice of a trivial amount, that we'll pay repeatedly
// in this integration test.
paymentAmt := 10000
invoice := &lnrpc.Invoice{
Value: int64(paymentAmt),
IsAmp: true,
}
addInvoiceResp := dave.RPC.AddInvoice(invoice)
// We should get an initial notification that the HTLC has been added.
rpcInvoice := ht.ReceiveInvoiceUpdate(invSubscription)
require.False(ht, rpcInvoice.Settled)
require.Equal(ht, lnrpc.Invoice_OPEN, rpcInvoice.State)
require.Equal(ht, int64(0), rpcInvoice.AmtPaidSat)
require.Equal(ht, int64(0), rpcInvoice.AmtPaidMsat)
require.Equal(ht, 0, len(rpcInvoice.Htlcs))
// Now we'll use Carol to pay the invoice that Dave created.
ht.CompletePaymentRequests(
carol, []string{addInvoiceResp.PaymentRequest},
)
// Dave should get a notification that the invoice has been settled.
invoiceNtfn := ht.ReceiveInvoiceUpdate(invSubscription)
// The notification should signal that the invoice is now settled, and
// should also include the set ID, and show the proper amount paid.
require.True(ht, invoiceNtfn.Settled)
require.Equal(ht, lnrpc.Invoice_SETTLED, invoiceNtfn.State)
require.Equal(ht, paymentAmt, int(invoiceNtfn.AmtPaidSat))
require.Equal(ht, 1, len(invoiceNtfn.AmpInvoiceState))
var firstSetID []byte
for setIDStr, ampState := range invoiceNtfn.AmpInvoiceState {
firstSetID, _ = hex.DecodeString(setIDStr)
require.Equal(ht, lnrpc.InvoiceHTLCState_SETTLED,
ampState.State)
}
// Pay the invoice again, we should get another notification that Dave
// has received another payment.
_ = sendAndAssertSuccess(
t, carol, &routerrpc.SendPaymentRequest{
PaymentRequest: addInvoiceResp.PaymentRequest,
TimeoutSeconds: 60,
FeeLimitMsat: noFeeLimitMsat,
},
ht.CompletePaymentRequests(
carol, []string{addInvoiceResp.PaymentRequest},
)
// Dave should get another notification.
invoiceNtfn, err = invSubscription.Recv()
require.NoError(t.t, err)
invoiceNtfn = ht.ReceiveInvoiceUpdate(invSubscription)
// The invoice should still be shown as settled, and also include the
// information about this newly generated setID, showing 2x the amount
// paid.
require.True(t.t, invoiceNtfn.Settled) // nolint:staticcheck
require.Equal(t.t, paymentAmt*2, int(invoiceNtfn.AmtPaidSat))
require.True(ht, invoiceNtfn.Settled)
require.Equal(ht, paymentAmt*2, int(invoiceNtfn.AmtPaidSat))
var secondSetID []byte
for setIDStr, ampState := range invoiceNtfn.AmpInvoiceState {
secondSetID, _ = hex.DecodeString(setIDStr)
require.Equal(t.t, lnrpc.InvoiceHTLCState_SETTLED, ampState.State)
require.Equal(ht, lnrpc.InvoiceHTLCState_SETTLED,
ampState.State)
}
// The returned invoice should only include a single HTLC since we
// return the "projected" sub-invoice for a given setID.
require.Equal(t.t, 1, len(invoiceNtfn.Htlcs))
require.Equal(ht, 1, len(invoiceNtfn.Htlcs))
// However the AMP state index should show that there've been two
// repeated payments to this invoice so far.
require.Equal(t.t, 2, len(invoiceNtfn.AmpInvoiceState))
require.Equal(ht, 2, len(invoiceNtfn.AmpInvoiceState))
// Now we'll look up the invoice using the new LookupInvoice2 RPC call
// by the set ID of each of the invoices.
subInvoice1, err := dave.LookupInvoiceV2(ctxb, &invoicesrpc.LookupInvoiceMsg{
msg := &invoicesrpc.LookupInvoiceMsg{
InvoiceRef: &invoicesrpc.LookupInvoiceMsg_SetId{
SetId: firstSetID,
},
LookupModifier: invoicesrpc.LookupModifier_HTLC_SET_ONLY,
})
require.Nil(t.t, err)
subInvoice2, err := dave.LookupInvoiceV2(ctxb, &invoicesrpc.LookupInvoiceMsg{
}
subInvoice1 := dave.RPC.LookupInvoiceV2(msg)
msg = &invoicesrpc.LookupInvoiceMsg{
InvoiceRef: &invoicesrpc.LookupInvoiceMsg_SetId{
SetId: secondSetID,
},
LookupModifier: invoicesrpc.LookupModifier_HTLC_SET_ONLY,
})
require.Nil(t.t, err)
}
subInvoice2 := dave.RPC.LookupInvoiceV2(msg)
// Each invoice should only show a single HTLC present, as we passed
// the HTLC set only modifier.
require.Equal(t.t, 1, len(subInvoice1.Htlcs))
require.Equal(t.t, 1, len(subInvoice2.Htlcs))
require.Equal(ht, 1, len(subInvoice1.Htlcs))
require.Equal(ht, 1, len(subInvoice2.Htlcs))
// If we look up the same invoice, by its payment address, but now with
// the HTLC blank modifier, then none of them should be returned.
rootInvoice, err := dave.LookupInvoiceV2(ctxb, &invoicesrpc.LookupInvoiceMsg{
msg = &invoicesrpc.LookupInvoiceMsg{
InvoiceRef: &invoicesrpc.LookupInvoiceMsg_PaymentAddr{
PaymentAddr: addInvoiceResp.PaymentAddr,
},
LookupModifier: invoicesrpc.LookupModifier_HTLC_SET_BLANK,
})
require.Nil(t.t, err)
require.Equal(t.t, 0, len(rootInvoice.Htlcs))
}
rootInvoice := dave.RPC.LookupInvoiceV2(msg)
require.Equal(ht, 0, len(rootInvoice.Htlcs))
// If we look up the same invoice, by its payment address, but without
// that modifier, then we should get all the relevant HTLCs.
rootInvoice, err = dave.LookupInvoiceV2(ctxb,
&invoicesrpc.LookupInvoiceMsg{
InvoiceRef: &invoicesrpc.LookupInvoiceMsg_PaymentAddr{
PaymentAddr: addInvoiceResp.PaymentAddr,
},
})
require.Nil(t.t, err)
require.Equal(t.t, 2, len(rootInvoice.Htlcs))
msg = &invoicesrpc.LookupInvoiceMsg{
InvoiceRef: &invoicesrpc.LookupInvoiceMsg_PaymentAddr{
PaymentAddr: addInvoiceResp.PaymentAddr,
},
}
rootInvoice = dave.RPC.LookupInvoiceV2(msg)
require.Equal(ht, 2, len(rootInvoice.Htlcs))
// Finally, we'll test that if we subscribe for notifications of
// settled invoices, we get a backlog, which includes the invoice we
@ -379,29 +339,22 @@ func testSendPaymentAMPInvoiceRepeat(net *lntest.NetworkHarness,
req = &lnrpc.InvoiceSubscription{
SettleIndex: 1,
}
ctxc, cancelSubscription2 := context.WithCancel(ctxb)
invSub2, err := dave.SubscribeInvoices(ctxc, req)
require.NoError(t.t, err)
defer cancelSubscription2()
invSub2 := dave.RPC.SubscribeInvoices(req)
// The first invoice we get back should match the state of the invoice
// after our second payment: amt updated, but only a single HTLC shown
// through.
backlogInv, _ := invSub2.Recv()
require.Equal(t.t, 1, len(backlogInv.Htlcs))
require.Equal(t.t, 2, len(backlogInv.AmpInvoiceState))
require.True(t.t, backlogInv.Settled) // nolint:staticcheck
require.Equal(t.t, paymentAmt*2, int(backlogInv.AmtPaidSat))
backlogInv := ht.ReceiveInvoiceUpdate(invSub2)
require.Equal(ht, 1, len(backlogInv.Htlcs))
require.Equal(ht, 2, len(backlogInv.AmpInvoiceState))
require.True(ht, backlogInv.Settled)
require.Equal(ht, paymentAmt*2, int(backlogInv.AmtPaidSat))
}
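
// payAmpInvoiceTwice is a hypothetical helper (not part of this diff)
// that condenses the flow exercised above into a sketch for readers
// following the refactor: a single AMP invoice is paid twice through
// the lntemp harness, and each settlement surfaces a fresh set ID in
// the invoice's AMP state. Only calls that appear in this test are
// used.
func payAmpInvoiceTwice(ht *lntemp.HarnessTest, carol,
	dave *node.HarnessNode) [][]byte {

	// Subscribe before adding the invoice so no update is missed.
	stream := dave.RPC.SubscribeInvoices(&lnrpc.InvoiceSubscription{})

	invoice := &lnrpc.Invoice{Value: 10_000, IsAmp: true}
	resp := dave.RPC.AddInvoice(invoice)

	// Drain the initial OPEN notification.
	ht.ReceiveInvoiceUpdate(stream)

	// Pay the same payment request twice. AMP permits this because
	// every attempt derives fresh child preimages.
	var update *lnrpc.Invoice
	for i := 0; i < 2; i++ {
		ht.CompletePaymentRequests(
			carol, []string{resp.PaymentRequest},
		)
		update = ht.ReceiveInvoiceUpdate(stream)
	}

	// The final update holds one AMP state entry per payment,
	// keyed by the hex-encoded set ID.
	var setIDs [][]byte
	for setIDStr := range update.AmpInvoiceState {
		setID, err := hex.DecodeString(setIDStr)
		require.NoError(ht, err)
		setIDs = append(setIDs, setID)
	}

	return setIDs
}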
// testSendPaymentAMP tests that we can send an AMP payment to a specified
// destination using SendPaymentV2.
func testSendPaymentAMP(net *lntest.NetworkHarness, t *harnessTest) {
ctxb := context.Background()
ctx := newMppTestContext(t, net)
defer ctx.shutdownNodes()
func testSendPaymentAMP(ht *lntemp.HarnessTest) {
mts := newMppTestScenario(ht)
const paymentAmt = btcutil.Amount(300000)
// Set up a network with three different paths Alice <-> Bob. Channel
@ -414,44 +367,37 @@ func testSendPaymentAMP(net *lntest.NetworkHarness, t *harnessTest) {
// \ /
// \__ Dave ____/
//
ctx.openChannel(ctx.carol, ctx.bob, 135000)
ctx.openChannel(ctx.alice, ctx.carol, 235000)
ctx.openChannel(ctx.dave, ctx.bob, 135000)
ctx.openChannel(ctx.alice, ctx.dave, 135000)
ctx.openChannel(ctx.eve, ctx.bob, 135000)
ctx.openChannel(ctx.carol, ctx.eve, 135000)
defer ctx.closeChannels()
ctx.waitForChannels()
mppReq := &mppOpenChannelRequest{
amtAliceCarol: 235000,
amtAliceDave: 135000,
amtCarolBob: 135000,
amtCarolEve: 135000,
amtDaveBob: 135000,
amtEveBob: 135000,
}
mts.openChannels(mppReq)
chanPointAliceDave := mts.channelPoints[1]
// Increase Dave's fee to make the test deterministic. Otherwise it
// would be unpredictable whether pathfinding would go through Carol
// or Dave for the first shard.
_, err := ctx.dave.UpdateChannelPolicy(
context.Background(),
&lnrpc.PolicyUpdateRequest{
Scope: &lnrpc.PolicyUpdateRequest_Global{Global: true},
BaseFeeMsat: 500000,
FeeRate: 0.001,
TimeLockDelta: 40,
},
)
if err != nil {
t.Fatalf("dave policy update: %v", err)
}
expectedPolicy := mts.updateDaveGlobalPolicy()
payment := sendAndAssertSuccess(
t, ctx.alice, &routerrpc.SendPaymentRequest{
Dest: ctx.bob.PubKey[:],
Amt: int64(paymentAmt),
FinalCltvDelta: chainreg.DefaultBitcoinTimeLockDelta,
TimeoutSeconds: 60,
FeeLimitMsat: noFeeLimitMsat,
Amp: true,
},
// Make sure Alice has heard the policy update.
ht.AssertChannelPolicyUpdate(
mts.alice, mts.dave, expectedPolicy, chanPointAliceDave, false,
)
sendReq := &routerrpc.SendPaymentRequest{
Dest: mts.bob.PubKey[:],
Amt: int64(paymentAmt),
FinalCltvDelta: chainreg.DefaultBitcoinTimeLockDelta,
TimeoutSeconds: 60,
FeeLimitMsat: noFeeLimitMsat,
Amp: true,
}
payment := ht.SendPaymentAssertSettled(mts.alice, sendReq)
// Check that Alice split the payment in at least three shards. Because
// the hand-off of the htlc to the link is asynchronous (via a mailbox),
// there is some non-determinism in the process. Depending on whether
@ -467,64 +413,55 @@ func testSendPaymentAMP(net *lntest.NetworkHarness, t *harnessTest) {
}
const minExpectedShards = 3
if succeeded < minExpectedShards {
t.Fatalf("expected at least %v shards, but got %v",
minExpectedShards, succeeded)
}
require.GreaterOrEqual(ht, succeeded, minExpectedShards,
"expected num of shards not reached")
// Fetch Bob's invoices.
invoiceResp, err := ctx.bob.ListInvoices(
ctxb, &lnrpc.ListInvoiceRequest{},
)
require.NoError(t.t, err)
// There should only be one invoice.
require.Equal(t.t, 1, len(invoiceResp.Invoices))
rpcInvoice := invoiceResp.Invoices[0]
// Fetch Bob's invoices. There should only be one invoice.
invoices := ht.AssertNumInvoices(mts.bob, 1)
rpcInvoice := invoices[0]
// Assert that the invoice is settled for the total payment amount and
// has the correct payment address.
require.True(t.t, rpcInvoice.Settled) // nolint:staticcheck
require.Equal(t.t, lnrpc.Invoice_SETTLED, rpcInvoice.State)
require.Equal(t.t, int64(paymentAmt), rpcInvoice.AmtPaidSat)
require.Equal(t.t, int64(paymentAmt*1000), rpcInvoice.AmtPaidMsat)
require.True(ht, rpcInvoice.Settled)
require.Equal(ht, lnrpc.Invoice_SETTLED, rpcInvoice.State)
require.Equal(ht, int64(paymentAmt), rpcInvoice.AmtPaidSat)
require.Equal(ht, int64(paymentAmt*1000), rpcInvoice.AmtPaidMsat)
// Finally, assert that the same set id is recorded for each htlc, and
// that the preimage hash pair is valid.
var setID []byte
require.Equal(t.t, succeeded, len(rpcInvoice.Htlcs))
require.Equal(ht, succeeded, len(rpcInvoice.Htlcs))
for _, htlc := range rpcInvoice.Htlcs {
require.NotNil(t.t, htlc.Amp)
require.NotNil(ht, htlc.Amp)
if setID == nil {
setID = make([]byte, 32)
copy(setID, htlc.Amp.SetId)
}
require.Equal(t.t, setID, htlc.Amp.SetId)
require.Equal(ht, setID, htlc.Amp.SetId)
// Parse the child hash and child preimage, and assert they are
// well-formed.
childHash, err := lntypes.MakeHash(htlc.Amp.Hash)
require.NoError(t.t, err)
require.NoError(ht, err)
childPreimage, err := lntypes.MakePreimage(htlc.Amp.Preimage)
require.NoError(t.t, err)
require.NoError(ht, err)
// Assert that the preimage actually matches the hashes.
validPreimage := childPreimage.Matches(childHash)
require.True(t.t, validPreimage)
require.True(ht, validPreimage)
}
// The set ID we extract above should be shown in the final settled
// state.
ampState := rpcInvoice.AmpInvoiceState[hex.EncodeToString(setID)]
require.Equal(t.t, lnrpc.InvoiceHTLCState_SETTLED, ampState.State)
require.Equal(ht, lnrpc.InvoiceHTLCState_SETTLED, ampState.State)
// Finally, close all channels.
mts.closeChannels()
}
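
// assertValidAmpHtlc is a hypothetical helper capturing the per-HTLC
// checks performed above, shown as a sketch: every HTLC on an AMP
// invoice must carry the same set ID, and its child preimage must
// match its child hash. The types are the lnrpc ones used in this
// test.
func assertValidAmpHtlc(ht *lntemp.HarnessTest, setID []byte,
	htlc *lnrpc.InvoiceHTLC) {

	require.NotNil(ht, htlc.Amp)
	require.Equal(ht, setID, htlc.Amp.SetId)

	// Parse the child hash and child preimage, and assert they are
	// well-formed.
	childHash, err := lntypes.MakeHash(htlc.Amp.Hash)
	require.NoError(ht, err)
	childPreimage, err := lntypes.MakePreimage(htlc.Amp.Preimage)
	require.NoError(ht, err)

	// The child preimage must hash to the child hash.
	require.True(ht, childPreimage.Matches(childHash))
}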
func testSendToRouteAMP(net *lntest.NetworkHarness, t *harnessTest) {
ctxb := context.Background()
ctx := newMppTestContext(t, net)
defer ctx.shutdownNodes()
func testSendToRouteAMP(ht *lntemp.HarnessTest) {
mts := newMppTestScenario(ht)
const (
paymentAmt = btcutil.Amount(300000)
numShards = 3
@ -532,63 +469,50 @@ func testSendToRouteAMP(net *lntest.NetworkHarness, t *harnessTest) {
chanAmt = shardAmt * 3 / 2
)
// Subscribe to bob's invoices.
req := &lnrpc.InvoiceSubscription{}
bobInvoiceSubscription := mts.bob.RPC.SubscribeInvoices(req)
// Set up a network with three different paths Alice <-> Bob.
// _ Eve _
// / \
// Alice -- Carol ---- Bob
// \ /
// \__ Dave ____/
//
ctx.openChannel(ctx.carol, ctx.bob, chanAmt)
ctx.openChannel(ctx.dave, ctx.bob, chanAmt)
ctx.openChannel(ctx.alice, ctx.dave, chanAmt)
ctx.openChannel(ctx.eve, ctx.bob, chanAmt)
ctx.openChannel(ctx.carol, ctx.eve, chanAmt)
// Since the channel Alice -> Carol will have to carry two
// shards, we make it larger.
ctx.openChannel(ctx.alice, ctx.carol, chanAmt+shardAmt)
defer ctx.closeChannels()
ctx.waitForChannels()
// Subscribe to bob's invoices.
req := &lnrpc.InvoiceSubscription{}
ctxc, cancelSubscription := context.WithCancel(ctxb)
bobInvoiceSubscription, err := ctx.bob.SubscribeInvoices(ctxc, req)
require.NoError(t.t, err)
defer cancelSubscription()
mppReq := &mppOpenChannelRequest{
// Since the channel Alice -> Carol will have to carry two
// shards, we make it larger.
amtAliceCarol: chanAmt + shardAmt,
amtAliceDave: chanAmt,
amtCarolBob: chanAmt,
amtCarolEve: chanAmt,
amtDaveBob: chanAmt,
amtEveBob: chanAmt,
}
mts.openChannels(mppReq)
// We'll send shards along three routes from Alice.
sendRoutes := [numShards][]*lntest.HarnessNode{
{ctx.carol, ctx.bob},
{ctx.dave, ctx.bob},
{ctx.carol, ctx.eve, ctx.bob},
sendRoutes := [numShards][]*node.HarnessNode{
{mts.carol, mts.bob},
{mts.dave, mts.bob},
{mts.carol, mts.eve, mts.bob},
}
payAddr := make([]byte, 32)
_, err = rand.Read(payAddr)
require.NoError(t.t, err)
setID := make([]byte, 32)
_, err = rand.Read(setID)
require.NoError(t.t, err)
payAddr := ht.Random32Bytes()
setID := ht.Random32Bytes()
var sharer amp.Sharer
sharer, err = amp.NewSeedSharer()
require.NoError(t.t, err)
sharer, err := amp.NewSeedSharer()
require.NoError(ht, err)
childPreimages := make(map[lntypes.Preimage]uint32)
responses := make(chan *lnrpc.HTLCAttempt, len(sendRoutes))
// Define a closure for sending each of the three shards.
sendShard := func(i int, hops []*lntest.HarnessNode) {
sendShard := func(i int, hops []*node.HarnessNode) {
// Build a route for the specified hops.
r, err := ctx.buildRoute(ctxb, shardAmt, ctx.alice, hops)
if err != nil {
t.Fatalf("unable to build route: %v", err)
}
r := mts.buildRoute(shardAmt, mts.alice, hops)
// Set the MPP records to indicate this is a payment shard.
hop := r.Hops[len(r.Hops)-1]
@ -602,7 +526,7 @@ func testSendToRouteAMP(net *lntest.NetworkHarness, t *harnessTest) {
if i < len(sendRoutes)-1 {
var left amp.Sharer
left, sharer, err = sharer.Split()
require.NoError(t.t, err)
require.NoError(ht, err)
child = left.Child(uint32(i))
} else {
@ -622,15 +546,10 @@ func testSendToRouteAMP(net *lntest.NetworkHarness, t *harnessTest) {
Route: r,
}
// We'll send all shards in their own goroutine, since SendToRoute will
// block as long as the payment is in flight.
// We'll send all shards in their own goroutine, since
// SendToRoute will block as long as the payment is in flight.
go func() {
ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
resp, err := ctx.alice.RouterClient.SendToRouteV2(ctxt, sendReq)
if err != nil {
t.Fatalf("unable to send payment: %v", err)
}
resp := mts.alice.RPC.SendToRouteV2(sendReq)
responses <- resp
}()
}
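
// The hunks elided above set the shard records on the final hop. For
// reference, the body has roughly this shape (a sketch; the record
// messages are lnrpc.MPPRecord and lnrpc.AMPRecord, and child is the
// share derived from the sharer):
//
//	hop.TlvPayload = true
//	hop.MppRecord = &lnrpc.MPPRecord{
//		PaymentAddr:  payAddr,
//		TotalAmtMsat: int64(paymentAmt) * 1000,
//	}
//	hop.AmpRecord = &lnrpc.AMPRecord{
//		RootShare:  child.Share[:],
//		SetId:      setID,
//		ChildIndex: child.Index,
//	}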
@ -640,21 +559,21 @@ func testSendToRouteAMP(net *lntest.NetworkHarness, t *harnessTest) {
// Ensure we get a notification of the invoice being added by Bob.
rpcInvoice, err := bobInvoiceSubscription.Recv()
require.NoError(t.t, err)
require.NoError(ht, err)
require.False(t.t, rpcInvoice.Settled) // nolint:staticcheck
require.Equal(t.t, lnrpc.Invoice_OPEN, rpcInvoice.State)
require.Equal(t.t, int64(0), rpcInvoice.AmtPaidSat)
require.Equal(t.t, int64(0), rpcInvoice.AmtPaidMsat)
require.Equal(t.t, payAddr, rpcInvoice.PaymentAddr)
require.False(ht, rpcInvoice.Settled)
require.Equal(ht, lnrpc.Invoice_OPEN, rpcInvoice.State)
require.Equal(ht, int64(0), rpcInvoice.AmtPaidSat)
require.Equal(ht, int64(0), rpcInvoice.AmtPaidMsat)
require.Equal(ht, payAddr, rpcInvoice.PaymentAddr)
require.Equal(t.t, 0, len(rpcInvoice.Htlcs))
require.Equal(ht, 0, len(rpcInvoice.Htlcs))
sendShard(1, sendRoutes[1])
sendShard(2, sendRoutes[2])
// Assert that all of the child preimages are unique.
require.Equal(t.t, len(sendRoutes), len(childPreimages))
require.Equal(ht, len(sendRoutes), len(childPreimages))
// Make a copy of the childPreimages map for validating the resulting
// invoice.
@ -665,24 +584,23 @@ func testSendToRouteAMP(net *lntest.NetworkHarness, t *harnessTest) {
// Wait for all responses to be back, and check that they all
// succeeded.
timer := time.After(defaultTimeout)
for range sendRoutes {
var resp *lnrpc.HTLCAttempt
select {
case resp = <-responses:
case <-time.After(defaultTimeout):
t.Fatalf("response not received")
case <-timer:
require.Fail(ht, "response not received")
}
if resp.Failure != nil {
t.Fatalf("received payment failure : %v", resp.Failure)
}
require.Nil(ht, resp.Failure, "received payment failure")
preimage, err := lntypes.MakePreimage(resp.Preimage)
require.NoError(t.t, err)
require.NoError(ht, err)
// Assert that the response includes one of our child preimages.
_, ok := childPreimages[preimage]
require.True(t.t, ok)
require.True(ht, ok)
// Remove this preimage from our set so that we ensure all
// responses have a unique child preimage.
@ -692,109 +610,51 @@ func testSendToRouteAMP(net *lntest.NetworkHarness, t *harnessTest) {
// There should now be a settle event for the invoice.
rpcInvoice, err = bobInvoiceSubscription.Recv()
require.NoError(t.t, err)
require.NoError(ht, err)
// Also fetch Bob's invoice from ListInvoices and assert it is equal to
// the one received via the subscription.
invoiceResp, err := ctx.bob.ListInvoices(
ctxb, &lnrpc.ListInvoiceRequest{},
)
require.NoError(t.t, err)
require.Equal(t.t, 1, len(invoiceResp.Invoices))
assertInvoiceEqual(t.t, rpcInvoice, invoiceResp.Invoices[0])
invoices := ht.AssertNumInvoices(mts.bob, 1)
ht.AssertInvoiceEqual(rpcInvoice, invoices[0])
// Assert that the invoice is settled for the total payment amount and
// has the correct payment address.
require.True(t.t, rpcInvoice.Settled) // nolint:staticcheck
require.Equal(t.t, lnrpc.Invoice_SETTLED, rpcInvoice.State)
require.Equal(t.t, int64(paymentAmt), rpcInvoice.AmtPaidSat)
require.Equal(t.t, int64(paymentAmt*1000), rpcInvoice.AmtPaidMsat)
require.Equal(t.t, payAddr, rpcInvoice.PaymentAddr)
require.True(ht, rpcInvoice.Settled)
require.Equal(ht, lnrpc.Invoice_SETTLED, rpcInvoice.State)
require.Equal(ht, int64(paymentAmt), rpcInvoice.AmtPaidSat)
require.Equal(ht, int64(paymentAmt*1000), rpcInvoice.AmtPaidMsat)
require.Equal(ht, payAddr, rpcInvoice.PaymentAddr)
// Finally, assert that the proper set id is recorded for each htlc, and
// that the preimage hash pair is valid.
require.Equal(t.t, numShards, len(rpcInvoice.Htlcs))
require.Equal(ht, numShards, len(rpcInvoice.Htlcs))
for _, htlc := range rpcInvoice.Htlcs {
require.NotNil(t.t, htlc.Amp)
require.Equal(t.t, setID, htlc.Amp.SetId)
require.NotNil(ht, htlc.Amp)
require.Equal(ht, setID, htlc.Amp.SetId)
// Parse the child hash and child preimage, and assert they are
// well-formed.
childHash, err := lntypes.MakeHash(htlc.Amp.Hash)
require.NoError(t.t, err)
require.NoError(ht, err)
childPreimage, err := lntypes.MakePreimage(htlc.Amp.Preimage)
require.NoError(t.t, err)
require.NoError(ht, err)
// Assert that the preimage actually matches the hashes.
validPreimage := childPreimage.Matches(childHash)
require.True(t.t, validPreimage)
require.True(ht, validPreimage)
// Assert that the HTLC includes one of our child preimages.
childIndex, ok := childPreimages[childPreimage]
require.True(t.t, ok)
require.True(ht, ok)
// Assert that the correct child index is reflected.
require.Equal(t.t, childIndex, htlc.Amp.ChildIndex)
require.Equal(ht, childIndex, htlc.Amp.ChildIndex)
// Remove this preimage from our set so that we ensure all HTLCs
// have a unique child preimage.
delete(childPreimages, childPreimage)
}
}
// assertInvoiceEqual asserts that two lnrpc.Invoices are equivalent. A custom
// comparison function is defined for these tests, since proto message returned
// from unary and streaming RPCs (as of protobuf 1.23.0 and grpc 1.29.1) aren't
// consistent with the private fields set on the messages. As a result, we avoid
// using require.Equal and test only the actual data members.
func assertInvoiceEqual(t *testing.T, a, b *lnrpc.Invoice) {
t.Helper()
// Ensure the HTLCs are sorted properly before attempting to compare.
sort.Slice(a.Htlcs, func(i, j int) bool {
return a.Htlcs[i].ChanId < a.Htlcs[j].ChanId
})
sort.Slice(b.Htlcs, func(i, j int) bool {
return b.Htlcs[i].ChanId < b.Htlcs[j].ChanId
})
require.Equal(t, a.Memo, b.Memo)
require.Equal(t, a.RPreimage, b.RPreimage)
require.Equal(t, a.RHash, b.RHash)
require.Equal(t, a.Value, b.Value)
require.Equal(t, a.ValueMsat, b.ValueMsat)
require.Equal(t, a.CreationDate, b.CreationDate)
require.Equal(t, a.SettleDate, b.SettleDate)
require.Equal(t, a.PaymentRequest, b.PaymentRequest)
require.Equal(t, a.DescriptionHash, b.DescriptionHash)
require.Equal(t, a.Expiry, b.Expiry)
require.Equal(t, a.FallbackAddr, b.FallbackAddr)
require.Equal(t, a.CltvExpiry, b.CltvExpiry)
require.Equal(t, a.RouteHints, b.RouteHints)
require.Equal(t, a.Private, b.Private)
require.Equal(t, a.AddIndex, b.AddIndex)
require.Equal(t, a.SettleIndex, b.SettleIndex)
require.Equal(t, a.AmtPaidSat, b.AmtPaidSat)
require.Equal(t, a.AmtPaidMsat, b.AmtPaidMsat)
require.Equal(t, a.State, b.State)
require.Equal(t, a.Features, b.Features)
require.Equal(t, a.IsKeysend, b.IsKeysend)
require.Equal(t, a.PaymentAddr, b.PaymentAddr)
require.Equal(t, a.IsAmp, b.IsAmp)
require.Equal(t, len(a.Htlcs), len(b.Htlcs))
for i := range a.Htlcs {
htlcA, htlcB := a.Htlcs[i], b.Htlcs[i]
require.Equal(t, htlcA.ChanId, htlcB.ChanId)
require.Equal(t, htlcA.HtlcIndex, htlcB.HtlcIndex)
require.Equal(t, htlcA.AmtMsat, htlcB.AmtMsat)
require.Equal(t, htlcA.AcceptHeight, htlcB.AcceptHeight)
require.Equal(t, htlcA.AcceptTime, htlcB.AcceptTime)
require.Equal(t, htlcA.ResolveTime, htlcB.ResolveTime)
require.Equal(t, htlcA.ExpiryHeight, htlcB.ExpiryHeight)
require.Equal(t, htlcA.State, htlcB.State)
require.Equal(t, htlcA.CustomRecords, htlcB.CustomRecords)
require.Equal(t, htlcA.MppTotalAmtMsat, htlcB.MppTotalAmtMsat)
require.Equal(t, htlcA.Amp, htlcB.Amp)
}
// Finally, close all channels.
mts.closeChannels()
}
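
// collectAttempts is a hypothetical helper distilling the response
// collection used above. Note the single timer created before the
// loop: all shards share one overall deadline, whereas the removed
// per-iteration time.After restarted the timeout on every receive.
func collectAttempts(ht *lntemp.HarnessTest,
	responses <-chan *lnrpc.HTLCAttempt, num int) []*lnrpc.HTLCAttempt {

	attempts := make([]*lnrpc.HTLCAttempt, 0, num)

	timer := time.After(defaultTimeout)
	for i := 0; i < num; i++ {
		select {
		case resp := <-responses:
			require.Nil(ht, resp.Failure, "payment failure")
			attempts = append(attempts, resp)

		case <-timer:
			require.Fail(ht, "response not received")
		}
	}

	return attempts
}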


@ -1,13 +1,11 @@
package itest
import (
"context"
"fmt"
"sync"
"time"
"github.com/lightningnetwork/lnd/lnrpc"
"github.com/lightningnetwork/lnd/lntest"
"github.com/lightningnetwork/lnd/lntemp"
"github.com/lightningnetwork/lnd/lnwire"
"github.com/stretchr/testify/require"
)
@ -15,16 +13,8 @@ import (
// testCustomMessage tests sending and receiving of overridden custom message
// types (within the message type range usually reserved for protocol messages)
// via the send and subscribe custom message APIs.
func testCustomMessage(net *lntest.NetworkHarness, t *harnessTest) {
ctx, cancel := context.WithCancel(context.Background())
var wg sync.WaitGroup
// At the end of our test, cancel our context and wait for all
// goroutines to exit.
defer func() {
cancel()
wg.Wait()
}()
func testCustomMessage(ht *lntemp.HarnessTest) {
alice, bob := ht.Alice, ht.Bob
var (
overrideType1 uint32 = 554
@ -34,28 +24,19 @@ func testCustomMessage(net *lntest.NetworkHarness, t *harnessTest) {
// Update Alice to accept custom protocol messages with type 1 but do
// not allow Bob to handle them yet.
net.Alice.Cfg.ExtraArgs = append(
net.Alice.Cfg.ExtraArgs,
extraArgs := []string{
fmt.Sprintf(msgOverrideArg, overrideType1),
)
require.NoError(t.t, net.RestartNode(net.Alice, nil, nil))
// Wait for Alice's server to be active after the restart before we
// try to subscribe to our message stream.
require.NoError(t.t, net.Alice.WaitUntilServerActive())
}
ht.RestartNodeWithExtraArgs(alice, extraArgs)
// Subscribe Alice to custom messages before we send any, so that we
// don't miss any.
msgClient, err := net.Alice.LightningClient.SubscribeCustomMessages(
ctx, &lnrpc.SubscribeCustomMessagesRequest{},
)
require.NoError(t.t, err, "alice could not subscribe")
msgClient, cancel := alice.RPC.SubscribeCustomMessages()
defer cancel()
// Create a channel to receive custom messages on.
messages := make(chan *lnrpc.CustomMessage)
wg.Add(1)
go func() {
defer wg.Done()
for {
// If we fail to receive, just exit. The test should
// fail elsewhere if it doesn't get a message that it
@ -69,120 +50,108 @@ func testCustomMessage(net *lntest.NetworkHarness, t *harnessTest) {
// test is shutting down.
select {
case messages <- msg:
case <-ctx.Done():
case <-ht.Context().Done():
return
}
}
}()
// Connect alice and bob so that they can exchange messages.
net.EnsureConnected(t.t, net.Alice, net.Bob)
ht.EnsureConnected(alice, bob)
// Create a custom message that is within our allowed range.
msgType := uint32(lnwire.CustomTypeStart + 1)
msgData := []byte{1, 2, 3}
// Send it from Bob to Alice.
ctxt, _ := context.WithTimeout(ctx, defaultTimeout)
_, err = net.Bob.LightningClient.SendCustomMessage(
ctxt, &lnrpc.SendCustomMessageRequest{
Peer: net.Alice.PubKey[:],
Type: msgType,
Data: msgData,
},
)
require.NoError(t.t, err, "bob could not send")
bobMsg := &lnrpc.SendCustomMessageRequest{
Peer: alice.PubKey[:],
Type: msgType,
Data: msgData,
}
bob.RPC.SendCustomMessage(bobMsg)
// Wait for Alice to receive the message. It should come through because
// it is within our allowed range.
// Wait for Alice to receive the message. It should come through
// because it is within our allowed range.
select {
case msg := <-messages:
// Check our type and data and (sanity) check the peer we got it
// from.
require.Equal(t.t, msgType, msg.Type, "first msg type wrong")
require.Equal(t.t, msgData, msg.Data, "first msg data wrong")
require.Equal(t.t, net.Bob.PubKey[:], msg.Peer, "first msg "+
// Check our type and data and (sanity) check the peer we got
// it from.
require.Equal(ht, msgType, msg.Type, "first msg type wrong")
require.Equal(ht, msgData, msg.Data, "first msg data wrong")
require.Equal(ht, bob.PubKey[:], msg.Peer, "first msg "+
"peer wrong")
case <-time.After(defaultTimeout):
t.t.Fatalf("alice did not receive first custom message: %v",
ht.Fatalf("alice did not receive first custom message: %v",
msgType)
}
// Try to send a message from Bob to Alice which has a message type
// outside of the custom type range and assert that it fails.
ctxt, _ = context.WithTimeout(ctx, defaultTimeout)
_, err = net.Bob.LightningClient.SendCustomMessage(
ctxt, &lnrpc.SendCustomMessageRequest{
Peer: net.Alice.PubKey[:],
Type: overrideType1,
Data: msgData,
},
)
require.Error(t.t, err, "bob should not be able to send type 1")
bobMsg = &lnrpc.SendCustomMessageRequest{
Peer: alice.PubKey[:],
Type: overrideType1,
Data: msgData,
}
_, err := bob.RPC.LN.SendCustomMessage(ht.Context(), bobMsg)
require.Error(ht, err, "bob should not be able to send type 1")
// Now, restart Bob with the ability to send two different custom
// protocol messages.
net.Bob.Cfg.ExtraArgs = append(
net.Bob.Cfg.ExtraArgs,
extraArgs = []string{
fmt.Sprintf(msgOverrideArg, overrideType1),
fmt.Sprintf(msgOverrideArg, overrideType2),
)
require.NoError(t.t, net.RestartNode(net.Bob, nil, nil))
}
ht.RestartNodeWithExtraArgs(bob, extraArgs)
// Make sure Bob and Alice are connected after his restart.
net.EnsureConnected(t.t, net.Alice, net.Bob)
ht.EnsureConnected(alice, bob)
// Send a message from Bob to Alice with a type that Bob is allowed to
// send, but Alice will not handle as a custom message.
ctxt, _ = context.WithTimeout(ctx, defaultTimeout)
_, err = net.Bob.LightningClient.SendCustomMessage(
ctxt, &lnrpc.SendCustomMessageRequest{
Peer: net.Alice.PubKey[:],
Type: overrideType2,
Data: msgData,
},
)
require.NoError(t.t, err, "bob should be able to send type 2")
bobMsg = &lnrpc.SendCustomMessageRequest{
Peer: alice.PubKey[:],
Type: overrideType2,
Data: msgData,
}
bob.RPC.SendCustomMessage(bobMsg)
// Do a quick check that Alice did not receive this message in her
// stream. Note that this is an instant check, so could miss the message
// being received. We'll also check below that she didn't get it, this
// is just a sanity check.
// stream. Note that this is an instant check, so could miss the
// message being received. We'll also check below that she didn't get
// it, this is just a sanity check.
select {
case msg := <-messages:
t.t.Fatalf("unexpected message: %v", msg)
ht.Fatalf("unexpected message: %v", msg)
default:
}
// Finally, send a custom message with a type that Bob is allowed to
// send and Alice is configured to receive.
ctxt, _ = context.WithTimeout(ctx, defaultTimeout)
_, err = net.Bob.LightningClient.SendCustomMessage(
ctxt, &lnrpc.SendCustomMessageRequest{
Peer: net.Alice.PubKey[:],
Type: overrideType1,
Data: msgData,
},
)
require.NoError(t.t, err, "bob should be able to send type 1")
bobMsg = &lnrpc.SendCustomMessageRequest{
Peer: alice.PubKey[:],
Type: overrideType1,
Data: msgData,
}
bob.RPC.SendCustomMessage(bobMsg)
// Wait to receive a message from Bob. This check serves to ensure that
// our message type 1 was delivered, and assert that the preceding one
// was not (we could have missed it in our check above). When we receive
// the second message, we know that the first one did not go through,
// because we expect our messages to deliver in order.
// was not (we could have missed it in our check above). When we
// receive the second message, we know that the first one did not go
// through, because we expect our messages to deliver in order.
select {
case msg := <-messages:
// Check our type and data and (sanity) check the peer we got it
// from.
require.Equal(t.t, overrideType1, msg.Type, "second message "+
// Check our type and data and (sanity) check the peer we got
// it from.
require.Equal(ht, overrideType1, msg.Type, "second message "+
"type")
require.Equal(t.t, msgData, msg.Data, "second message data")
require.Equal(t.t, net.Bob.PubKey[:], msg.Peer, "second "+
require.Equal(ht, msgData, msg.Data, "second message data")
require.Equal(ht, bob.PubKey[:], msg.Peer, "second "+
"message peer")
case <-time.After(defaultTimeout):
t.t.Fatalf("alice did not receive second custom message")
ht.Fatalf("alice did not receive second custom message")
}
}
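
// forwardCustomMessages is a hypothetical adapter mirroring the
// receive goroutine above: it pumps messages from the gRPC stream
// onto a plain channel until the stream errors or the context is
// done. A sketch, assuming the generated
// lnrpc.Lightning_SubscribeCustomMessagesClient stream type.
func forwardCustomMessages(ctx context.Context,
	stream lnrpc.Lightning_SubscribeCustomMessagesClient,
) <-chan *lnrpc.CustomMessage {

	messages := make(chan *lnrpc.CustomMessage)
	go func() {
		defer close(messages)
		for {
			// A receive error simply ends the pump; the
			// test fails elsewhere if an expected message
			// never arrives.
			msg, err := stream.Recv()
			if err != nil {
				return
			}

			select {
			case messages <- msg:
			case <-ctx.Done():
				return
			}
		}
	}()

	return messages
}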


@ -1,19 +1,18 @@
package itest
import (
"context"
"encoding/hex"
"fmt"
"sync"
"strings"
"time"
"github.com/btcsuite/btcd/btcutil"
"github.com/btcsuite/btcd/wire"
"github.com/lightningnetwork/lnd/chainreg"
"github.com/lightningnetwork/lnd/lnrpc"
"github.com/lightningnetwork/lnd/lnrpc/routerrpc"
"github.com/lightningnetwork/lnd/lntest"
"github.com/lightningnetwork/lnd/lntemp"
"github.com/lightningnetwork/lnd/lntemp/node"
"github.com/lightningnetwork/lnd/lntest/wait"
"github.com/lightningnetwork/lnd/lntypes"
"github.com/lightningnetwork/lnd/routing/route"
"github.com/stretchr/testify/require"
"google.golang.org/grpc/codes"
@ -35,163 +34,145 @@ type interceptorTestCase struct {
// testForwardInterceptorDedupHtlc tests that upon reconnection, duplicate
// HTLCs aren't re-notified using the HTLC interceptor API.
func testForwardInterceptorDedupHtlc(net *lntest.NetworkHarness, t *harnessTest) {
func testForwardInterceptorDedupHtlc(ht *lntemp.HarnessTest) {
// Initialize the test context with 3 connected nodes.
alice := net.NewNode(t.t, "alice", nil)
defer shutdownAndAssert(net, t, alice)
ts := newInterceptorTestScenario(ht)
bob := net.NewNode(t.t, "bob", nil)
defer shutdownAndAssert(net, t, bob)
carol := net.NewNode(t.t, "carol", nil)
defer shutdownAndAssert(net, t, carol)
tc := newInterceptorTestContext(t, net, alice, bob, carol)
const (
chanAmt = btcutil.Amount(300000)
)
alice, bob, carol := ts.alice, ts.bob, ts.carol
// Open and wait for channels.
tc.openChannel(tc.alice, tc.bob, chanAmt)
tc.openChannel(tc.bob, tc.carol, chanAmt)
defer tc.closeChannels()
tc.waitForChannels()
const chanAmt = btcutil.Amount(300000)
p := lntemp.OpenChannelParams{Amt: chanAmt}
reqs := []*lntemp.OpenChannelRequest{
{Local: alice, Remote: bob, Param: p},
{Local: bob, Remote: carol, Param: p},
}
resp := ht.OpenMultiChannelsAsync(reqs)
cpAB, cpBC := resp[0], resp[1]
ctxb := context.Background()
ctxt, cancelInterceptor := context.WithCancel(ctxb)
interceptor, err := tc.bob.RouterClient.HtlcInterceptor(ctxt)
require.NoError(tc.t.t, err, "failed to create HtlcInterceptor")
// Make sure Alice is aware of channel Bob=>Carol.
ht.AssertTopologyChannelOpen(alice, cpBC)
addResponse, err := tc.carol.AddInvoice(ctxb, &lnrpc.Invoice{
ValueMsat: 1000,
})
require.NoError(tc.t.t, err, "unable to add invoice")
// Connect the interceptor.
interceptor, cancelInterceptor := bob.RPC.HtlcInterceptor()
invoice, err := tc.carol.LookupInvoice(ctxb, &lnrpc.PaymentHash{
RHashStr: hex.EncodeToString(addResponse.RHash),
})
require.NoError(tc.t.t, err, "unable to find invoice")
// Prepare the test cases.
req := &lnrpc.Invoice{ValueMsat: 1000}
addResponse := carol.RPC.AddInvoice(req)
invoice := carol.RPC.LookupInvoice(addResponse.RHash)
tc := &interceptorTestCase{
amountMsat: 1000,
invoice: invoice,
payAddr: invoice.PaymentAddr,
}
// We initiate a payment from Alice.
done := make(chan struct{})
go func() {
// Signal that all the payments have been sent.
defer close(done)
ts.sendPaymentAndAssertAction(tc)
}()
// We start the htlc interceptor with a simple implementation that
// saves all intercepted packets. These packets are held to simulate a
// pending payment.
interceptedPacketstMap := &sync.Map{}
var wg sync.WaitGroup
wg.Add(1)
go func() {
defer wg.Done()
for {
packet, err := interceptor.Recv()
if err != nil {
// If it is just the error result of the
// context cancellation, then we exit silently.
status, ok := status.FromError(err)
if ok && status.Code() == codes.Canceled {
return
}
// Otherwise it is an unexpected error and we fail
// the test.
require.NoError(
tc.t.t, err,
"unexpected error in interceptor.Recv()",
)
return
}
interceptedPacketstMap.Store(
packet.IncomingCircuitKey.HtlcId, packet,
)
}
}()
// We initiate a payment from Alice.
wg.Add(1)
go func() {
defer wg.Done()
_, _ = tc.sendAliceToCarolPayment(
ctxb, 1000,
invoice.RHash, invoice.PaymentAddr,
)
}()
packet := ht.ReceiveHtlcInterceptor(interceptor)
// Here we should wait for the channel to contain a pending htlc, and
// also be shown as being active.
err = wait.Predicate(func() bool {
channels, err := tc.bob.ListChannels(ctxt, &lnrpc.ListChannelsRequest{
ActiveOnly: true,
Peer: tc.alice.PubKey[:],
})
if err != nil {
return false
err := wait.NoError(func() error {
channel := ht.QueryChannelByChanPoint(bob, cpAB)
if len(channel.PendingHtlcs) == 0 {
return fmt.Errorf("expect alice <> bob channel to " +
"have pending htlcs")
}
if len(channels.Channels) == 0 {
return false
if channel.Active {
return nil
}
aliceChan := channels.Channels[0]
if len(aliceChan.PendingHtlcs) == 0 {
return false
}
return aliceChan.Active
return fmt.Errorf("channel not active")
}, defaultTimeout)
require.NoError(
tc.t.t, err, "alice <> bob channel pending htlc never arrived",
ht, err, "alice <> bob channel pending htlc never arrived",
)
// At this point we want to make bob's link send all pending htlcs to
// the switch again. We force this behavior by disconnecting and
// reconnecting the peer.
if err := tc.net.DisconnectNodes(tc.bob, tc.alice); err != nil {
tc.t.Fatalf("failed to disconnect alice and bob")
}
tc.net.EnsureConnected(tc.t.t, tc.bob, tc.alice)
ht.DisconnectNodes(bob, alice)
ht.EnsureConnected(bob, alice)
// Here we wait for the channel to be active again.
err = wait.Predicate(func() bool {
req := &lnrpc.ListChannelsRequest{
ActiveOnly: true,
Peer: tc.alice.PubKey[:],
}
channels, err := tc.bob.ListChannels(ctxt, req)
return err == nil && len(channels.Channels) > 0
}, defaultTimeout)
require.NoError(
tc.t.t, err, "alice <> bob channel didn't re-activate",
)
ht.AssertChannelExists(bob, cpAB)
// Now that the channel is active we make sure the test passes as
// expected.
payments, err := tc.alice.ListPayments(ctxb, &lnrpc.ListPaymentsRequest{
IncludeIncomplete: true,
})
require.NoError(tc.t.t, err, "failed to fetch payment")
// We expect one in flight payment since we held the htlcs.
require.Equal(tc.t.t, len(payments.Payments), 1)
require.Equal(tc.t.t, payments.Payments[0].Status, lnrpc.Payment_IN_FLIGHT)
// We now fail all htlcs to cancel the payment.
packetsCount := 0
interceptedPacketstMap.Range(func(_, packet interface{}) bool {
p := packet.(*routerrpc.ForwardHtlcInterceptRequest)
_ = interceptor.Send(&routerrpc.ForwardHtlcInterceptResponse{
IncomingCircuitKey: p.IncomingCircuitKey,
Action: routerrpc.ResolveHoldForwardAction_FAIL,
})
packetsCount++
return true
})
var preimage lntypes.Preimage
copy(preimage[:], invoice.RPreimage)
ht.AssertPaymentStatus(alice, preimage, lnrpc.Payment_IN_FLIGHT)
// At this point if we have more than one held htlcs then we should
// fail. This means we hold the same htlc twice which is a risk we
// want to eliminate. If we don't have the same htlc twice in theory we
// can cancel one and settle the other by mistake.
require.Equal(tc.t.t, packetsCount, 1)
// fail. This means we hold the same htlc twice which is a risk we want
// to eliminate. If we don't have the same htlc twice in theory we can
// cancel one and settle the other by mistake.
errDone := make(chan struct{})
go func() {
defer close(errDone)
_, err := interceptor.Recv()
require.Error(ht, err, "expected an error from interceptor")
status, ok := status.FromError(err)
switch {
// If it is just the error result of the context cancellation
// then we exit silently.
case ok && status.Code() == codes.Canceled:
fallthrough
// When the test ends, during the node's shutdown it will close
// the connection.
case strings.Contains(err.Error(), "closed network connection"):
fallthrough
case strings.Contains(err.Error(), "EOF"):
return
}
// Otherwise we receive an unexpected error.
require.Failf(ht, "interceptor", "unexpected err: %v", err)
}()
// We now fail all htlcs to cancel the payment.
err = interceptor.Send(&routerrpc.ForwardHtlcInterceptResponse{
IncomingCircuitKey: packet.IncomingCircuitKey,
Action: routerrpc.ResolveHoldForwardAction_FAIL,
})
require.NoError(ht, err, "failed to send request")
// Cancel the context, which will disconnect the above interceptor.
cancelInterceptor()
wg.Wait()
// Make sure all goroutines are finished.
select {
case <-done:
case <-time.After(defaultTimeout):
require.Fail(ht, "timeout waiting for sending payment")
}
select {
case <-errDone:
case <-time.After(defaultTimeout):
require.Fail(ht, "timeout waiting for interceptor error")
}
// Finally, close channels.
ht.CloseChannel(alice, cpAB)
ht.CloseChannel(bob, cpBC)
}
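
// isGracefulInterceptorErr is a hypothetical predicate isolating the
// shutdown classification repeated in the interceptor goroutines of
// this file: context cancellation, a closed network connection, and
// EOF all mean the stream ended cleanly rather than signalling a test
// failure.
func isGracefulInterceptorErr(err error) bool {
	if s, ok := status.FromError(err); ok &&
		s.Code() == codes.Canceled {

		return true
	}

	return strings.Contains(err.Error(), "closed network connection") ||
		strings.Contains(err.Error(), "EOF")
}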
// testForwardInterceptorBasic tests the forward interceptor RPC layer.
@ -204,265 +185,191 @@ func testForwardInterceptorDedupHtlc(net *lntest.NetworkHarness, t *harnessTest)
// 3. Intercepted held htlcs result in no payment (invoice is not settled).
// 4. When Interceptor disconnects it resumes all held htlcs, which result in
// valid payment (invoice is settled).
func testForwardInterceptorBasic(net *lntest.NetworkHarness, t *harnessTest) {
// Initialize the test context with 3 connected nodes.
alice := net.NewNode(t.t, "alice", nil)
defer shutdownAndAssert(net, t, alice)
func testForwardInterceptorBasic(ht *lntemp.HarnessTest) {
ts := newInterceptorTestScenario(ht)
bob := net.NewNode(t.t, "bob", nil)
defer shutdownAndAssert(net, t, bob)
carol := net.NewNode(t.t, "carol", nil)
defer shutdownAndAssert(net, t, carol)
testContext := newInterceptorTestContext(t, net, alice, bob, carol)
const (
chanAmt = btcutil.Amount(300000)
)
alice, bob, carol := ts.alice, ts.bob, ts.carol
// Open and wait for channels.
testContext.openChannel(testContext.alice, testContext.bob, chanAmt)
testContext.openChannel(testContext.bob, testContext.carol, chanAmt)
defer testContext.closeChannels()
testContext.waitForChannels()
const chanAmt = btcutil.Amount(300000)
p := lntemp.OpenChannelParams{Amt: chanAmt}
reqs := []*lntemp.OpenChannelRequest{
{Local: alice, Remote: bob, Param: p},
{Local: bob, Remote: carol, Param: p},
}
resp := ht.OpenMultiChannelsAsync(reqs)
cpAB, cpBC := resp[0], resp[1]
// Make sure Alice is aware of channel Bob=>Carol.
ht.AssertTopologyChannelOpen(alice, cpBC)
// Connect the interceptor.
ctxb := context.Background()
ctxt, cancelInterceptor := context.WithTimeout(ctxb, defaultTimeout)
interceptor, err := testContext.bob.RouterClient.HtlcInterceptor(ctxt)
require.NoError(t.t, err, "failed to create HtlcInterceptor")
interceptor, cancelInterceptor := bob.RPC.HtlcInterceptor()
// Prepare the test cases.
testCases := testContext.prepareTestCases()
testCases := ts.prepareTestCases()
// A channel for the interceptor go routine to send the requested packets.
interceptedChan := make(chan *routerrpc.ForwardHtlcInterceptRequest,
len(testCases))
// Run the interceptor loop in its own go routine.
var wg sync.WaitGroup
wg.Add(1)
// For each test case make sure we initiate a payment from Alice to
// Carol routed through Bob. For each payment we also test its final
// status according to the interceptorAction specified in the test
// case.
done := make(chan struct{})
go func() {
defer wg.Done()
for {
request, err := interceptor.Recv()
if err != nil {
// If it is just the error result of the context cancellation
// then we exit silently.
status, ok := status.FromError(err)
if ok && status.Code() == codes.Canceled {
return
}
// Otherwise it is an unexpected error and we fail the test.
require.NoError(t.t, err, "unexpected error in interceptor.Recv()")
return
}
interceptedChan <- request
}
}()
// Signal that all the payments have been sent.
defer close(done)
// For each test case make sure we initiate a payment from Alice to Carol
// routed through Bob. For each payment we also test its final status
// according to the interceptorAction specified in the test case.
wg.Add(1)
go func() {
defer wg.Done()
for _, tc := range testCases {
attempt, err := testContext.sendAliceToCarolPayment(
context.Background(), tc.invoice.ValueMsat,
tc.invoice.RHash, tc.payAddr,
)
if t.t.Failed() {
return
}
if err != nil {
require.NoError(t.t, err, "failed to send payment")
}
switch tc.interceptorAction {
// For 'fail' interceptor action we make sure the payment failed.
case routerrpc.ResolveHoldForwardAction_FAIL:
require.Equal(t.t, lnrpc.HTLCAttempt_FAILED,
attempt.Status, "expected payment to fail")
// Assert that we get a temporary channel
// failure which has a channel update.
require.NotNil(t.t, attempt.Failure)
require.NotNil(t.t, attempt.Failure.ChannelUpdate)
require.Equal(t.t,
lnrpc.Failure_TEMPORARY_CHANNEL_FAILURE,
attempt.Failure.Code)
// For settle and resume we make sure the payment is successful.
case routerrpc.ResolveHoldForwardAction_SETTLE:
fallthrough
case routerrpc.ResolveHoldForwardAction_RESUME:
require.Equal(t.t, lnrpc.HTLCAttempt_SUCCEEDED,
attempt.Status, "expected payment to succeed")
}
attempt := ts.sendPaymentAndAssertAction(tc)
ts.assertAction(tc, attempt)
}
}()
// We make sure here the interceptor has processed all packets before we
// check the payment statuses.
for i := 0; i < len(testCases); i++ {
select {
case request := <-interceptedChan:
// Assert sanity of informational packet data.
require.NotZero(t.t, request.OutgoingRequestedChanId)
require.NotZero(t.t, request.IncomingExpiry)
require.NotZero(t.t, request.IncomingAmountMsat)
// We make sure here the interceptor has processed all packets before
// we check the payment statuses.
for _, tc := range testCases {
request := ht.ReceiveHtlcInterceptor(interceptor)
require.Less(
t.t,
request.OutgoingExpiry, request.IncomingExpiry,
)
require.Less(
t.t,
request.OutgoingAmountMsat,
request.IncomingAmountMsat,
)
// Assert sanity of informational packet data.
require.NotZero(ht, request.OutgoingRequestedChanId)
require.NotZero(ht, request.IncomingExpiry)
require.NotZero(ht, request.IncomingAmountMsat)
value, ok := request.CustomRecords[customTestKey]
require.True(t.t, ok, "expected custom record")
require.Equal(t.t, customTestValue, value)
require.Less(ht, request.OutgoingExpiry,
request.IncomingExpiry)
require.Less(ht, request.OutgoingAmountMsat,
request.IncomingAmountMsat)
testCase := testCases[i]
value, ok := request.CustomRecords[customTestKey]
require.True(ht, ok, "expected custom record")
require.Equal(ht, customTestValue, value)
// For held packets we ignore, keeping them in hold status.
if testCase.shouldHold {
continue
}
// For all other packets we resolve according to the test case.
_ = interceptor.Send(&routerrpc.ForwardHtlcInterceptResponse{
IncomingCircuitKey: request.IncomingCircuitKey,
Action: testCase.interceptorAction,
Preimage: testCase.invoice.RPreimage,
})
case <-time.After(defaultTimeout):
t.Fatalf("response from interceptor was not received %v", i)
// For held packets we ignore, keeping them in hold status.
if tc.shouldHold {
continue
}
// For all other packets we resolve according to the test case.
err := interceptor.Send(&routerrpc.ForwardHtlcInterceptResponse{
IncomingCircuitKey: request.IncomingCircuitKey,
Action: tc.interceptorAction,
Preimage: tc.invoice.RPreimage,
})
require.NoError(ht, err, "failed to send request")
}
// At this point we are left with the held packets; we want to make sure
// each one of them has a corresponding 'in-flight' payment at
// At this point we are left with the held packets; we want to make
// sure each one of them has a corresponding 'in-flight' payment at
// Alice's node.
payments, err := testContext.alice.ListPayments(context.Background(),
&lnrpc.ListPaymentsRequest{IncludeIncomplete: true})
require.NoError(t.t, err, "failed to fetch payment")
for _, testCase := range testCases {
if testCase.shouldHold {
hashStr := hex.EncodeToString(testCase.invoice.RHash)
var foundPayment *lnrpc.Payment
expectedAmt := testCase.invoice.ValueMsat
for _, p := range payments.Payments {
if p.PaymentHash == hashStr {
foundPayment = p
break
}
}
require.NotNil(t.t, foundPayment, fmt.Sprintf("expected "+
"to find pending payment for held htlc %v",
hashStr))
require.Equal(t.t, lnrpc.Payment_IN_FLIGHT,
foundPayment.Status, "expected payment to be "+
"in flight")
require.Equal(t.t, expectedAmt, foundPayment.ValueMsat,
"incorrect in flight amount")
if !testCase.shouldHold {
continue
}
var preimage lntypes.Preimage
copy(preimage[:], testCase.invoice.RPreimage)
payment := ht.AssertPaymentStatus(
alice, preimage, lnrpc.Payment_IN_FLIGHT,
)
expectedAmt := testCase.invoice.ValueMsat
require.Equal(ht, expectedAmt, payment.ValueMsat,
"incorrect in flight amount")
}
// Disconnecting the interceptor should cause the held packets to be
// resumed. After that we wait for all goroutines to finish, including
// the one that tests the final payment status for the held payment.
// Cancel the context, which will disconnect the above interceptor.
cancelInterceptor()
wg.Wait()
// Disconnecting the interceptor should cause the held packets to be
// resumed. After that we wait for all goroutines to finish, including
// the one that tests the final payment status for the held payment.
select {
case <-done:
case <-time.After(defaultTimeout):
require.Fail(ht, "timeout waiting for sending payment")
}
// Verify that we don't get notified about already completed HTLCs.
// We do that by restarting alice, the sender of the HTLCs. Under
// https://github.com/lightningnetwork/lnd/issues/5115
// this should cause all HTLCs settled or failed by the interceptor to renotify.
restartAlice, err := net.SuspendNode(alice)
require.NoError(t.t, err, "failed to suspend alice")
// this should cause all HTLCs settled or failed by the interceptor to
// renotify.
restartAlice := ht.SuspendNode(alice)
require.NoError(ht, restartAlice(), "failed to restart alice")
ctxt, cancelInterceptor = context.WithTimeout(ctxb, defaultTimeout)
defer cancelInterceptor()
interceptor, err = testContext.bob.RouterClient.HtlcInterceptor(ctxt)
require.NoError(t.t, err, "failed to create HtlcInterceptor")
// Make sure the channel is active from Bob's PoV.
ht.AssertChannelExists(bob, cpAB)
err = restartAlice()
require.NoError(t.t, err, "failed to restart alice")
// Create a new interceptor as the old one has quit.
interceptor, cancelInterceptor = bob.RPC.HtlcInterceptor()
done = make(chan struct{})
go func() {
request, err := interceptor.Recv()
if err != nil {
// If it is just the error result of the context cancellation
// then we exit silently.
status, ok := status.FromError(err)
if ok && status.Code() == codes.Canceled {
return
}
// Otherwise it is an unexpected error and we fail the test.
require.NoError(
t.t, err, "unexpected error in interceptor.Recv()",
)
defer close(done)
_, err := interceptor.Recv()
require.Error(ht, err, "expected an error from interceptor")
status, ok := status.FromError(err)
switch {
// If it is just the error result of the context cancellation
// then we exit silently.
case ok && status.Code() == codes.Canceled:
fallthrough
// When the test ends, during the node's shutdown it will close
// the connection.
case strings.Contains(err.Error(), "closed network connection"):
fallthrough
case strings.Contains(err.Error(), "EOF"):
return
}
require.Nil(t.t, request, "no more intercepts should arrive")
// Otherwise we receive an unexpected error.
require.Failf(ht, "iinterceptor", "unexpected err: %v", err)
}()
err = wait.Predicate(func() bool {
channels, err := bob.ListChannels(ctxt, &lnrpc.ListChannelsRequest{
ActiveOnly: true, Peer: alice.PubKey[:],
})
return err == nil && len(channels.Channels) > 0
}, defaultTimeout)
require.NoError(t.t, err, "alice <> bob channel didn't re-activate")
}
// interceptorTestContext is a helper struct to hold the test context and
// provide the needed functionality.
type interceptorTestContext struct {
t *harnessTest
net *lntest.NetworkHarness
// Keep a list of all our active channels.
networkChans []*lnrpc.ChannelPoint
closeChannelFuncs []func()
alice, bob, carol *lntest.HarnessNode
nodes []*lntest.HarnessNode
}
func newInterceptorTestContext(t *harnessTest,
net *lntest.NetworkHarness,
alice, bob, carol *lntest.HarnessNode) *interceptorTestContext {
// Connect nodes
nodes := []*lntest.HarnessNode{alice, bob, carol}
for i := 0; i < len(nodes); i++ {
for j := i + 1; j < len(nodes); j++ {
net.EnsureConnected(t.t, nodes[i], nodes[j])
}
// Cancel the context, which will disconnect the above interceptor.
cancelInterceptor()
select {
case <-done:
case <-time.After(defaultTimeout):
require.Fail(ht, "timeout waiting for interceptor error")
}
ctx := interceptorTestContext{
t: t,
net: net,
// Finally, close channels.
ht.CloseChannel(alice, cpAB)
ht.CloseChannel(bob, cpBC)
}
// interceptorTestScenario is a helper struct to hold the test context and
// provide the needed functionality.
type interceptorTestScenario struct {
ht *lntemp.HarnessTest
alice, bob, carol *node.HarnessNode
}
// newInterceptorTestScenario initializes a new test scenario with three nodes
// and connects them to have the following topology,
//
// Alice --> Bob --> Carol
//
// Among them, Alice and Bob are standby nodes and Carol is a new node.
func newInterceptorTestScenario(
ht *lntemp.HarnessTest) *interceptorTestScenario {
alice, bob := ht.Alice, ht.Bob
carol := ht.NewNode("carol", nil)
ht.EnsureConnected(alice, bob)
ht.EnsureConnected(bob, carol)
return &interceptorTestScenario{
ht: ht,
alice: alice,
bob: bob,
carol: carol,
nodes: nodes,
}
return &ctx
}
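
// failForwards is a hypothetical helper showing a typical use of the
// scenario: run it in its own goroutine while payments are in flight
// and it fails every forward reported by Bob's interceptor. It is a
// sketch that uses only calls appearing in this file.
func failForwards(ht *lntemp.HarnessTest, ts *interceptorTestScenario,
	numPackets int) {

	interceptor, cancel := ts.bob.RPC.HtlcInterceptor()
	defer cancel()

	for i := 0; i < numPackets; i++ {
		packet := ht.ReceiveHtlcInterceptor(interceptor)

		// Fail the HTLC back to the sender; the payment attempt
		// then ends with a temporary channel failure.
		err := interceptor.Send(
			&routerrpc.ForwardHtlcInterceptResponse{
				IncomingCircuitKey: packet.IncomingCircuitKey,
				Action:             routerrpc.ResolveHoldForwardAction_FAIL,
			},
		)
		require.NoError(ht, err)
	}
}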
// prepareTestCases prepares 4 tests:
@ -470,36 +377,41 @@ func newInterceptorTestContext(t *harnessTest,
// 2. resumed htlc.
// 3. settling htlc externally.
// 4. held htlc that is resumed later.
func (c *interceptorTestContext) prepareTestCases() []*interceptorTestCase {
func (c *interceptorTestScenario) prepareTestCases() []*interceptorTestCase {
var (
actionFail = routerrpc.ResolveHoldForwardAction_FAIL
actionResume = routerrpc.ResolveHoldForwardAction_RESUME
actionSettle = routerrpc.ResolveHoldForwardAction_SETTLE
)
cases := []*interceptorTestCase{
{amountMsat: 1000, shouldHold: false,
interceptorAction: routerrpc.ResolveHoldForwardAction_FAIL},
{amountMsat: 1000, shouldHold: false,
interceptorAction: routerrpc.ResolveHoldForwardAction_RESUME},
{amountMsat: 1000, shouldHold: false,
interceptorAction: routerrpc.ResolveHoldForwardAction_SETTLE},
{amountMsat: 1000, shouldHold: true,
interceptorAction: routerrpc.ResolveHoldForwardAction_RESUME},
{
amountMsat: 1000, shouldHold: false,
interceptorAction: actionFail,
},
{
amountMsat: 1000, shouldHold: false,
interceptorAction: actionResume,
},
{
amountMsat: 1000, shouldHold: false,
interceptorAction: actionSettle,
},
{
amountMsat: 1000, shouldHold: true,
interceptorAction: actionResume,
},
}
for _, t := range cases {
addResponse, err := c.carol.AddInvoice(context.Background(), &lnrpc.Invoice{
ValueMsat: t.amountMsat,
})
require.NoError(c.t.t, err, "unable to add invoice")
invoice, err := c.carol.LookupInvoice(context.Background(), &lnrpc.PaymentHash{
RHashStr: hex.EncodeToString(addResponse.RHash),
})
require.NoError(c.t.t, err, "unable to find invoice")
inv := &lnrpc.Invoice{ValueMsat: t.amountMsat}
addResponse := c.carol.RPC.AddInvoice(inv)
invoice := c.carol.RPC.LookupInvoice(addResponse.RHash)
// We'll need to also decode the returned invoice so we can
// grab the payment address which is now required for ALL
// payments.
payReq, err := c.carol.DecodePayReq(context.Background(), &lnrpc.PayReqString{
PayReq: invoice.PaymentRequest,
})
require.NoError(c.t.t, err, "unable to decode invoice")
payReq := c.carol.RPC.DecodePayReq(invoice.PaymentRequest)
t.invoice = invoice
t.payAddr = payReq.PaymentAddr
@ -507,69 +419,15 @@ func (c *interceptorTestContext) prepareTestCases() []*interceptorTestCase {
return cases
}
func (c *interceptorTestContext) openChannel(from, to *lntest.HarnessNode,
chanSize btcutil.Amount) {
c.net.SendCoins(c.t.t, btcutil.SatoshiPerBitcoin, from)
chanPoint := openChannelAndAssert(
c.t, c.net, from, to,
lntest.OpenChannelParams{
Amt: chanSize,
},
)
c.closeChannelFuncs = append(c.closeChannelFuncs, func() {
closeChannelAndAssert(c.t, c.net, from, chanPoint, false)
})
c.networkChans = append(c.networkChans, chanPoint)
}
func (c *interceptorTestContext) closeChannels() {
for _, f := range c.closeChannelFuncs {
f()
}
}
func (c *interceptorTestContext) waitForChannels() {
// Wait for all nodes to have seen all channels.
for _, chanPoint := range c.networkChans {
for _, node := range c.nodes {
txid, err := lnrpc.GetChanPointFundingTxid(chanPoint)
require.NoError(c.t.t, err, "unable to get txid")
point := wire.OutPoint{
Hash: *txid,
Index: chanPoint.OutputIndex,
}
err = node.WaitForNetworkChannelOpen(chanPoint)
require.NoError(c.t.t, err, fmt.Sprintf("(%d): timeout "+
"waiting for channel(%s) open", node.NodeID,
point))
}
}
}
// sendAliceToCarolPayment sends a payment from alice to carol and makes an
// attempt to pay. The lnrpc.HTLCAttempt is returned.
func (c *interceptorTestContext) sendAliceToCarolPayment(ctx context.Context,
amtMsat int64,
paymentHash, paymentAddr []byte) (*lnrpc.HTLCAttempt, error) {
// sendPaymentAndAssertAction sends a payment from alice to carol and asserts
// that the specified interceptor action is taken.
func (c *interceptorTestScenario) sendPaymentAndAssertAction(
tc *interceptorTestCase) *lnrpc.HTLCAttempt {
// Build a route from alice to carol.
route, err := c.buildRoute(
ctx, amtMsat, []*lntest.HarnessNode{c.bob, c.carol},
paymentAddr,
route := c.buildRoute(
tc.amountMsat, []*node.HarnessNode{c.bob, c.carol}, tc.payAddr,
)
if err != nil {
return nil, err
}
sendReq := &routerrpc.SendToRouteRequest{
PaymentHash: paymentHash,
Route: route,
}
// Send a custom record to the forwarding node.
route.Hops[0].CustomRecords = map[uint64][]byte{
@ -577,21 +435,51 @@ func (c *interceptorTestContext) sendAliceToCarolPayment(ctx context.Context,
}
// Send the payment.
return c.alice.RouterClient.SendToRouteV2(ctx, sendReq)
sendReq := &routerrpc.SendToRouteRequest{
PaymentHash: tc.invoice.RHash,
Route: route,
}
return c.alice.RPC.SendToRouteV2(sendReq)
}
func (c *interceptorTestScenario) assertAction(tc *interceptorTestCase,
attempt *lnrpc.HTLCAttempt) {
// Now check the expected action has been taken.
switch tc.interceptorAction {
// For 'fail' interceptor action we make sure the payment failed.
case routerrpc.ResolveHoldForwardAction_FAIL:
require.Equal(c.ht, lnrpc.HTLCAttempt_FAILED, attempt.Status,
"expected payment to fail")
// Assert that we get a temporary channel failure which has a
// channel update.
require.NotNil(c.ht, attempt.Failure)
require.NotNil(c.ht, attempt.Failure.ChannelUpdate)
require.Equal(c.ht, lnrpc.Failure_TEMPORARY_CHANNEL_FAILURE,
attempt.Failure.Code)
// For settle and resume we make sure the payment is successful.
case routerrpc.ResolveHoldForwardAction_SETTLE:
fallthrough
case routerrpc.ResolveHoldForwardAction_RESUME:
require.Equal(c.ht, lnrpc.HTLCAttempt_SUCCEEDED,
attempt.Status, "expected payment to succeed")
}
}
// buildRoute is a helper function to build a route with given hops.
func (c *interceptorTestContext) buildRoute(ctx context.Context, amtMsat int64,
hops []*lntest.HarnessNode, payAddr []byte) (*lnrpc.Route, error) {
func (c *interceptorTestScenario) buildRoute(amtMsat int64,
hops []*node.HarnessNode, payAddr []byte) *lnrpc.Route {
rpcHops := make([][]byte, 0, len(hops))
for _, hop := range hops {
k := hop.PubKeyStr
pubkey, err := route.NewVertexFromStr(k)
if err != nil {
return nil, fmt.Errorf("error parsing %v: %v",
k, err)
}
require.NoErrorf(c.ht, err, "error parsing %v: %v", k, err)
rpcHops = append(rpcHops, pubkey[:])
}
@ -602,10 +490,7 @@ func (c *interceptorTestContext) buildRoute(ctx context.Context, amtMsat int64,
PaymentAddr: payAddr,
}
routeResp, err := c.alice.RouterClient.BuildRoute(ctx, req)
if err != nil {
return nil, err
}
routeResp := c.alice.RPC.BuildRoute(req)
return routeResp.Route, nil
return routeResp.Route
}
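
// The hunk elided above assembles the BuildRoute request. For
// reference, it looks roughly like this (a sketch; the field set
// follows routerrpc.BuildRouteRequest):
//
//	req := &routerrpc.BuildRouteRequest{
//		AmtMsat:        amtMsat,
//		FinalCltvDelta: chainreg.DefaultBitcoinTimeLockDelta,
//		HopPubkeys:     rpcHops,
//		PaymentAddr:    payAddr,
//	}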


@ -1,27 +1,23 @@
package itest
import (
"bytes"
"context"
"fmt"
"time"
"github.com/btcsuite/btcd/btcutil"
"github.com/btcsuite/btcd/wire"
"github.com/lightningnetwork/lnd/chainreg"
"github.com/lightningnetwork/lnd/lnrpc"
"github.com/lightningnetwork/lnd/lnrpc/routerrpc"
"github.com/lightningnetwork/lnd/lntest"
"github.com/lightningnetwork/lnd/lntemp"
"github.com/lightningnetwork/lnd/lntemp/node"
"github.com/lightningnetwork/lnd/lntypes"
"github.com/lightningnetwork/lnd/routing/route"
"github.com/stretchr/testify/require"
)
// testSendToRouteMultiPath tests that we are able to successfully route a
// payment using multiple shards across different paths, by using SendToRoute.
func testSendToRouteMultiPath(ht *lntemp.HarnessTest) {
	mts := newMppTestScenario(ht)
// To ensure the payment goes through separate paths, we'll set a
// channel size that can only carry one shard at a time. We'll divide
@ -39,55 +35,41 @@ func testSendToRouteMultiPath(net *lntest.NetworkHarness, t *harnessTest) {
// \ /
// \__ Dave ____/
//
	req := &mppOpenChannelRequest{
		// Since the channel Alice->Carol will have to carry two
		// shards, we make it larger.
		amtAliceCarol: chanAmt + shardAmt,
		amtAliceDave:  chanAmt,
		amtCarolBob:   chanAmt,
		amtCarolEve:   chanAmt,
		amtDaveBob:    chanAmt,
		amtEveBob:     chanAmt,
	}
	mts.openChannels(req)
// Make Bob create an invoice for Alice to pay.
	payReqs, rHashes, invoices := ht.CreatePayReqs(mts.bob, paymentAmt, 1)

	rHash := rHashes[0]
	payReq := payReqs[0]

	decodeResp := mts.bob.RPC.DecodePayReq(payReq)
	payAddr := decodeResp.PaymentAddr
// Subscribe the invoice.
stream := mts.bob.RPC.SubscribeSingleInvoice(rHash)
// We'll send shards along three routes from Alice.
	sendRoutes := [][]*node.HarnessNode{
		{mts.carol, mts.bob},
		{mts.dave, mts.bob},
		{mts.carol, mts.eve, mts.bob},
	}
responses := make(chan *lnrpc.HTLCAttempt, len(sendRoutes))
for _, hops := range sendRoutes {
// Build a route for the specified hops.
		r := mts.buildRoute(shardAmt, mts.alice, hops)
// Set the MPP records to indicate this is a payment shard.
hop := r.Hops[len(r.Hops)-1]
@ -103,62 +85,44 @@ func testSendToRouteMultiPath(net *lntest.NetworkHarness, t *harnessTest) {
Route: r,
}
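	// NOTE: an illustrative sketch, not part of this change. Marking a
	// shard as part of an MPP payment means populating the MPP record on
	// the final hop, roughly as follows (field values assumed here for
	// exposition only):
	//
	//	hop := r.Hops[len(r.Hops)-1]
	//	hop.TlvPayload = true
	//	hop.MppRecord = &lnrpc.MPPRecord{
	//		PaymentAddr:  payAddr,
	//		TotalAmtMsat: int64(paymentAmt * 1000),
	//	}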
		// We'll send all shards in their own goroutine, since
		// SendToRoute will block as long as the payment is in flight.
		go func() {
			resp := mts.alice.RPC.SendToRouteV2(sendReq)
responses <- resp
}()
}
// Wait for all responses to be back, and check that they all
// succeeded.
	timer := time.After(defaultTimeout)
	for range sendRoutes {
		var resp *lnrpc.HTLCAttempt
		select {
		case resp = <-responses:
		case <-timer:
			require.Fail(ht, "response not received")
		}

		require.Nil(ht, resp.Failure, "received payment failure")

		// All shards should come back with the preimage.
		require.Equal(ht, resp.Preimage, invoices[0].RPreimage,
			"preimage doesn't match")
}
// assertNumHtlcs is a helper that checks the node's latest payment,
// and asserts it was split into num shards.
	assertNumHtlcs := func(hn *node.HarnessNode, num int) {
		var preimage lntypes.Preimage
		copy(preimage[:], invoices[0].RPreimage)

		payment := ht.AssertPaymentStatus(
			hn, preimage, lnrpc.Payment_SUCCEEDED,
		)

		htlcs := payment.Htlcs
		require.NotEmpty(ht, htlcs, "no htlcs")
succeeded := 0
for _, htlc := range htlcs {
@ -166,194 +130,217 @@ func testSendToRouteMultiPath(net *lntest.NetworkHarness, t *harnessTest) {
succeeded++
}
}
		require.Equal(ht, num, succeeded, "HTLCs not matched")
}
// assertSettledInvoice checks that the invoice for the given payment
// hash is settled, and has been paid using num HTLCs.
	assertSettledInvoice := func(rhash []byte, num int) {
		var payHash lntypes.Hash
		copy(payHash[:], rhash)
		inv := ht.AssertInvoiceState(stream, lnrpc.Invoice_SETTLED)

		// Assert that the amount paid to the invoice is correct.
		require.EqualValues(ht, paymentAmt, inv.AmtPaidSat,
			"incorrect payment amt")

		require.Len(ht, inv.Htlcs, num, "wrong num of HTLCs")
}
// Finally check that the payment shows up with three settled HTLCs in
// Alice's list of payments...
	assertNumHtlcs(mts.alice, 3)
// ...and in Bob's list of paid invoices.
	assertSettledInvoice(rHash, 3)
// Finally, close all channels.
mts.closeChannels()
}
// mppTestScenario defines a test scenario used for testing MPP-related tests.
// It has two standby nodes, alice and bob, and three new nodes, carol, dave,
// and eve.
type mppTestScenario struct {
	ht *lntemp.HarnessTest

	alice, bob, carol, dave, eve *node.HarnessNode
	nodes                        []*node.HarnessNode

	// Keep a list of all our active channels.
	channelPoints []*lnrpc.ChannelPoint
}
// newMppTestScenario initializes a new mpp test scenario with five funded
// nodes and connects them to have the following topology,
//
//	      _ Eve _
//	     /       \
//	Alice -- Carol ---- Bob
//	     \             /
//	      \__ Dave ___/
func newMppTestScenario(ht *lntemp.HarnessTest) *mppTestScenario {
	alice, bob := ht.Alice, ht.Bob
	ht.RestartNodeWithExtraArgs(bob, []string{
		"--maxpendingchannels=2",
		"--accept-amp",
	})

	// Create a five-node context consisting of Alice, Bob and three new
	// nodes.
	carol := ht.NewNode("carol", []string{
		"--maxpendingchannels=2",
		"--accept-amp",
	})
	dave := ht.NewNode("dave", nil)
	eve := ht.NewNode("eve", nil)

	// Connect nodes to ensure propagation of channels.
	ht.EnsureConnected(alice, carol)
	ht.EnsureConnected(alice, dave)
	ht.EnsureConnected(carol, bob)
	ht.EnsureConnected(carol, eve)
	ht.EnsureConnected(dave, bob)
	ht.EnsureConnected(eve, bob)

	// Send coins to the nodes and mine a block to confirm them.
	for i := 0; i < 2; i++ {
		ht.FundCoinsUnconfirmed(btcutil.SatoshiPerBitcoin, carol)
		ht.FundCoinsUnconfirmed(btcutil.SatoshiPerBitcoin, dave)
		ht.FundCoinsUnconfirmed(btcutil.SatoshiPerBitcoin, eve)
		ht.MineBlocks(1)
	}

	mts := &mppTestScenario{
		ht:    ht,
		alice: alice,
		bob:   bob,
		carol: carol,
		dave:  dave,
		eve:   eve,
		nodes: []*node.HarnessNode{alice, bob, carol, dave, eve},
	}

	return mts
}
// mppOpenChannelRequest defines the amounts used for each channel opening.
type mppOpenChannelRequest struct {
	// Channel Alice=>Carol.
	amtAliceCarol btcutil.Amount

	// Channel Alice=>Dave.
	amtAliceDave btcutil.Amount

	// Channel Carol=>Bob.
	amtCarolBob btcutil.Amount

	// Channel Carol=>Eve.
	amtCarolEve btcutil.Amount

	// Channel Dave=>Bob.
	amtDaveBob btcutil.Amount

	// Channel Eve=>Bob.
	amtEveBob btcutil.Amount
}
// openChannels is a helper to open channels that sets up a network topology
// with three different paths Alice <-> Bob as following,
//
//	      _ Eve _
//	     /       \
//	Alice -- Carol ---- Bob
//	     \             /
//	      \__ Dave ___/
//
// NOTE: all the channels are open together to save blocks mined.
func (m *mppTestScenario) openChannels(r *mppOpenChannelRequest) {
	reqs := []*lntemp.OpenChannelRequest{
		{
			Local:  m.alice,
			Remote: m.carol,
			Param:  lntemp.OpenChannelParams{Amt: r.amtAliceCarol},
		},
		{
			Local:  m.alice,
			Remote: m.dave,
			Param:  lntemp.OpenChannelParams{Amt: r.amtAliceDave},
		},
		{
			Local:  m.carol,
			Remote: m.bob,
			Param:  lntemp.OpenChannelParams{Amt: r.amtCarolBob},
		},
		{
			Local:  m.carol,
			Remote: m.eve,
			Param:  lntemp.OpenChannelParams{Amt: r.amtCarolEve},
		},
		{
			Local:  m.dave,
			Remote: m.bob,
			Param:  lntemp.OpenChannelParams{Amt: r.amtDaveBob},
		},
		{
			Local:  m.eve,
			Remote: m.bob,
			Param:  lntemp.OpenChannelParams{Amt: r.amtEveBob},
		},
	}

	m.channelPoints = m.ht.OpenMultiChannelsAsync(reqs)

// Make sure every node has heard every channel.
for _, hn := range m.nodes {
for _, cp := range m.channelPoints {
m.ht.AssertTopologyChannelOpen(hn, cp)
}
}
}
// closeChannels closes all the open channels from `openChannels`.
func (m *mppTestScenario) closeChannels() {
if m.ht.Failed() {
m.ht.Log("Skipped closing channels for failed test")
return
}
// TODO(yy): remove the sleep once the following bug is fixed. When the
// payment is reported as settled by Alice, it's expected the
// commitment dance is finished and all subsequent states have been
// updated. Yet we'd receive the error `cannot co-op close channel with
// active htlcs` or `link failed to shutdown` if we close the channel.
// We need to investigate the order of settling the payments and
	// updating commitments to understand and fix it.
time.Sleep(2 * time.Second)
// Close all channels without mining the closing transactions.
m.ht.CloseChannelAssertPending(m.alice, m.channelPoints[0], false)
m.ht.CloseChannelAssertPending(m.alice, m.channelPoints[1], false)
m.ht.CloseChannelAssertPending(m.carol, m.channelPoints[2], false)
m.ht.CloseChannelAssertPending(m.carol, m.channelPoints[3], false)
m.ht.CloseChannelAssertPending(m.dave, m.channelPoints[4], false)
m.ht.CloseChannelAssertPending(m.eve, m.channelPoints[5], false)
// Now mine a block to include all the closing transactions.
m.ht.MineBlocks(1)
// Assert that the channels are closed.
for _, hn := range m.nodes {
m.ht.AssertNumWaitingClose(hn, 0)
}
}
// buildRoute is a helper function for Alice to build a route from pubkeys.
func (m *mppTestScenario) buildRoute(amt btcutil.Amount,
	sender *node.HarnessNode, hops []*node.HarnessNode) *lnrpc.Route {

	rpcHops := make([][]byte, 0, len(hops))
	for _, hop := range hops {
		k := hop.PubKeyStr
		pubkey, err := route.NewVertexFromStr(k)
		require.NoErrorf(m.ht, err, "error parsing %v: %v", k, err)
		rpcHops = append(rpcHops, pubkey[:])
	}
@ -362,12 +349,37 @@ func (c *mppTestContext) buildRoute(ctxb context.Context, amt btcutil.Amount,
FinalCltvDelta: chainreg.DefaultBitcoinTimeLockDelta,
HopPubkeys: rpcHops,
}
	routeResp := sender.RPC.BuildRoute(req)

	return routeResp.Route
}
// updateDaveGlobalPolicy updates Dave's global channel policy and returns the
// expected policy for further checks. It changes Dave's `FeeBaseMsat` from
// 1,000 msat to 500,000 msat, and `FeeProportionalMillionths` from 1 to 1,000.
func (m *mppTestScenario) updateDaveGlobalPolicy() *lnrpc.RoutingPolicy {
const (
baseFeeMsat = 500_000
feeRate = 0.001
maxHtlcMsat = 133_650_000
)
expectedPolicy := &lnrpc.RoutingPolicy{
FeeBaseMsat: baseFeeMsat,
FeeRateMilliMsat: feeRate * testFeeBase,
TimeLockDelta: 40,
MinHtlc: 1000, // default value
MaxHtlcMsat: maxHtlcMsat,
}
	updateFeeReq := &lnrpc.PolicyUpdateRequest{
BaseFeeMsat: baseFeeMsat,
FeeRate: feeRate,
TimeLockDelta: 40,
Scope: &lnrpc.PolicyUpdateRequest_Global{Global: true},
MaxHtlcMsat: maxHtlcMsat,
}
m.dave.RPC.UpdateChannelPolicy(updateFeeReq)
return expectedPolicy
}
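// NOTE: an illustrative sketch, not part of this change. With the policy
// above, Dave's forwarding fee follows the standard BOLT #7 formula, which is
// what makes the Dave path predictably more expensive than Carol's. The
// helper below is hypothetical and only spells out that arithmetic.
func daveForwardingFeeMsat(amtToForwardMsat int64) int64 {
	const (
		baseFeeMsat = 500_000 // Dave's updated base fee.
		feeRatePpm  = 1_000   // 0.001 expressed in parts per million.
	)

	// fee = base_fee + (amt_to_forward * fee_rate / 1_000_000).
	return baseFeeMsat + amtToForwardMsat*feeRatePpm/1_000_000
}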


@ -1,40 +1,39 @@
package itest
import (
"context"
"testing"
"github.com/btcsuite/btcd/btcutil"
"github.com/lightningnetwork/lnd/lnrpc"
"github.com/lightningnetwork/lnd/lntest"
"github.com/lightningnetwork/lnd/lntemp"
"github.com/stretchr/testify/require"
)
func testNonstdSweep(ht *lntemp.HarnessTest) {
	p2shAddr, err := btcutil.NewAddressScriptHash(
		make([]byte, 1), harnessNetParams,
	)
	require.NoError(ht, err)

	p2pkhAddr, err := btcutil.NewAddressPubKeyHash(
		make([]byte, 20), harnessNetParams,
	)
	require.NoError(ht, err)

	p2wshAddr, err := btcutil.NewAddressWitnessScriptHash(
		make([]byte, 32), harnessNetParams,
	)
	require.NoError(ht, err)

	p2wkhAddr, err := btcutil.NewAddressWitnessPubKeyHash(
		make([]byte, 20), harnessNetParams,
	)
	require.NoError(ht, err)

	p2trAddr, err := btcutil.NewAddressTaproot(
		make([]byte, 32), harnessNetParams,
	)
	require.NoError(ht, err)
tests := []struct {
name string
@ -64,10 +63,10 @@ func testNonstdSweep(net *lntest.NetworkHarness, t *harnessTest) {
for _, test := range tests {
test := test
		success := ht.Run(test.name, func(t *testing.T) {
			st := ht.Subtest(t)

			testNonStdSweepInner(st, test.address)
})
if !success {
break
@ -75,18 +74,14 @@ func testNonstdSweep(net *lntest.NetworkHarness, t *harnessTest) {
}
}
func testNonStdSweepInner(ht *lntemp.HarnessTest, address string) {
	carol := ht.NewNode("carol", nil)

	// Give Carol a UTXO so SendCoins will behave as expected.
	ht.FundCoins(btcutil.SatoshiPerBitcoin, carol)

	// Set the fee estimate to 1sat/vbyte.
	ht.SetFeeEstimate(250)
// Make Carol call SendCoins with the SendAll flag and the created
// address.
@ -96,20 +91,13 @@ func testNonStdSweepInner(net *lntest.NetworkHarness, t *harnessTest,
SendAll: true,
}
	// If a non-standard transaction was created, then this SendCoins call
	// will fail.
	carol.RPC.SendCoins(sendReq)

	// Fetch the txid so we can grab the raw transaction.
	txid := ht.Miner.AssertNumTxsInMempool(1)[0]
	tx := ht.Miner.GetRawTransaction(txid)
msgTx := tx.MsgTx()
@ -123,9 +111,7 @@ func testNonStdSweepInner(net *lntest.NetworkHarness, t *harnessTest,
for _, inp := range msgTx.TxIn {
// Fetch the previous outpoint's value.
prevOut := inp.PreviousOutPoint
		ptx := ht.Miner.GetRawTransaction(&prevOut.Hash)
pout := ptx.MsgTx().TxOut[prevOut.Index]
inputVal += int(pout.Value)
@ -139,9 +125,11 @@ func testNonStdSweepInner(net *lntest.NetworkHarness, t *harnessTest,
// Fetch the vsize of the transaction so we can determine if the
// transaction pays >= 1 sat/vbyte.
	rawTx := ht.Miner.GetRawTransactionVerbose(txid)

	// Require fee >= vbytes.
	require.True(ht, fee >= int(rawTx.Vsize))
// Mine a block to keep the mempool clean.
ht.MineBlocksAndAssertNumTxes(1, 1)
}
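// NOTE: an illustrative sketch, not part of this change. The fee check above
// reduces to summing input and output values and comparing the difference
// against the transaction's virtual size. A hypothetical standalone helper,
// assuming a prevOutValue lookup and the btcd wire package:
func sweepFeeSat(msgTx *wire.MsgTx,
	prevOutValue func(wire.OutPoint) int64) int64 {

	var inputVal, outputVal int64
	for _, in := range msgTx.TxIn {
		inputVal += prevOutValue(in.PreviousOutPoint)
	}
	for _, out := range msgTx.TxOut {
		outputVal += out.Value
	}

	// The miner fee is whatever input value no output claims.
	return inputVal - outputVal
}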


@ -2,7 +2,6 @@ package itest
import (
"bytes"
"context"
"fmt"
"github.com/btcsuite/btcd/btcutil"
@ -14,7 +13,6 @@ import (
"github.com/lightningnetwork/lnd/lnrpc/walletrpc"
"github.com/lightningnetwork/lnd/lntemp"
"github.com/lightningnetwork/lnd/lntemp/node"
"github.com/lightningnetwork/lnd/lntest"
"github.com/lightningnetwork/lnd/lntest/wait"
"github.com/lightningnetwork/lnd/lnwallet"
"github.com/lightningnetwork/lnd/sweep"
@ -80,35 +78,28 @@ func testChainKitGetBlockHash(ht *lntemp.HarnessTest) {
// rate by broadcasting a Child-Pays-For-Parent (CPFP) transaction.
//
// TODO(wilmer): Add RBF case once btcd supports it.
func testCPFP(ht *lntemp.HarnessTest) {
	runCPFP(ht, ht.Alice, ht.Bob)
}
// runCPFP ensures that the daemon can bump an unconfirmed transaction's fee
// rate by broadcasting a Child-Pays-For-Parent (CPFP) transaction.
func runCPFP(ht *lntemp.HarnessTest, alice, bob *node.HarnessNode) {
	// Skip this test for neutrino, as it's not aware of mempool
	// transactions.
	if ht.IsNeutrinoBackend() {
		ht.Skipf("skipping CPFP test for neutrino backend")
	}

	// We'll start the test by sending Alice some coins, which she'll use
	// to send to Bob.
	ht.FundCoins(btcutil.SatoshiPerBitcoin, alice)
// Create an address for Bob to send the coins to.
	req := &lnrpc.NewAddressRequest{
		Type: lnrpc.AddressType_WITNESS_PUBKEY_HASH,
	}
	resp := bob.RPC.NewAddress(req)
// Send the coins from Alice to Bob. We should expect a transaction to
// be broadcast and seen in the mempool.
@ -116,46 +107,33 @@ func runCPFP(net *lntest.NetworkHarness, t *harnessTest,
Addr: resp.Address,
Amount: btcutil.SatoshiPerBitcoin,
}
	alice.RPC.SendCoins(sendReq)
	txid := ht.Miner.AssertNumTxsInMempool(1)[0]
// We'll then extract the raw transaction from the mempool in order to
// determine the index of Bob's output.
	tx := ht.Miner.GetRawTransaction(txid)
bobOutputIdx := -1
for i, txOut := range tx.MsgTx().TxOut {
_, addrs, _, err := txscript.ExtractPkScriptAddrs(
txOut.PkScript, net.Miner.ActiveNet,
txOut.PkScript, ht.Miner.ActiveNet,
)
		require.NoErrorf(ht, err, "unable to extract address "+
			"from pkScript=%x: %v", txOut.PkScript, err)
if addrs[0].String() == resp.Address {
bobOutputIdx = i
}
}
	require.NotEqual(ht, -1, bobOutputIdx, "bob's output was not found "+
		"within the transaction")
// Wait until bob has seen the tx and considers it as owned.
op := &lnrpc.OutPoint{
TxidBytes: txid[:],
OutputIndex: uint32(bobOutputIdx),
}
	ht.AssertUTXOInWallet(bob, op, "")
// We'll attempt to bump the fee of this transaction by performing a
// CPFP from Alice's point of view.
@ -165,68 +143,38 @@ func runCPFP(net *lntest.NetworkHarness, t *harnessTest,
sweep.DefaultMaxFeeRate.FeePerKVByte() / 2000,
),
}
	bob.RPC.BumpFee(bumpFeeReq)
// We should now expect to see two transactions within the mempool, a
// parent and its child.
	ht.Miner.AssertNumTxsInMempool(2)
// We should also expect to see the output being swept by the
// UtxoSweeper. We'll ensure it's using the fee rate specified.
	pendingSweepsResp := bob.RPC.PendingSweeps()
	require.Len(ht, pendingSweepsResp.PendingSweeps, 1,
		"expected to find 1 pending sweep")
pendingSweep := pendingSweepsResp.PendingSweeps[0]
	require.Equal(ht, pendingSweep.Outpoint.TxidBytes, op.TxidBytes,
		"output txid not matched")
	require.Equal(ht, pendingSweep.Outpoint.OutputIndex, op.OutputIndex,
		"output index not matched")
	require.Equal(ht, pendingSweep.SatPerVbyte, bumpFeeReq.SatPerVbyte,
		"sweep sat per vbyte not matched")
// Mine a block to clean up the unconfirmed transactions.
	ht.MineBlocksAndAssertNumTxes(1, 2)
// The input used to CPFP should no longer be pending.
	err := wait.NoError(func() error {
		resp := bob.RPC.PendingSweeps()
		if len(resp.PendingSweeps) != 0 {
			return fmt.Errorf("expected 0 pending sweeps, found %d",
				len(resp.PendingSweeps))
		}

		return nil
	}, defaultTimeout)
	require.NoError(ht, err, "timeout checking bob's pending sweeps")
}
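// NOTE: an illustrative sketch, not part of this change. CPFP works because
// miners evaluate parent and child as a package: the child's generous fee
// lifts the effective rate of both. The hypothetical helper below shows the
// package fee rate computation, with fees in satoshis and sizes in vbytes.
func packageFeeRate(parentFee, childFee, parentVSize, childVSize int64) float64 {
	// The rate miners act on is total fees over total virtual size.
	return float64(parentFee+childFee) / float64(parentVSize+childVSize)
}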
// testAnchorReservedValue tests that we won't allow sending transactions when


@ -1,9 +1,8 @@
package itest
import (
"context"
"fmt"
"time"
"testing"
"github.com/btcsuite/btcd/btcjson"
"github.com/btcsuite/btcd/chaincfg/chainhash"
@ -12,7 +11,8 @@ import (
"github.com/lightningnetwork/lnd/funding"
"github.com/lightningnetwork/lnd/lnrpc"
"github.com/lightningnetwork/lnd/lntemp"
"github.com/lightningnetwork/lnd/lntest"
"github.com/lightningnetwork/lnd/lntemp/node"
"github.com/lightningnetwork/lnd/lntemp/rpc"
"github.com/lightningnetwork/lnd/lntest/wait"
"github.com/stretchr/testify/require"
)
@ -172,9 +172,7 @@ func testOpenChannelAfterReorg(ht *lntemp.HarnessTest) {
// ChannelUpdate --> defaultBaseFee, provided FeeRate
// 4.) baseFee and feeRate provided to OpenChannelRequest
// ChannelUpdate --> provided baseFee, provided feeRate.
func testOpenChannelUpdateFeePolicy(ht *lntemp.HarnessTest) {
const (
defaultBaseFee = 1000
defaultFeeRate = 1
@ -189,7 +187,7 @@ func testOpenChannelUpdateFeePolicy(net *lntest.NetworkHarness,
chanAmt := funding.MaxBtcFundingAmount
pushAmt := chanAmt / 2
	feeScenarios := []lntemp.OpenChannelParams{
{
Amt: chanAmt,
PushAmt: pushAmt,
@ -259,89 +257,79 @@ func testOpenChannelUpdateFeePolicy(net *lntest.NetworkHarness,
MaxHtlcMsat: defaultMaxHtlc,
}
	alice, bob := ht.Alice, ht.Bob

	runTestCase := func(ht *lntemp.HarnessTest,
		fs lntemp.OpenChannelParams,
		alicePolicy, bobPolicy *lnrpc.RoutingPolicy) {

		// Create a channel Alice->Bob.
		chanPoint := ht.OpenChannel(alice, bob, fs)
		defer ht.CloseChannel(alice, chanPoint)

		// We add all the nodes' update channels to a slice, such that
		// we can make sure they all receive the expected updates.
		nodes := []*node.HarnessNode{alice, bob}

		// Alice and Bob should see each other's ChannelUpdates,
		// advertising the preferred routing policies.
		assertNodesPolicyUpdate(
			ht, nodes, alice, alicePolicy, chanPoint,
		)
		assertNodesPolicyUpdate(ht, nodes, bob, bobPolicy, chanPoint)

		// They should now know about the default policies.
		for _, node := range nodes {
			ht.AssertChannelPolicy(
				node, alice.PubKeyStr, alicePolicy, chanPoint,
			)
			ht.AssertChannelPolicy(
				node, bob.PubKeyStr, bobPolicy, chanPoint,
			)
		}
	}

	for i, feeScenario := range feeScenarios {
		ht.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
			st := ht.Subtest(t)
			ht.EnsureConnected(alice, bob)

			runTestCase(
				st, feeScenario,
				&expectedPolicies[i], &bobExpectedPolicy,
			)
		})
	}
}
// testBasicChannelCreationAndUpdates tests multiple channel opening and
// closing, and ensures that if a node is subscribed to channel updates they
// will be received correctly for both cooperative and force closed channels.
func testBasicChannelCreationAndUpdates(ht *lntemp.HarnessTest) {
	runBasicChannelCreationAndUpdates(ht, ht.Alice, ht.Bob)
}
// runBasicChannelCreationAndUpdates tests multiple channel opening and
// closing, and ensures that if a node is subscribed to channel updates they
// will be received correctly for both cooperative and force closed channels.
func runBasicChannelCreationAndUpdates(ht *lntemp.HarnessTest,
	alice, bob *node.HarnessNode) {

	const (
		numChannels = 2
		amount      = funding.MaxBtcFundingAmount
	)

	// Subscribe Bob and Alice to channel event notifications.
	bobChanSub := bob.RPC.SubscribeChannelEvents()
	aliceChanSub := alice.RPC.SubscribeChannelEvents()
// Open the channels between Alice and Bob, asserting that the channels
// have been properly opened on-chain.
chanPoints := make([]*lnrpc.ChannelPoint, numChannels)
for i := 0; i < numChannels; i++ {
		chanPoints[i] = ht.OpenChannel(
			alice, bob, lntemp.OpenChannelParams{
				Amt: amount,
			},
		)
@ -350,112 +338,93 @@ func runBasicChannelCreationAndUpdates(net *lntest.NetworkHarness,
// Since each of the channels just became open, Bob and Alice should
// each receive an open and an active notification for each channel.
const numExpectedOpenUpdates = 3 * numChannels
	verifyOpenUpdatesReceived := func(sub rpc.ChannelEventsClient) error {
		for i := 0; i < numExpectedOpenUpdates; i++ {
			update := ht.ReceiveChannelEvent(sub)

			switch update.Type {
			case lnrpc.ChannelEventUpdate_PENDING_OPEN_CHANNEL:
				if i%3 == 0 {
					continue
				}
				return fmt.Errorf("expected open or active " +
					"channel ntfn, got pending open " +
					"channel ntfn instead")

			case lnrpc.ChannelEventUpdate_OPEN_CHANNEL:
				if i%3 == 1 {
					continue
				}
				return fmt.Errorf("expected pending open or " +
					"active channel ntfn, got open " +
					"channel ntfn instead")

			case lnrpc.ChannelEventUpdate_ACTIVE_CHANNEL:
				if i%3 == 2 {
					continue
				}
				return fmt.Errorf("expected pending open or " +
					"open channel ntfn, got active " +
					"channel ntfn instead")

			default:
				return fmt.Errorf("update type mismatch: "+
					"expected open or active channel "+
					"notification, got: %v", update.Type)
			}
		}

		return nil
	}
	require.NoError(ht, verifyOpenUpdatesReceived(bobChanSub),
		"bob open channels")
	require.NoError(ht, verifyOpenUpdatesReceived(aliceChanSub),
		"alice open channels")
	// Close the channels between Alice and Bob, asserting that the
	// channels have been properly closed on-chain.
	for i, chanPoint := range chanPoints {
		// Force close the first of the two channels.
		force := i%2 == 0
		if force {
			ht.ForceCloseChannel(alice, chanPoint)
		} else {
			ht.CloseChannel(alice, chanPoint)
		}
	}
// verifyCloseUpdatesReceived is used to verify that Alice and Bob
// receive the correct channel updates in order.
const numExpectedCloseUpdates = 3 * numChannels
	verifyCloseUpdatesReceived := func(sub rpc.ChannelEventsClient,
		forceType lnrpc.ChannelCloseSummary_ClosureType,
		closeInitiator lnrpc.Initiator) error {

		// Ensure one inactive and one closed notification is received
		// for each closed channel.
		for i := 0; i < numExpectedCloseUpdates; i++ {
			expectedCloseType := lnrpc.
				ChannelCloseSummary_COOPERATIVE_CLOSE

			// Every other channel should be force closed. If this
			// channel was force closed, set the expected close
			// type to the type passed in.
			force := (i/3)%2 == 0
			if force {
				expectedCloseType = forceType
			}

			chanUpdate := ht.ReceiveChannelEvent(sub)
			err := verifyCloseUpdate(
				chanUpdate, expectedCloseType,
				closeInitiator,
			)
			if err != nil {
				return err
			}
		}
@ -467,7 +436,7 @@ func runBasicChannelCreationAndUpdates(net *lntest.NetworkHarness,
// All channels (cooperatively and force closed) should have a remote
// close initiator because Alice closed the channels.
require.NoError(
		ht, verifyCloseUpdatesReceived(
bobChanSub,
lnrpc.ChannelCloseSummary_REMOTE_FORCE_CLOSE,
lnrpc.Initiator_INITIATOR_REMOTE,
@ -479,7 +448,7 @@ func runBasicChannelCreationAndUpdates(net *lntest.NetworkHarness,
// All channels (cooperatively and force closed) should have a local
// close initiator because Alice closed the channels.
require.NoError(
		ht, verifyCloseUpdatesReceived(
aliceChanSub,
lnrpc.ChannelCloseSummary_LOCAL_FORCE_CLOSE,
lnrpc.Initiator_INITIATOR_LOCAL,
@ -516,3 +485,64 @@ func assertMinerBlockHeightDelta(ht *lntemp.HarnessTest,
}, defaultTimeout)
require.NoError(ht, err, "failed to assert block height delta")
}
// verifyCloseUpdate is used to verify that a closed channel update is of the
// expected type.
func verifyCloseUpdate(chanUpdate *lnrpc.ChannelEventUpdate,
closeType lnrpc.ChannelCloseSummary_ClosureType,
closeInitiator lnrpc.Initiator) error {
// We should receive one inactive and one closed notification
// for each channel.
switch update := chanUpdate.Channel.(type) {
case *lnrpc.ChannelEventUpdate_InactiveChannel:
if chanUpdate.Type !=
lnrpc.ChannelEventUpdate_INACTIVE_CHANNEL {
return fmt.Errorf("update type mismatch: "+
"expected %v, got %v",
lnrpc.ChannelEventUpdate_INACTIVE_CHANNEL,
chanUpdate.Type)
}
case *lnrpc.ChannelEventUpdate_ClosedChannel:
if chanUpdate.Type !=
lnrpc.ChannelEventUpdate_CLOSED_CHANNEL {
return fmt.Errorf("update type mismatch: "+
"expected %v, got %v",
lnrpc.ChannelEventUpdate_CLOSED_CHANNEL,
chanUpdate.Type)
}
if update.ClosedChannel.CloseType != closeType {
return fmt.Errorf("channel closure type "+
"mismatch: expected %v, got %v",
closeType,
update.ClosedChannel.CloseType)
}
if update.ClosedChannel.CloseInitiator != closeInitiator {
			return fmt.Errorf("expected close initiator: %v, "+
"got: %v", closeInitiator,
update.ClosedChannel.CloseInitiator)
}
case *lnrpc.ChannelEventUpdate_FullyResolvedChannel:
if chanUpdate.Type !=
lnrpc.ChannelEventUpdate_FULLY_RESOLVED_CHANNEL {
return fmt.Errorf("update type mismatch: "+
"expected %v, got %v",
lnrpc.ChannelEventUpdate_FULLY_RESOLVED_CHANNEL,
chanUpdate.Type)
}
default:
return fmt.Errorf("channel update channel of wrong type, "+
"expected closed channel, got %T",
update)
}
return nil
}
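// NOTE: an illustrative sketch, not part of this change. For every channel
// opened, the test expects exactly this notification sequence, which is why
// verifyOpenUpdatesReceived indexes the incoming updates modulo 3.
var expectedOpenSequence = []lnrpc.ChannelEventUpdate_UpdateType{
	lnrpc.ChannelEventUpdate_PENDING_OPEN_CHANNEL,
	lnrpc.ChannelEventUpdate_OPEN_CHANNEL,
	lnrpc.ChannelEventUpdate_ACTIVE_CHANNEL,
}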


@ -12,6 +12,7 @@ import (
"github.com/lightningnetwork/lnd/lnrpc"
"github.com/lightningnetwork/lnd/lnrpc/routerrpc"
"github.com/lightningnetwork/lnd/lntemp"
"github.com/lightningnetwork/lnd/lntemp/node"
"github.com/lightningnetwork/lnd/lntest"
"github.com/lightningnetwork/lnd/lntest/wait"
"github.com/stretchr/testify/require"
@ -225,35 +226,31 @@ func testPaymentFollowingChannelOpen(ht *lntemp.HarnessTest) {
}
// testAsyncPayments tests the performance of the async payments.
func testAsyncPayments(ht *lntemp.HarnessTest) {
// We use new nodes here as the benchmark test creates lots of data
// which can be costly to be carried on.
alice := ht.NewNode("Alice", []string{"--pending-commit-interval=3m"})
bob := ht.NewNode("Bob", []string{"--pending-commit-interval=3m"})
ht.EnsureConnected(alice, bob)
ht.FundCoins(btcutil.SatoshiPerBitcoin, alice)
runAsyncPayments(ht, alice, bob)
}
// runAsyncPayments tests the performance of the async payments.
func runAsyncPayments(ht *lntemp.HarnessTest, alice, bob *node.HarnessNode) {
	const paymentAmt = 100
// First establish a channel with a capacity equals to the overall
// amount of payments, between Alice and Bob, at the end of the test
// Alice should send all money from her side to Bob.
channelCapacity := btcutil.Amount(paymentAmt * 2000)
	chanPoint := ht.OpenChannel(
		alice, bob, lntemp.OpenChannelParams{Amt: channelCapacity},
	)
	info := ht.QueryChannelByChanPoint(alice, chanPoint)
// We'll create a number of invoices equal the max number of HTLCs that
// can be carried in one direction. The number on the commitment will
@ -267,128 +264,95 @@ func runAsyncPayments(net *lntest.NetworkHarness, t *harnessTest, alice,
// With the channel open, we'll create invoices for Bob that Alice
// will pay to in order to advance the state of the channel.
	bobPayReqs, _, _ := ht.CreatePayReqs(bob, paymentAmt, numInvoices)
	// Simultaneously send payments from Alice to Bob using Bob's payment
	// hashes generated above.
	now := time.Now()

	settled := make(chan struct{})
	defer close(settled)

	timeout := lntest.AsyncBenchmarkTimeout * 2
	for i := 0; i < numInvoices; i++ {
		payReq := bobPayReqs[i]

		go func() {
			req := &routerrpc.SendPaymentRequest{
				PaymentRequest: payReq,
				TimeoutSeconds: int32(timeout.Seconds()),
				FeeLimitMsat:   noFeeLimitMsat,
			}

			// AssertPaymentSucceedWithTimeout will assert that
			// the payment is settled.
			stream := alice.RPC.SendPayment(req)
			ht.AssertPaymentSucceedWithTimeout(stream, timeout)

			settled <- struct{}{}
		}()
	}
// Wait until all the payments have settled.
timer := time.After(timeout)
for i := 0; i < numInvoices; i++ {
select {
case <-settled:
case <-timer:
require.Fail(ht, "timeout", "wait payment failed")
}
}
// All payments have been sent, mark the finish time.
timeTaken := time.Since(now)
	// Next query for Bob's and Alice's channel states, in order to
	// confirm that all payments have been successfully transmitted.
	//
	// assertChannelState asserts the channel state by checking the values
	// in fields, LocalBalance, RemoteBalance and num of PendingHtlcs.
	assertChannelState := func(hn *node.HarnessNode, cp *lnrpc.ChannelPoint,
		localBalance, remoteBalance int64, numPendingHtlcs int) {

		err := wait.NoError(func() error {
			// Find the target channel first.
			target := ht.GetChannelByChanPoint(hn, cp)

			if len(target.PendingHtlcs) != numPendingHtlcs {
				return fmt.Errorf("pending htlcs is "+
					"incorrect, got %v, expected %v",
					len(target.PendingHtlcs),
					numPendingHtlcs)
			}

			if target.LocalBalance != localBalance {
				return fmt.Errorf("local balance is "+
					"incorrect, got %v, expected %v",
					target.LocalBalance, localBalance)
			}

			if target.RemoteBalance != remoteBalance {
				return fmt.Errorf("remote balance is "+
					"incorrect, got %v, expected %v",
					target.RemoteBalance, remoteBalance)
			}

			return nil
		}, lntemp.DefaultTimeout)
		require.NoError(ht, err, "timeout while checking for balance")
	}
// Wait for the revocation to be received so alice no longer has
// pending htlcs listed and has correct balances. This is needed due to
// the fact that we now pipeline the settles.
assertChannelState(alice, chanPoint, aliceAmt, bobAmt, 0)
	// Wait for Bob to receive revocation from Alice.
	assertChannelState(bob, chanPoint, bobAmt, aliceAmt, 0)
	ht.Log("\tBenchmark info: Elapsed time: ", timeTaken)
	ht.Log("\tBenchmark info: TPS: ",
		float64(numInvoices)/timeTaken.Seconds())
// Finally, immediately close the channel. This function will also
// block until the channel is closed and will additionally assert the
// relevant channel closing post conditions.
	ht.CloseChannel(alice, chanPoint)
}
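// NOTE: an illustrative sketch, not part of this change. The TPS figure
// logged above is plain throughput, i.e. settled payments per wall-clock
// second over the benchmark run.
func paymentsPerSecond(numPayments int, elapsed time.Duration) float64 {
	return float64(numPayments) / elapsed.Seconds()
}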
// testBidirectionalAsyncPayments tests that nodes are able to send the

File diff suppressed because it is too large


@ -1,7 +1,6 @@
package itest
import (
"context"
"fmt"
"testing"
@ -11,7 +10,8 @@ import (
"github.com/lightningnetwork/lnd/keychain"
"github.com/lightningnetwork/lnd/lnrpc"
"github.com/lightningnetwork/lnd/lnrpc/walletrpc"
"github.com/lightningnetwork/lnd/lntest"
"github.com/lightningnetwork/lnd/lntemp"
"github.com/lightningnetwork/lnd/lntemp/node"
"github.com/stretchr/testify/require"
)
@ -54,105 +54,94 @@ var (
// testRemoteSigner tests that a watch-only wallet can use a remote signing
// wallet to perform any signing or ECDH operations.
func testRemoteSigner(ht *lntemp.HarnessTest) {
	type testCase struct {
		name       string
		randomSeed bool
		sendCoins  bool
		fn         func(tt *lntemp.HarnessTest,
			wo, carol *node.HarnessNode)
	}

	subTests := []testCase{{
name: "random seed",
randomSeed: true,
		fn: func(tt *lntemp.HarnessTest, wo, carol *node.HarnessNode) {
// Nothing more to test here.
},
}, {
name: "account import",
		fn: func(tt *lntemp.HarnessTest, wo, carol *node.HarnessNode) {
			runWalletImportAccountScenario(
				tt, walletrpc.AddressType_WITNESS_PUBKEY_HASH,
				carol, wo,
			)
},
}, {
name: "basic channel open close",
sendCoins: true,
		fn: func(tt *lntemp.HarnessTest, wo, carol *node.HarnessNode) {
			runBasicChannelCreationAndUpdates(tt, wo, carol)
},
}, {
name: "async payments",
sendCoins: true,
		fn: func(tt *lntemp.HarnessTest, wo, carol *node.HarnessNode) {
			runAsyncPayments(tt, wo, carol)
},
}, {
name: "shared key",
		fn: func(tt *lntemp.HarnessTest, wo, carol *node.HarnessNode) {
runDeriveSharedKey(tt, wo)
},
}, {
name: "cpfp",
sendCoins: true,
		fn: func(tt *lntemp.HarnessTest, wo, carol *node.HarnessNode) {
			runCPFP(tt, wo, carol)
},
}, {
name: "psbt",
randomSeed: true,
		fn: func(tt *lntemp.HarnessTest, wo, carol *node.HarnessNode) {
			runPsbtChanFunding(tt, carol, wo)
			runSignPsbtSegWitV0P2WKH(tt, wo)
			runSignPsbtSegWitV1KeySpendBip86(tt, wo)
			runSignPsbtSegWitV1KeySpendRootHash(tt, wo)
			runSignPsbtSegWitV1ScriptSpend(tt, wo)
},
}, {
name: "sign output raw",
sendCoins: true,
		fn: func(tt *lntemp.HarnessTest, wo, carol *node.HarnessNode) {
			runSignOutputRaw(tt, wo)
},
}, {
name: "sign verify msg",
sendCoins: true,
		fn: func(tt *lntemp.HarnessTest, wo, carol *node.HarnessNode) {
			runSignVerifyMessage(tt, wo)
},
}, {
name: "taproot",
sendCoins: true,
randomSeed: true,
		fn: func(tt *lntemp.HarnessTest, wo, carol *node.HarnessNode) {
			testTaprootSendCoinsKeySpendBip86(tt, wo)
			testTaprootComputeInputScriptKeySpendBip86(tt, wo)
			testTaprootSignOutputRawScriptSpend(tt, wo)
			testTaprootSignOutputRawKeySpendBip86(tt, wo)
			testTaprootSignOutputRawKeySpendRootHash(tt, wo)
			testTaprootMuSig2KeySpendRootHash(tt, wo)
			testTaprootMuSig2ScriptSpend(tt, wo)
			testTaprootMuSig2KeySpendBip86(tt, wo)
			testTaprootMuSig2CombinedLeafKeySpend(tt, wo)
},
}}
	prepareTest := func(st *lntemp.HarnessTest,
		subTest testCase) (*node.HarnessNode,
		*node.HarnessNode, *node.HarnessNode) {
// Signer is our signing node and has the wallet with the full
// master private key. We test that we can create the watch-only
@ -162,34 +151,32 @@ func testRemoteSigner(net *lntest.NetworkHarness, t *harnessTest) {
password := []byte("itestpassword")
var (
signerNodePubKey = nodePubKey
			watchOnlyAccounts = deriveCustomScopeAccounts(ht.T)
			signer            *node.HarnessNode
err error
)
		if !subTest.randomSeed {
			signer = st.RestoreNodeWithSeed(
				"Signer", nil, password, nil, rootKey, 0, nil,
			)
		} else {
			signer = st.NewNode("Signer", nil)
			signerNodePubKey = signer.PubKeyStr

			rpcAccts := signer.RPC.ListAccounts(
				&walletrpc.ListAccountsRequest{},
			)

			watchOnlyAccounts, err = walletrpc.AccountsToWatchOnly(
				rpcAccts.Accounts,
			)
			require.NoError(st, err)
		}
		// WatchOnly is the node that has a watch-only wallet and uses
		// the Signer node for any operation that requires access to
		// private keys.
		watchOnly := st.NewNodeRemoteSigner(
"WatchOnly", []string{
"--remotesigner.enable",
fmt.Sprintf(
@ -210,33 +197,35 @@ func testRemoteSigner(net *lntest.NetworkHarness, t *harnessTest) {
Accounts: watchOnlyAccounts,
},
)
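		// NOTE: for illustration only, not part of this change. A
		// watch-only node typically points at its signer with flags
		// along these lines (all values hypothetical):
		//
		//	--remotesigner.enable
		//	--remotesigner.rpchost=localhost:10019
		//	--remotesigner.macaroonpath=/path/to/signer.macaroon
		//	--remotesigner.tlscertpath=/path/to/signer/tls.cert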
		resp := watchOnly.RPC.GetInfo()
		require.Equal(st, signerNodePubKey, resp.IdentityPubkey)
		if subTest.sendCoins {
			st.FundCoins(btcutil.SatoshiPerBitcoin, watchOnly)
			ht.AssertWalletAccountBalance(
				watchOnly, "default",
				btcutil.SatoshiPerBitcoin, 0,
			)
}
		carol := st.NewNode("carol", nil)
		st.EnsureConnected(watchOnly, carol)

		return signer, watchOnly, carol
	}

	for _, testCase := range subTests {
		subTest := testCase

		success := ht.Run(subTest.name, func(tt *testing.T) {
			// Skip the cleanup here as no standby node is used.
			st := ht.Subtest(tt)

			_, watchOnly, carol := prepareTest(st, subTest)
			subTest.fn(st, watchOnly, carol)
		})
if !success {
return
}


@ -560,7 +560,7 @@ func middlewareMandatoryTest(ht *lntemp.HarnessTest, node *node.HarnessNode) {
// test case. So we need to do the wait and client setup manually here.
conn, err := node.ConnectRPC()
require.NoError(ht, err)
	node.Initialize(conn)
err = node.WaitUntilServerActive()
require.NoError(ht, err)


@ -1,22 +1,19 @@
package itest
import (
"context"
"encoding/hex"
"github.com/btcsuite/btcd/btcutil"
"github.com/lightningnetwork/lnd/lnrpc"
"github.com/lightningnetwork/lnd/lnrpc/routerrpc"
"github.com/lightningnetwork/lnd/lntest"
"github.com/lightningnetwork/lnd/lntemp"
"github.com/stretchr/testify/require"
)
// testSendMultiPathPayment tests that we are able to successfully route a
// payment using multiple shards across different paths.
func testSendMultiPathPayment(ht *lntemp.HarnessTest) {
	mts := newMppTestScenario(ht)
const paymentAmt = btcutil.Amount(300000)
@ -30,57 +27,45 @@ func testSendMultiPathPayment(net *lntest.NetworkHarness, t *harnessTest) {
// \ /
// \__ Dave ____/
//
	req := &mppOpenChannelRequest{
		amtAliceCarol: 235000,
		amtAliceDave:  135000,
		amtCarolBob:   135000,
		amtCarolEve:   135000,
		amtDaveBob:    135000,
		amtEveBob:     135000,
	}
	mts.openChannels(req)
	chanPointAliceDave := mts.channelPoints[1]
	// Increase Dave's fee to make the test deterministic. Otherwise it
	// would be unpredictable whether pathfinding would go through Carol
	// or Dave for the first shard.
	expectedPolicy := mts.updateDaveGlobalPolicy()

	// Make sure Alice has heard it.
	ht.AssertChannelPolicyUpdate(
		mts.alice, mts.dave, expectedPolicy, chanPointAliceDave, false,
	)
// Our first test will be Alice paying Bob using a SendPayment call.
// Let Bob create an invoice for Alice to pay.
	payReqs, rHashes, invoices := ht.CreatePayReqs(mts.bob, paymentAmt, 1)

	rHash := rHashes[0]
	payReq := payReqs[0]
	sendReq := &routerrpc.SendPaymentRequest{
		PaymentRequest: payReq,
		MaxParts:       10,
		TimeoutSeconds: 60,
		FeeLimitMsat:   noFeeLimitMsat,
	}
	payment := ht.SendPaymentAssertSettled(mts.alice, sendReq)
// Make sure we got the preimage.
	require.Equal(ht, hex.EncodeToString(invoices[0].RPreimage),
		payment.PaymentPreimage, "preimage doesn't match")
// Check that Alice split the payment in at least three shards. Because
// the hand-off of the htlc to the link is asynchronous (via a mailbox),
@ -97,32 +82,17 @@ func testSendMultiPathPayment(net *lntest.NetworkHarness, t *harnessTest) {
}
const minExpectedShards = 3
if succeeded < minExpectedShards {
t.Fatalf("expected at least %v shards, but got %v",
minExpectedShards, succeeded)
}
require.GreaterOrEqual(ht, succeeded, minExpectedShards,
"expected shards not reached")
// Make sure Bob shows the invoice as settled for the full
// amount.
ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
inv, err := ctx.bob.LookupInvoice(
ctxt, &lnrpc.PaymentHash{
RHash: rHash,
},
)
if err != nil {
t.Fatalf("error when obtaining invoice: %v", err)
}
// Make sure Bob shows the invoice as settled for the full amount.
inv := mts.bob.RPC.LookupInvoice(rHash)
if inv.AmtPaidSat != int64(paymentAmt) {
t.Fatalf("incorrect payment amt for invoice"+
"want: %d, got %d",
paymentAmt, inv.AmtPaidSat)
}
require.EqualValues(ht, paymentAmt, inv.AmtPaidSat,
"incorrect payment amt")
if inv.State != lnrpc.Invoice_SETTLED {
t.Fatalf("Invoice not settled: %v", inv.State)
}
require.Equal(ht, lnrpc.Invoice_SETTLED, inv.State,
"Invoice not settled")
settled := 0
for _, htlc := range inv.Htlcs {
@ -130,8 +100,9 @@ func testSendMultiPathPayment(net *lntest.NetworkHarness, t *harnessTest) {
settled++
}
}
if settled != succeeded {
t.Fatalf("expected invoice to be settled "+
"with %v HTLCs, had %v", succeeded, settled)
}
require.Equal(ht, succeeded, settled,
"num of HTLCs wrong")
// Finally, close all channels.
mts.closeChannels()
}
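As an aside, the shard count asserted above is derived directly from the payment's HTLC attempts. A minimal sketch of that tally (hypothetical helper, assuming the lnrpc types already used in this file):

// countSucceededShards tallies the HTLC attempts of a payment that
// reached the SUCCEEDED state, mirroring the minExpectedShards check
// above. Illustrative sketch only, not part of this diff.
func countSucceededShards(p *lnrpc.Payment) int {
	succeeded := 0
	for _, htlc := range p.Htlcs {
		if htlc.Status == lnrpc.HTLCAttempt_SUCCEEDED {
			succeeded++
		}
	}
	return succeeded
}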

View File

@ -2,9 +2,7 @@ package itest
import (
"bytes"
"context"
"crypto/sha256"
"fmt"
"github.com/btcsuite/btcd/btcec/v2"
"github.com/btcsuite/btcd/btcec/v2/schnorr"
@ -15,7 +13,8 @@ import (
"github.com/lightningnetwork/lnd/lnrpc"
"github.com/lightningnetwork/lnd/lnrpc/signrpc"
"github.com/lightningnetwork/lnd/lnrpc/walletrpc"
"github.com/lightningnetwork/lnd/lntest"
"github.com/lightningnetwork/lnd/lntemp"
"github.com/lightningnetwork/lnd/lntemp/node"
"github.com/stretchr/testify/require"
)
@ -23,21 +22,19 @@ import (
// DeriveSharedKey. It creates an ephemeral private key, performing an ECDH with
// the node's pubkey and a customized public key to check the validity of the
// result.
func testDeriveSharedKey(net *lntest.NetworkHarness, t *harnessTest) {
runDeriveSharedKey(t, net.Alice)
func testDeriveSharedKey(ht *lntemp.HarnessTest) {
runDeriveSharedKey(ht, ht.Alice)
}
// runDeriveSharedKey checks the ECDH performed by the endpoint
// DeriveSharedKey. It creates an ephemeral private key, performing an ECDH with
// the node's pubkey and a customized public key to check the validity of the
// result.
func runDeriveSharedKey(t *harnessTest, alice *lntest.HarnessNode) {
ctxb := context.Background()
func runDeriveSharedKey(ht *lntemp.HarnessTest, alice *node.HarnessNode) {
// Create an ephemeral key, extract its public key, and make a
// PrivKeyECDH using the ephemeral key.
ephemeralPriv, err := btcec.NewPrivateKey()
require.NoError(t.t, err, "failed to create ephemeral key")
require.NoError(ht, err, "failed to create ephemeral key")
ephemeralPubBytes := ephemeralPriv.PubKey().SerializeCompressed()
privKeyECDH := &keychain.PrivKeyECDH{PrivKey: ephemeralPriv}
@ -47,30 +44,27 @@ func runDeriveSharedKey(t *harnessTest, alice *lntest.HarnessNode) {
assertECDHMatch := func(pub *btcec.PublicKey,
req *signrpc.SharedKeyRequest) {
ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
resp, err := alice.SignerClient.DeriveSharedKey(ctxt, req)
require.NoError(t.t, err, "calling DeriveSharedKey failed")
resp := alice.RPC.DeriveSharedKey(req)
sharedKey, _ := privKeyECDH.ECDH(pub)
require.Equal(
t.t, sharedKey[:], resp.SharedKey,
"failed to derive the expected key",
)
require.Equal(ht, sharedKey[:], resp.SharedKey,
"failed to derive the expected key")
}
nodePub, err := btcec.ParsePubKey(alice.PubKey[:])
require.NoError(t.t, err, "failed to parse node pubkey")
require.NoError(ht, err, "failed to parse node pubkey")
customizedKeyFamily := int32(keychain.KeyFamilyMultiSig)
customizedIndex := int32(1)
customizedPub, err := deriveCustomizedKey(
ctxb, alice, &signrpc.KeyLocator{
KeyFamily: customizedKeyFamily,
KeyIndex: customizedIndex,
},
)
require.NoError(t.t, err, "failed to create customized pubkey")
// Derive a customized key.
deriveReq := &signrpc.KeyLocator{
KeyFamily: customizedKeyFamily,
KeyIndex: customizedIndex,
}
resp := alice.RPC.DeriveKey(deriveReq)
customizedPub, err := btcec.ParsePubKey(resp.RawKeyBytes)
require.NoError(ht, err, "failed to parse customized pubkey")
// Test DeriveSharedKey with no optional arguments. It will result in
// performing an ECDH between the ephemeral key and the node's pubkey.
@ -154,12 +148,8 @@ func runDeriveSharedKey(t *harnessTest, alice *lntest.HarnessNode) {
// assertErrorMatch checks when calling DeriveSharedKey with invalid
// params, the expected error is returned.
assertErrorMatch := func(match string, req *signrpc.SharedKeyRequest) {
ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
_, err := alice.SignerClient.DeriveSharedKey(ctxt, req)
require.Error(t.t, err, "expected to have an error")
require.Contains(
t.t, err.Error(), match, "error failed to match",
)
err := alice.RPC.DeriveSharedKeyErr(req)
require.Contains(ht, err.Error(), match, "error does not match")
}
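For reference, the expected shared key computed via privKeyECDH.ECDH above is the SHA256 of the compressed ECDH point, which is what DeriveSharedKey must reproduce. A minimal local sketch (hypothetical helper, assuming lnd's keychain package):

// localECDH computes the value the test compares against the RPC
// response: the SHA256 hash of the compressed shared point between our
// private key and the given public key. Sketch only, not part of this
// diff.
func localECDH(priv *btcec.PrivateKey, pub *btcec.PublicKey) ([32]byte, error) {
	ecdh := &keychain.PrivKeyECDH{PrivKey: priv}
	return ecdh.ECDH(pub)
}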
// Test that EphemeralPubkey must be supplied.
@ -204,43 +194,35 @@ func runDeriveSharedKey(t *harnessTest, alice *lntest.HarnessNode) {
// testSignOutputRaw makes sure that the SignOutputRaw RPC can be used with all
// custom ways of specifying the signing key in the key descriptor/locator.
func testSignOutputRaw(net *lntest.NetworkHarness, t *harnessTest) {
runSignOutputRaw(t, net, net.Alice)
func testSignOutputRaw(ht *lntemp.HarnessTest) {
runSignOutputRaw(ht, ht.Alice)
}
// runSignOutputRaw makes sure that the SignOutputRaw RPC can be used with all
// custom ways of specifying the signing key in the key descriptor/locator.
func runSignOutputRaw(t *harnessTest, net *lntest.NetworkHarness,
alice *lntest.HarnessNode) {
ctxb := context.Background()
ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout)
defer cancel()
func runSignOutputRaw(ht *lntemp.HarnessTest, alice *node.HarnessNode) {
// For the next step, we need a public key. Let's use a special family
// for this. We want this to be an index of zero.
const testCustomKeyFamily = 44
keyDesc, err := alice.WalletKitClient.DeriveNextKey(
ctxt, &walletrpc.KeyReq{
KeyFamily: testCustomKeyFamily,
},
)
require.NoError(t.t, err)
require.Equal(t.t, int32(0), keyDesc.KeyLoc.KeyIndex)
req := &walletrpc.KeyReq{
KeyFamily: testCustomKeyFamily,
}
keyDesc := alice.RPC.DeriveNextKey(req)
require.Equal(ht, int32(0), keyDesc.KeyLoc.KeyIndex)
targetPubKey, err := btcec.ParsePubKey(keyDesc.RawKeyBytes)
require.NoError(t.t, err)
require.NoError(ht, err)
// First, try with a key descriptor that only sets the public key.
assertSignOutputRaw(
t, net, alice, targetPubKey, &signrpc.KeyDescriptor{
ht, alice, targetPubKey, &signrpc.KeyDescriptor{
RawKeyBytes: keyDesc.RawKeyBytes,
},
)
// Now try again, this time only with the (0 index!) key locator.
assertSignOutputRaw(
t, net, alice, targetPubKey, &signrpc.KeyDescriptor{
ht, alice, targetPubKey, &signrpc.KeyDescriptor{
KeyLoc: &signrpc.KeyLocator{
KeyFamily: keyDesc.KeyLoc.KeyFamily,
KeyIndex: keyDesc.KeyLoc.KeyIndex,
@ -250,29 +232,25 @@ func runSignOutputRaw(t *harnessTest, net *lntest.NetworkHarness,
// And now test everything again with a new key where we know the index
// is not 0.
ctxt, cancel = context.WithTimeout(ctxb, defaultTimeout)
defer cancel()
keyDesc, err = alice.WalletKitClient.DeriveNextKey(
ctxt, &walletrpc.KeyReq{
KeyFamily: testCustomKeyFamily,
},
)
require.NoError(t.t, err)
require.Equal(t.t, int32(1), keyDesc.KeyLoc.KeyIndex)
req = &walletrpc.KeyReq{
KeyFamily: testCustomKeyFamily,
}
keyDesc = alice.RPC.DeriveNextKey(req)
require.Equal(ht, int32(1), keyDesc.KeyLoc.KeyIndex)
targetPubKey, err = btcec.ParsePubKey(keyDesc.RawKeyBytes)
require.NoError(t.t, err)
require.NoError(ht, err)
// First, try with a key descriptor that only sets the public key.
assertSignOutputRaw(
t, net, alice, targetPubKey, &signrpc.KeyDescriptor{
ht, alice, targetPubKey, &signrpc.KeyDescriptor{
RawKeyBytes: keyDesc.RawKeyBytes,
},
)
// Now try again, this time only with the key locator.
assertSignOutputRaw(
t, net, alice, targetPubKey, &signrpc.KeyDescriptor{
ht, alice, targetPubKey, &signrpc.KeyDescriptor{
KeyLoc: &signrpc.KeyLocator{
KeyFamily: keyDesc.KeyLoc.KeyFamily,
KeyIndex: keyDesc.KeyLoc.KeyIndex,
@ -284,53 +262,44 @@ func runSignOutputRaw(t *harnessTest, net *lntest.NetworkHarness,
// assertSignOutputRaw sends coins to a p2wkh address derived from the given
// target public key and then tries to spend that output again by invoking the
// SignOutputRaw RPC with the key descriptor provided.
func assertSignOutputRaw(t *harnessTest, net *lntest.NetworkHarness,
alice *lntest.HarnessNode, targetPubKey *btcec.PublicKey,
func assertSignOutputRaw(ht *lntemp.HarnessTest,
alice *node.HarnessNode, targetPubKey *btcec.PublicKey,
keyDesc *signrpc.KeyDescriptor) {
ctxb := context.Background()
ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout)
defer cancel()
pubKeyHash := btcutil.Hash160(targetPubKey.SerializeCompressed())
targetAddr, err := btcutil.NewAddressWitnessPubKeyHash(
pubKeyHash, harnessNetParams,
)
require.NoError(t.t, err)
require.NoError(ht, err)
targetScript, err := txscript.PayToAddrScript(targetAddr)
require.NoError(t.t, err)
require.NoError(ht, err)
// Send some coins to the generated p2wpkh address.
_, err = alice.SendCoins(ctxt, &lnrpc.SendCoinsRequest{
req := &lnrpc.SendCoinsRequest{
Addr: targetAddr.String(),
Amount: 800_000,
})
require.NoError(t.t, err)
}
alice.RPC.SendCoins(req)
// Wait until the TX is found in the mempool.
txid, err := waitForTxInMempool(net.Miner.Client, minerMempoolTimeout)
require.NoError(t.t, err)
txid := ht.Miner.AssertNumTxsInMempool(1)[0]
targetOutputIndex := getOutputIndex(
t, net.Miner, txid, targetAddr.String(),
)
targetOutputIndex := ht.GetOutputIndex(txid, targetAddr.String())
// Clear the mempool.
mineBlocks(t, net, 1, 1)
ht.MineBlocksAndAssertNumTxes(1, 1)
// Try to spend the output now to a new p2wkh address.
p2wkhResp, err := alice.NewAddress(ctxt, &lnrpc.NewAddressRequest{
Type: lnrpc.AddressType_WITNESS_PUBKEY_HASH,
})
require.NoError(t.t, err)
addrReq := &lnrpc.NewAddressRequest{Type: AddrTypeWitnessPubkeyHash}
p2wkhResp := alice.RPC.NewAddress(addrReq)
p2wkhAdrr, err := btcutil.DecodeAddress(
p2wkhResp.Address, harnessNetParams,
)
require.NoError(t.t, err)
require.NoError(ht, err)
p2wkhPkScript, err := txscript.PayToAddrScript(p2wkhAdrr)
require.NoError(t.t, err)
require.NoError(ht, err)
tx := wire.NewMsgTx(2)
tx.TxIn = []*wire.TxIn{{
@ -346,24 +315,22 @@ func assertSignOutputRaw(t *harnessTest, net *lntest.NetworkHarness,
}}
var buf bytes.Buffer
require.NoError(t.t, tx.Serialize(&buf))
require.NoError(ht, tx.Serialize(&buf))
signResp, err := alice.SignerClient.SignOutputRaw(
ctxt, &signrpc.SignReq{
RawTxBytes: buf.Bytes(),
SignDescs: []*signrpc.SignDescriptor{{
Output: &signrpc.TxOut{
PkScript: targetScript,
Value: 800_000,
},
InputIndex: 0,
KeyDesc: keyDesc,
Sighash: uint32(txscript.SigHashAll),
WitnessScript: targetScript,
}},
},
)
require.NoError(t.t, err)
signReq := &signrpc.SignReq{
RawTxBytes: buf.Bytes(),
SignDescs: []*signrpc.SignDescriptor{{
Output: &signrpc.TxOut{
PkScript: targetScript,
Value: 800_000,
},
InputIndex: 0,
KeyDesc: keyDesc,
Sighash: uint32(txscript.SigHashAll),
WitnessScript: targetScript,
}},
}
signResp := alice.RPC.SignOutputRaw(signReq)
tx.TxIn[0].Witness = wire.TxWitness{
append(signResp.RawSigs[0], byte(txscript.SigHashAll)),
@ -371,68 +338,38 @@ func assertSignOutputRaw(t *harnessTest, net *lntest.NetworkHarness,
}
buf.Reset()
require.NoError(t.t, tx.Serialize(&buf))
require.NoError(ht, tx.Serialize(&buf))
_, err = alice.WalletKitClient.PublishTransaction(
ctxt, &walletrpc.Transaction{
TxHex: buf.Bytes(),
},
)
require.NoError(t.t, err)
alice.RPC.PublishTransaction(&walletrpc.Transaction{
TxHex: buf.Bytes(),
})
// Wait until the spending tx is found.
txid, err = waitForTxInMempool(net.Miner.Client, minerMempoolTimeout)
require.NoError(t.t, err)
p2wkhOutputIndex := getOutputIndex(
t, net.Miner, txid, p2wkhAdrr.String(),
)
txid = ht.Miner.AssertNumTxsInMempool(1)[0]
p2wkhOutputIndex := ht.GetOutputIndex(txid, p2wkhAdrr.String())
op := &lnrpc.OutPoint{
TxidBytes: txid[:],
OutputIndex: uint32(p2wkhOutputIndex),
}
assertWalletUnspent(t, alice, op, "")
ht.AssertUTXOInWallet(alice, op, "")
// Mine another block to clean up the mempool and to make sure the spend
// tx is actually included in a block.
mineBlocks(t, net, 1, 1)
}
// deriveCustomizedKey uses the family and index to derive a public key from
// the node's walletkit client.
func deriveCustomizedKey(ctx context.Context, node *lntest.HarnessNode,
keyLoc *signrpc.KeyLocator) (*btcec.PublicKey, error) {
ctxt, cancel := context.WithTimeout(ctx, defaultTimeout)
defer cancel()
resp, err := node.WalletKitClient.DeriveKey(ctxt, keyLoc)
if err != nil {
return nil, fmt.Errorf("failed to derive key: %v", err)
}
pub, err := btcec.ParsePubKey(resp.RawKeyBytes)
if err != nil {
return nil, fmt.Errorf("failed to parse node pubkey: %v", err)
}
return pub, nil
// Mine another block to clean up the mempool and to make sure the
// spend tx is actually included in a block.
ht.MineBlocksAndAssertNumTxes(1, 1)
}
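The witness constructed above is the standard two-element p2wkh stack. A condensed sketch (hypothetical helper, assuming the btcd packages imported in this file):

// buildP2WKHWitness assembles the <sig || sighash-flag> <pubkey>
// witness stack used to spend a p2wkh output, as done with
// signResp.RawSigs[0] above. Sketch only, not part of this diff.
func buildP2WKHWitness(rawSig []byte, pub *btcec.PublicKey) wire.TxWitness {
	sig := append(rawSig, byte(txscript.SigHashAll))
	return wire.TxWitness{sig, pub.SerializeCompressed()}
}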
// testSignVerifyMessage makes sure that the SignMessage RPC can be used with
// all custom flags by verifying with VerifyMessage. Tests both ECDSA and
// Schnorr signatures.
func testSignVerifyMessage(net *lntest.NetworkHarness, t *harnessTest) {
runSignVerifyMessage(t, net, net.Alice)
func testSignVerifyMessage(ht *lntemp.HarnessTest) {
runSignVerifyMessage(ht, ht.Alice)
}
// runSignVerifyMessage makes sure that the SignMessage RPC can be used with all
// custom flags by verifying with VerifyMessage. Tests both ECDSA and Schnorr
// signatures.
func runSignVerifyMessage(t *harnessTest, net *lntest.NetworkHarness,
alice *lntest.HarnessNode) {
ctxb := context.Background()
ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout)
defer cancel()
// runSignVerifyMessage makes sure that the SignMessage RPC can be used with
// all custom flags by verifying with VerifyMessage. Tests both ECDSA and
// Schnorr signatures.
func runSignVerifyMessage(ht *lntemp.HarnessTest, alice *node.HarnessNode) {
aliceMsg := []byte("alice msg")
keyLoc := &signrpc.KeyLocator{
KeyFamily: int32(keychain.KeyFamilyNodeKey),
@ -446,11 +383,17 @@ func runSignVerifyMessage(t *harnessTest, net *lntest.NetworkHarness,
SchnorrSig: false,
}
signMsgResp, err := alice.SignerClient.SignMessage(ctxt, signMsgReq)
require.NoError(t.t, err, "failed to sign message")
signMsgResp := alice.RPC.SignMessageSigner(signMsgReq)
customPubKey, err := deriveCustomizedKey(ctxt, alice, keyLoc)
require.NoError(t.t, err, "failed to create customized pubkey")
deriveCustomizedKey := func() *btcec.PublicKey {
resp := alice.RPC.DeriveKey(keyLoc)
pub, err := btcec.ParsePubKey(resp.RawKeyBytes)
require.NoError(ht, err, "failed to parse customized pubkey")
return pub
}
customPubKey := deriveCustomizedKey()
verifyReq := &signrpc.VerifyMessageReq{
Msg: aliceMsg,
@ -458,10 +401,8 @@ func runSignVerifyMessage(t *harnessTest, net *lntest.NetworkHarness,
Pubkey: customPubKey.SerializeCompressed(),
IsSchnorrSig: false,
}
verifyResp, err := alice.SignerClient.VerifyMessage(ctxt, verifyReq)
require.NoError(t.t, err)
require.True(t.t, verifyResp.Valid, "failed to verify message")
verifyResp := alice.RPC.VerifyMessageSigner(verifyReq)
require.True(ht, verifyResp.Valid, "failed to verify message")
// Use a different key locator.
keyLoc = &signrpc.KeyLocator{
@ -475,12 +416,8 @@ func runSignVerifyMessage(t *harnessTest, net *lntest.NetworkHarness,
KeyLoc: keyLoc,
SchnorrSig: true,
}
signMsgResp, err = alice.SignerClient.SignMessage(ctxt, signMsgReq)
require.NoError(t.t, err)
customPubKey, err = deriveCustomizedKey(ctxt, alice, keyLoc)
require.NoError(t.t, err, "failed to create customized pubkey")
signMsgResp = alice.RPC.SignMessageSigner(signMsgReq)
customPubKey = deriveCustomizedKey()
// Verify the Schnorr signature.
verifyReq = &signrpc.VerifyMessageReq{
@ -489,10 +426,8 @@ func runSignVerifyMessage(t *harnessTest, net *lntest.NetworkHarness,
Pubkey: schnorr.SerializePubKey(customPubKey),
IsSchnorrSig: true,
}
verifyResp, err = alice.SignerClient.VerifyMessage(ctxt, verifyReq)
require.NoError(t.t, err)
require.True(t.t, verifyResp.Valid, "failed to verify message")
verifyResp = alice.RPC.VerifyMessageSigner(verifyReq)
require.True(ht, verifyResp.Valid, "failed to verify message")
// Also test that we can tweak a private key and verify the message
// against the tweaked public key.
@ -502,8 +437,7 @@ func runSignVerifyMessage(t *harnessTest, net *lntest.NetworkHarness,
)
signMsgReq.SchnorrSigTapTweak = tweakBytes[:]
signMsgResp, err = alice.SignerClient.SignMessage(ctxt, signMsgReq)
require.NoError(t.t, err)
signMsgResp = alice.RPC.SignMessageSigner(signMsgReq)
verifyReq = &signrpc.VerifyMessageReq{
Msg: aliceMsg,
@ -511,8 +445,6 @@ func runSignVerifyMessage(t *harnessTest, net *lntest.NetworkHarness,
Pubkey: schnorr.SerializePubKey(tweakedPubKey),
IsSchnorrSig: true,
}
verifyResp, err = alice.SignerClient.VerifyMessage(ctxt, verifyReq)
require.NoError(t.t, err)
require.True(t.t, verifyResp.Valid, "failed to verify message")
verifyResp = alice.RPC.VerifyMessageSigner(verifyReq)
require.True(ht, verifyResp.Valid, "failed to verify message")
}
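For context, the Schnorr branch above can also be checked locally against the x-only pubkey. A sketch (hypothetical helper; the single-SHA256 digest is an assumption for illustration, since lnd's signer applies its own message-hashing rules):

// verifySchnorrSig parses a BIP-340 signature and verifies it against
// the given public key over a SHA256 digest of the message. Sketch
// only, not part of this diff.
func verifySchnorrSig(sigBytes, msg []byte, pub *btcec.PublicKey) (bool, error) {
	sig, err := schnorr.ParseSignature(sigBytes)
	if err != nil {
		return false, err
	}
	digest := sha256.Sum256(msg)
	return sig.Verify(digest[:], pub), nil
}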

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -4,140 +4,8 @@
package itest
var allTestCases = []*testCase{
{
name: "multiple channel creation and update subscription",
test: testBasicChannelCreationAndUpdates,
},
{
name: "derive shared key",
test: testDeriveSharedKey,
},
{
name: "sign output raw",
test: testSignOutputRaw,
},
{
name: "sign verify message",
test: testSignVerifyMessage,
},
{
name: "async payments benchmark",
test: testAsyncPayments,
},
{
name: "async bidirectional payments",
test: testBidirectionalAsyncPayments,
},
{
name: "switch circuit persistence",
test: testSwitchCircuitPersistence,
},
{
name: "switch offline delivery",
test: testSwitchOfflineDelivery,
},
{
name: "switch offline delivery persistence",
test: testSwitchOfflineDeliveryPersistence,
},
{
name: "switch offline delivery outgoing offline",
test: testSwitchOfflineDeliveryOutgoingOffline,
},
{
name: "cpfp",
test: testCPFP,
},
{
name: "psbt channel funding",
test: testPsbtChanFunding,
},
{
name: "sign psbt",
test: testSignPsbt,
},
{
name: "sendtoroute multi path payment",
test: testSendToRouteMultiPath,
},
{
name: "sendtoroute amp",
test: testSendToRouteAMP,
},
{
name: "sendpayment amp",
test: testSendPaymentAMP,
},
{
name: "sendpayment amp invoice",
test: testSendPaymentAMPInvoice,
},
{
name: "sendpayment amp invoice repeat",
test: testSendPaymentAMPInvoiceRepeat,
},
{
name: "send multi path payment",
test: testSendMultiPathPayment,
},
{
name: "forward interceptor",
test: testForwardInterceptorBasic,
},
{
name: "forward interceptor dedup htlcs",
test: testForwardInterceptorDedupHtlc,
},
{
name: "wallet import account",
test: testWalletImportAccount,
},
{
name: "wallet import pubkey",
test: testWalletImportPubKey,
},
{
name: "remote signer",
test: testRemoteSigner,
},
{
name: "taproot",
test: testTaproot,
},
{
name: "zero conf channel open",
test: testZeroConfChannelOpen,
},
{
name: "option scid alias",
test: testOptionScidAlias,
},
{
name: "scid alias channel update",
test: testUpdateChannelPolicyScidAlias,
},
{
name: "scid alias upgrade",
test: testOptionScidUpgrade,
},
{
name: "nonstd sweep",
test: testNonstdSweep,
},
{
name: "taproot coop close",
test: testTaprootCoopClose,
},
{
name: "trackpayments",
test: testTrackPayments,
},
{
name: "open channel fee policy",
test: testOpenChannelUpdateFeePolicy,
},
{
name: "custom messaging",
test: testCustomMessage,
},
}

View File

@ -1,102 +1,89 @@
package itest
import (
"context"
"encoding/hex"
"time"
"github.com/btcsuite/btcd/btcutil"
"github.com/lightningnetwork/lnd/lnrpc"
"github.com/lightningnetwork/lnd/lnrpc/routerrpc"
"github.com/lightningnetwork/lnd/lntest"
"github.com/lightningnetwork/lnd/lntemp"
"github.com/stretchr/testify/require"
)
// testTrackPayments tests whether a client that calls the TrackPayments api
// receives payment updates.
func testTrackPayments(net *lntest.NetworkHarness, t *harnessTest) {
func testTrackPayments(ht *lntemp.HarnessTest) {
// Open a channel between alice and bob.
net.EnsureConnected(t.t, net.Alice, net.Bob)
channel := openChannelAndAssert(
t, net, net.Alice, net.Bob,
lntest.OpenChannelParams{
alice, bob := ht.Alice, ht.Bob
channel := ht.OpenChannel(
alice, bob, lntemp.OpenChannelParams{
Amt: btcutil.Amount(300000),
},
)
defer closeChannelAndAssert(t, net, net.Alice, channel, true)
err := net.Alice.WaitForNetworkChannelOpen(channel)
require.NoError(t.t, err, "unable to wait for channel to open")
ctxb := context.Background()
ctxt, cancelTracker := context.WithCancel(ctxb)
defer cancelTracker()
// Call the TrackPayments api to listen for payment updates.
tracker, err := net.Alice.RouterClient.TrackPayments(
ctxt,
&routerrpc.TrackPaymentsRequest{
NoInflightUpdates: false,
},
)
require.NoError(t.t, err, "failed to call TrackPayments successfully.")
req := &routerrpc.TrackPaymentsRequest{
NoInflightUpdates: false,
}
tracker := alice.RPC.TrackPayments(req)
// Create an invoice from bob.
var amountMsat int64 = 1000
invoiceResp, err := net.Bob.AddInvoice(
ctxb,
invoiceResp := bob.RPC.AddInvoice(
&lnrpc.Invoice{
ValueMsat: amountMsat,
},
)
require.NoError(t.t, err, "unable to add invoice.")
invoice, err := net.Bob.LookupInvoice(
ctxb,
&lnrpc.PaymentHash{
RHashStr: hex.EncodeToString(invoiceResp.RHash),
},
)
require.NoError(t.t, err, "unable to find invoice.")
invoice := bob.RPC.LookupInvoice(invoiceResp.RHash)
// Send payment from alice to bob.
paymentClient, err := net.Alice.RouterClient.SendPaymentV2(
ctxb,
paymentClient := alice.RPC.SendPayment(
&routerrpc.SendPaymentRequest{
PaymentRequest: invoice.PaymentRequest,
TimeoutSeconds: 60,
},
)
require.NoError(t.t, err, "unable to send payment.")
// Make sure the payment doesn't error due to invalid parameters or
// the like.
_, err = paymentClient.Recv()
require.NoError(t.t, err, "unable to get payment update.")
_, err := paymentClient.Recv()
require.NoError(ht, err, "unable to get payment update.")
// Assert the first payment update is an inflight update.
update1, err := tracker.Recv()
require.NoError(t.t, err, "unable to receive payment update 1.")
require.NoError(ht, err, "unable to receive payment update 1.")
require.Equal(
t.t, lnrpc.PaymentFailureReason_FAILURE_REASON_NONE,
ht, lnrpc.PaymentFailureReason_FAILURE_REASON_NONE,
update1.FailureReason,
)
require.Equal(t.t, lnrpc.Payment_IN_FLIGHT, update1.Status)
require.Equal(t.t, invoice.PaymentRequest, update1.PaymentRequest)
require.Equal(t.t, amountMsat, update1.ValueMsat)
require.Equal(ht, lnrpc.Payment_IN_FLIGHT, update1.Status)
require.Equal(ht, invoice.PaymentRequest, update1.PaymentRequest)
require.Equal(ht, amountMsat, update1.ValueMsat)
// Assert the second payment update is a payment success update.
update2, err := tracker.Recv()
require.NoError(t.t, err, "unable to receive payment update 2.")
require.NoError(ht, err, "unable to receive payment update 2.")
require.Equal(
t.t, lnrpc.PaymentFailureReason_FAILURE_REASON_NONE,
ht, lnrpc.PaymentFailureReason_FAILURE_REASON_NONE,
update2.FailureReason,
)
require.Equal(t.t, lnrpc.Payment_SUCCEEDED, update2.Status)
require.Equal(t.t, invoice.PaymentRequest, update2.PaymentRequest)
require.Equal(t.t, amountMsat, update2.ValueMsat)
require.Equal(ht, lnrpc.Payment_SUCCEEDED, update2.Status)
require.Equal(ht, invoice.PaymentRequest, update2.PaymentRequest)
require.Equal(ht, amountMsat, update2.ValueMsat)
require.Equal(
t.t, hex.EncodeToString(invoice.RPreimage),
ht, hex.EncodeToString(invoice.RPreimage),
update2.PaymentPreimage,
)
// TODO(yy): remove the sleep once the following bug is fixed.
// When the invoice is reported settled, the commitment dance is not
// yet finished, which can cause an error when closing the channel,
// saying there's active HTLCs. We need to investigate this issue and
// reverse the order to, first finish the commitment dance, then report
// the invoice as settled.
time.Sleep(2 * time.Second)
ht.CloseChannel(alice, channel)
}
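For orientation, a TrackPayments consumer typically loops on the stream until the payment reaches a terminal state. A minimal sketch (hypothetical helper, assuming the routerrpc stream client):

// waitForTerminalUpdate reads updates from a TrackPayments stream
// until the payment leaves the IN_FLIGHT state, then returns the final
// update. Sketch only, not part of this diff.
func waitForTerminalUpdate(
	stream routerrpc.Router_TrackPaymentsClient) (*lnrpc.Payment, error) {

	for {
		payment, err := stream.Recv()
		if err != nil {
			return nil, err
		}
		if payment.Status != lnrpc.Payment_IN_FLIGHT {
			return payment, nil
		}
	}
}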

View File

@ -2,10 +2,6 @@ package itest
import (
"bytes"
"context"
"crypto/rand"
"fmt"
"math"
"testing"
"time"
@ -20,8 +16,8 @@ import (
"github.com/lightningnetwork/lnd/funding"
"github.com/lightningnetwork/lnd/lnrpc"
"github.com/lightningnetwork/lnd/lnrpc/walletrpc"
"github.com/lightningnetwork/lnd/lntest"
"github.com/lightningnetwork/lnd/lntest/wait"
"github.com/lightningnetwork/lnd/lntemp"
"github.com/lightningnetwork/lnd/lntemp/node"
"github.com/lightningnetwork/lnd/lnwallet"
"github.com/stretchr/testify/require"
)
@ -55,30 +51,27 @@ func walletToLNAddrType(t *testing.T,
// newExternalAddr generates a new external address of an imported account for a
// pair of nodes, where one acts as the funder and the other as the signer.
func newExternalAddr(t *testing.T, funder, signer *lntest.HarnessNode,
func newExternalAddr(ht *lntemp.HarnessTest, funder, signer *node.HarnessNode,
importedAccount string, addrType walletrpc.AddressType) string {
// We'll generate a new address for Carol from Dave's node to receive
// and fund a new channel.
ctxb := context.Background()
ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout)
defer cancel()
funderResp, err := funder.NewAddress(ctxt, &lnrpc.NewAddressRequest{
Type: walletToLNAddrType(t, addrType),
req := &lnrpc.NewAddressRequest{
Type: walletToLNAddrType(ht.T, addrType),
Account: importedAccount,
})
require.NoError(t, err)
}
funderResp := funder.RPC.NewAddress(req)
// Carol also needs to generate the address for the sake of this test to
// be able to sign the channel funding input.
signerResp, err := signer.NewAddress(ctxt, &lnrpc.NewAddressRequest{
Type: walletToLNAddrType(t, addrType),
})
require.NoError(t, err)
// Carol also needs to generate the address for the sake of this test
// to be able to sign the channel funding input.
req = &lnrpc.NewAddressRequest{
Type: walletToLNAddrType(ht.T, addrType),
}
signerResp := signer.RPC.NewAddress(req)
// Sanity check that the generated addresses match.
require.Equal(t, funderResp.Address, signerResp.Address)
assertExternalAddrType(t, funderResp.Address, addrType)
require.Equal(ht, funderResp.Address, signerResp.Address)
assertExternalAddrType(ht.T, funderResp.Address, addrType)
return funderResp.Address
}
@ -130,108 +123,21 @@ func assertOutputScriptType(t *testing.T, expType txscript.ScriptClass,
spew.Sdump(tx))
}
// assertAccountBalance asserts that the unconfirmed and confirmed balance for
// the given account is satisfied by the WalletBalance and ListUnspent RPCs. The
// unconfirmed balance is not checked for neutrino nodes.
func assertAccountBalance(t *testing.T, node *lntest.HarnessNode, account string,
confirmedBalance, unconfirmedBalance int64) {
err := wait.NoError(func() error {
balanceResp, err := node.WalletBalance(
context.Background(), &lnrpc.WalletBalanceRequest{},
)
if err != nil {
return err
}
require.Contains(t, balanceResp.AccountBalance, account)
accountBalance := balanceResp.AccountBalance[account]
// Check confirmed balance.
if accountBalance.ConfirmedBalance != confirmedBalance {
return fmt.Errorf("expected confirmed balance %v, "+
"got %v", confirmedBalance,
accountBalance.ConfirmedBalance)
}
listUtxosReq := &lnrpc.ListUnspentRequest{
MinConfs: 1,
MaxConfs: math.MaxInt32,
Account: account,
}
confirmedUtxosResp, err := node.ListUnspent(
context.Background(), listUtxosReq,
)
if err != nil {
return err
}
var totalConfirmedVal int64
for _, utxo := range confirmedUtxosResp.Utxos {
totalConfirmedVal += utxo.AmountSat
}
if totalConfirmedVal != confirmedBalance {
return fmt.Errorf("expected total confirmed utxo "+
"balance %v, got %v", confirmedBalance,
totalConfirmedVal)
}
// Skip unconfirmed balance checks for neutrino nodes.
if node.Cfg.BackendCfg.Name() == lntest.NeutrinoBackendName {
return nil
}
// Check unconfirmed balance.
if accountBalance.UnconfirmedBalance != unconfirmedBalance {
return fmt.Errorf("expected unconfirmed balance %v, "+
"got %v", unconfirmedBalance,
accountBalance.UnconfirmedBalance)
}
listUtxosReq.MinConfs = 0
listUtxosReq.MaxConfs = 0
unconfirmedUtxosResp, err := node.ListUnspent(
context.Background(), listUtxosReq,
)
require.NoError(t, err)
var totalUnconfirmedVal int64
for _, utxo := range unconfirmedUtxosResp.Utxos {
totalUnconfirmedVal += utxo.AmountSat
}
if totalUnconfirmedVal != unconfirmedBalance {
return fmt.Errorf("expected total unconfirmed utxo "+
"balance %v, got %v", unconfirmedBalance,
totalUnconfirmedVal)
}
return nil
}, defaultTimeout)
require.NoError(t, err)
}
// psbtSendFromImportedAccount attempts to fund a PSBT from the given imported
// account, originating from the source node to the destination.
func psbtSendFromImportedAccount(t *harnessTest, srcNode, destNode,
signer *lntest.HarnessNode, account string,
func psbtSendFromImportedAccount(ht *lntemp.HarnessTest, srcNode, destNode,
signer *node.HarnessNode, account string,
accountAddrType walletrpc.AddressType) {
ctxb := context.Background()
ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout)
defer cancel()
balanceResp, err := srcNode.WalletBalance(
ctxt, &lnrpc.WalletBalanceRequest{},
)
require.NoError(t.t, err)
require.Contains(t.t, balanceResp.AccountBalance, account)
balanceResp := srcNode.RPC.WalletBalance()
require.Contains(ht, balanceResp.AccountBalance, account)
confBalance := balanceResp.AccountBalance[account].ConfirmedBalance
destAmt := confBalance - 10000
ctxt, cancel = context.WithTimeout(ctxb, defaultTimeout)
defer cancel()
destAddrResp, err := destNode.NewAddress(ctxt, &lnrpc.NewAddressRequest{
destAddrResp := destNode.RPC.NewAddress(&lnrpc.NewAddressRequest{
Type: lnrpc.AddressType_WITNESS_PUBKEY_HASH,
})
require.NoError(t.t, err)
ctxt, cancel = context.WithTimeout(ctxb, defaultTimeout)
defer cancel()
fundReq := &walletrpc.FundPsbtRequest{
Template: &walletrpc.FundPsbtRequest_Raw{
Raw: &walletrpc.TxTemplate{
@ -245,29 +151,20 @@ func psbtSendFromImportedAccount(t *harnessTest, srcNode, destNode,
},
Account: account,
}
fundResp, err := srcNode.WalletKitClient.FundPsbt(ctxt, fundReq)
require.NoError(t.t, err)
fundResp := srcNode.RPC.FundPsbt(fundReq)
// Have Carol sign the PSBT input since Dave doesn't have any private
// key information.
ctxt, cancel = context.WithTimeout(ctxb, defaultTimeout)
defer cancel()
finalizeReq := &walletrpc.FinalizePsbtRequest{
FundedPsbt: fundResp.FundedPsbt,
}
finalizeResp, err := signer.WalletKitClient.FinalizePsbt(
ctxt, finalizeReq,
)
require.NoError(t.t, err)
finalizeResp := signer.RPC.FinalizePsbt(finalizeReq)
// With the PSBT signed, we can broadcast the resulting transaction.
ctxt, cancel = context.WithTimeout(ctxb, defaultTimeout)
defer cancel()
publishReq := &walletrpc.Transaction{
TxHex: finalizeResp.RawFinalTx,
}
_, err = srcNode.WalletKitClient.PublishTransaction(ctxt, publishReq)
require.NoError(t.t, err)
srcNode.RPC.PublishTransaction(publishReq)
// Carol's balance from Dave's perspective should update accordingly.
var (
@ -304,7 +201,7 @@ func psbtSendFromImportedAccount(t *harnessTest, srcNode, destNode,
expChangeScriptType = txscript.WitnessV1TaprootTy
default:
t.Fatalf("unsupported addr type %v", accountAddrType)
ht.Fatalf("unsupported addr type %v", accountAddrType)
}
changeUtxoAmt := confBalance - destAmt - expTxFee
@ -314,16 +211,20 @@ func psbtSendFromImportedAccount(t *harnessTest, srcNode, destNode,
if account == defaultImportedAccount {
accountWithBalance = defaultAccount
}
assertAccountBalance(t.t, srcNode, accountWithBalance, 0, changeUtxoAmt)
_ = mineBlocks(t, t.lndHarness, 1, 1)
assertAccountBalance(t.t, srcNode, accountWithBalance, changeUtxoAmt, 0)
ht.AssertWalletAccountBalance(
srcNode, accountWithBalance, 0, changeUtxoAmt,
)
ht.MineBlocksAndAssertNumTxes(1, 1)
ht.AssertWalletAccountBalance(
srcNode, accountWithBalance, changeUtxoAmt, 0,
)
// Finally, assert that the transaction has the expected change address
// type based on the account.
var tx wire.MsgTx
err = tx.Deserialize(bytes.NewReader(finalizeResp.RawFinalTx))
require.NoError(t.t, err)
assertOutputScriptType(t.t, expChangeScriptType, &tx, changeUtxoAmt)
err := tx.Deserialize(bytes.NewReader(finalizeResp.RawFinalTx))
require.NoError(ht, err)
assertOutputScriptType(ht.T, expChangeScriptType, &tx, changeUtxoAmt)
}
// fundChanAndCloseFromImportedAccount attempts to fund a channel from the
@ -331,21 +232,14 @@ func psbtSendFromImportedAccount(t *harnessTest, srcNode, destNode,
// node. To ensure the channel is operational before closing it, a test payment
// is made. Several balance assertions are made along the way for the sake of
// correctness.
func fundChanAndCloseFromImportedAccount(t *harnessTest, srcNode, destNode,
signer *lntest.HarnessNode, account string,
func fundChanAndCloseFromImportedAccount(ht *lntemp.HarnessTest, srcNode,
destNode, signer *node.HarnessNode, account string,
accountAddrType walletrpc.AddressType, utxoAmt, chanSize int64) {
ctxb := context.Background()
// Retrieve the current confirmed balance to make some assertions later
// on.
ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout)
defer cancel()
balanceResp, err := srcNode.WalletBalance(
ctxt, &lnrpc.WalletBalanceRequest{},
)
require.NoError(t.t, err)
require.Contains(t.t, balanceResp.AccountBalance, account)
balanceResp := srcNode.RPC.WalletBalance()
require.Contains(ht, balanceResp.AccountBalance, account)
accountConfBalance := balanceResp.
AccountBalance[account].ConfirmedBalance
defaultAccountConfBalance := balanceResp.
@ -353,31 +247,23 @@ func fundChanAndCloseFromImportedAccount(t *harnessTest, srcNode, destNode,
// Now, start the channel funding process. We'll need to connect both
// nodes first.
t.lndHarness.EnsureConnected(t.t, srcNode, destNode)
ht.EnsureConnected(srcNode, destNode)
// The source node will then fund the channel through a PSBT shim.
var pendingChanID [32]byte
_, err = rand.Read(pendingChanID[:])
require.NoError(t.t, err)
ctxt, cancel = context.WithTimeout(ctxb, defaultTimeout)
defer cancel()
chanUpdates, rawPsbt, err := openChannelPsbt(
ctxt, srcNode, destNode, lntest.OpenChannelParams{
pendingChanID := ht.Random32Bytes()
chanUpdates, rawPsbt := ht.OpenChannelPsbt(
srcNode, destNode, lntemp.OpenChannelParams{
Amt: btcutil.Amount(chanSize),
FundingShim: &lnrpc.FundingShim{
Shim: &lnrpc.FundingShim_PsbtShim{
PsbtShim: &lnrpc.PsbtShim{
PendingChanId: pendingChanID[:],
PendingChanId: pendingChanID,
},
},
},
},
)
require.NoError(t.t, err)
ctxt, cancel = context.WithTimeout(ctxb, defaultTimeout)
defer cancel()
fundReq := &walletrpc.FundPsbtRequest{
Template: &walletrpc.FundPsbtRequest_Psbt{
Psbt: rawPsbt,
@ -387,49 +273,40 @@ func fundChanAndCloseFromImportedAccount(t *harnessTest, srcNode, destNode,
},
Account: account,
}
fundResp, err := srcNode.WalletKitClient.FundPsbt(ctxt, fundReq)
require.NoError(t.t, err)
fundResp := srcNode.RPC.FundPsbt(fundReq)
_, err = srcNode.FundingStateStep(ctxb, &lnrpc.FundingTransitionMsg{
srcNode.RPC.FundingStateStep(&lnrpc.FundingTransitionMsg{
Trigger: &lnrpc.FundingTransitionMsg_PsbtVerify{
PsbtVerify: &lnrpc.FundingPsbtVerify{
PendingChanId: pendingChanID[:],
PendingChanId: pendingChanID,
FundedPsbt: fundResp.FundedPsbt,
},
},
})
require.NoError(t.t, err)
// Now that we have a PSBT to fund the channel, our signer needs to sign
// it.
ctxt, cancel = context.WithTimeout(ctxb, defaultTimeout)
defer cancel()
finalizeReq := &walletrpc.FinalizePsbtRequest{
FundedPsbt: fundResp.FundedPsbt,
}
finalizeResp, err := signer.WalletKitClient.FinalizePsbt(ctxt, finalizeReq)
require.NoError(t.t, err)
finalizeResp := signer.RPC.FinalizePsbt(finalizeReq)
// The source node can then submit the signed PSBT and complete the
// channel funding process.
_, err = srcNode.FundingStateStep(ctxb, &lnrpc.FundingTransitionMsg{
srcNode.RPC.FundingStateStep(&lnrpc.FundingTransitionMsg{
Trigger: &lnrpc.FundingTransitionMsg_PsbtFinalize{
PsbtFinalize: &lnrpc.FundingPsbtFinalize{
PendingChanId: pendingChanID[:],
PendingChanId: pendingChanID,
SignedPsbt: finalizeResp.SignedPsbt,
},
},
})
require.NoError(t.t, err)
// We should receive a notification of the channel funding transaction
// being broadcast.
ctxt, cancel = context.WithTimeout(ctxb, defaultTimeout)
defer cancel()
updateResp, err := receiveChanUpdate(ctxt, chanUpdates)
require.NoError(t.t, err)
updateResp := ht.ReceiveOpenChannelUpdate(chanUpdates)
upd, ok := updateResp.Update.(*lnrpc.OpenStatusUpdate_ChanPending)
require.True(t.t, ok)
require.True(ht, ok)
// Mine enough blocks to announce the channel to the network, making
// balance assertions along the way.
@ -467,11 +344,11 @@ func fundChanAndCloseFromImportedAccount(t *harnessTest, srcNode, destNode,
expChangeScriptType = txscript.WitnessV1TaprootTy
default:
t.Fatalf("unsupported addr type %v", accountAddrType)
ht.Fatalf("unsupported addr type %v", accountAddrType)
}
chanChangeUtxoAmt := utxoAmt - chanSize - expChanTxFee
txHash, err := chainhash.NewHash(upd.ChanPending.Txid)
require.NoError(t.t, err)
require.NoError(ht, err)
// If we're spending from the default imported account, then any change
// outputs produced are moved to the default wallet account, so we
@ -479,35 +356,35 @@ func fundChanAndCloseFromImportedAccount(t *harnessTest, srcNode, destNode,
var confBalanceAfterChan int64
if account == defaultImportedAccount {
confBalanceAfterChan = defaultAccountConfBalance
assertAccountBalance(t.t, srcNode, account, 0, 0)
assertAccountBalance(
t.t, srcNode, defaultAccount, defaultAccountConfBalance,
ht.AssertWalletAccountBalance(srcNode, account, 0, 0)
ht.AssertWalletAccountBalance(
srcNode, defaultAccount, defaultAccountConfBalance,
chanChangeUtxoAmt,
)
block := mineBlocks(t, t.lndHarness, 6, 1)[0]
assertTxInBlock(t, block, txHash)
block := ht.MineBlocksAndAssertNumTxes(6, 1)[0]
ht.Miner.AssertTxInBlock(block, txHash)
confBalanceAfterChan += chanChangeUtxoAmt
assertAccountBalance(t.t, srcNode, account, 0, 0)
assertAccountBalance(
t.t, srcNode, defaultAccount, confBalanceAfterChan, 0,
ht.AssertWalletAccountBalance(srcNode, account, 0, 0)
ht.AssertWalletAccountBalance(
srcNode, defaultAccount, confBalanceAfterChan, 0,
)
} else {
// Otherwise, all interactions remain within Carol's imported
// account.
confBalanceAfterChan = accountConfBalance - utxoAmt
assertAccountBalance(
t.t, srcNode, account, confBalanceAfterChan,
ht.AssertWalletAccountBalance(
srcNode, account, confBalanceAfterChan,
chanChangeUtxoAmt,
)
block := mineBlocks(t, t.lndHarness, 6, 1)[0]
assertTxInBlock(t, block, txHash)
block := ht.MineBlocksAndAssertNumTxes(6, 1)[0]
ht.Miner.AssertTxInBlock(block, txHash)
confBalanceAfterChan += chanChangeUtxoAmt
assertAccountBalance(
t.t, srcNode, account, confBalanceAfterChan, 0,
ht.AssertWalletAccountBalance(
srcNode, account, confBalanceAfterChan, 0,
)
}
@ -515,8 +392,10 @@ func fundChanAndCloseFromImportedAccount(t *harnessTest, srcNode, destNode,
// based on the account.
var tx wire.MsgTx
err = tx.Deserialize(bytes.NewReader(finalizeResp.RawFinalTx))
require.NoError(t.t, err)
assertOutputScriptType(t.t, expChangeScriptType, &tx, chanChangeUtxoAmt)
require.NoError(ht, err)
assertOutputScriptType(
ht.T, expChangeScriptType, &tx, chanChangeUtxoAmt,
)
// Wait for the channel to be announced by both parties.
chanPoint := &lnrpc.ChannelPoint{
@ -525,29 +404,30 @@ func fundChanAndCloseFromImportedAccount(t *harnessTest, srcNode, destNode,
},
OutputIndex: upd.ChanPending.OutputIndex,
}
err = srcNode.WaitForNetworkChannelOpen(chanPoint)
require.NoError(t.t, err)
err = destNode.WaitForNetworkChannelOpen(chanPoint)
require.NoError(t.t, err)
ht.AssertTopologyChannelOpen(srcNode, chanPoint)
ht.AssertTopologyChannelOpen(destNode, chanPoint)
// Send a test payment to ensure the channel is operating as normal.
const invoiceAmt = 100000
ctxt, cancel = context.WithTimeout(ctxb, defaultTimeout)
defer cancel()
resp, err := destNode.AddInvoice(ctxt, &lnrpc.Invoice{
invoice := &lnrpc.Invoice{
Memo: "psbt import chan",
Value: invoiceAmt,
})
require.NoError(t.t, err)
}
resp := destNode.RPC.AddInvoice(invoice)
err = completePaymentRequests(
srcNode, srcNode.RouterClient,
[]string{resp.PaymentRequest}, true,
)
require.NoError(t.t, err)
ht.CompletePaymentRequests(srcNode, []string{resp.PaymentRequest})
// TODO(yy): remove the sleep once the following bug is fixed. When the
// payment is reported as settled by srcNode, it's expected the
// commitment dance is finished and all subsequent states have been
// updated. Yet we'd receive the error `cannot co-op close channel with
// active htlcs` or `link failed to shutdown` if we close the channel.
// We need to investigate the order of settling the payments and
// updating commitments to understand and fix it.
time.Sleep(2 * time.Second)
// Now that we've confirmed the opened channel works, we'll close it.
closeChannelAndAssert(t, t.lndHarness, srcNode, chanPoint, false)
ht.CloseChannel(srcNode, chanPoint)
// Since the channel still had funds left on the source node's side,
// they must've been redeemed after the close. Without a pre-negotiated
@ -557,17 +437,17 @@ func fundChanAndCloseFromImportedAccount(t *harnessTest, srcNode, destNode,
balanceFromClosedChan := chanSize - invoiceAmt - chanCloseTxFee
if account == defaultImportedAccount {
assertAccountBalance(t.t, srcNode, account, 0, 0)
assertAccountBalance(
t.t, srcNode, defaultAccount,
ht.AssertWalletAccountBalance(srcNode, account, 0, 0)
ht.AssertWalletAccountBalance(
srcNode, defaultAccount,
confBalanceAfterChan+balanceFromClosedChan, 0,
)
} else {
assertAccountBalance(
t.t, srcNode, account, confBalanceAfterChan, 0,
ht.AssertWalletAccountBalance(
srcNode, account, confBalanceAfterChan, 0,
)
assertAccountBalance(
t.t, srcNode, defaultAccount, balanceFromClosedChan, 0,
ht.AssertWalletAccountBalance(
srcNode, defaultAccount, balanceFromClosedChan, 0,
)
}
}
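For orientation, the funding flow above reduces to a handshake against the PSBT shim: fund the PSBT, verify it, then finalize with the signed copy. A condensed sketch of the two FundingStateStep calls (hypothetical helper, assuming an lnrpc.LightningClient; the FundPsbt call and the open-channel update stream are omitted):

// psbtFundingSteps submits the funded PSBT for verification and then
// hands over the signed PSBT to complete the shim-based funding flow.
// Sketch only, not part of this diff.
func psbtFundingSteps(ctx context.Context, client lnrpc.LightningClient,
	pendingChanID, fundedPsbt, signedPsbt []byte) error {

	_, err := client.FundingStateStep(ctx, &lnrpc.FundingTransitionMsg{
		Trigger: &lnrpc.FundingTransitionMsg_PsbtVerify{
			PsbtVerify: &lnrpc.FundingPsbtVerify{
				PendingChanId: pendingChanID,
				FundedPsbt:    fundedPsbt,
			},
		},
	})
	if err != nil {
		return err
	}

	_, err = client.FundingStateStep(ctx, &lnrpc.FundingTransitionMsg{
		Trigger: &lnrpc.FundingTransitionMsg_PsbtFinalize{
			PsbtFinalize: &lnrpc.FundingPsbtFinalize{
				PendingChanId: pendingChanID,
				SignedPsbt:    signedPsbt,
			},
		},
	})
	return err
}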
@ -575,18 +455,20 @@ func fundChanAndCloseFromImportedAccount(t *harnessTest, srcNode, destNode,
// testWalletImportAccount tests that an imported account can fund transactions
// and channels through PSBTs, by having one node (the one with the imported
// account) craft the transactions and another node act as the signer.
func testWalletImportAccount(net *lntest.NetworkHarness, t *harnessTest) {
func testWalletImportAccount(ht *lntemp.HarnessTest) {
testCases := []struct {
name string
addrType walletrpc.AddressType
}{
{
name: "standard BIP-0049",
addrType: walletrpc.AddressType_NESTED_WITNESS_PUBKEY_HASH,
name: "standard BIP-0049",
addrType: walletrpc.
AddressType_NESTED_WITNESS_PUBKEY_HASH,
},
{
name: "lnd BIP-0049 variant",
addrType: walletrpc.AddressType_HYBRID_NESTED_WITNESS_PUBKEY_HASH,
name: "lnd BIP-0049 variant",
addrType: walletrpc.
AddressType_HYBRID_NESTED_WITNESS_PUBKEY_HASH,
},
{
name: "standard BIP-0084",
@ -600,23 +482,24 @@ func testWalletImportAccount(net *lntest.NetworkHarness, t *harnessTest) {
for _, tc := range testCases {
tc := tc
success := t.t.Run(tc.name, func(tt *testing.T) {
ht := newHarnessTest(tt, net)
ht.RunTestCase(&testCase{
name: tc.name,
test: func(net1 *lntest.NetworkHarness,
t1 *harnessTest) {
success := ht.Run(tc.name, func(tt *testing.T) {
testFunc := func(ht *lntemp.HarnessTest) {
testWalletImportAccountScenario(
ht, tc.addrType,
)
}
testWalletImportAccountScenario(
net, t, tc.addrType,
)
},
st := ht.Subtest(tt)
st.RunTestCase(&lntemp.TestCase{
Name: tc.name,
TestFunc: testFunc,
})
})
if !success {
// Log failure time to help relate the lnd logs to the
// failure.
t.Logf("Failure time: %v", time.Now().Format(
ht.Logf("Failure time: %v", time.Now().Format(
"2006-01-02 15:04:05.000",
))
break
@ -624,115 +507,103 @@ func testWalletImportAccount(net *lntest.NetworkHarness, t *harnessTest) {
}
}
func testWalletImportAccountScenario(net *lntest.NetworkHarness, t *harnessTest,
func testWalletImportAccountScenario(ht *lntemp.HarnessTest,
addrType walletrpc.AddressType) {
// We'll start our test by having two nodes, Carol and Dave. Carol's
// default wallet account will be imported into Dave's node.
carol := net.NewNode(t.t, "carol", nil)
defer shutdownAndAssert(net, t, carol)
//
// NOTE: we won't use standby nodes here since the test will change
// each node's wallet state.
carol := ht.NewNode("carol", nil)
dave := ht.NewNode("dave", nil)
dave := net.NewNode(t.t, "dave", nil)
defer shutdownAndAssert(net, t, dave)
runWalletImportAccountScenario(net, t, addrType, carol, dave)
runWalletImportAccountScenario(ht, addrType, carol, dave)
}
func runWalletImportAccountScenario(net *lntest.NetworkHarness, t *harnessTest,
addrType walletrpc.AddressType, carol, dave *lntest.HarnessNode) {
func runWalletImportAccountScenario(ht *lntemp.HarnessTest,
addrType walletrpc.AddressType, carol, dave *node.HarnessNode) {
ctxb := context.Background()
const utxoAmt int64 = btcutil.SatoshiPerBitcoin
ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout)
defer cancel()
listReq := &walletrpc.ListAccountsRequest{
Name: "default",
AddressType: addrType,
}
listResp, err := carol.WalletKitClient.ListAccounts(ctxt, listReq)
require.NoError(t.t, err)
require.Equal(t.t, len(listResp.Accounts), 1)
listResp := carol.RPC.ListAccounts(listReq)
require.Len(ht, listResp.Accounts, 1)
carolAccount := listResp.Accounts[0]
ctxt, cancel = context.WithTimeout(ctxb, defaultTimeout)
defer cancel()
const importedAccount = "carol"
importReq := &walletrpc.ImportAccountRequest{
Name: importedAccount,
ExtendedPublicKey: carolAccount.ExtendedPublicKey,
AddressType: addrType,
}
_, err = dave.WalletKitClient.ImportAccount(ctxt, importReq)
require.NoError(t.t, err)
dave.RPC.ImportAccount(importReq)
// We'll generate an address for Carol from Dave's node to receive some
// funds.
externalAddr := newExternalAddr(
t.t, dave, carol, importedAccount, addrType,
ht, dave, carol, importedAccount, addrType,
)
// Send coins to Carol's address and confirm them, making sure the
// balance updates accordingly.
ctxt, cancel = context.WithTimeout(ctxb, defaultTimeout)
defer cancel()
_, err = net.Alice.SendCoins(ctxt, &lnrpc.SendCoinsRequest{
alice := ht.Alice
req := &lnrpc.SendCoinsRequest{
Addr: externalAddr,
Amount: utxoAmt,
SatPerByte: 1,
})
require.NoError(t.t, err)
}
alice.RPC.SendCoins(req)
assertAccountBalance(t.t, dave, importedAccount, 0, utxoAmt)
_ = mineBlocks(t, net, 1, 1)
assertAccountBalance(t.t, dave, importedAccount, utxoAmt, 0)
ht.AssertWalletAccountBalance(dave, importedAccount, 0, utxoAmt)
ht.MineBlocksAndAssertNumTxes(1, 1)
ht.AssertWalletAccountBalance(dave, importedAccount, utxoAmt, 0)
// To ensure that Dave can use Carol's account as watch-only, we'll
// construct a PSBT that sends funds to Alice, which we'll then hand
// over to Carol to sign.
psbtSendFromImportedAccount(
t, dave, net.Alice, carol, importedAccount, addrType,
ht, dave, alice, carol, importedAccount, addrType,
)
// We'll generate a new address for Carol from Dave's node to receive
// and fund a new channel.
externalAddr = newExternalAddr(
t.t, dave, carol, importedAccount, addrType,
ht, dave, carol, importedAccount, addrType,
)
// Retrieve the current confirmed balance of the imported account for
// some assertions we'll make later on.
ctxt, cancel = context.WithTimeout(ctxb, defaultTimeout)
defer cancel()
balanceResp, err := dave.WalletBalance(
ctxt, &lnrpc.WalletBalanceRequest{},
)
require.NoError(t.t, err)
require.Contains(t.t, balanceResp.AccountBalance, importedAccount)
confBalance := balanceResp.AccountBalance[importedAccount].ConfirmedBalance
balanceResp := dave.RPC.WalletBalance()
require.Contains(ht, balanceResp.AccountBalance, importedAccount)
confBalance := balanceResp.AccountBalance[importedAccount].
ConfirmedBalance
// Send coins to Carol's address and confirm them, making sure the
// balance updates accordingly.
ctxt, cancel = context.WithTimeout(ctxb, defaultTimeout)
defer cancel()
_, err = net.Alice.SendCoins(ctxt, &lnrpc.SendCoinsRequest{
req = &lnrpc.SendCoinsRequest{
Addr: externalAddr,
Amount: utxoAmt,
SatPerByte: 1,
})
require.NoError(t.t, err)
}
alice.RPC.SendCoins(req)
assertAccountBalance(t.t, dave, importedAccount, confBalance, utxoAmt)
_ = mineBlocks(t, net, 1, 1)
assertAccountBalance(
t.t, dave, importedAccount, confBalance+utxoAmt, 0,
ht.AssertWalletAccountBalance(
dave, importedAccount, confBalance, utxoAmt,
)
ht.MineBlocksAndAssertNumTxes(1, 1)
ht.AssertWalletAccountBalance(
dave, importedAccount, confBalance+utxoAmt, 0,
)
// Now that we have enough funds, it's time to fund the channel, make a
// test payment, and close it. This contains several balance assertions
// along the way.
fundChanAndCloseFromImportedAccount(
t, dave, net.Alice, carol, importedAccount, addrType, utxoAmt,
ht, dave, alice, carol, importedAccount, addrType, utxoAmt,
int64(funding.MaxBtcFundingAmount),
)
}
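The export/import step at the heart of this scenario can be condensed as follows (hypothetical helper, assuming the walletrpc client interfaces; the harness wrappers above hide the raw RPC calls):

// importWatchOnlyAccount copies the default account xpub from the
// signer's wallet into the watch-only node under accountName. Sketch
// only, not part of this diff.
func importWatchOnlyAccount(ctx context.Context, signer,
	watchOnly walletrpc.WalletKitClient, accountName string,
	addrType walletrpc.AddressType) error {

	listResp, err := signer.ListAccounts(ctx, &walletrpc.ListAccountsRequest{
		Name:        "default",
		AddressType: addrType,
	})
	if err != nil {
		return err
	}

	_, err = watchOnly.ImportAccount(ctx, &walletrpc.ImportAccountRequest{
		Name:              accountName,
		ExtendedPublicKey: listResp.Accounts[0].ExtendedPublicKey,
		AddressType:       addrType,
	})
	return err
}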
@ -740,14 +611,15 @@ func runWalletImportAccountScenario(net *lntest.NetworkHarness, t *harnessTest,
// testWalletImportPubKey tests that imported public keys can fund
// transactions and channels through PSBTs, by having one node (the one with the
// imported account) craft the transactions and another node act as the signer.
func testWalletImportPubKey(net *lntest.NetworkHarness, t *harnessTest) {
func testWalletImportPubKey(ht *lntemp.HarnessTest) {
testCases := []struct {
name string
addrType walletrpc.AddressType
}{
{
name: "BIP-0049",
addrType: walletrpc.AddressType_NESTED_WITNESS_PUBKEY_HASH,
name: "BIP-0049",
addrType: walletrpc.
AddressType_NESTED_WITNESS_PUBKEY_HASH,
},
{
name: "BIP-0084",
@ -761,23 +633,24 @@ func testWalletImportPubKey(net *lntest.NetworkHarness, t *harnessTest) {
for _, tc := range testCases {
tc := tc
success := t.t.Run(tc.name, func(tt *testing.T) {
ht := newHarnessTest(tt, net)
ht.RunTestCase(&testCase{
name: tc.name,
test: func(net1 *lntest.NetworkHarness,
t1 *harnessTest) {
success := ht.Run(tc.name, func(tt *testing.T) {
testFunc := func(ht *lntemp.HarnessTest) {
testWalletImportPubKeyScenario(
ht, tc.addrType,
)
}
testWalletImportPubKeyScenario(
net, t, tc.addrType,
)
},
st := ht.Subtest(tt)
st.RunTestCase(&lntemp.TestCase{
Name: tc.name,
TestFunc: testFunc,
})
})
if !success {
// Log failure time to help relate the lnd logs to the
// failure.
t.Logf("Failure time: %v", time.Now().Format(
ht.Logf("Failure time: %v", time.Now().Format(
"2006-01-02 15:04:05.000",
))
break
@ -785,18 +658,18 @@ func testWalletImportPubKey(net *lntest.NetworkHarness, t *harnessTest) {
}
}
func testWalletImportPubKeyScenario(net *lntest.NetworkHarness, t *harnessTest,
func testWalletImportPubKeyScenario(ht *lntemp.HarnessTest,
addrType walletrpc.AddressType) {
ctxb := context.Background()
const utxoAmt int64 = btcutil.SatoshiPerBitcoin
alice := ht.Alice
// We'll start our test by having two nodes, Carol and Dave.
carol := net.NewNode(t.t, "carol", nil)
defer shutdownAndAssert(net, t, carol)
dave := net.NewNode(t.t, "dave", nil)
defer shutdownAndAssert(net, t, dave)
//
// NOTE: we won't use standby nodes here since the test will change
// each node's wallet state.
carol := ht.NewNode("carol", nil)
dave := ht.NewNode("dave", nil)
// We'll define a helper closure that we'll use throughout the test to
// generate a new address of the given type from Carol's perspective,
@ -806,30 +679,27 @@ func testWalletImportPubKeyScenario(net *lntest.NetworkHarness, t *harnessTest,
// Retrieve Carol's account public key for the corresponding
// address type.
ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout)
defer cancel()
listReq := &walletrpc.ListAccountsRequest{
Name: "default",
AddressType: addrType,
}
listResp, err := carol.WalletKitClient.ListAccounts(
ctxt, listReq,
)
require.NoError(t.t, err)
require.Equal(t.t, len(listResp.Accounts), 1)
listResp := carol.RPC.ListAccounts(listReq)
require.Len(ht, listResp.Accounts, 1)
p2wkhAccount := listResp.Accounts[0]
// Derive the external address at the given index.
accountPubKey, err := hdkeychain.NewKeyFromString(
p2wkhAccount.ExtendedPublicKey,
)
require.NoError(t.t, err)
require.NoError(ht, err)
externalAccountExtKey, err := accountPubKey.Derive(0)
require.NoError(t.t, err)
externalAddrExtKey, err := externalAccountExtKey.Derive(keyIndex)
require.NoError(t.t, err)
require.NoError(ht, err)
externalAddrExtKey, err := externalAccountExtKey.Derive(
keyIndex,
)
require.NoError(ht, err)
externalAddrPubKey, err := externalAddrExtKey.ECPubKey()
require.NoError(t.t, err)
require.NoError(ht, err)
// Serialize as 32-byte x-only pubkey for Taproot addresses.
serializedPubKey := externalAddrPubKey.SerializeCompressed()
@ -840,44 +710,34 @@ func testWalletImportPubKeyScenario(net *lntest.NetworkHarness, t *harnessTest,
}
// Import the public key into Dave.
ctxt, cancel = context.WithTimeout(ctxb, defaultTimeout)
defer cancel()
importReq := &walletrpc.ImportPublicKeyRequest{
PublicKey: serializedPubKey,
AddressType: addrType,
}
_, err = dave.WalletKitClient.ImportPublicKey(ctxt, importReq)
require.NoError(t.t, err)
dave.RPC.ImportPublicKey(importReq)
// We'll also generate the same address for Carol, as it'll be
// required later when signing.
ctxt, cancel = context.WithTimeout(ctxb, defaultTimeout)
defer cancel()
carolAddrResp, err := carol.NewAddress(
ctxt, &lnrpc.NewAddressRequest{
Type: walletToLNAddrType(t.t, addrType),
},
)
require.NoError(t.t, err)
carolAddrResp := carol.RPC.NewAddress(&lnrpc.NewAddressRequest{
Type: walletToLNAddrType(ht.T, addrType),
})
// Send coins to Carol's address and confirm them, making sure
// the balance updates accordingly.
ctxt, cancel = context.WithTimeout(ctxb, defaultTimeout)
defer cancel()
_, err = net.Alice.SendCoins(ctxt, &lnrpc.SendCoinsRequest{
req := &lnrpc.SendCoinsRequest{
Addr: carolAddrResp.Address,
Amount: utxoAmt,
SatPerByte: 1,
})
require.NoError(t.t, err)
}
alice.RPC.SendCoins(req)
assertAccountBalance(
t.t, dave, defaultImportedAccount, prevConfBalance,
ht.AssertWalletAccountBalance(
dave, defaultImportedAccount, prevConfBalance,
prevUnconfBalance+utxoAmt,
)
_ = mineBlocks(t, net, 1, 1)
assertAccountBalance(
t.t, dave, defaultImportedAccount,
ht.MineBlocksAndAssertNumTxes(1, 1)
ht.AssertWalletAccountBalance(
dave, defaultImportedAccount,
prevConfBalance+utxoAmt, prevUnconfBalance,
)
}
@ -890,22 +750,15 @@ func testWalletImportPubKeyScenario(net *lntest.NetworkHarness, t *harnessTest,
// construct a PSBT that sends funds to Alice, which we'll then hand
// over to Carol to sign.
psbtSendFromImportedAccount(
t, dave, net.Alice, carol, defaultImportedAccount, addrType,
ht, dave, alice, carol, defaultImportedAccount, addrType,
)
// We'll now attempt to fund a channel.
//
// We'll have Carol generate another external address, which we'll
// import into Dave again.
ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout)
defer cancel()
balanceResp, err := dave.WalletBalance(
ctxt, &lnrpc.WalletBalanceRequest{},
)
require.NoError(t.t, err)
require.Contains(
t.t, balanceResp.AccountBalance, defaultImportedAccount,
)
balanceResp := dave.RPC.WalletBalance()
require.Contains(ht, balanceResp.AccountBalance, defaultImportedAccount)
confBalance := balanceResp.
AccountBalance[defaultImportedAccount].ConfirmedBalance
importPubKey(1, confBalance, 0)
@ -914,7 +767,7 @@ func testWalletImportPubKeyScenario(net *lntest.NetworkHarness, t *harnessTest,
// test payment, and close it. This contains several balance assertions
// along the way.
fundChanAndCloseFromImportedAccount(
t, dave, net.Alice, carol, defaultImportedAccount, addrType,
ht, dave, alice, carol, defaultImportedAccount, addrType,
utxoAmt, int64(funding.MaxBtcFundingAmount),
)
}
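The derivation inside the importPubKey closure follows the usual external-branch path <xpub>/0/index. A minimal sketch (hypothetical helper, assuming btcutil's hdkeychain package):

// deriveExternalPubKey re-derives the public key at external index i
// from an account xpub, matching the closure above. Sketch only, not
// part of this diff.
func deriveExternalPubKey(xpub string, i uint32) (*btcec.PublicKey, error) {
	accountKey, err := hdkeychain.NewKeyFromString(xpub)
	if err != nil {
		return nil, err
	}

	// Branch 0 is the external (receive) branch.
	externalKey, err := accountKey.Derive(0)
	if err != nil {
		return nil, err
	}

	childKey, err := externalKey.Derive(i)
	if err != nil {
		return nil, err
	}

	return childKey.ECPubKey()
}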

File diff suppressed because it is too large

View File

@ -5,20 +5,16 @@ import (
"crypto/rand"
"fmt"
"io"
"testing"
"time"
"github.com/btcsuite/btcd/btcutil"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/rpcclient"
"github.com/btcsuite/btcd/txscript"
"github.com/btcsuite/btcd/wire"
"github.com/go-errors/errors"
"github.com/lightningnetwork/lnd/input"
"github.com/lightningnetwork/lnd/lnrpc"
"github.com/lightningnetwork/lnd/lnrpc/routerrpc"
"github.com/lightningnetwork/lnd/lntemp"
"github.com/lightningnetwork/lnd/lntemp/rpc"
"github.com/lightningnetwork/lnd/lntest"
"github.com/lightningnetwork/lnd/lntest/wait"
"github.com/lightningnetwork/lnd/lnwallet"
@ -470,49 +466,3 @@ func findTxAtHeight(t *harnessTest, height int32,
return nil
}
// getOutputIndex returns the output index of the given address in the given
// transaction.
func getOutputIndex(t *harnessTest, miner *lntest.HarnessMiner,
txid *chainhash.Hash, addr string) int {
t.t.Helper()
// We'll then extract the raw transaction from the mempool in order to
// determine the index of the p2tr output.
tx, err := miner.Client.GetRawTransaction(txid)
require.NoError(t.t, err)
p2trOutputIndex := -1
for i, txOut := range tx.MsgTx().TxOut {
_, addrs, _, err := txscript.ExtractPkScriptAddrs(
txOut.PkScript, miner.ActiveNet,
)
require.NoError(t.t, err)
if addrs[0].String() == addr {
p2trOutputIndex = i
}
}
require.Greater(t.t, p2trOutputIndex, -1)
return p2trOutputIndex
}
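As a usage sketch, this helper is typically called to turn an address into an addressable outpoint. The wrapper below is hypothetical (not part of the diff) and only reuses types already imported in this file:

// outPointForAddr is a hypothetical wrapper illustrating the typical
// use of getOutputIndex: locate which output of txid pays addr and
// package it as an outpoint for later RPC calls.
func outPointForAddr(t *harnessTest, miner *lntest.HarnessMiner,
	txid *chainhash.Hash, addr string) *lnrpc.OutPoint {

	index := getOutputIndex(t, miner, txid, addr)
	return &lnrpc.OutPoint{
		TxidBytes:   txid[:],
		OutputIndex: uint32(index),
	}
}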
// acceptChannel is used to accept a single channel that comes across. This
// should be run in a goroutine and is used to test nodes with the zero-conf
// feature bit.
func acceptChannel(t *testing.T, zeroConf bool, stream rpc.AcceptorClient) {
t.Helper()
req, err := stream.Recv()
require.NoError(t, err)
resp := &lnrpc.ChannelAcceptResponse{
Accept: true,
PendingChanId: req.PendingChanId,
ZeroConf: zeroConf,
}
err = stream.Send(resp)
require.NoError(t, err)
}
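A usage sketch under the refactored harness — the exact shape of the ChannelAcceptor helper (returning a stream plus a cancel func) is an assumption here:

// Hypothetical usage: register Bob as a channel acceptor, then accept
// the incoming zero-conf channel from a background goroutine while the
// test drives the channel open.
acceptStream, cancel := bob.RPC.ChannelAcceptor()
defer cancel()
go acceptChannel(ht.T, true, acceptStream)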

View File

@ -3566,9 +3566,12 @@ func (lc *LightningChannel) validateCommitmentSanity(theirLogCounter,
// balance to satisfy the final evaluated HTLC's.
switch {
case int64(ourBalance) < 0:
return ErrBelowChanReserve
return fmt.Errorf("%w: negative local balance",
ErrBelowChanReserve)
case int64(theirBalance) < 0:
return ErrBelowChanReserve
return fmt.Errorf("%w: negative remote balance",
ErrBelowChanReserve)
}
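Wrapping the sentinel with %w is what keeps this change compatible for callers that use errors.Is, while breaking plain == comparisons — which is exactly why the test hunks further below migrate to require.ErrorIs. A self-contained sketch, with the sentinel redefined locally for illustration (the real one lives in lnwallet):

package main

import (
	"errors"
	"fmt"
)

// Locally redefined for the sketch only.
var ErrBelowChanReserve = errors.New("channel reserve violation")

func main() {
	err := fmt.Errorf("%w: negative local balance", ErrBelowChanReserve)

	// Direct comparison no longer matches the wrapped error...
	fmt.Println(err == ErrBelowChanReserve) // false

	// ...but errors.Is unwraps and still does.
	fmt.Println(errors.Is(err, ErrBelowChanReserve)) // true
}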
// Ensure that the fee being applied is enough to be relayed across the
@ -3580,17 +3583,25 @@ func (lc *LightningChannel) validateCommitmentSanity(theirLogCounter,
// If the added HTLCs will decrease the balance, make sure they won't
// dip the local and remote balances below the channel reserves.
ourReserve := lnwire.NewMSatFromSatoshis(
lc.channelState.LocalChanCfg.ChanReserve,
)
theirReserve := lnwire.NewMSatFromSatoshis(
lc.channelState.RemoteChanCfg.ChanReserve,
)
switch {
case ourBalance < ourInitialBalance &&
ourBalance < lnwire.NewMSatFromSatoshis(
lc.channelState.LocalChanCfg.ChanReserve):
return ErrBelowChanReserve
case ourBalance < ourInitialBalance && ourBalance < ourReserve:
lc.log.Debugf("Funds below chan reserve: ourBalance=%v, "+
"ourReserve=%v", ourBalance, ourReserve)
return fmt.Errorf("%w: our balance below chan reserve",
ErrBelowChanReserve)
case theirBalance < theirInitialBalance &&
theirBalance < lnwire.NewMSatFromSatoshis(
lc.channelState.RemoteChanCfg.ChanReserve):
return ErrBelowChanReserve
case theirBalance < theirInitialBalance && theirBalance < theirReserve:
lc.log.Debugf("Funds below chan reserve: theirBalance=%v, "+
"theirReserve=%v", theirBalance, theirReserve)
return fmt.Errorf("%w: their balance below chan reserve",
ErrBelowChanReserve)
}
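Note that each case is a two-part predicate: sitting below the reserve is only a violation if the balance also decreased, which tolerates channels that started out below reserve. A standalone sketch with illustrative numbers:

package main

import (
	"fmt"

	"github.com/lightningnetwork/lnd/lnwire"
)

func main() {
	// Illustrative numbers only: a 10k-sat reserve and an 8k-sat
	// balance that never decreased is not flagged, e.g. right after
	// a channel is opened below reserve.
	ourReserve := lnwire.NewMSatFromSatoshis(10_000)
	ourInitialBalance := lnwire.NewMSatFromSatoshis(8_000)
	ourBalance := ourInitialBalance

	fmt.Println(ourBalance < ourInitialBalance &&
		ourBalance < ourReserve) // false: did not decrease

	// Dipping further below the reserve trips both conditions.
	ourBalance -= lnwire.NewMSatFromSatoshis(1_000)
	fmt.Println(ourBalance < ourInitialBalance &&
		ourBalance < ourReserve) // true
}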
// validateUpdates takes a set of updates, and validates them against

View File

@ -2737,10 +2737,7 @@ func TestAddHTLCNegativeBalance(t *testing.T) {
htlcAmt = lnwire.NewMSatFromSatoshis(2 * btcutil.SatoshiPerBitcoin)
htlc, _ := createHTLC(numHTLCs+1, htlcAmt)
_, err = aliceChannel.AddHTLC(htlc, nil)
if err != ErrBelowChanReserve {
t.Fatalf("expected balance below channel reserve, instead "+
"got: %v", err)
}
require.ErrorIs(t, err, ErrBelowChanReserve)
}
// assertNoChanSyncNeeded is a helper function that asserts that upon restart,
@ -5642,10 +5639,8 @@ func TestDesyncHTLCs(t *testing.T) {
// balance is unavailable.
htlcAmt = lnwire.NewMSatFromSatoshis(1 * btcutil.SatoshiPerBitcoin)
htlc, _ = createHTLC(1, htlcAmt)
if _, err = aliceChannel.AddHTLC(htlc, nil); err != ErrBelowChanReserve {
t.Fatalf("expected ErrInsufficientBalance, instead received: %v",
err)
}
_, err = aliceChannel.AddHTLC(htlc, nil)
require.ErrorIs(t, err, ErrBelowChanReserve)
// Now do a state transition, which will ACK the FailHTLC, making Alice
// able to add the new HTLC.
@ -6063,14 +6058,11 @@ func TestChanReserve(t *testing.T) {
htlc, _ = createHTLC(bobIndex, htlcAmt)
bobIndex++
_, err := bobChannel.AddHTLC(htlc, nil)
if err != ErrBelowChanReserve {
t.Fatalf("expected ErrBelowChanReserve, instead received: %v", err)
}
require.ErrorIs(t, err, ErrBelowChanReserve)
// Alice will reject this htlc upon receiving it.
if _, err := aliceChannel.ReceiveHTLC(htlc); err != ErrBelowChanReserve {
t.Fatalf("expected ErrBelowChanReserve, instead received: %v", err)
}
_, err = aliceChannel.ReceiveHTLC(htlc)
require.ErrorIs(t, err, ErrBelowChanReserve)
// We must setup the channels again, since a violation of the channel
// constraints leads to channel shutdown.
@ -6105,14 +6097,11 @@ func TestChanReserve(t *testing.T) {
htlc, _ = createHTLC(aliceIndex, htlcAmt)
aliceIndex++
_, err = aliceChannel.AddHTLC(htlc, nil)
if err != ErrBelowChanReserve {
t.Fatalf("expected ErrBelowChanReserve, instead received: %v", err)
}
require.ErrorIs(t, err, ErrBelowChanReserve)
// Likewise, Bob will reject receiving the htlc for the same reason.
if _, err := bobChannel.ReceiveHTLC(htlc); err != ErrBelowChanReserve {
t.Fatalf("expected ErrBelowChanReserve, instead received: %v", err)
}
_, err = bobChannel.ReceiveHTLC(htlc)
require.ErrorIs(t, err, ErrBelowChanReserve)
// We must setup the channels again, since a violation of the channel
// constraints leads to channel shutdown.
@ -6218,22 +6207,15 @@ func TestChanReserveRemoteInitiator(t *testing.T) {
// Bob should refuse to add this HTLC, since he realizes it will create
// an invalid commitment.
_, err = bobChannel.AddHTLC(htlc, nil)
if err != ErrBelowChanReserve {
t.Fatalf("expected ErrBelowChanReserve, instead received: %v",
err)
}
require.ErrorIs(t, err, ErrBelowChanReserve)
// Of course Alice will also not have enough balance to add it herself.
_, err = aliceChannel.AddHTLC(htlc, nil)
if err != ErrBelowChanReserve {
t.Fatalf("expected ErrBelowChanReserve, instead received: %v",
err)
}
require.ErrorIs(t, err, ErrBelowChanReserve)
// Same for Alice, she should refuse to accept this second HTLC.
if _, err := aliceChannel.ReceiveHTLC(htlc); err != ErrBelowChanReserve {
t.Fatalf("expected ErrBelowChanReserve, instead received: %v", err)
}
_, err = aliceChannel.ReceiveHTLC(htlc)
require.ErrorIs(t, err, ErrBelowChanReserve)
}
// TestChanReserveLocalInitiatorDustHtlc tests that the fee the initiator must pay
@ -6276,9 +6258,7 @@ func TestChanReserveLocalInitiatorDustHtlc(t *testing.T) {
// Alice should realize that the fee she must pay to add this HTLC to
// the local commitment would take her below the channel reserve.
_, err = aliceChannel.AddHTLC(htlc, nil)
if err != ErrBelowChanReserve {
t.Fatalf("expected ErrBelowChanReserve, instead received: %v", err)
}
require.ErrorIs(t, err, ErrBelowChanReserve)
}
// TestMinHTLC tests that the ErrBelowMinHTLC error is thrown if an HTLC is added