Merge pull request #6824 from yyforyongyu/5-new-itest

itest: continued itest refactor and fix - IV

Commit: 48c8c1bf48
@@ -369,7 +369,8 @@
 PRs([6776](https://github.com/lightningnetwork/lnd/pull/6776),
 [7172](https://github.com/lightningnetwork/lnd/pull/7172),
 [7242](https://github.com/lightningnetwork/lnd/pull/7242),
 [7245](https://github.com/lightningnetwork/lnd/pull/7245)),
-[6823](https://github.com/lightningnetwork/lnd/pull/6823)) have been made to
+[6823](https://github.com/lightningnetwork/lnd/pull/6823),
+[6824](https://github.com/lightningnetwork/lnd/pull/6824)) have been made to
 refactor the itest for code health and maintenance.

 # Contributors (Alphabetical Order)
@@ -4,7 +4,6 @@ import (
     "context"
     "encoding/hex"
     "fmt"
-    "sync"
     "testing"
     "time"
@@ -676,6 +675,32 @@ func (h *HarnessTest) NewNodeWithSeedEtcd(name string, etcdCfg *etcd.Config,
     return h.newNodeWithSeed(name, extraArgs, req, statelessInit)
 }

+// NewNodeRemoteSigner creates a new remote signer node and asserts its
+// creation.
+func (h *HarnessTest) NewNodeRemoteSigner(name string, extraArgs []string,
+    password []byte, watchOnly *lnrpc.WatchOnly) *node.HarnessNode {
+
+    hn, err := h.manager.newNode(h.T, name, extraArgs, password, true)
+    require.NoErrorf(h, err, "unable to create new node for %s", name)
+
+    err = hn.StartWithNoAuth(h.runCtx)
+    require.NoError(h, err, "failed to start node %s", name)
+
+    // With the seed created, construct the init request to the node,
+    // including the newly generated seed.
+    initReq := &lnrpc.InitWalletRequest{
+        WalletPassword: password,
+        WatchOnly:      watchOnly,
+    }
+
+    // Pass the init request via rpc to finish unlocking the node. This
+    // will also initialize the macaroon-authenticated LightningClient.
+    _, err = h.manager.initWalletAndNode(hn, initReq)
+    require.NoErrorf(h, err, "failed to init node %s", name)
+
+    return hn
+}
+
 // KillNode kills the node (but won't wait for the node process to stop).
 func (h *HarnessTest) KillNode(hn *node.HarnessNode) {
     require.NoErrorf(h, hn.Kill(), "%s: kill got error", hn.Name())
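As a usage sketch (not part of the diff), the new helper is meant to pair a watch-only node with a separate signer node. Assuming the usual lntemp setup where `ht` is the active *HarnessTest, and treating the extra arguments and the exported `watchOnlyAccounts` as placeholders, a test might wire it roughly like this:

    // Hypothetical sketch: create a signer node and a watch-only node that
    // forwards signing requests to it.
    password := []byte("itestpassword")

    // The signer node holds the seed and private keys.
    signer := ht.NewNode("Signer", nil)

    // The watch-only node has no keys and relies on the remote signer;
    // the flag and account list below are assumptions for illustration.
    watchOnly := ht.NewNodeRemoteSigner(
        "WatchOnly", []string{"--remotesigner.enable"}, password,
        &lnrpc.WatchOnly{
            MasterKeyBirthdayTimestamp: 0,
            Accounts:                   watchOnlyAccounts,
        },
    )
    _, _ = signer, watchOnly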
@@ -797,6 +822,26 @@ type OpenChannelParams struct {
     // ScidAlias denotes whether the channel will be an option-scid-alias
     // channel type negotiation.
     ScidAlias bool
+
+    // BaseFee is the channel base fee applied during the channel
+    // announcement phase.
+    BaseFee uint64
+
+    // FeeRate is the channel fee rate in ppm applied during the channel
+    // announcement phase.
+    FeeRate uint64
+
+    // UseBaseFee, if set, instructs the downstream logic to apply the
+    // user-specified channel base fee to the channel update announcement.
+    // If set to false it avoids applying a base fee of 0 and instead
+    // activates the default configured base fee.
+    UseBaseFee bool
+
+    // UseFeeRate, if set, instructs the downstream logic to apply the
+    // user-specified channel fee rate to the channel update announcement.
+    // If set to false it avoids applying a fee rate of 0 and instead
+    // activates the default configured fee rate.
+    UseFeeRate bool
 }

 // prepareOpenChannel waits for both nodes to be synced to chain and returns an

@@ -833,6 +878,10 @@ func (h *HarnessTest) prepareOpenChannel(srcNode, destNode *node.HarnessNode,
         CommitmentType: p.CommitmentType,
         ZeroConf:       p.ZeroConf,
         ScidAlias:      p.ScidAlias,
+        BaseFee:        p.BaseFee,
+        FeeRate:        p.FeeRate,
+        UseBaseFee:     p.UseBaseFee,
+        UseFeeRate:     p.UseFeeRate,
     }
 }
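For context (not from the commit), the new fee fields let an itest open a channel whose announcement carries a custom forwarding policy. A minimal sketch, assuming `ht`, `alice` and `bob` come from the standard lntemp harness and the amounts are placeholders:

    // Hypothetical sketch: open a channel with a custom base fee and fee rate.
    chanPoint := ht.OpenChannel(alice, bob, lntemp.OpenChannelParams{
        Amt:        btcutil.Amount(1_000_000),
        BaseFee:    1_500, // msat, only applied because UseBaseFee is true.
        FeeRate:    100,   // ppm, only applied because UseFeeRate is true.
        UseBaseFee: true,
        UseFeeRate: true,
    })
    _ = chanPoint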
@@ -886,24 +935,76 @@ func (h *HarnessTest) OpenChannelAssertStream(srcNode,
 }

 // OpenChannel attempts to open a channel with the specified parameters
-// extended from Alice to Bob. Additionally, the following items are asserted,
-// - 6 blocks will be mined so the channel will be announced if it's public.
-// - the funding transaction should be found in the first block.
+// extended from Alice to Bob. Additionally, for public channels, it will mine
+// extra blocks so they are announced to the network. In specific, the
+// following items are asserted,
+// - for non-zero conf channel, 1 blocks will be mined to confirm the funding
+//   tx.
 // - both nodes should see the channel edge update in their network graph.
 // - both nodes can report the status of the new channel from ListChannels.
+// - extra blocks are mined if it's a public channel.
 func (h *HarnessTest) OpenChannel(alice, bob *node.HarnessNode,
     p OpenChannelParams) *lnrpc.ChannelPoint {

+    // First, open the channel without announcing it.
+    cp := h.OpenChannelNoAnnounce(alice, bob, p)
+
+    // If this is a private channel, there's no need to mine extra blocks
+    // since it will never be announced to the network.
+    if p.Private {
+        return cp
+    }
+
+    // Mine extra blocks to announce the channel.
+    if p.ZeroConf {
+        // For a zero-conf channel, no blocks have been mined so we
+        // need to mine 6 blocks.
+        //
+        // Mine 1 block to confirm the funding transaction.
+        h.MineBlocksAndAssertNumTxes(numBlocksOpenChannel, 1)
+    } else {
+        // For a regular channel, 1 block has already been mined to
+        // confirm the funding transaction, so we mine 5 blocks.
+        h.MineBlocks(numBlocksOpenChannel - 1)
+    }
+
+    return cp
+}
+
+// OpenChannelNoAnnounce attempts to open a channel with the specified
+// parameters extended from Alice to Bob without mining the necessary blocks to
+// announce the channel. Additionally, the following items are asserted,
+// - for non-zero conf channel, 1 blocks will be mined to confirm the funding
+//   tx.
+// - both nodes should see the channel edge update in their network graph.
+// - both nodes can report the status of the new channel from ListChannels.
+func (h *HarnessTest) OpenChannelNoAnnounce(alice, bob *node.HarnessNode,
+    p OpenChannelParams) *lnrpc.ChannelPoint {
+
     chanOpenUpdate := h.OpenChannelAssertStream(alice, bob, p)

-    // Mine 6 blocks, then wait for Alice's node to notify us that the
-    // channel has been opened. The funding transaction should be found
-    // within the first newly mined block. We mine 6 blocks so that in the
-    // case that the channel is public, it is announced to the network.
-    block := h.MineBlocksAndAssertNumTxes(numBlocksOpenChannel, 1)[0]
+    // Open a zero conf channel.
+    if p.ZeroConf {
+        return h.openChannelZeroConf(alice, bob, chanOpenUpdate)
+    }
+
+    // Open a non-zero conf channel.
+    return h.openChannel(alice, bob, chanOpenUpdate)
+}
+
+// openChannel attempts to open a channel with the specified parameters
+// extended from Alice to Bob. Additionally, the following items are asserted,
+// - 1 block is mined and the funding transaction should be found in it.
+// - both nodes should see the channel edge update in their network graph.
+// - both nodes can report the status of the new channel from ListChannels.
+func (h *HarnessTest) openChannel(alice, bob *node.HarnessNode,
+    stream rpc.OpenChanClient) *lnrpc.ChannelPoint {
+
+    // Mine 1 block to confirm the funding transaction.
+    block := h.MineBlocksAndAssertNumTxes(1, 1)[0]

     // Wait for the channel open event.
-    fundingChanPoint := h.WaitForChannelOpenEvent(chanOpenUpdate)
+    fundingChanPoint := h.WaitForChannelOpenEvent(stream)

     // Check that the funding tx is found in the first block.
     fundingTxID := h.GetChanPointFundingTxid(fundingChanPoint)

@@ -918,9 +1019,27 @@ func (h *HarnessTest) OpenChannel(alice, bob *node.HarnessNode,
     h.AssertChannelExists(alice, fundingChanPoint)
     h.AssertChannelExists(bob, fundingChanPoint)

-    // Finally, check the blocks are synced.
-    h.WaitForBlockchainSync(alice)
-    h.WaitForBlockchainSync(bob)
-
+    return fundingChanPoint
+}
+
+// openChannelZeroConf attempts to open a channel with the specified parameters
+// extended from Alice to Bob. Additionally, the following items are asserted,
+// - both nodes should see the channel edge update in their network graph.
+// - both nodes can report the status of the new channel from ListChannels.
+func (h *HarnessTest) openChannelZeroConf(alice, bob *node.HarnessNode,
+    stream rpc.OpenChanClient) *lnrpc.ChannelPoint {
+
+    // Wait for the channel open event.
+    fundingChanPoint := h.WaitForChannelOpenEvent(stream)
+
+    // Check that both alice and bob have seen the channel from their
+    // network topology.
+    h.AssertTopologyChannelOpen(alice, fundingChanPoint)
+    h.AssertTopologyChannelOpen(bob, fundingChanPoint)
+
+    // Finally, check that the channel can be seen in their ListChannels.
+    h.AssertChannelExists(alice, fundingChanPoint)
+    h.AssertChannelExists(bob, fundingChanPoint)
+
     return fundingChanPoint
 }
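The net effect of this refactor is that announcement mining is decoupled from funding confirmation: private channels skip the extra blocks entirely, and zero-conf channels defer all mining until announcement time. A minimal usage sketch (not from the commit), assuming `ht`, `alice` and `bob` come from the standard harness and the amounts are placeholders:

    // Hypothetical sketch: a private channel returns right after the funding
    // flow, with no announcement blocks mined.
    private := ht.OpenChannel(alice, bob, lntemp.OpenChannelParams{
        Amt:     btcutil.Amount(500_000),
        Private: true,
    })

    // A public zero-conf channel has its funding tx confirmed as part of the
    // six announcement blocks mined by OpenChannel.
    zeroConf := ht.OpenChannel(alice, bob, lntemp.OpenChannelParams{
        Amt:      btcutil.Amount(500_000),
        ZeroConf: true,
    })
    _, _ = private, zeroConf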
@@ -1145,36 +1264,54 @@ func (h *HarnessTest) FundCoinsP2TR(amt btcutil.Amount,
     h.fundCoins(amt, target, lnrpc.AddressType_TAPROOT_PUBKEY, true)
 }

-// CompletePaymentRequests sends payments from a node to complete all payment
-// requests. This function does not return until all payments successfully
-// complete without errors.
-func (h *HarnessTest) CompletePaymentRequests(hn *node.HarnessNode,
-    paymentRequests []string) {
+// completePaymentRequestsAssertStatus sends payments from a node to complete
+// all payment requests. This function does not return until all payments
+// have reached the specified status.
+func (h *HarnessTest) completePaymentRequestsAssertStatus(hn *node.HarnessNode,
+    paymentRequests []string, status lnrpc.Payment_PaymentStatus) {

-    var wg sync.WaitGroup
+    // Create a buffered chan to signal the results.
+    results := make(chan rpc.PaymentClient, len(paymentRequests))
+
     // send sends a payment and asserts if it doesn't succeeded.
     send := func(payReq string) {
-        defer wg.Done()
-
         req := &routerrpc.SendPaymentRequest{
             PaymentRequest: payReq,
             TimeoutSeconds: defaultPaymentTimeout,
             FeeLimitMsat:   noFeeLimitMsat,
         }
         stream := hn.RPC.SendPayment(req)
-        h.AssertPaymentStatusFromStream(stream, lnrpc.Payment_SUCCEEDED)
+
+        // Signal sent succeeded.
+        results <- stream
     }

     // Launch all payments simultaneously.
     for _, payReq := range paymentRequests {
         payReqCopy := payReq
-        wg.Add(1)
         go send(payReqCopy)
     }

-    // Wait for all payments to report success.
-    wg.Wait()
+    // Wait for all payments to report the expected status.
+    timer := time.After(DefaultTimeout)
+    select {
+    case stream := <-results:
+        h.AssertPaymentStatusFromStream(stream, status)
+
+    case <-timer:
+        require.Fail(h, "timeout", "waiting payment results timeout")
+    }
+}
+
+// CompletePaymentRequests sends payments from a node to complete all payment
+// requests. This function does not return until all payments successfully
+// complete without errors.
+func (h *HarnessTest) CompletePaymentRequests(hn *node.HarnessNode,
+    paymentRequests []string) {
+
+    h.completePaymentRequestsAssertStatus(
+        hn, paymentRequests, lnrpc.Payment_SUCCEEDED,
+    )
 }

 // CompletePaymentRequestsNoWait sends payments from a node to complete all

@@ -1188,21 +1325,10 @@ func (h *HarnessTest) CompletePaymentRequestsNoWait(hn *node.HarnessNode,
     // we return.
     oldResp := h.GetChannelByChanPoint(hn, chanPoint)

-    // send sends a payment and asserts if it doesn't succeeded.
-    send := func(payReq string) {
-        req := &routerrpc.SendPaymentRequest{
-            PaymentRequest: payReq,
-            TimeoutSeconds: defaultPaymentTimeout,
-            FeeLimitMsat:   noFeeLimitMsat,
-        }
-        hn.RPC.SendPayment(req)
-    }
-
-    // Launch all payments simultaneously.
-    for _, payReq := range paymentRequests {
-        payReqCopy := payReq
-        go send(payReqCopy)
-    }
-
+    // Send payments and assert they are in-flight.
+    h.completePaymentRequestsAssertStatus(
+        hn, paymentRequests, lnrpc.Payment_IN_FLIGHT,
+    )

     // We are not waiting for feedback in the form of a response, but we
     // should still wait long enough for the server to receive and handle
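Both public entry points now funnel through the same helper, differing only in the payment status they wait for. A usage sketch (not from the commit), assuming the harness exposes a `CreatePayReqs` helper and that `ht`, `alice`, `bob`, `chanPoint`, `paymentAmt` and `numPayments` are placeholders from the surrounding test:

    // Hypothetical sketch: settle one batch of invoices fully, then fire a
    // second batch and only assert it is in-flight before returning.
    payReqs, _, _ := ht.CreatePayReqs(bob, paymentAmt, numPayments)
    ht.CompletePaymentRequests(alice, payReqs)

    morePayReqs, _, _ := ht.CreatePayReqs(bob, paymentAmt, numPayments)
    ht.CompletePaymentRequestsNoWait(alice, morePayReqs, chanPoint)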
@@ -1797,3 +1923,92 @@ func (h *HarnessTest) QueryRoutesAndRetry(hn *node.HarnessNode,

     return routes
 }
+
+// ReceiveHtlcInterceptor waits until a message is received on the htlc
+// interceptor stream or the timeout is reached.
+func (h *HarnessTest) ReceiveHtlcInterceptor(
+    stream rpc.InterceptorClient) *routerrpc.ForwardHtlcInterceptRequest {
+
+    chanMsg := make(chan *routerrpc.ForwardHtlcInterceptRequest)
+    errChan := make(chan error)
+    go func() {
+        // Consume one message. This will block until the message is
+        // received.
+        resp, err := stream.Recv()
+        if err != nil {
+            errChan <- err
+            return
+        }
+        chanMsg <- resp
+    }()
+
+    select {
+    case <-time.After(DefaultTimeout):
+        require.Fail(h, "timeout", "timeout intercepting htlc")
+
+    case err := <-errChan:
+        require.Failf(h, "err from stream",
+            "received err from stream: %v", err)
+
+    case updateMsg := <-chanMsg:
+        return updateMsg
+    }
+
+    return nil
+}
+
+// ReceiveChannelEvent waits until a message is received from the
+// ChannelEventsClient stream or the timeout is reached.
+func (h *HarnessTest) ReceiveChannelEvent(
+    stream rpc.ChannelEventsClient) *lnrpc.ChannelEventUpdate {
+
+    chanMsg := make(chan *lnrpc.ChannelEventUpdate)
+    errChan := make(chan error)
+    go func() {
+        // Consume one message. This will block until the message is
+        // received.
+        resp, err := stream.Recv()
+        if err != nil {
+            errChan <- err
+            return
+        }
+        chanMsg <- resp
+    }()
+
+    select {
+    case <-time.After(DefaultTimeout):
+        require.Fail(h, "timeout", "timeout intercepting htlc")
+
+    case err := <-errChan:
+        require.Failf(h, "err from stream",
+            "received err from stream: %v", err)
+
+    case updateMsg := <-chanMsg:
+        return updateMsg
+    }
+
+    return nil
+}
+
+// GetOutputIndex returns the output index of the given address in the given
+// transaction.
+func (h *HarnessTest) GetOutputIndex(txid *chainhash.Hash, addr string) int {
+    // We'll then extract the raw transaction from the mempool in order to
+    // determine the index of the p2tr output.
+    tx := h.Miner.GetRawTransaction(txid)
+
+    p2trOutputIndex := -1
+    for i, txOut := range tx.MsgTx().TxOut {
+        _, addrs, _, err := txscript.ExtractPkScriptAddrs(
+            txOut.PkScript, h.Miner.ActiveNet,
+        )
+        require.NoError(h, err)
+
+        if addrs[0].String() == addr {
+            p2trOutputIndex = i
+        }
+    }
+    require.Greater(h, p2trOutputIndex, -1)
+
+    return p2trOutputIndex
+}
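A usage sketch for the new interceptor helper (not from the commit), assuming `ht`, `alice` and `bob` come from the standard harness and a payment from Alice through Bob is triggered elsewhere in the test:

    // Hypothetical sketch: intercept a forwarded HTLC on Bob and resume it.
    interceptor, cancel := bob.RPC.HtlcInterceptor()
    defer cancel()

    // ...a payment from Alice through Bob is launched here...

    // Block until the interceptor reports the forwarded HTLC or times out.
    packet := ht.ReceiveHtlcInterceptor(interceptor)

    // Let the HTLC continue on its way.
    err := interceptor.Send(&routerrpc.ForwardHtlcInterceptResponse{
        IncomingCircuitKey: packet.IncomingCircuitKey,
        Action:             routerrpc.ResolveHoldForwardAction_RESUME,
    })
    require.NoError(ht, err)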
@@ -1,12 +1,14 @@
 package lntemp

 import (
+    "bytes"
     "context"
     "crypto/rand"
     "encoding/hex"
     "encoding/json"
     "fmt"
     "math"
+    "sort"
     "strings"
     "time"

@@ -127,6 +129,9 @@ func (h *HarnessTest) ConnectNodesPerm(a, b *node.HarnessNode) {
 func (h *HarnessTest) DisconnectNodes(a, b *node.HarnessNode) {
     bobInfo := b.RPC.GetInfo()
     a.RPC.DisconnectPeer(bobInfo.IdentityPubkey)
+
+    // Assert disconnected.
+    h.AssertPeerNotConnected(a, b)
 }

 // EnsureConnected will try to connect to two nodes, returning no error if they
@@ -729,6 +734,51 @@ func (h *HarnessTest) AssertNumUTXOs(hn *node.HarnessNode,
     return h.AssertNumUTXOsWithConf(hn, num, math.MaxInt32, 0)
 }

+// getUTXOs gets the number of newly created UTOXs within the current test
+// scope.
+func (h *HarnessTest) getUTXOs(hn *node.HarnessNode, account string,
+    max, min int32) []*lnrpc.Utxo {
+
+    var unconfirmed bool
+
+    if max == 0 {
+        unconfirmed = true
+    }
+
+    req := &walletrpc.ListUnspentRequest{
+        Account:         account,
+        MaxConfs:        max,
+        MinConfs:        min,
+        UnconfirmedOnly: unconfirmed,
+    }
+    resp := hn.RPC.ListUnspent(req)
+
+    return resp.Utxos
+}
+
+// GetUTXOs returns all the UTXOs for the given node's account, including
+// confirmed and unconfirmed.
+func (h *HarnessTest) GetUTXOs(hn *node.HarnessNode,
+    account string) []*lnrpc.Utxo {
+
+    return h.getUTXOs(hn, account, math.MaxInt32, 0)
+}
+
+// GetUTXOsConfirmed returns the confirmed UTXOs for the given node's account.
+func (h *HarnessTest) GetUTXOsConfirmed(hn *node.HarnessNode,
+    account string) []*lnrpc.Utxo {
+
+    return h.getUTXOs(hn, account, math.MaxInt32, 1)
+}
+
+// GetUTXOsUnconfirmed returns the unconfirmed UTXOs for the given node's
+// account.
+func (h *HarnessTest) GetUTXOsUnconfirmed(hn *node.HarnessNode,
+    account string) []*lnrpc.Utxo {
+
+    return h.getUTXOs(hn, account, 0, 0)
+}
+
 // WaitForBalanceConfirmed waits until the node sees the expected confirmed
 // balance in its wallet.
 func (h *HarnessTest) WaitForBalanceConfirmed(hn *node.HarnessNode,
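A usage sketch of the new UTXO accessors (not from the commit). It assumes the harness exposes an unconfirmed funding helper and that the wallet's default account is named "default"; `ht` and `alice` are the usual harness placeholders:

    // Hypothetical sketch: fund Alice, then inspect her default account's
    // UTXOs before and after confirmation.
    ht.FundCoinsUnconfirmed(btcutil.Amount(100_000), alice)
    unconfirmed := ht.GetUTXOsUnconfirmed(alice, "default")
    require.Len(ht, unconfirmed, 1)

    ht.MineBlocksAndAssertNumTxes(1, 1)
    confirmed := ht.GetUTXOsConfirmed(alice, "default")
    require.Len(ht, confirmed, 1)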
@@ -878,6 +928,16 @@ func (h *HarnessTest) AssertPaymentStatusFromStream(stream rpc.PaymentClient,
     return h.assertPaymentStatusWithTimeout(stream, status, DefaultTimeout)
 }

+// AssertPaymentSucceedWithTimeout asserts that a payment is succeeded within
+// the specified timeout.
+func (h *HarnessTest) AssertPaymentSucceedWithTimeout(stream rpc.PaymentClient,
+    timeout time.Duration) *lnrpc.Payment {
+
+    return h.assertPaymentStatusWithTimeout(
+        stream, lnrpc.Payment_SUCCEEDED, timeout,
+    )
+}
+
 // assertPaymentStatusWithTimeout takes a client stream and asserts the payment
 // is in desired status before the specified timeout. The payment found is
 // returned once succeeded.

@@ -889,7 +949,9 @@ func (h *HarnessTest) assertPaymentStatusWithTimeout(stream rpc.PaymentClient,
     err := wait.NoError(func() error {
         // Consume one message. This will raise an error if the message
         // is not received within DefaultTimeout.
-        payment, err := h.ReceivePaymentUpdate(stream)
+        payment, err := h.receivePaymentUpdateWithTimeout(
+            stream, timeout,
+        )
         if err != nil {
             return fmt.Errorf("received error from payment "+
                 "stream: %s", err)

@@ -918,8 +980,17 @@
 func (h *HarnessTest) ReceivePaymentUpdate(
     stream rpc.PaymentClient) (*lnrpc.Payment, error) {

+    return h.receivePaymentUpdateWithTimeout(stream, DefaultTimeout)
+}
+
+// receivePaymentUpdateWithTimeout waits until a message is received on the
+// payment client stream or the timeout is reached.
+func (h *HarnessTest) receivePaymentUpdateWithTimeout(stream rpc.PaymentClient,
+    timeout time.Duration) (*lnrpc.Payment, error) {
+
     chanMsg := make(chan *lnrpc.Payment, 1)
     errChan := make(chan error, 1)

     go func() {
         // Consume one message. This will block until the message is
         // received.

@@ -933,7 +1004,7 @@ func (h *HarnessTest) ReceivePaymentUpdate(
     }()

     select {
-    case <-time.After(DefaultTimeout):
+    case <-time.After(timeout):
         require.Fail(h, "timeout", "timeout waiting for payment update")
         return nil, nil
@@ -2074,3 +2145,145 @@ func (h *HarnessTest) ReceiveSendToRouteUpdate(
         return updateMsg, nil
     }
 }
+
+// AssertInvoiceEqual asserts that two lnrpc.Invoices are equivalent. A custom
+// comparison function is defined for these tests, since proto message returned
+// from unary and streaming RPCs (as of protobuf 1.23.0 and grpc 1.29.1) aren't
+// consistent with the private fields set on the messages. As a result, we
+// avoid using require.Equal and test only the actual data members.
+func (h *HarnessTest) AssertInvoiceEqual(a, b *lnrpc.Invoice) {
+    // Ensure the HTLCs are sorted properly before attempting to compare.
+    sort.Slice(a.Htlcs, func(i, j int) bool {
+        return a.Htlcs[i].ChanId < a.Htlcs[j].ChanId
+    })
+    sort.Slice(b.Htlcs, func(i, j int) bool {
+        return b.Htlcs[i].ChanId < b.Htlcs[j].ChanId
+    })
+
+    require.Equal(h, a.Memo, b.Memo)
+    require.Equal(h, a.RPreimage, b.RPreimage)
+    require.Equal(h, a.RHash, b.RHash)
+    require.Equal(h, a.Value, b.Value)
+    require.Equal(h, a.ValueMsat, b.ValueMsat)
+    require.Equal(h, a.CreationDate, b.CreationDate)
+    require.Equal(h, a.SettleDate, b.SettleDate)
+    require.Equal(h, a.PaymentRequest, b.PaymentRequest)
+    require.Equal(h, a.DescriptionHash, b.DescriptionHash)
+    require.Equal(h, a.Expiry, b.Expiry)
+    require.Equal(h, a.FallbackAddr, b.FallbackAddr)
+    require.Equal(h, a.CltvExpiry, b.CltvExpiry)
+    require.Equal(h, a.RouteHints, b.RouteHints)
+    require.Equal(h, a.Private, b.Private)
+    require.Equal(h, a.AddIndex, b.AddIndex)
+    require.Equal(h, a.SettleIndex, b.SettleIndex)
+    require.Equal(h, a.AmtPaidSat, b.AmtPaidSat)
+    require.Equal(h, a.AmtPaidMsat, b.AmtPaidMsat)
+    require.Equal(h, a.State, b.State)
+    require.Equal(h, a.Features, b.Features)
+    require.Equal(h, a.IsKeysend, b.IsKeysend)
+    require.Equal(h, a.PaymentAddr, b.PaymentAddr)
+    require.Equal(h, a.IsAmp, b.IsAmp)
+
+    require.Equal(h, len(a.Htlcs), len(b.Htlcs))
+    for i := range a.Htlcs {
+        htlcA, htlcB := a.Htlcs[i], b.Htlcs[i]
+        require.Equal(h, htlcA.ChanId, htlcB.ChanId)
+        require.Equal(h, htlcA.HtlcIndex, htlcB.HtlcIndex)
+        require.Equal(h, htlcA.AmtMsat, htlcB.AmtMsat)
+        require.Equal(h, htlcA.AcceptHeight, htlcB.AcceptHeight)
+        require.Equal(h, htlcA.AcceptTime, htlcB.AcceptTime)
+        require.Equal(h, htlcA.ResolveTime, htlcB.ResolveTime)
+        require.Equal(h, htlcA.ExpiryHeight, htlcB.ExpiryHeight)
+        require.Equal(h, htlcA.State, htlcB.State)
+        require.Equal(h, htlcA.CustomRecords, htlcB.CustomRecords)
+        require.Equal(h, htlcA.MppTotalAmtMsat, htlcB.MppTotalAmtMsat)
+        require.Equal(h, htlcA.Amp, htlcB.Amp)
+    }
+}
+
+// AssertUTXOInWallet asserts that a given UTXO can be found in the node's
+// wallet.
+func (h *HarnessTest) AssertUTXOInWallet(hn *node.HarnessNode,
+    op *lnrpc.OutPoint, account string) {
+
+    err := wait.NoError(func() error {
+        utxos := h.GetUTXOs(hn, account)
+
+        err := fmt.Errorf("tx with hash %x not found", op.TxidBytes)
+        for _, utxo := range utxos {
+            if !bytes.Equal(utxo.Outpoint.TxidBytes, op.TxidBytes) {
+                continue
+            }
+
+            err = fmt.Errorf("tx with output index %v not found",
+                op.OutputIndex)
+            if utxo.Outpoint.OutputIndex != op.OutputIndex {
+                continue
+            }
+
+            return nil
+        }
+
+        return err
+    }, DefaultTimeout)
+
+    require.NoErrorf(h, err, "outpoint %v not found in %s's wallet",
+        op, hn.Name())
+}
+
+// AssertWalletAccountBalance asserts that the unconfirmed and confirmed
+// balance for the given account is satisfied by the WalletBalance and
+// ListUnspent RPCs. The unconfirmed balance is not checked for neutrino nodes.
+func (h *HarnessTest) AssertWalletAccountBalance(hn *node.HarnessNode,
+    account string, confirmedBalance, unconfirmedBalance int64) {
+
+    err := wait.NoError(func() error {
+        balanceResp := hn.RPC.WalletBalance()
+        require.Contains(h, balanceResp.AccountBalance, account)
+        accountBalance := balanceResp.AccountBalance[account]
+
+        // Check confirmed balance.
+        if accountBalance.ConfirmedBalance != confirmedBalance {
+            return fmt.Errorf("expected confirmed balance %v, "+
+                "got %v", confirmedBalance,
+                accountBalance.ConfirmedBalance)
+        }
+
+        utxos := h.GetUTXOsConfirmed(hn, account)
+        var totalConfirmedVal int64
+        for _, utxo := range utxos {
+            totalConfirmedVal += utxo.AmountSat
+        }
+        if totalConfirmedVal != confirmedBalance {
+            return fmt.Errorf("expected total confirmed utxo "+
+                "balance %v, got %v", confirmedBalance,
+                totalConfirmedVal)
+        }
+
+        // Skip unconfirmed balance checks for neutrino nodes.
+        if h.IsNeutrinoBackend() {
+            return nil
+        }
+
+        // Check unconfirmed balance.
+        if accountBalance.UnconfirmedBalance != unconfirmedBalance {
+            return fmt.Errorf("expected unconfirmed balance %v, "+
+                "got %v", unconfirmedBalance,
+                accountBalance.UnconfirmedBalance)
+        }
+
+        utxos = h.GetUTXOsUnconfirmed(hn, account)
+        var totalUnconfirmedVal int64
+        for _, utxo := range utxos {
+            totalUnconfirmedVal += utxo.AmountSat
+        }
+        if totalUnconfirmedVal != unconfirmedBalance {
+            return fmt.Errorf("expected total unconfirmed utxo "+
+                "balance %v, got %v", unconfirmedBalance,
+                totalUnconfirmedVal)
+        }
+
+        return nil
+    }, DefaultTimeout)
+    require.NoError(h, err, "timeout checking wallet account balance")
+}
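A usage sketch for the new wallet assertions (not from the commit); `ht`, `alice`, `importedAccount`, `confirmedSats`, `txid` and `outputIndex` are placeholders from the surrounding test:

    // Hypothetical sketch: after funding an imported account, check that the
    // account balance and the UTXO set agree.
    ht.AssertWalletAccountBalance(alice, importedAccount, confirmedSats, 0)
    ht.AssertUTXOInWallet(alice, &lnrpc.OutPoint{
        TxidBytes:   txid[:],
        OutputIndex: uint32(outputIndex),
    }, importedAccount)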
@@ -11,6 +11,7 @@ import (
     "testing"
     "time"

+    "github.com/btcsuite/btcd/btcjson"
     "github.com/btcsuite/btcd/btcutil"
     "github.com/btcsuite/btcd/chaincfg"
     "github.com/btcsuite/btcd/chaincfg/chainhash"

@@ -253,6 +254,16 @@ func (h *HarnessMiner) GetRawTransaction(txid *chainhash.Hash) *btcutil.Tx {
     return tx
 }

+// GetRawTransactionVerbose makes a RPC call to the miner's
+// GetRawTransactionVerbose and asserts.
+func (h *HarnessMiner) GetRawTransactionVerbose(
+    txid *chainhash.Hash) *btcjson.TxRawResult {
+
+    tx, err := h.Client.GetRawTransactionVerbose(txid)
+    require.NoErrorf(h, err, "failed to get raw tx verbose: %v", txid)
+    return tx
+}
+
 // AssertTxInMempool asserts a given transaction can be found in the mempool.
 func (h *HarnessMiner) AssertTxInMempool(txid *chainhash.Hash) *wire.MsgTx {
     var msgTx *wire.MsgTx
@@ -135,15 +135,24 @@ func NewHarnessNode(t *testing.T, cfg *BaseNodeConfig) (*HarnessNode, error) {
     }, nil
 }

-// InitRPCClients initializes a list of RPC clients for the node.
-func (hn *HarnessNode) InitRPCClients(c *grpc.ClientConn) {
+// Initialize creates a list of new RPC clients using the passed connection,
+// initializes the node's internal state and creates a topology watcher.
+func (hn *HarnessNode) Initialize(c *grpc.ClientConn) {
     hn.conn = c

     // Init all the rpc clients.
     hn.RPC = rpc.NewHarnessRPC(hn.runCtx, hn.T, c, hn.Name())

-    // Init the node's internal state.
-    hn.State = newState(hn.RPC)
+    // Init the node's state.
+    //
+    // If we already have a state, it means we are restarting the node and
+    // we will only reset its internal states. Otherwise we'll create a new
+    // state.
+    if hn.State != nil {
+        hn.State.resetEphermalStates(hn.RPC)
+    } else {
+        hn.State = newState(hn.RPC)
+    }

     // Init the topology watcher.
     hn.Watcher = newNodeWatcher(hn.RPC, hn.State)

@@ -164,11 +173,11 @@ func (hn *HarnessNode) String() string {
     type nodeCfg struct {
         LogFilenamePrefix string
         ExtraArgs         []string
-        HasSeed           bool
+        SkipUnlock        bool
+        Password          []byte
         P2PPort           int
         RPCPort           int
         RESTPort          int
-        ProfilePort       int
         AcceptKeySend     bool
         FeeURL            string
     }

@@ -185,6 +194,8 @@ func (hn *HarnessNode) String() string {
         PubKey:  hn.PubKeyStr,
         State:   hn.State,
         NodeCfg: nodeCfg{
+            SkipUnlock:        hn.Cfg.SkipUnlock,
+            Password:          hn.Cfg.Password,
             LogFilenamePrefix: hn.Cfg.LogFilenamePrefix,
             ExtraArgs:         hn.Cfg.ExtraArgs,
             P2PPort:           hn.Cfg.P2PPort,

@@ -434,8 +445,9 @@ func (hn *HarnessNode) Start(ctxt context.Context) error {
         return err
     }

-    // Init all the RPC clients.
-    hn.InitRPCClients(conn)
+    // Init the node by creating the RPC clients, initializing node's
+    // internal state and watcher.
+    hn.Initialize(conn)

     // Wait till the server is starting.
     if err := hn.WaitUntilStarted(); err != nil {

@@ -477,8 +489,9 @@ func (hn *HarnessNode) InitNode(macBytes []byte) error {
         }
     }

-    // Init all the RPC clients.
-    hn.InitRPCClients(conn)
+    // Init the node by creating the RPC clients, initializing node's
+    // internal state and watcher.
+    hn.Initialize(conn)

     // Wait till the server is starting.
     if err := hn.WaitUntilStarted(); err != nil {
@@ -241,6 +241,14 @@ func (s *State) updatePaymentStats() {
     }
     resp := s.rpc.ListPayments(req)

+    // Exit early when the there's no payment.
+    //
+    // NOTE: we need to exit early here because when there's no invoice the
+    // `LastOffsetIndex` will be zero.
+    if len(resp.Payments) == 0 {
+        return
+    }
+
     s.Payment.LastIndexOffset = resp.LastIndexOffset
     for _, payment := range resp.Payments {
         if payment.Status == lnrpc.Payment_FAILED ||

@@ -261,6 +269,14 @@ func (s *State) updateInvoiceStats() {
     }
     resp := s.rpc.ListInvoices(req)

+    // Exit early when the there's no invoice.
+    //
+    // NOTE: we need to exit early here because when there's no invoice the
+    // `LastOffsetIndex` will be zero.
+    if len(resp.Invoices) == 0 {
+        return
+    }
+
     s.Invoice.LastIndexOffset = resp.LastIndexOffset
     for _, invoice := range resp.Invoices {
         if invoice.State == lnrpc.Invoice_SETTLED ||

@@ -328,3 +344,17 @@ func (s *State) String() string {

     return fmt.Sprintf("\n%s", stateBytes)
 }
+
+// resetEphermalStates resets the current state with a new HarnessRPC and empty
+// private fields which are used to track state only valid for the last test.
+func (s *State) resetEphermalStates(rpc *rpc.HarnessRPC) {
+    s.rpc = rpc
+
+    // Reset ephermal states which are used to record info from finished
+    // tests.
+    s.openChans = &SyncMap[wire.OutPoint, []*OpenChannelUpdate]{}
+    s.closedChans = &SyncMap[wire.OutPoint, *lnrpc.ClosedChannelUpdate]{}
+    s.numChanUpdates = &SyncMap[wire.OutPoint, int]{}
+    s.nodeUpdates = &SyncMap[string, []*lnrpc.NodeUpdate]{}
+    s.policyUpdates = &SyncMap[wire.OutPoint, PolicyUpdate]{}
+}
@@ -1,5 +1,44 @@
 package rpc

+import (
+    "github.com/lightningnetwork/lnd/lnrpc/chainrpc"
+)
+
 // =====================
 // ChainClient related RPCs.
 // =====================
+
+type ConfNtfnClient chainrpc.ChainNotifier_RegisterConfirmationsNtfnClient
+
+// RegisterConfirmationsNtfn creates a notification client to watch a given
+// transaction being confirmed.
+func (h *HarnessRPC) RegisterConfirmationsNtfn(
+    req *chainrpc.ConfRequest) ConfNtfnClient {
+
+    // RegisterConfirmationsNtfn needs to have the context alive for the
+    // entire test case as the returned client will be used for send and
+    // receive events stream. Thus we use runCtx here instead of a timeout
+    // context.
+    client, err := h.ChainClient.RegisterConfirmationsNtfn(
+        h.runCtx, req,
+    )
+    h.NoError(err, "RegisterConfirmationsNtfn")
+
+    return client
+}
+
+type SpendClient chainrpc.ChainNotifier_RegisterSpendNtfnClient
+
+// RegisterSpendNtfn creates a notification client to watch a given
+// transaction being spent.
+func (h *HarnessRPC) RegisterSpendNtfn(req *chainrpc.SpendRequest) SpendClient {
+    // RegisterSpendNtfn needs to have the context alive for the entire
+    // test case as the returned client will be used for send and receive
+    // events stream. Thus we use runCtx here instead of a timeout context.
+    client, err := h.ChainClient.RegisterSpendNtfn(
+        h.runCtx, req,
+    )
+    h.NoError(err, "RegisterSpendNtfn")
+
+    return client
+}
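A usage sketch for the confirmation notifier (not from the commit); `ht`, `alice`, `txid`, `pkScript` and `currentHeight` are placeholders from the surrounding test:

    // Hypothetical sketch: register for one confirmation of a transaction and
    // wait for the event after mining a block.
    client := alice.RPC.RegisterConfirmationsNtfn(&chainrpc.ConfRequest{
        Txid:       txid[:],
        Script:     pkScript,
        NumConfs:   1,
        HeightHint: uint32(currentHeight),
    })

    ht.MineBlocksAndAssertNumTxes(1, 1)

    // The first event on the stream should carry the confirmation details.
    event, err := client.Recv()
    require.NoError(ht, err)
    require.NotNil(ht, event.GetConf())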
|
@ -631,3 +631,54 @@ func (h *HarnessRPC) RegisterRPCMiddleware() (MiddlewareClient,
|
|||||||
|
|
||||||
return stream, cancel
|
return stream, cancel
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type ChannelEventsClient lnrpc.Lightning_SubscribeChannelEventsClient
|
||||||
|
|
||||||
|
// SubscribeChannelEvents creates a subscription client for channel events and
|
||||||
|
// asserts its creation.
|
||||||
|
func (h *HarnessRPC) SubscribeChannelEvents() ChannelEventsClient {
|
||||||
|
req := &lnrpc.ChannelEventSubscription{}
|
||||||
|
|
||||||
|
// SubscribeChannelEvents needs to have the context alive for the
|
||||||
|
// entire test case as the returned client will be used for send and
|
||||||
|
// receive events stream. Thus we use runCtx here instead of a timeout
|
||||||
|
// context.
|
||||||
|
client, err := h.LN.SubscribeChannelEvents(h.runCtx, req)
|
||||||
|
h.NoError(err, "SubscribeChannelEvents")
|
||||||
|
|
||||||
|
return client
|
||||||
|
}
|
||||||
|
|
||||||
|
type CustomMessageClient lnrpc.Lightning_SubscribeCustomMessagesClient
|
||||||
|
|
||||||
|
// SubscribeCustomMessages creates a subscription client for custom messages.
|
||||||
|
func (h *HarnessRPC) SubscribeCustomMessages() (CustomMessageClient,
|
||||||
|
context.CancelFunc) {
|
||||||
|
|
||||||
|
ctxt, cancel := context.WithCancel(h.runCtx)
|
||||||
|
|
||||||
|
req := &lnrpc.SubscribeCustomMessagesRequest{}
|
||||||
|
|
||||||
|
// SubscribeCustomMessages needs to have the context alive for the
|
||||||
|
// entire test case as the returned client will be used for send and
|
||||||
|
// receive events stream. Thus we use runCtx here instead of a timeout
|
||||||
|
// context.
|
||||||
|
stream, err := h.LN.SubscribeCustomMessages(ctxt, req)
|
||||||
|
h.NoError(err, "SubscribeCustomMessages")
|
||||||
|
|
||||||
|
return stream, cancel
|
||||||
|
}
|
||||||
|
|
||||||
|
// SendCustomMessage makes a RPC call to the node's SendCustomMessage and
|
||||||
|
// returns the response.
|
||||||
|
func (h *HarnessRPC) SendCustomMessage(
|
||||||
|
req *lnrpc.SendCustomMessageRequest) *lnrpc.SendCustomMessageResponse {
|
||||||
|
|
||||||
|
ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
resp, err := h.LN.SendCustomMessage(ctxt, req)
|
||||||
|
h.NoError(err, "SendCustomMessage")
|
||||||
|
|
||||||
|
return resp
|
||||||
|
}
|
||||||
|
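A usage sketch for the custom message wrappers (not from the commit); `ht`, `alice` and `bob` are the usual harness placeholders, and the message type is assumed to fall in the custom (>= 32768) range:

    // Hypothetical sketch: Bob subscribes to custom messages, Alice sends one.
    msgClient, cancel := bob.RPC.SubscribeCustomMessages()
    defer cancel()

    alice.RPC.SendCustomMessage(&lnrpc.SendCustomMessageRequest{
        Peer: bob.PubKey[:],
        Type: 32768,
        Data: []byte("itest ping"),
    })

    // Wait for Bob to receive it.
    msg, err := msgClient.Recv()
    require.NoError(ht, err)
    require.Equal(ht, []byte("itest ping"), msg.Data)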
@@ -150,3 +150,43 @@ func (h *HarnessRPC) XImportMissionControlAssertErr(
     _, err := h.Router.XImportMissionControl(ctxt, req)
     require.Error(h, err, "expect an error from x import mission control")
 }
+
+// BuildRoute makes a RPC call to the node's RouterClient and asserts.
+func (h *HarnessRPC) BuildRoute(
+    req *routerrpc.BuildRouteRequest) *routerrpc.BuildRouteResponse {
+
+    ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
+    defer cancel()
+
+    resp, err := h.Router.BuildRoute(ctxt, req)
+    h.NoError(err, "BuildRoute")
+
+    return resp
+}
+
+type InterceptorClient routerrpc.Router_HtlcInterceptorClient
+
+// HtlcInterceptor makes a RPC call to the node's RouterClient and asserts.
+func (h *HarnessRPC) HtlcInterceptor() (InterceptorClient, context.CancelFunc) {
+    // HtlcInterceptor needs to have the context alive for the entire test
+    // case as the returned client will be used for send and receive events
+    // stream. Thus we use cancel context here instead of a timeout
+    // context.
+    ctxt, cancel := context.WithCancel(h.runCtx)
+    resp, err := h.Router.HtlcInterceptor(ctxt)
+    h.NoError(err, "HtlcInterceptor")
+
+    return resp, cancel
+}
+
+type TrackPaymentsClient routerrpc.Router_TrackPaymentsClient
+
+// TrackPayments makes a RPC call to the node's RouterClient and asserts.
+func (h *HarnessRPC) TrackPayments(
+    req *routerrpc.TrackPaymentsRequest) TrackPaymentsClient {
+
+    resp, err := h.Router.TrackPayments(h.runCtx, req)
+    h.NoError(err, "TrackPayments")
+
+    return resp
+}
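A usage sketch for the payment tracking wrapper (not from the commit); `ht`, `alice` and `payReq` are placeholders from the surrounding test:

    // Hypothetical sketch: stream payment state changes while settling an
    // invoice.
    tracker := alice.RPC.TrackPayments(&routerrpc.TrackPaymentsRequest{
        NoInflightUpdates: false,
    })

    ht.CompletePaymentRequests(alice, []string{payReq})

    // The first update for the payment should not report a failure.
    update, err := tracker.Recv()
    require.NoError(ht, err)
    require.NotEqual(ht, lnrpc.Payment_FAILED, update.Status)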
@@ -4,13 +4,39 @@ import (
     "context"

     "github.com/lightningnetwork/lnd/lnrpc/signrpc"
+    "github.com/stretchr/testify/require"
 )

 // =====================
 // Signer related RPCs.
 // =====================

-// SignOutputRaw makes a RPC call to node's SignOutputRaw and asserts.
+// DeriveSharedKey makes a RPC call to the node's SignerClient and asserts.
+func (h *HarnessRPC) DeriveSharedKey(
+    req *signrpc.SharedKeyRequest) *signrpc.SharedKeyResponse {
+
+    ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
+    defer cancel()
+
+    resp, err := h.Signer.DeriveSharedKey(ctxt, req)
+    h.NoError(err, "DeriveSharedKey")
+
+    return resp
+}
+
+// DeriveSharedKeyErr makes a RPC call to the node's SignerClient and asserts
+// there is an error.
+func (h *HarnessRPC) DeriveSharedKeyErr(req *signrpc.SharedKeyRequest) error {
+    ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
+    defer cancel()
+
+    _, err := h.Signer.DeriveSharedKey(ctxt, req)
+    require.Error(h, err, "expected error from calling DeriveSharedKey")
+
+    return err
+}
+
+// SignOutputRaw makes a RPC call to the node's SignerClient and asserts.
 func (h *HarnessRPC) SignOutputRaw(req *signrpc.SignReq) *signrpc.SignResp {
     ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
     defer cancel()

@@ -20,3 +46,152 @@ func (h *HarnessRPC) SignOutputRaw(req *signrpc.SignReq) *signrpc.SignResp {

     return resp
 }
+
+// SignOutputRawErr makes a RPC call to the node's SignerClient and asserts an
+// error is returned.
+func (h *HarnessRPC) SignOutputRawErr(req *signrpc.SignReq) error {
+    ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
+    defer cancel()
+
+    _, err := h.Signer.SignOutputRaw(ctxt, req)
+    require.Error(h, err, "expect to fail to sign raw output")
+
+    return err
+}
+
+// MuSig2CreateSession makes a RPC call to the node's SignerClient and asserts.
+func (h *HarnessRPC) MuSig2CreateSession(
+    req *signrpc.MuSig2SessionRequest) *signrpc.MuSig2SessionResponse {
+
+    ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
+    defer cancel()
+
+    resp, err := h.Signer.MuSig2CreateSession(ctxt, req)
+    h.NoError(err, "MuSig2CreateSession")
+
+    return resp
+}
+
+// MuSig2CombineKeys makes a RPC call to the node's SignerClient and asserts.
+//
+//nolint:lll
+func (h *HarnessRPC) MuSig2CombineKeys(
+    req *signrpc.MuSig2CombineKeysRequest) *signrpc.MuSig2CombineKeysResponse {
+
+    ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
+    defer cancel()
+
+    resp, err := h.Signer.MuSig2CombineKeys(ctxt, req)
+    h.NoError(err, "MuSig2CombineKeys")
+
+    return resp
+}
+
+// MuSig2RegisterNonces makes a RPC call to the node's SignerClient and asserts.
+//
+//nolint:lll
+func (h *HarnessRPC) MuSig2RegisterNonces(
+    req *signrpc.MuSig2RegisterNoncesRequest) *signrpc.MuSig2RegisterNoncesResponse {
+
+    ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
+    defer cancel()
+
+    resp, err := h.Signer.MuSig2RegisterNonces(ctxt, req)
+    h.NoError(err, "MuSig2RegisterNonces")
+
+    return resp
+}
+
+// MuSig2Sign makes a RPC call to the node's SignerClient and asserts.
+func (h *HarnessRPC) MuSig2Sign(
+    req *signrpc.MuSig2SignRequest) *signrpc.MuSig2SignResponse {
+
+    ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
+    defer cancel()
+
+    resp, err := h.Signer.MuSig2Sign(ctxt, req)
+    h.NoError(err, "MuSig2Sign")
+
+    return resp
+}
+
+// MuSig2SignErr makes a RPC call to the node's SignerClient and asserts an
+// error is returned.
+func (h *HarnessRPC) MuSig2SignErr(req *signrpc.MuSig2SignRequest) error {
+    ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
+    defer cancel()
+
+    _, err := h.Signer.MuSig2Sign(ctxt, req)
+    require.Error(h, err, "expect an error")
+
+    return err
+}
+
+// MuSig2CombineSig makes a RPC call to the node's SignerClient and asserts.
+func (h *HarnessRPC) MuSig2CombineSig(
+    r *signrpc.MuSig2CombineSigRequest) *signrpc.MuSig2CombineSigResponse {
+
+    ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
+    defer cancel()
+
+    resp, err := h.Signer.MuSig2CombineSig(ctxt, r)
+    h.NoError(err, "MuSig2CombineSig")
+
+    return resp
+}
+
+// MuSig2Cleanup makes a RPC call to the node's SignerClient and asserts.
+func (h *HarnessRPC) MuSig2Cleanup(
+    req *signrpc.MuSig2CleanupRequest) *signrpc.MuSig2CleanupResponse {
+
+    ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
+    defer cancel()
+
+    resp, err := h.Signer.MuSig2Cleanup(ctxt, req)
+    h.NoError(err, "MuSig2Cleanup")
+
+    return resp
+}
+
+// SignMessageSigner makes a RPC call to the node's SignerClient and asserts.
+//
+// NOTE: there's already `SignMessage` in `h.LN`.
+func (h *HarnessRPC) SignMessageSigner(
+    req *signrpc.SignMessageReq) *signrpc.SignMessageResp {
+
+    ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
+    defer cancel()
+
+    resp, err := h.Signer.SignMessage(ctxt, req)
+    h.NoError(err, "SignMessage")
+
+    return resp
+}
+
+// VerifyMessageSigner makes a RPC call to the node's SignerClient and asserts.
+//
+// NOTE: there's already `VerifyMessageSigner` in `h.LN`.
+func (h *HarnessRPC) VerifyMessageSigner(
+    req *signrpc.VerifyMessageReq) *signrpc.VerifyMessageResp {
+
+    ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
+    defer cancel()
+
+    resp, err := h.Signer.VerifyMessage(ctxt, req)
+    h.NoError(err, "VerifyMessage")
+
+    return resp
+}
+
+// ComputeInputScript makes a RPC call to the node's SignerClient and asserts.
+func (h *HarnessRPC) ComputeInputScript(
+    req *signrpc.SignReq) *signrpc.InputScriptResp {
+
+    ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
+    defer cancel()
+
+    resp, err := h.Signer.ComputeInputScript(ctxt, req)
+    h.NoError(err, "ComputeInputScript")
+
+    return resp
+}
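A usage sketch for the signer sign/verify wrappers (not from the commit); `ht`, `alice`, the key locator values and `signerPubKey` are placeholders:

    // Hypothetical sketch: sign a message with the signer RPC and verify it.
    signResp := alice.RPC.SignMessageSigner(&signrpc.SignMessageReq{
        Msg:    []byte("itest message"),
        KeyLoc: &signrpc.KeyLocator{KeyFamily: 6, KeyIndex: 0},
    })

    verifyResp := alice.RPC.VerifyMessageSigner(&signrpc.VerifyMessageReq{
        Msg:       []byte("itest message"),
        Signature: signResp.Signature,
        // Assumed: the raw public key matching the key locator above.
        Pubkey: signerPubKey,
    })
    require.True(ht, verifyResp.Valid)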
@@ -163,3 +163,85 @@ func (h *HarnessRPC) PublishTransaction(
     return resp
 }
+
+// BumpFee makes a RPC call to the node's WalletKitClient and asserts.
+func (h *HarnessRPC) BumpFee(
+    req *walletrpc.BumpFeeRequest) *walletrpc.BumpFeeResponse {
+
+    ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
+    defer cancel()
+
+    resp, err := h.WalletKit.BumpFee(ctxt, req)
+    h.NoError(err, "BumpFee")
+
+    return resp
+}
+
+// ListAccounts makes a RPC call to the node's WalletKitClient and asserts.
+func (h *HarnessRPC) ListAccounts(
+    req *walletrpc.ListAccountsRequest) *walletrpc.ListAccountsResponse {
+
+    ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
+    defer cancel()
+
+    resp, err := h.WalletKit.ListAccounts(ctxt, req)
+    h.NoError(err, "ListAccounts")
+
+    return resp
+}
+
+// ImportAccount makes a RPC call to the node's WalletKitClient and asserts.
+func (h *HarnessRPC) ImportAccount(
+    req *walletrpc.ImportAccountRequest) *walletrpc.ImportAccountResponse {
+
+    ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
+    defer cancel()
+
+    resp, err := h.WalletKit.ImportAccount(ctxt, req)
+    h.NoError(err, "ImportAccount")
+
+    return resp
+}
+
+// ImportPublicKey makes a RPC call to the node's WalletKitClient and asserts.
+//
+//nolint:lll
+func (h *HarnessRPC) ImportPublicKey(
+    req *walletrpc.ImportPublicKeyRequest) *walletrpc.ImportPublicKeyResponse {
+
+    ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
+    defer cancel()
+
+    resp, err := h.WalletKit.ImportPublicKey(ctxt, req)
+    h.NoError(err, "ImportPublicKey")
+
+    return resp
+}
+
+// SignPsbt makes a RPC call to the node's WalletKitClient and asserts.
+func (h *HarnessRPC) SignPsbt(
+    req *walletrpc.SignPsbtRequest) *walletrpc.SignPsbtResponse {
+
+    ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
+    defer cancel()
+
+    resp, err := h.WalletKit.SignPsbt(ctxt, req)
+    h.NoError(err, "SignPsbt")
+
+    return resp
+}
+
+// ImportTapscript makes a RPC call to the node's WalletKitClient and asserts.
+//
+//nolint:lll
+func (h *HarnessRPC) ImportTapscript(
+    req *walletrpc.ImportTapscriptRequest) *walletrpc.ImportTapscriptResponse {
+
+    ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
+    defer cancel()
+
+    resp, err := h.WalletKit.ImportTapscript(ctxt, req)
+    h.NoError(err, "ImportTapscript")
+
+    return resp
+}
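For orientation, the `HarnessRPC` wrappers added above all follow the same pattern: build a timeout context, call the underlying gRPC client, and assert the error inside the wrapper so the calling test stays linear. A minimal usage sketch follows; the test name, node setup and key family are illustrative assumptions rather than code from this commit, and it assumes the signer wrapper shown at the top of this diff is named `SignMessage`.

```go
// Hypothetical itest-style sketch (not part of this PR) showing how the new
// asserting RPC wrappers are meant to be called from a migrated test.
func testSignVerifySketch(ht *lntemp.HarnessTest) {
	alice := ht.NewNode("Alice", nil)

	msg := []byte("itest refactor")
	signResp := alice.RPC.SignMessage(&signrpc.SignMessageReq{
		Msg: msg,
		KeyLoc: &signrpc.KeyLocator{
			KeyFamily: 6, // Illustrative key family only.
			KeyIndex:  0,
		},
	})

	verifyResp := alice.RPC.VerifyMessageSigner(&signrpc.VerifyMessageReq{
		Msg:       msg,
		Signature: signResp.Signature,
		Pubkey:    alice.PubKey[:],
	})

	// The wrappers assert RPC errors internally, so the test only needs
	// to check the returned values.
	require.True(ht, verifyResp.Valid, "signature should verify")
}
```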
@@ -949,60 +949,6 @@ func assertNumPendingChannels(t *harnessTest, node *lntest.HarnessNode,
     require.NoErrorf(t.t, err, "got err: %v", predErr)
 }

-// verifyCloseUpdate is used to verify that a closed channel update is of the
-// expected type.
-func verifyCloseUpdate(chanUpdate *lnrpc.ChannelEventUpdate,
-    closeType lnrpc.ChannelCloseSummary_ClosureType,
-    closeInitiator lnrpc.Initiator) error {
-
-    // We should receive one inactive and one closed notification
-    // for each channel.
-    switch update := chanUpdate.Channel.(type) {
-    case *lnrpc.ChannelEventUpdate_InactiveChannel:
-        if chanUpdate.Type != lnrpc.ChannelEventUpdate_INACTIVE_CHANNEL {
-            return fmt.Errorf("update type mismatch: expected %v, got %v",
-                lnrpc.ChannelEventUpdate_INACTIVE_CHANNEL,
-                chanUpdate.Type)
-        }
-
-    case *lnrpc.ChannelEventUpdate_ClosedChannel:
-        if chanUpdate.Type !=
-            lnrpc.ChannelEventUpdate_CLOSED_CHANNEL {
-
-            return fmt.Errorf("update type mismatch: expected %v, got %v",
-                lnrpc.ChannelEventUpdate_CLOSED_CHANNEL,
-                chanUpdate.Type)
-        }
-
-        if update.ClosedChannel.CloseType != closeType {
-            return fmt.Errorf("channel closure type "+
-                "mismatch: expected %v, got %v",
-                closeType,
-                update.ClosedChannel.CloseType)
-        }
-
-        if update.ClosedChannel.CloseInitiator != closeInitiator {
-            return fmt.Errorf("expected close intiator: %v, got: %v",
-                closeInitiator,
-                update.ClosedChannel.CloseInitiator)
-        }
-
-    case *lnrpc.ChannelEventUpdate_FullyResolvedChannel:
-        if chanUpdate.Type != lnrpc.ChannelEventUpdate_FULLY_RESOLVED_CHANNEL {
-            return fmt.Errorf("update type mismatch: expected %v, got %v",
-                lnrpc.ChannelEventUpdate_FULLY_RESOLVED_CHANNEL,
-                chanUpdate.Type)
-        }
-
-    default:
-        return fmt.Errorf("channel update channel of wrong type, "+
-            "expected closed channel, got %T",
-            update)
-    }
-
-    return nil
-}
-
 // assertNodeNumChannels polls the provided node's list channels rpc until it
 // reaches the desired number of total channels.
 func assertNodeNumChannels(t *harnessTest, node *lntest.HarnessNode,
@@ -275,6 +275,10 @@ var allTestCasesTemp = []*lntemp.TestCase{
         Name: "open channel reorg test",
         TestFunc: testOpenChannelAfterReorg,
     },
+    {
+        Name: "psbt channel funding",
+        TestFunc: testPsbtChanFunding,
+    },
     {
         Name: "psbt channel funding external",
         TestFunc: testPsbtChanFundingExternal,
@@ -283,6 +287,10 @@ var allTestCasesTemp = []*lntemp.TestCase{
         Name: "psbt channel funding single step",
         TestFunc: testPsbtChanFundingSingleStep,
     },
+    {
+        Name: "sign psbt",
+        TestFunc: testSignPsbt,
+    },
     {
         Name: "resolution handoff",
         TestFunc: testResHandoff,
@@ -369,4 +377,128 @@ var allTestCasesTemp = []*lntemp.TestCase{
         Name: "wipe forwarding packages",
         TestFunc: testWipeForwardingPackages,
     },
+    {
+        Name: "switch circuit persistence",
+        TestFunc: testSwitchCircuitPersistence,
+    },
+    {
+        Name: "switch offline delivery",
+        TestFunc: testSwitchOfflineDelivery,
+    },
+    {
+        Name: "switch offline delivery persistence",
+        TestFunc: testSwitchOfflineDeliveryPersistence,
+    },
+    {
+        Name: "switch offline delivery outgoing offline",
+        TestFunc: testSwitchOfflineDeliveryOutgoingOffline,
+    },
+    {
+        Name: "sendtoroute multi path payment",
+        TestFunc: testSendToRouteMultiPath,
+    },
+    {
+        Name: "send multi path payment",
+        TestFunc: testSendMultiPathPayment,
+    },
+    {
+        Name: "sendpayment amp invoice",
+        TestFunc: testSendPaymentAMPInvoice,
+    },
+    {
+        Name: "sendpayment amp invoice repeat",
+        TestFunc: testSendPaymentAMPInvoiceRepeat,
+    },
+    {
+        Name: "send payment amp",
+        TestFunc: testSendPaymentAMP,
+    },
+    {
+        Name: "sendtoroute amp",
+        TestFunc: testSendToRouteAMP,
+    },
+    {
+        Name: "forward interceptor dedup htlcs",
+        TestFunc: testForwardInterceptorDedupHtlc,
+    },
+    {
+        Name: "forward interceptor",
+        TestFunc: testForwardInterceptorBasic,
+    },
+    {
+        Name: "zero conf channel open",
+        TestFunc: testZeroConfChannelOpen,
+    },
+    {
+        Name: "option scid alias",
+        TestFunc: testOptionScidAlias,
+    },
+    {
+        Name: "scid alias channel update",
+        TestFunc: testUpdateChannelPolicyScidAlias,
+    },
+    {
+        Name: "scid alias upgrade",
+        TestFunc: testOptionScidUpgrade,
+    },
+    {
+        Name: "nonstd sweep",
+        TestFunc: testNonstdSweep,
+    },
+    {
+        Name: "multiple channel creation and update subscription",
+        TestFunc: testBasicChannelCreationAndUpdates,
+    },
+    {
+        Name: "derive shared key",
+        TestFunc: testDeriveSharedKey,
+    },
+    {
+        Name: "sign output raw",
+        TestFunc: testSignOutputRaw,
+    },
+    {
+        Name: "sign verify message",
+        TestFunc: testSignVerifyMessage,
+    },
+    {
+        Name: "cpfp",
+        TestFunc: testCPFP,
+    },
+    {
+        Name: "taproot",
+        TestFunc: testTaproot,
+    },
+    {
+        Name: "wallet import account",
+        TestFunc: testWalletImportAccount,
+    },
+    {
+        Name: "wallet import pubkey",
+        TestFunc: testWalletImportPubKey,
+    },
+    {
+        Name: "async payments benchmark",
+        TestFunc: testAsyncPayments,
+    },
+    {
+        Name: "remote signer",
+        TestFunc: testRemoteSigner,
+    },
+    {
+        Name: "taproot coop close",
+        TestFunc: testTaprootCoopClose,
+    },
+    {
+        Name: "trackpayments",
+        TestFunc: testTrackPayments,
+    },
+    {
+        Name: "open channel fee policy",
+        TestFunc: testOpenChannelUpdateFeePolicy,
+    },
+    {
+        Name: "custom message",
+        TestFunc: testCustomMessage,
+    },
 }
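Each entry appended to `allTestCasesTemp` above registers an already-migrated test with the temporary `lntemp` harness. For illustration, this is roughly how such a registry is typically consumed by a driver; the function name and loop shape here are assumptions for this sketch, not code from this commit.

```go
// Hypothetical driver sketch: run every registered case as its own subtest so
// failures are reported per test name.
func runTempTestCases(t *testing.T, ht *lntemp.HarnessTest) {
	for _, tc := range allTestCasesTemp {
		tc := tc
		t.Run(tc.Name, func(st *testing.T) {
			// Each case gets a harness scoped to this subtest but
			// backed by the same underlying network.
			sht := ht.Subtest(st)
			tc.TestFunc(sht)
		})
	}
}
```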
@@ -1,10 +1,7 @@
 package itest

 import (
-    "context"
-    "crypto/rand"
     "encoding/hex"
-    "sort"
     "testing"
     "time"

@@ -14,40 +11,41 @@ import (
     "github.com/lightningnetwork/lnd/lnrpc"
     "github.com/lightningnetwork/lnd/lnrpc/invoicesrpc"
     "github.com/lightningnetwork/lnd/lnrpc/routerrpc"
-    "github.com/lightningnetwork/lnd/lntest"
+    "github.com/lightningnetwork/lnd/lntemp"
+    "github.com/lightningnetwork/lnd/lntemp/node"
     "github.com/lightningnetwork/lnd/lntypes"
     "github.com/stretchr/testify/require"
 )

 // testSendPaymentAMPInvoice tests that we can send an AMP payment to a
 // specified AMP invoice using SendPaymentV2.
-func testSendPaymentAMPInvoice(net *lntest.NetworkHarness, t *harnessTest) {
-    t.t.Run("native payaddr", func(t *testing.T) {
-        tt := newHarnessTest(t, net)
-        testSendPaymentAMPInvoiceCase(net, tt, false)
+func testSendPaymentAMPInvoice(ht *lntemp.HarnessTest) {
+    succeed := ht.Run("native payaddr", func(t *testing.T) {
+        tt := ht.Subtest(t)
+        testSendPaymentAMPInvoiceCase(tt, false)
     })
-    t.t.Run("external payaddr", func(t *testing.T) {
-        tt := newHarnessTest(t, net)
-        testSendPaymentAMPInvoiceCase(net, tt, true)
+
+    // Abort the test if failed.
+    if !succeed {
+        return
+    }
+
+    ht.Run("external payaddr", func(t *testing.T) {
+        tt := ht.Subtest(t)
+        testSendPaymentAMPInvoiceCase(tt, true)
     })
 }

-func testSendPaymentAMPInvoiceCase(net *lntest.NetworkHarness, t *harnessTest,
+func testSendPaymentAMPInvoiceCase(ht *lntemp.HarnessTest,
     useExternalPayAddr bool) {

-    ctxb := context.Background()
-
-    ctx := newMppTestContext(t, net)
-    defer ctx.shutdownNodes()
+    mts := newMppTestScenario(ht)

     // Subscribe to bob's invoices. Do this early in the test to make sure
     // that the subscription has actually been completed when we add an
     // invoice. Otherwise the notification will be missed.
     req := &lnrpc.InvoiceSubscription{}
-    ctxc, cancelSubscription := context.WithCancel(ctxb)
-    bobInvoiceSubscription, err := ctx.bob.SubscribeInvoices(ctxc, req)
-    require.NoError(t.t, err)
-    defer cancelSubscription()
+    bobInvoiceSubscription := mts.bob.RPC.SubscribeInvoices(req)

     const paymentAmt = btcutil.Amount(300000)

@@ -61,49 +59,45 @@ func testSendPaymentAMPInvoiceCase(net *lntest.NetworkHarness, t *harnessTest,
     //             \       /
     //              \__ Dave ____/
     //
-    ctx.openChannel(ctx.carol, ctx.bob, 135000)
-    ctx.openChannel(ctx.alice, ctx.carol, 235000)
-    ctx.openChannel(ctx.dave, ctx.bob, 135000)
-    ctx.openChannel(ctx.alice, ctx.dave, 135000)
-    ctx.openChannel(ctx.eve, ctx.bob, 135000)
-    ctx.openChannel(ctx.carol, ctx.eve, 135000)
-
-    defer ctx.closeChannels()
-
-    ctx.waitForChannels()
-
-    addInvoiceResp, err := ctx.bob.AddInvoice(context.Background(), &lnrpc.Invoice{
+    mppReq := &mppOpenChannelRequest{
+        amtAliceCarol: 235000,
+        amtAliceDave:  135000,
+        amtCarolBob:   135000,
+        amtCarolEve:   135000,
+        amtDaveBob:    135000,
+        amtEveBob:     135000,
+    }
+    mts.openChannels(mppReq)
+    chanPointAliceDave := mts.channelPoints[1]
+    chanPointDaveBob := mts.channelPoints[4]
+
+    invoice := &lnrpc.Invoice{
         Value: int64(paymentAmt),
         IsAmp: true,
-    })
-    require.NoError(t.t, err)
+    }
+    addInvoiceResp := mts.bob.RPC.AddInvoice(invoice)

     // Ensure we get a notification of the invoice being added by Bob.
-    rpcInvoice, err := bobInvoiceSubscription.Recv()
-    require.NoError(t.t, err)
+    rpcInvoice := ht.ReceiveInvoiceUpdate(bobInvoiceSubscription)

-    require.False(t.t, rpcInvoice.Settled) // nolint:staticcheck
-    require.Equal(t.t, lnrpc.Invoice_OPEN, rpcInvoice.State)
-    require.Equal(t.t, int64(0), rpcInvoice.AmtPaidSat)
-    require.Equal(t.t, int64(0), rpcInvoice.AmtPaidMsat)
-
-    require.Equal(t.t, 0, len(rpcInvoice.Htlcs))
+    require.False(ht, rpcInvoice.Settled)
+    require.Equal(ht, lnrpc.Invoice_OPEN, rpcInvoice.State)
+    require.Equal(ht, int64(0), rpcInvoice.AmtPaidSat)
+    require.Equal(ht, int64(0), rpcInvoice.AmtPaidMsat)
+    require.Equal(ht, 0, len(rpcInvoice.Htlcs))

     // Increase Dave's fee to make the test deterministic. Otherwise it
     // would be unpredictable whether pathfinding would go through Charlie
     // or Dave for the first shard.
-    _, err = ctx.dave.UpdateChannelPolicy(
-        context.Background(),
-        &lnrpc.PolicyUpdateRequest{
-            Scope:         &lnrpc.PolicyUpdateRequest_Global{Global: true},
-            BaseFeeMsat:   500000,
-            FeeRate:       0.001,
-            TimeLockDelta: 40,
-        },
-    )
-    if err != nil {
-        t.Fatalf("dave policy update: %v", err)
-    }
+    expectedPolicy := mts.updateDaveGlobalPolicy()
+
+    // Make sure Alice has heard it for both Dave's channels.
+    ht.AssertChannelPolicyUpdate(
+        mts.alice, mts.dave, expectedPolicy, chanPointAliceDave, false,
+    )
+    ht.AssertChannelPolicyUpdate(
+        mts.alice, mts.dave, expectedPolicy, chanPointDaveBob, false,
+    )

     // Generate an external payment address when attempting to pseudo-reuse
     // an AMP invoice. When using an external payment address, we'll also
@@ -116,19 +110,16 @@ func testSendPaymentAMPInvoiceCase(net *lntest.NetworkHarness, t *harnessTest,
     )
     if useExternalPayAddr {
         expNumInvoices = 2
-        externalPayAddr = make([]byte, 32)
-        _, err = rand.Read(externalPayAddr)
-        require.NoError(t.t, err)
+        externalPayAddr = ht.Random32Bytes()
     }

-    payment := sendAndAssertSuccess(
-        t, ctx.alice, &routerrpc.SendPaymentRequest{
-            PaymentRequest: addInvoiceResp.PaymentRequest,
-            PaymentAddr:    externalPayAddr,
-            TimeoutSeconds: 60,
-            FeeLimitMsat:   noFeeLimitMsat,
-        },
-    )
+    sendReq := &routerrpc.SendPaymentRequest{
+        PaymentRequest: addInvoiceResp.PaymentRequest,
+        PaymentAddr:    externalPayAddr,
+        TimeoutSeconds: 60,
+        FeeLimitMsat:   noFeeLimitMsat,
+    }
+    payment := ht.SendPaymentAssertSettled(mts.alice, sendReq)

     // Check that Alice split the payment in at least three shards. Because
     // the hand-off of the htlc to the link is asynchronous (via a mailbox),
@@ -145,232 +136,201 @@ func testSendPaymentAMPInvoiceCase(net *lntest.NetworkHarness, t *harnessTest,
     }

     const minExpectedShards = 3
-    if succeeded < minExpectedShards {
-        t.Fatalf("expected at least %v shards, but got %v",
-            minExpectedShards, succeeded)
-    }
+    require.GreaterOrEqual(ht, succeeded, minExpectedShards,
+        "expected num of shards not reached")

     // When an external payment address is supplied, we'll get an extra
     // notification for the JIT inserted invoice, since it differs from the
     // original.
     if useExternalPayAddr {
-        _, err = bobInvoiceSubscription.Recv()
-        require.NoError(t.t, err)
+        ht.ReceiveInvoiceUpdate(bobInvoiceSubscription)
     }

     // There should now be a settle event for the invoice.
-    rpcInvoice, err = bobInvoiceSubscription.Recv()
-    require.NoError(t.t, err)
+    rpcInvoice = ht.ReceiveInvoiceUpdate(bobInvoiceSubscription)

     // Also fetch Bob's invoice from ListInvoices and assert it is equal to
     // the one received via the subscription.
-    invoiceResp, err := ctx.bob.ListInvoices(
-        ctxb, &lnrpc.ListInvoiceRequest{},
-    )
-    require.NoError(t.t, err)
-    require.Equal(t.t, expNumInvoices, len(invoiceResp.Invoices))
-    assertInvoiceEqual(t.t, rpcInvoice, invoiceResp.Invoices[expNumInvoices-1])
+    invoices := ht.AssertNumInvoices(mts.bob, expNumInvoices)
+    ht.AssertInvoiceEqual(rpcInvoice, invoices[expNumInvoices-1])

     // Assert that the invoice is settled for the total payment amount and
     // has the correct payment address.
-    require.True(t.t, rpcInvoice.Settled) // nolint:staticcheck
-    require.Equal(t.t, lnrpc.Invoice_SETTLED, rpcInvoice.State)
-    require.Equal(t.t, int64(paymentAmt), rpcInvoice.AmtPaidSat)
-    require.Equal(t.t, int64(paymentAmt*1000), rpcInvoice.AmtPaidMsat)
+    require.True(ht, rpcInvoice.Settled)
+    require.Equal(ht, lnrpc.Invoice_SETTLED, rpcInvoice.State)
+    require.Equal(ht, int64(paymentAmt), rpcInvoice.AmtPaidSat)
+    require.Equal(ht, int64(paymentAmt*1000), rpcInvoice.AmtPaidMsat)

     // Finally, assert that the same set id is recorded for each htlc, and
     // that the preimage hash pair is valid.
     var setID []byte
-    require.Equal(t.t, succeeded, len(rpcInvoice.Htlcs))
+    require.Equal(ht, succeeded, len(rpcInvoice.Htlcs))
     for _, htlc := range rpcInvoice.Htlcs {
-        require.NotNil(t.t, htlc.Amp)
+        require.NotNil(ht, htlc.Amp)
         if setID == nil {
             setID = make([]byte, 32)
             copy(setID, htlc.Amp.SetId)
         }
-        require.Equal(t.t, setID, htlc.Amp.SetId)
+        require.Equal(ht, setID, htlc.Amp.SetId)

         // Parse the child hash and child preimage, and assert they are
         // well-formed.
         childHash, err := lntypes.MakeHash(htlc.Amp.Hash)
-        require.NoError(t.t, err)
+        require.NoError(ht, err)
         childPreimage, err := lntypes.MakePreimage(htlc.Amp.Preimage)
-        require.NoError(t.t, err)
+        require.NoError(ht, err)

         // Assert that the preimage actually matches the hashes.
         validPreimage := childPreimage.Matches(childHash)
-        require.True(t.t, validPreimage)
+        require.True(ht, validPreimage)
     }

     // The set ID we extract above should be shown in the final settled
     // state.
     ampState := rpcInvoice.AmpInvoiceState[hex.EncodeToString(setID)]
-    require.Equal(t.t, lnrpc.InvoiceHTLCState_SETTLED, ampState.State)
+    require.Equal(ht, lnrpc.InvoiceHTLCState_SETTLED, ampState.State)
+
+    // Finally, close all channels.
+    mts.closeChannels()
 }

 // testSendPaymentAMPInvoiceRepeat tests that it's possible to pay an AMP
 // invoice multiple times by having the client generate a new setID each time.
-func testSendPaymentAMPInvoiceRepeat(net *lntest.NetworkHarness,
-    t *harnessTest) {
-
+func testSendPaymentAMPInvoiceRepeat(ht *lntemp.HarnessTest) {
     // In this basic test, we'll only need two nodes as we want to
     // primarily test the recurring payment feature. So we'll re-use the
-    carol := net.NewNode(t.t, "Carol", nil)
-    defer shutdownAndAssert(net, t, carol)
+    carol := ht.NewNode("Carol", nil)

     // Send Carol enough coins to be able to open a channel to Dave.
-    net.SendCoins(t.t, btcutil.SatoshiPerBitcoin, carol)
+    ht.FundCoins(btcutil.SatoshiPerBitcoin, carol)

-    dave := net.NewNode(t.t, "Dave", nil)
-    defer shutdownAndAssert(net, t, dave)
-
-    // Before we start the test, we'll ensure both sides are connected to
-    // the funding flow can properly be executed.
-    net.EnsureConnected(t.t, carol, dave)
+    dave := ht.NewNode("Dave", nil)

     // Set up an invoice subscription so we can be notified when Dave
     // receives his repeated payments.
     req := &lnrpc.InvoiceSubscription{}
-    ctxb := context.Background()
-    ctxc, cancelSubscription := context.WithCancel(ctxb)
-    invSubscription, err := dave.SubscribeInvoices(ctxc, req)
-    require.NoError(t.t, err)
-    defer cancelSubscription()
+    invSubscription := dave.RPC.SubscribeInvoices(req)
+
+    // Before we start the test, we'll ensure both sides are connected to
+    // the funding flow can properly be executed.
+    ht.EnsureConnected(carol, dave)

     // Establish a channel between Carol and Dave.
     chanAmt := btcutil.Amount(100_000)
-    chanPoint := openChannelAndAssert(
-        t, net, carol, dave,
-        lntest.OpenChannelParams{
-            Amt: chanAmt,
-        },
+    ht.OpenChannel(
+        carol, dave, lntemp.OpenChannelParams{Amt: chanAmt},
     )
-    err = carol.WaitForNetworkChannelOpen(chanPoint)
-    require.NoError(t.t, err, "carol didn't report channel")
-    err = dave.WaitForNetworkChannelOpen(chanPoint)
-    require.NoError(t.t, err, "dave didn't report channel")

     // Create an AMP invoice of a trivial amount, that we'll pay repeatedly
     // in this integration test.
     paymentAmt := 10000
-    addInvoiceResp, err := dave.AddInvoice(ctxb, &lnrpc.Invoice{
+    invoice := &lnrpc.Invoice{
         Value: int64(paymentAmt),
         IsAmp: true,
-    })
-    require.NoError(t.t, err)
+    }
+    addInvoiceResp := dave.RPC.AddInvoice(invoice)

     // We should get an initial notification that the HTLC has been added.
-    rpcInvoice, err := invSubscription.Recv()
-    require.NoError(t.t, err)
-    require.False(t.t, rpcInvoice.Settled) // nolint:staticcheck
-    require.Equal(t.t, lnrpc.Invoice_OPEN, rpcInvoice.State)
-    require.Equal(t.t, int64(0), rpcInvoice.AmtPaidSat)
-    require.Equal(t.t, int64(0), rpcInvoice.AmtPaidMsat)
-
-    require.Equal(t.t, 0, len(rpcInvoice.Htlcs))
+    rpcInvoice := ht.ReceiveInvoiceUpdate(invSubscription)
+    require.False(ht, rpcInvoice.Settled)
+    require.Equal(ht, lnrpc.Invoice_OPEN, rpcInvoice.State)
+    require.Equal(ht, int64(0), rpcInvoice.AmtPaidSat)
+    require.Equal(ht, int64(0), rpcInvoice.AmtPaidMsat)
+    require.Equal(ht, 0, len(rpcInvoice.Htlcs))

     // Now we'll use Carol to pay the invoice that Dave created.
-    _ = sendAndAssertSuccess(
-        t, carol, &routerrpc.SendPaymentRequest{
-            PaymentRequest: addInvoiceResp.PaymentRequest,
-            TimeoutSeconds: 60,
-            FeeLimitMsat:   noFeeLimitMsat,
-        },
+    ht.CompletePaymentRequests(
+        carol, []string{addInvoiceResp.PaymentRequest},
     )

     // Dave should get a notification that the invoice has been settled.
-    invoiceNtfn, err := invSubscription.Recv()
-    require.NoError(t.t, err)
+    invoiceNtfn := ht.ReceiveInvoiceUpdate(invSubscription)

     // The notification should signal that the invoice is now settled, and
     // should also include the set ID, and show the proper amount paid.
-    require.True(t.t, invoiceNtfn.Settled) // nolint:staticcheck
-    require.Equal(t.t, lnrpc.Invoice_SETTLED, invoiceNtfn.State)
-    require.Equal(t.t, paymentAmt, int(invoiceNtfn.AmtPaidSat))
-    require.Equal(t.t, 1, len(invoiceNtfn.AmpInvoiceState))
+    require.True(ht, invoiceNtfn.Settled)
+    require.Equal(ht, lnrpc.Invoice_SETTLED, invoiceNtfn.State)
+    require.Equal(ht, paymentAmt, int(invoiceNtfn.AmtPaidSat))
+    require.Equal(ht, 1, len(invoiceNtfn.AmpInvoiceState))
     var firstSetID []byte
     for setIDStr, ampState := range invoiceNtfn.AmpInvoiceState {
         firstSetID, _ = hex.DecodeString(setIDStr)
-        require.Equal(t.t, lnrpc.InvoiceHTLCState_SETTLED, ampState.State)
+        require.Equal(ht, lnrpc.InvoiceHTLCState_SETTLED,
+            ampState.State)
     }

     // Pay the invoice again, we should get another notification that Dave
     // has received another payment.
-    _ = sendAndAssertSuccess(
-        t, carol, &routerrpc.SendPaymentRequest{
-            PaymentRequest: addInvoiceResp.PaymentRequest,
-            TimeoutSeconds: 60,
-            FeeLimitMsat:   noFeeLimitMsat,
-        },
+    ht.CompletePaymentRequests(
+        carol, []string{addInvoiceResp.PaymentRequest},
     )

     // Dave should get another notification.
-    invoiceNtfn, err = invSubscription.Recv()
-    require.NoError(t.t, err)
+    invoiceNtfn = ht.ReceiveInvoiceUpdate(invSubscription)

     // The invoice should still be shown as settled, and also include the
     // information about this newly generated setID, showing 2x the amount
     // paid.
-    require.True(t.t, invoiceNtfn.Settled) // nolint:staticcheck
-    require.Equal(t.t, paymentAmt*2, int(invoiceNtfn.AmtPaidSat))
+    require.True(ht, invoiceNtfn.Settled)
+    require.Equal(ht, paymentAmt*2, int(invoiceNtfn.AmtPaidSat))

     var secondSetID []byte
     for setIDStr, ampState := range invoiceNtfn.AmpInvoiceState {
         secondSetID, _ = hex.DecodeString(setIDStr)
-        require.Equal(t.t, lnrpc.InvoiceHTLCState_SETTLED, ampState.State)
+        require.Equal(ht, lnrpc.InvoiceHTLCState_SETTLED,
+            ampState.State)
     }

     // The returned invoice should only include a single HTLC since we
     // return the "projected" sub-invoice for a given setID.
-    require.Equal(t.t, 1, len(invoiceNtfn.Htlcs))
+    require.Equal(ht, 1, len(invoiceNtfn.Htlcs))

     // However the AMP state index should show that there've been two
     // repeated payments to this invoice so far.
-    require.Equal(t.t, 2, len(invoiceNtfn.AmpInvoiceState))
+    require.Equal(ht, 2, len(invoiceNtfn.AmpInvoiceState))

     // Now we'll look up the invoice using the new LookupInvoice2 RPC call
     // by the set ID of each of the invoices.
-    subInvoice1, err := dave.LookupInvoiceV2(ctxb, &invoicesrpc.LookupInvoiceMsg{
+    msg := &invoicesrpc.LookupInvoiceMsg{
         InvoiceRef: &invoicesrpc.LookupInvoiceMsg_SetId{
             SetId: firstSetID,
         },
         LookupModifier: invoicesrpc.LookupModifier_HTLC_SET_ONLY,
-    })
-    require.Nil(t.t, err)
-    subInvoice2, err := dave.LookupInvoiceV2(ctxb, &invoicesrpc.LookupInvoiceMsg{
+    }
+    subInvoice1 := dave.RPC.LookupInvoiceV2(msg)
+    msg = &invoicesrpc.LookupInvoiceMsg{
         InvoiceRef: &invoicesrpc.LookupInvoiceMsg_SetId{
             SetId: secondSetID,
         },
         LookupModifier: invoicesrpc.LookupModifier_HTLC_SET_ONLY,
-    })
-    require.Nil(t.t, err)
+    }
+    subInvoice2 := dave.RPC.LookupInvoiceV2(msg)

     // Each invoice should only show a single HTLC present, as we passed
     // the HTLC set only modifier.
-    require.Equal(t.t, 1, len(subInvoice1.Htlcs))
-    require.Equal(t.t, 1, len(subInvoice2.Htlcs))
+    require.Equal(ht, 1, len(subInvoice1.Htlcs))
+    require.Equal(ht, 1, len(subInvoice2.Htlcs))

     // If we look up the same invoice, by its payment address, but now with
     // the HTLC blank modifier, then none of them should be returned.
-    rootInvoice, err := dave.LookupInvoiceV2(ctxb, &invoicesrpc.LookupInvoiceMsg{
+    msg = &invoicesrpc.LookupInvoiceMsg{
         InvoiceRef: &invoicesrpc.LookupInvoiceMsg_PaymentAddr{
             PaymentAddr: addInvoiceResp.PaymentAddr,
         },
         LookupModifier: invoicesrpc.LookupModifier_HTLC_SET_BLANK,
-    })
-    require.Nil(t.t, err)
-    require.Equal(t.t, 0, len(rootInvoice.Htlcs))
+    }
+    rootInvoice := dave.RPC.LookupInvoiceV2(msg)
+    require.Equal(ht, 0, len(rootInvoice.Htlcs))

     // If we look up the same invoice, by its payment address, but without
     // that modified, then we should get all the relevant HTLCs.
-    rootInvoice, err = dave.LookupInvoiceV2(ctxb,
-        &invoicesrpc.LookupInvoiceMsg{
-            InvoiceRef: &invoicesrpc.LookupInvoiceMsg_PaymentAddr{
-                PaymentAddr: addInvoiceResp.PaymentAddr,
-            },
-        })
-    require.Nil(t.t, err)
-    require.Equal(t.t, 2, len(rootInvoice.Htlcs))
+    msg = &invoicesrpc.LookupInvoiceMsg{
+        InvoiceRef: &invoicesrpc.LookupInvoiceMsg_PaymentAddr{
+            PaymentAddr: addInvoiceResp.PaymentAddr,
+        },
+    }
+    rootInvoice = dave.RPC.LookupInvoiceV2(msg)
+    require.Equal(ht, 2, len(rootInvoice.Htlcs))

     // Finally, we'll test that if we subscribe for notifications of
     // settled invoices, we get a backlog, which includes the invoice we
@@ -379,29 +339,22 @@ func testSendPaymentAMPInvoiceRepeat(net *lntest.NetworkHarness,
     req = &lnrpc.InvoiceSubscription{
         SettleIndex: 1,
     }
-    ctxc, cancelSubscription2 := context.WithCancel(ctxb)
-    invSub2, err := dave.SubscribeInvoices(ctxc, req)
-    require.NoError(t.t, err)
-    defer cancelSubscription2()
+    invSub2 := dave.RPC.SubscribeInvoices(req)

     // The first invoice we get back should match the state of the invoice
     // after our second payment: amt updated, but only a single HTLC shown
     // through.
-    backlogInv, _ := invSub2.Recv()
-    require.Equal(t.t, 1, len(backlogInv.Htlcs))
-    require.Equal(t.t, 2, len(backlogInv.AmpInvoiceState))
-    require.True(t.t, backlogInv.Settled) // nolint:staticcheck
-    require.Equal(t.t, paymentAmt*2, int(backlogInv.AmtPaidSat))
+    backlogInv := ht.ReceiveInvoiceUpdate(invSub2)
+    require.Equal(ht, 1, len(backlogInv.Htlcs))
+    require.Equal(ht, 2, len(backlogInv.AmpInvoiceState))
+    require.True(ht, backlogInv.Settled)
+    require.Equal(ht, paymentAmt*2, int(backlogInv.AmtPaidSat))
 }

 // testSendPaymentAMP tests that we can send an AMP payment to a specified
 // destination using SendPaymentV2.
-func testSendPaymentAMP(net *lntest.NetworkHarness, t *harnessTest) {
-    ctxb := context.Background()
-
-    ctx := newMppTestContext(t, net)
-    defer ctx.shutdownNodes()
+func testSendPaymentAMP(ht *lntemp.HarnessTest) {
+    mts := newMppTestScenario(ht)

     const paymentAmt = btcutil.Amount(300000)

     // Set up a network with three different paths Alice <-> Bob. Channel
@@ -414,44 +367,37 @@ func testSendPaymentAMP(net *lntest.NetworkHarness, t *harnessTest) {
     //             \       /
     //              \__ Dave ____/
     //
-    ctx.openChannel(ctx.carol, ctx.bob, 135000)
-    ctx.openChannel(ctx.alice, ctx.carol, 235000)
-    ctx.openChannel(ctx.dave, ctx.bob, 135000)
-    ctx.openChannel(ctx.alice, ctx.dave, 135000)
-    ctx.openChannel(ctx.eve, ctx.bob, 135000)
-    ctx.openChannel(ctx.carol, ctx.eve, 135000)
-
-    defer ctx.closeChannels()
-
-    ctx.waitForChannels()
+    mppReq := &mppOpenChannelRequest{
+        amtAliceCarol: 235000,
+        amtAliceDave:  135000,
+        amtCarolBob:   135000,
+        amtCarolEve:   135000,
+        amtDaveBob:    135000,
+        amtEveBob:     135000,
+    }
+    mts.openChannels(mppReq)
+    chanPointAliceDave := mts.channelPoints[1]

     // Increase Dave's fee to make the test deterministic. Otherwise it
     // would be unpredictable whether pathfinding would go through Charlie
     // or Dave for the first shard.
-    _, err := ctx.dave.UpdateChannelPolicy(
-        context.Background(),
-        &lnrpc.PolicyUpdateRequest{
-            Scope:         &lnrpc.PolicyUpdateRequest_Global{Global: true},
-            BaseFeeMsat:   500000,
-            FeeRate:       0.001,
-            TimeLockDelta: 40,
-        },
-    )
-    if err != nil {
-        t.Fatalf("dave policy update: %v", err)
-    }
-
-    payment := sendAndAssertSuccess(
-        t, ctx.alice, &routerrpc.SendPaymentRequest{
-            Dest:           ctx.bob.PubKey[:],
-            Amt:            int64(paymentAmt),
-            FinalCltvDelta: chainreg.DefaultBitcoinTimeLockDelta,
-            TimeoutSeconds: 60,
-            FeeLimitMsat:   noFeeLimitMsat,
-            Amp:            true,
-        },
-    )
+    expectedPolicy := mts.updateDaveGlobalPolicy()
+
+    // Make sure Alice has heard it.
+    ht.AssertChannelPolicyUpdate(
+        mts.alice, mts.dave, expectedPolicy, chanPointAliceDave, false,
+    )
+
+    sendReq := &routerrpc.SendPaymentRequest{
+        Dest:           mts.bob.PubKey[:],
+        Amt:            int64(paymentAmt),
+        FinalCltvDelta: chainreg.DefaultBitcoinTimeLockDelta,
+        TimeoutSeconds: 60,
+        FeeLimitMsat:   noFeeLimitMsat,
+        Amp:            true,
+    }
+    payment := ht.SendPaymentAssertSettled(mts.alice, sendReq)

     // Check that Alice split the payment in at least three shards. Because
     // the hand-off of the htlc to the link is asynchronous (via a mailbox),
     // there is some non-determinism in the process. Depending on whether
@@ -467,64 +413,55 @@ func testSendPaymentAMP(net *lntest.NetworkHarness, t *harnessTest) {
     }

     const minExpectedShards = 3
-    if succeeded < minExpectedShards {
-        t.Fatalf("expected at least %v shards, but got %v",
-            minExpectedShards, succeeded)
-    }
+    require.GreaterOrEqual(ht, succeeded, minExpectedShards,
+        "expected num of shards not reached")

-    // Fetch Bob's invoices.
-    invoiceResp, err := ctx.bob.ListInvoices(
-        ctxb, &lnrpc.ListInvoiceRequest{},
-    )
-    require.NoError(t.t, err)
-
-    // There should only be one invoice.
-    require.Equal(t.t, 1, len(invoiceResp.Invoices))
-    rpcInvoice := invoiceResp.Invoices[0]
+    // Fetch Bob's invoices. There should only be one invoice.
+    invoices := ht.AssertNumInvoices(mts.bob, 1)
+    rpcInvoice := invoices[0]

     // Assert that the invoice is settled for the total payment amount and
     // has the correct payment address.
-    require.True(t.t, rpcInvoice.Settled) // nolint:staticcheck
-    require.Equal(t.t, lnrpc.Invoice_SETTLED, rpcInvoice.State)
-    require.Equal(t.t, int64(paymentAmt), rpcInvoice.AmtPaidSat)
-    require.Equal(t.t, int64(paymentAmt*1000), rpcInvoice.AmtPaidMsat)
+    require.True(ht, rpcInvoice.Settled)
+    require.Equal(ht, lnrpc.Invoice_SETTLED, rpcInvoice.State)
+    require.Equal(ht, int64(paymentAmt), rpcInvoice.AmtPaidSat)
+    require.Equal(ht, int64(paymentAmt*1000), rpcInvoice.AmtPaidMsat)

     // Finally, assert that the same set id is recorded for each htlc, and
     // that the preimage hash pair is valid.
     var setID []byte
-    require.Equal(t.t, succeeded, len(rpcInvoice.Htlcs))
+    require.Equal(ht, succeeded, len(rpcInvoice.Htlcs))
     for _, htlc := range rpcInvoice.Htlcs {
-        require.NotNil(t.t, htlc.Amp)
+        require.NotNil(ht, htlc.Amp)
         if setID == nil {
             setID = make([]byte, 32)
             copy(setID, htlc.Amp.SetId)
         }
-        require.Equal(t.t, setID, htlc.Amp.SetId)
+        require.Equal(ht, setID, htlc.Amp.SetId)

         // Parse the child hash and child preimage, and assert they are
         // well-formed.
         childHash, err := lntypes.MakeHash(htlc.Amp.Hash)
-        require.NoError(t.t, err)
+        require.NoError(ht, err)
         childPreimage, err := lntypes.MakePreimage(htlc.Amp.Preimage)
-        require.NoError(t.t, err)
+        require.NoError(ht, err)

         // Assert that the preimage actually matches the hashes.
         validPreimage := childPreimage.Matches(childHash)
-        require.True(t.t, validPreimage)
+        require.True(ht, validPreimage)
     }

     // The set ID we extract above should be shown in the final settled
     // state.
     ampState := rpcInvoice.AmpInvoiceState[hex.EncodeToString(setID)]
-    require.Equal(t.t, lnrpc.InvoiceHTLCState_SETTLED, ampState.State)
+    require.Equal(ht, lnrpc.InvoiceHTLCState_SETTLED, ampState.State)
+
+    // Finally, close all channels.
+    mts.closeChannels()
 }

-func testSendToRouteAMP(net *lntest.NetworkHarness, t *harnessTest) {
-    ctxb := context.Background()
-
-    ctx := newMppTestContext(t, net)
-    defer ctx.shutdownNodes()
+func testSendToRouteAMP(ht *lntemp.HarnessTest) {
+    mts := newMppTestScenario(ht)

     const (
         paymentAmt = btcutil.Amount(300000)
         numShards  = 3
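The `testSendToRouteAMP` changes below hand-build routes via `mts.buildRoute(shardAmt, mts.alice, hops)` rather than the old context-threaded helper. As a rough sketch, such a helper presumably wraps the router's `BuildRoute` RPC along the following lines; the asserting wrapper name `BuildRoute` on the node's RPC struct is an assumption for illustration, not taken from this commit.

```go
// Hypothetical sketch of a buildRoute-style helper: ask the sending node's
// router to assemble a route through the given hop pubkeys for a fixed amount.
func buildRouteSketch(amt btcutil.Amount, from *node.HarnessNode,
	hops []*node.HarnessNode) *lnrpc.Route {

	req := &routerrpc.BuildRouteRequest{
		AmtMsat:        int64(amt * 1000),
		FinalCltvDelta: chainreg.DefaultBitcoinTimeLockDelta,
	}
	for _, hop := range hops {
		req.HopPubkeys = append(req.HopPubkeys, hop.PubKey[:])
	}

	// Assumed asserting wrapper, in the same style as the other HarnessRPC
	// methods added earlier in this PR.
	resp := from.RPC.BuildRoute(req)

	return resp.Route
}
```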
@@ -532,63 +469,50 @@ func testSendToRouteAMP(net *lntest.NetworkHarness, t *harnessTest) {
         chanAmt    = shardAmt * 3 / 2
     )

+    // Subscribe to bob's invoices.
+    req := &lnrpc.InvoiceSubscription{}
+    bobInvoiceSubscription := mts.bob.RPC.SubscribeInvoices(req)
+
     // Set up a network with three different paths Alice <-> Bob.
     //              _ Eve _
     //             /       \
     // Alice -- Carol ---- Bob
     //             \       /
     //              \__ Dave ____/
-    //
-    ctx.openChannel(ctx.carol, ctx.bob, chanAmt)
-    ctx.openChannel(ctx.dave, ctx.bob, chanAmt)
-    ctx.openChannel(ctx.alice, ctx.dave, chanAmt)
-    ctx.openChannel(ctx.eve, ctx.bob, chanAmt)
-    ctx.openChannel(ctx.carol, ctx.eve, chanAmt)
-
-    // Since the channel Alice-> Carol will have to carry two
-    // shards, we make it larger.
-    ctx.openChannel(ctx.alice, ctx.carol, chanAmt+shardAmt)
-
-    defer ctx.closeChannels()
-
-    ctx.waitForChannels()
-
-    // Subscribe to bob's invoices.
-    req := &lnrpc.InvoiceSubscription{}
-    ctxc, cancelSubscription := context.WithCancel(ctxb)
-    bobInvoiceSubscription, err := ctx.bob.SubscribeInvoices(ctxc, req)
-    require.NoError(t.t, err)
-    defer cancelSubscription()
+    ///
+    mppReq := &mppOpenChannelRequest{
+        // Since the channel Alice-> Carol will have to carry two
+        // shards, we make it larger.
+        amtAliceCarol: chanAmt + shardAmt,
+        amtAliceDave:  chanAmt,
+        amtCarolBob:   chanAmt,
+        amtCarolEve:   chanAmt,
+        amtDaveBob:    chanAmt,
+        amtEveBob:     chanAmt,
+    }
+    mts.openChannels(mppReq)

     // We'll send shards along three routes from Alice.
-    sendRoutes := [numShards][]*lntest.HarnessNode{
-        {ctx.carol, ctx.bob},
-        {ctx.dave, ctx.bob},
-        {ctx.carol, ctx.eve, ctx.bob},
+    sendRoutes := [numShards][]*node.HarnessNode{
+        {mts.carol, mts.bob},
+        {mts.dave, mts.bob},
+        {mts.carol, mts.eve, mts.bob},
     }

-    payAddr := make([]byte, 32)
-    _, err = rand.Read(payAddr)
-    require.NoError(t.t, err)
-
-    setID := make([]byte, 32)
-    _, err = rand.Read(setID)
-    require.NoError(t.t, err)
+    payAddr := ht.Random32Bytes()
+    setID := ht.Random32Bytes()

     var sharer amp.Sharer
-    sharer, err = amp.NewSeedSharer()
-    require.NoError(t.t, err)
+    sharer, err := amp.NewSeedSharer()
+    require.NoError(ht, err)

     childPreimages := make(map[lntypes.Preimage]uint32)
     responses := make(chan *lnrpc.HTLCAttempt, len(sendRoutes))

     // Define a closure for sending each of the three shards.
-    sendShard := func(i int, hops []*lntest.HarnessNode) {
+    sendShard := func(i int, hops []*node.HarnessNode) {
         // Build a route for the specified hops.
-        r, err := ctx.buildRoute(ctxb, shardAmt, ctx.alice, hops)
-        if err != nil {
-            t.Fatalf("unable to build route: %v", err)
-        }
+        r := mts.buildRoute(shardAmt, mts.alice, hops)

         // Set the MPP records to indicate this is a payment shard.
         hop := r.Hops[len(r.Hops)-1]
@@ -602,7 +526,7 @@ func testSendToRouteAMP(net *lntest.NetworkHarness, t *harnessTest) {
         if i < len(sendRoutes)-1 {
             var left amp.Sharer
             left, sharer, err = sharer.Split()
-            require.NoError(t.t, err)
+            require.NoError(ht, err)

             child = left.Child(uint32(i))
         } else {
@@ -622,15 +546,10 @@ func testSendToRouteAMP(net *lntest.NetworkHarness, t *harnessTest) {
             Route: r,
         }

-        // We'll send all shards in their own goroutine, since SendToRoute will
-        // block as long as the payment is in flight.
+        // We'll send all shards in their own goroutine, since
+        // SendToRoute will block as long as the payment is in flight.
         go func() {
-            ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
-            resp, err := ctx.alice.RouterClient.SendToRouteV2(ctxt, sendReq)
-            if err != nil {
-                t.Fatalf("unable to send payment: %v", err)
-            }
-
+            resp := mts.alice.RPC.SendToRouteV2(sendReq)
             responses <- resp
         }()
     }
@@ -640,21 +559,21 @@ func testSendToRouteAMP(net *lntest.NetworkHarness, t *harnessTest) {

     // Ensure we get a notification of the invoice being added by Bob.
     rpcInvoice, err := bobInvoiceSubscription.Recv()
-    require.NoError(t.t, err)
+    require.NoError(ht, err)

-    require.False(t.t, rpcInvoice.Settled) // nolint:staticcheck
-    require.Equal(t.t, lnrpc.Invoice_OPEN, rpcInvoice.State)
-    require.Equal(t.t, int64(0), rpcInvoice.AmtPaidSat)
-    require.Equal(t.t, int64(0), rpcInvoice.AmtPaidMsat)
-    require.Equal(t.t, payAddr, rpcInvoice.PaymentAddr)
+    require.False(ht, rpcInvoice.Settled)
+    require.Equal(ht, lnrpc.Invoice_OPEN, rpcInvoice.State)
+    require.Equal(ht, int64(0), rpcInvoice.AmtPaidSat)
+    require.Equal(ht, int64(0), rpcInvoice.AmtPaidMsat)
+    require.Equal(ht, payAddr, rpcInvoice.PaymentAddr)

-    require.Equal(t.t, 0, len(rpcInvoice.Htlcs))
+    require.Equal(ht, 0, len(rpcInvoice.Htlcs))

     sendShard(1, sendRoutes[1])
     sendShard(2, sendRoutes[2])

     // Assert that all of the child preimages are unique.
-    require.Equal(t.t, len(sendRoutes), len(childPreimages))
+    require.Equal(ht, len(sendRoutes), len(childPreimages))

     // Make a copy of the childPreimages map for validating the resulting
     // invoice.
@@ -665,24 +584,23 @@ func testSendToRouteAMP(net *lntest.NetworkHarness, t *harnessTest) {

     // Wait for all responses to be back, and check that they all
     // succeeded.
+    timer := time.After(defaultTimeout)
     for range sendRoutes {
         var resp *lnrpc.HTLCAttempt
         select {
         case resp = <-responses:
-        case <-time.After(defaultTimeout):
-            t.Fatalf("response not received")
+        case <-timer:
+            require.Fail(ht, "response not received")
         }

-        if resp.Failure != nil {
-            t.Fatalf("received payment failure : %v", resp.Failure)
-        }
+        require.Nil(ht, resp.Failure, "received payment failure")

         preimage, err := lntypes.MakePreimage(resp.Preimage)
-        require.NoError(t.t, err)
+        require.NoError(ht, err)

         // Assert that the response includes one of our child preimages.
         _, ok := childPreimages[preimage]
-        require.True(t.t, ok)
+        require.True(ht, ok)

         // Remove this preimage from out set so that we ensure all
         // responses have a unique child preimage.
@@ -692,109 +610,51 @@ func testSendToRouteAMP(net *lntest.NetworkHarness, t *harnessTest) {

     // There should now be a settle event for the invoice.
     rpcInvoice, err = bobInvoiceSubscription.Recv()
-    require.NoError(t.t, err)
+    require.NoError(ht, err)

     // Also fetch Bob's invoice from ListInvoices and assert it is equal to
     // the one received via the subscription.
-    invoiceResp, err := ctx.bob.ListInvoices(
-        ctxb, &lnrpc.ListInvoiceRequest{},
-    )
-    require.NoError(t.t, err)
-    require.Equal(t.t, 1, len(invoiceResp.Invoices))
-    assertInvoiceEqual(t.t, rpcInvoice, invoiceResp.Invoices[0])
+    invoices := ht.AssertNumInvoices(mts.bob, 1)
+    ht.AssertInvoiceEqual(rpcInvoice, invoices[0])

     // Assert that the invoice is settled for the total payment amount and
     // has the correct payment address.
-    require.True(t.t, rpcInvoice.Settled) // nolint:staticcheck
-    require.Equal(t.t, lnrpc.Invoice_SETTLED, rpcInvoice.State)
-    require.Equal(t.t, int64(paymentAmt), rpcInvoice.AmtPaidSat)
-    require.Equal(t.t, int64(paymentAmt*1000), rpcInvoice.AmtPaidMsat)
-    require.Equal(t.t, payAddr, rpcInvoice.PaymentAddr)
+    require.True(ht, rpcInvoice.Settled)
+    require.Equal(ht, lnrpc.Invoice_SETTLED, rpcInvoice.State)
+    require.Equal(ht, int64(paymentAmt), rpcInvoice.AmtPaidSat)
+    require.Equal(ht, int64(paymentAmt*1000), rpcInvoice.AmtPaidMsat)
+    require.Equal(ht, payAddr, rpcInvoice.PaymentAddr)

     // Finally, assert that the proper set id is recorded for each htlc, and
     // that the preimage hash pair is valid.
-    require.Equal(t.t, numShards, len(rpcInvoice.Htlcs))
+    require.Equal(ht, numShards, len(rpcInvoice.Htlcs))
     for _, htlc := range rpcInvoice.Htlcs {
-        require.NotNil(t.t, htlc.Amp)
-        require.Equal(t.t, setID, htlc.Amp.SetId)
+        require.NotNil(ht, htlc.Amp)
+        require.Equal(ht, setID, htlc.Amp.SetId)

         // Parse the child hash and child preimage, and assert they are
         // well-formed.
         childHash, err := lntypes.MakeHash(htlc.Amp.Hash)
-        require.NoError(t.t, err)
+        require.NoError(ht, err)
         childPreimage, err := lntypes.MakePreimage(htlc.Amp.Preimage)
-        require.NoError(t.t, err)
+        require.NoError(ht, err)

         // Assert that the preimage actually matches the hashes.
         validPreimage := childPreimage.Matches(childHash)
-        require.True(t.t, validPreimage)
+        require.True(ht, validPreimage)

         // Assert that the HTLC includes one of our child preimages.
         childIndex, ok := childPreimages[childPreimage]
-        require.True(t.t, ok)
+        require.True(ht, ok)

         // Assert that the correct child index is reflected.
-        require.Equal(t.t, childIndex, htlc.Amp.ChildIndex)
+        require.Equal(ht, childIndex, htlc.Amp.ChildIndex)

         // Remove this preimage from our set so that we ensure all HTLCs
         // have a unique child preimage.
         delete(childPreimages, childPreimage)
     }
+
+    // Finally, close all channels.
+    mts.closeChannels()
 }

-// assertInvoiceEqual asserts that two lnrpc.Invoices are equivalent. A custom
-// comparison function is defined for these tests, since proto message returned
-// from unary and streaming RPCs (as of protobuf 1.23.0 and grpc 1.29.1) aren't
-// consistent with the private fields set on the messages. As a result, we avoid
-// using require.Equal and test only the actual data members.
-func assertInvoiceEqual(t *testing.T, a, b *lnrpc.Invoice) {
-    t.Helper()
-
-    // Ensure the HTLCs are sorted properly before attempting to compare.
-    sort.Slice(a.Htlcs, func(i, j int) bool {
-        return a.Htlcs[i].ChanId < a.Htlcs[j].ChanId
-    })
-    sort.Slice(b.Htlcs, func(i, j int) bool {
-        return b.Htlcs[i].ChanId < b.Htlcs[j].ChanId
-    })
-
-    require.Equal(t, a.Memo, b.Memo)
-    require.Equal(t, a.RPreimage, b.RPreimage)
-    require.Equal(t, a.RHash, b.RHash)
-    require.Equal(t, a.Value, b.Value)
-    require.Equal(t, a.ValueMsat, b.ValueMsat)
-    require.Equal(t, a.CreationDate, b.CreationDate)
-    require.Equal(t, a.SettleDate, b.SettleDate)
-    require.Equal(t, a.PaymentRequest, b.PaymentRequest)
-    require.Equal(t, a.DescriptionHash, b.DescriptionHash)
-    require.Equal(t, a.Expiry, b.Expiry)
-    require.Equal(t, a.FallbackAddr, b.FallbackAddr)
-    require.Equal(t, a.CltvExpiry, b.CltvExpiry)
-    require.Equal(t, a.RouteHints, b.RouteHints)
-    require.Equal(t, a.Private, b.Private)
-    require.Equal(t, a.AddIndex, b.AddIndex)
-    require.Equal(t, a.SettleIndex, b.SettleIndex)
-    require.Equal(t, a.AmtPaidSat, b.AmtPaidSat)
-    require.Equal(t, a.AmtPaidMsat, b.AmtPaidMsat)
-    require.Equal(t, a.State, b.State)
-    require.Equal(t, a.Features, b.Features)
-    require.Equal(t, a.IsKeysend, b.IsKeysend)
-    require.Equal(t, a.PaymentAddr, b.PaymentAddr)
-    require.Equal(t, a.IsAmp, b.IsAmp)
-
-    require.Equal(t, len(a.Htlcs), len(b.Htlcs))
-    for i := range a.Htlcs {
-        htlcA, htlcB := a.Htlcs[i], b.Htlcs[i]
-        require.Equal(t, htlcA.ChanId, htlcB.ChanId)
-        require.Equal(t, htlcA.HtlcIndex, htlcB.HtlcIndex)
-        require.Equal(t, htlcA.AmtMsat, htlcB.AmtMsat)
-        require.Equal(t, htlcA.AcceptHeight, htlcB.AcceptHeight)
-        require.Equal(t, htlcA.AcceptTime, htlcB.AcceptTime)
-        require.Equal(t, htlcA.ResolveTime, htlcB.ResolveTime)
-        require.Equal(t, htlcA.ExpiryHeight, htlcB.ExpiryHeight)
-        require.Equal(t, htlcA.State, htlcB.State)
-        require.Equal(t, htlcA.CustomRecords, htlcB.CustomRecords)
-        require.Equal(t, htlcA.MppTotalAmtMsat, htlcB.MppTotalAmtMsat)
|
|
||||||
require.Equal(t, htlcA.Amp, htlcB.Amp)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
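The AMP assertions above reduce to one primitive: rebuild each child hash/preimage pair with `lntypes` and check that the preimage actually hashes to the advertised child hash. A minimal stand-alone sketch of that validation, assuming only the `lntypes` package; the 32-byte input is a placeholder for `htlc.Amp.Preimage`, and in the test the hash comes from `htlc.Amp.Hash`:

```go
package main

import (
	"crypto/sha256"
	"fmt"

	"github.com/lightningnetwork/lnd/lntypes"
)

func main() {
	// Placeholder 32-byte child preimage; the itest reads this from
	// htlc.Amp.Preimage.
	raw := make([]byte, lntypes.PreimageSize)
	raw[0] = 0x01

	childPreimage, err := lntypes.MakePreimage(raw)
	if err != nil {
		panic(err)
	}

	// The child hash is the SHA-256 of the preimage; the itest reads it
	// from htlc.Amp.Hash instead of computing it.
	digest := sha256.Sum256(raw)
	childHash, err := lntypes.MakeHash(digest[:])
	if err != nil {
		panic(err)
	}

	// Matches re-hashes the preimage and compares it to the hash, which
	// is the well-formedness check the test performs per HTLC.
	fmt.Println("valid child preimage:", childPreimage.Matches(childHash))
}
```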
@ -1,13 +1,11 @@
|
|||||||
package itest
|
package itest
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
|
||||||
"fmt"
|
"fmt"
|
||||||
"sync"
|
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/lightningnetwork/lnd/lnrpc"
|
"github.com/lightningnetwork/lnd/lnrpc"
|
||||||
"github.com/lightningnetwork/lnd/lntest"
|
"github.com/lightningnetwork/lnd/lntemp"
|
||||||
"github.com/lightningnetwork/lnd/lnwire"
|
"github.com/lightningnetwork/lnd/lnwire"
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
)
|
)
|
||||||
@ -15,16 +13,8 @@ import (
|
|||||||
// testCustomMessage tests sending and receiving of overridden custom message
|
// testCustomMessage tests sending and receiving of overridden custom message
|
||||||
// types (within the message type range usually reserved for protocol messages)
|
// types (within the message type range usually reserved for protocol messages)
|
||||||
// via the send and subscribe custom message APIs.
|
// via the send and subscribe custom message APIs.
|
||||||
func testCustomMessage(net *lntest.NetworkHarness, t *harnessTest) {
|
func testCustomMessage(ht *lntemp.HarnessTest) {
|
||||||
ctx, cancel := context.WithCancel(context.Background())
|
alice, bob := ht.Alice, ht.Bob
|
||||||
var wg sync.WaitGroup
|
|
||||||
|
|
||||||
// At the end of our test, cancel our context and wait for all
|
|
||||||
// goroutines to exit.
|
|
||||||
defer func() {
|
|
||||||
cancel()
|
|
||||||
wg.Wait()
|
|
||||||
}()
|
|
||||||
|
|
||||||
var (
|
var (
|
||||||
overrideType1 uint32 = 554
|
overrideType1 uint32 = 554
|
||||||
@ -34,28 +24,19 @@ func testCustomMessage(net *lntest.NetworkHarness, t *harnessTest) {
|
|||||||
|
|
||||||
// Update Alice to accept custom protocol messages with type 1 but do
|
// Update Alice to accept custom protocol messages with type 1 but do
|
||||||
// not allow Bob to handle them yet.
|
// not allow Bob to handle them yet.
|
||||||
net.Alice.Cfg.ExtraArgs = append(
|
extraArgs := []string{
|
||||||
net.Alice.Cfg.ExtraArgs,
|
|
||||||
fmt.Sprintf(msgOverrideArg, overrideType1),
|
fmt.Sprintf(msgOverrideArg, overrideType1),
|
||||||
)
|
}
|
||||||
require.NoError(t.t, net.RestartNode(net.Alice, nil, nil))
|
ht.RestartNodeWithExtraArgs(alice, extraArgs)
|
||||||
|
|
||||||
// Wait for Alice's server to be active after the restart before we
|
|
||||||
// try to subscribe to our message stream.
|
|
||||||
require.NoError(t.t, net.Alice.WaitUntilServerActive())
|
|
||||||
|
|
||||||
// Subscribe Alice to custom messages before we send any, so that we
|
// Subscribe Alice to custom messages before we send any, so that we
|
||||||
// don't miss any.
|
// don't miss any.
|
||||||
msgClient, err := net.Alice.LightningClient.SubscribeCustomMessages(
|
msgClient, cancel := alice.RPC.SubscribeCustomMessages()
|
||||||
ctx, &lnrpc.SubscribeCustomMessagesRequest{},
|
defer cancel()
|
||||||
)
|
|
||||||
require.NoError(t.t, err, "alice could not subscribe")
|
|
||||||
|
|
||||||
// Create a channel to receive custom messages on.
|
// Create a channel to receive custom messages on.
|
||||||
messages := make(chan *lnrpc.CustomMessage)
|
messages := make(chan *lnrpc.CustomMessage)
|
||||||
wg.Add(1)
|
|
||||||
go func() {
|
go func() {
|
||||||
defer wg.Done()
|
|
||||||
for {
|
for {
|
||||||
// If we fail to receive, just exit. The test should
|
// If we fail to receive, just exit. The test should
|
||||||
// fail elsewhere if it doesn't get a message that it
|
// fail elsewhere if it doesn't get a message that it
|
||||||
@ -69,120 +50,108 @@ func testCustomMessage(net *lntest.NetworkHarness, t *harnessTest) {
|
|||||||
// test is shutting down.
|
// test is shutting down.
|
||||||
select {
|
select {
|
||||||
case messages <- msg:
|
case messages <- msg:
|
||||||
case <-ctx.Done():
|
case <-ht.Context().Done():
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
|
||||||
// Connect alice and bob so that they can exchange messages.
|
// Connect alice and bob so that they can exchange messages.
|
||||||
net.EnsureConnected(t.t, net.Alice, net.Bob)
|
ht.EnsureConnected(alice, bob)
|
||||||
|
|
||||||
// Create a custom message that is within our allowed range.
|
// Create a custom message that is within our allowed range.
|
||||||
msgType := uint32(lnwire.CustomTypeStart + 1)
|
msgType := uint32(lnwire.CustomTypeStart + 1)
|
||||||
msgData := []byte{1, 2, 3}
|
msgData := []byte{1, 2, 3}
|
||||||
|
|
||||||
// Send it from Bob to Alice.
|
// Send it from Bob to Alice.
|
||||||
ctxt, _ := context.WithTimeout(ctx, defaultTimeout)
|
bobMsg := &lnrpc.SendCustomMessageRequest{
|
||||||
_, err = net.Bob.LightningClient.SendCustomMessage(
|
Peer: alice.PubKey[:],
|
||||||
ctxt, &lnrpc.SendCustomMessageRequest{
|
Type: msgType,
|
||||||
Peer: net.Alice.PubKey[:],
|
Data: msgData,
|
||||||
Type: msgType,
|
}
|
||||||
Data: msgData,
|
bob.RPC.SendCustomMessage(bobMsg)
|
||||||
},
|
|
||||||
)
|
|
||||||
require.NoError(t.t, err, "bob could not send")
|
|
||||||
|
|
||||||
// Wait for Alice to receive the message. It should come through because
|
// Wait for Alice to receive the message. It should come through
|
||||||
// it is within our allowed range.
|
// because it is within our allowed range.
|
||||||
select {
|
select {
|
||||||
case msg := <-messages:
|
case msg := <-messages:
|
||||||
// Check our type and data and (sanity) check the peer we got it
|
// Check our type and data and (sanity) check the peer we got
|
||||||
// from.
|
// it from.
|
||||||
require.Equal(t.t, msgType, msg.Type, "first msg type wrong")
|
require.Equal(ht, msgType, msg.Type, "first msg type wrong")
|
||||||
require.Equal(t.t, msgData, msg.Data, "first msg data wrong")
|
require.Equal(ht, msgData, msg.Data, "first msg data wrong")
|
||||||
require.Equal(t.t, net.Bob.PubKey[:], msg.Peer, "first msg "+
|
require.Equal(ht, bob.PubKey[:], msg.Peer, "first msg "+
|
||||||
"peer wrong")
|
"peer wrong")
|
||||||
|
|
||||||
case <-time.After(defaultTimeout):
|
case <-time.After(defaultTimeout):
|
||||||
t.t.Fatalf("alice did not receive first custom message: %v",
|
ht.Fatalf("alice did not receive first custom message: %v",
|
||||||
msgType)
|
msgType)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Try to send a message from Bob to Alice which has a message type
|
// Try to send a message from Bob to Alice which has a message type
|
||||||
// outside of the custom type range and assert that it fails.
|
// outside of the custom type range and assert that it fails.
|
||||||
ctxt, _ = context.WithTimeout(ctx, defaultTimeout)
|
bobMsg = &lnrpc.SendCustomMessageRequest{
|
||||||
_, err = net.Bob.LightningClient.SendCustomMessage(
|
Peer: alice.PubKey[:],
|
||||||
ctxt, &lnrpc.SendCustomMessageRequest{
|
Type: overrideType1,
|
||||||
Peer: net.Alice.PubKey[:],
|
Data: msgData,
|
||||||
Type: overrideType1,
|
}
|
||||||
Data: msgData,
|
_, err := bob.RPC.LN.SendCustomMessage(ht.Context(), bobMsg)
|
||||||
},
|
require.Error(ht, err, "bob should not be able to send type 1")
|
||||||
)
|
|
||||||
require.Error(t.t, err, "bob should not be able to send type 1")
|
|
||||||
|
|
||||||
// Now, restart Bob with the ability to send two different custom
|
// Now, restart Bob with the ability to send two different custom
|
||||||
// protocol messages.
|
// protocol messages.
|
||||||
net.Bob.Cfg.ExtraArgs = append(
|
extraArgs = []string{
|
||||||
net.Bob.Cfg.ExtraArgs,
|
|
||||||
fmt.Sprintf(msgOverrideArg, overrideType1),
|
fmt.Sprintf(msgOverrideArg, overrideType1),
|
||||||
fmt.Sprintf(msgOverrideArg, overrideType2),
|
fmt.Sprintf(msgOverrideArg, overrideType2),
|
||||||
)
|
}
|
||||||
require.NoError(t.t, net.RestartNode(net.Bob, nil, nil))
|
ht.RestartNodeWithExtraArgs(bob, extraArgs)
|
||||||
|
|
||||||
// Make sure Bob and Alice are connected after his restart.
|
// Make sure Bob and Alice are connected after his restart.
|
||||||
net.EnsureConnected(t.t, net.Alice, net.Bob)
|
ht.EnsureConnected(alice, bob)
|
||||||
|
|
||||||
// Send a message from Bob to Alice with a type that Bob is allowed to
|
// Send a message from Bob to Alice with a type that Bob is allowed to
|
||||||
// send, but Alice will not handle as a custom message.
|
// send, but Alice will not handle as a custom message.
|
||||||
ctxt, _ = context.WithTimeout(ctx, defaultTimeout)
|
bobMsg = &lnrpc.SendCustomMessageRequest{
|
||||||
_, err = net.Bob.LightningClient.SendCustomMessage(
|
Peer: alice.PubKey[:],
|
||||||
ctxt, &lnrpc.SendCustomMessageRequest{
|
Type: overrideType2,
|
||||||
Peer: net.Alice.PubKey[:],
|
Data: msgData,
|
||||||
Type: overrideType2,
|
}
|
||||||
Data: msgData,
|
bob.RPC.SendCustomMessage(bobMsg)
|
||||||
},
|
|
||||||
)
|
|
||||||
require.NoError(t.t, err, "bob should be able to send type 2")
|
|
||||||
|
|
||||||
// Do a quick check that Alice did not receive this message in her
|
// Do a quick check that Alice did not receive this message in her
|
||||||
// stream. Note that this is an instant check, so could miss the message
|
// stream. Note that this is an instant check, so could miss the
|
||||||
// being received. We'll also check below that she didn't get it, this
|
// message being received. We'll also check below that she didn't get
|
||||||
// is just a sanity check.
|
// it, this is just a sanity check.
|
||||||
select {
|
select {
|
||||||
case msg := <-messages:
|
case msg := <-messages:
|
||||||
t.t.Fatalf("unexpected message: %v", msg)
|
ht.Fatalf("unexpected message: %v", msg)
|
||||||
default:
|
default:
|
||||||
}
|
}
|
||||||
|
|
||||||
// Finally, send a custom message with a type that Bob is allowed to
|
// Finally, send a custom message with a type that Bob is allowed to
|
||||||
// send and Alice is configured to receive.
|
// send and Alice is configured to receive.
|
||||||
ctxt, _ = context.WithTimeout(ctx, defaultTimeout)
|
bobMsg = &lnrpc.SendCustomMessageRequest{
|
||||||
_, err = net.Bob.LightningClient.SendCustomMessage(
|
Peer: alice.PubKey[:],
|
||||||
ctxt, &lnrpc.SendCustomMessageRequest{
|
Type: overrideType1,
|
||||||
Peer: net.Alice.PubKey[:],
|
Data: msgData,
|
||||||
Type: overrideType1,
|
}
|
||||||
Data: msgData,
|
bob.RPC.SendCustomMessage(bobMsg)
|
||||||
},
|
|
||||||
)
|
|
||||||
require.NoError(t.t, err, "bob should be able to send type 1")
|
|
||||||
|
|
||||||
// Wait to receive a message from Bob. This check serves to ensure that
|
// Wait to receive a message from Bob. This check serves to ensure that
|
||||||
// our message type 1 was delivered, and assert that the preceding one
|
// our message type 1 was delivered, and assert that the preceding one
|
||||||
// was not (we could have missed it in our check above). When we receive
|
// was not (we could have missed it in our check above). When we
|
||||||
// the second message, we know that the first one did not go through,
|
// receive the second message, we know that the first one did not go
|
||||||
// because we expect our messages to deliver in order.
|
// through, because we expect our messages to deliver in order.
|
||||||
select {
|
select {
|
||||||
case msg := <-messages:
|
case msg := <-messages:
|
||||||
// Check our type and data and (sanity) check the peer we got it
|
// Check our type and data and (sanity) check the peer we got
|
||||||
// from.
|
// it from.
|
||||||
require.Equal(t.t, overrideType1, msg.Type, "second message "+
|
require.Equal(ht, overrideType1, msg.Type, "second message "+
|
||||||
"type")
|
"type")
|
||||||
require.Equal(t.t, msgData, msg.Data, "second message data")
|
require.Equal(ht, msgData, msg.Data, "second message data")
|
||||||
require.Equal(t.t, net.Bob.PubKey[:], msg.Peer, "second "+
|
require.Equal(ht, bob.PubKey[:], msg.Peer, "second "+
|
||||||
"message peer")
|
"message peer")
|
||||||
|
|
||||||
case <-time.After(defaultTimeout):
|
case <-time.After(defaultTimeout):
|
||||||
t.t.Fatalf("alice did not receive second custom message")
|
ht.Fatalf("alice did not receive second custom message")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
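The custom message test above exercises two RPCs: `SubscribeCustomMessages` on the receiving node and `SendCustomMessage` on its peer. A hedged sketch of that round trip against plain `lnrpc.LightningClient` values; the clients, the peer pubkey, and the whitelisting of `msgType` (via lnd's protocol override option that `msgOverrideArg` expands to) are assumed to be configured elsewhere, and the helper name is illustrative:

```go
package sketch

import (
	"context"
	"fmt"

	"github.com/lightningnetwork/lnd/lnrpc"
)

// exchangeCustomMessage subscribes on the receiver first (so nothing is
// missed), sends one message from the sender to receiverPubKey, and waits
// for the subscription to deliver it.
func exchangeCustomMessage(ctx context.Context, sender,
	receiver lnrpc.LightningClient, receiverPubKey []byte,
	msgType uint32, data []byte) error {

	stream, err := receiver.SubscribeCustomMessages(
		ctx, &lnrpc.SubscribeCustomMessagesRequest{},
	)
	if err != nil {
		return fmt.Errorf("subscribe: %w", err)
	}

	_, err = sender.SendCustomMessage(ctx, &lnrpc.SendCustomMessageRequest{
		Peer: receiverPubKey,
		Type: msgType,
		Data: data,
	})
	if err != nil {
		return fmt.Errorf("send: %w", err)
	}

	// Block until the receiving node surfaces the message on the stream.
	msg, err := stream.Recv()
	if err != nil {
		return fmt.Errorf("recv: %w", err)
	}
	if msg.Type != msgType {
		return fmt.Errorf("unexpected message type %d", msg.Type)
	}

	return nil
}
```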
@ -1,19 +1,18 @@
|
|||||||
package itest
|
package itest
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
|
||||||
"encoding/hex"
|
|
||||||
"fmt"
|
"fmt"
|
||||||
"sync"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/btcsuite/btcd/btcutil"
|
"github.com/btcsuite/btcd/btcutil"
|
||||||
"github.com/btcsuite/btcd/wire"
|
|
||||||
"github.com/lightningnetwork/lnd/chainreg"
|
"github.com/lightningnetwork/lnd/chainreg"
|
||||||
"github.com/lightningnetwork/lnd/lnrpc"
|
"github.com/lightningnetwork/lnd/lnrpc"
|
||||||
"github.com/lightningnetwork/lnd/lnrpc/routerrpc"
|
"github.com/lightningnetwork/lnd/lnrpc/routerrpc"
|
||||||
"github.com/lightningnetwork/lnd/lntest"
|
"github.com/lightningnetwork/lnd/lntemp"
|
||||||
|
"github.com/lightningnetwork/lnd/lntemp/node"
|
||||||
"github.com/lightningnetwork/lnd/lntest/wait"
|
"github.com/lightningnetwork/lnd/lntest/wait"
|
||||||
|
"github.com/lightningnetwork/lnd/lntypes"
|
||||||
"github.com/lightningnetwork/lnd/routing/route"
|
"github.com/lightningnetwork/lnd/routing/route"
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
"google.golang.org/grpc/codes"
|
"google.golang.org/grpc/codes"
|
||||||
@ -35,163 +34,145 @@ type interceptorTestCase struct {
|
|||||||
|
|
||||||
// testForwardInterceptorDedupHtlc tests that upon reconnection, duplicate
|
// testForwardInterceptorDedupHtlc tests that upon reconnection, duplicate
|
||||||
// HTLCs aren't re-notified using the HTLC interceptor API.
|
// HTLCs aren't re-notified using the HTLC interceptor API.
|
||||||
func testForwardInterceptorDedupHtlc(net *lntest.NetworkHarness, t *harnessTest) {
|
func testForwardInterceptorDedupHtlc(ht *lntemp.HarnessTest) {
|
||||||
// Initialize the test context with 3 connected nodes.
|
// Initialize the test context with 3 connected nodes.
|
||||||
alice := net.NewNode(t.t, "alice", nil)
|
ts := newInterceptorTestScenario(ht)
|
||||||
defer shutdownAndAssert(net, t, alice)
|
|
||||||
|
|
||||||
bob := net.NewNode(t.t, "bob", nil)
|
alice, bob, carol := ts.alice, ts.bob, ts.carol
|
||||||
defer shutdownAndAssert(net, t, bob)
|
|
||||||
|
|
||||||
carol := net.NewNode(t.t, "carol", nil)
|
|
||||||
defer shutdownAndAssert(net, t, carol)
|
|
||||||
|
|
||||||
tc := newInterceptorTestContext(t, net, alice, bob, carol)
|
|
||||||
|
|
||||||
const (
|
|
||||||
chanAmt = btcutil.Amount(300000)
|
|
||||||
)
|
|
||||||
|
|
||||||
// Open and wait for channels.
|
// Open and wait for channels.
|
||||||
tc.openChannel(tc.alice, tc.bob, chanAmt)
|
const chanAmt = btcutil.Amount(300000)
|
||||||
tc.openChannel(tc.bob, tc.carol, chanAmt)
|
p := lntemp.OpenChannelParams{Amt: chanAmt}
|
||||||
defer tc.closeChannels()
|
reqs := []*lntemp.OpenChannelRequest{
|
||||||
tc.waitForChannels()
|
{Local: alice, Remote: bob, Param: p},
|
||||||
|
{Local: bob, Remote: carol, Param: p},
|
||||||
|
}
|
||||||
|
resp := ht.OpenMultiChannelsAsync(reqs)
|
||||||
|
cpAB, cpBC := resp[0], resp[1]
|
||||||
|
|
||||||
ctxb := context.Background()
|
// Make sure Alice is aware of channel Bob=>Carol.
|
||||||
ctxt, cancelInterceptor := context.WithCancel(ctxb)
|
ht.AssertTopologyChannelOpen(alice, cpBC)
|
||||||
interceptor, err := tc.bob.RouterClient.HtlcInterceptor(ctxt)
|
|
||||||
require.NoError(tc.t.t, err, "failed to create HtlcInterceptor")
|
|
||||||
|
|
||||||
addResponse, err := tc.carol.AddInvoice(ctxb, &lnrpc.Invoice{
|
// Connect the interceptor.
|
||||||
ValueMsat: 1000,
|
interceptor, cancelInterceptor := bob.RPC.HtlcInterceptor()
|
||||||
})
|
|
||||||
require.NoError(tc.t.t, err, "unable to add invoice")
|
|
||||||
|
|
||||||
invoice, err := tc.carol.LookupInvoice(ctxb, &lnrpc.PaymentHash{
|
// Prepare the test cases.
|
||||||
RHashStr: hex.EncodeToString(addResponse.RHash),
|
req := &lnrpc.Invoice{ValueMsat: 1000}
|
||||||
})
|
addResponse := carol.RPC.AddInvoice(req)
|
||||||
require.NoError(tc.t.t, err, "unable to find invoice")
|
invoice := carol.RPC.LookupInvoice(addResponse.RHash)
|
||||||
|
tc := &interceptorTestCase{
|
||||||
|
amountMsat: 1000,
|
||||||
|
invoice: invoice,
|
||||||
|
payAddr: invoice.PaymentAddr,
|
||||||
|
}
|
||||||
|
|
||||||
|
// We initiate a payment from Alice.
|
||||||
|
done := make(chan struct{})
|
||||||
|
go func() {
|
||||||
|
// Signal that all the payments have been sent.
|
||||||
|
defer close(done)
|
||||||
|
|
||||||
|
ts.sendPaymentAndAssertAction(tc)
|
||||||
|
}()
|
||||||
|
|
||||||
// We start the htlc interceptor with a simple implementation that
|
// We start the htlc interceptor with a simple implementation that
|
||||||
// saves all intercepted packets. These packets are held to simulate a
|
// saves all intercepted packets. These packets are held to simulate a
|
||||||
// pending payment.
|
// pending payment.
|
||||||
interceptedPacketstMap := &sync.Map{}
|
packet := ht.ReceiveHtlcInterceptor(interceptor)
|
||||||
var wg sync.WaitGroup
|
|
||||||
wg.Add(1)
|
|
||||||
go func() {
|
|
||||||
defer wg.Done()
|
|
||||||
for {
|
|
||||||
packet, err := interceptor.Recv()
|
|
||||||
if err != nil {
|
|
||||||
// If it is just the error result of the
|
|
||||||
// context cancellation the we exit silently.
|
|
||||||
status, ok := status.FromError(err)
|
|
||||||
if ok && status.Code() == codes.Canceled {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Otherwise it an unexpected error, we fail
|
|
||||||
// the test.
|
|
||||||
require.NoError(
|
|
||||||
tc.t.t, err,
|
|
||||||
"unexpected error in interceptor.Recv()",
|
|
||||||
)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
interceptedPacketstMap.Store(
|
|
||||||
packet.IncomingCircuitKey.HtlcId, packet,
|
|
||||||
)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
// We initiate a payment from Alice.
|
|
||||||
wg.Add(1)
|
|
||||||
go func() {
|
|
||||||
defer wg.Done()
|
|
||||||
_, _ = tc.sendAliceToCarolPayment(
|
|
||||||
ctxb, 1000,
|
|
||||||
invoice.RHash, invoice.PaymentAddr,
|
|
||||||
)
|
|
||||||
}()
|
|
||||||
|
|
||||||
// Here we should wait for the channel to contain a pending htlc, and
|
// Here we should wait for the channel to contain a pending htlc, and
|
||||||
// also be shown as being active.
|
// also be shown as being active.
|
||||||
err = wait.Predicate(func() bool {
|
err := wait.NoError(func() error {
|
||||||
channels, err := tc.bob.ListChannels(ctxt, &lnrpc.ListChannelsRequest{
|
channel := ht.QueryChannelByChanPoint(bob, cpAB)
|
||||||
ActiveOnly: true,
|
|
||||||
Peer: tc.alice.PubKey[:],
|
if len(channel.PendingHtlcs) == 0 {
|
||||||
})
|
return fmt.Errorf("expect alice <> bob channel to " +
|
||||||
if err != nil {
|
"have pending htlcs")
|
||||||
return false
|
|
||||||
}
|
}
|
||||||
if len(channels.Channels) == 0 {
|
if channel.Active {
|
||||||
return false
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
aliceChan := channels.Channels[0]
|
return fmt.Errorf("channel not active")
|
||||||
if len(aliceChan.PendingHtlcs) == 0 {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return aliceChan.Active
|
|
||||||
}, defaultTimeout)
|
}, defaultTimeout)
|
||||||
require.NoError(
|
require.NoError(
|
||||||
tc.t.t, err, "alice <> bob channel pending htlc never arrived",
|
ht, err, "alice <> bob channel pending htlc never arrived",
|
||||||
)
|
)
|
||||||
|
|
||||||
// At this point we want to make bob's link send all pending htlcs to
|
// At this point we want to make bob's link send all pending htlcs to
|
||||||
// the switch again. We force this behavior by disconnecting and
|
// the switch again. We force this behavior by disconnecting and
|
||||||
// connecting to the peer.
|
// connecting to the peer.
|
||||||
if err := tc.net.DisconnectNodes(tc.bob, tc.alice); err != nil {
|
ht.DisconnectNodes(bob, alice)
|
||||||
tc.t.Fatalf("failed to disconnect alice and bob")
|
ht.EnsureConnected(bob, alice)
|
||||||
}
|
|
||||||
tc.net.EnsureConnected(tc.t.t, tc.bob, tc.alice)
|
|
||||||
|
|
||||||
// Here we wait for the channel to be active again.
|
// Here we wait for the channel to be active again.
|
||||||
err = wait.Predicate(func() bool {
|
ht.AssertChannelExists(bob, cpAB)
|
||||||
req := &lnrpc.ListChannelsRequest{
|
|
||||||
ActiveOnly: true,
|
|
||||||
Peer: tc.alice.PubKey[:],
|
|
||||||
}
|
|
||||||
|
|
||||||
channels, err := tc.bob.ListChannels(ctxt, req)
|
|
||||||
return err == nil && len(channels.Channels) > 0
|
|
||||||
}, defaultTimeout)
|
|
||||||
require.NoError(
|
|
||||||
tc.t.t, err, "alice <> bob channel didn't re-activate",
|
|
||||||
)
|
|
||||||
|
|
||||||
// Now that the channel is active we make sure the test passes as
|
// Now that the channel is active we make sure the test passes as
|
||||||
// expected.
|
// expected.
|
||||||
payments, err := tc.alice.ListPayments(ctxb, &lnrpc.ListPaymentsRequest{
|
|
||||||
IncludeIncomplete: true,
|
|
||||||
})
|
|
||||||
require.NoError(tc.t.t, err, "failed to fetch payment")
|
|
||||||
|
|
||||||
// We expect one in flight payment since we held the htlcs.
|
// We expect one in flight payment since we held the htlcs.
|
||||||
require.Equal(tc.t.t, len(payments.Payments), 1)
|
var preimage lntypes.Preimage
|
||||||
require.Equal(tc.t.t, payments.Payments[0].Status, lnrpc.Payment_IN_FLIGHT)
|
copy(preimage[:], invoice.RPreimage)
|
||||||
|
ht.AssertPaymentStatus(alice, preimage, lnrpc.Payment_IN_FLIGHT)
|
||||||
// We now fail all htlcs to cancel the payment.
|
|
||||||
packetsCount := 0
|
|
||||||
interceptedPacketstMap.Range(func(_, packet interface{}) bool {
|
|
||||||
p := packet.(*routerrpc.ForwardHtlcInterceptRequest)
|
|
||||||
_ = interceptor.Send(&routerrpc.ForwardHtlcInterceptResponse{
|
|
||||||
IncomingCircuitKey: p.IncomingCircuitKey,
|
|
||||||
Action: routerrpc.ResolveHoldForwardAction_FAIL,
|
|
||||||
})
|
|
||||||
packetsCount++
|
|
||||||
return true
|
|
||||||
})
|
|
||||||
|
|
||||||
// At this point if we have more than one held htlcs then we should
|
// At this point if we have more than one held htlcs then we should
|
||||||
// fail. This means we hold the same htlc twice which is a risk we
|
// fail. This means we hold the same htlc twice which is a risk we want
|
||||||
// want to eliminate. If we don't have the same htlc twice in theory we
|
// to eliminate. If we don't have the same htlc twice in theory we can
|
||||||
// can cancel one and settle the other by mistake.
|
// cancel one and settle the other by mistake.
|
||||||
require.Equal(tc.t.t, packetsCount, 1)
|
errDone := make(chan struct{})
|
||||||
|
go func() {
|
||||||
|
defer close(errDone)
|
||||||
|
|
||||||
|
_, err := interceptor.Recv()
|
||||||
|
require.Error(ht, err, "expected an error from interceptor")
|
||||||
|
|
||||||
|
status, ok := status.FromError(err)
|
||||||
|
switch {
|
||||||
|
// If it is just the error result of the context cancellation
|
||||||
|
// then we exit silently.
|
||||||
|
case ok && status.Code() == codes.Canceled:
|
||||||
|
fallthrough
|
||||||
|
|
||||||
|
// When the test ends, during the node's shutdown it will close
|
||||||
|
// the connection.
|
||||||
|
case strings.Contains(err.Error(), "closed network connection"):
|
||||||
|
fallthrough
|
||||||
|
|
||||||
|
case strings.Contains(err.Error(), "EOF"):
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Otherwise we receive an unexpected error.
|
||||||
|
require.Failf(ht, "interceptor", "unexpected err: %v", err)
|
||||||
|
}()
|
||||||
|
|
||||||
|
// We now fail all htlcs to cancel the payment.
|
||||||
|
err = interceptor.Send(&routerrpc.ForwardHtlcInterceptResponse{
|
||||||
|
IncomingCircuitKey: packet.IncomingCircuitKey,
|
||||||
|
Action: routerrpc.ResolveHoldForwardAction_FAIL,
|
||||||
|
})
|
||||||
|
require.NoError(ht, err, "failed to send request")
|
||||||
|
|
||||||
|
// Cancel the context, which will disconnect the above interceptor.
|
||||||
cancelInterceptor()
|
cancelInterceptor()
|
||||||
wg.Wait()
|
|
||||||
|
// Make sure all goroutines are finished.
|
||||||
|
select {
|
||||||
|
case <-done:
|
||||||
|
case <-time.After(defaultTimeout):
|
||||||
|
require.Fail(ht, "timeout waiting for sending payment")
|
||||||
|
}
|
||||||
|
|
||||||
|
select {
|
||||||
|
case <-errDone:
|
||||||
|
case <-time.After(defaultTimeout):
|
||||||
|
require.Fail(ht, "timeout waiting for interceptor error")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Finally, close channels.
|
||||||
|
ht.CloseChannel(alice, cpAB)
|
||||||
|
ht.CloseChannel(bob, cpBC)
|
||||||
}
|
}
|
||||||
|
|
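The dedup test's core mechanic is the bidirectional `HtlcInterceptor` stream: every forward Bob would make is delivered on `Recv`, held until the test answers with a `ForwardHtlcInterceptResponse`, and here resolved with the `FAIL` action so the sender's in-flight payment is cancelled. A minimal sketch of that resolve loop, assuming an authenticated `routerrpc.RouterClient`; the helper name is illustrative:

```go
package sketch

import (
	"context"
	"errors"
	"io"

	"github.com/lightningnetwork/lnd/lnrpc/routerrpc"
)

// failAllForwards intercepts forwards until the stream ends and resolves
// each held HTLC with the FAIL action, failing it back to the sender.
func failAllForwards(ctx context.Context,
	router routerrpc.RouterClient) error {

	stream, err := router.HtlcInterceptor(ctx)
	if err != nil {
		return err
	}

	for {
		packet, err := stream.Recv()
		if errors.Is(err, io.EOF) {
			// The stream closed cleanly, nothing left to resolve.
			return nil
		}
		if err != nil {
			return err
		}

		// Echo back the incoming circuit key so lnd knows which held
		// HTLC this resolution applies to.
		err = stream.Send(&routerrpc.ForwardHtlcInterceptResponse{
			IncomingCircuitKey: packet.IncomingCircuitKey,
			Action:             routerrpc.ResolveHoldForwardAction_FAIL,
		})
		if err != nil {
			return err
		}
	}
}
```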
||||||
// testForwardInterceptorBasic tests the forward interceptor RPC layer.
|
// testForwardInterceptorBasic tests the forward interceptor RPC layer.
|
||||||
@ -204,265 +185,191 @@ func testForwardInterceptorDedupHtlc(net *lntest.NetworkHarness, t *harnessTest)
|
|||||||
// 3. Intercepted held htlcs result in no payment (invoice is not settled).
|
// 3. Intercepted held htlcs result in no payment (invoice is not settled).
|
||||||
// 4. When Interceptor disconnects it resumes all held htlcs, which result in
|
// 4. When Interceptor disconnects it resumes all held htlcs, which result in
|
||||||
// valid payment (invoice is settled).
|
// valid payment (invoice is settled).
|
||||||
func testForwardInterceptorBasic(net *lntest.NetworkHarness, t *harnessTest) {
|
func testForwardInterceptorBasic(ht *lntemp.HarnessTest) {
|
||||||
// Initialize the test context with 3 connected nodes.
|
ts := newInterceptorTestScenario(ht)
|
||||||
alice := net.NewNode(t.t, "alice", nil)
|
|
||||||
defer shutdownAndAssert(net, t, alice)
|
|
||||||
|
|
||||||
bob := net.NewNode(t.t, "bob", nil)
|
alice, bob, carol := ts.alice, ts.bob, ts.carol
|
||||||
defer shutdownAndAssert(net, t, bob)
|
|
||||||
|
|
||||||
carol := net.NewNode(t.t, "carol", nil)
|
|
||||||
defer shutdownAndAssert(net, t, carol)
|
|
||||||
|
|
||||||
testContext := newInterceptorTestContext(t, net, alice, bob, carol)
|
|
||||||
|
|
||||||
const (
|
|
||||||
chanAmt = btcutil.Amount(300000)
|
|
||||||
)
|
|
||||||
|
|
||||||
// Open and wait for channels.
|
// Open and wait for channels.
|
||||||
testContext.openChannel(testContext.alice, testContext.bob, chanAmt)
|
const chanAmt = btcutil.Amount(300000)
|
||||||
testContext.openChannel(testContext.bob, testContext.carol, chanAmt)
|
p := lntemp.OpenChannelParams{Amt: chanAmt}
|
||||||
defer testContext.closeChannels()
|
reqs := []*lntemp.OpenChannelRequest{
|
||||||
testContext.waitForChannels()
|
{Local: alice, Remote: bob, Param: p},
|
||||||
|
{Local: bob, Remote: carol, Param: p},
|
||||||
|
}
|
||||||
|
resp := ht.OpenMultiChannelsAsync(reqs)
|
||||||
|
cpAB, cpBC := resp[0], resp[1]
|
||||||
|
|
||||||
|
// Make sure Alice is aware of channel Bob=>Carol.
|
||||||
|
ht.AssertTopologyChannelOpen(alice, cpBC)
|
||||||
|
|
||||||
// Connect the interceptor.
|
// Connect the interceptor.
|
||||||
ctxb := context.Background()
|
interceptor, cancelInterceptor := bob.RPC.HtlcInterceptor()
|
||||||
ctxt, cancelInterceptor := context.WithTimeout(ctxb, defaultTimeout)
|
|
||||||
interceptor, err := testContext.bob.RouterClient.HtlcInterceptor(ctxt)
|
|
||||||
require.NoError(t.t, err, "failed to create HtlcInterceptor")
|
|
||||||
|
|
||||||
// Prepare the test cases.
|
// Prepare the test cases.
|
||||||
testCases := testContext.prepareTestCases()
|
testCases := ts.prepareTestCases()
|
||||||
|
|
||||||
// A channel for the interceptor go routine to send the requested packets.
|
// For each test case make sure we initiate a payment from Alice to
|
||||||
interceptedChan := make(chan *routerrpc.ForwardHtlcInterceptRequest,
|
// Carol routed through Bob. For each payment we also test its final
|
||||||
len(testCases))
|
// status according to the interceptorAction specified in the test
|
||||||
|
// case.
|
||||||
// Run the interceptor loop in its own go routine.
|
done := make(chan struct{})
|
||||||
var wg sync.WaitGroup
|
|
||||||
wg.Add(1)
|
|
||||||
go func() {
|
go func() {
|
||||||
defer wg.Done()
|
// Signal that all the payments have been sent.
|
||||||
for {
|
defer close(done)
|
||||||
request, err := interceptor.Recv()
|
|
||||||
if err != nil {
|
|
||||||
// If it is just the error result of the context cancellation
|
|
||||||
// the we exit silently.
|
|
||||||
status, ok := status.FromError(err)
|
|
||||||
if ok && status.Code() == codes.Canceled {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
// Otherwise it an unexpected error, we fail the test.
|
|
||||||
require.NoError(t.t, err, "unexpected error in interceptor.Recv()")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
interceptedChan <- request
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
// For each test case make sure we initiate a payment from Alice to Carol
|
|
||||||
// routed through Bob. For each payment we also test its final status
|
|
||||||
// according to the interceptorAction specified in the test case.
|
|
||||||
wg.Add(1)
|
|
||||||
go func() {
|
|
||||||
defer wg.Done()
|
|
||||||
for _, tc := range testCases {
|
for _, tc := range testCases {
|
||||||
attempt, err := testContext.sendAliceToCarolPayment(
|
attempt := ts.sendPaymentAndAssertAction(tc)
|
||||||
context.Background(), tc.invoice.ValueMsat,
|
ts.assertAction(tc, attempt)
|
||||||
tc.invoice.RHash, tc.payAddr,
|
|
||||||
)
|
|
||||||
|
|
||||||
if t.t.Failed() {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
require.NoError(t.t, err, "failed to send payment")
|
|
||||||
}
|
|
||||||
|
|
||||||
switch tc.interceptorAction {
|
|
||||||
// For 'fail' interceptor action we make sure the payment failed.
|
|
||||||
case routerrpc.ResolveHoldForwardAction_FAIL:
|
|
||||||
require.Equal(t.t, lnrpc.HTLCAttempt_FAILED,
|
|
||||||
attempt.Status, "expected payment to fail")
|
|
||||||
|
|
||||||
// Assert that we get a temporary channel
|
|
||||||
// failure which has a channel update.
|
|
||||||
require.NotNil(t.t, attempt.Failure)
|
|
||||||
require.NotNil(t.t, attempt.Failure.ChannelUpdate)
|
|
||||||
|
|
||||||
require.Equal(t.t,
|
|
||||||
lnrpc.Failure_TEMPORARY_CHANNEL_FAILURE,
|
|
||||||
attempt.Failure.Code)
|
|
||||||
|
|
||||||
// For settle and resume we make sure the payment is successful.
|
|
||||||
case routerrpc.ResolveHoldForwardAction_SETTLE:
|
|
||||||
fallthrough
|
|
||||||
|
|
||||||
case routerrpc.ResolveHoldForwardAction_RESUME:
|
|
||||||
require.Equal(t.t, lnrpc.HTLCAttempt_SUCCEEDED,
|
|
||||||
attempt.Status, "expected payment to succeed")
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
|
||||||
// We make sure here the interceptor has processed all packets before we
|
// We make sure here the interceptor has processed all packets before
|
||||||
// check the payment statuses.
|
// we check the payment statuses.
|
||||||
for i := 0; i < len(testCases); i++ {
|
for _, tc := range testCases {
|
||||||
select {
|
request := ht.ReceiveHtlcInterceptor(interceptor)
|
||||||
case request := <-interceptedChan:
|
|
||||||
// Assert sanity of informational packet data.
|
|
||||||
require.NotZero(t.t, request.OutgoingRequestedChanId)
|
|
||||||
require.NotZero(t.t, request.IncomingExpiry)
|
|
||||||
require.NotZero(t.t, request.IncomingAmountMsat)
|
|
||||||
|
|
||||||
require.Less(
|
// Assert sanity of informational packet data.
|
||||||
t.t,
|
require.NotZero(ht, request.OutgoingRequestedChanId)
|
||||||
request.OutgoingExpiry, request.IncomingExpiry,
|
require.NotZero(ht, request.IncomingExpiry)
|
||||||
)
|
require.NotZero(ht, request.IncomingAmountMsat)
|
||||||
require.Less(
|
|
||||||
t.t,
|
|
||||||
request.OutgoingAmountMsat,
|
|
||||||
request.IncomingAmountMsat,
|
|
||||||
)
|
|
||||||
|
|
||||||
value, ok := request.CustomRecords[customTestKey]
|
require.Less(ht, request.OutgoingExpiry,
|
||||||
require.True(t.t, ok, "expected custom record")
|
request.IncomingExpiry)
|
||||||
require.Equal(t.t, customTestValue, value)
|
require.Less(ht, request.OutgoingAmountMsat,
|
||||||
|
request.IncomingAmountMsat)
|
||||||
|
|
||||||
testCase := testCases[i]
|
value, ok := request.CustomRecords[customTestKey]
|
||||||
|
require.True(ht, ok, "expected custom record")
|
||||||
|
require.Equal(ht, customTestValue, value)
|
||||||
|
|
||||||
// For held packets we ignore, keeping them in hold status.
|
// For held packets we ignore, keeping them in hold status.
|
||||||
if testCase.shouldHold {
|
if tc.shouldHold {
|
||||||
continue
|
continue
|
||||||
}
|
|
||||||
|
|
||||||
// For all other packets we resolve according to the test case.
|
|
||||||
_ = interceptor.Send(&routerrpc.ForwardHtlcInterceptResponse{
|
|
||||||
IncomingCircuitKey: request.IncomingCircuitKey,
|
|
||||||
Action: testCase.interceptorAction,
|
|
||||||
Preimage: testCase.invoice.RPreimage,
|
|
||||||
})
|
|
||||||
case <-time.After(defaultTimeout):
|
|
||||||
t.Fatalf("response from interceptor was not received %v", i)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// For all other packets we resolve according to the test case.
|
||||||
|
err := interceptor.Send(&routerrpc.ForwardHtlcInterceptResponse{
|
||||||
|
IncomingCircuitKey: request.IncomingCircuitKey,
|
||||||
|
Action: tc.interceptorAction,
|
||||||
|
Preimage: tc.invoice.RPreimage,
|
||||||
|
})
|
||||||
|
require.NoError(ht, err, "failed to send request")
|
||||||
}
|
}
|
||||||
|
|
||||||
// At this point we are left with the held packets, we want to make sure
|
// At this point we are left with the held packets, we want to make
|
||||||
// each one of them has a corresponding 'in-flight' payment at
|
// sure each one of them has a corresponding 'in-flight' payment at
|
||||||
// Alice's node.
|
// Alice's node.
|
||||||
payments, err := testContext.alice.ListPayments(context.Background(),
|
|
||||||
&lnrpc.ListPaymentsRequest{IncludeIncomplete: true})
|
|
||||||
require.NoError(t.t, err, "failed to fetch payment")
|
|
||||||
|
|
||||||
for _, testCase := range testCases {
|
for _, testCase := range testCases {
|
||||||
if testCase.shouldHold {
|
if !testCase.shouldHold {
|
||||||
hashStr := hex.EncodeToString(testCase.invoice.RHash)
|
continue
|
||||||
var foundPayment *lnrpc.Payment
|
|
||||||
expectedAmt := testCase.invoice.ValueMsat
|
|
||||||
for _, p := range payments.Payments {
|
|
||||||
if p.PaymentHash == hashStr {
|
|
||||||
foundPayment = p
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
require.NotNil(t.t, foundPayment, fmt.Sprintf("expected "+
|
|
||||||
"to find pending payment for held htlc %v",
|
|
||||||
hashStr))
|
|
||||||
require.Equal(t.t, lnrpc.Payment_IN_FLIGHT,
|
|
||||||
foundPayment.Status, "expected payment to be "+
|
|
||||||
"in flight")
|
|
||||||
require.Equal(t.t, expectedAmt, foundPayment.ValueMsat,
|
|
||||||
"incorrect in flight amount")
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var preimage lntypes.Preimage
|
||||||
|
copy(preimage[:], testCase.invoice.RPreimage)
|
||||||
|
|
||||||
|
payment := ht.AssertPaymentStatus(
|
||||||
|
alice, preimage, lnrpc.Payment_IN_FLIGHT,
|
||||||
|
)
|
||||||
|
expectedAmt := testCase.invoice.ValueMsat
|
||||||
|
require.Equal(ht, expectedAmt, payment.ValueMsat,
|
||||||
|
"incorrect in flight amount")
|
||||||
}
|
}
|
||||||
|
|
||||||
// Disconnect interceptor should cause resume held packets.
|
// Cancel the context, which will disconnect the above interceptor.
|
||||||
// After that we wait for all go routines to finish, including the one
|
|
||||||
// that tests the payment final status for the held payment.
|
|
||||||
cancelInterceptor()
|
cancelInterceptor()
|
||||||
wg.Wait()
|
|
||||||
|
// Disconnecting the interceptor should resume the held packets. After that
|
||||||
|
// we wait for all go routines to finish, including the one that tests
|
||||||
|
// the payment final status for the held payment.
|
||||||
|
select {
|
||||||
|
case <-done:
|
||||||
|
case <-time.After(defaultTimeout):
|
||||||
|
require.Fail(ht, "timeout waiting for sending payment")
|
||||||
|
}
|
||||||
|
|
||||||
// Verify that we don't get notified about already completed HTLCs
|
// Verify that we don't get notified about already completed HTLCs
|
||||||
// We do that by restarting alice, the sender the HTLCs. Under
|
// We do that by restarting alice, the sender of the HTLCs. Under
|
||||||
// https://github.com/lightningnetwork/lnd/issues/5115
|
// https://github.com/lightningnetwork/lnd/issues/5115
|
||||||
// this should cause all HTLCs settled or failed by the interceptor to renotify.
|
// this should cause all HTLCs settled or failed by the interceptor to
|
||||||
restartAlice, err := net.SuspendNode(alice)
|
// renotify.
|
||||||
require.NoError(t.t, err, "failed to suspend alice")
|
restartAlice := ht.SuspendNode(alice)
|
||||||
|
require.NoError(ht, restartAlice(), "failed to restart alice")
|
||||||
|
|
||||||
ctxt, cancelInterceptor = context.WithTimeout(ctxb, defaultTimeout)
|
// Make sure the channel is active from Bob's PoV.
|
||||||
defer cancelInterceptor()
|
ht.AssertChannelExists(bob, cpAB)
|
||||||
interceptor, err = testContext.bob.RouterClient.HtlcInterceptor(ctxt)
|
|
||||||
require.NoError(t.t, err, "failed to create HtlcInterceptor")
|
|
||||||
|
|
||||||
err = restartAlice()
|
// Create a new interceptor as the old one has quit.
|
||||||
require.NoError(t.t, err, "failed to restart alice")
|
interceptor, cancelInterceptor = bob.RPC.HtlcInterceptor()
|
||||||
|
|
||||||
|
done = make(chan struct{})
|
||||||
go func() {
|
go func() {
|
||||||
request, err := interceptor.Recv()
|
defer close(done)
|
||||||
if err != nil {
|
|
||||||
// If it is just the error result of the context cancellation
|
_, err := interceptor.Recv()
|
||||||
// the we exit silently.
|
require.Error(ht, err, "expected an error from interceptor")
|
||||||
status, ok := status.FromError(err)
|
|
||||||
if ok && status.Code() == codes.Canceled {
|
status, ok := status.FromError(err)
|
||||||
return
|
switch {
|
||||||
}
|
// If it is just the error result of the context cancellation
|
||||||
// Otherwise it an unexpected error, we fail the test.
|
// then we exit silently.
|
||||||
require.NoError(
|
case ok && status.Code() == codes.Canceled:
|
||||||
t.t, err, "unexpected error in interceptor.Recv()",
|
fallthrough
|
||||||
)
|
|
||||||
|
// When the test ends, during the node's shutdown it will close
|
||||||
|
// the connection.
|
||||||
|
case strings.Contains(err.Error(), "closed network connection"):
|
||||||
|
fallthrough
|
||||||
|
|
||||||
|
case strings.Contains(err.Error(), "EOF"):
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
require.Nil(t.t, request, "no more intercepts should arrive")
|
// Otherwise we receive an unexpected error.
|
||||||
|
require.Failf(ht, "iinterceptor", "unexpected err: %v", err)
|
||||||
}()
|
}()
|
||||||
|
|
||||||
err = wait.Predicate(func() bool {
|
// Cancel the context, which will disconnect the above interceptor.
|
||||||
channels, err := bob.ListChannels(ctxt, &lnrpc.ListChannelsRequest{
|
cancelInterceptor()
|
||||||
ActiveOnly: true, Peer: alice.PubKey[:],
|
select {
|
||||||
})
|
case <-done:
|
||||||
return err == nil && len(channels.Channels) > 0
|
case <-time.After(defaultTimeout):
|
||||||
}, defaultTimeout)
|
require.Fail(ht, "timeout waiting for interceptor error")
|
||||||
require.NoError(t.t, err, "alice <> bob channel didn't re-activate")
|
|
||||||
}
|
|
||||||
|
|
||||||
// interceptorTestContext is a helper struct to hold the test context and
|
|
||||||
// provide the needed functionality.
|
|
||||||
type interceptorTestContext struct {
|
|
||||||
t *harnessTest
|
|
||||||
net *lntest.NetworkHarness
|
|
||||||
|
|
||||||
// Keep a list of all our active channels.
|
|
||||||
networkChans []*lnrpc.ChannelPoint
|
|
||||||
closeChannelFuncs []func()
|
|
||||||
|
|
||||||
alice, bob, carol *lntest.HarnessNode
|
|
||||||
nodes []*lntest.HarnessNode
|
|
||||||
}
|
|
||||||
|
|
||||||
func newInterceptorTestContext(t *harnessTest,
|
|
||||||
net *lntest.NetworkHarness,
|
|
||||||
alice, bob, carol *lntest.HarnessNode) *interceptorTestContext {
|
|
||||||
|
|
||||||
// Connect nodes
|
|
||||||
nodes := []*lntest.HarnessNode{alice, bob, carol}
|
|
||||||
for i := 0; i < len(nodes); i++ {
|
|
||||||
for j := i + 1; j < len(nodes); j++ {
|
|
||||||
net.EnsureConnected(t.t, nodes[i], nodes[j])
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
ctx := interceptorTestContext{
|
// Finally, close channels.
|
||||||
t: t,
|
ht.CloseChannel(alice, cpAB)
|
||||||
net: net,
|
ht.CloseChannel(bob, cpBC)
|
||||||
|
}
|
||||||
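Both interceptor tests shut their `Recv` goroutines down by treating three outcomes as a clean stream termination: a gRPC `Canceled` status (from `cancelInterceptor`), a closed network connection (node shutdown), and EOF. Anything else fails the test. A small sketch of that classification using the same `status`/`codes` calls and string matching; the helper name is illustrative:

```go
package sketch

import (
	"errors"
	"io"
	"strings"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// isBenignStreamEnd reports whether err merely signals that the interceptor
// stream was torn down rather than a genuine RPC failure.
func isBenignStreamEnd(err error) bool {
	if err == nil {
		return true
	}

	// Cancelling the stream's context surfaces as a Canceled status.
	if s, ok := status.FromError(err); ok && s.Code() == codes.Canceled {
		return true
	}

	// Node shutdown closes the underlying TCP connection.
	if strings.Contains(err.Error(), "closed network connection") {
		return true
	}

	// A plain EOF also just means the server side went away.
	return errors.Is(err, io.EOF) || strings.Contains(err.Error(), "EOF")
}
```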
|
|
||||||
|
// interceptorTestScenario is a helper struct to hold the test context and
|
||||||
|
// provide the needed functionality.
|
||||||
|
type interceptorTestScenario struct {
|
||||||
|
ht *lntemp.HarnessTest
|
||||||
|
alice, bob, carol *node.HarnessNode
|
||||||
|
}
|
||||||
|
|
||||||
|
// newInterceptorTestScenario initializes a new test scenario with three nodes
|
||||||
|
// and connects them to have the following topology,
|
||||||
|
//
|
||||||
|
// Alice --> Bob --> Carol
|
||||||
|
//
|
||||||
|
// Among them, Alice and Bob are standby nodes and Carol is a new node.
|
||||||
|
func newInterceptorTestScenario(
|
||||||
|
ht *lntemp.HarnessTest) *interceptorTestScenario {
|
||||||
|
|
||||||
|
alice, bob := ht.Alice, ht.Bob
|
||||||
|
carol := ht.NewNode("carol", nil)
|
||||||
|
|
||||||
|
ht.EnsureConnected(alice, bob)
|
||||||
|
ht.EnsureConnected(bob, carol)
|
||||||
|
|
||||||
|
return &interceptorTestScenario{
|
||||||
|
ht: ht,
|
||||||
alice: alice,
|
alice: alice,
|
||||||
bob: bob,
|
bob: bob,
|
||||||
carol: carol,
|
carol: carol,
|
||||||
nodes: nodes,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return &ctx
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// prepareTestCases prepares 4 tests:
|
// prepareTestCases prepares 4 tests:
|
||||||
@ -470,36 +377,41 @@ func newInterceptorTestContext(t *harnessTest,
|
|||||||
// 2. resumed htlc.
|
// 2. resumed htlc.
|
||||||
// 3. settling htlc externally.
|
// 3. settling htlc externally.
|
||||||
// 4. held htlc that is resumed later.
|
// 4. held htlc that is resumed later.
|
||||||
func (c *interceptorTestContext) prepareTestCases() []*interceptorTestCase {
|
func (c *interceptorTestScenario) prepareTestCases() []*interceptorTestCase {
|
||||||
|
var (
|
||||||
|
actionFail = routerrpc.ResolveHoldForwardAction_FAIL
|
||||||
|
actionResume = routerrpc.ResolveHoldForwardAction_RESUME
|
||||||
|
actionSettle = routerrpc.ResolveHoldForwardAction_SETTLE
|
||||||
|
)
|
||||||
|
|
||||||
cases := []*interceptorTestCase{
|
cases := []*interceptorTestCase{
|
||||||
{amountMsat: 1000, shouldHold: false,
|
{
|
||||||
interceptorAction: routerrpc.ResolveHoldForwardAction_FAIL},
|
amountMsat: 1000, shouldHold: false,
|
||||||
{amountMsat: 1000, shouldHold: false,
|
interceptorAction: actionFail,
|
||||||
interceptorAction: routerrpc.ResolveHoldForwardAction_RESUME},
|
},
|
||||||
{amountMsat: 1000, shouldHold: false,
|
{
|
||||||
interceptorAction: routerrpc.ResolveHoldForwardAction_SETTLE},
|
amountMsat: 1000, shouldHold: false,
|
||||||
{amountMsat: 1000, shouldHold: true,
|
interceptorAction: actionResume,
|
||||||
interceptorAction: routerrpc.ResolveHoldForwardAction_RESUME},
|
},
|
||||||
|
{
|
||||||
|
amountMsat: 1000, shouldHold: false,
|
||||||
|
interceptorAction: actionSettle,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
amountMsat: 1000, shouldHold: true,
|
||||||
|
interceptorAction: actionResume,
|
||||||
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, t := range cases {
|
for _, t := range cases {
|
||||||
addResponse, err := c.carol.AddInvoice(context.Background(), &lnrpc.Invoice{
|
inv := &lnrpc.Invoice{ValueMsat: t.amountMsat}
|
||||||
ValueMsat: t.amountMsat,
|
addResponse := c.carol.RPC.AddInvoice(inv)
|
||||||
})
|
invoice := c.carol.RPC.LookupInvoice(addResponse.RHash)
|
||||||
require.NoError(c.t.t, err, "unable to add invoice")
|
|
||||||
|
|
||||||
invoice, err := c.carol.LookupInvoice(context.Background(), &lnrpc.PaymentHash{
|
|
||||||
RHashStr: hex.EncodeToString(addResponse.RHash),
|
|
||||||
})
|
|
||||||
require.NoError(c.t.t, err, "unable to find invoice")
|
|
||||||
|
|
||||||
// We'll need to also decode the returned invoice so we can
|
// We'll need to also decode the returned invoice so we can
|
||||||
// grab the payment address which is now required for ALL
|
// grab the payment address which is now required for ALL
|
||||||
// payments.
|
// payments.
|
||||||
payReq, err := c.carol.DecodePayReq(context.Background(), &lnrpc.PayReqString{
|
payReq := c.carol.RPC.DecodePayReq(invoice.PaymentRequest)
|
||||||
PayReq: invoice.PaymentRequest,
|
|
||||||
})
|
|
||||||
require.NoError(c.t.t, err, "unable to decode invoice")
|
|
||||||
|
|
||||||
t.invoice = invoice
|
t.invoice = invoice
|
||||||
t.payAddr = payReq.PaymentAddr
|
t.payAddr = payReq.PaymentAddr
|
||||||
@ -507,69 +419,15 @@ func (c *interceptorTestContext) prepareTestCases() []*interceptorTestCase {
|
|||||||
return cases
|
return cases
|
||||||
}
|
}
|
||||||
|
|
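prepareTestCases decodes every invoice because the payment address embedded in the BOLT 11 payment request is now required for all payments, including the SendToRoute calls these tests make. A hedged sketch of that lookup with a bare `lnrpc.LightningClient`; the helper name is illustrative:

```go
package sketch

import (
	"context"

	"github.com/lightningnetwork/lnd/lnrpc"
)

// paymentAddrFor decodes paymentRequest and returns the payment address the
// sender must place in the MPP record of the route's final hop.
func paymentAddrFor(ctx context.Context, client lnrpc.LightningClient,
	paymentRequest string) ([]byte, error) {

	payReq, err := client.DecodePayReq(ctx, &lnrpc.PayReqString{
		PayReq: paymentRequest,
	})
	if err != nil {
		return nil, err
	}

	return payReq.PaymentAddr, nil
}
```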
||||||
func (c *interceptorTestContext) openChannel(from, to *lntest.HarnessNode,
|
// sendPaymentAndAssertAction sends a payment from alice to carol and asserts
|
||||||
chanSize btcutil.Amount) {
|
// that the specified interceptor action is taken.
|
||||||
|
func (c *interceptorTestScenario) sendPaymentAndAssertAction(
|
||||||
c.net.SendCoins(c.t.t, btcutil.SatoshiPerBitcoin, from)
|
tc *interceptorTestCase) *lnrpc.HTLCAttempt {
|
||||||
|
|
||||||
chanPoint := openChannelAndAssert(
|
|
||||||
c.t, c.net, from, to,
|
|
||||||
lntest.OpenChannelParams{
|
|
||||||
Amt: chanSize,
|
|
||||||
},
|
|
||||||
)
|
|
||||||
|
|
||||||
c.closeChannelFuncs = append(c.closeChannelFuncs, func() {
|
|
||||||
closeChannelAndAssert(c.t, c.net, from, chanPoint, false)
|
|
||||||
})
|
|
||||||
|
|
||||||
c.networkChans = append(c.networkChans, chanPoint)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *interceptorTestContext) closeChannels() {
|
|
||||||
for _, f := range c.closeChannelFuncs {
|
|
||||||
f()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *interceptorTestContext) waitForChannels() {
|
|
||||||
// Wait for all nodes to have seen all channels.
|
|
||||||
for _, chanPoint := range c.networkChans {
|
|
||||||
for _, node := range c.nodes {
|
|
||||||
txid, err := lnrpc.GetChanPointFundingTxid(chanPoint)
|
|
||||||
require.NoError(c.t.t, err, "unable to get txid")
|
|
||||||
|
|
||||||
point := wire.OutPoint{
|
|
||||||
Hash: *txid,
|
|
||||||
Index: chanPoint.OutputIndex,
|
|
||||||
}
|
|
||||||
|
|
||||||
err = node.WaitForNetworkChannelOpen(chanPoint)
|
|
||||||
require.NoError(c.t.t, err, fmt.Sprintf("(%d): timeout "+
|
|
||||||
"waiting for channel(%s) open", node.NodeID,
|
|
||||||
point))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// sendAliceToCarolPayment sends a payment from alice to carol and make an
|
|
||||||
// attempt to pay. The lnrpc.HTLCAttempt is returned.
|
|
||||||
func (c *interceptorTestContext) sendAliceToCarolPayment(ctx context.Context,
|
|
||||||
amtMsat int64,
|
|
||||||
paymentHash, paymentAddr []byte) (*lnrpc.HTLCAttempt, error) {
|
|
||||||
|
|
||||||
// Build a route from alice to carol.
|
// Build a route from alice to carol.
|
||||||
route, err := c.buildRoute(
|
route := c.buildRoute(
|
||||||
ctx, amtMsat, []*lntest.HarnessNode{c.bob, c.carol},
|
tc.amountMsat, []*node.HarnessNode{c.bob, c.carol}, tc.payAddr,
|
||||||
paymentAddr,
|
|
||||||
)
|
)
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
sendReq := &routerrpc.SendToRouteRequest{
|
|
||||||
PaymentHash: paymentHash,
|
|
||||||
Route: route,
|
|
||||||
}
|
|
||||||
|
|
||||||
// Send a custom record to the forwarding node.
|
// Send a custom record to the forwarding node.
|
||||||
route.Hops[0].CustomRecords = map[uint64][]byte{
|
route.Hops[0].CustomRecords = map[uint64][]byte{
|
||||||
@ -577,21 +435,51 @@ func (c *interceptorTestContext) sendAliceToCarolPayment(ctx context.Context,
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Send the payment.
|
// Send the payment.
|
||||||
return c.alice.RouterClient.SendToRouteV2(ctx, sendReq)
|
sendReq := &routerrpc.SendToRouteRequest{
|
||||||
|
PaymentHash: tc.invoice.RHash,
|
||||||
|
Route: route,
|
||||||
|
}
|
||||||
|
|
||||||
|
return c.alice.RPC.SendToRouteV2(sendReq)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *interceptorTestScenario) assertAction(tc *interceptorTestCase,
|
||||||
|
attempt *lnrpc.HTLCAttempt) {
|
||||||
|
|
||||||
|
// Now check the expected action has been taken.
|
||||||
|
switch tc.interceptorAction {
|
||||||
|
// For 'fail' interceptor action we make sure the payment failed.
|
||||||
|
case routerrpc.ResolveHoldForwardAction_FAIL:
|
||||||
|
require.Equal(c.ht, lnrpc.HTLCAttempt_FAILED, attempt.Status,
|
||||||
|
"expected payment to fail")
|
||||||
|
|
||||||
|
// Assert that we get a temporary channel failure which has a
|
||||||
|
// channel update.
|
||||||
|
require.NotNil(c.ht, attempt.Failure)
|
||||||
|
require.NotNil(c.ht, attempt.Failure.ChannelUpdate)
|
||||||
|
|
||||||
|
require.Equal(c.ht, lnrpc.Failure_TEMPORARY_CHANNEL_FAILURE,
|
||||||
|
attempt.Failure.Code)
|
||||||
|
|
||||||
|
// For settle and resume we make sure the payment is successful.
|
||||||
|
case routerrpc.ResolveHoldForwardAction_SETTLE:
|
||||||
|
fallthrough
|
||||||
|
|
||||||
|
case routerrpc.ResolveHoldForwardAction_RESUME:
|
||||||
|
require.Equal(c.ht, lnrpc.HTLCAttempt_SUCCEEDED,
|
||||||
|
attempt.Status, "expected payment to succeed")
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// buildRoute is a helper function to build a route with given hops.
|
// buildRoute is a helper function to build a route with given hops.
|
||||||
func (c *interceptorTestContext) buildRoute(ctx context.Context, amtMsat int64,
|
func (c *interceptorTestScenario) buildRoute(amtMsat int64,
|
||||||
hops []*lntest.HarnessNode, payAddr []byte) (*lnrpc.Route, error) {
|
hops []*node.HarnessNode, payAddr []byte) *lnrpc.Route {
|
||||||
|
|
||||||
rpcHops := make([][]byte, 0, len(hops))
|
rpcHops := make([][]byte, 0, len(hops))
|
||||||
for _, hop := range hops {
|
for _, hop := range hops {
|
||||||
k := hop.PubKeyStr
|
k := hop.PubKeyStr
|
||||||
pubkey, err := route.NewVertexFromStr(k)
|
pubkey, err := route.NewVertexFromStr(k)
|
||||||
if err != nil {
|
require.NoErrorf(c.ht, err, "error parsing %v: %v", k, err)
|
||||||
return nil, fmt.Errorf("error parsing %v: %v",
|
|
||||||
k, err)
|
|
||||||
}
|
|
||||||
rpcHops = append(rpcHops, pubkey[:])
|
rpcHops = append(rpcHops, pubkey[:])
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -602,10 +490,7 @@ func (c *interceptorTestContext) buildRoute(ctx context.Context, amtMsat int64,
|
|||||||
PaymentAddr: payAddr,
|
PaymentAddr: payAddr,
|
||||||
}
|
}
|
||||||
|
|
||||||
routeResp, err := c.alice.RouterClient.BuildRoute(ctx, req)
|
routeResp := c.alice.RPC.BuildRoute(req)
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return routeResp.Route, nil
|
return routeResp.Route
|
||||||
}
|
}
|
||||||
|
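buildRoute above is a thin wrapper over the router subserver's `BuildRoute` call: hop pubkeys are parsed into 33-byte vertices and handed to lnd, which assembles the full route. A stand-alone sketch of the same call against a plain `routerrpc.RouterClient`; the helper name and the final CLTV delta value are illustrative:

```go
package sketch

import (
	"context"

	"github.com/lightningnetwork/lnd/lnrpc"
	"github.com/lightningnetwork/lnd/lnrpc/routerrpc"
	"github.com/lightningnetwork/lnd/routing/route"
)

// buildRoute asks lnd to assemble a route through hopPubKeys for amtMsat,
// binding it to payAddr so the receiver will accept the HTLC set.
func buildRoute(ctx context.Context, router routerrpc.RouterClient,
	amtMsat int64, hopPubKeys []string,
	payAddr []byte) (*lnrpc.Route, error) {

	rpcHops := make([][]byte, 0, len(hopPubKeys))
	for _, k := range hopPubKeys {
		// Each hop is the hex-encoded 33-byte compressed pubkey.
		vertex, err := route.NewVertexFromStr(k)
		if err != nil {
			return nil, err
		}
		rpcHops = append(rpcHops, vertex[:])
	}

	resp, err := router.BuildRoute(ctx, &routerrpc.BuildRouteRequest{
		AmtMsat:        amtMsat,
		FinalCltvDelta: 40, // illustrative value
		HopPubkeys:     rpcHops,
		PaymentAddr:    payAddr,
	})
	if err != nil {
		return nil, err
	}

	return resp.Route, nil
}
```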
@ -1,27 +1,23 @@
|
|||||||
package itest
|
package itest
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/btcsuite/btcd/btcutil"
|
"github.com/btcsuite/btcd/btcutil"
|
||||||
"github.com/btcsuite/btcd/wire"
|
|
||||||
"github.com/lightningnetwork/lnd/chainreg"
|
"github.com/lightningnetwork/lnd/chainreg"
|
||||||
"github.com/lightningnetwork/lnd/lnrpc"
|
"github.com/lightningnetwork/lnd/lnrpc"
|
||||||
"github.com/lightningnetwork/lnd/lnrpc/routerrpc"
|
"github.com/lightningnetwork/lnd/lnrpc/routerrpc"
|
||||||
"github.com/lightningnetwork/lnd/lntest"
|
"github.com/lightningnetwork/lnd/lntemp"
|
||||||
|
"github.com/lightningnetwork/lnd/lntemp/node"
|
||||||
|
"github.com/lightningnetwork/lnd/lntypes"
|
||||||
"github.com/lightningnetwork/lnd/routing/route"
|
"github.com/lightningnetwork/lnd/routing/route"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
)
|
)
|
||||||
|
|
||||||
// testSendToRouteMultiPath tests that we are able to successfully route a
|
// testSendToRouteMultiPath tests that we are able to successfully route a
|
||||||
// payment using multiple shards across different paths, by using SendToRoute.
|
// payment using multiple shards across different paths, by using SendToRoute.
|
||||||
func testSendToRouteMultiPath(net *lntest.NetworkHarness, t *harnessTest) {
|
func testSendToRouteMultiPath(ht *lntemp.HarnessTest) {
|
||||||
ctxb := context.Background()
|
mts := newMppTestScenario(ht)
|
||||||
|
|
||||||
ctx := newMppTestContext(t, net)
|
|
||||||
defer ctx.shutdownNodes()
|
|
||||||
|
|
||||||
// To ensure the payment goes through separate paths, we'll set a
|
// To ensure the payment goes through separate paths, we'll set a
|
||||||
// channel size that can only carry one shard at a time. We'll divide
|
// channel size that can only carry one shard at a time. We'll divide
|
||||||
@ -39,55 +35,41 @@ func testSendToRouteMultiPath(net *lntest.NetworkHarness, t *harnessTest) {
|
|||||||
// \ /
|
// \ /
|
||||||
// \__ Dave ____/
|
// \__ Dave ____/
|
||||||
//
|
//
|
||||||
ctx.openChannel(ctx.carol, ctx.bob, chanAmt)
|
req := &mppOpenChannelRequest{
|
||||||
ctx.openChannel(ctx.dave, ctx.bob, chanAmt)
|
// Since the channel Alice-> Carol will have to carry two
|
||||||
ctx.openChannel(ctx.alice, ctx.dave, chanAmt)
|
// shards, we make it larger.
|
||||||
ctx.openChannel(ctx.eve, ctx.bob, chanAmt)
|
amtAliceCarol: chanAmt + shardAmt,
|
||||||
ctx.openChannel(ctx.carol, ctx.eve, chanAmt)
|
amtAliceDave: chanAmt,
|
||||||
|
amtCarolBob: chanAmt,
|
||||||
// Since the channel Alice-> Carol will have to carry two
|
amtCarolEve: chanAmt,
|
||||||
// shards, we make it larger.
|
amtDaveBob: chanAmt,
|
||||||
ctx.openChannel(ctx.alice, ctx.carol, chanAmt+shardAmt)
|
amtEveBob: chanAmt,
|
||||||
|
}
|
||||||
defer ctx.closeChannels()
|
mts.openChannels(req)
|
||||||
|
|
||||||
ctx.waitForChannels()
|
|
||||||
|
|
||||||
// Make Bob create an invoice for Alice to pay.
|
// Make Bob create an invoice for Alice to pay.
|
||||||
payReqs, rHashes, invoices, err := createPayReqs(
|
payReqs, rHashes, invoices := ht.CreatePayReqs(mts.bob, paymentAmt, 1)
|
||||||
ctx.bob, paymentAmt, 1,
|
|
||||||
)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("unable to create pay reqs: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
rHash := rHashes[0]
|
rHash := rHashes[0]
|
||||||
payReq := payReqs[0]
|
payReq := payReqs[0]
|
||||||
|
|
||||||
ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
|
decodeResp := mts.bob.RPC.DecodePayReq(payReq)
|
||||||
decodeResp, err := ctx.bob.DecodePayReq(
|
|
||||||
ctxt, &lnrpc.PayReqString{PayReq: payReq},
|
|
||||||
)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("decode pay req: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
payAddr := decodeResp.PaymentAddr
|
payAddr := decodeResp.PaymentAddr
|
||||||
|
|
||||||
|
// Subscribe the invoice.
|
||||||
|
stream := mts.bob.RPC.SubscribeSingleInvoice(rHash)
|
||||||
|
|
||||||
// We'll send shards along three routes from Alice.
|
// We'll send shards along three routes from Alice.
|
||||||
sendRoutes := [][]*lntest.HarnessNode{
|
sendRoutes := [][]*node.HarnessNode{
|
||||||
{ctx.carol, ctx.bob},
|
{mts.carol, mts.bob},
|
||||||
{ctx.dave, ctx.bob},
|
{mts.dave, mts.bob},
|
||||||
{ctx.carol, ctx.eve, ctx.bob},
|
{mts.carol, mts.eve, mts.bob},
|
||||||
}
|
}
|
||||||
|
|
||||||
responses := make(chan *lnrpc.HTLCAttempt, len(sendRoutes))
|
responses := make(chan *lnrpc.HTLCAttempt, len(sendRoutes))
|
||||||
for _, hops := range sendRoutes {
|
for _, hops := range sendRoutes {
|
||||||
// Build a route for the specified hops.
|
// Build a route for the specified hops.
|
||||||
r, err := ctx.buildRoute(ctxb, shardAmt, ctx.alice, hops)
|
r := mts.buildRoute(shardAmt, mts.alice, hops)
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("unable to build route: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set the MPP records to indicate this is a payment shard.
|
// Set the MPP records to indicate this is a payment shard.
|
||||||
hop := r.Hops[len(r.Hops)-1]
|
hop := r.Hops[len(r.Hops)-1]
|
||||||
@ -103,62 +85,44 @@ func testSendToRouteMultiPath(net *lntest.NetworkHarness, t *harnessTest) {
|
|||||||
Route: r,
|
Route: r,
|
||||||
}
|
}
|
||||||
|
|
||||||
// We'll send all shards in their own goroutine, since SendToRoute will
|
// We'll send all shards in their own goroutine, since
|
||||||
// block as long as the payment is in flight.
|
// SendToRoute will block as long as the payment is in flight.
|
||||||
go func() {
|
go func() {
|
||||||
ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
|
resp := mts.alice.RPC.SendToRouteV2(sendReq)
|
||||||
resp, err := ctx.alice.RouterClient.SendToRouteV2(ctxt, sendReq)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("unable to send payment: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
responses <- resp
|
responses <- resp
|
||||||
}()
|
}()
|
||||||
}
|
}
|
||||||
|
|
||||||
// Wait for all responses to be back, and check that they all
|
// Wait for all responses to be back, and check that they all
|
||||||
// succeeded.
|
// succeeded.
|
||||||
|
timer := time.After(defaultTimeout)
|
||||||
for range sendRoutes {
|
for range sendRoutes {
|
||||||
var resp *lnrpc.HTLCAttempt
|
var resp *lnrpc.HTLCAttempt
|
||||||
select {
|
select {
|
||||||
case resp = <-responses:
|
case resp = <-responses:
|
||||||
case <-time.After(defaultTimeout):
|
case <-timer:
|
||||||
t.Fatalf("response not received")
|
require.Fail(ht, "response not received")
|
||||||
}
|
}
|
||||||
|
|
||||||
if resp.Failure != nil {
|
require.Nil(ht, resp.Failure, "received payment failure")
|
||||||
t.Fatalf("received payment failure : %v", resp.Failure)
|
|
||||||
}
|
|
||||||
|
|
||||||
// All shards should come back with the preimage.
|
// All shards should come back with the preimage.
|
||||||
if !bytes.Equal(resp.Preimage, invoices[0].RPreimage) {
|
require.Equal(ht, resp.Preimage, invoices[0].RPreimage,
|
||||||
t.Fatalf("preimage doesn't match")
|
"preimage doesn't match")
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// assertNumHtlcs is a helper that checks the node's latest payment,
|
// assertNumHtlcs is a helper that checks the node's latest payment,
|
||||||
// and asserts it was split into num shards.
|
// and asserts it was split into num shards.
|
||||||
assertNumHtlcs := func(node *lntest.HarnessNode, num int) {
|
assertNumHtlcs := func(hn *node.HarnessNode, num int) {
|
||||||
req := &lnrpc.ListPaymentsRequest{
|
var preimage lntypes.Preimage
|
||||||
IncludeIncomplete: true,
|
copy(preimage[:], invoices[0].RPreimage)
|
||||||
}
|
|
||||||
ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
|
|
||||||
paymentsResp, err := node.ListPayments(ctxt, req)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("error when obtaining payments: %v",
|
|
||||||
err)
|
|
||||||
}
|
|
||||||
|
|
||||||
payments := paymentsResp.Payments
|
payment := ht.AssertPaymentStatus(
|
||||||
if len(payments) == 0 {
|
hn, preimage, lnrpc.Payment_SUCCEEDED,
|
||||||
t.Fatalf("no payments found")
|
)
|
||||||
}
|
|
||||||
|
|
||||||
payment := payments[len(payments)-1]
|
|
||||||
htlcs := payment.Htlcs
|
htlcs := payment.Htlcs
|
||||||
if len(htlcs) == 0 {
|
require.NotEmpty(ht, htlcs, "no htlcs")
|
||||||
t.Fatalf("no htlcs")
|
|
||||||
}
|
|
||||||
|
|
||||||
succeeded := 0
|
succeeded := 0
|
||||||
for _, htlc := range htlcs {
|
for _, htlc := range htlcs {
|
||||||
@ -166,194 +130,217 @@ func testSendToRouteMultiPath(net *lntest.NetworkHarness, t *harnessTest) {
|
|||||||
succeeded++
|
succeeded++
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
require.Equal(ht, num, succeeded, "HTLCs not matched")
|
||||||
if succeeded != num {
|
|
||||||
t.Fatalf("expected %v succussful HTLCs, got %v", num,
|
|
||||||
succeeded)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// assertSettledInvoice checks that the invoice for the given payment
|
// assertSettledInvoice checks that the invoice for the given payment
|
||||||
// hash is settled, and has been paid using num HTLCs.
|
// hash is settled, and has been paid using num HTLCs.
|
||||||
assertSettledInvoice := func(node *lntest.HarnessNode, rhash []byte,
|
assertSettledInvoice := func(rhash []byte, num int) {
|
||||||
num int) {
|
var payHash lntypes.Hash
|
||||||
|
copy(payHash[:], rhash)
|
||||||
|
inv := ht.AssertInvoiceState(stream, lnrpc.Invoice_SETTLED)
|
||||||
|
|
||||||
found := false
|
// Assert that the amount paid to the invoice is correct.
|
||||||
offset := uint64(0)
|
require.EqualValues(ht, paymentAmt, inv.AmtPaidSat,
|
||||||
for !found {
|
"incorrect payment amt")
|
||||||
ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
|
|
||||||
invoicesResp, err := node.ListInvoices(
|
|
||||||
ctxt, &lnrpc.ListInvoiceRequest{
|
|
||||||
IndexOffset: offset,
|
|
||||||
},
|
|
||||||
)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("error when obtaining payments: %v",
|
|
||||||
err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(invoicesResp.Invoices) == 0 {
|
require.Len(ht, inv.Htlcs, num, "wrong num of HTLCs")
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, inv := range invoicesResp.Invoices {
|
|
||||||
if !bytes.Equal(inv.RHash, rhash) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// Assert that the amount paid to the invoice is
|
|
||||||
// correct.
|
|
||||||
if inv.AmtPaidSat != int64(paymentAmt) {
|
|
||||||
t.Fatalf("incorrect payment amt for "+
|
|
||||||
"invoicewant: %d, got %d",
|
|
||||||
paymentAmt, inv.AmtPaidSat)
|
|
||||||
}
|
|
||||||
|
|
||||||
if inv.State != lnrpc.Invoice_SETTLED {
|
|
||||||
t.Fatalf("Invoice not settled: %v",
|
|
||||||
inv.State)
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(inv.Htlcs) != num {
|
|
||||||
t.Fatalf("expected invoice to be "+
|
|
||||||
"settled with %v HTLCs, had %v",
|
|
||||||
num, len(inv.Htlcs))
|
|
||||||
}
|
|
||||||
|
|
||||||
found = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
offset = invoicesResp.LastIndexOffset
|
|
||||||
}
|
|
||||||
|
|
||||||
if !found {
|
|
||||||
t.Fatalf("invoice not found")
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Finally check that the payment shows up with three settled HTLCs in
|
// Finally check that the payment shows up with three settled HTLCs in
|
||||||
// Alice's list of payments...
|
// Alice's list of payments...
|
||||||
assertNumHtlcs(ctx.alice, 3)
|
assertNumHtlcs(mts.alice, 3)
|
||||||
|
|
||||||
// ...and in Bob's list of paid invoices.
|
// ...and in Bob's list of paid invoices.
|
||||||
assertSettledInvoice(ctx.bob, rHash, 3)
|
assertSettledInvoice(rHash, 3)
|
||||||
|
|
||||||
|
// Finally, close all channels.
|
||||||
|
mts.closeChannels()
|
||||||
}
|
}
|
||||||
|
|
||||||
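
For reference, the channel sizing used above follows directly from the three shard routes: Carol is the first hop for two of them, so the Alice->Carol channel needs room for two shards while Alice->Dave only needs one. A minimal, self-contained sketch of that counting (the route lists mirror `sendRoutes` above; plain strings stand in for harness nodes, so this is illustrative only):

```go
package main

import "fmt"

func main() {
	// The three shard routes used by the test, each carrying one shard.
	routes := [][]string{
		{"carol", "bob"},
		{"dave", "bob"},
		{"carol", "eve", "bob"},
	}

	// Count how many shards leave Alice through each first hop.
	shardsPerFirstHop := make(map[string]int)
	for _, hops := range routes {
		shardsPerFirstHop[hops[0]]++
	}

	// Alice->Carol must carry two shards and Alice->Dave only one, which
	// is why the scenario opens Alice->Carol with chanAmt+shardAmt and
	// Alice->Dave with just chanAmt.
	fmt.Println(shardsPerFirstHop) // map[carol:2 dave:1]
}
```
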
type mppTestContext struct {
|
// mppTestScenario defines a test scenario used for testing MPP-related tests.
|
||||||
t *harnessTest
|
// It has two standby nodes, alice and bob, and three new nodes, carol, dave,
|
||||||
net *lntest.NetworkHarness
|
// and eve.
|
||||||
|
type mppTestScenario struct {
|
||||||
|
ht *lntemp.HarnessTest
|
||||||
|
|
||||||
|
alice, bob, carol, dave, eve *node.HarnessNode
|
||||||
|
nodes []*node.HarnessNode
|
||||||
|
|
||||||
// Keep a list of all our active channels.
|
// Keep a list of all our active channels.
|
||||||
networkChans []*lnrpc.ChannelPoint
|
channelPoints []*lnrpc.ChannelPoint
|
||||||
closeChannelFuncs []func()
|
|
||||||
|
|
||||||
alice, bob, carol, dave, eve *lntest.HarnessNode
|
|
||||||
nodes []*lntest.HarnessNode
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func newMppTestContext(t *harnessTest,
|
// newMppTestScenario initializes a new mpp test scenario with five funded
|
||||||
net *lntest.NetworkHarness) *mppTestContext {
|
// nodes and connects them to have the following topology,
|
||||||
|
//
|
||||||
alice := net.NewNode(t.t, "alice", nil)
|
// _ Eve _
|
||||||
bob := net.NewNode(t.t, "bob", []string{"--accept-amp"})
|
// / \
|
||||||
|
// Alice -- Carol ---- Bob
|
||||||
|
// \ /
|
||||||
|
// \__ Dave ____/
|
||||||
|
func newMppTestScenario(ht *lntemp.HarnessTest) *mppTestScenario {
|
||||||
|
alice, bob := ht.Alice, ht.Bob
|
||||||
|
ht.RestartNodeWithExtraArgs(bob, []string{
|
||||||
|
"--maxpendingchannels=2",
|
||||||
|
"--accept-amp",
|
||||||
|
})
|
||||||
|
|
||||||
// Create a five-node context consisting of Alice, Bob and three new
|
// Create a five-node context consisting of Alice, Bob and three new
|
||||||
// nodes.
|
// nodes.
|
||||||
carol := net.NewNode(t.t, "carol", nil)
|
carol := ht.NewNode("carol", []string{
|
||||||
dave := net.NewNode(t.t, "dave", nil)
|
"--maxpendingchannels=2",
|
||||||
eve := net.NewNode(t.t, "eve", nil)
|
"--accept-amp",
|
||||||
|
})
|
||||||
|
dave := ht.NewNode("dave", nil)
|
||||||
|
eve := ht.NewNode("eve", nil)
|
||||||
|
|
||||||
// Connect nodes to ensure propagation of channels.
|
// Connect nodes to ensure propagation of channels.
|
||||||
nodes := []*lntest.HarnessNode{alice, bob, carol, dave, eve}
|
ht.EnsureConnected(alice, carol)
|
||||||
for i := 0; i < len(nodes); i++ {
|
ht.EnsureConnected(alice, dave)
|
||||||
for j := i + 1; j < len(nodes); j++ {
|
ht.EnsureConnected(carol, bob)
|
||||||
net.EnsureConnected(t.t, nodes[i], nodes[j])
|
ht.EnsureConnected(carol, eve)
|
||||||
}
|
ht.EnsureConnected(dave, bob)
|
||||||
|
ht.EnsureConnected(eve, bob)
|
||||||
|
|
||||||
|
// Send coins to the nodes and mine 1 block to confirm them.
|
||||||
|
for i := 0; i < 2; i++ {
|
||||||
|
ht.FundCoinsUnconfirmed(btcutil.SatoshiPerBitcoin, carol)
|
||||||
|
ht.FundCoinsUnconfirmed(btcutil.SatoshiPerBitcoin, dave)
|
||||||
|
ht.FundCoinsUnconfirmed(btcutil.SatoshiPerBitcoin, eve)
|
||||||
|
ht.MineBlocks(1)
|
||||||
}
|
}
|
||||||
|
|
||||||
ctx := mppTestContext{
|
mts := &mppTestScenario{
|
||||||
t: t,
|
ht: ht,
|
||||||
net: net,
|
|
||||||
alice: alice,
|
alice: alice,
|
||||||
bob: bob,
|
bob: bob,
|
||||||
carol: carol,
|
carol: carol,
|
||||||
dave: dave,
|
dave: dave,
|
||||||
eve: eve,
|
eve: eve,
|
||||||
nodes: nodes,
|
nodes: []*node.HarnessNode{alice, bob, carol, dave, eve},
|
||||||
}
|
}
|
||||||
|
|
||||||
return &ctx
|
return mts
|
||||||
}
|
}
|
||||||
|
|
||||||
// openChannel is a helper to open a channel from->to.
|
// mppOpenChannelRequest defines the amounts used for each channel opening.
|
||||||
func (c *mppTestContext) openChannel(from, to *lntest.HarnessNode,
|
type mppOpenChannelRequest struct {
|
||||||
chanSize btcutil.Amount) {
|
// Channel Alice=>Carol.
|
||||||
|
amtAliceCarol btcutil.Amount
|
||||||
|
|
||||||
c.net.SendCoins(c.t.t, btcutil.SatoshiPerBitcoin, from)
|
// Channel Alice=>Dave.
|
||||||
|
amtAliceDave btcutil.Amount
|
||||||
|
|
||||||
chanPoint := openChannelAndAssert(
|
// Channel Carol=>Bob.
|
||||||
c.t, c.net, from, to,
|
amtCarolBob btcutil.Amount
|
||||||
lntest.OpenChannelParams{Amt: chanSize},
|
|
||||||
)
|
|
||||||
|
|
||||||
c.closeChannelFuncs = append(c.closeChannelFuncs, func() {
|
// Channel Carol=>Eve.
|
||||||
closeChannelAndAssert(c.t, c.net, from, chanPoint, false)
|
amtCarolEve btcutil.Amount
|
||||||
})
|
|
||||||
|
|
||||||
c.networkChans = append(c.networkChans, chanPoint)
|
// Channel Dave=>Bob.
|
||||||
|
amtDaveBob btcutil.Amount
|
||||||
|
|
||||||
|
// Channel Eve=>Bob.
|
||||||
|
amtEveBob btcutil.Amount
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *mppTestContext) closeChannels() {
|
// openChannels is a helper to open channels that sets up a network topology
|
||||||
for _, f := range c.closeChannelFuncs {
|
// with three different paths Alice <-> Bob as follows:
|
||||||
f()
|
//
|
||||||
|
// _ Eve _
|
||||||
|
// / \
|
||||||
|
// Alice -- Carol ---- Bob
|
||||||
|
// \ /
|
||||||
|
// \__ Dave ____/
|
||||||
|
//
|
||||||
|
// NOTE: all the channels are opened together to reduce the number of blocks mined.
|
||||||
|
func (m *mppTestScenario) openChannels(r *mppOpenChannelRequest) {
|
||||||
|
reqs := []*lntemp.OpenChannelRequest{
|
||||||
|
{
|
||||||
|
Local: m.alice,
|
||||||
|
Remote: m.carol,
|
||||||
|
Param: lntemp.OpenChannelParams{Amt: r.amtAliceCarol},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Local: m.alice,
|
||||||
|
Remote: m.dave,
|
||||||
|
Param: lntemp.OpenChannelParams{Amt: r.amtAliceDave},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Local: m.carol,
|
||||||
|
Remote: m.bob,
|
||||||
|
Param: lntemp.OpenChannelParams{Amt: r.amtCarolBob},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Local: m.carol,
|
||||||
|
Remote: m.eve,
|
||||||
|
Param: lntemp.OpenChannelParams{Amt: r.amtCarolEve},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Local: m.dave,
|
||||||
|
Remote: m.bob,
|
||||||
|
Param: lntemp.OpenChannelParams{Amt: r.amtDaveBob},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Local: m.eve,
|
||||||
|
Remote: m.bob,
|
||||||
|
Param: lntemp.OpenChannelParams{Amt: r.amtEveBob},
|
||||||
|
},
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
func (c *mppTestContext) shutdownNodes() {
|
m.channelPoints = m.ht.OpenMultiChannelsAsync(reqs)
|
||||||
shutdownAndAssert(c.net, c.t, c.alice)
|
|
||||||
shutdownAndAssert(c.net, c.t, c.bob)
|
|
||||||
shutdownAndAssert(c.net, c.t, c.carol)
|
|
||||||
shutdownAndAssert(c.net, c.t, c.dave)
|
|
||||||
shutdownAndAssert(c.net, c.t, c.eve)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *mppTestContext) waitForChannels() {
|
// Make sure every node has heard every channel.
|
||||||
// Wait for all nodes to have seen all channels.
|
for _, hn := range m.nodes {
|
||||||
for _, chanPoint := range c.networkChans {
|
for _, cp := range m.channelPoints {
|
||||||
for _, node := range c.nodes {
|
m.ht.AssertTopologyChannelOpen(hn, cp)
|
||||||
txid, err := lnrpc.GetChanPointFundingTxid(chanPoint)
|
|
||||||
if err != nil {
|
|
||||||
c.t.Fatalf("unable to get txid: %v", err)
|
|
||||||
}
|
|
||||||
point := wire.OutPoint{
|
|
||||||
Hash: *txid,
|
|
||||||
Index: chanPoint.OutputIndex,
|
|
||||||
}
|
|
||||||
|
|
||||||
err = node.WaitForNetworkChannelOpen(chanPoint)
|
|
||||||
if err != nil {
|
|
||||||
c.t.Fatalf("(%v:%d): timeout waiting for "+
|
|
||||||
"channel(%s) open: %v",
|
|
||||||
node.Cfg.Name, node.NodeID, point, err)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// closeChannels closes all the open channels from `openChannels`.
|
||||||
|
func (m *mppTestScenario) closeChannels() {
|
||||||
|
if m.ht.Failed() {
|
||||||
|
m.ht.Log("Skipped closing channels for failed test")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO(yy): remove the sleep once the following bug is fixed. When the
|
||||||
|
// payment is reported as settled by Alice, it's expected the
|
||||||
|
// commitment dance is finished and all subsequent states have been
|
||||||
|
// updated. Yet we'd receive the error `cannot co-op close channel with
|
||||||
|
// active htlcs` or `link failed to shutdown` if we close the channel.
|
||||||
|
// We need to investigate the order of settling the payments and
|
||||||
|
// updating commitments to understand and fix it.
|
||||||
|
time.Sleep(2 * time.Second)
|
||||||
|
|
||||||
|
// Close all channels without mining the closing transactions.
|
||||||
|
m.ht.CloseChannelAssertPending(m.alice, m.channelPoints[0], false)
|
||||||
|
m.ht.CloseChannelAssertPending(m.alice, m.channelPoints[1], false)
|
||||||
|
m.ht.CloseChannelAssertPending(m.carol, m.channelPoints[2], false)
|
||||||
|
m.ht.CloseChannelAssertPending(m.carol, m.channelPoints[3], false)
|
||||||
|
m.ht.CloseChannelAssertPending(m.dave, m.channelPoints[4], false)
|
||||||
|
m.ht.CloseChannelAssertPending(m.eve, m.channelPoints[5], false)
|
||||||
|
|
||||||
|
// Now mine a block to include all the closing transactions.
|
||||||
|
m.ht.MineBlocks(1)
|
||||||
|
|
||||||
|
// Assert that the channels are closed.
|
||||||
|
for _, hn := range m.nodes {
|
||||||
|
m.ht.AssertNumWaitingClose(hn, 0)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// Helper function for Alice to build a route from pubkeys.
|
// buildRoute is a helper for the sender to build a route from the given hops.
|
||||||
func (c *mppTestContext) buildRoute(ctxb context.Context, amt btcutil.Amount,
|
func (m *mppTestScenario) buildRoute(amt btcutil.Amount,
|
||||||
sender *lntest.HarnessNode, hops []*lntest.HarnessNode) (*lnrpc.Route,
|
sender *node.HarnessNode, hops []*node.HarnessNode) *lnrpc.Route {
|
||||||
error) {
|
|
||||||
|
|
||||||
rpcHops := make([][]byte, 0, len(hops))
|
rpcHops := make([][]byte, 0, len(hops))
|
||||||
for _, hop := range hops {
|
for _, hop := range hops {
|
||||||
k := hop.PubKeyStr
|
k := hop.PubKeyStr
|
||||||
pubkey, err := route.NewVertexFromStr(k)
|
pubkey, err := route.NewVertexFromStr(k)
|
||||||
if err != nil {
|
require.NoErrorf(m.ht, err, "error parsing %v: %v", k, err)
|
||||||
return nil, fmt.Errorf("error parsing %v: %v",
|
|
||||||
k, err)
|
|
||||||
}
|
|
||||||
rpcHops = append(rpcHops, pubkey[:])
|
rpcHops = append(rpcHops, pubkey[:])
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -362,12 +349,37 @@ func (c *mppTestContext) buildRoute(ctxb context.Context, amt btcutil.Amount,
|
|||||||
FinalCltvDelta: chainreg.DefaultBitcoinTimeLockDelta,
|
FinalCltvDelta: chainreg.DefaultBitcoinTimeLockDelta,
|
||||||
HopPubkeys: rpcHops,
|
HopPubkeys: rpcHops,
|
||||||
}
|
}
|
||||||
|
routeResp := sender.RPC.BuildRoute(req)
|
||||||
|
|
||||||
ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
|
return routeResp.Route
|
||||||
routeResp, err := sender.RouterClient.BuildRoute(ctxt, req)
|
}
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
// updateDaveGlobalPolicy updates Dave's global channel policy and returns the
|
||||||
|
// expected policy for further checks. It changes Dave's `FeeBaseMsat` from 1000
|
||||||
|
// msat to 500,000 msat, and `FeeProportionalMillionths` from 1 to 1000.
|
||||||
|
func (m *mppTestScenario) updateDaveGlobalPolicy() *lnrpc.RoutingPolicy {
|
||||||
|
const (
|
||||||
|
baseFeeMsat = 500_000
|
||||||
|
feeRate = 0.001
|
||||||
|
maxHtlcMsat = 133_650_000
|
||||||
|
)
|
||||||
|
|
||||||
|
expectedPolicy := &lnrpc.RoutingPolicy{
|
||||||
|
FeeBaseMsat: baseFeeMsat,
|
||||||
|
FeeRateMilliMsat: feeRate * testFeeBase,
|
||||||
|
TimeLockDelta: 40,
|
||||||
|
MinHtlc: 1000, // default value
|
||||||
|
MaxHtlcMsat: maxHtlcMsat,
|
||||||
}
|
}
|
||||||
|
|
||||||
return routeResp.Route, nil
|
updateFeeReq := &lnrpc.PolicyUpdateRequest{
|
||||||
|
BaseFeeMsat: baseFeeMsat,
|
||||||
|
FeeRate: feeRate,
|
||||||
|
TimeLockDelta: 40,
|
||||||
|
Scope: &lnrpc.PolicyUpdateRequest_Global{Global: true},
|
||||||
|
MaxHtlcMsat: maxHtlcMsat,
|
||||||
|
}
|
||||||
|
m.dave.RPC.UpdateChannelPolicy(updateFeeReq)
|
||||||
|
|
||||||
|
return expectedPolicy
|
||||||
}
|
}
|
||||||
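
To see where the numbers in `expectedPolicy` come from: assuming `testFeeBase` is 1,000,000 (the value this constant carries elsewhere in the itest package), a fee rate of 0.001 maps to `FeeRateMilliMsat = 1000`. A small standalone sketch of the BOLT-07 style fee a forwarding node would then charge, using an illustrative 100,000 msat payment that is not taken from the test:

```go
package main

import "fmt"

func main() {
	const (
		testFeeBase = 1_000_000 // assumed, mirrors the itest constant
		baseFeeMsat = 500_000
		feeRate     = 0.001
		amtMsat     = 100_000 // illustrative payment amount
	)

	// FeeRateMilliMsat advertised in the resulting channel update.
	feeRateMilliMsat := int64(feeRate * testFeeBase) // 1000

	// Forwarding fee = base fee + proportional fee (parts per million).
	feeMsat := baseFeeMsat + amtMsat*feeRateMilliMsat/1_000_000

	fmt.Println(feeRateMilliMsat, feeMsat) // 1000 500100
}
```
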
|
@ -1,40 +1,39 @@
|
|||||||
package itest
|
package itest
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/btcsuite/btcd/btcutil"
|
"github.com/btcsuite/btcd/btcutil"
|
||||||
"github.com/lightningnetwork/lnd/lnrpc"
|
"github.com/lightningnetwork/lnd/lnrpc"
|
||||||
"github.com/lightningnetwork/lnd/lntest"
|
"github.com/lightningnetwork/lnd/lntemp"
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
)
|
)
|
||||||
|
|
||||||
func testNonstdSweep(net *lntest.NetworkHarness, t *harnessTest) {
|
func testNonstdSweep(ht *lntemp.HarnessTest) {
|
||||||
p2shAddr, err := btcutil.NewAddressScriptHash(
|
p2shAddr, err := btcutil.NewAddressScriptHash(
|
||||||
make([]byte, 1), harnessNetParams,
|
make([]byte, 1), harnessNetParams,
|
||||||
)
|
)
|
||||||
require.NoError(t.t, err)
|
require.NoError(ht, err)
|
||||||
|
|
||||||
p2pkhAddr, err := btcutil.NewAddressPubKeyHash(
|
p2pkhAddr, err := btcutil.NewAddressPubKeyHash(
|
||||||
make([]byte, 20), harnessNetParams,
|
make([]byte, 20), harnessNetParams,
|
||||||
)
|
)
|
||||||
require.NoError(t.t, err)
|
require.NoError(ht, err)
|
||||||
|
|
||||||
p2wshAddr, err := btcutil.NewAddressWitnessScriptHash(
|
p2wshAddr, err := btcutil.NewAddressWitnessScriptHash(
|
||||||
make([]byte, 32), harnessNetParams,
|
make([]byte, 32), harnessNetParams,
|
||||||
)
|
)
|
||||||
require.NoError(t.t, err)
|
require.NoError(ht, err)
|
||||||
|
|
||||||
p2wkhAddr, err := btcutil.NewAddressWitnessPubKeyHash(
|
p2wkhAddr, err := btcutil.NewAddressWitnessPubKeyHash(
|
||||||
make([]byte, 20), harnessNetParams,
|
make([]byte, 20), harnessNetParams,
|
||||||
)
|
)
|
||||||
require.NoError(t.t, err)
|
require.NoError(ht, err)
|
||||||
|
|
||||||
p2trAddr, err := btcutil.NewAddressTaproot(
|
p2trAddr, err := btcutil.NewAddressTaproot(
|
||||||
make([]byte, 32), harnessNetParams,
|
make([]byte, 32), harnessNetParams,
|
||||||
)
|
)
|
||||||
require.NoError(t.t, err)
|
require.NoError(ht, err)
|
||||||
|
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
name string
|
name string
|
||||||
@ -64,10 +63,10 @@ func testNonstdSweep(net *lntest.NetworkHarness, t *harnessTest) {
|
|||||||
|
|
||||||
for _, test := range tests {
|
for _, test := range tests {
|
||||||
test := test
|
test := test
|
||||||
success := t.t.Run(test.name, func(t *testing.T) {
|
success := ht.Run(test.name, func(t *testing.T) {
|
||||||
h := newHarnessTest(t, net)
|
st := ht.Subtest(t)
|
||||||
|
|
||||||
testNonStdSweepInner(net, h, test.address)
|
testNonStdSweepInner(st, test.address)
|
||||||
})
|
})
|
||||||
if !success {
|
if !success {
|
||||||
break
|
break
|
||||||
@ -75,18 +74,14 @@ func testNonstdSweep(net *lntest.NetworkHarness, t *harnessTest) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
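
The `ht.Run`/`ht.Subtest` pairing above mirrors the standard library's subtest mechanism: `Run` returns false when the subtest fails, which is what lets the loop `break` early instead of running the remaining address types. A minimal sketch using only plain `testing` (no harness involved) to show the control flow being relied on; the case names are placeholders:

```go
package itest

import "testing"

// TestSweepAddressTypes is a stand-in for the loop above: each address type
// runs as its own subtest, and we stop at the first failure.
func TestSweepAddressTypes(t *testing.T) {
	cases := []string{"p2sh", "p2pkh", "p2wsh", "p2wkh", "p2tr"}

	for _, tc := range cases {
		tc := tc
		success := t.Run(tc, func(st *testing.T) {
			// In the real test this spins up a fresh harness via
			// ht.Subtest(st) and sweeps to the address under test.
			_ = tc
		})
		if !success {
			break
		}
	}
}
```
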
func testNonStdSweepInner(net *lntest.NetworkHarness, t *harnessTest,
|
func testNonStdSweepInner(ht *lntemp.HarnessTest, address string) {
|
||||||
address string) {
|
carol := ht.NewNode("carol", nil)
|
||||||
|
|
||||||
ctxb := context.Background()
|
|
||||||
|
|
||||||
carol := net.NewNode(t.t, "carol", nil)
|
|
||||||
|
|
||||||
// Give Carol a UTXO so SendCoins will behave as expected.
|
// Give Carol a UTXO so SendCoins will behave as expected.
|
||||||
net.SendCoins(t.t, btcutil.SatoshiPerBitcoin, carol)
|
ht.FundCoins(btcutil.SatoshiPerBitcoin, carol)
|
||||||
|
|
||||||
// Set the fee estimate to 1sat/vbyte.
|
// Set the fee estimate to 1sat/vbyte.
|
||||||
net.SetFeeEstimate(250)
|
ht.SetFeeEstimate(250)
|
||||||
|
|
||||||
// Make Carol call SendCoins with the SendAll flag and the created
|
// Make Carol call SendCoins with the SendAll flag and the created
|
||||||
// address.
|
// address.
|
||||||
@ -96,20 +91,13 @@ func testNonStdSweepInner(net *lntest.NetworkHarness, t *harnessTest,
|
|||||||
SendAll: true,
|
SendAll: true,
|
||||||
}
|
}
|
||||||
|
|
||||||
ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout)
|
|
||||||
defer cancel()
|
|
||||||
|
|
||||||
// If a non-standard transaction was created, then this SendCoins call
|
// If a non-standard transaction was created, then this SendCoins call
|
||||||
// will fail.
|
// will fail.
|
||||||
_, err := carol.SendCoins(ctxt, sendReq)
|
carol.RPC.SendCoins(sendReq)
|
||||||
require.NoError(t.t, err)
|
|
||||||
|
|
||||||
// Fetch the txid so we can grab the raw transaction.
|
// Fetch the txid so we can grab the raw transaction.
|
||||||
txid, err := waitForTxInMempool(net.Miner.Client, minerMempoolTimeout)
|
txid := ht.Miner.AssertNumTxsInMempool(1)[0]
|
||||||
require.NoError(t.t, err)
|
tx := ht.Miner.GetRawTransaction(txid)
|
||||||
|
|
||||||
tx, err := net.Miner.Client.GetRawTransaction(txid)
|
|
||||||
require.NoError(t.t, err)
|
|
||||||
|
|
||||||
msgTx := tx.MsgTx()
|
msgTx := tx.MsgTx()
|
||||||
|
|
||||||
@ -123,9 +111,7 @@ func testNonStdSweepInner(net *lntest.NetworkHarness, t *harnessTest,
|
|||||||
for _, inp := range msgTx.TxIn {
|
for _, inp := range msgTx.TxIn {
|
||||||
// Fetch the previous outpoint's value.
|
// Fetch the previous outpoint's value.
|
||||||
prevOut := inp.PreviousOutPoint
|
prevOut := inp.PreviousOutPoint
|
||||||
|
ptx := ht.Miner.GetRawTransaction(&prevOut.Hash)
|
||||||
ptx, err := net.Miner.Client.GetRawTransaction(&prevOut.Hash)
|
|
||||||
require.NoError(t.t, err)
|
|
||||||
|
|
||||||
pout := ptx.MsgTx().TxOut[prevOut.Index]
|
pout := ptx.MsgTx().TxOut[prevOut.Index]
|
||||||
inputVal += int(pout.Value)
|
inputVal += int(pout.Value)
|
||||||
@ -139,9 +125,11 @@ func testNonStdSweepInner(net *lntest.NetworkHarness, t *harnessTest,
|
|||||||
|
|
||||||
// Fetch the vsize of the transaction so we can determine if the
|
// Fetch the vsize of the transaction so we can determine if the
|
||||||
// transaction pays >= 1 sat/vbyte.
|
// transaction pays >= 1 sat/vbyte.
|
||||||
rawTx, err := net.Miner.Client.GetRawTransactionVerbose(txid)
|
rawTx := ht.Miner.GetRawTransactionVerbose(txid)
|
||||||
require.NoError(t.t, err)
|
|
||||||
|
|
||||||
// Require fee >= vbytes.
|
// Require fee >= vbytes.
|
||||||
require.True(t.t, fee >= int(rawTx.Vsize))
|
require.True(ht, fee >= int(rawTx.Vsize))
|
||||||
|
|
||||||
|
// Mine a block to keep the mempool clean.
|
||||||
|
ht.MineBlocksAndAssertNumTxes(1, 1)
|
||||||
}
|
}
|
||||||
|
@ -2,7 +2,6 @@ package itest
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"context"
|
|
||||||
"fmt"
|
"fmt"
|
||||||
|
|
||||||
"github.com/btcsuite/btcd/btcutil"
|
"github.com/btcsuite/btcd/btcutil"
|
||||||
@ -14,7 +13,6 @@ import (
|
|||||||
"github.com/lightningnetwork/lnd/lnrpc/walletrpc"
|
"github.com/lightningnetwork/lnd/lnrpc/walletrpc"
|
||||||
"github.com/lightningnetwork/lnd/lntemp"
|
"github.com/lightningnetwork/lnd/lntemp"
|
||||||
"github.com/lightningnetwork/lnd/lntemp/node"
|
"github.com/lightningnetwork/lnd/lntemp/node"
|
||||||
"github.com/lightningnetwork/lnd/lntest"
|
|
||||||
"github.com/lightningnetwork/lnd/lntest/wait"
|
"github.com/lightningnetwork/lnd/lntest/wait"
|
||||||
"github.com/lightningnetwork/lnd/lnwallet"
|
"github.com/lightningnetwork/lnd/lnwallet"
|
||||||
"github.com/lightningnetwork/lnd/sweep"
|
"github.com/lightningnetwork/lnd/sweep"
|
||||||
@ -80,35 +78,28 @@ func testChainKitGetBlockHash(ht *lntemp.HarnessTest) {
|
|||||||
// rate by broadcasting a Child-Pays-For-Parent (CPFP) transaction.
|
// rate by broadcasting a Child-Pays-For-Parent (CPFP) transaction.
|
||||||
//
|
//
|
||||||
// TODO(wilmer): Add RBF case once btcd supports it.
|
// TODO(wilmer): Add RBF case once btcd supports it.
|
||||||
func testCPFP(net *lntest.NetworkHarness, t *harnessTest) {
|
func testCPFP(ht *lntemp.HarnessTest) {
|
||||||
runCPFP(net, t, net.Alice, net.Bob)
|
runCPFP(ht, ht.Alice, ht.Bob)
|
||||||
}
|
}
|
||||||
|
|
||||||
// runCPFP ensures that the daemon can bump an unconfirmed transaction's fee
|
// runCPFP ensures that the daemon can bump an unconfirmed transaction's fee
|
||||||
// rate by broadcasting a Child-Pays-For-Parent (CPFP) transaction.
|
// rate by broadcasting a Child-Pays-For-Parent (CPFP) transaction.
|
||||||
func runCPFP(net *lntest.NetworkHarness, t *harnessTest,
|
func runCPFP(ht *lntemp.HarnessTest, alice, bob *node.HarnessNode) {
|
||||||
alice, bob *lntest.HarnessNode) {
|
|
||||||
|
|
||||||
// Skip this test for neutrino, as it's not aware of mempool
|
// Skip this test for neutrino, as it's not aware of mempool
|
||||||
// transactions.
|
// transactions.
|
||||||
if net.BackendCfg.Name() == lntest.NeutrinoBackendName {
|
if ht.IsNeutrinoBackend() {
|
||||||
t.Skipf("skipping CPFP test for neutrino backend")
|
ht.Skipf("skipping CPFP test for neutrino backend")
|
||||||
}
|
}
|
||||||
|
|
||||||
// We'll start the test by sending Alice some coins, which she'll use to
|
// We'll start the test by sending Alice some coins, which she'll use
|
||||||
// send to Bob.
|
// to send to Bob.
|
||||||
ctxb := context.Background()
|
ht.FundCoins(btcutil.SatoshiPerBitcoin, alice)
|
||||||
net.SendCoins(t.t, btcutil.SatoshiPerBitcoin, alice)
|
|
||||||
|
|
||||||
// Create an address for Bob to send the coins to.
|
// Create an address for Bob to send the coins to.
|
||||||
addrReq := &lnrpc.NewAddressRequest{
|
req := &lnrpc.NewAddressRequest{
|
||||||
Type: lnrpc.AddressType_WITNESS_PUBKEY_HASH,
|
Type: lnrpc.AddressType_WITNESS_PUBKEY_HASH,
|
||||||
}
|
}
|
||||||
ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
|
resp := bob.RPC.NewAddress(req)
|
||||||
resp, err := bob.NewAddress(ctxt, addrReq)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("unable to get new address for bob: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Send the coins from Alice to Bob. We should expect a transaction to
|
// Send the coins from Alice to Bob. We should expect a transaction to
|
||||||
// be broadcast and seen in the mempool.
|
// be broadcast and seen in the mempool.
|
||||||
@ -116,46 +107,33 @@ func runCPFP(net *lntest.NetworkHarness, t *harnessTest,
|
|||||||
Addr: resp.Address,
|
Addr: resp.Address,
|
||||||
Amount: btcutil.SatoshiPerBitcoin,
|
Amount: btcutil.SatoshiPerBitcoin,
|
||||||
}
|
}
|
||||||
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
|
alice.RPC.SendCoins(sendReq)
|
||||||
if _, err = alice.SendCoins(ctxt, sendReq); err != nil {
|
txid := ht.Miner.AssertNumTxsInMempool(1)[0]
|
||||||
t.Fatalf("unable to send coins to bob: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
txid, err := waitForTxInMempool(net.Miner.Client, minerMempoolTimeout)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("expected one mempool transaction: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// We'll then extract the raw transaction from the mempool in order to
|
// We'll then extract the raw transaction from the mempool in order to
|
||||||
// determine the index of Bob's output.
|
// determine the index of Bob's output.
|
||||||
tx, err := net.Miner.Client.GetRawTransaction(txid)
|
tx := ht.Miner.GetRawTransaction(txid)
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("unable to extract raw transaction from mempool: %v",
|
|
||||||
err)
|
|
||||||
}
|
|
||||||
bobOutputIdx := -1
|
bobOutputIdx := -1
|
||||||
for i, txOut := range tx.MsgTx().TxOut {
|
for i, txOut := range tx.MsgTx().TxOut {
|
||||||
_, addrs, _, err := txscript.ExtractPkScriptAddrs(
|
_, addrs, _, err := txscript.ExtractPkScriptAddrs(
|
||||||
txOut.PkScript, net.Miner.ActiveNet,
|
txOut.PkScript, ht.Miner.ActiveNet,
|
||||||
)
|
)
|
||||||
if err != nil {
|
require.NoErrorf(ht, err, "unable to extract address "+
|
||||||
t.Fatalf("unable to extract address from pkScript=%x: "+
|
"from pkScript=%x: %v", txOut.PkScript, err)
|
||||||
"%v", txOut.PkScript, err)
|
|
||||||
}
|
|
||||||
if addrs[0].String() == resp.Address {
|
if addrs[0].String() == resp.Address {
|
||||||
bobOutputIdx = i
|
bobOutputIdx = i
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if bobOutputIdx == -1 {
|
require.NotEqual(ht, -1, bobOutputIdx, "bob's output was not found "+
|
||||||
t.Fatalf("bob's output was not found within the transaction")
|
"within the transaction")
|
||||||
}
|
|
||||||
|
|
||||||
// Wait until bob has seen the tx and considers it as owned.
|
// Wait until bob has seen the tx and considers it as owned.
|
||||||
op := &lnrpc.OutPoint{
|
op := &lnrpc.OutPoint{
|
||||||
TxidBytes: txid[:],
|
TxidBytes: txid[:],
|
||||||
OutputIndex: uint32(bobOutputIdx),
|
OutputIndex: uint32(bobOutputIdx),
|
||||||
}
|
}
|
||||||
assertWalletUnspent(t, bob, op, "")
|
ht.AssertUTXOInWallet(bob, op, "")
|
||||||
|
|
||||||
// We'll attempt to bump the fee of this transaction by performing a
|
// We'll attempt to bump the fee of this transaction by performing a
|
||||||
// CPFP from Alice's point of view.
|
// CPFP from Alice's point of view.
|
||||||
@ -165,68 +143,38 @@ func runCPFP(net *lntest.NetworkHarness, t *harnessTest,
|
|||||||
sweep.DefaultMaxFeeRate.FeePerKVByte() / 2000,
|
sweep.DefaultMaxFeeRate.FeePerKVByte() / 2000,
|
||||||
),
|
),
|
||||||
}
|
}
|
||||||
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
|
bob.RPC.BumpFee(bumpFeeReq)
|
||||||
_, err = bob.WalletKitClient.BumpFee(ctxt, bumpFeeReq)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("unable to bump fee: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// We should now expect to see two transactions within the mempool, a
|
// We should now expect to see two transactions within the mempool, a
|
||||||
// parent and its child.
|
// parent and its child.
|
||||||
_, err = waitForNTxsInMempool(net.Miner.Client, 2, minerMempoolTimeout)
|
ht.Miner.AssertNumTxsInMempool(2)
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("expected two mempool transactions: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// We should also expect to see the output being swept by the
|
// We should also expect to see the output being swept by the
|
||||||
// UtxoSweeper. We'll ensure it's using the fee rate specified.
|
// UtxoSweeper. We'll ensure it's using the fee rate specified.
|
||||||
pendingSweepsReq := &walletrpc.PendingSweepsRequest{}
|
pendingSweepsResp := bob.RPC.PendingSweeps()
|
||||||
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
|
require.Len(ht, pendingSweepsResp.PendingSweeps, 1,
|
||||||
pendingSweepsResp, err := bob.WalletKitClient.PendingSweeps(
|
"expected to find 1 pending sweep")
|
||||||
ctxt, pendingSweepsReq,
|
|
||||||
)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("unable to retrieve pending sweeps: %v", err)
|
|
||||||
}
|
|
||||||
if len(pendingSweepsResp.PendingSweeps) != 1 {
|
|
||||||
t.Fatalf("expected to find %v pending sweep(s), found %v", 1,
|
|
||||||
len(pendingSweepsResp.PendingSweeps))
|
|
||||||
}
|
|
||||||
pendingSweep := pendingSweepsResp.PendingSweeps[0]
|
pendingSweep := pendingSweepsResp.PendingSweeps[0]
|
||||||
if !bytes.Equal(pendingSweep.Outpoint.TxidBytes, op.TxidBytes) {
|
require.Equal(ht, pendingSweep.Outpoint.TxidBytes, op.TxidBytes,
|
||||||
t.Fatalf("expected output txid %x, got %x", op.TxidBytes,
|
"output txid not matched")
|
||||||
pendingSweep.Outpoint.TxidBytes)
|
require.Equal(ht, pendingSweep.Outpoint.OutputIndex, op.OutputIndex,
|
||||||
}
|
"output index not matched")
|
||||||
if pendingSweep.Outpoint.OutputIndex != op.OutputIndex {
|
require.Equal(ht, pendingSweep.SatPerVbyte, bumpFeeReq.SatPerVbyte,
|
||||||
t.Fatalf("expected output index %v, got %v", op.OutputIndex,
|
"sweep sat per vbyte not matched")
|
||||||
pendingSweep.Outpoint.OutputIndex)
|
|
||||||
}
|
|
||||||
if pendingSweep.SatPerVbyte != bumpFeeReq.SatPerVbyte {
|
|
||||||
t.Fatalf("expected sweep sat per vbyte %v, got %v",
|
|
||||||
bumpFeeReq.SatPerVbyte, pendingSweep.SatPerVbyte)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Mine a block to clean up the unconfirmed transactions.
|
// Mine a block to clean up the unconfirmed transactions.
|
||||||
mineBlocks(t, net, 1, 2)
|
ht.MineBlocksAndAssertNumTxes(1, 2)
|
||||||
|
|
||||||
// The input used to CPFP should no longer be pending.
|
// The input used to CPFP should no longer be pending.
|
||||||
err = wait.NoError(func() error {
|
err := wait.NoError(func() error {
|
||||||
req := &walletrpc.PendingSweepsRequest{}
|
resp := bob.RPC.PendingSweeps()
|
||||||
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
|
|
||||||
resp, err := bob.WalletKitClient.PendingSweeps(ctxt, req)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("unable to retrieve bob's pending "+
|
|
||||||
"sweeps: %v", err)
|
|
||||||
}
|
|
||||||
if len(resp.PendingSweeps) != 0 {
|
if len(resp.PendingSweeps) != 0 {
|
||||||
return fmt.Errorf("expected 0 pending sweeps, found %d",
|
return fmt.Errorf("expected 0 pending sweeps, found %d",
|
||||||
len(resp.PendingSweeps))
|
len(resp.PendingSweeps))
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}, defaultTimeout)
|
}, defaultTimeout)
|
||||||
if err != nil {
|
require.NoError(ht, err, "timeout checking bob's pending sweeps")
|
||||||
t.Fatalf(err.Error())
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
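
As a back-of-the-envelope check of what the bump request above achieves: with CPFP the child is evaluated together with its unconfirmed parent, so miners see the package fee rate rather than the parent's alone. A self-contained sketch with illustrative numbers (the sizes and the parent's original fee are made up for the example, not taken from the test):

```go
package main

import "fmt"

func main() {
	const (
		parentFee     = 250 // sats, roughly 1 sat/vbyte
		parentVsize   = 250 // vbytes
		childSatPerVB = 50  // illustrative SatPerVbyte requested via BumpFee
		childVsize    = 110 // vbytes of a one-input, one-output sweep, roughly
	)

	childFee := childSatPerVB * childVsize

	// Miners evaluate the parent and the sweeper's child as a package.
	packageRate := float64(parentFee+childFee) / float64(parentVsize+childVsize)

	fmt.Printf("package fee rate: %.1f sat/vbyte\n", packageRate)
}
```
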
// testAnchorReservedValue tests that we won't allow sending transactions when
|
// testAnchorReservedValue tests that we won't allow sending transactions when
|
||||||
|
@ -1,9 +1,8 @@
|
|||||||
package itest
|
package itest
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
|
||||||
"fmt"
|
"fmt"
|
||||||
"time"
|
"testing"
|
||||||
|
|
||||||
"github.com/btcsuite/btcd/btcjson"
|
"github.com/btcsuite/btcd/btcjson"
|
||||||
"github.com/btcsuite/btcd/chaincfg/chainhash"
|
"github.com/btcsuite/btcd/chaincfg/chainhash"
|
||||||
@ -12,7 +11,8 @@ import (
|
|||||||
"github.com/lightningnetwork/lnd/funding"
|
"github.com/lightningnetwork/lnd/funding"
|
||||||
"github.com/lightningnetwork/lnd/lnrpc"
|
"github.com/lightningnetwork/lnd/lnrpc"
|
||||||
"github.com/lightningnetwork/lnd/lntemp"
|
"github.com/lightningnetwork/lnd/lntemp"
|
||||||
"github.com/lightningnetwork/lnd/lntest"
|
"github.com/lightningnetwork/lnd/lntemp/node"
|
||||||
|
"github.com/lightningnetwork/lnd/lntemp/rpc"
|
||||||
"github.com/lightningnetwork/lnd/lntest/wait"
|
"github.com/lightningnetwork/lnd/lntest/wait"
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
)
|
)
|
||||||
@ -172,9 +172,7 @@ func testOpenChannelAfterReorg(ht *lntemp.HarnessTest) {
|
|||||||
// ChannelUpdate --> defaultBaseFee, provided FeeRate
|
// ChannelUpdate --> defaultBaseFee, provided FeeRate
|
||||||
// 4.) baseFee and feeRate provided to OpenChannelRequest
|
// 4.) baseFee and feeRate provided to OpenChannelRequest
|
||||||
// ChannelUpdate --> provided baseFee, provided feeRate.
|
// ChannelUpdate --> provided baseFee, provided feeRate.
|
||||||
func testOpenChannelUpdateFeePolicy(net *lntest.NetworkHarness,
|
func testOpenChannelUpdateFeePolicy(ht *lntemp.HarnessTest) {
|
||||||
t *harnessTest) {
|
|
||||||
|
|
||||||
const (
|
const (
|
||||||
defaultBaseFee = 1000
|
defaultBaseFee = 1000
|
||||||
defaultFeeRate = 1
|
defaultFeeRate = 1
|
||||||
@ -189,7 +187,7 @@ func testOpenChannelUpdateFeePolicy(net *lntest.NetworkHarness,
|
|||||||
chanAmt := funding.MaxBtcFundingAmount
|
chanAmt := funding.MaxBtcFundingAmount
|
||||||
pushAmt := chanAmt / 2
|
pushAmt := chanAmt / 2
|
||||||
|
|
||||||
feeScenarios := []lntest.OpenChannelParams{
|
feeScenarios := []lntemp.OpenChannelParams{
|
||||||
{
|
{
|
||||||
Amt: chanAmt,
|
Amt: chanAmt,
|
||||||
PushAmt: pushAmt,
|
PushAmt: pushAmt,
|
||||||
@ -259,89 +257,79 @@ func testOpenChannelUpdateFeePolicy(net *lntest.NetworkHarness,
|
|||||||
MaxHtlcMsat: defaultMaxHtlc,
|
MaxHtlcMsat: defaultMaxHtlc,
|
||||||
}
|
}
|
||||||
|
|
||||||
for i, feeScenario := range feeScenarios {
|
alice, bob := ht.Alice, ht.Bob
|
||||||
|
|
||||||
|
runTestCase := func(ht *lntemp.HarnessTest,
|
||||||
|
fs lntemp.OpenChannelParams,
|
||||||
|
alicePolicy, bobPolicy *lnrpc.RoutingPolicy) {
|
||||||
|
|
||||||
// Create a channel Alice->Bob.
|
// Create a channel Alice->Bob.
|
||||||
chanPoint := openChannelAndAssert(
|
chanPoint := ht.OpenChannel(alice, bob, fs)
|
||||||
t, net, net.Alice, net.Bob,
|
defer ht.CloseChannel(alice, chanPoint)
|
||||||
feeScenario,
|
|
||||||
)
|
|
||||||
|
|
||||||
defer closeChannelAndAssert(t, net, net.Alice, chanPoint, false)
|
// We add all the nodes' update channels to a slice, such that
|
||||||
|
// we can make sure they all receive the expected updates.
|
||||||
|
nodes := []*node.HarnessNode{alice, bob}
|
||||||
|
|
||||||
// We add all the nodes' update channels to a slice, such that we can
|
// Alice and Bob should see each other's ChannelUpdates,
|
||||||
// make sure they all receive the expected updates.
|
// advertising the preferred routing policies.
|
||||||
nodes := []*lntest.HarnessNode{net.Alice, net.Bob}
|
assertNodesPolicyUpdate(
|
||||||
|
ht, nodes, alice, alicePolicy, chanPoint,
|
||||||
// Alice and Bob should see each other's ChannelUpdates, advertising
|
|
||||||
// the preferred routing policies.
|
|
||||||
assertPolicyUpdate(
|
|
||||||
t, nodes, net.Alice.PubKeyStr,
|
|
||||||
&expectedPolicies[i], chanPoint,
|
|
||||||
)
|
|
||||||
assertPolicyUpdate(
|
|
||||||
t, nodes, net.Bob.PubKeyStr,
|
|
||||||
&bobExpectedPolicy, chanPoint,
|
|
||||||
)
|
)
|
||||||
|
assertNodesPolicyUpdate(ht, nodes, bob, bobPolicy, chanPoint)
|
||||||
|
|
||||||
// They should now know about the default policies.
|
// They should now know about the default policies.
|
||||||
for _, node := range nodes {
|
for _, node := range nodes {
|
||||||
assertChannelPolicy(
|
ht.AssertChannelPolicy(
|
||||||
t, node, net.Alice.PubKeyStr,
|
node, alice.PubKeyStr, alicePolicy, chanPoint,
|
||||||
&expectedPolicies[i], chanPoint,
|
|
||||||
)
|
)
|
||||||
assertChannelPolicy(
|
ht.AssertChannelPolicy(
|
||||||
t, node, net.Bob.PubKeyStr,
|
node, bob.PubKeyStr, bobPolicy, chanPoint,
|
||||||
&bobExpectedPolicy, chanPoint,
|
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
require.NoError(
|
for i, feeScenario := range feeScenarios {
|
||||||
t.t, net.Alice.WaitForNetworkChannelOpen(chanPoint),
|
ht.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
|
||||||
"alice reports channel opening",
|
st := ht.Subtest(t)
|
||||||
)
|
ht.EnsureConnected(alice, bob)
|
||||||
|
|
||||||
require.NoError(
|
runTestCase(
|
||||||
t.t, net.Bob.WaitForNetworkChannelOpen(chanPoint),
|
st, feeScenario,
|
||||||
"bob reports channel opening",
|
&expectedPolicies[i], &bobExpectedPolicy,
|
||||||
)
|
)
|
||||||
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// testBasicChannelCreationAndUpdates tests multiple channel opening and
|
// testBasicChannelCreationAndUpdates tests multiple channel opening and
|
||||||
// closing, and ensures that if a node is subscribed to channel updates
|
// closing, and ensures that if a node is subscribed to channel updates they
|
||||||
// they will be received correctly for both cooperative and force closed
|
// will be received correctly for both cooperative and force closed channels.
|
||||||
// channels.
|
func testBasicChannelCreationAndUpdates(ht *lntemp.HarnessTest) {
|
||||||
func testBasicChannelCreationAndUpdates(net *lntest.NetworkHarness,
|
runBasicChannelCreationAndUpdates(ht, ht.Alice, ht.Bob)
|
||||||
t *harnessTest) {
|
|
||||||
|
|
||||||
runBasicChannelCreationAndUpdates(net, t, net.Alice, net.Bob)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// runBasicChannelCreationAndUpdates tests multiple channel opening and closing,
|
// runBasicChannelCreationAndUpdates tests multiple channel opening and closing,
|
||||||
// and ensures that if a node is subscribed to channel updates they will be
|
// and ensures that if a node is subscribed to channel updates they will be
|
||||||
// received correctly for both cooperative and force closed channels.
|
// received correctly for both cooperative and force closed channels.
|
||||||
func runBasicChannelCreationAndUpdates(net *lntest.NetworkHarness,
|
func runBasicChannelCreationAndUpdates(ht *lntemp.HarnessTest,
|
||||||
t *harnessTest, alice, bob *lntest.HarnessNode) {
|
alice, bob *node.HarnessNode) {
|
||||||
|
|
||||||
ctxb := context.Background()
|
|
||||||
const (
|
const (
|
||||||
numChannels = 2
|
numChannels = 2
|
||||||
amount = funding.MaxBtcFundingAmount
|
amount = funding.MaxBtcFundingAmount
|
||||||
)
|
)
|
||||||
|
|
||||||
// Subscribe Bob and Alice to channel event notifications.
|
// Subscribe Bob and Alice to channel event notifications.
|
||||||
bobChanSub := subscribeChannelNotifications(ctxb, t, bob)
|
bobChanSub := bob.RPC.SubscribeChannelEvents()
|
||||||
defer close(bobChanSub.quit)
|
aliceChanSub := alice.RPC.SubscribeChannelEvents()
|
||||||
|
|
||||||
aliceChanSub := subscribeChannelNotifications(ctxb, t, alice)
|
|
||||||
defer close(aliceChanSub.quit)
|
|
||||||
|
|
||||||
// Open the channels between Alice and Bob, asserting that the channels
|
// Open the channels between Alice and Bob, asserting that the channels
|
||||||
// have been properly opened on-chain.
|
// have been properly opened on-chain.
|
||||||
chanPoints := make([]*lnrpc.ChannelPoint, numChannels)
|
chanPoints := make([]*lnrpc.ChannelPoint, numChannels)
|
||||||
for i := 0; i < numChannels; i++ {
|
for i := 0; i < numChannels; i++ {
|
||||||
chanPoints[i] = openChannelAndAssert(
|
chanPoints[i] = ht.OpenChannel(
|
||||||
t, net, alice, bob, lntest.OpenChannelParams{
|
alice, bob, lntemp.OpenChannelParams{
|
||||||
Amt: amount,
|
Amt: amount,
|
||||||
},
|
},
|
||||||
)
|
)
|
||||||
@ -350,112 +338,93 @@ func runBasicChannelCreationAndUpdates(net *lntest.NetworkHarness,
|
|||||||
// Since each of the channels just became open, Bob and Alice should
|
// Since each of the channels just became open, Bob and Alice should
|
||||||
// each receive an open and an active notification for each channel.
|
// each receive an open and an active notification for each channel.
|
||||||
const numExpectedOpenUpdates = 3 * numChannels
|
const numExpectedOpenUpdates = 3 * numChannels
|
||||||
verifyOpenUpdatesReceived := func(sub channelSubscription) error {
|
verifyOpenUpdatesReceived := func(sub rpc.ChannelEventsClient) error {
|
||||||
numChannelUpds := 0
|
for i := 0; i < numExpectedOpenUpdates; i++ {
|
||||||
for numChannelUpds < numExpectedOpenUpdates {
|
update := ht.ReceiveChannelEvent(sub)
|
||||||
select {
|
|
||||||
case update := <-sub.updateChan:
|
|
||||||
switch update.Type {
|
|
||||||
case lnrpc.ChannelEventUpdate_PENDING_OPEN_CHANNEL:
|
|
||||||
if numChannelUpds%3 != 0 {
|
|
||||||
return fmt.Errorf("expected " +
|
|
||||||
"open or active" +
|
|
||||||
"channel ntfn, got pending open " +
|
|
||||||
"channel ntfn instead")
|
|
||||||
}
|
|
||||||
case lnrpc.ChannelEventUpdate_OPEN_CHANNEL:
|
|
||||||
if numChannelUpds%3 != 1 {
|
|
||||||
return fmt.Errorf("expected " +
|
|
||||||
"pending open or active" +
|
|
||||||
"channel ntfn, got open" +
|
|
||||||
"channel ntfn instead")
|
|
||||||
}
|
|
||||||
case lnrpc.ChannelEventUpdate_ACTIVE_CHANNEL:
|
|
||||||
if numChannelUpds%3 != 2 {
|
|
||||||
return fmt.Errorf("expected " +
|
|
||||||
"pending open or open" +
|
|
||||||
"channel ntfn, got active " +
|
|
||||||
"channel ntfn instead")
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
return fmt.Errorf("update type mismatch: "+
|
|
||||||
"expected open or active channel "+
|
|
||||||
"notification, got: %v",
|
|
||||||
update.Type)
|
|
||||||
}
|
|
||||||
numChannelUpds++
|
|
||||||
|
|
||||||
case <-time.After(time.Second * 10):
|
switch update.Type {
|
||||||
return fmt.Errorf("timeout waiting for channel "+
|
case lnrpc.ChannelEventUpdate_PENDING_OPEN_CHANNEL:
|
||||||
"notifications, only received %d/%d "+
|
if i%3 == 0 {
|
||||||
"chanupds", numChannelUpds,
|
continue
|
||||||
numExpectedOpenUpdates)
|
}
|
||||||
|
|
||||||
|
return fmt.Errorf("expected open or active" +
|
||||||
|
"channel ntfn, got pending open " +
|
||||||
|
"channel ntfn instead")
|
||||||
|
|
||||||
|
case lnrpc.ChannelEventUpdate_OPEN_CHANNEL:
|
||||||
|
if i%3 == 1 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
return fmt.Errorf("expected pending open or " +
|
||||||
|
"active channel ntfn, got open" +
|
||||||
|
"channel ntfn instead")
|
||||||
|
|
||||||
|
case lnrpc.ChannelEventUpdate_ACTIVE_CHANNEL:
|
||||||
|
if i%3 == 2 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
return fmt.Errorf("expected pending open or " +
|
||||||
|
"open channel ntfn, got active " +
|
||||||
|
"channel ntfn instead")
|
||||||
|
|
||||||
|
default:
|
||||||
|
return fmt.Errorf("update type mismatch: "+
|
||||||
|
"expected open or active channel "+
|
||||||
|
"notification, got: %v", update.Type)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
require.NoError(
|
require.NoError(ht, verifyOpenUpdatesReceived(bobChanSub),
|
||||||
t.t, verifyOpenUpdatesReceived(bobChanSub), "bob open channels",
|
"bob open channels")
|
||||||
)
|
require.NoError(ht, verifyOpenUpdatesReceived(aliceChanSub),
|
||||||
require.NoError(
|
"alice open channels")
|
||||||
t.t, verifyOpenUpdatesReceived(aliceChanSub), "alice open "+
|
|
||||||
"channels",
|
|
||||||
)
|
|
||||||
|
|
||||||
// Close the channels between Alice and Bob, asserting that the channels
|
// Close the channels between Alice and Bob, asserting that the
|
||||||
// have been properly closed on-chain.
|
// channels have been properly closed on-chain.
|
||||||
for i, chanPoint := range chanPoints {
|
for i, chanPoint := range chanPoints {
|
||||||
// Force close the first of the two channels.
|
// Force close the first of the two channels.
|
||||||
force := i%2 == 0
|
force := i%2 == 0
|
||||||
closeChannelAndAssert(t, net, alice, chanPoint, force)
|
|
||||||
if force {
|
if force {
|
||||||
cleanupForceClose(t, net, alice, chanPoint)
|
ht.ForceCloseChannel(alice, chanPoint)
|
||||||
|
} else {
|
||||||
|
ht.CloseChannel(alice, chanPoint)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// verifyCloseUpdatesReceived is used to verify that Alice and Bob
|
// verifyCloseUpdatesReceived is used to verify that Alice and Bob
|
||||||
// receive the correct channel updates in order.
|
// receive the correct channel updates in order.
|
||||||
const numExpectedCloseUpdates = 3 * numChannels
|
const numExpectedCloseUpdates = 3 * numChannels
|
||||||
verifyCloseUpdatesReceived := func(sub channelSubscription,
|
verifyCloseUpdatesReceived := func(sub rpc.ChannelEventsClient,
|
||||||
forceType lnrpc.ChannelCloseSummary_ClosureType,
|
forceType lnrpc.ChannelCloseSummary_ClosureType,
|
||||||
closeInitiator lnrpc.Initiator) error {
|
closeInitiator lnrpc.Initiator) error {
|
||||||
|
|
||||||
// Ensure one inactive and one closed notification is received
|
// Ensure one inactive and one closed notification is received
|
||||||
// for each closed channel.
|
// for each closed channel.
|
||||||
numChannelUpds := 0
|
for i := 0; i < numExpectedCloseUpdates; i++ {
|
||||||
for numChannelUpds < numExpectedCloseUpdates {
|
expectedCloseType := lnrpc.
|
||||||
expectedCloseType := lnrpc.ChannelCloseSummary_COOPERATIVE_CLOSE
|
ChannelCloseSummary_COOPERATIVE_CLOSE
|
||||||
|
|
||||||
// Every other channel should be force closed. If this
|
// Every other channel should be force closed. If this
|
||||||
// channel was force closed, set the expected close type
|
// channel was force closed, set the expected close type
|
||||||
// to the type passed in.
|
// to the type passed in.
|
||||||
force := (numChannelUpds/3)%2 == 0
|
force := (i/3)%2 == 0
|
||||||
if force {
|
if force {
|
||||||
expectedCloseType = forceType
|
expectedCloseType = forceType
|
||||||
}
|
}
|
||||||
|
|
||||||
select {
|
chanUpdate := ht.ReceiveChannelEvent(sub)
|
||||||
case chanUpdate := <-sub.updateChan:
|
err := verifyCloseUpdate(
|
||||||
err := verifyCloseUpdate(
|
chanUpdate, expectedCloseType,
|
||||||
chanUpdate, expectedCloseType,
|
closeInitiator,
|
||||||
closeInitiator,
|
)
|
||||||
)
|
if err != nil {
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
numChannelUpds++
|
|
||||||
|
|
||||||
case err := <-sub.errChan:
|
|
||||||
return err
|
return err
|
||||||
|
|
||||||
case <-time.After(time.Second * 10):
|
|
||||||
return fmt.Errorf("timeout waiting "+
|
|
||||||
"for channel notifications, only "+
|
|
||||||
"received %d/%d chanupds",
|
|
||||||
numChannelUpds, numChannelUpds)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -467,7 +436,7 @@ func runBasicChannelCreationAndUpdates(net *lntest.NetworkHarness,
|
|||||||
// All channels (cooperatively and force closed) should have a remote
|
// All channels (cooperatively and force closed) should have a remote
|
||||||
// close initiator because Alice closed the channels.
|
// close initiator because Alice closed the channels.
|
||||||
require.NoError(
|
require.NoError(
|
||||||
t.t, verifyCloseUpdatesReceived(
|
ht, verifyCloseUpdatesReceived(
|
||||||
bobChanSub,
|
bobChanSub,
|
||||||
lnrpc.ChannelCloseSummary_REMOTE_FORCE_CLOSE,
|
lnrpc.ChannelCloseSummary_REMOTE_FORCE_CLOSE,
|
||||||
lnrpc.Initiator_INITIATOR_REMOTE,
|
lnrpc.Initiator_INITIATOR_REMOTE,
|
||||||
@ -479,7 +448,7 @@ func runBasicChannelCreationAndUpdates(net *lntest.NetworkHarness,
|
|||||||
// All channels (cooperatively and force closed) should have a local
|
// All channels (cooperatively and force closed) should have a local
|
||||||
// close initiator because Alice closed the channels.
|
// close initiator because Alice closed the channels.
|
||||||
require.NoError(
|
require.NoError(
|
||||||
t.t, verifyCloseUpdatesReceived(
|
ht, verifyCloseUpdatesReceived(
|
||||||
aliceChanSub,
|
aliceChanSub,
|
||||||
lnrpc.ChannelCloseSummary_LOCAL_FORCE_CLOSE,
|
lnrpc.ChannelCloseSummary_LOCAL_FORCE_CLOSE,
|
||||||
lnrpc.Initiator_INITIATOR_LOCAL,
|
lnrpc.Initiator_INITIATOR_LOCAL,
|
||||||
@@ -516,3 +485,64 @@ func assertMinerBlockHeightDelta(ht *lntemp.HarnessTest,
    }, defaultTimeout)
    require.NoError(ht, err, "failed to assert block height delta")
}
+
+// verifyCloseUpdate is used to verify that a closed channel update is of the
+// expected type.
+func verifyCloseUpdate(chanUpdate *lnrpc.ChannelEventUpdate,
+   closeType lnrpc.ChannelCloseSummary_ClosureType,
+   closeInitiator lnrpc.Initiator) error {
+
+   // We should receive one inactive and one closed notification
+   // for each channel.
+   switch update := chanUpdate.Channel.(type) {
+   case *lnrpc.ChannelEventUpdate_InactiveChannel:
+       if chanUpdate.Type !=
+           lnrpc.ChannelEventUpdate_INACTIVE_CHANNEL {
+
+           return fmt.Errorf("update type mismatch: "+
+               "expected %v, got %v",
+               lnrpc.ChannelEventUpdate_INACTIVE_CHANNEL,
+               chanUpdate.Type)
+       }
+
+   case *lnrpc.ChannelEventUpdate_ClosedChannel:
+       if chanUpdate.Type !=
+           lnrpc.ChannelEventUpdate_CLOSED_CHANNEL {
+
+           return fmt.Errorf("update type mismatch: "+
+               "expected %v, got %v",
+               lnrpc.ChannelEventUpdate_CLOSED_CHANNEL,
+               chanUpdate.Type)
+       }
+
+       if update.ClosedChannel.CloseType != closeType {
+           return fmt.Errorf("channel closure type "+
+               "mismatch: expected %v, got %v",
+               closeType,
+               update.ClosedChannel.CloseType)
+       }
+
+       if update.ClosedChannel.CloseInitiator != closeInitiator {
+           return fmt.Errorf("expected close intiator: %v, "+
+               "got: %v", closeInitiator,
+               update.ClosedChannel.CloseInitiator)
+       }
+
+   case *lnrpc.ChannelEventUpdate_FullyResolvedChannel:
+       if chanUpdate.Type !=
+           lnrpc.ChannelEventUpdate_FULLY_RESOLVED_CHANNEL {
+
+           return fmt.Errorf("update type mismatch: "+
+               "expected %v, got %v",
+               lnrpc.ChannelEventUpdate_FULLY_RESOLVED_CHANNEL,
+               chanUpdate.Type)
+       }
+
+   default:
+       return fmt.Errorf("channel update channel of wrong type, "+
+           "expected closed channel, got %T",
+           update)
+   }
+
+   return nil
+}
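As an editorial aside (not part of the commit), the new verifyCloseUpdate helper above only classifies a single event. A minimal sketch of how a caller could wire it to a subscription channel follows; the helper name checkNextCloseUpdate and the ten-second wait are assumptions, while the lnrpc types and verifyCloseUpdate itself come from the hunk above.

package itest

import (
    "fmt"
    "time"

    "github.com/lightningnetwork/lnd/lnrpc"
)

// checkNextCloseUpdate is a hypothetical helper: it reads one event from a
// subscription channel and classifies it with verifyCloseUpdate from the
// hunk above, or fails if nothing arrives within ten seconds.
func checkNextCloseUpdate(updates <-chan *lnrpc.ChannelEventUpdate,
    closeType lnrpc.ChannelCloseSummary_ClosureType,
    initiator lnrpc.Initiator) error {

    select {
    case update := <-updates:
        return verifyCloseUpdate(update, closeType, initiator)

    case <-time.After(10 * time.Second):
        return fmt.Errorf("no channel event received before timeout")
    }
}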
@@ -12,6 +12,7 @@ import (
    "github.com/lightningnetwork/lnd/lnrpc"
    "github.com/lightningnetwork/lnd/lnrpc/routerrpc"
    "github.com/lightningnetwork/lnd/lntemp"
+   "github.com/lightningnetwork/lnd/lntemp/node"
    "github.com/lightningnetwork/lnd/lntest"
    "github.com/lightningnetwork/lnd/lntest/wait"
    "github.com/stretchr/testify/require"
@@ -225,35 +226,31 @@ func testPaymentFollowingChannelOpen(ht *lntemp.HarnessTest) {
}

// testAsyncPayments tests the performance of the async payments.
-func testAsyncPayments(net *lntest.NetworkHarness, t *harnessTest) {
-   runAsyncPayments(net, t, net.Alice, net.Bob)
+func testAsyncPayments(ht *lntemp.HarnessTest) {
+   // We use new nodes here as the benchmark test creates lots of data
+   // which can be costly to be carried on.
+   alice := ht.NewNode("Alice", []string{"--pending-commit-interval=3m"})
+   bob := ht.NewNode("Bob", []string{"--pending-commit-interval=3m"})
+
+   ht.EnsureConnected(alice, bob)
+   ht.FundCoins(btcutil.SatoshiPerBitcoin, alice)
+
+   runAsyncPayments(ht, alice, bob)
}

// runAsyncPayments tests the performance of the async payments.
-func runAsyncPayments(net *lntest.NetworkHarness, t *harnessTest, alice,
-   bob *lntest.HarnessNode) {
-
-   ctxb := context.Background()
-
-   const (
-       paymentAmt = 100
-   )
+func runAsyncPayments(ht *lntemp.HarnessTest, alice, bob *node.HarnessNode) {
+   const paymentAmt = 100

    // First establish a channel with a capacity equals to the overall
    // amount of payments, between Alice and Bob, at the end of the test
    // Alice should send all money from her side to Bob.
    channelCapacity := btcutil.Amount(paymentAmt * 2000)
-   chanPoint := openChannelAndAssert(
-       t, net, alice, bob,
-       lntest.OpenChannelParams{
-           Amt: channelCapacity,
-       },
+   chanPoint := ht.OpenChannel(
+       alice, bob, lntemp.OpenChannelParams{Amt: channelCapacity},
    )

-   info, err := getChanInfo(alice)
-   if err != nil {
-       t.Fatalf("unable to get alice channel info: %v", err)
-   }
+   info := ht.QueryChannelByChanPoint(alice, chanPoint)

    // We'll create a number of invoices equal the max number of HTLCs that
    // can be carried in one direction. The number on the commitment will
@@ -267,128 +264,95 @@ func runAsyncPayments(net *lntest.NetworkHarness, t *harnessTest, alice,

    // With the channel open, we'll create invoices for Bob that Alice
    // will pay to in order to advance the state of the channel.
-   bobPayReqs, _, _, err := createPayReqs(
-       bob, paymentAmt, numInvoices,
-   )
-   if err != nil {
-       t.Fatalf("unable to create pay reqs: %v", err)
-   }
-
-   // Wait for Alice to receive the channel edge from the funding manager.
-   err = alice.WaitForNetworkChannelOpen(chanPoint)
-   if err != nil {
-       t.Fatalf("alice didn't see the alice->bob channel before "+
-           "timeout: %v", err)
-   }
-
-   // Simultaneously send payments from Alice to Bob using of Bob's payment
-   // hashes generated above.
+   bobPayReqs, _, _ := ht.CreatePayReqs(bob, paymentAmt, numInvoices)
+
+   // Simultaneously send payments from Alice to Bob using of Bob's
+   // payment hashes generated above.
    now := time.Now()
-   errChan := make(chan error)
-   statusChan := make(chan *lnrpc.Payment)
+   settled := make(chan struct{})
+   defer close(settled)
+
+   timeout := lntest.AsyncBenchmarkTimeout * 2
    for i := 0; i < numInvoices; i++ {
        payReq := bobPayReqs[i]
        go func() {
-           ctxt, _ := context.WithTimeout(ctxb, lntest.AsyncBenchmarkTimeout)
-           stream, err := alice.RouterClient.SendPaymentV2(
-               ctxt,
-               &routerrpc.SendPaymentRequest{
-                   PaymentRequest: payReq,
-                   TimeoutSeconds: 60,
-                   FeeLimitMsat:   noFeeLimitMsat,
-               },
-           )
-           if err != nil {
-               errChan <- err
-           }
-           result, err := getPaymentResult(stream)
-           if err != nil {
-               errChan <- err
-           }
-
-           statusChan <- result
+           req := &routerrpc.SendPaymentRequest{
+               PaymentRequest: payReq,
+               TimeoutSeconds: int32(timeout.Seconds()),
+               FeeLimitMsat:   noFeeLimitMsat,
+           }
+
+           // AssertPaymentStatusWithTimeout will assert that the
+           // payment is settled.
+           stream := alice.RPC.SendPayment(req)
+           ht.AssertPaymentSucceedWithTimeout(stream, timeout)
+
+           settled <- struct{}{}
        }()
    }

    // Wait until all the payments have settled.
+   timer := time.After(timeout)
    for i := 0; i < numInvoices; i++ {
        select {
-       case result := <-statusChan:
-           if result.Status == lnrpc.Payment_SUCCEEDED {
-               continue
-           }
-
-       case err := <-errChan:
-           t.Fatalf("payment error: %v", err)
+       case <-settled:
+       case <-timer:
+           require.Fail(ht, "timeout", "wait payment failed")
        }
    }

    // All payments have been sent, mark the finish time.
    timeTaken := time.Since(now)

-   // Next query for Bob's and Alice's channel states, in order to confirm
-   // that all payment have been successful transmitted.
-
-   // Wait for the revocation to be received so alice no longer has pending
-   // htlcs listed and has correct balances. This is needed due to the fact
-   // that we now pipeline the settles.
-   err = wait.Predicate(func() bool {
-       aliceChan, err := getChanInfo(alice)
-       if err != nil {
-           return false
-       }
-       if len(aliceChan.PendingHtlcs) != 0 {
-           return false
-       }
-       if aliceChan.RemoteBalance != bobAmt {
-           return false
-       }
-       if aliceChan.LocalBalance != aliceAmt {
-           return false
-       }
-
-       return true
-   }, defaultTimeout)
-   if err != nil {
-       t.Fatalf("failed to assert alice's pending htlcs and/or remote/local balance")
-   }
+   // assertChannelState asserts the channel state by checking the values
+   // in fields, LocalBalance, RemoteBalance and num of PendingHtlcs.
+   assertChannelState := func(hn *node.HarnessNode, cp *lnrpc.ChannelPoint,
+       localBalance, remoteBalance int64, numPendingHtlcs int) {
+
+       // Get the funding point.
+       err := wait.NoError(func() error {
+           // Find the target channel first.
+           target := ht.GetChannelByChanPoint(hn, cp)
+
+           if len(target.PendingHtlcs) != numPendingHtlcs {
+               return fmt.Errorf("pending htlcs is "+
+                   "incorrect, got %v, expected %v",
+                   len(target.PendingHtlcs), 0)
+           }
+
+           if target.LocalBalance != localBalance {
+               return fmt.Errorf("local balance is "+
+                   "incorrect, got %v, expected %v",
+                   target.LocalBalance, localBalance)
+           }
+
+           if target.RemoteBalance != remoteBalance {
+               return fmt.Errorf("remote balance is "+
+                   "incorrect, got %v, expected %v",
+                   target.RemoteBalance, remoteBalance)
+           }
+
+           return nil
+       }, lntemp.DefaultTimeout)
+       require.NoError(ht, err, "timeout while chekcing for balance")
+   }
+
+   // Wait for the revocation to be received so alice no longer has
+   // pending htlcs listed and has correct balances. This is needed due to
+   // the fact that we now pipeline the settles.
+   assertChannelState(alice, chanPoint, aliceAmt, bobAmt, 0)

    // Wait for Bob to receive revocation from Alice.
-   err = wait.NoError(func() error {
-       bobChan, err := getChanInfo(bob)
-       if err != nil {
-           t.Fatalf("unable to get bob's channel info: %v", err)
-       }
-
-       if len(bobChan.PendingHtlcs) != 0 {
-           return fmt.Errorf("bob's pending htlcs is incorrect, "+
-               "got %v, expected %v",
-               len(bobChan.PendingHtlcs), 0)
-       }
-
-       if bobChan.LocalBalance != bobAmt {
-           return fmt.Errorf("bob's local balance is incorrect, "+
-               "got %v, expected %v", bobChan.LocalBalance,
-               bobAmt)
-       }
-
-       if bobChan.RemoteBalance != aliceAmt {
-           return fmt.Errorf("bob's remote balance is incorrect, "+
-               "got %v, expected %v", bobChan.RemoteBalance,
-               aliceAmt)
-       }
-
-       return nil
-   }, defaultTimeout)
-   require.NoError(t.t, err)
-
-   t.Log("\tBenchmark info: Elapsed time: ", timeTaken)
-   t.Log("\tBenchmark info: TPS: ", float64(numInvoices)/timeTaken.Seconds())
+   assertChannelState(bob, chanPoint, bobAmt, aliceAmt, 0)
+
+   ht.Log("\tBenchmark info: Elapsed time: ", timeTaken)
+   ht.Log("\tBenchmark info: TPS: ",
+       float64(numInvoices)/timeTaken.Seconds())

    // Finally, immediately close the channel. This function will also
    // block until the channel is closed and will additionally assert the
    // relevant channel closing post conditions.
-   closeChannelAndAssert(t, net, alice, chanPoint, false)
+   ht.CloseChannel(alice, chanPoint)
}

// testBidirectionalAsyncPayments tests that nodes are able to send the
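The rewritten runAsyncPayments above fans out one goroutine per payment and bounds the wait for all of them with a single timer. A generic sketch of that pattern, using only the standard library, is shown below; the function name and the buffered channel are illustrative choices, not code from the commit.

package itest

import (
    "fmt"
    "time"
)

// payAllWithTimeout runs pay(i) for i in [0, n) concurrently and waits for
// every call to report completion, bounding the whole wait with one timer.
// The channel is buffered so late goroutines never block on send.
func payAllWithTimeout(pay func(i int), n int, timeout time.Duration) error {
    settled := make(chan struct{}, n)

    for i := 0; i < n; i++ {
        i := i
        go func() {
            pay(i)
            settled <- struct{}{}
        }()
    }

    timer := time.After(timeout)
    for i := 0; i < n; i++ {
        select {
        case <-settled:
        case <-timer:
            return fmt.Errorf("timed out after %v with %d of %d "+
                "payments outstanding", timeout, n-i, n)
        }
    }

    return nil
}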
File diff suppressed because it is too large
@@ -1,7 +1,6 @@
package itest

import (
-   "context"
    "fmt"
    "testing"

@@ -11,7 +10,8 @@ import (
    "github.com/lightningnetwork/lnd/keychain"
    "github.com/lightningnetwork/lnd/lnrpc"
    "github.com/lightningnetwork/lnd/lnrpc/walletrpc"
-   "github.com/lightningnetwork/lnd/lntest"
+   "github.com/lightningnetwork/lnd/lntemp"
+   "github.com/lightningnetwork/lnd/lntemp/node"
    "github.com/stretchr/testify/require"
)
@@ -54,105 +54,94 @@ var (

// testRemoteSigner tests that a watch-only wallet can use a remote signing
// wallet to perform any signing or ECDH operations.
-func testRemoteSigner(net *lntest.NetworkHarness, t *harnessTest) {
-   ctxb := context.Background()
-
-   subTests := []struct {
+func testRemoteSigner(ht *lntemp.HarnessTest) {
+   type testCase struct {
        name       string
        randomSeed bool
        sendCoins  bool
-       fn func(tt *harnessTest, wo, carol *lntest.HarnessNode)
-   }{{
+       fn func(tt *lntemp.HarnessTest,
+           wo, carol *node.HarnessNode)
+   }
+
+   subTests := []testCase{{
        name:       "random seed",
        randomSeed: true,
-       fn: func(tt *harnessTest, wo, carol *lntest.HarnessNode) {
+       fn: func(tt *lntemp.HarnessTest, wo, carol *node.HarnessNode) {
            // Nothing more to test here.
        },
    }, {
        name: "account import",
-       fn: func(tt *harnessTest, wo, carol *lntest.HarnessNode) {
+       fn: func(tt *lntemp.HarnessTest, wo, carol *node.HarnessNode) {
            runWalletImportAccountScenario(
-               net, tt,
-               walletrpc.AddressType_WITNESS_PUBKEY_HASH,
+               tt, walletrpc.AddressType_WITNESS_PUBKEY_HASH,
                carol, wo,
            )
        },
    }, {
        name:      "basic channel open close",
        sendCoins: true,
-       fn: func(tt *harnessTest, wo, carol *lntest.HarnessNode) {
-           runBasicChannelCreationAndUpdates(
-               net, tt, wo, carol,
-           )
+       fn: func(tt *lntemp.HarnessTest, wo, carol *node.HarnessNode) {
+           runBasicChannelCreationAndUpdates(tt, wo, carol)
        },
    }, {
        name:      "async payments",
        sendCoins: true,
-       fn: func(tt *harnessTest, wo, carol *lntest.HarnessNode) {
-           runAsyncPayments(net, tt, wo, carol)
+       fn: func(tt *lntemp.HarnessTest, wo, carol *node.HarnessNode) {
+           runAsyncPayments(tt, wo, carol)
        },
    }, {
        name: "shared key",
-       fn: func(tt *harnessTest, wo, carol *lntest.HarnessNode) {
+       fn: func(tt *lntemp.HarnessTest, wo, carol *node.HarnessNode) {
            runDeriveSharedKey(tt, wo)
        },
    }, {
        name:      "cpfp",
        sendCoins: true,
-       fn: func(tt *harnessTest, wo, carol *lntest.HarnessNode) {
-           runCPFP(net, tt, wo, carol)
+       fn: func(tt *lntemp.HarnessTest, wo, carol *node.HarnessNode) {
+           runCPFP(tt, wo, carol)
        },
    }, {
        name:       "psbt",
        randomSeed: true,
-       fn: func(tt *harnessTest, wo, carol *lntest.HarnessNode) {
-           runPsbtChanFunding(net, tt, carol, wo)
-           runSignPsbtSegWitV0P2WKH(tt, net, wo)
-           runSignPsbtSegWitV1KeySpendBip86(tt, net, wo)
-           runSignPsbtSegWitV1KeySpendRootHash(tt, net, wo)
-           runSignPsbtSegWitV1ScriptSpend(tt, net, wo)
+       fn: func(tt *lntemp.HarnessTest, wo, carol *node.HarnessNode) {
+           runPsbtChanFunding(tt, carol, wo)
+           runSignPsbtSegWitV0P2WKH(tt, wo)
+           runSignPsbtSegWitV1KeySpendBip86(tt, wo)
+           runSignPsbtSegWitV1KeySpendRootHash(tt, wo)
+           runSignPsbtSegWitV1ScriptSpend(tt, wo)
        },
    }, {
        name:      "sign output raw",
        sendCoins: true,
-       fn: func(tt *harnessTest, wo, carol *lntest.HarnessNode) {
-           runSignOutputRaw(tt, net, wo)
+       fn: func(tt *lntemp.HarnessTest, wo, carol *node.HarnessNode) {
+           runSignOutputRaw(tt, wo)
        },
    }, {
        name:      "sign verify msg",
        sendCoins: true,
-       fn: func(tt *harnessTest, wo, carol *lntest.HarnessNode) {
-           runSignVerifyMessage(tt, net, wo)
+       fn: func(tt *lntemp.HarnessTest, wo, carol *node.HarnessNode) {
+           runSignVerifyMessage(tt, wo)
        },
    }, {
        name:       "taproot",
        sendCoins:  true,
        randomSeed: true,
-       fn: func(tt *harnessTest, wo, carol *lntest.HarnessNode) {
-           longTimeout := 3 * defaultTimeout
-           ctxt, cancel := context.WithTimeout(ctxb, longTimeout)
-           testTaprootSendCoinsKeySpendBip86(ctxt, tt, wo, net)
-           testTaprootComputeInputScriptKeySpendBip86(
-               ctxt, tt, wo, net,
-           )
-           testTaprootSignOutputRawScriptSpend(ctxt, tt, wo, net)
-           testTaprootSignOutputRawKeySpendBip86(ctxt, tt, wo, net)
-           testTaprootSignOutputRawKeySpendRootHash(
-               ctxt, tt, wo, net,
-           )
-           cancel()
-
-           ctxt, cancel = context.WithTimeout(ctxb, longTimeout)
-           testTaprootMuSig2KeySpendRootHash(ctxt, tt, wo, net)
-           testTaprootMuSig2ScriptSpend(ctxt, tt, wo, net)
-           testTaprootMuSig2KeySpendBip86(ctxt, tt, wo, net)
-           testTaprootMuSig2CombinedLeafKeySpend(ctxt, tt, wo, net)
-           cancel()
+       fn: func(tt *lntemp.HarnessTest, wo, carol *node.HarnessNode) {
+           testTaprootSendCoinsKeySpendBip86(tt, wo)
+           testTaprootComputeInputScriptKeySpendBip86(tt, wo)
+           testTaprootSignOutputRawScriptSpend(tt, wo)
+           testTaprootSignOutputRawKeySpendBip86(tt, wo)
+           testTaprootSignOutputRawKeySpendRootHash(tt, wo)
+           testTaprootMuSig2KeySpendRootHash(tt, wo)
+           testTaprootMuSig2ScriptSpend(tt, wo)
+           testTaprootMuSig2KeySpendBip86(tt, wo)
+           testTaprootMuSig2CombinedLeafKeySpend(tt, wo)
        },
    }}

-   for _, st := range subTests {
-       subTest := st
+   prepareTest := func(st *lntemp.HarnessTest,
+       subTest testCase) (*node.HarnessNode,
+       *node.HarnessNode, *node.HarnessNode) {

        // Signer is our signing node and has the wallet with the full
        // master private key. We test that we can create the watch-only
@@ -162,34 +151,32 @@ func testRemoteSigner(net *lntest.NetworkHarness, t *harnessTest) {
        password := []byte("itestpassword")
        var (
            signerNodePubKey  = nodePubKey
-           watchOnlyAccounts = deriveCustomScopeAccounts(t.t)
-           signer            *lntest.HarnessNode
+           watchOnlyAccounts = deriveCustomScopeAccounts(ht.T)
+           signer            *node.HarnessNode
            err               error
        )
        if !subTest.randomSeed {
-           signer, err = net.RestoreNodeWithSeed(
+           signer = st.RestoreNodeWithSeed(
                "Signer", nil, password, nil, rootKey, 0, nil,
            )
-           require.NoError(t.t, err)
        } else {
-           signer = net.NewNode(t.t, "Signer", nil)
+           signer = st.NewNode("Signer", nil)
            signerNodePubKey = signer.PubKeyStr

-           rpcAccts, err := signer.WalletKitClient.ListAccounts(
-               ctxb, &walletrpc.ListAccountsRequest{},
+           rpcAccts := signer.RPC.ListAccounts(
+               &walletrpc.ListAccountsRequest{},
            )
-           require.NoError(t.t, err)

            watchOnlyAccounts, err = walletrpc.AccountsToWatchOnly(
                rpcAccts.Accounts,
            )
-           require.NoError(t.t, err)
+           require.NoError(st, err)
        }

        // WatchOnly is the node that has a watch-only wallet and uses
        // the Signer node for any operation that requires access to
        // private keys.
-       watchOnly, err := net.NewNodeRemoteSigner(
+       watchOnly := st.NewNodeRemoteSigner(
            "WatchOnly", []string{
                "--remotesigner.enable",
                fmt.Sprintf(
@@ -210,33 +197,35 @@ func testRemoteSigner(net *lntest.NetworkHarness, t *harnessTest) {
                Accounts: watchOnlyAccounts,
            },
        )
-       require.NoError(t.t, err)

-       resp, err := watchOnly.GetInfo(ctxb, &lnrpc.GetInfoRequest{})
-       require.NoError(t.t, err)
-
-       require.Equal(t.t, signerNodePubKey, resp.IdentityPubkey)
+       resp := watchOnly.RPC.GetInfo()
+       require.Equal(st, signerNodePubKey, resp.IdentityPubkey)

        if subTest.sendCoins {
-           net.SendCoins(t.t, btcutil.SatoshiPerBitcoin, watchOnly)
-           assertAccountBalance(
-               t.t, watchOnly, "default",
+           st.FundCoins(btcutil.SatoshiPerBitcoin, watchOnly)
+           ht.AssertWalletAccountBalance(
+               watchOnly, "default",
                btcutil.SatoshiPerBitcoin, 0,
            )
        }

-       carol := net.NewNode(t.t, "carol", nil)
-       net.EnsureConnected(t.t, watchOnly, carol)
+       carol := st.NewNode("carol", nil)
+       st.EnsureConnected(watchOnly, carol)
+
+       return signer, watchOnly, carol
+   }
+
+   for _, testCase := range subTests {
+       subTest := testCase

-       success := t.t.Run(subTest.name, func(tt *testing.T) {
-           ht := newHarnessTest(tt, net)
-           subTest.fn(ht, watchOnly, carol)
+       success := ht.Run(subTest.name, func(tt *testing.T) {
+           // Skip the cleanup here as no standby node is used.
+           st := ht.Subtest(tt)
+
+           _, watchOnly, carol := prepareTest(st, subTest)
+           subTest.fn(st, watchOnly, carol)
        })

-       shutdownAndAssert(net, t, carol)
-       shutdownAndAssert(net, t, watchOnly)
-       shutdownAndAssert(net, t, signer)
-
        if !success {
            return
        }
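The refactored testRemoteSigner above separates fixture setup (prepareTest) from the per-case body and stops at the first failing subtest. A generic sketch of that driver shape, assuming nothing beyond the standard testing package, follows; every name in it is illustrative rather than taken from the commit.

package itest

import "testing"

// subCase pairs a name with a test body that receives a prepared fixture.
type subCase struct {
    name string
    fn   func(t *testing.T, fixture string)
}

// runSubCases prepares a fresh fixture per case, runs each case as its own
// subtest, and returns early once a case fails, mirroring the driver loop in
// the diff above.
func runSubCases(t *testing.T, cases []subCase,
    prepare func(t *testing.T) string) {

    for _, tc := range cases {
        tc := tc
        ok := t.Run(tc.name, func(st *testing.T) {
            fixture := prepare(st)
            tc.fn(st, fixture)
        })
        if !ok {
            return
        }
    }
}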
@@ -560,7 +560,7 @@ func middlewareMandatoryTest(ht *lntemp.HarnessTest, node *node.HarnessNode) {
    // test case. So we need to do the wait and client setup manually here.
    conn, err := node.ConnectRPC()
    require.NoError(ht, err)
-   node.InitRPCClients(conn)
+   node.Initialize(conn)
    err = node.WaitUntilServerActive()
    require.NoError(ht, err)
@@ -1,22 +1,19 @@
package itest

import (
-   "context"
    "encoding/hex"

    "github.com/btcsuite/btcd/btcutil"
    "github.com/lightningnetwork/lnd/lnrpc"
    "github.com/lightningnetwork/lnd/lnrpc/routerrpc"
-   "github.com/lightningnetwork/lnd/lntest"
+   "github.com/lightningnetwork/lnd/lntemp"
+   "github.com/stretchr/testify/require"
)

// testSendMultiPathPayment tests that we are able to successfully route a
// payment using multiple shards across different paths.
-func testSendMultiPathPayment(net *lntest.NetworkHarness, t *harnessTest) {
-   ctxb := context.Background()
-
-   ctx := newMppTestContext(t, net)
-   defer ctx.shutdownNodes()
+func testSendMultiPathPayment(ht *lntemp.HarnessTest) {
+   mts := newMppTestScenario(ht)

    const paymentAmt = btcutil.Amount(300000)
@@ -30,57 +27,45 @@ func testSendMultiPathPayment(net *lntest.NetworkHarness, t *harnessTest) {
    //             \       /
    //              \__ Dave ____/
    //
-   ctx.openChannel(ctx.carol, ctx.bob, 135000)
-   ctx.openChannel(ctx.alice, ctx.carol, 235000)
-   ctx.openChannel(ctx.dave, ctx.bob, 135000)
-   ctx.openChannel(ctx.alice, ctx.dave, 135000)
-   ctx.openChannel(ctx.eve, ctx.bob, 135000)
-   ctx.openChannel(ctx.carol, ctx.eve, 135000)
-
-   defer ctx.closeChannels()
-
-   ctx.waitForChannels()
+   req := &mppOpenChannelRequest{
+       amtAliceCarol: 235000,
+       amtAliceDave:  135000,
+       amtCarolBob:   135000,
+       amtCarolEve:   135000,
+       amtDaveBob:    135000,
+       amtEveBob:     135000,
+   }
+   mts.openChannels(req)
+   chanPointAliceDave := mts.channelPoints[1]

    // Increase Dave's fee to make the test deterministic. Otherwise it
    // would be unpredictable whether pathfinding would go through Charlie
    // or Dave for the first shard.
-   _, err := ctx.dave.UpdateChannelPolicy(
-       context.Background(),
-       &lnrpc.PolicyUpdateRequest{
-           Scope:         &lnrpc.PolicyUpdateRequest_Global{Global: true},
-           BaseFeeMsat:   500000,
-           FeeRate:       0.001,
-           TimeLockDelta: 40,
-       },
+   expectedPolicy := mts.updateDaveGlobalPolicy()
+
+   // Make sure Alice has heard it.
+   ht.AssertChannelPolicyUpdate(
+       mts.alice, mts.dave, expectedPolicy, chanPointAliceDave, false,
    )
-   if err != nil {
-       t.Fatalf("dave policy update: %v", err)
-   }

    // Our first test will be Alice paying Bob using a SendPayment call.
    // Let Bob create an invoice for Alice to pay.
-   payReqs, rHashes, invoices, err := createPayReqs(
-       ctx.bob, paymentAmt, 1,
-   )
-   if err != nil {
-       t.Fatalf("unable to create pay reqs: %v", err)
-   }
+   payReqs, rHashes, invoices := ht.CreatePayReqs(mts.bob, paymentAmt, 1)

    rHash := rHashes[0]
    payReq := payReqs[0]

-   payment := sendAndAssertSuccess(
-       t, ctx.alice, &routerrpc.SendPaymentRequest{
-           PaymentRequest: payReq,
-           MaxParts:       10,
-           TimeoutSeconds: 60,
-           FeeLimitMsat:   noFeeLimitMsat,
-       },
-   )
+   sendReq := &routerrpc.SendPaymentRequest{
+       PaymentRequest: payReq,
+       MaxParts:       10,
+       TimeoutSeconds: 60,
+       FeeLimitMsat:   noFeeLimitMsat,
+   }
+   payment := ht.SendPaymentAssertSettled(mts.alice, sendReq)

    // Make sure we got the preimage.
-   if payment.PaymentPreimage != hex.EncodeToString(invoices[0].RPreimage) {
-       t.Fatalf("preimage doesn't match")
-   }
+   require.Equal(ht, hex.EncodeToString(invoices[0].RPreimage),
+       payment.PaymentPreimage, "preimage doesn't match")

    // Check that Alice split the payment in at least three shards. Because
    // the hand-off of the htlc to the link is asynchronous (via a mailbox),
@@ -97,32 +82,17 @@ func testSendMultiPathPayment(net *lntest.NetworkHarness, t *harnessTest) {
    }

    const minExpectedShards = 3
-   if succeeded < minExpectedShards {
-       t.Fatalf("expected at least %v shards, but got %v",
-           minExpectedShards, succeeded)
-   }
-
-   // Make sure Bob show the invoice as settled for the full
-   // amount.
-   ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
-   inv, err := ctx.bob.LookupInvoice(
-       ctxt, &lnrpc.PaymentHash{
-           RHash: rHash,
-       },
-   )
-   if err != nil {
-       t.Fatalf("error when obtaining invoice: %v", err)
-   }
+   require.GreaterOrEqual(ht, succeeded, minExpectedShards,
+       "expected shards not reached")
+
+   // Make sure Bob show the invoice as settled for the full amount.
+   inv := mts.bob.RPC.LookupInvoice(rHash)

-   if inv.AmtPaidSat != int64(paymentAmt) {
-       t.Fatalf("incorrect payment amt for invoice"+
-           "want: %d, got %d",
-           paymentAmt, inv.AmtPaidSat)
-   }
+   require.EqualValues(ht, paymentAmt, inv.AmtPaidSat,
+       "incorrect payment amt")

-   if inv.State != lnrpc.Invoice_SETTLED {
-       t.Fatalf("Invoice not settled: %v", inv.State)
-   }
+   require.Equal(ht, lnrpc.Invoice_SETTLED, inv.State,
+       "Invoice not settled")

    settled := 0
    for _, htlc := range inv.Htlcs {
@@ -130,8 +100,9 @@ func testSendMultiPathPayment(net *lntest.NetworkHarness, t *harnessTest) {
            settled++
        }
    }
-   if settled != succeeded {
-       t.Fatalf("expected invoice to be settled "+
-           "with %v HTLCs, had %v", succeeded, settled)
-   }
+   require.Equal(ht, succeeded, settled,
+       "num of HTLCs wrong")
+
+   // Finally, close all channels.
+   mts.closeChannels()
}
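The invoice checks above count how many HTLC shards actually settled before comparing against the number of successful payment attempts. A small helper sketch of that counting step follows; the helper name is an assumption, while the lnrpc types are the ones already used by the test.

package itest

import "github.com/lightningnetwork/lnd/lnrpc"

// countSettledHtlcs returns the number of HTLCs on the invoice that reached
// the SETTLED state, the quantity the test compares against the number of
// successful shards.
func countSettledHtlcs(inv *lnrpc.Invoice) int {
    settled := 0
    for _, htlc := range inv.Htlcs {
        if htlc.State == lnrpc.InvoiceHTLCState_SETTLED {
            settled++
        }
    }

    return settled
}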
@@ -2,9 +2,7 @@ package itest

import (
    "bytes"
-   "context"
    "crypto/sha256"
-   "fmt"

    "github.com/btcsuite/btcd/btcec/v2"
    "github.com/btcsuite/btcd/btcec/v2/schnorr"

@@ -15,7 +13,8 @@ import (
    "github.com/lightningnetwork/lnd/lnrpc"
    "github.com/lightningnetwork/lnd/lnrpc/signrpc"
    "github.com/lightningnetwork/lnd/lnrpc/walletrpc"
-   "github.com/lightningnetwork/lnd/lntest"
+   "github.com/lightningnetwork/lnd/lntemp"
+   "github.com/lightningnetwork/lnd/lntemp/node"
    "github.com/stretchr/testify/require"
)
@@ -23,21 +22,19 @@ import (
// DeriveSharedKey. It creates an ephemeral private key, performing an ECDH with
// the node's pubkey and a customized public key to check the validity of the
// result.
-func testDeriveSharedKey(net *lntest.NetworkHarness, t *harnessTest) {
-   runDeriveSharedKey(t, net.Alice)
+func testDeriveSharedKey(ht *lntemp.HarnessTest) {
+   runDeriveSharedKey(ht, ht.Alice)
}

// runDeriveSharedKey checks the ECDH performed by the endpoint
// DeriveSharedKey. It creates an ephemeral private key, performing an ECDH with
// the node's pubkey and a customized public key to check the validity of the
// result.
-func runDeriveSharedKey(t *harnessTest, alice *lntest.HarnessNode) {
-   ctxb := context.Background()
-
+func runDeriveSharedKey(ht *lntemp.HarnessTest, alice *node.HarnessNode) {
    // Create an ephemeral key, extracts its public key, and make a
    // PrivKeyECDH using the ephemeral key.
    ephemeralPriv, err := btcec.NewPrivateKey()
-   require.NoError(t.t, err, "failed to create ephemeral key")
+   require.NoError(ht, err, "failed to create ephemeral key")

    ephemeralPubBytes := ephemeralPriv.PubKey().SerializeCompressed()
    privKeyECDH := &keychain.PrivKeyECDH{PrivKey: ephemeralPriv}
@@ -47,30 +44,27 @@ func runDeriveSharedKey(t *harnessTest, alice *lntest.HarnessNode) {
    assertECDHMatch := func(pub *btcec.PublicKey,
        req *signrpc.SharedKeyRequest) {

-       ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
-       resp, err := alice.SignerClient.DeriveSharedKey(ctxt, req)
-       require.NoError(t.t, err, "calling DeriveSharedKey failed")
+       resp := alice.RPC.DeriveSharedKey(req)

        sharedKey, _ := privKeyECDH.ECDH(pub)
-       require.Equal(
-           t.t, sharedKey[:], resp.SharedKey,
-           "failed to derive the expected key",
-       )
+       require.Equal(ht, sharedKey[:], resp.SharedKey,
+           "failed to derive the expected key")
    }

    nodePub, err := btcec.ParsePubKey(alice.PubKey[:])
-   require.NoError(t.t, err, "failed to parse node pubkey")
+   require.NoError(ht, err, "failed to parse node pubkey")

    customizedKeyFamily := int32(keychain.KeyFamilyMultiSig)
    customizedIndex := int32(1)

-   customizedPub, err := deriveCustomizedKey(
-       ctxb, alice, &signrpc.KeyLocator{
-           KeyFamily: customizedKeyFamily,
-           KeyIndex:  customizedIndex,
-       },
-   )
-   require.NoError(t.t, err, "failed to create customized pubkey")
+   // Derive a customized key.
+   deriveReq := &signrpc.KeyLocator{
+       KeyFamily: customizedKeyFamily,
+       KeyIndex:  customizedIndex,
+   }
+   resp := alice.RPC.DeriveKey(deriveReq)
+   customizedPub, err := btcec.ParsePubKey(resp.RawKeyBytes)
+   require.NoError(ht, err, "failed to parse node pubkey")

    // Test DeriveSharedKey with no optional arguments. It will result in
    // performing an ECDH between the ephemeral key and the node's pubkey.
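assertECDHMatch above compares the RPC result with an ECDH computed locally from the ephemeral key. A minimal sketch of just that local computation is shown below, using the same keychain and btcec packages the test already imports; the wrapper function itself is illustrative.

package itest

import (
    "github.com/btcsuite/btcd/btcec/v2"
    "github.com/lightningnetwork/lnd/keychain"
)

// localSharedKey derives the ECDH shared secret between an ephemeral private
// key and a peer public key, the value the test expects DeriveSharedKey to
// return for that peer key.
func localSharedKey(ephemeral *btcec.PrivateKey,
    peer *btcec.PublicKey) ([32]byte, error) {

    ecdh := &keychain.PrivKeyECDH{PrivKey: ephemeral}
    return ecdh.ECDH(peer)
}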
@@ -154,12 +148,8 @@ func runDeriveSharedKey(t *harnessTest, alice *lntest.HarnessNode) {
    // assertErrorMatch checks when calling DeriveSharedKey with invalid
    // params, the expected error is returned.
    assertErrorMatch := func(match string, req *signrpc.SharedKeyRequest) {
-       ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
-       _, err := alice.SignerClient.DeriveSharedKey(ctxt, req)
-       require.Error(t.t, err, "expected to have an error")
-       require.Contains(
-           t.t, err.Error(), match, "error failed to match",
-       )
+       err := alice.RPC.DeriveSharedKeyErr(req)
+       require.Contains(ht, err.Error(), match, "error not match")
    }

    // Test that EphemeralPubkey must be supplied.
@ -204,43 +194,35 @@ func runDeriveSharedKey(t *harnessTest, alice *lntest.HarnessNode) {
|
|||||||
|
|
||||||
// testSignOutputRaw makes sure that the SignOutputRaw RPC can be used with all
|
// testSignOutputRaw makes sure that the SignOutputRaw RPC can be used with all
|
||||||
// custom ways of specifying the signing key in the key descriptor/locator.
|
// custom ways of specifying the signing key in the key descriptor/locator.
|
||||||
func testSignOutputRaw(net *lntest.NetworkHarness, t *harnessTest) {
|
func testSignOutputRaw(ht *lntemp.HarnessTest) {
|
||||||
runSignOutputRaw(t, net, net.Alice)
|
runSignOutputRaw(ht, ht.Alice)
|
||||||
}
|
}
|
||||||
|
|
||||||
// runSignOutputRaw makes sure that the SignOutputRaw RPC can be used with all
|
// runSignOutputRaw makes sure that the SignOutputRaw RPC can be used with all
|
||||||
// custom ways of specifying the signing key in the key descriptor/locator.
|
// custom ways of specifying the signing key in the key descriptor/locator.
|
||||||
func runSignOutputRaw(t *harnessTest, net *lntest.NetworkHarness,
|
func runSignOutputRaw(ht *lntemp.HarnessTest, alice *node.HarnessNode) {
|
||||||
alice *lntest.HarnessNode) {
|
|
||||||
|
|
||||||
ctxb := context.Background()
|
|
||||||
ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout)
|
|
||||||
defer cancel()
|
|
||||||
|
|
||||||
// For the next step, we need a public key. Let's use a special family
|
// For the next step, we need a public key. Let's use a special family
|
||||||
// for this. We want this to be an index of zero.
|
// for this. We want this to be an index of zero.
|
||||||
const testCustomKeyFamily = 44
|
const testCustomKeyFamily = 44
|
||||||
keyDesc, err := alice.WalletKitClient.DeriveNextKey(
|
req := &walletrpc.KeyReq{
|
||||||
ctxt, &walletrpc.KeyReq{
|
KeyFamily: testCustomKeyFamily,
|
||||||
KeyFamily: testCustomKeyFamily,
|
}
|
||||||
},
|
keyDesc := alice.RPC.DeriveNextKey(req)
|
||||||
)
|
require.Equal(ht, int32(0), keyDesc.KeyLoc.KeyIndex)
|
||||||
require.NoError(t.t, err)
|
|
||||||
require.Equal(t.t, int32(0), keyDesc.KeyLoc.KeyIndex)
|
|
||||||
|
|
||||||
targetPubKey, err := btcec.ParsePubKey(keyDesc.RawKeyBytes)
|
targetPubKey, err := btcec.ParsePubKey(keyDesc.RawKeyBytes)
|
||||||
require.NoError(t.t, err)
|
require.NoError(ht, err)
|
||||||
|
|
||||||
// First, try with a key descriptor that only sets the public key.
|
// First, try with a key descriptor that only sets the public key.
|
||||||
assertSignOutputRaw(
|
assertSignOutputRaw(
|
||||||
t, net, alice, targetPubKey, &signrpc.KeyDescriptor{
|
ht, alice, targetPubKey, &signrpc.KeyDescriptor{
|
||||||
RawKeyBytes: keyDesc.RawKeyBytes,
|
RawKeyBytes: keyDesc.RawKeyBytes,
|
||||||
},
|
},
|
||||||
)
|
)
|
||||||
|
|
||||||
// Now try again, this time only with the (0 index!) key locator.
|
// Now try again, this time only with the (0 index!) key locator.
|
||||||
assertSignOutputRaw(
|
assertSignOutputRaw(
|
||||||
t, net, alice, targetPubKey, &signrpc.KeyDescriptor{
|
ht, alice, targetPubKey, &signrpc.KeyDescriptor{
|
||||||
KeyLoc: &signrpc.KeyLocator{
|
KeyLoc: &signrpc.KeyLocator{
|
||||||
KeyFamily: keyDesc.KeyLoc.KeyFamily,
|
KeyFamily: keyDesc.KeyLoc.KeyFamily,
|
||||||
KeyIndex: keyDesc.KeyLoc.KeyIndex,
|
KeyIndex: keyDesc.KeyLoc.KeyIndex,
|
||||||
@ -250,29 +232,25 @@ func runSignOutputRaw(t *harnessTest, net *lntest.NetworkHarness,
|
|||||||
|
|
||||||
// And now test everything again with a new key where we know the index
|
// And now test everything again with a new key where we know the index
|
||||||
// is not 0.
|
// is not 0.
|
||||||
ctxt, cancel = context.WithTimeout(ctxb, defaultTimeout)
|
req = &walletrpc.KeyReq{
|
||||||
defer cancel()
|
KeyFamily: testCustomKeyFamily,
|
||||||
keyDesc, err = alice.WalletKitClient.DeriveNextKey(
|
}
|
||||||
ctxt, &walletrpc.KeyReq{
|
keyDesc = alice.RPC.DeriveNextKey(req)
|
||||||
KeyFamily: testCustomKeyFamily,
|
require.Equal(ht, int32(1), keyDesc.KeyLoc.KeyIndex)
|
||||||
},
|
|
||||||
)
|
|
||||||
require.NoError(t.t, err)
|
|
||||||
require.Equal(t.t, int32(1), keyDesc.KeyLoc.KeyIndex)
|
|
||||||
|
|
||||||
targetPubKey, err = btcec.ParsePubKey(keyDesc.RawKeyBytes)
|
targetPubKey, err = btcec.ParsePubKey(keyDesc.RawKeyBytes)
|
||||||
require.NoError(t.t, err)
|
require.NoError(ht, err)
|
||||||
|
|
||||||
// First, try with a key descriptor that only sets the public key.
|
// First, try with a key descriptor that only sets the public key.
|
||||||
assertSignOutputRaw(
|
assertSignOutputRaw(
|
||||||
t, net, alice, targetPubKey, &signrpc.KeyDescriptor{
|
ht, alice, targetPubKey, &signrpc.KeyDescriptor{
|
||||||
RawKeyBytes: keyDesc.RawKeyBytes,
|
RawKeyBytes: keyDesc.RawKeyBytes,
|
||||||
},
|
},
|
||||||
)
|
)
|
||||||
|
|
||||||
// Now try again, this time only with the key locator.
|
// Now try again, this time only with the key locator.
|
||||||
assertSignOutputRaw(
|
assertSignOutputRaw(
|
||||||
t, net, alice, targetPubKey, &signrpc.KeyDescriptor{
|
ht, alice, targetPubKey, &signrpc.KeyDescriptor{
|
||||||
KeyLoc: &signrpc.KeyLocator{
|
KeyLoc: &signrpc.KeyLocator{
|
||||||
KeyFamily: keyDesc.KeyLoc.KeyFamily,
|
KeyFamily: keyDesc.KeyLoc.KeyFamily,
|
||||||
KeyIndex: keyDesc.KeyLoc.KeyIndex,
|
KeyIndex: keyDesc.KeyLoc.KeyIndex,
|
||||||
@ -284,53 +262,44 @@ func runSignOutputRaw(t *harnessTest, net *lntest.NetworkHarness,
|
|||||||
// assertSignOutputRaw sends coins to a p2wkh address derived from the given
|
// assertSignOutputRaw sends coins to a p2wkh address derived from the given
|
||||||
// target public key and then tries to spend that output again by invoking the
|
// target public key and then tries to spend that output again by invoking the
|
||||||
// SignOutputRaw RPC with the key descriptor provided.
|
// SignOutputRaw RPC with the key descriptor provided.
|
||||||
func assertSignOutputRaw(t *harnessTest, net *lntest.NetworkHarness,
|
func assertSignOutputRaw(ht *lntemp.HarnessTest,
|
||||||
alice *lntest.HarnessNode, targetPubKey *btcec.PublicKey,
|
alice *node.HarnessNode, targetPubKey *btcec.PublicKey,
|
||||||
keyDesc *signrpc.KeyDescriptor) {
|
keyDesc *signrpc.KeyDescriptor) {
|
||||||
|
|
||||||
ctxb := context.Background()
|
|
||||||
ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout)
|
|
||||||
defer cancel()
|
|
||||||
|
|
||||||
pubKeyHash := btcutil.Hash160(targetPubKey.SerializeCompressed())
|
pubKeyHash := btcutil.Hash160(targetPubKey.SerializeCompressed())
|
||||||
targetAddr, err := btcutil.NewAddressWitnessPubKeyHash(
|
targetAddr, err := btcutil.NewAddressWitnessPubKeyHash(
|
||||||
pubKeyHash, harnessNetParams,
|
pubKeyHash, harnessNetParams,
|
||||||
)
|
)
|
||||||
require.NoError(t.t, err)
|
require.NoError(ht, err)
|
||||||
targetScript, err := txscript.PayToAddrScript(targetAddr)
|
targetScript, err := txscript.PayToAddrScript(targetAddr)
|
||||||
require.NoError(t.t, err)
|
require.NoError(ht, err)
|
||||||
|
|
||||||
// Send some coins to the generated p2wpkh address.
|
// Send some coins to the generated p2wpkh address.
|
||||||
_, err = alice.SendCoins(ctxt, &lnrpc.SendCoinsRequest{
|
req := &lnrpc.SendCoinsRequest{
|
||||||
Addr: targetAddr.String(),
|
Addr: targetAddr.String(),
|
||||||
Amount: 800_000,
|
Amount: 800_000,
|
||||||
})
|
}
|
||||||
require.NoError(t.t, err)
|
alice.RPC.SendCoins(req)
|
||||||
|
|
||||||
// Wait until the TX is found in the mempool.
|
// Wait until the TX is found in the mempool.
|
||||||
txid, err := waitForTxInMempool(net.Miner.Client, minerMempoolTimeout)
|
txid := ht.Miner.AssertNumTxsInMempool(1)[0]
|
||||||
require.NoError(t.t, err)
|
|
||||||
|
|
||||||
targetOutputIndex := getOutputIndex(
|
targetOutputIndex := ht.GetOutputIndex(txid, targetAddr.String())
|
||||||
t, net.Miner, txid, targetAddr.String(),
|
|
||||||
)
|
|
||||||
|
|
||||||
// Clear the mempool.
|
// Clear the mempool.
|
||||||
mineBlocks(t, net, 1, 1)
|
ht.MineBlocksAndAssertNumTxes(1, 1)
|
||||||
|
|
||||||
// Try to spend the output now to a new p2wkh address.
|
// Try to spend the output now to a new p2wkh address.
|
||||||
p2wkhResp, err := alice.NewAddress(ctxt, &lnrpc.NewAddressRequest{
|
addrReq := &lnrpc.NewAddressRequest{Type: AddrTypeWitnessPubkeyHash}
|
||||||
Type: lnrpc.AddressType_WITNESS_PUBKEY_HASH,
|
p2wkhResp := alice.RPC.NewAddress(addrReq)
|
||||||
})
|
|
||||||
require.NoError(t.t, err)
|
|
||||||
|
|
||||||
p2wkhAdrr, err := btcutil.DecodeAddress(
|
p2wkhAdrr, err := btcutil.DecodeAddress(
|
||||||
p2wkhResp.Address, harnessNetParams,
|
p2wkhResp.Address, harnessNetParams,
|
||||||
)
|
)
|
||||||
require.NoError(t.t, err)
|
require.NoError(ht, err)
|
||||||
|
|
||||||
p2wkhPkScript, err := txscript.PayToAddrScript(p2wkhAdrr)
|
p2wkhPkScript, err := txscript.PayToAddrScript(p2wkhAdrr)
|
||||||
require.NoError(t.t, err)
|
require.NoError(ht, err)
|
||||||
|
|
||||||
tx := wire.NewMsgTx(2)
|
tx := wire.NewMsgTx(2)
|
||||||
tx.TxIn = []*wire.TxIn{{
|
tx.TxIn = []*wire.TxIn{{
|
||||||
@ -346,24 +315,22 @@ func assertSignOutputRaw(t *harnessTest, net *lntest.NetworkHarness,
|
|||||||
}}
|
}}
|
||||||
|
|
||||||
var buf bytes.Buffer
|
var buf bytes.Buffer
|
||||||
require.NoError(t.t, tx.Serialize(&buf))
|
require.NoError(ht, tx.Serialize(&buf))
|
||||||
|
|
||||||
signResp, err := alice.SignerClient.SignOutputRaw(
|
signReq := &signrpc.SignReq{
|
||||||
ctxt, &signrpc.SignReq{
|
RawTxBytes: buf.Bytes(),
|
||||||
RawTxBytes: buf.Bytes(),
|
SignDescs: []*signrpc.SignDescriptor{{
|
||||||
SignDescs: []*signrpc.SignDescriptor{{
|
Output: &signrpc.TxOut{
|
||||||
Output: &signrpc.TxOut{
|
PkScript: targetScript,
|
||||||
PkScript: targetScript,
|
Value: 800_000,
|
||||||
Value: 800_000,
|
},
|
||||||
},
|
InputIndex: 0,
|
||||||
InputIndex: 0,
|
KeyDesc: keyDesc,
|
||||||
KeyDesc: keyDesc,
|
Sighash: uint32(txscript.SigHashAll),
|
||||||
Sighash: uint32(txscript.SigHashAll),
|
WitnessScript: targetScript,
|
||||||
WitnessScript: targetScript,
|
}},
|
||||||
}},
|
}
|
||||||
},
|
signResp := alice.RPC.SignOutputRaw(signReq)
|
||||||
)
|
|
||||||
require.NoError(t.t, err)
|
|
||||||
|
|
||||||
tx.TxIn[0].Witness = wire.TxWitness{
|
tx.TxIn[0].Witness = wire.TxWitness{
|
||||||
append(signResp.RawSigs[0], byte(txscript.SigHashAll)),
|
append(signResp.RawSigs[0], byte(txscript.SigHashAll)),
|
||||||
@ -371,68 +338,38 @@ func assertSignOutputRaw(t *harnessTest, net *lntest.NetworkHarness,
|
|||||||
}
|
}
|
||||||
|
|
||||||
buf.Reset()
|
buf.Reset()
|
||||||
require.NoError(t.t, tx.Serialize(&buf))
|
require.NoError(ht, tx.Serialize(&buf))
|
||||||
|
|
||||||
_, err = alice.WalletKitClient.PublishTransaction(
|
alice.RPC.PublishTransaction(&walletrpc.Transaction{
|
||||||
ctxt, &walletrpc.Transaction{
|
TxHex: buf.Bytes(),
|
||||||
TxHex: buf.Bytes(),
|
})
|
||||||
},
|
|
||||||
)
|
|
||||||
require.NoError(t.t, err)
|
|
||||||
|
|
||||||
// Wait until the spending tx is found.
|
// Wait until the spending tx is found.
|
||||||
txid, err = waitForTxInMempool(net.Miner.Client, minerMempoolTimeout)
|
txid = ht.Miner.AssertNumTxsInMempool(1)[0]
|
||||||
require.NoError(t.t, err)
|
p2wkhOutputIndex := ht.GetOutputIndex(txid, p2wkhAdrr.String())
|
||||||
p2wkhOutputIndex := getOutputIndex(
|
|
||||||
t, net.Miner, txid, p2wkhAdrr.String(),
|
|
||||||
)
|
|
||||||
op := &lnrpc.OutPoint{
|
op := &lnrpc.OutPoint{
|
||||||
TxidBytes: txid[:],
|
TxidBytes: txid[:],
|
||||||
OutputIndex: uint32(p2wkhOutputIndex),
|
OutputIndex: uint32(p2wkhOutputIndex),
|
||||||
}
|
}
|
||||||
assertWalletUnspent(t, alice, op, "")
|
ht.AssertUTXOInWallet(alice, op, "")
|
||||||
|
|
||||||
// Mine another block to clean up the mempool and to make sure the spend
|
// Mine another block to clean up the mempool and to make sure the
|
||||||
// tx is actually included in a block.
|
// spend tx is actually included in a block.
|
||||||
mineBlocks(t, net, 1, 1)
|
ht.MineBlocksAndAssertNumTxes(1, 1)
|
||||||
}
|
|
||||||
|
|
||||||
// deriveCustomizedKey uses the family and index to derive a public key from
|
|
||||||
// the node's walletkit client.
|
|
||||||
func deriveCustomizedKey(ctx context.Context, node *lntest.HarnessNode,
|
|
||||||
keyLoc *signrpc.KeyLocator) (*btcec.PublicKey, error) {
|
|
||||||
|
|
||||||
ctxt, cancel := context.WithTimeout(ctx, defaultTimeout)
|
|
||||||
defer cancel()
|
|
||||||
|
|
||||||
resp, err := node.WalletKitClient.DeriveKey(ctxt, keyLoc)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to derive key: %v", err)
|
|
||||||
}
|
|
||||||
pub, err := btcec.ParsePubKey(resp.RawKeyBytes)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to parse node pubkey: %v", err)
|
|
||||||
}
|
|
||||||
return pub, nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// testSignVerifyMessage makes sure that the SignMessage RPC can be used with
|
// testSignVerifyMessage makes sure that the SignMessage RPC can be used with
|
||||||
// all custom flags by verifying with VerifyMessage. Tests both ECDSA and
|
// all custom flags by verifying with VerifyMessage. Tests both ECDSA and
|
||||||
// Schnorr signatures.
|
// Schnorr signatures.
|
||||||
func testSignVerifyMessage(net *lntest.NetworkHarness, t *harnessTest) {
|
func testSignVerifyMessage(ht *lntemp.HarnessTest) {
|
||||||
runSignVerifyMessage(t, net, net.Alice)
|
runSignVerifyMessage(ht, ht.Alice)
|
||||||
}
|
}
|
||||||
|
|
||||||
// runSignVerifyMessage makes sure that the SignMessage RPC can be used with all
|
// runSignVerifyMessage makes sure that the SignMessage RPC can be used with
|
||||||
// custom flags by verifying with VerifyMessage. Tests both ECDSA and Schnorr
|
// all custom flags by verifying with VerifyMessage. Tests both ECDSA and
|
||||||
// signatures.
|
// Schnorr signatures.
|
||||||
func runSignVerifyMessage(t *harnessTest, net *lntest.NetworkHarness,
|
func runSignVerifyMessage(ht *lntemp.HarnessTest, alice *node.HarnessNode) {
|
||||||
alice *lntest.HarnessNode) {
|
|
||||||
|
|
||||||
ctxb := context.Background()
|
|
||||||
ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout)
|
|
||||||
defer cancel()
|
|
||||||
|
|
||||||
aliceMsg := []byte("alice msg")
|
aliceMsg := []byte("alice msg")
|
||||||
keyLoc := &signrpc.KeyLocator{
|
keyLoc := &signrpc.KeyLocator{
|
||||||
KeyFamily: int32(keychain.KeyFamilyNodeKey),
|
KeyFamily: int32(keychain.KeyFamilyNodeKey),
|
||||||
@ -446,11 +383,17 @@ func runSignVerifyMessage(t *harnessTest, net *lntest.NetworkHarness,
|
|||||||
SchnorrSig: false,
|
SchnorrSig: false,
|
||||||
}
|
}
|
||||||
|
|
||||||
signMsgResp, err := alice.SignerClient.SignMessage(ctxt, signMsgReq)
|
signMsgResp := alice.RPC.SignMessageSigner(signMsgReq)
|
||||||
require.NoError(t.t, err, "failed to sign message")
|
|
||||||
|
|
||||||
customPubKey, err := deriveCustomizedKey(ctxt, alice, keyLoc)
|
deriveCustomizedKey := func() *btcec.PublicKey {
|
||||||
require.NoError(t.t, err, "failed to create customized pubkey")
|
resp := alice.RPC.DeriveKey(keyLoc)
|
||||||
|
pub, err := btcec.ParsePubKey(resp.RawKeyBytes)
|
||||||
|
require.NoError(ht, err, "failed to parse node pubkey")
|
||||||
|
|
||||||
|
return pub
|
||||||
|
}
|
||||||
|
|
||||||
|
customPubKey := deriveCustomizedKey()
|
||||||
|
|
||||||
verifyReq := &signrpc.VerifyMessageReq{
|
verifyReq := &signrpc.VerifyMessageReq{
|
||||||
Msg: aliceMsg,
|
Msg: aliceMsg,
|
||||||
@ -458,10 +401,8 @@ func runSignVerifyMessage(t *harnessTest, net *lntest.NetworkHarness,
|
|||||||
Pubkey: customPubKey.SerializeCompressed(),
|
Pubkey: customPubKey.SerializeCompressed(),
|
||||||
IsSchnorrSig: false,
|
IsSchnorrSig: false,
|
||||||
}
|
}
|
||||||
verifyResp, err := alice.SignerClient.VerifyMessage(ctxt, verifyReq)
|
verifyResp := alice.RPC.VerifyMessageSigner(verifyReq)
|
||||||
require.NoError(t.t, err)
|
require.True(ht, verifyResp.Valid, "failed to verify message")
|
||||||
|
|
||||||
require.True(t.t, verifyResp.Valid, "failed to verify message")
|
|
||||||
|
|
||||||
// Use a different key locator.
|
// Use a different key locator.
|
||||||
keyLoc = &signrpc.KeyLocator{
|
keyLoc = &signrpc.KeyLocator{
|
||||||
@ -475,12 +416,8 @@ func runSignVerifyMessage(t *harnessTest, net *lntest.NetworkHarness,
|
|||||||
KeyLoc: keyLoc,
|
KeyLoc: keyLoc,
|
||||||
SchnorrSig: true,
|
SchnorrSig: true,
|
||||||
}
|
}
|
||||||
|
signMsgResp = alice.RPC.SignMessageSigner(signMsgReq)
|
||||||
signMsgResp, err = alice.SignerClient.SignMessage(ctxt, signMsgReq)
|
customPubKey = deriveCustomizedKey()
|
||||||
require.NoError(t.t, err)
|
|
||||||
|
|
||||||
customPubKey, err = deriveCustomizedKey(ctxt, alice, keyLoc)
|
|
||||||
require.NoError(t.t, err, "failed to create customized pubkey")
|
|
||||||
|
|
||||||
// Verify the Schnorr signature.
|
// Verify the Schnorr signature.
|
||||||
verifyReq = &signrpc.VerifyMessageReq{
|
verifyReq = &signrpc.VerifyMessageReq{
|
||||||
@ -489,10 +426,8 @@ func runSignVerifyMessage(t *harnessTest, net *lntest.NetworkHarness,
|
|||||||
Pubkey: schnorr.SerializePubKey(customPubKey),
|
Pubkey: schnorr.SerializePubKey(customPubKey),
|
||||||
IsSchnorrSig: true,
|
IsSchnorrSig: true,
|
||||||
}
|
}
|
||||||
verifyResp, err = alice.SignerClient.VerifyMessage(ctxt, verifyReq)
|
verifyResp = alice.RPC.VerifyMessageSigner(verifyReq)
|
||||||
require.NoError(t.t, err)
|
require.True(ht, verifyResp.Valid, "failed to verify message")
|
||||||
|
|
||||||
require.True(t.t, verifyResp.Valid, "failed to verify message")
|
|
||||||
|
|
||||||
// Also test that we can tweak a private key and verify the message
|
// Also test that we can tweak a private key and verify the message
|
||||||
// against the tweaked public key.
|
// against the tweaked public key.
|
||||||
@ -502,8 +437,7 @@ func runSignVerifyMessage(t *harnessTest, net *lntest.NetworkHarness,
|
|||||||
)
|
)
|
||||||
|
|
||||||
signMsgReq.SchnorrSigTapTweak = tweakBytes[:]
|
signMsgReq.SchnorrSigTapTweak = tweakBytes[:]
|
||||||
signMsgResp, err = alice.SignerClient.SignMessage(ctxt, signMsgReq)
|
signMsgResp = alice.RPC.SignMessageSigner(signMsgReq)
|
||||||
require.NoError(t.t, err)
|
|
||||||
|
|
||||||
verifyReq = &signrpc.VerifyMessageReq{
|
verifyReq = &signrpc.VerifyMessageReq{
|
||||||
Msg: aliceMsg,
|
Msg: aliceMsg,
|
||||||
@@ -511,8 +445,6 @@ func runSignVerifyMessage(t *harnessTest, net *lntest.NetworkHarness,
        Pubkey:       schnorr.SerializePubKey(tweakedPubKey),
        IsSchnorrSig: true,
    }
-   verifyResp, err = alice.SignerClient.VerifyMessage(ctxt, verifyReq)
-   require.NoError(t.t, err)
-
-   require.True(t.t, verifyResp.Valid, "failed to verify message")
+   verifyResp = alice.RPC.VerifyMessageSigner(verifyReq)
+   require.True(ht, verifyResp.Valid, "failed to verify message")
}
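The verify requests in this file switch between a 33-byte compressed key for the ECDSA cases and a 32-byte x-only key for the Schnorr cases. A short sketch of the two encodings, using the btcec packages the test already imports, follows; the wrapper function is illustrative only.

package itest

import (
    "github.com/btcsuite/btcd/btcec/v2"
    "github.com/btcsuite/btcd/btcec/v2/schnorr"
)

// encodeForVerify returns the pubkey encoding paired with IsSchnorrSig in
// the requests above: x-only (32 bytes) for Schnorr, compressed SEC form
// (33 bytes) otherwise.
func encodeForVerify(pub *btcec.PublicKey, schnorrSig bool) []byte {
    if schnorrSig {
        return schnorr.SerializePubKey(pub)
    }

    return pub.SerializeCompressed()
}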
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -4,140 +4,8 @@
package itest

var allTestCases = []*testCase{
-   {
-       name: "multiple channel creation and update subscription",
-       test: testBasicChannelCreationAndUpdates,
-   },
-   {
-       name: "derive shared key",
-       test: testDeriveSharedKey,
-   },
-   {
-       name: "sign output raw",
-       test: testSignOutputRaw,
-   },
-   {
-       name: "sign verify message",
-       test: testSignVerifyMessage,
-   },
-   {
-       name: "async payments benchmark",
-       test: testAsyncPayments,
-   },
    {
        name: "async bidirectional payments",
        test: testBidirectionalAsyncPayments,
    },
-   {
-       name: "switch circuit persistence",
-       test: testSwitchCircuitPersistence,
-   },
-   {
-       name: "switch offline delivery",
-       test: testSwitchOfflineDelivery,
-   },
-   {
-       name: "switch offline delivery persistence",
-       test: testSwitchOfflineDeliveryPersistence,
-   },
-   {
-       name: "switch offline delivery outgoing offline",
-       test: testSwitchOfflineDeliveryOutgoingOffline,
-   },
-   {
-       name: "cpfp",
-       test: testCPFP,
-   },
-   {
-       name: "psbt channel funding",
-       test: testPsbtChanFunding,
-   },
-   {
-       name: "sign psbt",
-       test: testSignPsbt,
-   },
-   {
-       name: "sendtoroute multi path payment",
-       test: testSendToRouteMultiPath,
-   },
-   {
-       name: "sendtoroute amp",
-       test: testSendToRouteAMP,
-   },
-   {
-       name: "sendpayment amp",
-       test: testSendPaymentAMP,
-   },
-   {
-       name: "sendpayment amp invoice",
-       test: testSendPaymentAMPInvoice,
-   },
-   {
-       name: "sendpayment amp invoice repeat",
-       test: testSendPaymentAMPInvoiceRepeat,
-   },
-   {
-       name: "send multi path payment",
-       test: testSendMultiPathPayment,
-   },
-   {
-       name: "forward interceptor",
-       test: testForwardInterceptorBasic,
-   },
-   {
-       name: "forward interceptor dedup htlcs",
-       test: testForwardInterceptorDedupHtlc,
-   },
-   {
-       name: "wallet import account",
-       test: testWalletImportAccount,
-   },
-   {
-       name: "wallet import pubkey",
-       test: testWalletImportPubKey,
-   },
-   {
-       name: "remote signer",
-       test: testRemoteSigner,
-   },
-   {
-       name: "taproot",
-       test: testTaproot,
-   },
-   {
-       name: "zero conf channel open",
-       test: testZeroConfChannelOpen,
-   },
-   {
-       name: "option scid alias",
-       test: testOptionScidAlias,
-   },
-   {
-       name: "scid alias channel update",
-       test: testUpdateChannelPolicyScidAlias,
-   },
-   {
-       name: "scid alias upgrade",
-       test: testOptionScidUpgrade,
-   },
-   {
-       name: "nonstd sweep",
-       test: testNonstdSweep,
-   },
-   {
-       name: "taproot coop close",
-       test: testTaprootCoopClose,
-   },
-   {
-       name: "trackpayments",
-       test: testTrackPayments,
-   },
-   {
-       name: "open channel fee policy",
-       test: testOpenChannelUpdateFeePolicy,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "custom messaging",
|
|
||||||
test: testCustomMessage,
|
|
||||||
},
|
|
||||||
}
|
}
|
||||||
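The entries deleted above are not dropped from coverage; they move to the registry used by the new lntemp harness. Based on the lntemp.TestCase usage visible further down in this diff (a Name field plus a TestFunc taking *lntemp.HarnessTest), a migrated registration looks roughly like the sketch below; the variable name is hypothetical.

package itest

import "github.com/lightningnetwork/lnd/lntemp"

// migratedTestCases sketches how tests removed from the old allTestCases
// slice are re-registered under the new framework. The Name/TestFunc fields
// match the lntemp.TestCase literal used later in this diff.
var migratedTestCases = []*lntemp.TestCase{
	{
		Name:     "track payments",
		TestFunc: testTrackPayments,
	},
	{
		Name:     "wallet import account",
		TestFunc: testWalletImportAccount,
	},
}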
|
@ -1,102 +1,89 @@
|
|||||||
package itest
|
package itest
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
|
||||||
"encoding/hex"
|
"encoding/hex"
|
||||||
|
"time"
|
||||||
|
|
||||||
"github.com/btcsuite/btcd/btcutil"
|
"github.com/btcsuite/btcd/btcutil"
|
||||||
"github.com/lightningnetwork/lnd/lnrpc"
|
"github.com/lightningnetwork/lnd/lnrpc"
|
||||||
"github.com/lightningnetwork/lnd/lnrpc/routerrpc"
|
"github.com/lightningnetwork/lnd/lnrpc/routerrpc"
|
||||||
"github.com/lightningnetwork/lnd/lntest"
|
"github.com/lightningnetwork/lnd/lntemp"
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
)
|
)
|
||||||
|
|
||||||
// testTrackPayments tests whether a client that calls the TrackPayments api
|
// testTrackPayments tests whether a client that calls the TrackPayments api
|
||||||
// receives payment updates.
|
// receives payment updates.
|
||||||
func testTrackPayments(net *lntest.NetworkHarness, t *harnessTest) {
|
func testTrackPayments(ht *lntemp.HarnessTest) {
|
||||||
// Open a channel between alice and bob.
|
// Open a channel between alice and bob.
|
||||||
net.EnsureConnected(t.t, net.Alice, net.Bob)
|
alice, bob := ht.Alice, ht.Bob
|
||||||
channel := openChannelAndAssert(
|
channel := ht.OpenChannel(
|
||||||
t, net, net.Alice, net.Bob,
|
alice, bob, lntemp.OpenChannelParams{
|
||||||
lntest.OpenChannelParams{
|
|
||||||
Amt: btcutil.Amount(300000),
|
Amt: btcutil.Amount(300000),
|
||||||
},
|
},
|
||||||
)
|
)
|
||||||
defer closeChannelAndAssert(t, net, net.Alice, channel, true)
|
|
||||||
|
|
||||||
err := net.Alice.WaitForNetworkChannelOpen(channel)
|
|
||||||
require.NoError(t.t, err, "unable to wait for channel to open")
|
|
||||||
|
|
||||||
ctxb := context.Background()
|
|
||||||
ctxt, cancelTracker := context.WithCancel(ctxb)
|
|
||||||
defer cancelTracker()
|
|
||||||
|
|
||||||
// Call the TrackPayments api to listen for payment updates.
|
// Call the TrackPayments api to listen for payment updates.
|
||||||
tracker, err := net.Alice.RouterClient.TrackPayments(
|
req := &routerrpc.TrackPaymentsRequest{
|
||||||
ctxt,
|
NoInflightUpdates: false,
|
||||||
&routerrpc.TrackPaymentsRequest{
|
}
|
||||||
NoInflightUpdates: false,
|
tracker := alice.RPC.TrackPayments(req)
|
||||||
},
|
|
||||||
)
|
|
||||||
require.NoError(t.t, err, "failed to call TrackPayments successfully.")
|
|
||||||
|
|
||||||
// Create an invoice from bob.
|
// Create an invoice from bob.
|
||||||
var amountMsat int64 = 1000
|
var amountMsat int64 = 1000
|
||||||
invoiceResp, err := net.Bob.AddInvoice(
|
invoiceResp := bob.RPC.AddInvoice(
|
||||||
ctxb,
|
|
||||||
&lnrpc.Invoice{
|
&lnrpc.Invoice{
|
||||||
ValueMsat: amountMsat,
|
ValueMsat: amountMsat,
|
||||||
},
|
},
|
||||||
)
|
)
|
||||||
require.NoError(t.t, err, "unable to add invoice.")
|
invoice := bob.RPC.LookupInvoice(invoiceResp.RHash)
|
||||||
|
|
||||||
invoice, err := net.Bob.LookupInvoice(
|
|
||||||
ctxb,
|
|
||||||
&lnrpc.PaymentHash{
|
|
||||||
RHashStr: hex.EncodeToString(invoiceResp.RHash),
|
|
||||||
},
|
|
||||||
)
|
|
||||||
require.NoError(t.t, err, "unable to find invoice.")
|
|
||||||
|
|
||||||
// Send payment from alice to bob.
|
// Send payment from alice to bob.
|
||||||
paymentClient, err := net.Alice.RouterClient.SendPaymentV2(
|
paymentClient := alice.RPC.SendPayment(
|
||||||
ctxb,
|
|
||||||
&routerrpc.SendPaymentRequest{
|
&routerrpc.SendPaymentRequest{
|
||||||
PaymentRequest: invoice.PaymentRequest,
|
PaymentRequest: invoice.PaymentRequest,
|
||||||
TimeoutSeconds: 60,
|
TimeoutSeconds: 60,
|
||||||
},
|
},
|
||||||
)
|
)
|
||||||
require.NoError(t.t, err, "unable to send payment.")
|
|
||||||
|
|
||||||
// Make sure the payment doesn't error due to invalid parameters or the like.
|
// Make sure the payment doesn't error due to invalid parameters or the like.
|
||||||
_, err = paymentClient.Recv()
|
_, err := paymentClient.Recv()
|
||||||
require.NoError(t.t, err, "unable to get payment update.")
|
require.NoError(ht, err, "unable to get payment update.")
|
||||||
|
|
||||||
// Assert the first payment update is an inflight update.
|
// Assert the first payment update is an inflight update.
|
||||||
update1, err := tracker.Recv()
|
update1, err := tracker.Recv()
|
||||||
require.NoError(t.t, err, "unable to receive payment update 1.")
|
require.NoError(ht, err, "unable to receive payment update 1.")
|
||||||
|
|
||||||
require.Equal(
|
require.Equal(
|
||||||
t.t, lnrpc.PaymentFailureReason_FAILURE_REASON_NONE,
|
ht, lnrpc.PaymentFailureReason_FAILURE_REASON_NONE,
|
||||||
update1.FailureReason,
|
update1.FailureReason,
|
||||||
)
|
)
|
||||||
require.Equal(t.t, lnrpc.Payment_IN_FLIGHT, update1.Status)
|
require.Equal(ht, lnrpc.Payment_IN_FLIGHT, update1.Status)
|
||||||
require.Equal(t.t, invoice.PaymentRequest, update1.PaymentRequest)
|
require.Equal(ht, invoice.PaymentRequest, update1.PaymentRequest)
|
||||||
require.Equal(t.t, amountMsat, update1.ValueMsat)
|
require.Equal(ht, amountMsat, update1.ValueMsat)
|
||||||
|
|
||||||
// Assert the second payment update is a payment success update.
|
// Assert the second payment update is a payment success update.
|
||||||
update2, err := tracker.Recv()
|
update2, err := tracker.Recv()
|
||||||
require.NoError(t.t, err, "unable to receive payment update 2.")
|
require.NoError(ht, err, "unable to receive payment update 2.")
|
||||||
|
|
||||||
require.Equal(
|
require.Equal(
|
||||||
t.t, lnrpc.PaymentFailureReason_FAILURE_REASON_NONE,
|
ht, lnrpc.PaymentFailureReason_FAILURE_REASON_NONE,
|
||||||
update2.FailureReason,
|
update2.FailureReason,
|
||||||
)
|
)
|
||||||
require.Equal(t.t, lnrpc.Payment_SUCCEEDED, update2.Status)
|
require.Equal(ht, lnrpc.Payment_SUCCEEDED, update2.Status)
|
||||||
require.Equal(t.t, invoice.PaymentRequest, update2.PaymentRequest)
|
require.Equal(ht, invoice.PaymentRequest, update2.PaymentRequest)
|
||||||
require.Equal(t.t, amountMsat, update2.ValueMsat)
|
require.Equal(ht, amountMsat, update2.ValueMsat)
|
||||||
require.Equal(
|
require.Equal(
|
||||||
t.t, hex.EncodeToString(invoice.RPreimage),
|
ht, hex.EncodeToString(invoice.RPreimage),
|
||||||
update2.PaymentPreimage,
|
update2.PaymentPreimage,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// TODO(yy): remove the sleep once the following bug is fixed.
|
||||||
|
// When the invoice is reported settled, the commitment dance is not
|
||||||
|
// yet finished, which can cause an error when closing the channel,
|
||||||
|
// saying there's active HTLCs. We need to investigate this issue and
|
||||||
|
// reverse the order to, first finish the commitment dance, then report
|
||||||
|
// the invoice as settled.
|
||||||
|
time.Sleep(2 * time.Second)
|
||||||
|
|
||||||
|
ht.CloseChannel(alice, channel)
|
||||||
}
|
}
|
||||||
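The TODO above papers over the race with a fixed two-second sleep. One hedged alternative, sketched below, is to poll until no channel reports pending HTLCs before closing; it assumes the harness node exposes a ListChannels wrapper of this shape, which this diff does not show, so treat the helper as illustrative only.

package itest

import (
	"fmt"
	"time"

	"github.com/lightningnetwork/lnd/lnrpc"
	"github.com/lightningnetwork/lnd/lntemp"
	"github.com/lightningnetwork/lnd/lntemp/node"
	"github.com/lightningnetwork/lnd/lntest/wait"
	"github.com/stretchr/testify/require"
)

// waitNoActiveHtlcs polls hn until none of its channels report pending
// HTLCs, a possible replacement for the fixed sleep used above. The
// ListChannels wrapper and the 30s poll timeout are assumptions.
func waitNoActiveHtlcs(ht *lntemp.HarnessTest, hn *node.HarnessNode) {
	err := wait.NoError(func() error {
		resp := hn.RPC.ListChannels(&lnrpc.ListChannelsRequest{})
		for _, c := range resp.Channels {
			if len(c.PendingHtlcs) != 0 {
				return fmt.Errorf("channel %s still has %d "+
					"pending htlcs", c.ChannelPoint,
					len(c.PendingHtlcs))
			}
		}

		return nil
	}, 30*time.Second)

	require.NoError(ht, err, "htlcs not settled before close")
}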
|
@ -2,10 +2,6 @@ package itest
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"context"
|
|
||||||
"crypto/rand"
|
|
||||||
"fmt"
|
|
||||||
"math"
|
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
@ -20,8 +16,8 @@ import (
|
|||||||
"github.com/lightningnetwork/lnd/funding"
|
"github.com/lightningnetwork/lnd/funding"
|
||||||
"github.com/lightningnetwork/lnd/lnrpc"
|
"github.com/lightningnetwork/lnd/lnrpc"
|
||||||
"github.com/lightningnetwork/lnd/lnrpc/walletrpc"
|
"github.com/lightningnetwork/lnd/lnrpc/walletrpc"
|
||||||
"github.com/lightningnetwork/lnd/lntest"
|
"github.com/lightningnetwork/lnd/lntemp"
|
||||||
"github.com/lightningnetwork/lnd/lntest/wait"
|
"github.com/lightningnetwork/lnd/lntemp/node"
|
||||||
"github.com/lightningnetwork/lnd/lnwallet"
|
"github.com/lightningnetwork/lnd/lnwallet"
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
)
|
)
|
||||||
@ -55,30 +51,27 @@ func walletToLNAddrType(t *testing.T,
|
|||||||
|
|
||||||
// newExternalAddr generates a new external address of an imported account for a
|
// newExternalAddr generates a new external address of an imported account for a
|
||||||
// pair of nodes, where one acts as the funder and the other as the signer.
|
// pair of nodes, where one acts as the funder and the other as the signer.
|
||||||
func newExternalAddr(t *testing.T, funder, signer *lntest.HarnessNode,
|
func newExternalAddr(ht *lntemp.HarnessTest, funder, signer *node.HarnessNode,
|
||||||
importedAccount string, addrType walletrpc.AddressType) string {
|
importedAccount string, addrType walletrpc.AddressType) string {
|
||||||
|
|
||||||
// We'll generate a new address for Carol from Dave's node to receive
|
// We'll generate a new address for Carol from Dave's node to receive
|
||||||
// and fund a new channel.
|
// and fund a new channel.
|
||||||
ctxb := context.Background()
|
req := &lnrpc.NewAddressRequest{
|
||||||
ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout)
|
Type: walletToLNAddrType(ht.T, addrType),
|
||||||
defer cancel()
|
|
||||||
funderResp, err := funder.NewAddress(ctxt, &lnrpc.NewAddressRequest{
|
|
||||||
Type: walletToLNAddrType(t, addrType),
|
|
||||||
Account: importedAccount,
|
Account: importedAccount,
|
||||||
})
|
}
|
||||||
require.NoError(t, err)
|
funderResp := funder.RPC.NewAddress(req)
|
||||||
|
|
||||||
// Carol also needs to generate the address for the sake of this test to
|
// Carol also needs to generate the address for the sake of this test
|
||||||
// be able to sign the channel funding input.
|
// to be able to sign the channel funding input.
|
||||||
signerResp, err := signer.NewAddress(ctxt, &lnrpc.NewAddressRequest{
|
req = &lnrpc.NewAddressRequest{
|
||||||
Type: walletToLNAddrType(t, addrType),
|
Type: walletToLNAddrType(ht.T, addrType),
|
||||||
})
|
}
|
||||||
require.NoError(t, err)
|
signerResp := signer.RPC.NewAddress(req)
|
||||||
|
|
||||||
// Sanity check that the generated addresses match.
|
// Sanity check that the generated addresses match.
|
||||||
require.Equal(t, funderResp.Address, signerResp.Address)
|
require.Equal(ht, funderResp.Address, signerResp.Address)
|
||||||
assertExternalAddrType(t, funderResp.Address, addrType)
|
assertExternalAddrType(ht.T, funderResp.Address, addrType)
|
||||||
|
|
||||||
return funderResp.Address
|
return funderResp.Address
|
||||||
}
|
}
|
||||||
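newExternalAddr above leans on the walletToLNAddrType helper to translate walletrpc address types into their lnrpc equivalents when requesting addresses. A hedged sketch of that mapping follows; the exact cases in lnd's helper may differ, so this is a guess for illustration rather than a copy of the real function.

package itest

import (
	"testing"

	"github.com/lightningnetwork/lnd/lnrpc"
	"github.com/lightningnetwork/lnd/lnrpc/walletrpc"
)

// walletToLNAddrTypeSketch illustrates the walletrpc-to-lnrpc address type
// translation used when deriving external addresses in these tests. It is
// not lnd's actual walletToLNAddrType implementation.
func walletToLNAddrTypeSketch(t *testing.T,
	addrType walletrpc.AddressType) lnrpc.AddressType {

	switch addrType {
	case walletrpc.AddressType_NESTED_WITNESS_PUBKEY_HASH,
		walletrpc.AddressType_HYBRID_NESTED_WITNESS_PUBKEY_HASH:

		return lnrpc.AddressType_NESTED_PUBKEY_HASH

	case walletrpc.AddressType_WITNESS_PUBKEY_HASH:
		return lnrpc.AddressType_WITNESS_PUBKEY_HASH

	case walletrpc.AddressType_TAPROOT_PUBKEY:
		return lnrpc.AddressType_TAPROOT_PUBKEY

	default:
		t.Fatalf("unsupported address type %v", addrType)
		return 0
	}
}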
@ -130,108 +123,21 @@ func assertOutputScriptType(t *testing.T, expType txscript.ScriptClass,
|
|||||||
spew.Sdump(tx))
|
spew.Sdump(tx))
|
||||||
}
|
}
|
||||||
|
|
||||||
// assertAccountBalance asserts that the unconfirmed and confirmed balance for
|
|
||||||
// the given account is satisfied by the WalletBalance and ListUnspent RPCs. The
|
|
||||||
// unconfirmed balance is not checked for neutrino nodes.
|
|
||||||
func assertAccountBalance(t *testing.T, node *lntest.HarnessNode, account string,
|
|
||||||
confirmedBalance, unconfirmedBalance int64) {
|
|
||||||
|
|
||||||
err := wait.NoError(func() error {
|
|
||||||
balanceResp, err := node.WalletBalance(
|
|
||||||
context.Background(), &lnrpc.WalletBalanceRequest{},
|
|
||||||
)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
require.Contains(t, balanceResp.AccountBalance, account)
|
|
||||||
accountBalance := balanceResp.AccountBalance[account]
|
|
||||||
|
|
||||||
// Check confirmed balance.
|
|
||||||
if accountBalance.ConfirmedBalance != confirmedBalance {
|
|
||||||
return fmt.Errorf("expected confirmed balance %v, "+
|
|
||||||
"got %v", confirmedBalance,
|
|
||||||
accountBalance.ConfirmedBalance)
|
|
||||||
}
|
|
||||||
listUtxosReq := &lnrpc.ListUnspentRequest{
|
|
||||||
MinConfs: 1,
|
|
||||||
MaxConfs: math.MaxInt32,
|
|
||||||
Account: account,
|
|
||||||
}
|
|
||||||
confirmedUtxosResp, err := node.ListUnspent(
|
|
||||||
context.Background(), listUtxosReq,
|
|
||||||
)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
var totalConfirmedVal int64
|
|
||||||
for _, utxo := range confirmedUtxosResp.Utxos {
|
|
||||||
totalConfirmedVal += utxo.AmountSat
|
|
||||||
}
|
|
||||||
if totalConfirmedVal != confirmedBalance {
|
|
||||||
return fmt.Errorf("expected total confirmed utxo "+
|
|
||||||
"balance %v, got %v", confirmedBalance,
|
|
||||||
totalConfirmedVal)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Skip unconfirmed balance checks for neutrino nodes.
|
|
||||||
if node.Cfg.BackendCfg.Name() == lntest.NeutrinoBackendName {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check unconfirmed balance.
|
|
||||||
if accountBalance.UnconfirmedBalance != unconfirmedBalance {
|
|
||||||
return fmt.Errorf("expected unconfirmed balance %v, "+
|
|
||||||
"got %v", unconfirmedBalance,
|
|
||||||
accountBalance.UnconfirmedBalance)
|
|
||||||
}
|
|
||||||
listUtxosReq.MinConfs = 0
|
|
||||||
listUtxosReq.MaxConfs = 0
|
|
||||||
unconfirmedUtxosResp, err := node.ListUnspent(
|
|
||||||
context.Background(), listUtxosReq,
|
|
||||||
)
|
|
||||||
require.NoError(t, err)
|
|
||||||
var totalUnconfirmedVal int64
|
|
||||||
for _, utxo := range unconfirmedUtxosResp.Utxos {
|
|
||||||
totalUnconfirmedVal += utxo.AmountSat
|
|
||||||
}
|
|
||||||
if totalUnconfirmedVal != unconfirmedBalance {
|
|
||||||
return fmt.Errorf("expected total unconfirmed utxo "+
|
|
||||||
"balance %v, got %v", unconfirmedBalance,
|
|
||||||
totalUnconfirmedVal)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}, defaultTimeout)
|
|
||||||
require.NoError(t, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// psbtSendFromImportedAccount attempts to fund a PSBT from the given imported
|
// psbtSendFromImportedAccount attempts to fund a PSBT from the given imported
|
||||||
// account, originating from the source node to the destination.
|
// account, originating from the source node to the destination.
|
||||||
func psbtSendFromImportedAccount(t *harnessTest, srcNode, destNode,
|
func psbtSendFromImportedAccount(ht *lntemp.HarnessTest, srcNode, destNode,
|
||||||
signer *lntest.HarnessNode, account string,
|
signer *node.HarnessNode, account string,
|
||||||
accountAddrType walletrpc.AddressType) {
|
accountAddrType walletrpc.AddressType) {
|
||||||
|
|
||||||
ctxb := context.Background()
|
balanceResp := srcNode.RPC.WalletBalance()
|
||||||
|
require.Contains(ht, balanceResp.AccountBalance, account)
|
||||||
ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout)
|
|
||||||
defer cancel()
|
|
||||||
balanceResp, err := srcNode.WalletBalance(
|
|
||||||
ctxt, &lnrpc.WalletBalanceRequest{},
|
|
||||||
)
|
|
||||||
require.NoError(t.t, err)
|
|
||||||
require.Contains(t.t, balanceResp.AccountBalance, account)
|
|
||||||
confBalance := balanceResp.AccountBalance[account].ConfirmedBalance
|
confBalance := balanceResp.AccountBalance[account].ConfirmedBalance
|
||||||
|
|
||||||
destAmt := confBalance - 10000
|
destAmt := confBalance - 10000
|
||||||
ctxt, cancel = context.WithTimeout(ctxb, defaultTimeout)
|
destAddrResp := destNode.RPC.NewAddress(&lnrpc.NewAddressRequest{
|
||||||
defer cancel()
|
|
||||||
destAddrResp, err := destNode.NewAddress(ctxt, &lnrpc.NewAddressRequest{
|
|
||||||
Type: lnrpc.AddressType_WITNESS_PUBKEY_HASH,
|
Type: lnrpc.AddressType_WITNESS_PUBKEY_HASH,
|
||||||
})
|
})
|
||||||
require.NoError(t.t, err)
|
|
||||||
|
|
||||||
ctxt, cancel = context.WithTimeout(ctxb, defaultTimeout)
|
|
||||||
defer cancel()
|
|
||||||
fundReq := &walletrpc.FundPsbtRequest{
|
fundReq := &walletrpc.FundPsbtRequest{
|
||||||
Template: &walletrpc.FundPsbtRequest_Raw{
|
Template: &walletrpc.FundPsbtRequest_Raw{
|
||||||
Raw: &walletrpc.TxTemplate{
|
Raw: &walletrpc.TxTemplate{
|
||||||
@ -245,29 +151,20 @@ func psbtSendFromImportedAccount(t *harnessTest, srcNode, destNode,
|
|||||||
},
|
},
|
||||||
Account: account,
|
Account: account,
|
||||||
}
|
}
|
||||||
fundResp, err := srcNode.WalletKitClient.FundPsbt(ctxt, fundReq)
|
fundResp := srcNode.RPC.FundPsbt(fundReq)
|
||||||
require.NoError(t.t, err)
|
|
||||||
|
|
||||||
// Have Carol sign the PSBT input since Dave doesn't have any private
|
// Have Carol sign the PSBT input since Dave doesn't have any private
|
||||||
// key information.
|
// key information.
|
||||||
ctxt, cancel = context.WithTimeout(ctxb, defaultTimeout)
|
|
||||||
defer cancel()
|
|
||||||
finalizeReq := &walletrpc.FinalizePsbtRequest{
|
finalizeReq := &walletrpc.FinalizePsbtRequest{
|
||||||
FundedPsbt: fundResp.FundedPsbt,
|
FundedPsbt: fundResp.FundedPsbt,
|
||||||
}
|
}
|
||||||
finalizeResp, err := signer.WalletKitClient.FinalizePsbt(
|
finalizeResp := signer.RPC.FinalizePsbt(finalizeReq)
|
||||||
ctxt, finalizeReq,
|
|
||||||
)
|
|
||||||
require.NoError(t.t, err)
|
|
||||||
|
|
||||||
// With the PSBT signed, we can broadcast the resulting transaction.
|
// With the PSBT signed, we can broadcast the resulting transaction.
|
||||||
ctxt, cancel = context.WithTimeout(ctxb, defaultTimeout)
|
|
||||||
defer cancel()
|
|
||||||
publishReq := &walletrpc.Transaction{
|
publishReq := &walletrpc.Transaction{
|
||||||
TxHex: finalizeResp.RawFinalTx,
|
TxHex: finalizeResp.RawFinalTx,
|
||||||
}
|
}
|
||||||
_, err = srcNode.WalletKitClient.PublishTransaction(ctxt, publishReq)
|
srcNode.RPC.PublishTransaction(publishReq)
|
||||||
require.NoError(t.t, err)
|
|
||||||
|
|
||||||
// Carol's balance from Dave's perspective should update accordingly.
|
// Carol's balance from Dave's perspective should update accordingly.
|
||||||
var (
|
var (
|
||||||
@ -304,7 +201,7 @@ func psbtSendFromImportedAccount(t *harnessTest, srcNode, destNode,
|
|||||||
expChangeScriptType = txscript.WitnessV1TaprootTy
|
expChangeScriptType = txscript.WitnessV1TaprootTy
|
||||||
|
|
||||||
default:
|
default:
|
||||||
t.Fatalf("unsupported addr type %v", accountAddrType)
|
ht.Fatalf("unsupported addr type %v", accountAddrType)
|
||||||
}
|
}
|
||||||
changeUtxoAmt := confBalance - destAmt - expTxFee
|
changeUtxoAmt := confBalance - destAmt - expTxFee
|
||||||
|
|
||||||
@ -314,16 +211,20 @@ func psbtSendFromImportedAccount(t *harnessTest, srcNode, destNode,
|
|||||||
if account == defaultImportedAccount {
|
if account == defaultImportedAccount {
|
||||||
accountWithBalance = defaultAccount
|
accountWithBalance = defaultAccount
|
||||||
}
|
}
|
||||||
assertAccountBalance(t.t, srcNode, accountWithBalance, 0, changeUtxoAmt)
|
ht.AssertWalletAccountBalance(
|
||||||
_ = mineBlocks(t, t.lndHarness, 1, 1)
|
srcNode, accountWithBalance, 0, changeUtxoAmt,
|
||||||
assertAccountBalance(t.t, srcNode, accountWithBalance, changeUtxoAmt, 0)
|
)
|
||||||
|
ht.MineBlocksAndAssertNumTxes(1, 1)
|
||||||
|
ht.AssertWalletAccountBalance(
|
||||||
|
srcNode, accountWithBalance, changeUtxoAmt, 0,
|
||||||
|
)
|
||||||
|
|
||||||
// Finally, assert that the transaction has the expected change address
|
// Finally, assert that the transaction has the expected change address
|
||||||
// type based on the account.
|
// type based on the account.
|
||||||
var tx wire.MsgTx
|
var tx wire.MsgTx
|
||||||
err = tx.Deserialize(bytes.NewReader(finalizeResp.RawFinalTx))
|
err := tx.Deserialize(bytes.NewReader(finalizeResp.RawFinalTx))
|
||||||
require.NoError(t.t, err)
|
require.NoError(ht, err)
|
||||||
assertOutputScriptType(t.t, expChangeScriptType, &tx, changeUtxoAmt)
|
assertOutputScriptType(ht.T, expChangeScriptType, &tx, changeUtxoAmt)
|
||||||
}
|
}
|
||||||
|
|
||||||
// fundChanAndCloseFromImportedAccount attempts to fund a channel from the
|
// fundChanAndCloseFromImportedAccount attempts to fund a channel from the
|
||||||
@ -331,21 +232,14 @@ func psbtSendFromImportedAccount(t *harnessTest, srcNode, destNode,
|
|||||||
// node. To ensure the channel is operational before closing it, a test payment
|
// node. To ensure the channel is operational before closing it, a test payment
|
||||||
// is made. Several balance assertions are made along the way for the sake of
|
// is made. Several balance assertions are made along the way for the sake of
|
||||||
// correctness.
|
// correctness.
|
||||||
func fundChanAndCloseFromImportedAccount(t *harnessTest, srcNode, destNode,
|
func fundChanAndCloseFromImportedAccount(ht *lntemp.HarnessTest, srcNode,
|
||||||
signer *lntest.HarnessNode, account string,
|
destNode, signer *node.HarnessNode, account string,
|
||||||
accountAddrType walletrpc.AddressType, utxoAmt, chanSize int64) {
|
accountAddrType walletrpc.AddressType, utxoAmt, chanSize int64) {
|
||||||
|
|
||||||
ctxb := context.Background()
|
|
||||||
|
|
||||||
// Retrieve the current confirmed balance to make some assertions later
|
// Retrieve the current confirmed balance to make some assertions later
|
||||||
// on.
|
// on.
|
||||||
ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout)
|
balanceResp := srcNode.RPC.WalletBalance()
|
||||||
defer cancel()
|
require.Contains(ht, balanceResp.AccountBalance, account)
|
||||||
balanceResp, err := srcNode.WalletBalance(
|
|
||||||
ctxt, &lnrpc.WalletBalanceRequest{},
|
|
||||||
)
|
|
||||||
require.NoError(t.t, err)
|
|
||||||
require.Contains(t.t, balanceResp.AccountBalance, account)
|
|
||||||
accountConfBalance := balanceResp.
|
accountConfBalance := balanceResp.
|
||||||
AccountBalance[account].ConfirmedBalance
|
AccountBalance[account].ConfirmedBalance
|
||||||
defaultAccountConfBalance := balanceResp.
|
defaultAccountConfBalance := balanceResp.
|
||||||
@ -353,31 +247,23 @@ func fundChanAndCloseFromImportedAccount(t *harnessTest, srcNode, destNode,
|
|||||||
|
|
||||||
// Now, start the channel funding process. We'll need to connect both
|
// Now, start the channel funding process. We'll need to connect both
|
||||||
// nodes first.
|
// nodes first.
|
||||||
t.lndHarness.EnsureConnected(t.t, srcNode, destNode)
|
ht.EnsureConnected(srcNode, destNode)
|
||||||
|
|
||||||
// The source node will then fund the channel through a PSBT shim.
|
// The source node will then fund the channel through a PSBT shim.
|
||||||
var pendingChanID [32]byte
|
pendingChanID := ht.Random32Bytes()
|
||||||
_, err = rand.Read(pendingChanID[:])
|
chanUpdates, rawPsbt := ht.OpenChannelPsbt(
|
||||||
require.NoError(t.t, err)
|
srcNode, destNode, lntemp.OpenChannelParams{
|
||||||
|
|
||||||
ctxt, cancel = context.WithTimeout(ctxb, defaultTimeout)
|
|
||||||
defer cancel()
|
|
||||||
chanUpdates, rawPsbt, err := openChannelPsbt(
|
|
||||||
ctxt, srcNode, destNode, lntest.OpenChannelParams{
|
|
||||||
Amt: btcutil.Amount(chanSize),
|
Amt: btcutil.Amount(chanSize),
|
||||||
FundingShim: &lnrpc.FundingShim{
|
FundingShim: &lnrpc.FundingShim{
|
||||||
Shim: &lnrpc.FundingShim_PsbtShim{
|
Shim: &lnrpc.FundingShim_PsbtShim{
|
||||||
PsbtShim: &lnrpc.PsbtShim{
|
PsbtShim: &lnrpc.PsbtShim{
|
||||||
PendingChanId: pendingChanID[:],
|
PendingChanId: pendingChanID,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
)
|
)
|
||||||
require.NoError(t.t, err)
|
|
||||||
|
|
||||||
ctxt, cancel = context.WithTimeout(ctxb, defaultTimeout)
|
|
||||||
defer cancel()
|
|
||||||
fundReq := &walletrpc.FundPsbtRequest{
|
fundReq := &walletrpc.FundPsbtRequest{
|
||||||
Template: &walletrpc.FundPsbtRequest_Psbt{
|
Template: &walletrpc.FundPsbtRequest_Psbt{
|
||||||
Psbt: rawPsbt,
|
Psbt: rawPsbt,
|
||||||
@ -387,49 +273,40 @@ func fundChanAndCloseFromImportedAccount(t *harnessTest, srcNode, destNode,
|
|||||||
},
|
},
|
||||||
Account: account,
|
Account: account,
|
||||||
}
|
}
|
||||||
fundResp, err := srcNode.WalletKitClient.FundPsbt(ctxt, fundReq)
|
fundResp := srcNode.RPC.FundPsbt(fundReq)
|
||||||
require.NoError(t.t, err)
|
|
||||||
|
|
||||||
_, err = srcNode.FundingStateStep(ctxb, &lnrpc.FundingTransitionMsg{
|
srcNode.RPC.FundingStateStep(&lnrpc.FundingTransitionMsg{
|
||||||
Trigger: &lnrpc.FundingTransitionMsg_PsbtVerify{
|
Trigger: &lnrpc.FundingTransitionMsg_PsbtVerify{
|
||||||
PsbtVerify: &lnrpc.FundingPsbtVerify{
|
PsbtVerify: &lnrpc.FundingPsbtVerify{
|
||||||
PendingChanId: pendingChanID[:],
|
PendingChanId: pendingChanID,
|
||||||
FundedPsbt: fundResp.FundedPsbt,
|
FundedPsbt: fundResp.FundedPsbt,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
})
|
})
|
||||||
require.NoError(t.t, err)
|
|
||||||
|
|
||||||
// Now that we have a PSBT to fund the channel, our signer needs to sign
|
// Now that we have a PSBT to fund the channel, our signer needs to sign
|
||||||
// it.
|
// it.
|
||||||
ctxt, cancel = context.WithTimeout(ctxb, defaultTimeout)
|
|
||||||
defer cancel()
|
|
||||||
finalizeReq := &walletrpc.FinalizePsbtRequest{
|
finalizeReq := &walletrpc.FinalizePsbtRequest{
|
||||||
FundedPsbt: fundResp.FundedPsbt,
|
FundedPsbt: fundResp.FundedPsbt,
|
||||||
}
|
}
|
||||||
finalizeResp, err := signer.WalletKitClient.FinalizePsbt(ctxt, finalizeReq)
|
finalizeResp := signer.RPC.FinalizePsbt(finalizeReq)
|
||||||
require.NoError(t.t, err)
|
|
||||||
|
|
||||||
// The source node can then submit the signed PSBT and complete the
|
// The source node can then submit the signed PSBT and complete the
|
||||||
// channel funding process.
|
// channel funding process.
|
||||||
_, err = srcNode.FundingStateStep(ctxb, &lnrpc.FundingTransitionMsg{
|
srcNode.RPC.FundingStateStep(&lnrpc.FundingTransitionMsg{
|
||||||
Trigger: &lnrpc.FundingTransitionMsg_PsbtFinalize{
|
Trigger: &lnrpc.FundingTransitionMsg_PsbtFinalize{
|
||||||
PsbtFinalize: &lnrpc.FundingPsbtFinalize{
|
PsbtFinalize: &lnrpc.FundingPsbtFinalize{
|
||||||
PendingChanId: pendingChanID[:],
|
PendingChanId: pendingChanID,
|
||||||
SignedPsbt: finalizeResp.SignedPsbt,
|
SignedPsbt: finalizeResp.SignedPsbt,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
})
|
})
|
||||||
require.NoError(t.t, err)
|
|
||||||
|
|
||||||
// We should receive a notification of the channel funding transaction
|
// We should receive a notification of the channel funding transaction
|
||||||
// being broadcast.
|
// being broadcast.
|
||||||
ctxt, cancel = context.WithTimeout(ctxb, defaultTimeout)
|
updateResp := ht.ReceiveOpenChannelUpdate(chanUpdates)
|
||||||
defer cancel()
|
|
||||||
updateResp, err := receiveChanUpdate(ctxt, chanUpdates)
|
|
||||||
require.NoError(t.t, err)
|
|
||||||
upd, ok := updateResp.Update.(*lnrpc.OpenStatusUpdate_ChanPending)
|
upd, ok := updateResp.Update.(*lnrpc.OpenStatusUpdate_ChanPending)
|
||||||
require.True(t.t, ok)
|
require.True(ht, ok)
|
||||||
|
|
||||||
// Mine enough blocks to announce the channel to the network, making
|
// Mine enough blocks to announce the channel to the network, making
|
||||||
// balance assertions along the way.
|
// balance assertions along the way.
|
||||||
@ -467,11 +344,11 @@ func fundChanAndCloseFromImportedAccount(t *harnessTest, srcNode, destNode,
|
|||||||
expChangeScriptType = txscript.WitnessV1TaprootTy
|
expChangeScriptType = txscript.WitnessV1TaprootTy
|
||||||
|
|
||||||
default:
|
default:
|
||||||
t.Fatalf("unsupported addr type %v", accountAddrType)
|
ht.Fatalf("unsupported addr type %v", accountAddrType)
|
||||||
}
|
}
|
||||||
chanChangeUtxoAmt := utxoAmt - chanSize - expChanTxFee
|
chanChangeUtxoAmt := utxoAmt - chanSize - expChanTxFee
|
||||||
txHash, err := chainhash.NewHash(upd.ChanPending.Txid)
|
txHash, err := chainhash.NewHash(upd.ChanPending.Txid)
|
||||||
require.NoError(t.t, err)
|
require.NoError(ht, err)
|
||||||
|
|
||||||
// If we're spending from the default imported account, then any change
|
// If we're spending from the default imported account, then any change
|
||||||
// outputs produced are moved to the default wallet account, so we
|
// outputs produced are moved to the default wallet account, so we
|
||||||
@ -479,35 +356,35 @@ func fundChanAndCloseFromImportedAccount(t *harnessTest, srcNode, destNode,
|
|||||||
var confBalanceAfterChan int64
|
var confBalanceAfterChan int64
|
||||||
if account == defaultImportedAccount {
|
if account == defaultImportedAccount {
|
||||||
confBalanceAfterChan = defaultAccountConfBalance
|
confBalanceAfterChan = defaultAccountConfBalance
|
||||||
assertAccountBalance(t.t, srcNode, account, 0, 0)
|
ht.AssertWalletAccountBalance(srcNode, account, 0, 0)
|
||||||
assertAccountBalance(
|
ht.AssertWalletAccountBalance(
|
||||||
t.t, srcNode, defaultAccount, defaultAccountConfBalance,
|
srcNode, defaultAccount, defaultAccountConfBalance,
|
||||||
chanChangeUtxoAmt,
|
chanChangeUtxoAmt,
|
||||||
)
|
)
|
||||||
|
|
||||||
block := mineBlocks(t, t.lndHarness, 6, 1)[0]
|
block := ht.MineBlocksAndAssertNumTxes(6, 1)[0]
|
||||||
assertTxInBlock(t, block, txHash)
|
ht.Miner.AssertTxInBlock(block, txHash)
|
||||||
|
|
||||||
confBalanceAfterChan += chanChangeUtxoAmt
|
confBalanceAfterChan += chanChangeUtxoAmt
|
||||||
assertAccountBalance(t.t, srcNode, account, 0, 0)
|
ht.AssertWalletAccountBalance(srcNode, account, 0, 0)
|
||||||
assertAccountBalance(
|
ht.AssertWalletAccountBalance(
|
||||||
t.t, srcNode, defaultAccount, confBalanceAfterChan, 0,
|
srcNode, defaultAccount, confBalanceAfterChan, 0,
|
||||||
)
|
)
|
||||||
} else {
|
} else {
|
||||||
// Otherwise, all interactions remain within Carol's imported
|
// Otherwise, all interactions remain within Carol's imported
|
||||||
// account.
|
// account.
|
||||||
confBalanceAfterChan = accountConfBalance - utxoAmt
|
confBalanceAfterChan = accountConfBalance - utxoAmt
|
||||||
assertAccountBalance(
|
ht.AssertWalletAccountBalance(
|
||||||
t.t, srcNode, account, confBalanceAfterChan,
|
srcNode, account, confBalanceAfterChan,
|
||||||
chanChangeUtxoAmt,
|
chanChangeUtxoAmt,
|
||||||
)
|
)
|
||||||
|
|
||||||
block := mineBlocks(t, t.lndHarness, 6, 1)[0]
|
block := ht.MineBlocksAndAssertNumTxes(6, 1)[0]
|
||||||
assertTxInBlock(t, block, txHash)
|
ht.Miner.AssertTxInBlock(block, txHash)
|
||||||
|
|
||||||
confBalanceAfterChan += chanChangeUtxoAmt
|
confBalanceAfterChan += chanChangeUtxoAmt
|
||||||
assertAccountBalance(
|
ht.AssertWalletAccountBalance(
|
||||||
t.t, srcNode, account, confBalanceAfterChan, 0,
|
srcNode, account, confBalanceAfterChan, 0,
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -515,8 +392,10 @@ func fundChanAndCloseFromImportedAccount(t *harnessTest, srcNode, destNode,
|
|||||||
// based on the account.
|
// based on the account.
|
||||||
var tx wire.MsgTx
|
var tx wire.MsgTx
|
||||||
err = tx.Deserialize(bytes.NewReader(finalizeResp.RawFinalTx))
|
err = tx.Deserialize(bytes.NewReader(finalizeResp.RawFinalTx))
|
||||||
require.NoError(t.t, err)
|
require.NoError(ht, err)
|
||||||
assertOutputScriptType(t.t, expChangeScriptType, &tx, chanChangeUtxoAmt)
|
assertOutputScriptType(
|
||||||
|
ht.T, expChangeScriptType, &tx, chanChangeUtxoAmt,
|
||||||
|
)
|
||||||
|
|
||||||
// Wait for the channel to be announced by both parties.
|
// Wait for the channel to be announced by both parties.
|
||||||
chanPoint := &lnrpc.ChannelPoint{
|
chanPoint := &lnrpc.ChannelPoint{
|
||||||
@ -525,29 +404,30 @@ func fundChanAndCloseFromImportedAccount(t *harnessTest, srcNode, destNode,
|
|||||||
},
|
},
|
||||||
OutputIndex: upd.ChanPending.OutputIndex,
|
OutputIndex: upd.ChanPending.OutputIndex,
|
||||||
}
|
}
|
||||||
err = srcNode.WaitForNetworkChannelOpen(chanPoint)
|
ht.AssertTopologyChannelOpen(srcNode, chanPoint)
|
||||||
require.NoError(t.t, err)
|
ht.AssertTopologyChannelOpen(destNode, chanPoint)
|
||||||
err = destNode.WaitForNetworkChannelOpen(chanPoint)
|
|
||||||
require.NoError(t.t, err)
|
|
||||||
|
|
||||||
// Send a test payment to ensure the channel is operating as normal.
|
// Send a test payment to ensure the channel is operating as normal.
|
||||||
const invoiceAmt = 100000
|
const invoiceAmt = 100000
|
||||||
ctxt, cancel = context.WithTimeout(ctxb, defaultTimeout)
|
invoice := &lnrpc.Invoice{
|
||||||
defer cancel()
|
|
||||||
resp, err := destNode.AddInvoice(ctxt, &lnrpc.Invoice{
|
|
||||||
Memo: "psbt import chan",
|
Memo: "psbt import chan",
|
||||||
Value: invoiceAmt,
|
Value: invoiceAmt,
|
||||||
})
|
}
|
||||||
require.NoError(t.t, err)
|
resp := destNode.RPC.AddInvoice(invoice)
|
||||||
|
|
||||||
err = completePaymentRequests(
|
ht.CompletePaymentRequests(srcNode, []string{resp.PaymentRequest})
|
||||||
srcNode, srcNode.RouterClient,
|
|
||||||
[]string{resp.PaymentRequest}, true,
|
// TODO(yy): remove the sleep once the following bug is fixed. When the
|
||||||
)
|
// payment is reported as settled by srcNode, it's expected the
|
||||||
require.NoError(t.t, err)
|
// commitment dance is finished and all subsequent states have been
|
||||||
|
// updated. Yet we'd receive the error `cannot co-op close channel with
|
||||||
|
// active htlcs` or `link failed to shutdown` if we close the channel.
|
||||||
|
// We need to investigate the order of settling the payments and
|
||||||
|
// updating commitments to understand and fix it.
|
||||||
|
time.Sleep(2 * time.Second)
|
||||||
|
|
||||||
// Now that we've confirmed the opened channel works, we'll close it.
|
// Now that we've confirmed the opened channel works, we'll close it.
|
||||||
closeChannelAndAssert(t, t.lndHarness, srcNode, chanPoint, false)
|
ht.CloseChannel(srcNode, chanPoint)
|
||||||
|
|
||||||
// Since the channel still had funds left on the source node's side,
|
// Since the channel still had funds left on the source node's side,
|
||||||
// they must've been redeemed after the close. Without a pre-negotiated
|
// they must've been redeemed after the close. Without a pre-negotiated
|
||||||
@ -557,17 +437,17 @@ func fundChanAndCloseFromImportedAccount(t *harnessTest, srcNode, destNode,
|
|||||||
balanceFromClosedChan := chanSize - invoiceAmt - chanCloseTxFee
|
balanceFromClosedChan := chanSize - invoiceAmt - chanCloseTxFee
|
||||||
|
|
||||||
if account == defaultImportedAccount {
|
if account == defaultImportedAccount {
|
||||||
assertAccountBalance(t.t, srcNode, account, 0, 0)
|
ht.AssertWalletAccountBalance(srcNode, account, 0, 0)
|
||||||
assertAccountBalance(
|
ht.AssertWalletAccountBalance(
|
||||||
t.t, srcNode, defaultAccount,
|
srcNode, defaultAccount,
|
||||||
confBalanceAfterChan+balanceFromClosedChan, 0,
|
confBalanceAfterChan+balanceFromClosedChan, 0,
|
||||||
)
|
)
|
||||||
} else {
|
} else {
|
||||||
assertAccountBalance(
|
ht.AssertWalletAccountBalance(
|
||||||
t.t, srcNode, account, confBalanceAfterChan, 0,
|
srcNode, account, confBalanceAfterChan, 0,
|
||||||
)
|
)
|
||||||
assertAccountBalance(
|
ht.AssertWalletAccountBalance(
|
||||||
t.t, srcNode, defaultAccount, balanceFromClosedChan, 0,
|
srcNode, defaultAccount, balanceFromClosedChan, 0,
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
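Because fundChanAndCloseFromImportedAccount spans several hunks, here is a compact sketch of the PSBT funding handshake it drives: verify the funded PSBT against the pending channel ID, have the signer finalize it, then submit the signed PSBT to complete the flow. The RPC wrapper calls mirror those shown in the diff; the helper itself and its signature are hypothetical.

package itest

import (
	"github.com/lightningnetwork/lnd/lnrpc"
	"github.com/lightningnetwork/lnd/lnrpc/walletrpc"
	"github.com/lightningnetwork/lnd/lntemp/node"
)

// stepPsbtFunding sketches the two FundingStateStep transitions used above:
// the funded PSBT is first verified against the pending channel ID, then the
// signed PSBT finalizes the funding flow.
func stepPsbtFunding(srcNode, signer *node.HarnessNode,
	pendingChanID, fundedPsbt []byte) {

	// Verify the funded PSBT for the pending channel.
	srcNode.RPC.FundingStateStep(&lnrpc.FundingTransitionMsg{
		Trigger: &lnrpc.FundingTransitionMsg_PsbtVerify{
			PsbtVerify: &lnrpc.FundingPsbtVerify{
				PendingChanId: pendingChanID,
				FundedPsbt:    fundedPsbt,
			},
		},
	})

	// Have the signer finalize (sign) the PSBT.
	finalizeResp := signer.RPC.FinalizePsbt(&walletrpc.FinalizePsbtRequest{
		FundedPsbt: fundedPsbt,
	})

	// Hand the signed PSBT back to complete the funding flow.
	srcNode.RPC.FundingStateStep(&lnrpc.FundingTransitionMsg{
		Trigger: &lnrpc.FundingTransitionMsg_PsbtFinalize{
			PsbtFinalize: &lnrpc.FundingPsbtFinalize{
				PendingChanId: pendingChanID,
				SignedPsbt:    finalizeResp.SignedPsbt,
			},
		},
	})
}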
@ -575,18 +455,20 @@ func fundChanAndCloseFromImportedAccount(t *harnessTest, srcNode, destNode,
|
|||||||
// testWalletImportAccount tests that an imported account can fund transactions
|
// testWalletImportAccount tests that an imported account can fund transactions
|
||||||
// and channels through PSBTs, by having one node (the one with the imported
|
// and channels through PSBTs, by having one node (the one with the imported
|
||||||
// account) craft the transactions and another node act as the signer.
|
// account) craft the transactions and another node act as the signer.
|
||||||
func testWalletImportAccount(net *lntest.NetworkHarness, t *harnessTest) {
|
func testWalletImportAccount(ht *lntemp.HarnessTest) {
|
||||||
testCases := []struct {
|
testCases := []struct {
|
||||||
name string
|
name string
|
||||||
addrType walletrpc.AddressType
|
addrType walletrpc.AddressType
|
||||||
}{
|
}{
|
||||||
{
|
{
|
||||||
name: "standard BIP-0049",
|
name: "standard BIP-0049",
|
||||||
addrType: walletrpc.AddressType_NESTED_WITNESS_PUBKEY_HASH,
|
addrType: walletrpc.
|
||||||
|
AddressType_NESTED_WITNESS_PUBKEY_HASH,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "lnd BIP-0049 variant",
|
name: "lnd BIP-0049 variant",
|
||||||
addrType: walletrpc.AddressType_HYBRID_NESTED_WITNESS_PUBKEY_HASH,
|
addrType: walletrpc.
|
||||||
|
AddressType_HYBRID_NESTED_WITNESS_PUBKEY_HASH,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "standard BIP-0084",
|
name: "standard BIP-0084",
|
||||||
@ -600,23 +482,24 @@ func testWalletImportAccount(net *lntest.NetworkHarness, t *harnessTest) {
|
|||||||
|
|
||||||
for _, tc := range testCases {
|
for _, tc := range testCases {
|
||||||
tc := tc
|
tc := tc
|
||||||
success := t.t.Run(tc.name, func(tt *testing.T) {
|
success := ht.Run(tc.name, func(tt *testing.T) {
|
||||||
ht := newHarnessTest(tt, net)
|
testFunc := func(ht *lntemp.HarnessTest) {
|
||||||
ht.RunTestCase(&testCase{
|
testWalletImportAccountScenario(
|
||||||
name: tc.name,
|
ht, tc.addrType,
|
||||||
test: func(net1 *lntest.NetworkHarness,
|
)
|
||||||
t1 *harnessTest) {
|
}
|
||||||
|
|
||||||
testWalletImportAccountScenario(
|
st := ht.Subtest(tt)
|
||||||
net, t, tc.addrType,
|
|
||||||
)
|
st.RunTestCase(&lntemp.TestCase{
|
||||||
},
|
Name: tc.name,
|
||||||
|
TestFunc: testFunc,
|
||||||
})
|
})
|
||||||
})
|
})
|
||||||
if !success {
|
if !success {
|
||||||
// Log failure time to help relate the lnd logs to the
|
// Log failure time to help relate the lnd logs to the
|
||||||
// failure.
|
// failure.
|
||||||
t.Logf("Failure time: %v", time.Now().Format(
|
ht.Logf("Failure time: %v", time.Now().Format(
|
||||||
"2006-01-02 15:04:05.000",
|
"2006-01-02 15:04:05.000",
|
||||||
))
|
))
|
||||||
break
|
break
|
||||||
@ -624,115 +507,103 @@ func testWalletImportAccount(net *lntest.NetworkHarness, t *harnessTest) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func testWalletImportAccountScenario(net *lntest.NetworkHarness, t *harnessTest,
|
func testWalletImportAccountScenario(ht *lntemp.HarnessTest,
|
||||||
addrType walletrpc.AddressType) {
|
addrType walletrpc.AddressType) {
|
||||||
|
|
||||||
// We'll start our test by having two nodes, Carol and Dave. Carol's
|
// We'll start our test by having two nodes, Carol and Dave. Carol's
|
||||||
// default wallet account will be imported into Dave's node.
|
// default wallet account will be imported into Dave's node.
|
||||||
carol := net.NewNode(t.t, "carol", nil)
|
//
|
||||||
defer shutdownAndAssert(net, t, carol)
|
// NOTE: we won't use standby nodes here since the test will change
|
||||||
|
// each of the node's wallet state.
|
||||||
|
carol := ht.NewNode("carol", nil)
|
||||||
|
dave := ht.NewNode("dave", nil)
|
||||||
|
|
||||||
dave := net.NewNode(t.t, "dave", nil)
|
runWalletImportAccountScenario(ht, addrType, carol, dave)
|
||||||
defer shutdownAndAssert(net, t, dave)
|
|
||||||
|
|
||||||
runWalletImportAccountScenario(net, t, addrType, carol, dave)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func runWalletImportAccountScenario(net *lntest.NetworkHarness, t *harnessTest,
|
func runWalletImportAccountScenario(ht *lntemp.HarnessTest,
|
||||||
addrType walletrpc.AddressType, carol, dave *lntest.HarnessNode) {
|
addrType walletrpc.AddressType, carol, dave *node.HarnessNode) {
|
||||||
|
|
||||||
ctxb := context.Background()
|
|
||||||
const utxoAmt int64 = btcutil.SatoshiPerBitcoin
|
const utxoAmt int64 = btcutil.SatoshiPerBitcoin
|
||||||
|
|
||||||
ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout)
|
|
||||||
defer cancel()
|
|
||||||
listReq := &walletrpc.ListAccountsRequest{
|
listReq := &walletrpc.ListAccountsRequest{
|
||||||
Name: "default",
|
Name: "default",
|
||||||
AddressType: addrType,
|
AddressType: addrType,
|
||||||
}
|
}
|
||||||
listResp, err := carol.WalletKitClient.ListAccounts(ctxt, listReq)
|
listResp := carol.RPC.ListAccounts(listReq)
|
||||||
require.NoError(t.t, err)
|
require.Len(ht, listResp.Accounts, 1)
|
||||||
require.Equal(t.t, len(listResp.Accounts), 1)
|
|
||||||
carolAccount := listResp.Accounts[0]
|
carolAccount := listResp.Accounts[0]
|
||||||
|
|
||||||
ctxt, cancel = context.WithTimeout(ctxb, defaultTimeout)
|
|
||||||
defer cancel()
|
|
||||||
const importedAccount = "carol"
|
const importedAccount = "carol"
|
||||||
importReq := &walletrpc.ImportAccountRequest{
|
importReq := &walletrpc.ImportAccountRequest{
|
||||||
Name: importedAccount,
|
Name: importedAccount,
|
||||||
ExtendedPublicKey: carolAccount.ExtendedPublicKey,
|
ExtendedPublicKey: carolAccount.ExtendedPublicKey,
|
||||||
AddressType: addrType,
|
AddressType: addrType,
|
||||||
}
|
}
|
||||||
_, err = dave.WalletKitClient.ImportAccount(ctxt, importReq)
|
dave.RPC.ImportAccount(importReq)
|
||||||
require.NoError(t.t, err)
|
|
||||||
|
|
||||||
// We'll generate an address for Carol from Dave's node to receive some
|
// We'll generate an address for Carol from Dave's node to receive some
|
||||||
// funds.
|
// funds.
|
||||||
externalAddr := newExternalAddr(
|
externalAddr := newExternalAddr(
|
||||||
t.t, dave, carol, importedAccount, addrType,
|
ht, dave, carol, importedAccount, addrType,
|
||||||
)
|
)
|
||||||
|
|
||||||
// Send coins to Carol's address and confirm them, making sure the
|
// Send coins to Carol's address and confirm them, making sure the
|
||||||
// balance updates accordingly.
|
// balance updates accordingly.
|
||||||
ctxt, cancel = context.WithTimeout(ctxb, defaultTimeout)
|
alice := ht.Alice
|
||||||
defer cancel()
|
req := &lnrpc.SendCoinsRequest{
|
||||||
_, err = net.Alice.SendCoins(ctxt, &lnrpc.SendCoinsRequest{
|
|
||||||
Addr: externalAddr,
|
Addr: externalAddr,
|
||||||
Amount: utxoAmt,
|
Amount: utxoAmt,
|
||||||
SatPerByte: 1,
|
SatPerByte: 1,
|
||||||
})
|
}
|
||||||
require.NoError(t.t, err)
|
alice.RPC.SendCoins(req)
|
||||||
|
|
||||||
assertAccountBalance(t.t, dave, importedAccount, 0, utxoAmt)
|
ht.AssertWalletAccountBalance(dave, importedAccount, 0, utxoAmt)
|
||||||
_ = mineBlocks(t, net, 1, 1)
|
ht.MineBlocksAndAssertNumTxes(1, 1)
|
||||||
assertAccountBalance(t.t, dave, importedAccount, utxoAmt, 0)
|
ht.AssertWalletAccountBalance(dave, importedAccount, utxoAmt, 0)
|
||||||
|
|
||||||
// To ensure that Dave can use Carol's account as watch-only, we'll
|
// To ensure that Dave can use Carol's account as watch-only, we'll
|
||||||
// construct a PSBT that sends funds to Alice, which we'll then hand
|
// construct a PSBT that sends funds to Alice, which we'll then hand
|
||||||
// over to Carol to sign.
|
// over to Carol to sign.
|
||||||
psbtSendFromImportedAccount(
|
psbtSendFromImportedAccount(
|
||||||
t, dave, net.Alice, carol, importedAccount, addrType,
|
ht, dave, alice, carol, importedAccount, addrType,
|
||||||
)
|
)
|
||||||
|
|
||||||
// We'll generate a new address for Carol from Dave's node to receive
|
// We'll generate a new address for Carol from Dave's node to receive
|
||||||
// and fund a new channel.
|
// and fund a new channel.
|
||||||
externalAddr = newExternalAddr(
|
externalAddr = newExternalAddr(
|
||||||
t.t, dave, carol, importedAccount, addrType,
|
ht, dave, carol, importedAccount, addrType,
|
||||||
)
|
)
|
||||||
|
|
||||||
// Retrieve the current confirmed balance of the imported account for
|
// Retrieve the current confirmed balance of the imported account for
|
||||||
// some assertions we'll make later on.
|
// some assertions we'll make later on.
|
||||||
ctxt, cancel = context.WithTimeout(ctxb, defaultTimeout)
|
balanceResp := dave.RPC.WalletBalance()
|
||||||
defer cancel()
|
require.Contains(ht, balanceResp.AccountBalance, importedAccount)
|
||||||
balanceResp, err := dave.WalletBalance(
|
confBalance := balanceResp.AccountBalance[importedAccount].
|
||||||
ctxt, &lnrpc.WalletBalanceRequest{},
|
ConfirmedBalance
|
||||||
)
|
|
||||||
require.NoError(t.t, err)
|
|
||||||
require.Contains(t.t, balanceResp.AccountBalance, importedAccount)
|
|
||||||
confBalance := balanceResp.AccountBalance[importedAccount].ConfirmedBalance
|
|
||||||
|
|
||||||
// Send coins to Carol's address and confirm them, making sure the
|
// Send coins to Carol's address and confirm them, making sure the
|
||||||
// balance updates accordingly.
|
// balance updates accordingly.
|
||||||
ctxt, cancel = context.WithTimeout(ctxb, defaultTimeout)
|
req = &lnrpc.SendCoinsRequest{
|
||||||
defer cancel()
|
|
||||||
_, err = net.Alice.SendCoins(ctxt, &lnrpc.SendCoinsRequest{
|
|
||||||
Addr: externalAddr,
|
Addr: externalAddr,
|
||||||
Amount: utxoAmt,
|
Amount: utxoAmt,
|
||||||
SatPerByte: 1,
|
SatPerByte: 1,
|
||||||
})
|
}
|
||||||
require.NoError(t.t, err)
|
alice.RPC.SendCoins(req)
|
||||||
|
|
||||||
assertAccountBalance(t.t, dave, importedAccount, confBalance, utxoAmt)
|
ht.AssertWalletAccountBalance(
|
||||||
_ = mineBlocks(t, net, 1, 1)
|
dave, importedAccount, confBalance, utxoAmt,
|
||||||
assertAccountBalance(
|
)
|
||||||
t.t, dave, importedAccount, confBalance+utxoAmt, 0,
|
ht.MineBlocksAndAssertNumTxes(1, 1)
|
||||||
|
ht.AssertWalletAccountBalance(
|
||||||
|
dave, importedAccount, confBalance+utxoAmt, 0,
|
||||||
)
|
)
|
||||||
|
|
||||||
// Now that we have enough funds, it's time to fund the channel, make a
|
// Now that we have enough funds, it's time to fund the channel, make a
|
||||||
// test payment, and close it. This contains several balance assertions
|
// test payment, and close it. This contains several balance assertions
|
||||||
// along the way.
|
// along the way.
|
||||||
fundChanAndCloseFromImportedAccount(
|
fundChanAndCloseFromImportedAccount(
|
||||||
t, dave, net.Alice, carol, importedAccount, addrType, utxoAmt,
|
ht, dave, alice, carol, importedAccount, addrType, utxoAmt,
|
||||||
int64(funding.MaxBtcFundingAmount),
|
int64(funding.MaxBtcFundingAmount),
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
@ -740,14 +611,15 @@ func runWalletImportAccountScenario(net *lntest.NetworkHarness, t *harnessTest,
|
|||||||
// testWalletImportPubKey tests that imported public keys can fund
|
// testWalletImportPubKey tests that imported public keys can fund
|
||||||
// transactions and channels through PSBTs, by having one node (the one with the
|
// transactions and channels through PSBTs, by having one node (the one with the
|
||||||
// imported account) craft the transactions and another node act as the signer.
|
// imported account) craft the transactions and another node act as the signer.
|
||||||
func testWalletImportPubKey(net *lntest.NetworkHarness, t *harnessTest) {
|
func testWalletImportPubKey(ht *lntemp.HarnessTest) {
|
||||||
testCases := []struct {
|
testCases := []struct {
|
||||||
name string
|
name string
|
||||||
addrType walletrpc.AddressType
|
addrType walletrpc.AddressType
|
||||||
}{
|
}{
|
||||||
{
|
{
|
||||||
name: "BIP-0049",
|
name: "BIP-0049",
|
||||||
addrType: walletrpc.AddressType_NESTED_WITNESS_PUBKEY_HASH,
|
addrType: walletrpc.
|
||||||
|
AddressType_NESTED_WITNESS_PUBKEY_HASH,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "BIP-0084",
|
name: "BIP-0084",
|
||||||
@ -761,23 +633,24 @@ func testWalletImportPubKey(net *lntest.NetworkHarness, t *harnessTest) {
|
|||||||
|
|
||||||
for _, tc := range testCases {
|
for _, tc := range testCases {
|
||||||
tc := tc
|
tc := tc
|
||||||
success := t.t.Run(tc.name, func(tt *testing.T) {
|
success := ht.Run(tc.name, func(tt *testing.T) {
|
||||||
ht := newHarnessTest(tt, net)
|
testFunc := func(ht *lntemp.HarnessTest) {
|
||||||
ht.RunTestCase(&testCase{
|
testWalletImportPubKeyScenario(
|
||||||
name: tc.name,
|
ht, tc.addrType,
|
||||||
test: func(net1 *lntest.NetworkHarness,
|
)
|
||||||
t1 *harnessTest) {
|
}
|
||||||
|
|
||||||
testWalletImportPubKeyScenario(
|
st := ht.Subtest(tt)
|
||||||
net, t, tc.addrType,
|
|
||||||
)
|
st.RunTestCase(&lntemp.TestCase{
|
||||||
},
|
Name: tc.name,
|
||||||
|
TestFunc: testFunc,
|
||||||
})
|
})
|
||||||
})
|
})
|
||||||
if !success {
|
if !success {
|
||||||
// Log failure time to help relate the lnd logs to the
|
// Log failure time to help relate the lnd logs to the
|
||||||
// failure.
|
// failure.
|
||||||
t.Logf("Failure time: %v", time.Now().Format(
|
ht.Logf("Failure time: %v", time.Now().Format(
|
||||||
"2006-01-02 15:04:05.000",
|
"2006-01-02 15:04:05.000",
|
||||||
))
|
))
|
||||||
break
|
break
|
||||||
@ -785,18 +658,18 @@ func testWalletImportPubKey(net *lntest.NetworkHarness, t *harnessTest) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func testWalletImportPubKeyScenario(net *lntest.NetworkHarness, t *harnessTest,
|
func testWalletImportPubKeyScenario(ht *lntemp.HarnessTest,
|
||||||
addrType walletrpc.AddressType) {
|
addrType walletrpc.AddressType) {
|
||||||
|
|
||||||
ctxb := context.Background()
|
|
||||||
const utxoAmt int64 = btcutil.SatoshiPerBitcoin
|
const utxoAmt int64 = btcutil.SatoshiPerBitcoin
|
||||||
|
alice := ht.Alice
|
||||||
|
|
||||||
// We'll start our test by having two nodes, Carol and Dave.
|
// We'll start our test by having two nodes, Carol and Dave.
|
||||||
carol := net.NewNode(t.t, "carol", nil)
|
//
|
||||||
defer shutdownAndAssert(net, t, carol)
|
// NOTE: we won't use standby nodes here since the test will change
|
||||||
|
// each of the node's wallet state.
|
||||||
dave := net.NewNode(t.t, "dave", nil)
|
carol := ht.NewNode("carol", nil)
|
||||||
defer shutdownAndAssert(net, t, dave)
|
dave := ht.NewNode("dave", nil)
|
||||||
|
|
||||||
// We'll define a helper closure that we'll use throughout the test to
|
// We'll define a helper closure that we'll use throughout the test to
|
||||||
// generate a new address of the given type from Carol's perspective,
|
// generate a new address of the given type from Carol's perspective,
|
||||||
@ -806,30 +679,27 @@ func testWalletImportPubKeyScenario(net *lntest.NetworkHarness, t *harnessTest,
|
|||||||
|
|
||||||
// Retrieve Carol's account public key for the corresponding
|
// Retrieve Carol's account public key for the corresponding
|
||||||
// address type.
|
// address type.
|
||||||
ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout)
|
|
||||||
defer cancel()
|
|
||||||
listReq := &walletrpc.ListAccountsRequest{
|
listReq := &walletrpc.ListAccountsRequest{
|
||||||
Name: "default",
|
Name: "default",
|
||||||
AddressType: addrType,
|
AddressType: addrType,
|
||||||
}
|
}
|
||||||
listResp, err := carol.WalletKitClient.ListAccounts(
|
listResp := carol.RPC.ListAccounts(listReq)
|
||||||
ctxt, listReq,
|
require.Len(ht, listResp.Accounts, 1)
|
||||||
)
|
|
||||||
require.NoError(t.t, err)
|
|
||||||
require.Equal(t.t, len(listResp.Accounts), 1)
|
|
||||||
p2wkhAccount := listResp.Accounts[0]
|
p2wkhAccount := listResp.Accounts[0]
|
||||||
|
|
||||||
// Derive the external address at the given index.
|
// Derive the external address at the given index.
|
||||||
accountPubKey, err := hdkeychain.NewKeyFromString(
|
accountPubKey, err := hdkeychain.NewKeyFromString(
|
||||||
p2wkhAccount.ExtendedPublicKey,
|
p2wkhAccount.ExtendedPublicKey,
|
||||||
)
|
)
|
||||||
require.NoError(t.t, err)
|
require.NoError(ht, err)
|
||||||
externalAccountExtKey, err := accountPubKey.Derive(0)
|
externalAccountExtKey, err := accountPubKey.Derive(0)
|
||||||
require.NoError(t.t, err)
|
require.NoError(ht, err)
|
||||||
externalAddrExtKey, err := externalAccountExtKey.Derive(keyIndex)
|
externalAddrExtKey, err := externalAccountExtKey.Derive(
|
||||||
require.NoError(t.t, err)
|
keyIndex,
|
||||||
|
)
|
||||||
|
require.NoError(ht, err)
|
||||||
externalAddrPubKey, err := externalAddrExtKey.ECPubKey()
|
externalAddrPubKey, err := externalAddrExtKey.ECPubKey()
|
||||||
require.NoError(t.t, err)
|
require.NoError(ht, err)
|
||||||
|
|
||||||
// Serialize as 32-byte x-only pubkey for Taproot addresses.
|
// Serialize as 32-byte x-only pubkey for Taproot addresses.
|
||||||
serializedPubKey := externalAddrPubKey.SerializeCompressed()
|
serializedPubKey := externalAddrPubKey.SerializeCompressed()
|
||||||
@ -840,44 +710,34 @@ func testWalletImportPubKeyScenario(net *lntest.NetworkHarness, t *harnessTest,
 	}
 
 	// Import the public key into Dave.
-	ctxt, cancel = context.WithTimeout(ctxb, defaultTimeout)
-	defer cancel()
 	importReq := &walletrpc.ImportPublicKeyRequest{
 		PublicKey:   serializedPubKey,
 		AddressType: addrType,
 	}
-	_, err = dave.WalletKitClient.ImportPublicKey(ctxt, importReq)
-	require.NoError(t.t, err)
+	dave.RPC.ImportPublicKey(importReq)
 
 	// We'll also generate the same address for Carol, as it'll be
 	// required later when signing.
-	ctxt, cancel = context.WithTimeout(ctxb, defaultTimeout)
-	defer cancel()
-	carolAddrResp, err := carol.NewAddress(
-		ctxt, &lnrpc.NewAddressRequest{
-			Type: walletToLNAddrType(t.t, addrType),
-		},
-	)
-	require.NoError(t.t, err)
+	carolAddrResp := carol.RPC.NewAddress(&lnrpc.NewAddressRequest{
+		Type: walletToLNAddrType(ht.T, addrType),
+	})
 
 	// Send coins to Carol's address and confirm them, making sure
 	// the balance updates accordingly.
-	ctxt, cancel = context.WithTimeout(ctxb, defaultTimeout)
-	defer cancel()
-	_, err = net.Alice.SendCoins(ctxt, &lnrpc.SendCoinsRequest{
+	req := &lnrpc.SendCoinsRequest{
 		Addr:       carolAddrResp.Address,
 		Amount:     utxoAmt,
 		SatPerByte: 1,
-	})
-	require.NoError(t.t, err)
+	}
+	alice.RPC.SendCoins(req)
 
-	assertAccountBalance(
-		t.t, dave, defaultImportedAccount, prevConfBalance,
+	ht.AssertWalletAccountBalance(
+		dave, defaultImportedAccount, prevConfBalance,
 		prevUnconfBalance+utxoAmt,
 	)
-	_ = mineBlocks(t, net, 1, 1)
-	assertAccountBalance(
-		t.t, dave, defaultImportedAccount,
+	ht.MineBlocksAndAssertNumTxes(1, 1)
+	ht.AssertWalletAccountBalance(
+		dave, defaultImportedAccount,
 		prevConfBalance+utxoAmt, prevUnconfBalance,
 	)
 }
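The shape of this hunk is the whole point of the itest refactor: every raw gRPC call wrapped in context.WithTimeout, defer cancel and require.NoError boilerplate collapses into a harness helper that asserts success internally. The real wrapper lives in lntemp/rpc elsewhere in this commit; the following is only a rough sketch of the pattern under assumed names (HarnessRPC, defaultTimeout), not lnd's actual implementation.

package rpc

import (
	"context"
	"testing"
	"time"

	"github.com/lightningnetwork/lnd/lnrpc/walletrpc"
	"github.com/stretchr/testify/require"
)

// defaultTimeout bounds every wrapped RPC call.
const defaultTimeout = 30 * time.Second

// HarnessRPC is a hypothetical sketch of the wrapper used by the refactored
// itests: it owns the test handle and the raw client, so call sites no longer
// repeat context and error handling.
type HarnessRPC struct {
	T         *testing.T
	WalletKit walletrpc.WalletKitClient
}

// ImportPublicKey calls the raw RPC and fails the test on any error, which is
// why the call site shrinks to a single line in the hunk above.
func (h *HarnessRPC) ImportPublicKey(
	req *walletrpc.ImportPublicKeyRequest) *walletrpc.ImportPublicKeyResponse {

	ctxt, cancel := context.WithTimeout(context.Background(), defaultTimeout)
	defer cancel()

	resp, err := h.WalletKit.ImportPublicKey(ctxt, req)
	require.NoError(h.T, err, "ImportPublicKey failed")

	return resp
}

With such a wrapper, six lines of per-call boilerplate reduce to dave.RPC.ImportPublicKey(importReq), which is exactly the before/after visible in the diff.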
@ -890,22 +750,15 @@ func testWalletImportPubKeyScenario(net *lntest.NetworkHarness, t *harnessTest,
 	// construct a PSBT that sends funds to Alice, which we'll then hand
 	// over to Carol to sign.
 	psbtSendFromImportedAccount(
-		t, dave, net.Alice, carol, defaultImportedAccount, addrType,
+		ht, dave, alice, carol, defaultImportedAccount, addrType,
 	)
 
 	// We'll now attempt to fund a channel.
 	//
 	// We'll have Carol generate another external address, which we'll
 	// import into Dave again.
-	ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout)
-	defer cancel()
-	balanceResp, err := dave.WalletBalance(
-		ctxt, &lnrpc.WalletBalanceRequest{},
-	)
-	require.NoError(t.t, err)
-	require.Contains(
-		t.t, balanceResp.AccountBalance, defaultImportedAccount,
-	)
+	balanceResp := dave.RPC.WalletBalance()
+	require.Contains(ht, balanceResp.AccountBalance, defaultImportedAccount)
 	confBalance := balanceResp.
 		AccountBalance[defaultImportedAccount].ConfirmedBalance
 	importPubKey(1, confBalance, 0)
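The confirmed balance read above comes from the per-account map in the WalletBalance response; a short sketch of that lookup follows, with the helper name made up for illustration.

import "github.com/lightningnetwork/lnd/lnrpc"

// importedConfBalance returns the confirmed balance of the given account from
// a WalletBalance response, and false if the account is not present.
func importedConfBalance(resp *lnrpc.WalletBalanceResponse,
	account string) (int64, bool) {

	acct, ok := resp.AccountBalance[account]
	if !ok {
		return 0, false
	}

	return acct.ConfirmedBalance, true
}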
@ -914,7 +767,7 @@ func testWalletImportPubKeyScenario(net *lntest.NetworkHarness, t *harnessTest,
 	// test payment, and close it. This contains several balance assertions
 	// along the way.
 	fundChanAndCloseFromImportedAccount(
-		t, dave, net.Alice, carol, defaultImportedAccount, addrType,
+		ht, dave, alice, carol, defaultImportedAccount, addrType,
 		utxoAmt, int64(funding.MaxBtcFundingAmount),
 	)
 }
File diff suppressed because it is too large
@ -5,20 +5,16 @@ import (
 	"crypto/rand"
 	"fmt"
 	"io"
-	"testing"
 	"time"
 
 	"github.com/btcsuite/btcd/btcutil"
-	"github.com/btcsuite/btcd/chaincfg/chainhash"
 	"github.com/btcsuite/btcd/rpcclient"
-	"github.com/btcsuite/btcd/txscript"
 	"github.com/btcsuite/btcd/wire"
 	"github.com/go-errors/errors"
 	"github.com/lightningnetwork/lnd/input"
 	"github.com/lightningnetwork/lnd/lnrpc"
 	"github.com/lightningnetwork/lnd/lnrpc/routerrpc"
 	"github.com/lightningnetwork/lnd/lntemp"
-	"github.com/lightningnetwork/lnd/lntemp/rpc"
 	"github.com/lightningnetwork/lnd/lntest"
 	"github.com/lightningnetwork/lnd/lntest/wait"
 	"github.com/lightningnetwork/lnd/lnwallet"
@ -470,49 +466,3 @@ func findTxAtHeight(t *harnessTest, height int32,
 
 	return nil
 }
-
-// getOutputIndex returns the output index of the given address in the given
-// transaction.
-func getOutputIndex(t *harnessTest, miner *lntest.HarnessMiner,
-	txid *chainhash.Hash, addr string) int {
-
-	t.t.Helper()
-
-	// We'll then extract the raw transaction from the mempool in order to
-	// determine the index of the p2tr output.
-	tx, err := miner.Client.GetRawTransaction(txid)
-	require.NoError(t.t, err)
-
-	p2trOutputIndex := -1
-	for i, txOut := range tx.MsgTx().TxOut {
-		_, addrs, _, err := txscript.ExtractPkScriptAddrs(
-			txOut.PkScript, miner.ActiveNet,
-		)
-		require.NoError(t.t, err)
-
-		if addrs[0].String() == addr {
-			p2trOutputIndex = i
-		}
-	}
-	require.Greater(t.t, p2trOutputIndex, -1)
-
-	return p2trOutputIndex
-}
-
-// acceptChannel is used to accept a single channel that comes across. This
-// should be run in a goroutine and is used to test nodes with the zero-conf
-// feature bit.
-func acceptChannel(t *testing.T, zeroConf bool, stream rpc.AcceptorClient) {
-	t.Helper()
-
-	req, err := stream.Recv()
-	require.NoError(t, err)
-
-	resp := &lnrpc.ChannelAcceptResponse{
-		Accept:        true,
-		PendingChanId: req.PendingChanId,
-		ZeroConf:      zeroConf,
-	}
-	err = stream.Send(resp)
-	require.NoError(t, err)
-}
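The deleted acceptChannel helper is a compact illustration of lnd's channel-acceptor protocol: read one ChannelAcceptRequest from the stream and answer with a ChannelAcceptResponse that echoes the pending channel ID and optionally sets the zero-conf flag. Below is a hedged sketch of the same idea, with the stream hidden behind a minimal interface; that interface is an assumption for the example, not the harness's real client type.

package itest

import (
	"testing"

	"github.com/lightningnetwork/lnd/lnrpc"
	"github.com/stretchr/testify/require"
)

// acceptorStream is the minimal Recv/Send surface the helper needs; the real
// channel-acceptor client exposes an equivalent pair of methods.
type acceptorStream interface {
	Recv() (*lnrpc.ChannelAcceptRequest, error)
	Send(*lnrpc.ChannelAcceptResponse) error
}

// acceptOne accepts a single incoming channel, optionally marking it
// zero-conf. It is meant to be run in a goroutine before the channel open.
func acceptOne(t *testing.T, zeroConf bool, stream acceptorStream) {
	t.Helper()

	req, err := stream.Recv()
	require.NoError(t, err)

	err = stream.Send(&lnrpc.ChannelAcceptResponse{
		Accept:        true,
		PendingChanId: req.PendingChanId,
		ZeroConf:      zeroConf,
	})
	require.NoError(t, err)
}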
@ -3566,9 +3566,12 @@ func (lc *LightningChannel) validateCommitmentSanity(theirLogCounter,
 	// balance to satisfy the final evaluated HTLC's.
 	switch {
 	case int64(ourBalance) < 0:
-		return ErrBelowChanReserve
+		return fmt.Errorf("%w: negative local balance",
+			ErrBelowChanReserve)
+
 	case int64(theirBalance) < 0:
-		return ErrBelowChanReserve
+		return fmt.Errorf("%w: negative remote balance",
+			ErrBelowChanReserve)
 	}
 
 	// Ensure that the fee being applied is enough to be relayed across the
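Switching from returning the bare ErrBelowChanReserve sentinel to fmt.Errorf with the %w verb keeps the sentinel matchable via errors.Is while adding context about which balance went negative. A standalone sketch of why callers keep working only if they use errors.Is (the sentinel text here is illustrative, not lnd's exact message):

package main

import (
	"errors"
	"fmt"
)

// ErrBelowChanReserve mimics the sentinel error used by lnwallet.
var ErrBelowChanReserve = errors.New("commitment transaction dips peer " +
	"below chan reserve")

func validate(balance int64) error {
	if balance < 0 {
		// Wrapping with %w preserves the sentinel in the error chain.
		return fmt.Errorf("%w: negative local balance",
			ErrBelowChanReserve)
	}
	return nil
}

func main() {
	err := validate(-1)

	// errors.Is walks the chain, so the wrapped sentinel still matches...
	fmt.Println(errors.Is(err, ErrBelowChanReserve)) // true

	// ...whereas a direct comparison against the sentinel no longer does.
	fmt.Println(err == ErrBelowChanReserve) // false
}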
@ -3580,17 +3583,25 @@ func (lc *LightningChannel) validateCommitmentSanity(theirLogCounter,
 
 	// If the added HTLCs will decrease the balance, make sure they won't
 	// dip the local and remote balances below the channel reserves.
+	ourReserve := lnwire.NewMSatFromSatoshis(
+		lc.channelState.LocalChanCfg.ChanReserve,
+	)
+	theirReserve := lnwire.NewMSatFromSatoshis(
+		lc.channelState.RemoteChanCfg.ChanReserve,
+	)
+
 	switch {
-	case ourBalance < ourInitialBalance &&
-		ourBalance < lnwire.NewMSatFromSatoshis(
-			lc.channelState.LocalChanCfg.ChanReserve):
-
-		return ErrBelowChanReserve
-	case theirBalance < theirInitialBalance &&
-		theirBalance < lnwire.NewMSatFromSatoshis(
-			lc.channelState.RemoteChanCfg.ChanReserve):
-
-		return ErrBelowChanReserve
+	case ourBalance < ourInitialBalance && ourBalance < ourReserve:
+		lc.log.Debugf("Funds below chan reserve: ourBalance=%v, "+
+			"ourReserve=%v", ourBalance, ourReserve)
+		return fmt.Errorf("%w: our balance below chan reserve",
+			ErrBelowChanReserve)
+
+	case theirBalance < theirInitialBalance && theirBalance < theirReserve:
+		lc.log.Debugf("Funds below chan reserve: theirBalance=%v, "+
+			"theirReserve=%v", theirBalance, theirReserve)
+		return fmt.Errorf("%w: their balance below chan reserve",
+			ErrBelowChanReserve)
 	}
 
 	// validateUpdates take a set of updates, and validates them against
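Hoisting the conversions into ourReserve and theirReserve avoids repeating lnwire.NewMSatFromSatoshis inside the case expressions and gives the Debugf calls something readable to print. The conversion itself is only a unit change, as the quick illustration below shows (this is not code from the diff):

package main

import (
	"fmt"

	"github.com/btcsuite/btcd/btcutil"
	"github.com/lightningnetwork/lnd/lnwire"
)

func main() {
	// One satoshi is 1,000 millisatoshis, so a 10,000-sat channel
	// reserve compares against channel balances as 10,000,000 msat.
	reserve := lnwire.NewMSatFromSatoshis(btcutil.Amount(10_000))
	fmt.Println(uint64(reserve)) // 10000000
}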
@ -2737,10 +2737,7 @@ func TestAddHTLCNegativeBalance(t *testing.T) {
 	htlcAmt = lnwire.NewMSatFromSatoshis(2 * btcutil.SatoshiPerBitcoin)
 	htlc, _ := createHTLC(numHTLCs+1, htlcAmt)
 	_, err = aliceChannel.AddHTLC(htlc, nil)
-	if err != ErrBelowChanReserve {
-		t.Fatalf("expected balance below channel reserve, instead "+
-			"got: %v", err)
-	}
+	require.ErrorIs(t, err, ErrBelowChanReserve)
 }
 
 // assertNoChanSyncNeeded is a helper function that asserts that upon restart,
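These test updates follow directly from the lnwallet change above: AddHTLC now returns a wrapped error, so the old err != ErrBelowChanReserve comparison would fail even though the reserve violation is still detected. require.ErrorIs delegates to errors.Is, which unwraps the chain. A tiny standalone illustration (not lnd code):

package example

import (
	"errors"
	"fmt"
	"testing"

	"github.com/stretchr/testify/require"
)

func TestWrappedSentinelMatches(t *testing.T) {
	sentinel := errors.New("below chan reserve")
	err := fmt.Errorf("%w: our balance below chan reserve", sentinel)

	// Passes: ErrorIs walks the wrap chain via errors.Is.
	require.ErrorIs(t, err, sentinel)

	// A plain equality check against the sentinel no longer holds.
	require.False(t, err == sentinel)
}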
@ -5642,10 +5639,8 @@ func TestDesyncHTLCs(t *testing.T) {
 	// balance is unavailable.
 	htlcAmt = lnwire.NewMSatFromSatoshis(1 * btcutil.SatoshiPerBitcoin)
 	htlc, _ = createHTLC(1, htlcAmt)
-	if _, err = aliceChannel.AddHTLC(htlc, nil); err != ErrBelowChanReserve {
-		t.Fatalf("expected ErrInsufficientBalance, instead received: %v",
-			err)
-	}
+	_, err = aliceChannel.AddHTLC(htlc, nil)
+	require.ErrorIs(t, err, ErrBelowChanReserve)
 
 	// Now do a state transition, which will ACK the FailHTLC, making Alice
 	// able to add the new HTLC.
@ -6063,14 +6058,11 @@ func TestChanReserve(t *testing.T) {
 	htlc, _ = createHTLC(bobIndex, htlcAmt)
 	bobIndex++
 	_, err := bobChannel.AddHTLC(htlc, nil)
-	if err != ErrBelowChanReserve {
-		t.Fatalf("expected ErrBelowChanReserve, instead received: %v", err)
-	}
+	require.ErrorIs(t, err, ErrBelowChanReserve)
 
 	// Alice will reject this htlc upon receiving the htlc.
-	if _, err := aliceChannel.ReceiveHTLC(htlc); err != ErrBelowChanReserve {
-		t.Fatalf("expected ErrBelowChanReserve, instead received: %v", err)
-	}
+	_, err = aliceChannel.ReceiveHTLC(htlc)
+	require.ErrorIs(t, err, ErrBelowChanReserve)
 
 	// We must setup the channels again, since a violation of the channel
 	// constraints leads to channel shutdown.
@ -6105,14 +6097,11 @@ func TestChanReserve(t *testing.T) {
 	htlc, _ = createHTLC(aliceIndex, htlcAmt)
 	aliceIndex++
 	_, err = aliceChannel.AddHTLC(htlc, nil)
-	if err != ErrBelowChanReserve {
-		t.Fatalf("expected ErrBelowChanReserve, instead received: %v", err)
-	}
+	require.ErrorIs(t, err, ErrBelowChanReserve)
 
 	// Likewise, Bob will reject receiving the htlc because of the same reason.
-	if _, err := bobChannel.ReceiveHTLC(htlc); err != ErrBelowChanReserve {
-		t.Fatalf("expected ErrBelowChanReserve, instead received: %v", err)
-	}
+	_, err = bobChannel.ReceiveHTLC(htlc)
+	require.ErrorIs(t, err, ErrBelowChanReserve)
 
 	// We must setup the channels again, since a violation of the channel
 	// constraints leads to channel shutdown.
@ -6218,22 +6207,15 @@ func TestChanReserveRemoteInitiator(t *testing.T) {
 	// Bob should refuse to add this HTLC, since he realizes it will create
 	// an invalid commitment.
 	_, err = bobChannel.AddHTLC(htlc, nil)
-	if err != ErrBelowChanReserve {
-		t.Fatalf("expected ErrBelowChanReserve, instead received: %v",
-			err)
-	}
+	require.ErrorIs(t, err, ErrBelowChanReserve)
 
 	// Of course Alice will also not have enough balance to add it herself.
 	_, err = aliceChannel.AddHTLC(htlc, nil)
-	if err != ErrBelowChanReserve {
-		t.Fatalf("expected ErrBelowChanReserve, instead received: %v",
-			err)
-	}
+	require.ErrorIs(t, err, ErrBelowChanReserve)
 
 	// Same for Alice, she should refuse to accept this second HTLC.
-	if _, err := aliceChannel.ReceiveHTLC(htlc); err != ErrBelowChanReserve {
-		t.Fatalf("expected ErrBelowChanReserve, instead received: %v", err)
-	}
+	_, err = aliceChannel.ReceiveHTLC(htlc)
+	require.ErrorIs(t, err, ErrBelowChanReserve)
 }
 
 // TestChanReserveLocalInitiatorDustHtlc tests that fee the initiator must pay
@ -6276,9 +6258,7 @@ func TestChanReserveLocalInitiatorDustHtlc(t *testing.T) {
 	// Alice should realize that the fee she must pay to add this HTLC to
 	// the local commitment would take her below the channel reserve.
 	_, err = aliceChannel.AddHTLC(htlc, nil)
-	if err != ErrBelowChanReserve {
-		t.Fatalf("expected ErrBelowChanReserve, instead received: %v", err)
-	}
+	require.ErrorIs(t, err, ErrBelowChanReserve)
 }
 
 // TestMinHTLC tests that the ErrBelowMinHTLC error is thrown if an HTLC is added