sweep: remove dead code and dead tests

yyforyongyu 2024-04-11 17:33:28 +08:00
parent 7af195768a
commit 54aaeea491
No known key found for this signature in database
GPG Key ID: 9BCD95C4FF296868
8 changed files with 32 additions and 3922 deletions


@@ -9,113 +9,6 @@ import (
"github.com/lightningnetwork/lnd/lnwallet/chainfee"
)
const (
// DefaultFeeRateBucketSize is the default size of fee rate buckets
// we'll use when clustering inputs into buckets with similar fee rates
// within the SimpleAggregator.
//
// Given a minimum relay fee rate of 1 sat/vbyte, a bucket size of 10
// would result in the following fee rate buckets up to the maximum fee
// rate:
//
// #1: min = 1 sat/vbyte, max = 10 sat/vbyte
// #2: min = 11 sat/vbyte, max = 20 sat/vbyte...
DefaultFeeRateBucketSize = 10
)
// inputCluster is a helper struct to gather a set of pending inputs that
// should be swept with the specified fee rate.
type inputCluster struct {
lockTime *uint32
sweepFeeRate chainfee.SatPerKWeight
inputs InputsMap
}
// createInputSets goes through the cluster's inputs and constructs sets of
// inputs that can be used to generate a sweeping transaction. Each set
// contains up to the configured maximum number of inputs. Negative-yield
// inputs are skipped. Input sets whose total value after fees falls below
// the dust limit are not returned.
func (c *inputCluster) createInputSets(maxFeeRate chainfee.SatPerKWeight,
maxInputs uint32) []InputSet {
// Turn the inputs into a slice so we can sort them.
inputList := make([]*SweeperInput, 0, len(c.inputs))
for _, input := range c.inputs {
inputList = append(inputList, input)
}
// Yield is calculated as the difference between value and added fee
// for this input. The fee calculation excludes fee components that are
// common to all inputs, as those wouldn't influence the order. The only
// differentiating component is the witness size.
//
// For witness size, the upper limit is taken. The actual size depends
// on the signature length, which is not known yet at this point.
calcYield := func(input *SweeperInput) int64 {
size, _, err := input.WitnessType().SizeUpperBound()
if err != nil {
log.Errorf("Failed to get input weight: %v", err)
return 0
}
yield := input.SignDesc().Output.Value -
int64(c.sweepFeeRate.FeeForWeight(int64(size)))
return yield
}
// Sort input by yield. We will start constructing input sets starting
// with the highest yield inputs. This is to prevent the construction
// of a set with an output below the dust limit, causing the sweep
// process to stop, while there are still higher value inputs
// available. It also allows us to stop evaluating more inputs when the
// first input in this ordering is encountered with a negative yield.
sort.Slice(inputList, func(i, j int) bool {
// Because of the specific ordering and termination condition
// that is described above, we place force sweeps at the start
// of the list. Otherwise we can't be sure that they will be
// included in an input set.
if inputList[i].parameters().Immediate {
return true
}
return calcYield(inputList[i]) > calcYield(inputList[j])
})
// Select blocks of inputs up to the configured maximum number.
var sets []InputSet
for len(inputList) > 0 {
// Start building a set of positive-yield tx inputs under the
// condition that the tx will be published with the specified
// fee rate.
txInputs := newTxInputSet(c.sweepFeeRate, maxFeeRate, maxInputs)
// From the set of sweepable inputs, keep adding inputs to the
// input set until the tx output value no longer goes up or the
// maximum number of inputs is reached.
txInputs.addPositiveYieldInputs(inputList)
// If there are no positive yield inputs, we can stop here.
inputCount := len(txInputs.inputs)
if inputCount == 0 {
return sets
}
log.Infof("Candidate sweep set of size=%v (+%v wallet inputs),"+
" has yield=%v, weight=%v",
inputCount, len(txInputs.inputs)-inputCount,
txInputs.totalOutput()-txInputs.walletInputTotal,
txInputs.weightEstimate(true).weight())
sets = append(sets, txInputs)
inputList = inputList[inputCount:]
}
return sets
}
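
As a simplified, self-contained illustration of the ordering used above (plain structs and hypothetical numbers instead of SweeperInput and chainfee types), force sweeps go first and the rest are sorted by value minus the witness-only fee:

package main

import (
	"fmt"
	"sort"
)

// fakeInput is a simplified stand-in for SweeperInput, used only to
// illustrate the yield ordering; all values are hypothetical.
type fakeInput struct {
	value     int64 // output value in sats
	witnessWU int64 // upper-bound witness weight in weight units
	immediate bool  // force sweep, placed at the front of the list
}

func main() {
	const feeRateSatPerKW = int64(253) // illustrative sweep fee rate

	// Yield only accounts for the fee attributable to this input's
	// witness, mirroring calcYield above.
	yield := func(in fakeInput) int64 {
		return in.value - feeRateSatPerKW*in.witnessWU/1000
	}

	inputs := []fakeInput{
		{value: 300, witnessWU: 400},
		{value: 10_000, witnessWU: 500},
		{value: 50, witnessWU: 300, immediate: true},
	}

	// Force sweeps first, then descending yield.
	sort.Slice(inputs, func(i, j int) bool {
		if inputs[i].immediate {
			return true
		}
		return yield(inputs[i]) > yield(inputs[j])
	})

	fmt.Println(inputs)
}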
// UtxoAggregator defines an interface that takes a list of inputs and
// aggregate them into groups. Each group is used as the inputs to create a
// sweeping transaction.
@@ -125,345 +18,6 @@ type UtxoAggregator interface {
ClusterInputs(inputs InputsMap) []InputSet
}
// SimpleAggregator aggregates inputs known by the Sweeper based on each
// input's locktime and feerate.
type SimpleAggregator struct {
// FeeEstimator is used when crafting sweep transactions to estimate
// the necessary fee relative to the expected size of the sweep
// transaction.
FeeEstimator chainfee.Estimator
// MaxFeeRate is the maximum fee rate allowed within the
// SimpleAggregator.
MaxFeeRate chainfee.SatPerKWeight
// MaxInputsPerTx specifies the default maximum number of inputs allowed
// in a single sweep tx. If more need to be swept, multiple txes are
// created and published.
MaxInputsPerTx uint32
// FeeRateBucketSize is the default size of fee rate buckets we'll use
// when clustering inputs into buckets with similar fee rates within
// the SimpleAggregator.
//
// Given a minimum relay fee rate of 1 sat/vbyte, a fee rate bucket
// size of 10 would result in the following fee rate buckets up to the
// maximum fee rate:
//
// #1: min = 1 sat/vbyte, max (exclusive) = 11 sat/vbyte
// #2: min = 11 sat/vbyte, max (exclusive) = 21 sat/vbyte...
FeeRateBucketSize int
}
// Compile-time constraint to ensure SimpleAggregator implements UtxoAggregator.
var _ UtxoAggregator = (*SimpleAggregator)(nil)
// NewSimpleUtxoAggregator creates a new instance of a SimpleAggregator.
func NewSimpleUtxoAggregator(estimator chainfee.Estimator,
max chainfee.SatPerKWeight, maxTx uint32) *SimpleAggregator {
return &SimpleAggregator{
FeeEstimator: estimator,
MaxFeeRate: max,
MaxInputsPerTx: maxTx,
FeeRateBucketSize: DefaultFeeRateBucketSize,
}
}
// ClusterInputs creates a list of input clusters from the set of pending
// inputs known by the UtxoSweeper. It clusters inputs by
// 1) Required tx locktime
// 2) Similar fee rates.
func (s *SimpleAggregator) ClusterInputs(inputs InputsMap) []InputSet {
// We start by getting the inputs clusters by locktime. Since the
// inputs commit to the locktime, they can only be clustered together
// if the locktime is equal.
lockTimeClusters, nonLockTimeInputs := s.clusterByLockTime(inputs)
// Cluster the remaining inputs by sweep fee rate.
feeClusters := s.clusterBySweepFeeRate(nonLockTimeInputs)
// Since the inputs that we clustered by fee rate don't commit to a
// specific locktime, we can try to merge a locktime cluster with a fee
// cluster.
clusters := zipClusters(lockTimeClusters, feeClusters)
sort.Slice(clusters, func(i, j int) bool {
return clusters[i].sweepFeeRate >
clusters[j].sweepFeeRate
})
// Now that we have the clusters, we can create the input sets.
var inputSets []InputSet
for _, cluster := range clusters {
sets := cluster.createInputSets(
s.MaxFeeRate, s.MaxInputsPerTx,
)
inputSets = append(inputSets, sets...)
}
return inputSets
}
// clusterByLockTime takes the given set of pending inputs and clusters those
// with equal locktime together. Each cluster contains a sweep fee rate, which
// is determined by calculating the average fee rate of all inputs within that
// cluster. In addition to the created clusters, inputs that did not specify a
// required locktime are returned.
func (s *SimpleAggregator) clusterByLockTime(
inputs InputsMap) ([]inputCluster, InputsMap) {
locktimes := make(map[uint32]InputsMap)
rem := make(InputsMap)
// Go through all inputs and check if they require a certain locktime.
for op, input := range inputs {
lt, ok := input.RequiredLockTime()
if !ok {
rem[op] = input
continue
}
// Check if we already have inputs with this locktime.
cluster, ok := locktimes[lt]
if !ok {
cluster = make(InputsMap)
}
// Get the fee rate based on the fee preference. If an error is
// returned, we'll skip sweeping this input for this round of
// cluster creation and retry it when we create the clusters
// from the pending inputs again.
feeRate, err := input.params.Fee.Estimate(
s.FeeEstimator, s.MaxFeeRate,
)
if err != nil {
log.Warnf("Skipping input %v: %v", op, err)
continue
}
log.Debugf("Adding input %v to cluster with locktime=%v, "+
"feeRate=%v", op, lt, feeRate)
// Attach the fee rate to the input.
input.lastFeeRate = feeRate
// Update the cluster about the updated input.
cluster[op] = input
locktimes[lt] = cluster
}
// We'll then determine the sweep fee rate for each set of inputs by
// calculating the average fee rate of the inputs within each set.
inputClusters := make([]inputCluster, 0, len(locktimes))
for lt, cluster := range locktimes {
lt := lt
var sweepFeeRate chainfee.SatPerKWeight
for _, input := range cluster {
sweepFeeRate += input.lastFeeRate
}
sweepFeeRate /= chainfee.SatPerKWeight(len(cluster))
inputClusters = append(inputClusters, inputCluster{
lockTime: &lt,
sweepFeeRate: sweepFeeRate,
inputs: cluster,
})
}
return inputClusters, rem
}
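
A minimal, stand-alone sketch of the grouping and fee-rate averaging described above, using hypothetical per-input fee rates keyed by required locktime instead of the package's InputsMap:

package main

import "fmt"

func main() {
	// Hypothetical per-input fee rates (sat/kw) grouped by required
	// locktime, standing in for the locktimes map built above.
	locktimes := map[uint32][]uint64{
		100: {250, 350}, // two inputs that must spend at locktime 100
		200: {1_000},    // one input that must spend at locktime 200
	}

	// Each cluster's sweep fee rate is the average of its inputs' rates.
	for lt, rates := range locktimes {
		var sum uint64
		for _, r := range rates {
			sum += r
		}
		fmt.Printf("locktime=%d sweepFeeRate=%d sat/kw\n",
			lt, sum/uint64(len(rates)))
	}
}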
// clusterBySweepFeeRate takes the set of pending inputs within the UtxoSweeper
// and clusters those together with similar fee rates. Each cluster contains a
// sweep fee rate, which is determined by calculating the average fee rate of
// all inputs within that cluster.
func (s *SimpleAggregator) clusterBySweepFeeRate(
inputs InputsMap) []inputCluster {
bucketInputs := make(map[int]*bucketList)
inputFeeRates := make(map[wire.OutPoint]chainfee.SatPerKWeight)
// First, we'll group together all inputs with similar fee rates. This
// is done by determining the fee rate bucket they should belong in.
for op, input := range inputs {
feeRate, err := input.params.Fee.Estimate(
s.FeeEstimator, s.MaxFeeRate,
)
if err != nil {
log.Warnf("Skipping input %v: %v", op, err)
continue
}
// Only try to sweep inputs with an unconfirmed parent if the
// current sweep fee rate exceeds the parent tx fee rate. This
// assumes that such inputs are offered to the sweeper solely
// for the purpose of anchoring down the parent tx using cpfp.
parentTx := input.UnconfParent()
if parentTx != nil {
parentFeeRate :=
chainfee.SatPerKWeight(parentTx.Fee*1000) /
chainfee.SatPerKWeight(parentTx.Weight)
if parentFeeRate >= feeRate {
log.Debugf("Skipping cpfp input %v: "+
"fee_rate=%v, parent_fee_rate=%v", op,
feeRate, parentFeeRate)
continue
}
}
feeGroup := s.bucketForFeeRate(feeRate)
// Create a bucket list for this fee rate if there isn't one
// yet.
buckets, ok := bucketInputs[feeGroup]
if !ok {
buckets = &bucketList{}
bucketInputs[feeGroup] = buckets
}
// Request the bucket list to add this input. The bucket list
// will take into account exclusive group constraints.
buckets.add(input)
input.lastFeeRate = feeRate
inputFeeRates[op] = feeRate
}
// We'll then determine the sweep fee rate for each set of inputs by
// calculating the average fee rate of the inputs within each set.
inputClusters := make([]inputCluster, 0, len(bucketInputs))
for _, buckets := range bucketInputs {
for _, inputs := range buckets.buckets {
var sweepFeeRate chainfee.SatPerKWeight
for op := range inputs {
sweepFeeRate += inputFeeRates[op]
}
sweepFeeRate /= chainfee.SatPerKWeight(len(inputs))
inputClusters = append(inputClusters, inputCluster{
sweepFeeRate: sweepFeeRate,
inputs: inputs,
})
}
}
return inputClusters
}
// bucketForFeeRate determines the proper bucket for a fee rate. This is done
// in order to batch inputs with similar fee rates together.
func (s *SimpleAggregator) bucketForFeeRate(
feeRate chainfee.SatPerKWeight) int {
relayFeeRate := s.FeeEstimator.RelayFeePerKW()
// Create an isolated bucket for sweeps at the minimum fee rate. This
// is to prevent very small outputs (anchors) from becoming
// uneconomical if their fee rate would be averaged with higher fee
// rate inputs in a regular bucket.
if feeRate == relayFeeRate {
return 0
}
return 1 + int(feeRate-relayFeeRate)/s.FeeRateBucketSize
}
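
A self-contained sketch of the bucket index computation above, assuming a hypothetical relay fee rate of 250 sat/kw and the default bucket size of 10:

package main

import "fmt"

func main() {
	const (
		relayFeeRate = uint64(250) // assumed minimum relay fee, sat/kw
		bucketSize   = 10          // DefaultFeeRateBucketSize
	)

	// Mirrors bucketForFeeRate: bucket 0 is reserved for sweeps at
	// exactly the relay fee rate.
	bucketFor := func(feeRate uint64) int {
		if feeRate == relayFeeRate {
			return 0
		}
		return 1 + int(feeRate-relayFeeRate)/bucketSize
	}

	for _, rate := range []uint64{250, 251, 259, 260, 500} {
		fmt.Printf("feeRate=%d sat/kw -> bucket %d\n",
			rate, bucketFor(rate))
	}
}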
// mergeClusters attempts to merge cluster a and b if they are compatible. The
// new cluster will have the locktime set if a or b had a locktime set, and a
// sweep fee rate that is the maximum of a and b's. If the two clusters are not
// compatible, they will be returned unchanged.
func mergeClusters(a, b inputCluster) []inputCluster {
newCluster := inputCluster{}
switch {
// Incompatible locktimes, return the sets without merging them.
case a.lockTime != nil && b.lockTime != nil &&
*a.lockTime != *b.lockTime:
return []inputCluster{a, b}
case a.lockTime != nil:
newCluster.lockTime = a.lockTime
case b.lockTime != nil:
newCluster.lockTime = b.lockTime
}
if a.sweepFeeRate > b.sweepFeeRate {
newCluster.sweepFeeRate = a.sweepFeeRate
} else {
newCluster.sweepFeeRate = b.sweepFeeRate
}
newCluster.inputs = make(InputsMap)
for op, in := range a.inputs {
newCluster.inputs[op] = in
}
for op, in := range b.inputs {
newCluster.inputs[op] = in
}
return []inputCluster{newCluster}
}
// zipClusters merges pairwise clusters from as and bs such that cluster a from
// as is merged with a cluster from bs that has at least the fee rate of a.
// This is to ensure we don't delay confirmation by decreasing the fee rate (the
// lock time inputs are typically second level HTLC transactions, that are time
// sensitive).
func zipClusters(as, bs []inputCluster) []inputCluster {
// Sort the clusters by decreasing fee rates.
sort.Slice(as, func(i, j int) bool {
return as[i].sweepFeeRate >
as[j].sweepFeeRate
})
sort.Slice(bs, func(i, j int) bool {
return bs[i].sweepFeeRate >
bs[j].sweepFeeRate
})
var (
finalClusters []inputCluster
j int
)
// Go through each cluster in as, and merge with the next one from bs
// if it has at least the fee rate needed.
for i := range as {
a := as[i]
switch {
// If the fee rate for the next one from bs is at least a's, we
// merge.
case j < len(bs) && bs[j].sweepFeeRate >= a.sweepFeeRate:
merged := mergeClusters(a, bs[j])
finalClusters = append(finalClusters, merged...)
// Increment j for the next round.
j++
// We did not merge, meaning all the remaining clusters from bs
// have lower fee rate. Instead we add a directly to the final
// clusters.
default:
finalClusters = append(finalClusters, a)
}
}
// Add any remaining clusters from bs.
for ; j < len(bs); j++ {
b := bs[j]
finalClusters = append(finalClusters, b)
}
return finalClusters
}
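
A simplified, stand-alone version of the zip step above that keeps only the fee rates, so the pairing rule is easy to see (as stands in for the locktime clusters, bs for the fee-rate clusters):

package main

import (
	"fmt"
	"sort"
)

func main() {
	// Hypothetical cluster fee rates in sat/kw, mirroring the
	// "zip A into 3xB" case from the tests.
	as := []uint64{2500}
	bs := []uint64{3000, 2000, 1000}

	sort.Slice(as, func(i, j int) bool { return as[i] > as[j] })
	sort.Slice(bs, func(i, j int) bool { return bs[i] > bs[j] })

	var merged []uint64
	j := 0
	for _, a := range as {
		// Only merge with a fee cluster whose rate is at least a's,
		// so a locktime cluster never has its fee rate lowered.
		if j < len(bs) && bs[j] >= a {
			rate := a
			if bs[j] > rate {
				rate = bs[j]
			}
			merged = append(merged, rate)
			j++
			continue
		}
		merged = append(merged, a)
	}

	// Any remaining fee clusters are kept as-is.
	merged = append(merged, bs[j:]...)

	fmt.Println(merged) // [3000 2000 1000]
}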
// BudgetAggregator is a budget-based aggregator that creates clusters based on
// deadlines and budgets of inputs.
type BudgetAggregator struct {


@@ -3,430 +3,21 @@ package sweep
import (
"bytes"
"errors"
"reflect"
"sort"
"testing"
"github.com/btcsuite/btcd/btcutil"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/wire"
"github.com/davecgh/go-spew/spew"
"github.com/lightningnetwork/lnd/fn"
"github.com/lightningnetwork/lnd/input"
"github.com/lightningnetwork/lnd/lnwallet/chainfee"
"github.com/stretchr/testify/require"
)
//nolint:lll
var (
testInputsA = InputsMap{
wire.OutPoint{Hash: chainhash.Hash{}, Index: 0}: &SweeperInput{},
wire.OutPoint{Hash: chainhash.Hash{}, Index: 1}: &SweeperInput{},
wire.OutPoint{Hash: chainhash.Hash{}, Index: 2}: &SweeperInput{},
}
testInputsB = InputsMap{
wire.OutPoint{Hash: chainhash.Hash{}, Index: 10}: &SweeperInput{},
wire.OutPoint{Hash: chainhash.Hash{}, Index: 11}: &SweeperInput{},
wire.OutPoint{Hash: chainhash.Hash{}, Index: 12}: &SweeperInput{},
}
testInputsC = InputsMap{
wire.OutPoint{Hash: chainhash.Hash{}, Index: 0}: &SweeperInput{},
wire.OutPoint{Hash: chainhash.Hash{}, Index: 1}: &SweeperInput{},
wire.OutPoint{Hash: chainhash.Hash{}, Index: 2}: &SweeperInput{},
wire.OutPoint{Hash: chainhash.Hash{}, Index: 10}: &SweeperInput{},
wire.OutPoint{Hash: chainhash.Hash{}, Index: 11}: &SweeperInput{},
wire.OutPoint{Hash: chainhash.Hash{}, Index: 12}: &SweeperInput{},
}
testHeight = int32(800000)
)
// TestMergeClusters checks that we can properly merge clusters together
// according to their required locktime.
func TestMergeClusters(t *testing.T) {
t.Parallel()
lockTime1 := uint32(100)
lockTime2 := uint32(200)
testCases := []struct {
name string
a inputCluster
b inputCluster
res []inputCluster
}{
{
name: "max fee rate",
a: inputCluster{
sweepFeeRate: 5000,
inputs: testInputsA,
},
b: inputCluster{
sweepFeeRate: 7000,
inputs: testInputsB,
},
res: []inputCluster{
{
sweepFeeRate: 7000,
inputs: testInputsC,
},
},
},
{
name: "same locktime",
a: inputCluster{
lockTime: &lockTime1,
sweepFeeRate: 5000,
inputs: testInputsA,
},
b: inputCluster{
lockTime: &lockTime1,
sweepFeeRate: 7000,
inputs: testInputsB,
},
res: []inputCluster{
{
lockTime: &lockTime1,
sweepFeeRate: 7000,
inputs: testInputsC,
},
},
},
{
name: "diff locktime",
a: inputCluster{
lockTime: &lockTime1,
sweepFeeRate: 5000,
inputs: testInputsA,
},
b: inputCluster{
lockTime: &lockTime2,
sweepFeeRate: 7000,
inputs: testInputsB,
},
res: []inputCluster{
{
lockTime: &lockTime1,
sweepFeeRate: 5000,
inputs: testInputsA,
},
{
lockTime: &lockTime2,
sweepFeeRate: 7000,
inputs: testInputsB,
},
},
},
}
for _, test := range testCases {
merged := mergeClusters(test.a, test.b)
if !reflect.DeepEqual(merged, test.res) {
t.Fatalf("[%s] unexpected result: %v",
test.name, spew.Sdump(merged))
}
}
}
// TestZipClusters tests that we can merge lists of inputs clusters correctly.
func TestZipClusters(t *testing.T) {
t.Parallel()
createCluster := func(inp InputsMap,
f chainfee.SatPerKWeight) inputCluster {
return inputCluster{
sweepFeeRate: f,
inputs: inp,
}
}
testCases := []struct {
name string
as []inputCluster
bs []inputCluster
res []inputCluster
}{
{
name: "merge A into B",
as: []inputCluster{
createCluster(testInputsA, 5000),
},
bs: []inputCluster{
createCluster(testInputsB, 7000),
},
res: []inputCluster{
createCluster(testInputsC, 7000),
},
},
{
name: "A can't merge with B",
as: []inputCluster{
createCluster(testInputsA, 7000),
},
bs: []inputCluster{
createCluster(testInputsB, 5000),
},
res: []inputCluster{
createCluster(testInputsA, 7000),
createCluster(testInputsB, 5000),
},
},
{
name: "empty bs",
as: []inputCluster{
createCluster(testInputsA, 7000),
},
bs: []inputCluster{},
res: []inputCluster{
createCluster(testInputsA, 7000),
},
},
{
name: "empty as",
as: []inputCluster{},
bs: []inputCluster{
createCluster(testInputsB, 5000),
},
res: []inputCluster{
createCluster(testInputsB, 5000),
},
},
{
name: "zip 3xA into 3xB",
as: []inputCluster{
createCluster(testInputsA, 5000),
createCluster(testInputsA, 5000),
createCluster(testInputsA, 5000),
},
bs: []inputCluster{
createCluster(testInputsB, 7000),
createCluster(testInputsB, 7000),
createCluster(testInputsB, 7000),
},
res: []inputCluster{
createCluster(testInputsC, 7000),
createCluster(testInputsC, 7000),
createCluster(testInputsC, 7000),
},
},
{
name: "zip A into 3xB",
as: []inputCluster{
createCluster(testInputsA, 2500),
},
bs: []inputCluster{
createCluster(testInputsB, 3000),
createCluster(testInputsB, 2000),
createCluster(testInputsB, 1000),
},
res: []inputCluster{
createCluster(testInputsC, 3000),
createCluster(testInputsB, 2000),
createCluster(testInputsB, 1000),
},
},
}
for _, test := range testCases {
zipped := zipClusters(test.as, test.bs)
if !reflect.DeepEqual(zipped, test.res) {
t.Fatalf("[%s] unexpected result: %v",
test.name, spew.Sdump(zipped))
}
}
}
// TestClusterByLockTime tests that the method clusterByLockTime works as
// expected.
func TestClusterByLockTime(t *testing.T) {
t.Parallel()
// Create a mock FeePreference.
mockFeePref := &MockFeePreference{}
// Create a test param with a dummy fee preference. This is needed so
// `feeRateForPreference` won't throw an error.
param := Params{Fee: mockFeePref}
// We begin the test by creating three clusters of inputs, the first
// cluster has a locktime of 1, the second has a locktime of 2, and the
// final has no locktime.
lockTime1 := uint32(1)
lockTime2 := uint32(2)
// Create cluster one, which has a locktime of 1.
input1LockTime1 := &input.MockInput{}
input2LockTime1 := &input.MockInput{}
input1LockTime1.On("RequiredLockTime").Return(lockTime1, true)
input2LockTime1.On("RequiredLockTime").Return(lockTime1, true)
// Create cluster two, which has a locktime of 2.
input3LockTime2 := &input.MockInput{}
input4LockTime2 := &input.MockInput{}
input3LockTime2.On("RequiredLockTime").Return(lockTime2, true)
input4LockTime2.On("RequiredLockTime").Return(lockTime2, true)
// Create cluster three, which has no locktime.
input5NoLockTime := &input.MockInput{}
input6NoLockTime := &input.MockInput{}
input5NoLockTime.On("RequiredLockTime").Return(uint32(0), false)
input6NoLockTime.On("RequiredLockTime").Return(uint32(0), false)
// With the inner Input being mocked, we can now create the pending
// inputs.
input1 := &SweeperInput{Input: input1LockTime1, params: param}
input2 := &SweeperInput{Input: input2LockTime1, params: param}
input3 := &SweeperInput{Input: input3LockTime2, params: param}
input4 := &SweeperInput{Input: input4LockTime2, params: param}
input5 := &SweeperInput{Input: input5NoLockTime, params: param}
input6 := &SweeperInput{Input: input6NoLockTime, params: param}
// Create the pending inputs map, which will be passed to the method
// under test.
//
// NOTE: we don't care about the actual outpoint values as long as they are
// unique.
inputs := InputsMap{
wire.OutPoint{Index: 1}: input1,
wire.OutPoint{Index: 2}: input2,
wire.OutPoint{Index: 3}: input3,
wire.OutPoint{Index: 4}: input4,
wire.OutPoint{Index: 5}: input5,
wire.OutPoint{Index: 6}: input6,
}
// Create expected clusters so we can shorten the line length in the
// test cases below.
cluster1 := InputsMap{
wire.OutPoint{Index: 1}: input1,
wire.OutPoint{Index: 2}: input2,
}
cluster2 := InputsMap{
wire.OutPoint{Index: 3}: input3,
wire.OutPoint{Index: 4}: input4,
}
// cluster3 should be the remaining inputs since they don't have
// locktime.
cluster3 := InputsMap{
wire.OutPoint{Index: 5}: input5,
wire.OutPoint{Index: 6}: input6,
}
const (
// Set the min fee rate to be 1000 sat/kw.
minFeeRate = chainfee.SatPerKWeight(1000)
// Set the max fee rate to be 10,000 sat/kw.
maxFeeRate = chainfee.SatPerKWeight(10_000)
)
// Create a test aggregator.
s := NewSimpleUtxoAggregator(nil, maxFeeRate, 100)
testCases := []struct {
name string
// setupMocker takes a testing fee rate and makes a mocker over
// `Estimate` that always returns the testing fee rate.
setupMocker func()
testFeeRate chainfee.SatPerKWeight
expectedClusters []inputCluster
expectedRemainingInputs InputsMap
}{
{
// Test a successful case where the locktime clusters
// are created and the no-locktime cluster is returned
// as the remaining inputs.
name: "successfully create clusters",
setupMocker: func() {
// Expect the four inputs with locktime to call
// this method.
mockFeePref.On("Estimate", nil, maxFeeRate).
Return(minFeeRate+1, nil).Times(4)
},
// Use a fee rate above the min value so we don't hit
// an error when performing fee estimation.
//
// TODO(yy): we should customize the returned fee rate
// for each input to further test the averaging logic.
// Or we can split the method into two, one for
// grouping the clusters and the other for averaging
// the fee rates so it's easier to be tested.
testFeeRate: minFeeRate + 1,
expectedClusters: []inputCluster{
{
lockTime: &lockTime1,
sweepFeeRate: minFeeRate + 1,
inputs: cluster1,
},
{
lockTime: &lockTime2,
sweepFeeRate: minFeeRate + 1,
inputs: cluster2,
},
},
expectedRemainingInputs: cluster3,
},
{
// Test that the input is skipped when the fee
// estimation returns an error.
name: "error from fee estimation",
setupMocker: func() {
mockFeePref.On("Estimate", nil, maxFeeRate).
Return(chainfee.SatPerKWeight(0),
errors.New("dummy")).Times(4)
},
// Use a fee rate below the min value so we hit an
// error when performing fee estimation.
testFeeRate: minFeeRate - 1,
expectedClusters: []inputCluster{},
// Remaining inputs should stay untouched.
expectedRemainingInputs: cluster3,
},
}
//nolint:paralleltest
for _, tc := range testCases {
tc := tc
t.Run(tc.name, func(t *testing.T) {
// Apply the test fee rate so `feeRateForPreference` is
// mocked to return the specified value.
tc.setupMocker()
// Assert the mocked methods are called as expected.
defer mockFeePref.AssertExpectations(t)
// Call the method under test.
clusters, remainingInputs := s.clusterByLockTime(inputs)
// Sort by locktime as the order is not guaranteed.
sort.Slice(clusters, func(i, j int) bool {
return *clusters[i].lockTime <
*clusters[j].lockTime
})
// Validate the values are returned as expected.
require.Equal(t, tc.expectedClusters, clusters)
require.Equal(t, tc.expectedRemainingInputs,
remainingInputs,
)
// Assert the mocked methods are called as expected.
input1LockTime1.AssertExpectations(t)
input2LockTime1.AssertExpectations(t)
input3LockTime2.AssertExpectations(t)
input4LockTime2.AssertExpectations(t)
input5NoLockTime.AssertExpectations(t)
input6NoLockTime.AssertExpectations(t)
})
}
}
// TestBudgetAggregatorFilterInputs checks that inputs with low budget are
// filtered out.
func TestBudgetAggregatorFilterInputs(t *testing.T) {


@@ -1,56 +0,0 @@
package sweep
// bucket contains a set of inputs that are not mutually exclusive.
type bucket InputsMap
// tryAdd tries to add a new input to this bucket.
func (b bucket) tryAdd(input *SweeperInput) bool {
exclusiveGroup := input.params.ExclusiveGroup
if exclusiveGroup != nil {
for _, input := range b {
existingGroup := input.params.ExclusiveGroup
// Don't add an exclusive group input if other inputs
// are non-exclusive. The exclusive group input may be
// invalid (for example in the case of commitment
// anchors) and could thereby block sweeping of the
// other inputs.
if existingGroup == nil {
return false
}
// Don't combine inputs from the same exclusive group.
// Because only one input is valid, this may result in
// txes that are always invalid.
if *existingGroup == *exclusiveGroup {
return false
}
}
}
b[input.OutPoint()] = input
return true
}
// bucketList is a list of buckets that contain non-mutually exclusive inputs.
type bucketList struct {
buckets []bucket
}
// add adds a new input. If the input is not accepted by any of the existing
// buckets, a new bucket will be created.
func (b *bucketList) add(input *SweeperInput) {
for _, existingBucket := range b.buckets {
if existingBucket.tryAdd(input) {
return
}
}
// Create a new bucket and add the input. It is not necessary to check
// the return value of tryAdd because it will always succeed on an empty
// bucket.
newBucket := make(bucket)
newBucket.tryAdd(input)
b.buckets = append(b.buckets, newBucket)
}
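
A self-contained sketch of the exclusive-group rule enforced by tryAdd above, using a uint64 pointer as a stand-in for the input's ExclusiveGroup parameter (names and values are hypothetical):

package main

import "fmt"

// fakeInput is a stand-in for SweeperInput; group plays the role of the
// params.ExclusiveGroup pointer.
type fakeInput struct {
	name  string
	group *uint64
}

// tryAdd mirrors bucket.tryAdd above: an exclusive-group input is rejected
// if the bucket already holds a non-exclusive input, or another input from
// the same exclusive group.
func tryAdd(bucket []fakeInput, in fakeInput) ([]fakeInput, bool) {
	if in.group != nil {
		for _, existing := range bucket {
			if existing.group == nil {
				return bucket, false
			}
			if *existing.group == *in.group {
				return bucket, false
			}
		}
	}
	return append(bucket, in), true
}

func main() {
	group := uint64(1)
	var bucket []fakeInput

	bucket, _ = tryAdd(bucket, fakeInput{name: "anchor-a", group: &group})
	_, ok := tryAdd(bucket, fakeInput{name: "anchor-b", group: &group})

	// Two anchors from the same exclusive group never share a bucket.
	fmt.Println(ok) // false
}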


@@ -2,14 +2,17 @@ package sweep
import (
"fmt"
"sync/atomic"
"testing"
"time"
"github.com/btcsuite/btcd/btcutil"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/rpcclient"
"github.com/btcsuite/btcd/wire"
"github.com/lightningnetwork/lnd/chainntnfs"
"github.com/lightningnetwork/lnd/input"
"github.com/lightningnetwork/lnd/keychain"
"github.com/lightningnetwork/lnd/lnwallet"
"github.com/lightningnetwork/lnd/lnwallet/chainfee"
"github.com/stretchr/testify/mock"
@@ -25,8 +28,37 @@ var (
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
}
testInputCount atomic.Uint64
)
func createTestInput(value int64,
witnessType input.WitnessType) input.BaseInput {
hash := chainhash.Hash{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
byte(testInputCount.Add(1))}
input := input.MakeBaseInput(
&wire.OutPoint{
Hash: hash,
},
witnessType,
&input.SignDescriptor{
Output: &wire.TxOut{
Value: value,
},
KeyDesc: keychain.KeyDescriptor{
PubKey: testPubKey,
},
},
0,
nil,
)
return input
}
// TestBumpResultValidate tests the validate method of the BumpResult struct.
func TestBumpResultValidate(t *testing.T) {
t.Parallel()


@@ -1,10 +1,6 @@
package sweep
import (
"sync"
"testing"
"time"
"github.com/btcsuite/btcd/btcutil"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/wire"
@@ -15,251 +11,6 @@ import (
"github.com/stretchr/testify/mock"
)
// mockBackend simulates a chain backend for realistic behaviour in unit tests
// around double spends.
type mockBackend struct {
t *testing.T
lock sync.Mutex
notifier *MockNotifier
confirmedSpendInputs map[wire.OutPoint]struct{}
unconfirmedTxes map[chainhash.Hash]*wire.MsgTx
unconfirmedSpendInputs map[wire.OutPoint]struct{}
publishChan chan wire.MsgTx
walletUtxos []*lnwallet.Utxo
utxoCnt int
}
func newMockBackend(t *testing.T, notifier *MockNotifier) *mockBackend {
return &mockBackend{
t: t,
notifier: notifier,
unconfirmedTxes: make(map[chainhash.Hash]*wire.MsgTx),
confirmedSpendInputs: make(map[wire.OutPoint]struct{}),
unconfirmedSpendInputs: make(map[wire.OutPoint]struct{}),
publishChan: make(chan wire.MsgTx, 2),
}
}
func (b *mockBackend) BackEnd() string {
return "mockbackend"
}
func (b *mockBackend) CheckMempoolAcceptance(tx *wire.MsgTx) error {
return nil
}
func (b *mockBackend) publishTransaction(tx *wire.MsgTx) error {
b.lock.Lock()
defer b.lock.Unlock()
txHash := tx.TxHash()
if _, ok := b.unconfirmedTxes[txHash]; ok {
// Tx already exists
testLog.Tracef("mockBackend duplicate tx %v", tx.TxHash())
return lnwallet.ErrDoubleSpend
}
for _, in := range tx.TxIn {
if _, ok := b.unconfirmedSpendInputs[in.PreviousOutPoint]; ok {
// Double spend
testLog.Tracef("mockBackend double spend tx %v",
tx.TxHash())
return lnwallet.ErrDoubleSpend
}
if _, ok := b.confirmedSpendInputs[in.PreviousOutPoint]; ok {
// Already included in block
testLog.Tracef("mockBackend already in block tx %v",
tx.TxHash())
return lnwallet.ErrDoubleSpend
}
}
b.unconfirmedTxes[txHash] = tx
for _, in := range tx.TxIn {
b.unconfirmedSpendInputs[in.PreviousOutPoint] = struct{}{}
}
testLog.Tracef("mockBackend publish tx %v", tx.TxHash())
return nil
}
func (b *mockBackend) PublishTransaction(tx *wire.MsgTx, _ string) error {
log.Tracef("Publishing tx %v", tx.TxHash())
err := b.publishTransaction(tx)
select {
case b.publishChan <- *tx:
case <-time.After(defaultTestTimeout):
b.t.Fatalf("unexpected tx published")
}
return err
}
func (b *mockBackend) ListUnspentWitnessFromDefaultAccount(minConfs,
maxConfs int32) ([]*lnwallet.Utxo, error) {
b.lock.Lock()
defer b.lock.Unlock()
// Each time we list outputs, we increment the utxo counter to
// ensure we don't return the same outpoint every time.
b.utxoCnt++
for i := range b.walletUtxos {
b.walletUtxos[i].OutPoint.Hash[0] = byte(b.utxoCnt)
}
return b.walletUtxos, nil
}
func (b *mockBackend) WithCoinSelectLock(f func() error) error {
return f()
}
func (b *mockBackend) deleteUnconfirmed(txHash chainhash.Hash) {
b.lock.Lock()
defer b.lock.Unlock()
tx, ok := b.unconfirmedTxes[txHash]
if !ok {
// Tx does not exist
testLog.Errorf("mockBackend delete tx not existing %v", txHash)
return
}
testLog.Tracef("mockBackend delete tx %v", tx.TxHash())
delete(b.unconfirmedTxes, txHash)
for _, in := range tx.TxIn {
delete(b.unconfirmedSpendInputs, in.PreviousOutPoint)
}
}
func (b *mockBackend) mine() {
b.lock.Lock()
defer b.lock.Unlock()
notifications := make(map[wire.OutPoint]*wire.MsgTx)
for _, tx := range b.unconfirmedTxes {
testLog.Tracef("mockBackend mining tx %v", tx.TxHash())
for _, in := range tx.TxIn {
b.confirmedSpendInputs[in.PreviousOutPoint] = struct{}{}
notifications[in.PreviousOutPoint] = tx
}
}
b.unconfirmedSpendInputs = make(map[wire.OutPoint]struct{})
b.unconfirmedTxes = make(map[chainhash.Hash]*wire.MsgTx)
for outpoint, tx := range notifications {
testLog.Tracef("mockBackend delivering spend ntfn for %v",
outpoint)
b.notifier.SpendOutpoint(outpoint, *tx)
}
}
func (b *mockBackend) isDone() bool {
return len(b.unconfirmedTxes) == 0
}
func (b *mockBackend) RemoveDescendants(*wire.MsgTx) error {
return nil
}
func (b *mockBackend) FetchTx(chainhash.Hash) (*wire.MsgTx, error) {
return nil, nil
}
func (b *mockBackend) CancelRebroadcast(tx chainhash.Hash) {
}
// GetTransactionDetails returns a detailed description of a tx given its
// transaction hash.
func (b *mockBackend) GetTransactionDetails(txHash *chainhash.Hash) (
*lnwallet.TransactionDetail, error) {
return nil, nil
}
// mockFeeEstimator implements a mock fee estimator. It closely resembles
// lnwallet.StaticFeeEstimator with the addition that fees can be changed for
// testing purposes in a thread safe manner.
//
// TODO(yy): replace it with chainfee.MockEstimator once it's merged.
type mockFeeEstimator struct {
feePerKW chainfee.SatPerKWeight
relayFee chainfee.SatPerKWeight
blocksToFee map[uint32]chainfee.SatPerKWeight
// A closure that when set is used instead of the
// mockFeeEstimator.EstimateFeePerKW method.
estimateFeePerKW func(numBlocks uint32) (chainfee.SatPerKWeight, error)
lock sync.Mutex
}
func newMockFeeEstimator(feePerKW,
relayFee chainfee.SatPerKWeight) *mockFeeEstimator {
return &mockFeeEstimator{
feePerKW: feePerKW,
relayFee: relayFee,
blocksToFee: make(map[uint32]chainfee.SatPerKWeight),
}
}
func (e *mockFeeEstimator) updateFees(feePerKW,
relayFee chainfee.SatPerKWeight) {
e.lock.Lock()
defer e.lock.Unlock()
e.feePerKW = feePerKW
e.relayFee = relayFee
}
func (e *mockFeeEstimator) EstimateFeePerKW(numBlocks uint32) (
chainfee.SatPerKWeight, error) {
e.lock.Lock()
defer e.lock.Unlock()
if e.estimateFeePerKW != nil {
return e.estimateFeePerKW(numBlocks)
}
if fee, ok := e.blocksToFee[numBlocks]; ok {
return fee, nil
}
return e.feePerKW, nil
}
func (e *mockFeeEstimator) RelayFeePerKW() chainfee.SatPerKWeight {
e.lock.Lock()
defer e.lock.Unlock()
return e.relayFee
}
func (e *mockFeeEstimator) Start() error {
return nil
}
func (e *mockFeeEstimator) Stop() error {
return nil
}
var _ chainfee.Estimator = (*mockFeeEstimator)(nil)
// MockSweeperStore is a mock implementation of sweeper store. This type is
// exported, because it is currently used in nursery tests too.
type MockSweeperStore struct {

File diff suppressed because it is too large.


@@ -14,23 +14,6 @@ import (
"github.com/lightningnetwork/lnd/lnwallet/chainfee"
)
// addConstraints defines the constraints to apply when adding an input.
type addConstraints uint8
const (
// constraintsRegular is for regular input sweeps that should have a positive
// yield.
constraintsRegular addConstraints = iota
// constraintsWallet is for wallet inputs that are only added to bring up the tx
// output value.
constraintsWallet
// constraintsForce is for inputs that should be swept even with a negative
// yield at the set fee rate.
constraintsForce
)
var (
// ErrNotEnoughInputs is returned when there are not enough wallet
// inputs to construct a non-dust change output for an input set.
@@ -83,443 +66,6 @@ type InputSet interface {
StartingFeeRate() fn.Option[chainfee.SatPerKWeight]
}
type txInputSetState struct {
// feeRate is the fee rate to use for the sweep transaction.
feeRate chainfee.SatPerKWeight
// maxFeeRate is the max allowed fee rate configured by the user.
maxFeeRate chainfee.SatPerKWeight
// inputTotal is the total value of all inputs.
inputTotal btcutil.Amount
// requiredOutput is the sum of the outputs committed to by the inputs.
requiredOutput btcutil.Amount
// changeOutput is the value of the change output. This will be what is
// left over after subtracting the requiredOutput and the tx fee from
// the inputTotal.
//
// NOTE: This might be below the dust limit, or even negative since it
// is the change remaining in case we pay the fee for a change output.
changeOutput btcutil.Amount
// inputs is the set of tx inputs.
inputs []input.Input
// walletInputTotal is the total value of inputs coming from the wallet.
walletInputTotal btcutil.Amount
// force indicates that this set must be swept even if the total yield
// is negative.
force bool
}
// weightEstimate is the (worst case) tx weight with the current set of
// inputs. It takes a parameter whether to add a change output or not.
func (t *txInputSetState) weightEstimate(change bool) *weightEstimator {
weightEstimate := newWeightEstimator(t.feeRate, t.maxFeeRate)
for _, i := range t.inputs {
// Can ignore error, because it has already been checked when
// calculating the yields.
_ = weightEstimate.add(i)
r := i.RequiredTxOut()
if r != nil {
weightEstimate.addOutput(r)
}
}
// Add a change output to the weight estimate if requested.
if change {
weightEstimate.addP2TROutput()
}
return weightEstimate
}
// totalOutput is the total amount left for us after paying fees.
//
// NOTE: This might be dust.
func (t *txInputSetState) totalOutput() btcutil.Amount {
return t.requiredOutput + t.changeOutput
}
func (t *txInputSetState) clone() txInputSetState {
s := txInputSetState{
feeRate: t.feeRate,
inputTotal: t.inputTotal,
changeOutput: t.changeOutput,
requiredOutput: t.requiredOutput,
walletInputTotal: t.walletInputTotal,
force: t.force,
inputs: make([]input.Input, len(t.inputs)),
}
copy(s.inputs, t.inputs)
return s
}
// txInputSet is an object that accumulates tx inputs and keeps running counters
// on various properties of the tx.
type txInputSet struct {
txInputSetState
// maxInputs is the maximum number of inputs that will be accepted in
// the set.
maxInputs uint32
}
// Compile-time constraint to ensure txInputSet implements InputSet.
var _ InputSet = (*txInputSet)(nil)
// newTxInputSet constructs a new, empty input set.
func newTxInputSet(feePerKW, maxFeeRate chainfee.SatPerKWeight,
maxInputs uint32) *txInputSet {
state := txInputSetState{
feeRate: feePerKW,
maxFeeRate: maxFeeRate,
}
b := txInputSet{
maxInputs: maxInputs,
txInputSetState: state,
}
return &b
}
// Inputs returns the inputs that should be used to create a tx.
func (t *txInputSet) Inputs() []input.Input {
return t.inputs
}
// Budget gives the total amount that can be used as fees by this input set.
//
// NOTE: this field is only used for `BudgetInputSet`.
func (t *txInputSet) Budget() btcutil.Amount {
return t.totalOutput()
}
// DeadlineHeight gives the block height that this set must be confirmed by.
//
// NOTE: this field is only used for `BudgetInputSet`.
func (t *txInputSet) DeadlineHeight() int32 {
return 0
}
// StartingFeeRate returns the max starting fee rate found in the inputs.
//
// NOTE: this field is only used for `BudgetInputSet`.
func (t *txInputSet) StartingFeeRate() fn.Option[chainfee.SatPerKWeight] {
return fn.None[chainfee.SatPerKWeight]()
}
// NeedWalletInput returns true if the input set needs more wallet inputs.
func (t *txInputSet) NeedWalletInput() bool {
return !t.enoughInput()
}
// enoughInput returns true if we've accumulated enough inputs to pay the fees
// and have at least one output that meets the dust limit.
func (t *txInputSet) enoughInput() bool {
// If we have a change output above dust, then we certainly have enough
// inputs to the transaction.
if t.changeOutput >= lnwallet.DustLimitForSize(input.P2TRSize) {
return true
}
// We did not have enough input for a change output. Check if we have
// enough input to pay the fees for a transaction with no change
// output.
fee := t.weightEstimate(false).feeWithParent()
if t.inputTotal < t.requiredOutput+fee {
return false
}
// We could pay the fees, but we still need at least one output to be
// above the dust limit for the tx to be valid (we assume that these
// required outputs only get added if they are above dust)
for _, inp := range t.inputs {
if inp.RequiredTxOut() != nil {
return true
}
}
return false
}
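
A simplified numeric sketch of the decision made in enoughInput, with hypothetical amounts standing in for the weight-estimator and dust-limit calls:

package main

import "fmt"

func main() {
	const (
		dustLimit      = int64(330) // hypothetical P2TR dust limit, sats
		changeOutput   = int64(213) // running change value
		inputTotal     = int64(700) // sum of input values
		requiredOutput = int64(0)   // no required outputs in this sketch
		feeNoChange    = int64(439) // fee for a tx without a change output
		hasRequiredOut = false      // no input commits to a required TxOut
	)

	// Enough input if the change clears the dust limit, or if the fees
	// can be paid and some required output keeps the tx valid.
	enough := changeOutput >= dustLimit ||
		(inputTotal >= requiredOutput+feeNoChange && hasRequiredOut)

	fmt.Println(enough) // false: fees are covered but no non-dust output
}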
// addToState attempts to add a new input to the set. It returns the new set
// state if the input was accepted, or nil if it was rejected, e.g. because
// it decreases the tx output value after paying fees.
func (t *txInputSet) addToState(inp input.Input,
constraints addConstraints) *txInputSetState {
// Stop if max inputs is reached. Do not count additional wallet inputs,
// because we don't know in advance how many we may need.
if constraints != constraintsWallet &&
uint32(len(t.inputs)) >= t.maxInputs {
return nil
}
// If the input comes with a required tx out that is below dust, we
// won't add it.
//
// NOTE: only HtlcSecondLevelAnchorInput returns non-nil RequiredTxOut.
reqOut := inp.RequiredTxOut()
if reqOut != nil {
// Fetch the dust limit for this output.
dustLimit := lnwallet.DustLimitForSize(len(reqOut.PkScript))
if btcutil.Amount(reqOut.Value) < dustLimit {
log.Errorf("Rejected input=%v due to dust required "+
"output=%v, limit=%v", inp, reqOut.Value,
dustLimit)
// TODO(yy): we should not return here for force
// sweeps. This means when sending sweeping request,
// one must be careful to not create dust outputs. In
// an extreme rare case, where the
// minRelayTxFee/discardfee is increased when sending
// the request, what's considered non-dust at the
// caller side will be dust here, causing a force sweep
// to fail.
return nil
}
}
// Clone the current set state.
newSet := t.clone()
// Add the new input.
newSet.inputs = append(newSet.inputs, inp)
// Add the value of the new input.
value := btcutil.Amount(inp.SignDesc().Output.Value)
newSet.inputTotal += value
// Recalculate the tx fee.
fee := newSet.weightEstimate(true).feeWithParent()
// Calculate the new output value.
if reqOut != nil {
newSet.requiredOutput += btcutil.Amount(reqOut.Value)
}
// NOTE: `changeOutput` could be negative here if this input is using
// constraintsForce.
newSet.changeOutput = newSet.inputTotal - newSet.requiredOutput - fee
// Calculate the yield of this input from the change in total tx output
// value.
inputYield := newSet.totalOutput() - t.totalOutput()
switch constraints {
// Don't sweep inputs that cost us more to sweep than they give us.
case constraintsRegular:
if inputYield <= 0 {
log.Debugf("Rejected regular input=%v due to negative "+
"yield=%v", value, inputYield)
return nil
}
// For force adds, no further constraints apply.
//
// NOTE: because the inputs are sorted with force sweeps being placed
// at the start of the list, we should never see an input with
// constraintsForce come after an input with constraintsRegular. In
// other words, though we may have negative `changeOutput` from
// including force sweeps, `inputYield` should always increase when
// adding regular inputs.
case constraintsForce:
newSet.force = true
// We are attaching a wallet input to raise the tx output value above
// the dust limit.
case constraintsWallet:
// Skip this wallet input if adding it would lower the output
// value.
//
// TODO(yy): change to inputYield < 0 to allow sweeping for
// UTXO aggregation only?
if inputYield <= 0 {
log.Debugf("Rejected wallet input=%v due to negative "+
"yield=%v", value, inputYield)
return nil
}
// Calculate the total value that we spend in this tx from the
// wallet if we'd add this wallet input.
newSet.walletInputTotal += value
// In any case, we don't want to lose money by sweeping. If we
// don't get more out of the tx than we put in ourselves, do not
// add this wallet input. If there is at least one force sweep
// in the set, this no longer applies.
//
// We should only add wallet inputs to get the tx output value
// above the dust limit, otherwise we'd only burn into fees.
// This is guarded by tryAddWalletInputsIfNeeded.
//
// TODO(joostjager): Possibly require a max ratio between the
// value of the wallet input and what we get out of this
// transaction. To prevent attaching and locking a big utxo for
// very little benefit.
if newSet.force {
break
}
// TODO(yy): change from `>=` to `>` to allow non-negative
// sweeping - we won't gain more coins from this sweep, but
// aggregating small UTXOs.
if newSet.walletInputTotal >= newSet.totalOutput() {
// TODO(yy): further check this case as it seems we can
// never reach here because it'd mean `inputYield` is
// already <= 0?
log.Debugf("Rejecting wallet input of %v, because it "+
"would make a negative yielding transaction "+
"(%v)", value,
newSet.totalOutput()-newSet.walletInputTotal)
return nil
}
}
return &newSet
}
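
A worked numeric example (hypothetical amounts) of the change computation performed when an input with a required output is added:

package main

import "fmt"

func main() {
	// Hypothetical: a 1,000 sat input that also commits to a required
	// 1,000 sat output, with an assumed 610 sat fee once a change output
	// is included in the weight estimate.
	inputTotal := int64(1_000)
	requiredOutput := int64(1_000)
	feeWithChange := int64(610)

	changeOutput := inputTotal - requiredOutput - feeWithChange
	fmt.Println(changeOutput) // -610: negative until more value is added
}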
// add adds a new input to the set. It returns a bool indicating whether the
// input was added to the set. An input is rejected if it decreases the tx
// output value after paying fees.
func (t *txInputSet) add(input input.Input, constraints addConstraints) bool {
newState := t.addToState(input, constraints)
if newState == nil {
return false
}
t.txInputSetState = *newState
return true
}
// addPositiveYieldInputs adds sweepableInputs that have a positive yield to the
// input set. This function assumes that the list of inputs is sorted descending
// by yield.
//
// TODO(roasbeef): Consider including some negative yield inputs too to clean
// up the utxo set even if it costs us some fees up front. In the spirit of
// minimizing any negative externalities we cause for the Bitcoin system as a
// whole.
func (t *txInputSet) addPositiveYieldInputs(sweepableInputs []*SweeperInput) {
for i, inp := range sweepableInputs {
// Apply relaxed constraints for force sweeps.
constraints := constraintsRegular
if inp.parameters().Immediate {
constraints = constraintsForce
}
// Try to add the input to the transaction. If that doesn't
// succeed because it wouldn't increase the output value,
// return. Assuming inputs are sorted by yield, any further
// inputs wouldn't increase the output value either.
if !t.add(inp, constraints) {
var rem []input.Input
for j := i; j < len(sweepableInputs); j++ {
rem = append(rem, sweepableInputs[j])
}
log.Debugf("%d negative yield inputs not added to "+
"input set: %v", len(rem),
inputTypeSummary(rem))
return
}
log.Debugf("Added positive yield input %v to input set",
inputTypeSummary([]input.Input{inp}))
}
// We managed to add all inputs to the set.
}
// AddWalletInputs adds wallet inputs to the set until a non-dust output can be
// made. This non-dust output is either a change output or a required output.
// Return an error if there are not enough wallet inputs.
func (t *txInputSet) AddWalletInputs(wallet Wallet) error {
// Check the current output value and add wallet utxos if needed to
// push the output value to the lower limit.
if err := t.tryAddWalletInputsIfNeeded(wallet); err != nil {
return err
}
// If the output value of this block of inputs does not reach the dust
// limit, stop sweeping. Because of the sorting, continuing with the
// remaining inputs will only lead to sets with an even lower output
// value.
if !t.enoughInput() {
// The change output is always a p2tr here.
dl := lnwallet.DustLimitForSize(input.P2TRSize)
log.Debugf("Input set value %v (required=%v, change=%v) "+
"below dust limit of %v", t.totalOutput(),
t.requiredOutput, t.changeOutput, dl)
return ErrNotEnoughInputs
}
return nil
}
// tryAddWalletInputsIfNeeded retrieves utxos from the wallet and tries adding
// as many as required to bring the tx output value above the given minimum.
func (t *txInputSet) tryAddWalletInputsIfNeeded(wallet Wallet) error {
// If we already have enough to pay the transaction fees and have at
// least one output materialize, no action is needed.
if t.enoughInput() {
return nil
}
// Retrieve wallet utxos. Only consider confirmed utxos to prevent
// problems around RBF rules for unconfirmed inputs. This currently
// ignores the configured coin selection strategy.
utxos, err := wallet.ListUnspentWitnessFromDefaultAccount(
1, math.MaxInt32,
)
if err != nil {
return err
}
// Sort the UTXOs by putting smaller values at the start of the slice
// to avoid locking large UTXO for sweeping.
//
// TODO(yy): add more choices to CoinSelectionStrategy and use the
// configured value here.
sort.Slice(utxos, func(i, j int) bool {
return utxos[i].Value < utxos[j].Value
})
for _, utxo := range utxos {
input, err := createWalletTxInput(utxo)
if err != nil {
return err
}
// If the wallet input isn't positively-yielding at this fee
// rate, skip it.
if !t.add(input, constraintsWallet) {
continue
}
// Return if we've reached the minimum output amount.
if t.enoughInput() {
return nil
}
}
// We were not able to reach the minimum output amount.
return nil
}
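
A small stand-alone sketch of the UTXO ordering used above, where smaller wallet UTXOs are tried first so a large one is not locked for a marginal sweep (values are hypothetical):

package main

import (
	"fmt"
	"sort"
)

func main() {
	// Hypothetical wallet UTXO values in sats.
	utxos := []int64{250_000, 1_200, 40_000}

	// Smallest first, mirroring the sort in tryAddWalletInputsIfNeeded.
	sort.Slice(utxos, func(i, j int) bool {
		return utxos[i] < utxos[j]
	})

	fmt.Println(utxos) // [1200 40000 250000]
}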
// createWalletTxInput converts a wallet utxo into an object that can be added
// to the other inputs to sweep.
func createWalletTxInput(utxo *lnwallet.Utxo) (input.Input, error) {


@@ -14,237 +14,12 @@ import (
"github.com/stretchr/testify/require"
)
// TestTxInputSet tests adding various sized inputs to the set.
func TestTxInputSet(t *testing.T) {
const (
feeRate = 1000
maxInputs = 10
)
set := newTxInputSet(feeRate, 0, maxInputs)
// Create a 300 sat input. The fee to sweep this input to a P2WKH output
// is 439 sats. That means that this input yields -139 sats and we
// expect it not to be added.
if set.add(createP2WKHInput(300), constraintsRegular) {
t.Fatal("expected add of negatively yielding input to fail")
}
// A 700 sat input should be accepted into the set, because it yields
// positively.
if !set.add(createP2WKHInput(700), constraintsRegular) {
t.Fatal("expected add of positively yielding input to succeed")
}
fee := set.weightEstimate(true).feeWithParent()
require.Equal(t, btcutil.Amount(487), fee)
// The tx output should now be 700-487 = 213 sats. The dust limit isn't
// reached yet.
if set.totalOutput() != 213 {
t.Fatal("unexpected output value")
}
if set.enoughInput() {
t.Fatal("expected dust limit not yet to be reached")
}
// Add a 1000 sat input. This increases the tx fee to 760 sats. The tx
// output should now be 1000+700 - 760 = 940 sats.
if !set.add(createP2WKHInput(1000), constraintsRegular) {
t.Fatal("expected add of positively yielding input to succeed")
}
if set.totalOutput() != 940 {
t.Fatal("unexpected output value")
}
if !set.enoughInput() {
t.Fatal("expected dust limit to be reached")
}
}
// TestTxInputSetFromWallet tests adding a wallet input to a TxInputSet to reach
// the dust limit.
func TestTxInputSetFromWallet(t *testing.T) {
const (
feeRate = 500
maxInputs = 10
)
wallet := &mockWallet{}
set := newTxInputSet(feeRate, 0, maxInputs)
// Add a 500 sat input to the set. It yields positively, but doesn't
// reach the output dust limit.
if !set.add(createP2WKHInput(500), constraintsRegular) {
t.Fatal("expected add of positively yielding input to succeed")
}
if set.enoughInput() {
t.Fatal("expected dust limit not yet to be reached")
}
// Expect that adding a negative yield input fails.
if set.add(createP2WKHInput(50), constraintsRegular) {
t.Fatal("expected negative yield input add to fail")
}
// Force add the negative yield input. It should succeed.
if !set.add(createP2WKHInput(50), constraintsForce) {
t.Fatal("expected forced add to succeed")
}
err := set.AddWalletInputs(wallet)
if err != nil {
t.Fatal(err)
}
if !set.enoughInput() {
t.Fatal("expected dust limit to be reached")
}
}
// createP2WKHInput returns a P2WKH test input with the specified amount.
func createP2WKHInput(amt btcutil.Amount) input.Input {
input := createTestInput(int64(amt), input.WitnessKeyHash)
return &input
}
type mockWallet struct {
Wallet
}
func (m *mockWallet) ListUnspentWitnessFromDefaultAccount(minConfs, maxConfs int32) (
[]*lnwallet.Utxo, error) {
return []*lnwallet.Utxo{
{
AddressType: lnwallet.WitnessPubKey,
Value: 10000,
},
}, nil
}
type reqInput struct {
input.Input
txOut *wire.TxOut
}
func (r *reqInput) RequiredTxOut() *wire.TxOut {
return r.txOut
}
// TestTxInputSetRequiredOutput tests that the tx input set behaves as expected
// when we add inputs that have required tx outs.
func TestTxInputSetRequiredOutput(t *testing.T) {
const (
feeRate = 1000
maxInputs = 10
)
set := newTxInputSet(feeRate, 0, maxInputs)
// Attempt to add an input with a required txout below the dust limit.
// This should fail since we cannot trim such outputs.
inp := &reqInput{
Input: createP2WKHInput(500),
txOut: &wire.TxOut{
Value: 500,
PkScript: make([]byte, input.P2PKHSize),
},
}
require.False(t, set.add(inp, constraintsRegular),
"expected adding dust required tx out to fail")
// Create a 1000 sat input that also has a required TxOut of 1000 sat.
// The fee to sweep this input to a P2WKH output is 439 sats.
inp = &reqInput{
Input: createP2WKHInput(1000),
txOut: &wire.TxOut{
Value: 1000,
PkScript: make([]byte, input.P2WPKHSize),
},
}
require.True(t, set.add(inp, constraintsRegular), "failed adding input")
// The fee needed to pay for this input and output should be 439 sats.
fee := set.weightEstimate(false).feeWithParent()
require.Equal(t, btcutil.Amount(439), fee)
// Since the tx set currently pays no fees, we expect the current
// change to actually be negative, since this is what it would cost us
// in fees to add a change output.
feeWithChange := set.weightEstimate(true).feeWithParent()
if set.changeOutput != -feeWithChange {
t.Fatalf("expected negative change of %v, had %v",
-feeWithChange, set.changeOutput)
}
// This should also be reflected by not having enough input.
require.False(t, set.enoughInput())
// Get a weight estimate without change output, and add an additional
// input to it.
dummyInput := createP2WKHInput(1000)
weight := set.weightEstimate(false)
require.NoError(t, weight.add(dummyInput))
// Now we add an input that is large enough to pay the fee for the
// transaction without a change output, but not large enough to afford
// adding a change output.
extraInput1 := weight.feeWithParent() + 100
require.True(t, set.add(
createP2WKHInput(extraInput1), constraintsRegular,
), "expected add of positively yielding input to succeed")
// The change should be negative, since we would have to add a change
// output, which we cannot yet afford.
if set.changeOutput >= 0 {
t.Fatal("expected change to be negaitve")
}
// Even though we cannot afford a change output, the tx set is valid,
// since we can pay the fees without the change output.
require.True(t, set.enoughInput())
// Get another weight estimate, this time with a change output, and
// figure out how much we must add to afford a change output.
weight = set.weightEstimate(true)
require.NoError(t, weight.add(dummyInput))
// We add what is left to reach this value.
extraInput2 := weight.feeWithParent() - extraInput1 + 100
// Add this input, which should result in the change now being 100 sats.
require.True(t, set.add(
createP2WKHInput(extraInput2), constraintsRegular,
))
// The change should be 100, since this is what is left after paying
// fees in case of a change output.
change := set.changeOutput
if change != 100 {
t.Fatalf("expected change be 100, was %v", change)
}
// Even though the change output is dust, we have enough for fees, and
// we have an output, so it should be considered enough to craft a
// valid sweep transaction.
require.True(t, set.enoughInput())
// Finally we add an input that should push the change output above the
// dust limit.
weight = set.weightEstimate(true)
require.NoError(t, weight.add(dummyInput))
// We expect the change to be everything that is left after paying the tx
// fee.
extraInput3 := weight.feeWithParent() - extraInput1 - extraInput2 + 1000
require.True(t, set.add(createP2WKHInput(extraInput3), constraintsRegular))
change = set.changeOutput
if change != 1000 {
t.Fatalf("expected change to be %v, had %v", 1000, change)
}
require.True(t, set.enoughInput())
}
// TestNewBudgetInputSet checks `NewBudgetInputSet` correctly validates the
// supplied inputs and returns the error.
func TestNewBudgetInputSet(t *testing.T) {