Merge pull request #6868 from yyforyongyu/sweepr-config

server+lncfg: add sweeper config
Oliver Gugger 2022-09-01 10:05:11 +02:00 committed by GitHub
commit 9d04b0c3d9
6 changed files with 56 additions and 6 deletions

View File

@@ -41,6 +41,7 @@ import (
"github.com/lightningnetwork/lnd/lnwallet"
"github.com/lightningnetwork/lnd/routing"
"github.com/lightningnetwork/lnd/signal"
"github.com/lightningnetwork/lnd/sweep"
"github.com/lightningnetwork/lnd/tor"
)
@@ -439,6 +440,8 @@ type Config struct {
RemoteSigner *lncfg.RemoteSigner `group:"remotesigner" namespace:"remotesigner"`
+ Sweeper *lncfg.Sweeper `group:"sweeper" namespace:"sweeper"`
// LogWriter is the root logger that all of the daemon's subloggers are
// hooked up to.
LogWriter *build.RotatingLogWriter
@@ -635,6 +638,9 @@ func DefaultConfig() Config {
RemoteSigner: &lncfg.RemoteSigner{
Timeout: lncfg.DefaultRemoteSignerRPCTimeout,
},
+ Sweeper: &lncfg.Sweeper{
+ BatchWindowDuration: sweep.DefaultBatchWindowDuration,
+ },
}
}
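
For context, here is a minimal standalone sketch (assumed go-flags behaviour, not code from this PR) of how the `group:"sweeper" namespace:"sweeper"` tags above map the new field onto a `--sweeper.batchwindowduration` flag and the `[sweeper]` section used later in sample-lnd.conf:

```go
package main

import (
	"fmt"
	"time"

	flags "github.com/jessevdk/go-flags"
)

// Sweeper mirrors the shape of the new lncfg.Sweeper section.
type Sweeper struct {
	BatchWindowDuration time.Duration `long:"batchwindowduration" description:"Duration of the sweep batch window."`
}

// Config mirrors how lnd's Config embeds the section with group/namespace tags.
type Config struct {
	Sweeper *Sweeper `group:"sweeper" namespace:"sweeper"`
}

func main() {
	cfg := Config{Sweeper: &Sweeper{BatchWindowDuration: 30 * time.Second}}

	// The namespace tag is what produces the dotted flag name.
	_, err := flags.ParseArgs(&cfg, []string{"--sweeper.batchwindowduration=45s"})
	if err != nil {
		panic(err)
	}

	fmt.Println(cfg.Sweeper.BatchWindowDuration) // 45s
}
```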
@@ -1651,6 +1657,7 @@ func ValidateConfig(cfg Config, interceptor signal.Interceptor, fileParser,
cfg.HealthChecks,
cfg.RPCMiddleware,
cfg.RemoteSigner,
+ cfg.Sweeper,
)
if err != nil {
return nil, err

View File

@@ -60,6 +60,10 @@ minimum version needed to build the project.
* [Fix](https://github.com/lightningnetwork/lnd/pull/6875) mapslice cap out of
range error that occurs if the number of profiles is zero.
+ * [A new config option, `batchwindowduration`, has been added to
+ `sweeper`](https://github.com/lightningnetwork/lnd/pull/6868) to allow
+ customizing the sweeper batch window duration.
## Code Health
* [test: use `T.TempDir` to create temporary test

lncfg/sweeper.go (new file, 19 lines)
View File

@@ -0,0 +1,19 @@
package lncfg

import (
	"fmt"
	"time"
)

type Sweeper struct {
	BatchWindowDuration time.Duration `long:"batchwindowduration" description:"Duration of the sweep batch window. The sweep is held back during the batch window to allow more inputs to be added and thereby lower the fee per input."`
}

// Validate checks the values configured for the sweeper.
func (s *Sweeper) Validate() error {
	if s.BatchWindowDuration < 0 {
		return fmt.Errorf("batchwindowduration must be positive")
	}

	return nil
}
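
The new `Validate` method is what lets the section plug into the variadic validation call in `ValidateConfig` above. A self-contained sketch of that pattern (the `Validator` interface and `validateAll` helper shown here are assumptions mirroring lncfg's style, not copied from the PR):

```go
package main

import (
	"fmt"
	"time"
)

// Validator is the small interface each config section satisfies.
type Validator interface {
	Validate() error
}

// Sweeper mirrors the struct from lncfg/sweeper.go above.
type Sweeper struct {
	BatchWindowDuration time.Duration
}

func (s *Sweeper) Validate() error {
	if s.BatchWindowDuration < 0 {
		return fmt.Errorf("batchwindowduration must be positive")
	}
	return nil
}

// validateAll runs every section's Validate, stopping at the first error.
func validateAll(validators ...Validator) error {
	for _, v := range validators {
		if err := v.Validate(); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	fmt.Println(validateAll(&Sweeper{BatchWindowDuration: 30 * time.Second})) // <nil>
	fmt.Println(validateAll(&Sweeper{BatchWindowDuration: -time.Second}))     // error
}
```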

View File

@@ -1415,3 +1415,10 @@ litecoin.node=ltcd
; for neutrino nodes as it means they'll only maintain edges where both nodes are
; seen as being live from its PoV.
; routing.strictgraphpruning=true
+ [sweeper]
+ ; Duration of the sweep batch window. The sweep is held back during the batch
+ ; window to allow more inputs to be added and thereby lower the fee per input.
+ ; sweeper.batchwindowduration=30s
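
The value is a Go duration string, so forms like `500ms`, `30s`, or `1m30s` are all accepted; a quick sketch of the equivalent parsing (illustrative only, lnd itself parses the setting through its flag library):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	for _, v := range []string{"500ms", "30s", "1m30s"} {
		d, err := time.ParseDuration(v)
		if err != nil {
			panic(err)
		}
		fmt.Println(v, "->", d)
	}
}
```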

View File

@@ -1004,7 +1004,7 @@ func newServer(cfg *Config, listenAddrs []net.Addr,
Signer: cc.Wallet.Cfg.Signer,
Wallet: cc.Wallet,
NewBatchTimer: func() <-chan time.Time {
- return time.NewTimer(sweep.DefaultBatchWindowDuration).C
+ return time.NewTimer(cfg.Sweeper.BatchWindowDuration).C
},
Notifier: cc.ChainNotifier,
Store: sweeperStore,
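
With this change the batch timer is built from the configured window instead of the hard-coded default. A minimal sketch of the pattern (names such as `batchWindow` are stand-ins, not lnd identifiers):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Stand-in for cfg.Sweeper.BatchWindowDuration.
	batchWindow := 2 * time.Second

	// Same shape as the NewBatchTimer closure above: a fresh timer
	// channel is handed out for every sweep batch.
	newBatchTimer := func() <-chan time.Time {
		return time.NewTimer(batchWindow).C
	}

	timerC := newBatchTimer()
	fmt.Println("batch window open, collecting inputs...")
	<-timerC
	fmt.Println("batch window elapsed, sweeping")
}
```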

View File

@@ -654,6 +654,7 @@ func (s *UtxoSweeper) collector(blockEpochs <-chan *chainntnfs.BlockEpoch) {
params: input.params,
}
s.pendingInputs[outpoint] = pendInput
log.Tracef("input %v added to pendingInputs", outpoint)
// Start watching for spend of this input, either by us
// or the remote party.
@@ -674,6 +675,7 @@ func (s *UtxoSweeper) collector(blockEpochs <-chan *chainntnfs.BlockEpoch) {
if err := s.scheduleSweep(bestHeight); err != nil {
log.Errorf("schedule sweep: %v", err)
}
log.Tracef("input %v scheduled", outpoint)
// A spend of one of our inputs is detected. Signal sweep
// results to the caller(s).
@@ -1145,7 +1147,7 @@ func (s *UtxoSweeper) scheduleSweep(currentHeight int32) error {
// The timer is already ticking, no action needed for the sweep to
// happen.
if s.timer != nil {
log.Debugf("Timer still ticking")
log.Debugf("Timer still ticking at height=%v", currentHeight)
return nil
}
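
The early return above is what keeps a single batch window open at a time: scheduling a sweep while the timer is already running is a no-op. A simplified single-goroutine sketch of that guard (hypothetical `scheduler` type, not the sweeper's real state):

```go
package main

import (
	"fmt"
	"time"
)

type scheduler struct {
	timer *time.Timer
}

func (s *scheduler) schedule(window time.Duration) {
	// Mirrors the guard above: an open batch window means nothing to do.
	if s.timer != nil {
		fmt.Println("timer still ticking")
		return
	}
	s.timer = time.NewTimer(window)
	fmt.Println("batch window started")
}

func main() {
	s := &scheduler{}
	s.schedule(100 * time.Millisecond) // starts the window
	s.schedule(100 * time.Millisecond) // no-op, window already open

	<-s.timer.C // window elapsed
	s.timer = nil
	s.schedule(100 * time.Millisecond) // a new window can start now
}
```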
@@ -1338,9 +1340,14 @@ func (s *UtxoSweeper) sweep(inputs inputSet, feeRate chainfee.SatPerKWeight,
return fmt.Errorf("publish tx: %v", err)
}
- // Keep the output script in case of an error, so that it can be reused
- // for the next transaction and causes no address inflation.
- if err == nil {
+ // Otherwise log the error.
+ if err != nil {
+ log.Errorf("Publish sweep tx %v got error: %v", tx.TxHash(),
+ err)
+ } else {
+ // If there's no error, remove the output script. Otherwise
+ // keep it so that it can be reused for the next transaction
+ // and causes no address inflation.
s.currentOutputScript = nil
}
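
The reshuffled branch above keeps the original behaviour (only clear `currentOutputScript` after a successful publish) while logging failures. A standalone sketch of that script-reuse idea (hypothetical helpers, not the sweeper's code):

```go
package main

import (
	"errors"
	"fmt"
)

var currentOutputScript []byte

// publish stands in for publishing the sweep transaction.
func publish(fail bool) error {
	if fail {
		return errors.New("publish tx: mempool rejection")
	}
	return nil
}

func sweepOnce(fail bool) {
	if currentOutputScript == nil {
		// Pretend to derive a fresh script from the wallet.
		currentOutputScript = []byte{0x00, 0x14}
		fmt.Println("derived new output script")
	} else {
		fmt.Println("reusing cached output script")
	}

	if err := publish(fail); err != nil {
		// Keep the script so the retry spends to the same address.
		fmt.Println("publish failed:", err)
		return
	}

	// Success: forget the script so the next sweep gets a new address.
	currentOutputScript = nil
	fmt.Println("publish succeeded, script cleared")
}

func main() {
	sweepOnce(true)  // fails, script kept for reuse
	sweepOnce(false) // retry reuses the script, then clears it
}
```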
@@ -1375,6 +1382,11 @@ func (s *UtxoSweeper) sweep(inputs inputSet, feeRate chainfee.SatPerKWeight,
nextAttemptDelta)
if pi.publishAttempts >= s.cfg.MaxSweepAttempts {
log.Warnf("input %v: publishAttempts(%v) exceeds "+
"MaxSweepAttempts(%v), removed",
input.PreviousOutPoint, pi.publishAttempts,
s.cfg.MaxSweepAttempts)
// Signal result channels sweep result.
s.signalAndRemove(&input.PreviousOutPoint, Result{
Err: ErrTooManyAttempts,
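
The new warning fires when an input hits the publish-attempt cap and is dropped with `ErrTooManyAttempts`. A rough sketch of that cap (hypothetical names and constants, not the sweeper's real bookkeeping):

```go
package main

import (
	"errors"
	"fmt"
)

var errTooManyAttempts = errors.New("too many attempts")

const maxSweepAttempts = 3

type pendingInput struct {
	publishAttempts int
	resultChan      chan error
}

// recordFailedAttempt bumps the counter and, once the cap is hit,
// signals the waiter and reports that the input should be removed.
func recordFailedAttempt(pi *pendingInput) (removed bool) {
	pi.publishAttempts++
	if pi.publishAttempts < maxSweepAttempts {
		return false
	}

	fmt.Printf("publishAttempts(%d) exceeds MaxSweepAttempts(%d), removed\n",
		pi.publishAttempts, maxSweepAttempts)
	pi.resultChan <- errTooManyAttempts
	return true
}

func main() {
	pi := &pendingInput{resultChan: make(chan error, 1)}
	for !recordFailedAttempt(pi) {
	}
	fmt.Println("sweep result:", <-pi.resultChan)
}
```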
@@ -1390,7 +1402,8 @@ func (s *UtxoSweeper) waitForSpend(outpoint wire.OutPoint,
func (s *UtxoSweeper) waitForSpend(outpoint wire.OutPoint,
script []byte, heightHint uint32) (func(), error) {
log.Debugf("Wait for spend of %v", outpoint)
log.Tracef("Wait for spend of %v at heightHint=%v",
outpoint, heightHint)
spendEvent, err := s.cfg.Notifier.RegisterSpendNtfn(
&outpoint, script, heightHint,