wtclient: lint the package

This commit fixes some lints in the wtclient package. This is done so
that upcoming logic changes are easier to parse.
Elle Mouton 2023-02-02 11:31:24 +02:00
parent 85ec38f447
commit 530a8cae5d
4 changed files with 40 additions and 23 deletions
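
The diffs below are mechanical applications of lnd's formatting conventions, chiefly its 80-character line limit: long call expressions are wrapped with one argument per line, long function signatures are split across two lines, and long comments and format strings are re-flowed. A minimal sketch of the wrapped-call and split-signature patterns, using hypothetical names rather than anything from wtclient:

package main

import (
	"errors"
	"fmt"
)

// process stands in for any function whose one-line signature or call
// would overflow 80 columns; it is illustrative only.
func process(name string,
	height int) error {

	// lnd style: a signature split across lines is followed by a
	// blank line, and wrapped call arguments end with a trailing
	// comma before the closing parenthesis.
	if height < 0 {
		return errors.New(
			"height must be non-negative",
		)
	}

	return nil
}

func main() {
	if err := process("chan-0", 42); err != nil {
		fmt.Println(err)
	}
}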

watchtower/wtclient/backup_task.go

@@ -223,7 +223,9 @@ func (t *backupTask) bindSession(session *wtdb.ClientSessionBody) error {
 		// P2WKH output. Anchor channels spend a to-remote confirmed
 		// P2WSH output.
 		if t.chanType.HasAnchors() {
-			weightEstimate.AddWitnessInput(input.ToRemoteConfirmedWitnessSize)
+			weightEstimate.AddWitnessInput(
+				input.ToRemoteConfirmedWitnessSize,
+			)
 		} else {
 			weightEstimate.AddWitnessInput(input.P2WKHWitnessSize)
 		}
@@ -231,7 +233,8 @@ func (t *backupTask) bindSession(session *wtdb.ClientSessionBody) error {

 	// All justice transactions will either use segwit v0 (p2wkh + p2wsh)
 	// or segwit v1 (p2tr).
-	if err := addScriptWeight(&weightEstimate, t.sweepPkScript); err != nil {
+	err := addScriptWeight(&weightEstimate, t.sweepPkScript)
+	if err != nil {
 		return err
 	}

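
Worth noting about the change above: `if err := f(); err != nil` scopes err to the if statement, while the split form hoists err into the enclosing scope. That is harmless here, but it can force later declarations from := to =. A small sketch with hypothetical names:

package main

import "errors"

func mightFail() error { return errors.New("boom") }

func demo() error {
	// Inline form: err lives only inside the if statement.
	if err := mightFail(); err != nil {
		return err
	}

	// Split form, as in the diff above: err now exists in the
	// function scope, so a second check below must reuse it with =
	// instead of redeclaring it with :=.
	err := mightFail()
	if err != nil {
		return err
	}

	err = mightFail()
	return err
}

func main() { _ = demo() }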

watchtower/wtclient/client.go

@@ -34,8 +34,8 @@ const (
 	// a read before breaking out of a blocking read.
 	DefaultReadTimeout = 15 * time.Second

-	// DefaultWriteTimeout specifies the default duration we will wait during
-	// a write before breaking out of a blocking write.
+	// DefaultWriteTimeout specifies the default duration we will wait
+	// during a write before breaking out of a blocking write.
 	DefaultWriteTimeout = 15 * time.Second

 	// DefaultStatInterval specifies the default interval between logging
@@ -569,8 +569,11 @@ func (c *TowerClient) Start() error {
 		// committed but unacked state updates. This ensures that these
 		// sessions will be able to flush the committed updates after a
 		// restart.
+		fetchCommittedUpdates := c.cfg.DB.FetchSessionCommittedUpdates
 		for _, session := range c.candidateSessions {
-			committedUpdates, err := c.cfg.DB.FetchSessionCommittedUpdates(&session.ID)
+			committedUpdates, err := fetchCommittedUpdates(
+				&session.ID,
+			)
 			if err != nil {
 				returnErr = err
 				return
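
The refactor above binds `c.cfg.DB.FetchSessionCommittedUpdates` to a local before the loop purely to shorten the call site. This works because a Go method value captures its receiver at the point of assignment. A self-contained sketch (the DB type and method here are stand-ins, not lnd's API):

package main

import "fmt"

// DB is a stand-in for the client's database handle.
type DB struct{}

// FetchSessionCommittedUpdates mimics a long method name that pushes
// call sites past 80 columns.
func (d *DB) FetchSessionCommittedUpdates(id *int) (int, error) {
	return *id, nil
}

func main() {
	db := &DB{}

	// The method value captures db, so fetch(...) and
	// db.FetchSessionCommittedUpdates(...) are equivalent calls.
	fetch := db.FetchSessionCommittedUpdates

	id := 7
	n, err := fetch(&id)
	fmt.Println(n, err) // 7 <nil>
}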
@@ -799,8 +802,8 @@ func (c *TowerClient) RegisterChannel(chanID lnwire.ChannelID) error {
 // - client is force quit,
 // - justice transaction would create dust outputs when trying to abide by the
 //   negotiated policy, or
-// - breached outputs contain too little value to sweep at the target sweep fee
-//   rate.
+// - breached outputs contain too little value to sweep at the target sweep
+//   fee rate.
 func (c *TowerClient) BackupState(chanID *lnwire.ChannelID,
 	breachInfo *lnwallet.BreachRetribution,
 	chanType channeldb.ChannelType) error {
@@ -817,8 +820,8 @@ func (c *TowerClient) BackupState(chanID *lnwire.ChannelID,
 	height, ok := c.chanCommitHeights[*chanID]
 	if ok && breachInfo.RevokedStateNum <= height {
 		c.backupMu.Unlock()
-		c.log.Debugf("Ignoring duplicate backup for chanid=%v at height=%d",
-			chanID, breachInfo.RevokedStateNum)
+		c.log.Debugf("Ignoring duplicate backup for chanid=%v at "+
+			"height=%d", chanID, breachInfo.RevokedStateNum)
 		return nil
 	}

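
The wrapped Debugf call above relies on the fact that adjacent string constants joined with + are folded into a single constant at compile time, so splitting a format string never changes the log output; only the verb/argument pairing has to stay intact. For instance:

package main

import "fmt"

func main() {
	chanID, height := "0123ab", 42

	// These two calls print identical lines.
	fmt.Printf("Ignoring duplicate backup for chanid=%v at height=%d\n",
		chanID, height)
	fmt.Printf("Ignoring duplicate backup for chanid=%v at "+
		"height=%d\n", chanID, height)
}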
@@ -1496,7 +1499,9 @@ func (c *TowerClient) readMessage(peer wtserver.Peer) (wtwire.Message, error) {
 }

 // sendMessage sends a watchtower wire message to the target peer.
-func (c *TowerClient) sendMessage(peer wtserver.Peer, msg wtwire.Message) error {
+func (c *TowerClient) sendMessage(peer wtserver.Peer,
+	msg wtwire.Message) error {
+
 	// Encode the next wire message into the buffer.
 	// TODO(conner): use buffer pool
 	var b bytes.Buffer
@@ -1664,7 +1669,9 @@ func (c *TowerClient) handleNewTower(msg *newTowerMsg) error {
 // negotiations and from being used for any subsequent backups until it's added
 // again. If an address is provided, then this call only serves as a way of
 // removing the address from the watchtower instead.
-func (c *TowerClient) RemoveTower(pubKey *btcec.PublicKey, addr net.Addr) error {
+func (c *TowerClient) RemoveTower(pubKey *btcec.PublicKey,
+	addr net.Addr) error {
+
 	errChan := make(chan error, 1)

 	select {
@@ -1745,8 +1752,9 @@ func (c *TowerClient) handleStaleTower(msg *staleTowerMsg) error {
 	// If our active session queue corresponds to the stale tower, we'll
 	// proceed to negotiate a new one.
 	if c.sessionQueue != nil {
-		activeTower := c.sessionQueue.tower.IdentityKey.SerializeCompressed()
-		if bytes.Equal(pubKey, activeTower) {
+		towerKey := c.sessionQueue.tower.IdentityKey
+
+		if bytes.Equal(pubKey, towerKey.SerializeCompressed()) {
 			c.sessionQueue = nil
 		}
 	}

watchtower/wtclient/session_queue.go

@@ -45,7 +45,8 @@ type sessionQueueConfig struct {
 	Dial func(keychain.SingleKeyECDH, *lnwire.NetAddress) (wtserver.Peer,
 		error)

-	// SendMessage encodes, encrypts, and writes a message to the given peer.
+	// SendMessage encodes, encrypts, and writes a message to the given
+	// peer.
 	SendMessage func(wtserver.Peer, wtwire.Message) error

 	// ReadMessage receives, decypts, and decodes a message from the given
@@ -343,8 +344,8 @@ func (q *sessionQueue) drainBackups() {
 		// before attempting to dequeue any pending updates.
 		stateUpdate, isPending, backupID, err := q.nextStateUpdate()
 		if err != nil {
-			q.log.Errorf("SessionQueue(%v) unable to get next state "+
-				"update: %v", q.ID(), err)
+			q.log.Errorf("SessionQueue(%v) unable to get next "+
+				"state update: %v", q.ID(), err)
 			return
 		}


watchtower/wtclient/task_pipeline.go

@@ -116,8 +116,8 @@ func (q *taskPipeline) QueueBackupTask(task *backupTask) error {
 	default:
 	}

-	// Queue the new task and signal the queue's condition variable to wake up
-	// the queueManager for processing.
+	// Queue the new task and signal the queue's condition variable to wake
+	// up the queueManager for processing.
 	q.queue.PushBack(task)
 	q.queueCond.L.Unlock()

@@ -141,16 +141,21 @@ func (q *taskPipeline) queueManager() {

 		select {
 		case <-q.quit:
-			// Exit only after the queue has been fully drained.
+			// Exit only after the queue has been fully
+			// drained.
 			if q.queue.Len() == 0 {
 				q.queueCond.L.Unlock()
-				q.log.Debugf("Revoked state pipeline flushed.")
+				q.log.Debugf("Revoked state pipeline " +
+					"flushed.")
+
 				return
 			}

 		case <-q.forceQuit:
 			q.queueCond.L.Unlock()
-			q.log.Debugf("Revoked state pipeline force quit.")
+			q.log.Debugf("Revoked state pipeline force " +
+				"quit.")
+
 			return

 		default:
@@ -164,8 +169,8 @@ func (q *taskPipeline) queueManager() {

 		select {

-		// Backup task submitted to dispatcher. We don't select on quit to
-		// ensure that we still drain tasks while shutting down.
+		// Backup task submitted to dispatcher. We don't select on quit
+		// to ensure that we still drain tasks while shutting down.
 		case q.newBackupTasks <- task:

 		// Force quit, return immediately to allow the client to exit.
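
The comment in the last hunk captures a deliberate shutdown design: the send select omits the quit channel so that already-queued tasks still drain during a graceful stop, while forceQuit aborts immediately. A minimal sketch of that pattern, independent of the taskPipeline types:

package main

import "fmt"

// dispatch mirrors the queueManager idea: no quit case in the send
// select, so queued tasks keep draining on graceful shutdown; only
// forceQuit interrupts mid-drain.
func dispatch(tasks []string, out chan<- string, forceQuit <-chan struct{}) {
	defer close(out)

	for _, t := range tasks {
		select {
		case out <- t:
		case <-forceQuit:
			return
		}
	}
}

func main() {
	out := make(chan string)
	go dispatch([]string{"a", "b"}, out, make(chan struct{}))

	for t := range out {
		fmt.Println(t)
	}
}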