peer: fix last-mile settle stalling in concurrent multi-hop setting
This commit fixes an issue that would at times cause the htlcManager managing the final-hop link in an HTLC flow to stall when settling. Previously, a case would arise wherein a set of HTLCs had been settled, but the settles were never properly committed to in the remote node's commitment transaction. This wasn't an issue for HTLCs which had been added but not yet cleared, as that batch was already tracked independently. To fix the issue, we now track pending HTLC settles independently as well, using a dedicated counter. This is a temporary fix, as noted in a TODO within this commit.
This commit is contained in:
parent 041f3f1e98
commit e7420edd44

peer.go (16 lines changed)
@@ -1172,6 +1172,10 @@ type commitmentState struct {
 	// channel update log, but not yet committed to latest commitment.
 	pendingBatch []*pendingPayment
 
+	// pendingSettle is counter which tracks the current number of settles
+	// that have been sent, but not yet committed to the commitment.
+	pendingSettle uint32
+
 	// clearedHTCLs is a map of outgoing HTLCs we've committed to in our
 	// chain which have not yet been settled by the upstream peer.
 	clearedHTCLs map[uint64]*pendingPayment
@@ -1301,9 +1305,12 @@ out:
 			}
 
 		case <-batchTimer.C:
-			// If the current batch is empty, then we have no work
+			// If the either batch is empty, then we have no work
 			// here.
-			if len(state.pendingBatch) == 0 {
+			//
+			// TODO(roasbeef): should be combined, will be fixed by
+			// andrew's PR
+			if len(state.pendingBatch) == 0 && state.pendingSettle == 0 {
				continue
			}
 
@@ -1445,6 +1452,8 @@ func (p *peer) handleDownStreamPkt(state *commitmentState, pkt *htlcPacket) {
		p.queueMsg(htlc, nil)
		isSettle = true
 
+		state.pendingSettle++
+
	case *lnwire.UpdateFailHTLC:
		// An HTLC cancellation has been triggered somewhere upstream,
		// we'll remove then HTLC from our local state machine.
@@ -1465,6 +1474,8 @@ func (p *peer) handleDownStreamPkt(state *commitmentState, pkt *htlcPacket) {
		// initially created the HTLC.
		p.queueMsg(htlc, nil)
		isSettle = true
+
+		state.pendingSettle++
	}
 
	// If this newly added update exceeds the min batch size for adds, or
@@ -1867,6 +1878,7 @@ func (p *peer) updateCommitTx(state *commitmentState) error {
	// bool to indicate were waiting for a commitment signature.
	// TODO(roasbeef): re-slice instead to avoid GC?
	state.pendingBatch = nil
+	state.pendingSettle = 0
 
	return nil
 }
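To make the stall concrete outside the diff context: the batch timer only signs a new commitment when its check says there is pending work, so a settle-only batch at the final hop previously never triggered an update. The sketch below is a condensed, standalone illustration of that check before and after this commit; the pared-down types and the needsCommit* helpers are assumptions for illustration, not lnd code.

package main

import "fmt"

// pendingPayment and commitmentState are pared-down stand-ins for the
// structures in peer.go; only the fields relevant to the stall are kept.
type pendingPayment struct{}

type commitmentState struct {
	// HTLC adds sent on the update log but not yet committed.
	pendingBatch []*pendingPayment

	// Settles sent downstream but not yet committed (the counter this
	// commit introduces).
	pendingSettle uint32
}

// needsCommitOld mirrors the pre-fix batch-timer check: only pending adds
// count, so a batch made up solely of settles never forces a new commitment
// and the final hop stalls.
func needsCommitOld(s *commitmentState) bool {
	return len(s.pendingBatch) != 0
}

// needsCommitNew mirrors the post-fix check: pending adds or pending settles
// both force a commitment update.
func needsCommitNew(s *commitmentState) bool {
	return len(s.pendingBatch) != 0 || s.pendingSettle != 0
}

func main() {
	// A settle-only batch, as the last hop sees it in a concurrent
	// multi-hop flow: no new adds, several settles awaiting commitment.
	s := &commitmentState{pendingSettle: 3}

	fmt.Println("old check updates commitment:", needsCommitOld(s)) // false -> stall
	fmt.Println("new check updates commitment:", needsCommitNew(s)) // true
}

As the TODO in the diff notes, the separate counter is a stopgap; folding settles into the same batch-tracking structure as adds would remove the duplicated bookkeeping.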