Mirror of https://github.com/lightningnetwork/lnd.git, synced 2025-06-29 10:09:08 +02:00

Merge pull request #9068 from ziggie1984/cancel-back-dust-htlc

Cancel back outgoing dust HTLCs before the commitment is confirmed.
@@ -1475,16 +1475,23 @@ func decodeBreachResolution(r io.Reader, b *BreachResolution) error {
 	return binary.Read(r, endian, &b.FundingOutPoint.Index)
 }
 
-func encodeHtlcSetKey(w io.Writer, h *HtlcSetKey) error {
-	err := binary.Write(w, endian, h.IsRemote)
+func encodeHtlcSetKey(w io.Writer, htlcSetKey HtlcSetKey) error {
+	err := binary.Write(w, endian, htlcSetKey.IsRemote)
 	if err != nil {
 		return err
 	}
-	return binary.Write(w, endian, h.IsPending)
+
+	return binary.Write(w, endian, htlcSetKey.IsPending)
 }
 
 func encodeCommitSet(w io.Writer, c *CommitSet) error {
-	if err := encodeHtlcSetKey(w, c.ConfCommitKey); err != nil {
+	confCommitKey, err := c.ConfCommitKey.UnwrapOrErr(
+		fmt.Errorf("HtlcSetKey is not set"),
+	)
+	if err != nil {
+		return err
+	}
+
+	if err := encodeHtlcSetKey(w, confCommitKey); err != nil {
 		return err
 	}
 
@@ -1494,8 +1501,7 @@ func encodeCommitSet(w io.Writer, c *CommitSet) error {
 	}
 
 	for htlcSetKey, htlcs := range c.HtlcSets {
-		htlcSetKey := htlcSetKey
-		if err := encodeHtlcSetKey(w, &htlcSetKey); err != nil {
+		if err := encodeHtlcSetKey(w, htlcSetKey); err != nil {
 			return err
 		}
 
@@ -1517,13 +1523,14 @@ func decodeHtlcSetKey(r io.Reader, h *HtlcSetKey) error {
 }
 
 func decodeCommitSet(r io.Reader) (*CommitSet, error) {
-	c := &CommitSet{
-		ConfCommitKey: &HtlcSetKey{},
-		HtlcSets:      make(map[HtlcSetKey][]channeldb.HTLC),
+	confCommitKey := HtlcSetKey{}
+	if err := decodeHtlcSetKey(r, &confCommitKey); err != nil {
+		return nil, err
 	}
 
-	if err := decodeHtlcSetKey(r, c.ConfCommitKey); err != nil {
-		return nil, err
+	c := &CommitSet{
+		ConfCommitKey: fn.Some(confCommitKey),
+		HtlcSets:      make(map[HtlcSetKey][]channeldb.HTLC),
 	}
 
 	var numSets uint8
@@ -14,6 +14,7 @@ import (
 	"github.com/btcsuite/btcd/wire"
 	"github.com/davecgh/go-spew/spew"
 	"github.com/lightningnetwork/lnd/channeldb"
+	"github.com/lightningnetwork/lnd/fn"
 	"github.com/lightningnetwork/lnd/input"
 	"github.com/lightningnetwork/lnd/kvdb"
 	"github.com/lightningnetwork/lnd/lnmock"
@@ -753,7 +754,7 @@ func TestCommitSetStorage(t *testing.T) {
 	for _, pendingRemote := range []bool{true, false} {
 		for _, confType := range confTypes {
 			commitSet := &CommitSet{
-				ConfCommitKey: &confType,
+				ConfCommitKey: fn.Some(confType),
 				HtlcSets:      make(map[HtlcSetKey][]channeldb.HTLC),
 			}
 			commitSet.HtlcSets[LocalHtlcSet] = activeHTLCs
@@ -93,9 +93,9 @@ type BreachCloseInfo struct {
 // HTLCs to determine if any additional actions need to be made based on the
 // remote party's commitments.
 type CommitSet struct {
-	// ConfCommitKey if non-nil, identifies the commitment that was
+	// When the ConfCommitKey is set, it signals that the commitment tx was
 	// confirmed in the chain.
-	ConfCommitKey *HtlcSetKey
+	ConfCommitKey fn.Option[HtlcSetKey]
 
 	// HtlcSets stores the set of all known active HTLC for each active
 	// commitment at the time of channel closure.
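This hunk replaces the nullable *HtlcSetKey with fn.Option[HtlcSetKey], trading nil checks for an explicit optional type. A minimal, self-contained sketch of the fn.Option accessors this diff relies on (fn.Some, fn.None, IsSome, UnwrapOrErr, WhenSome), assuming lnd's fn package; the HtlcSetKey stub below is illustrative only:

package main

import (
	"fmt"

	"github.com/lightningnetwork/lnd/fn"
)

// HtlcSetKey is a stub of the contractcourt type, for illustration.
type HtlcSetKey struct {
	IsRemote, IsPending bool
}

func main() {
	// No commitment has confirmed yet: the key is absent.
	confCommitKey := fn.None[HtlcSetKey]()
	fmt.Println(confCommitKey.IsSome()) // false

	// Once a commitment confirms, the key is wrapped with fn.Some.
	confCommitKey = fn.Some(HtlcSetKey{IsRemote: true})

	// UnwrapOrErr turns an unexpectedly absent value into an error
	// instead of a nil-pointer dereference.
	key, err := confCommitKey.UnwrapOrErr(
		fmt.Errorf("HtlcSetKey is not set"),
	)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(key.IsRemote) // true

	// WhenSome runs the closure only when a value is present.
	confCommitKey.WhenSome(func(k HtlcSetKey) {
		fmt.Println("confirmed set:", k)
	})
}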
@@ -509,7 +509,7 @@ func (c *chainWatcher) handleUnknownLocalState(
 
 	// If this is our commitment transaction, then we try to act even
 	// though we won't be able to sweep HTLCs.
-	chainSet.commitSet.ConfCommitKey = &LocalHtlcSet
+	chainSet.commitSet.ConfCommitKey = fn.Some(LocalHtlcSet)
 	if err := c.dispatchLocalForceClose(
 		commitSpend, broadcastStateNum, chainSet.commitSet,
 	); err != nil {
@@ -806,7 +806,7 @@ func (c *chainWatcher) handleKnownLocalState(
 		return false, nil
 	}
 
-	chainSet.commitSet.ConfCommitKey = &LocalHtlcSet
+	chainSet.commitSet.ConfCommitKey = fn.Some(LocalHtlcSet)
 	if err := c.dispatchLocalForceClose(
 		commitSpend, broadcastStateNum, chainSet.commitSet,
 	); err != nil {
@@ -844,7 +844,7 @@ func (c *chainWatcher) handleKnownRemoteState(
 		log.Infof("Remote party broadcast base set, "+
 			"commit_num=%v", chainSet.remoteStateNum)
 
-		chainSet.commitSet.ConfCommitKey = &RemoteHtlcSet
+		chainSet.commitSet.ConfCommitKey = fn.Some(RemoteHtlcSet)
 		err := c.dispatchRemoteForceClose(
 			commitSpend, chainSet.remoteCommit,
 			chainSet.commitSet,
@@ -869,7 +869,7 @@ func (c *chainWatcher) handleKnownRemoteState(
 		log.Infof("Remote party broadcast pending set, "+
 			"commit_num=%v", chainSet.remoteStateNum+1)
 
-		chainSet.commitSet.ConfCommitKey = &RemotePendingHtlcSet
+		chainSet.commitSet.ConfCommitKey = fn.Some(RemotePendingHtlcSet)
 		err := c.dispatchRemoteForceClose(
 			commitSpend, *chainSet.remotePendingCommit,
 			chainSet.commitSet,
@@ -936,7 +936,7 @@ func (c *chainWatcher) handlePossibleBreach(commitSpend *chainntnfs.SpendDetail,
 	// only used to ensure a nil-pointer-dereference doesn't occur and is
 	// not used otherwise. The HTLC's may not exist for the
 	// RemotePendingHtlcSet.
-	chainSet.commitSet.ConfCommitKey = &RemoteHtlcSet
+	chainSet.commitSet.ConfCommitKey = fn.Some(RemoteHtlcSet)
 
 	// THEY'RE ATTEMPTING TO VIOLATE THE CONTRACT LAID OUT WITHIN THE
 	// PAYMENT CHANNEL. Therefore we close the signal indicating a revoked
@@ -997,7 +997,7 @@ func (c *chainWatcher) handleUnknownRemoteState(
 	// means we won't be able to recover any HTLC funds.
 	//
 	// TODO(halseth): can we try to recover some HTLCs?
-	chainSet.commitSet.ConfCommitKey = &RemoteHtlcSet
+	chainSet.commitSet.ConfCommitKey = fn.Some(RemoteHtlcSet)
 	err := c.dispatchRemoteForceClose(
 		commitSpend, channeldb.ChannelCommitment{},
 		chainSet.commitSet, commitPoint,
@@ -682,8 +682,14 @@ func (c *ChannelArbitrator) relaunchResolvers(commitSet *CommitSet,
 	// chain actions may exclude some information, but we cannot recover it
 	// for these older nodes at the moment.
 	var confirmedHTLCs []channeldb.HTLC
-	if commitSet != nil {
-		confirmedHTLCs = commitSet.HtlcSets[*commitSet.ConfCommitKey]
+	if commitSet != nil && commitSet.ConfCommitKey.IsSome() {
+		confCommitKey, err := commitSet.ConfCommitKey.UnwrapOrErr(
+			fmt.Errorf("no commitKey available"),
+		)
+		if err != nil {
+			return err
+		}
+		confirmedHTLCs = commitSet.HtlcSets[confCommitKey]
 	} else {
 		chainActions, err := c.log.FetchChainActions()
 		if err != nil {
@@ -932,22 +938,37 @@ func (c *ChannelArbitrator) stateStep(
 		// arbitrating for. If a commitment has confirmed, then we'll
 		// use the set snapshot from the chain, otherwise we'll use our
 		// current set.
-		var htlcs map[HtlcSetKey]htlcSet
-		if confCommitSet != nil {
-			htlcs = confCommitSet.toActiveHTLCSets()
-		} else {
+		var (
+			chainActions ChainActionMap
+			err          error
+		)
+
+		// Normally if we force close the channel locally we will have
+		// no confCommitSet. However when the remote commitment confirms
+		// without us ever broadcasting our local commitment we need to
+		// make sure we cancel all upstream HTLCs for outgoing dust
+		// HTLCs as well hence we need to fetch the chain actions here
+		// as well.
+		if confCommitSet == nil {
 			// Update the set of activeHTLCs so
 			// checkLocalChainActions has an up-to-date view of the
 			// commitments.
 			c.updateActiveHTLCs()
-			htlcs = c.activeHTLCs
-		}
-
-		chainActions, err := c.checkLocalChainActions(
-			triggerHeight, trigger, htlcs, false,
-		)
-		if err != nil {
-			return StateDefault, nil, err
+			htlcs := c.activeHTLCs
+			chainActions, err = c.checkLocalChainActions(
+				triggerHeight, trigger, htlcs, false,
+			)
+			if err != nil {
+				return StateDefault, nil, err
+			}
+		} else {
+			chainActions, err = c.constructChainActions(
+				confCommitSet, triggerHeight, trigger,
+			)
+			if err != nil {
+				return StateDefault, nil, err
+			}
 		}
 
 		// If there are no actions to be made, then we'll remain in the
 		// default state. If this isn't a self initiated event (we're
@@ -964,6 +985,25 @@ func (c *ChannelArbitrator) stateStep(
 		log.Tracef("ChannelArbitrator(%v): logging chain_actions=%v",
 			c.cfg.ChanPoint, lnutils.SpewLogClosure(chainActions))
 
+		// Cancel upstream HTLCs for all outgoing dust HTLCs available
+		// either on the local or the remote/remote pending commitment
+		// transaction.
+		dustHTLCs := chainActions[HtlcFailDustAction]
+		if len(dustHTLCs) > 0 {
+			log.Debugf("ChannelArbitrator(%v): canceling %v dust "+
+				"HTLCs backwards", c.cfg.ChanPoint,
+				len(dustHTLCs))
+
+			getIdx := func(htlc channeldb.HTLC) uint64 {
+				return htlc.HtlcIndex
+			}
+			dustHTLCSet := fn.NewSet(fn.Map(getIdx, dustHTLCs)...)
+			err = c.abandonForwards(dustHTLCSet)
+			if err != nil {
+				return StateError, closeTx, err
+			}
+		}
+
 		// Depending on the type of trigger, we'll either "tunnel"
 		// through to a farther state, or just proceed linearly to the
 		// next state.
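The new dust-cancellation block above leans on lnd's generic fn.Map and fn.NewSet helpers to collect HTLC indices into a set, so abandonForwards never emits duplicate failures. A small self-contained sketch of that pattern, with a trimmed-down htlc type standing in for channeldb.HTLC:

package main

import (
	"fmt"

	"github.com/lightningnetwork/lnd/fn"
)

// htlc is a trimmed-down stand-in for channeldb.HTLC.
type htlc struct {
	HtlcIndex uint64
}

func main() {
	// Index 1 appears twice, e.g. once per commitment version.
	dustHTLCs := []htlc{{HtlcIndex: 1}, {HtlcIndex: 2}, {HtlcIndex: 1}}

	// Project each HTLC to its index...
	getIdx := func(h htlc) uint64 { return h.HtlcIndex }

	// ...and collect the indices into a set, deduplicating index 1.
	dustHTLCSet := fn.NewSet(fn.Map(getIdx, dustHTLCs)...)

	fmt.Println(len(dustHTLCSet)) // 2
}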
@@ -1204,12 +1244,81 @@ func (c *ChannelArbitrator) stateStep(
 			break
 		}
 
+		// First, we'll reconstruct a fresh set of chain actions as the
+		// set of actions we need to act on may differ based on if it
+		// was our commitment, or they're commitment that hit the chain.
+		htlcActions, err := c.constructChainActions(
+			confCommitSet, triggerHeight, trigger,
+		)
+		if err != nil {
+			return StateError, closeTx, err
+		}
+
+		// In case its a breach transaction we fail back all upstream
+		// HTLCs for their corresponding outgoing HTLCs on the remote
+		// commitment set (remote and remote pending set).
+		if contractResolutions.BreachResolution != nil {
+			// cancelBreachedHTLCs is a set which holds HTLCs whose
+			// corresponding incoming HTLCs will be failed back
+			// because the peer broadcasted an old state.
+			cancelBreachedHTLCs := fn.NewSet[uint64]()
+
+			// We'll use the CommitSet, we'll fail back all
+			// upstream HTLCs for their corresponding outgoing
+			// HTLC that exist on either of the remote commitments.
+			// The map is used to deduplicate any shared HTLC's.
+			for htlcSetKey, htlcs := range confCommitSet.HtlcSets {
+				if !htlcSetKey.IsRemote {
+					continue
+				}
+
+				for _, htlc := range htlcs {
+					// Only outgoing HTLCs have a
+					// corresponding incoming HTLC.
+					if htlc.Incoming {
+						continue
+					}
+
+					cancelBreachedHTLCs.Add(htlc.HtlcIndex)
+				}
+			}
+
+			err := c.abandonForwards(cancelBreachedHTLCs)
+			if err != nil {
+				return StateError, closeTx, err
+			}
+		} else {
+			// If it's not a breach, we resolve all incoming dust
+			// HTLCs immediately after the commitment is confirmed.
+			err = c.failIncomingDust(
+				htlcActions[HtlcIncomingDustFinalAction],
+			)
+			if err != nil {
+				return StateError, closeTx, err
+			}
+
+			// We fail the upstream HTLCs for all remote pending
+			// outgoing HTLCs as soon as the commitment is
+			// confirmed. The upstream HTLCs for outgoing dust
+			// HTLCs have already been resolved before we reach
+			// this point.
+			getIdx := func(htlc channeldb.HTLC) uint64 {
+				return htlc.HtlcIndex
+			}
+			remoteDangling := fn.NewSet(fn.Map(
+				getIdx, htlcActions[HtlcFailDanglingAction],
+			)...)
+			err := c.abandonForwards(remoteDangling)
+			if err != nil {
+				return StateError, closeTx, err
+			}
+		}
+
 		// Now that we know we'll need to act, we'll process all the
 		// resolvers, then create the structures we need to resolve all
 		// outstanding contracts.
-		resolvers, pktsToSend, err := c.prepContractResolutions(
-			contractResolutions, triggerHeight, trigger,
-			confCommitSet,
+		resolvers, err := c.prepContractResolutions(
+			contractResolutions, triggerHeight, htlcActions,
 		)
 		if err != nil {
 			log.Errorf("ChannelArbitrator(%v): unable to "+
@@ -1217,20 +1326,6 @@ func (c *ChannelArbitrator) stateStep(
 			return StateError, closeTx, err
 		}
 
-		// With the commitment broadcast, we'll then send over all
-		// messages we can send immediately.
-		if len(pktsToSend) != 0 {
-			log.Debugf("ChannelArbitrator(%v): sending "+
-				"resolution message=%v", c.cfg.ChanPoint,
-				lnutils.SpewLogClosure(pktsToSend))
-
-			err := c.cfg.DeliverResolutionMsg(pktsToSend...)
-			if err != nil {
-				log.Errorf("unable to send pkts: %v", err)
-				return StateError, closeTx, err
-			}
-		}
-
 		log.Debugf("ChannelArbitrator(%v): inserting %v contract "+
 			"resolvers", c.cfg.ChanPoint, len(resolvers))
 
@@ -1301,134 +1396,23 @@ func (c *ChannelArbitrator) stateStep(
 func (c *ChannelArbitrator) sweepAnchors(anchors *lnwallet.AnchorResolutions,
 	heightHint uint32) error {
 
-	// Use the chan id as the exclusive group. This prevents any of the
-	// anchors from being batched together.
-	exclusiveGroup := c.cfg.ShortChanID.ToUint64()
-
-	// sweepWithDeadline is a helper closure that takes an anchor
-	// resolution and sweeps it with its corresponding deadline.
-	sweepWithDeadline := func(anchor *lnwallet.AnchorResolution,
-		htlcs htlcSet, anchorPath string) error {
-
-		// Find the deadline for this specific anchor.
-		deadline, value, err := c.findCommitmentDeadlineAndValue(
-			heightHint, htlcs,
-		)
-		if err != nil {
-			return err
-		}
-
-		// If we cannot find a deadline, it means there's no HTLCs at
-		// stake, which means we can relax our anchor sweeping
-		// conditions as we don't have any time sensitive outputs to
-		// sweep. However we need to register the anchor output with the
-		// sweeper so we are later able to bump the close fee.
-		if deadline.IsNone() {
-			log.Infof("ChannelArbitrator(%v): no HTLCs at stake, "+
-				"sweeping anchor with default deadline",
-				c.cfg.ChanPoint)
-		}
-
-		witnessType := input.CommitmentAnchor
-
-		// For taproot channels, we need to use the proper witness
-		// type.
-		if txscript.IsPayToTaproot(
-			anchor.AnchorSignDescriptor.Output.PkScript,
-		) {
-			witnessType = input.TaprootAnchorSweepSpend
-		}
-
-		// Prepare anchor output for sweeping.
-		anchorInput := input.MakeBaseInput(
-			&anchor.CommitAnchor,
-			witnessType,
-			&anchor.AnchorSignDescriptor,
-			heightHint,
-			&input.TxInfo{
-				Fee:    anchor.CommitFee,
-				Weight: anchor.CommitWeight,
-			},
-		)
-
-		// If we have a deadline, we'll use it to calculate the
-		// deadline height, otherwise default to none.
-		deadlineDesc := "None"
-		deadlineHeight := fn.MapOption(func(d int32) int32 {
-			deadlineDesc = fmt.Sprintf("%d", d)
-
-			return d + int32(heightHint)
-		})(deadline)
-
-		// Calculate the budget based on the value under protection,
-		// which is the sum of all HTLCs on this commitment subtracted
-		// by their budgets.
-		// The anchor output in itself has a small output value of 330
-		// sats so we also include it in the budget to pay for the
-		// cpfp transaction.
-		budget := calculateBudget(
-			value, c.cfg.Budget.AnchorCPFPRatio,
-			c.cfg.Budget.AnchorCPFP,
-		) + AnchorOutputValue
-
-		log.Infof("ChannelArbitrator(%v): offering anchor from %s "+
-			"commitment %v to sweeper with deadline=%v, budget=%v",
-			c.cfg.ChanPoint, anchorPath, anchor.CommitAnchor,
-			deadlineDesc, budget)
-
-		// Sweep anchor output with a confirmation target fee
-		// preference. Because this is a cpfp-operation, the anchor
-		// will only be attempted to sweep when the current fee
-		// estimate for the confirmation target exceeds the commit fee
-		// rate.
-		_, err = c.cfg.Sweeper.SweepInput(
-			&anchorInput,
-			sweep.Params{
-				ExclusiveGroup: &exclusiveGroup,
-				Budget:         budget,
-				DeadlineHeight: deadlineHeight,
-			},
-		)
-		if err != nil {
-			return err
-		}
-
-		return nil
-	}
-
 	// Update the set of activeHTLCs so that the sweeping routine has an
 	// up-to-date view of the set of commitments.
 	c.updateActiveHTLCs()
 
-	// Sweep anchors based on different HTLC sets. Notice the HTLC sets may
-	// differ across commitments, thus their deadline values could vary.
-	for htlcSet, htlcs := range c.activeHTLCs {
-		switch {
-		case htlcSet == LocalHtlcSet && anchors.Local != nil:
-			err := sweepWithDeadline(anchors.Local, htlcs, "local")
-			if err != nil {
-				return err
-			}
-
-		case htlcSet == RemoteHtlcSet && anchors.Remote != nil:
-			err := sweepWithDeadline(
-				anchors.Remote, htlcs, "remote",
-			)
-			if err != nil {
-				return err
-			}
-
-		case htlcSet == RemotePendingHtlcSet &&
-			anchors.RemotePending != nil:
-
-			err := sweepWithDeadline(
-				anchors.RemotePending, htlcs, "remote pending",
-			)
-			if err != nil {
-				return err
-			}
-		}
-	}
+	// Prepare the sweeping requests for all possible versions of
+	// commitments.
+	sweepReqs, err := c.prepareAnchorSweeps(heightHint, anchors)
+	if err != nil {
+		return err
+	}
+
+	// Send out the sweeping requests to the sweeper.
+	for _, req := range sweepReqs {
+		_, err = c.cfg.Sweeper.SweepInput(req.input, req.params)
+		if err != nil {
+			return err
+		}
+	}
 
 	return nil
@@ -1664,10 +1648,11 @@ const (
 	// before its timeout period.
 	HtlcClaimAction = 2
 
-	// HtlcFailNowAction indicates that we should fail an outgoing HTLC
-	// immediately by cancelling it backwards as it has no corresponding
-	// output in our commitment transaction.
-	HtlcFailNowAction = 3
+	// HtlcFailDustAction indicates that we should fail the upstream HTLC
+	// for an outgoing dust HTLC immediately (even before the commitment
+	// transaction is confirmed) because it has no output on the commitment
+	// transaction. This also includes remote pending outgoing dust HTLCs.
+	HtlcFailDustAction = 3
 
 	// HtlcOutgoingWatchAction indicates that we can't yet timeout this
 	// HTLC, but we had to go to chain on order to resolve an existing
@@ -1686,6 +1671,13 @@ const (
 	// HtlcIncomingDustFinalAction indicates that we should mark an incoming
 	// dust htlc as final because it can't be claimed on-chain.
 	HtlcIncomingDustFinalAction = 6
+
+	// HtlcFailDanglingAction indicates that we should fail the upstream
+	// HTLC for an outgoing HTLC immediately after the commitment
+	// transaction has confirmed because it has no corresponding output on
+	// the commitment transaction. This category does NOT include any dust
+	// HTLCs which are mapped in the "HtlcFailDustAction" category.
+	HtlcFailDanglingAction = 7
 )
 
 // String returns a human readable string describing a chain action.
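The dust/dangling split added here turns on whether the outgoing HTLC has an output on the commitment transaction: an HTLC below the dust limit carries a negative OutputIndex in channeldb, which is exactly the check the hunks below add. A minimal sketch of that classification, with a hypothetical trimmed-down htlc type standing in for channeldb.HTLC:

package main

import "fmt"

// htlc is a trimmed-down stand-in for channeldb.HTLC.
type htlc struct {
	HtlcIndex   uint64
	OutputIndex int32
}

// classify mirrors the split introduced by this change: dust HTLCs (no
// commitment output, so OutputIndex < 0) can be failed back upstream even
// before the commitment confirms, while non-dust dangling HTLCs must wait
// for confirmation.
func classify(h htlc) string {
	if h.OutputIndex < 0 {
		return "HtlcFailDustAction"
	}

	return "HtlcFailDanglingAction"
}

func main() {
	fmt.Println(classify(htlc{HtlcIndex: 1, OutputIndex: -1})) // dust
	fmt.Println(classify(htlc{HtlcIndex: 2, OutputIndex: 0}))  // dangling
}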
@@ -1700,8 +1692,8 @@ func (c ChainAction) String() string {
 	case HtlcClaimAction:
 		return "HtlcClaimAction"
 
-	case HtlcFailNowAction:
-		return "HtlcFailNowAction"
+	case HtlcFailDustAction:
+		return "HtlcFailDustAction"
 
 	case HtlcOutgoingWatchAction:
 		return "HtlcOutgoingWatchAction"
@@ -1712,6 +1704,9 @@ func (c ChainAction) String() string {
 	case HtlcIncomingDustFinalAction:
 		return "HtlcIncomingDustFinalAction"
 
+	case HtlcFailDanglingAction:
+		return "HtlcFailDanglingAction"
+
 	default:
 		return "<unknown action>"
 	}
@@ -1892,8 +1887,8 @@ func (c *ChannelArbitrator) checkCommitChainActions(height uint32,
 				"failing dust htlc=%x", c.cfg.ChanPoint,
 				htlc.RHash[:])
 
-			actionMap[HtlcFailNowAction] = append(
-				actionMap[HtlcFailNowAction], htlc,
+			actionMap[HtlcFailDustAction] = append(
+				actionMap[HtlcFailDustAction], htlc,
 			)
 
 		// If we don't need to immediately act on this HTLC, then we'll
@@ -2086,12 +2081,30 @@ func (c *ChannelArbitrator) checkRemoteDanglingActions(
 			continue
 		}
 
+		// Dust htlcs can be canceled back even before the commitment
+		// transaction confirms. Dust htlcs are not enforceable onchain.
+		// If another version of the commit tx would confirm we either
+		// gain or lose those dust amounts but there is no other way
+		// than cancelling the incoming back because we will never learn
+		// the preimage.
+		if htlc.OutputIndex < 0 {
+			log.Infof("ChannelArbitrator(%v): fail dangling dust "+
+				"htlc=%x from local/remote commitments diff",
+				c.cfg.ChanPoint, htlc.RHash[:])
+
+			actionMap[HtlcFailDustAction] = append(
+				actionMap[HtlcFailDustAction], htlc,
+			)
+
+			continue
+		}
+
 		log.Infof("ChannelArbitrator(%v): fail dangling htlc=%x from "+
 			"local/remote commitments diff",
 			c.cfg.ChanPoint, htlc.RHash[:])
 
-		actionMap[HtlcFailNowAction] = append(
-			actionMap[HtlcFailNowAction], htlc,
+		actionMap[HtlcFailDanglingAction] = append(
+			actionMap[HtlcFailDanglingAction], htlc,
 		)
 	}
 
@@ -2136,7 +2149,7 @@ func (c *ChannelArbitrator) checkRemoteChainActions(
 }
 
 // checkRemoteDiffActions checks the set difference of the HTLCs on the remote
-// confirmed commit and remote dangling commit for HTLCS that we need to cancel
+// confirmed commit and remote pending commit for HTLCS that we need to cancel
 // back. If we find any HTLCs on the remote pending but not the remote, then
 // we'll mark them to be failed immediately.
 func (c *ChannelArbitrator) checkRemoteDiffActions(
@@ -2159,7 +2172,7 @@ func (c *ChannelArbitrator) checkRemoteDiffActions(
 	}
 
 	// With the remote HTLCs assembled, we'll mark any HTLCs only on the
-	// remote dangling commitment to be failed asap.
+	// remote pending commitment to be failed asap.
 	actionMap := make(ChainActionMap)
 	for _, htlc := range danglingHTLCs.outgoingHTLCs {
 		if _, ok := remoteHtlcs[htlc.HtlcIndex]; ok {
@@ -2180,8 +2193,21 @@ func (c *ChannelArbitrator) checkRemoteDiffActions(
 			continue
 		}
 
-		actionMap[HtlcFailNowAction] = append(
-			actionMap[HtlcFailNowAction], htlc,
+		// Dust HTLCs on the remote commitment can be failed back.
+		if htlc.OutputIndex < 0 {
+			log.Infof("ChannelArbitrator(%v): fail dangling dust "+
+				"htlc=%x from remote commitments diff",
+				c.cfg.ChanPoint, htlc.RHash[:])
+
+			actionMap[HtlcFailDustAction] = append(
+				actionMap[HtlcFailDustAction], htlc,
+			)
+
+			continue
+		}
+
+		actionMap[HtlcFailDanglingAction] = append(
+			actionMap[HtlcFailDanglingAction], htlc,
 		)
 
 		log.Infof("ChannelArbitrator(%v): fail dangling htlc=%x from "+
@@ -2203,15 +2229,21 @@ func (c *ChannelArbitrator) constructChainActions(confCommitSet *CommitSet,
 	// then this is an older node that had a pending close channel before
 	// the CommitSet was introduced. In this case, we'll just return the
 	// existing ChainActionMap they had on disk.
-	if confCommitSet == nil {
+	if confCommitSet == nil || confCommitSet.ConfCommitKey.IsNone() {
 		return c.log.FetchChainActions()
 	}
 
 	// Otherwise, we have the full commitment set written to disk, and can
 	// proceed as normal.
 	htlcSets := confCommitSet.toActiveHTLCSets()
-	switch *confCommitSet.ConfCommitKey {
+	confCommitKey, err := confCommitSet.ConfCommitKey.UnwrapOrErr(
+		fmt.Errorf("no commitKey available"),
+	)
+	if err != nil {
+		return nil, err
+	}
+
+	switch confCommitKey {
 	// If the local commitment transaction confirmed, then we'll examine
 	// that as well as their commitments to the set of chain actions.
 	case LocalHtlcSet:
@@ -2246,24 +2278,13 @@ func (c *ChannelArbitrator) constructChainActions(confCommitSet *CommitSet,
 // are properly resolved.
 func (c *ChannelArbitrator) prepContractResolutions(
 	contractResolutions *ContractResolutions, height uint32,
-	trigger transitionTrigger,
-	confCommitSet *CommitSet) ([]ContractResolver, []ResolutionMsg, error) {
-
-	// First, we'll reconstruct a fresh set of chain actions as the set of
-	// actions we need to act on may differ based on if it was our
-	// commitment, or they're commitment that hit the chain.
-	htlcActions, err := c.constructChainActions(
-		confCommitSet, height, trigger,
-	)
-	if err != nil {
-		return nil, nil, err
-	}
+	htlcActions ChainActionMap) ([]ContractResolver, error) {
 
 	// We'll also fetch the historical state of this channel, as it should
 	// have been marked as closed by now, and supplement it to each resolver
 	// such that we can properly resolve our pending contracts.
 	var chanState *channeldb.OpenChannel
-	chanState, err = c.cfg.FetchHistoricalChannel()
+	chanState, err := c.cfg.FetchHistoricalChannel()
 	switch {
 	// If we don't find this channel, then it may be the case that it
 	// was closed before we started to retain the final state
@@ -2275,16 +2296,9 @@ func (c *ChannelArbitrator) prepContractResolutions(
 			"state", c.cfg.ChanPoint)
 
 	case err != nil:
-		return nil, nil, err
+		return nil, err
 	}
 
-	// There may be a class of HTLC's which we can fail back immediately,
-	// for those we'll prepare a slice of packets to add to our outbox. Any
-	// packets we need to send, will be cancels.
-	var (
-		msgsToSend []ResolutionMsg
-	)
-
 	incomingResolutions := contractResolutions.HtlcResolutions.IncomingHTLCs
 	outgoingResolutions := contractResolutions.HtlcResolutions.OutgoingHTLCs
 
@@ -2313,7 +2327,6 @@ func (c *ChannelArbitrator) prepContractResolutions(
 	}
 
 	commitHash := contractResolutions.CommitHash
-	failureMsg := &lnwire.FailPermanentChannelFailure{}
 
 	var htlcResolvers []ContractResolver
 
@@ -2337,37 +2350,7 @@ func (c *ChannelArbitrator) prepContractResolutions(
 		breachResolver := newBreachResolver(resolverCfg)
 		htlcResolvers = append(htlcResolvers, breachResolver)
 
-		// We'll use the CommitSet, we'll fail back all outgoing HTLC's
-		// that exist on either of the remote commitments. The map is
-		// used to deduplicate any shared htlc's.
-		remoteOutgoing := make(map[uint64]channeldb.HTLC)
-		for htlcSetKey, htlcs := range confCommitSet.HtlcSets {
-			if !htlcSetKey.IsRemote {
-				continue
-			}
-
-			for _, htlc := range htlcs {
-				if htlc.Incoming {
-					continue
-				}
-
-				remoteOutgoing[htlc.HtlcIndex] = htlc
-			}
-		}
-
-		// Now we'll loop over the map and create ResolutionMsgs for
-		// each of them.
-		for _, htlc := range remoteOutgoing {
-			failMsg := ResolutionMsg{
-				SourceChan: c.cfg.ShortChanID,
-				HtlcIndex:  htlc.HtlcIndex,
-				Failure:    failureMsg,
-			}
-
-			msgsToSend = append(msgsToSend, failMsg)
-		}
-
-		return htlcResolvers, msgsToSend, nil
+		return htlcResolvers, nil
 	}
 
 	// For each HTLC, we'll either act immediately, meaning we'll instantly
@@ -2375,20 +2358,6 @@ func (c *ChannelArbitrator) prepContractResolutions(
 	// confirmed, in which case we'll need an HTLC resolver.
 	for htlcAction, htlcs := range htlcActions {
 		switch htlcAction {
-
-		// If we can fail an HTLC immediately (an outgoing HTLC with no
-		// contract), then we'll assemble an HTLC fail packet to send.
-		case HtlcFailNowAction:
-			for _, htlc := range htlcs {
-				failMsg := ResolutionMsg{
-					SourceChan: c.cfg.ShortChanID,
-					HtlcIndex:  htlc.HtlcIndex,
-					Failure:    failureMsg,
-				}
-
-				msgsToSend = append(msgsToSend, failMsg)
-			}
-
 		// If we can claim this HTLC, we'll create an HTLC resolver to
 		// claim the HTLC (second-level or directly), then add the pre
 		case HtlcClaimAction:
@@ -2487,36 +2456,6 @@ func (c *ChannelArbitrator) prepContractResolutions(
 			htlcResolvers = append(htlcResolvers, resolver)
 		}
 
-		// We've lost an htlc because it isn't manifested on the
-		// commitment transaction that closed the channel.
-		case HtlcIncomingDustFinalAction:
-			for _, htlc := range htlcs {
-				htlc := htlc
-
-				key := models.CircuitKey{
-					ChanID: c.cfg.ShortChanID,
-					HtlcID: htlc.HtlcIndex,
-				}
-
-				// Mark this dust htlc as final failed.
-				chainArbCfg := c.cfg.ChainArbitratorConfig
-				err := chainArbCfg.PutFinalHtlcOutcome(
-					key.ChanID, key.HtlcID, false,
-				)
-				if err != nil {
-					return nil, nil, err
-				}
-
-				// Send notification.
-				chainArbCfg.HtlcNotifier.NotifyFinalHtlcEvent(
-					key,
-					channeldb.FinalHtlcInfo{
-						Settled:  false,
-						Offchain: false,
-					},
-				)
-			}
-
 		// Finally, if this is an outgoing HTLC we've sent, then we'll
 		// launch a resolver to watch for the pre-image (and settle
 		// backwards), or just timeout.
@@ -2531,9 +2470,11 @@ func (c *ChannelArbitrator) prepContractResolutions(
 
 			resolution, ok := outResolutionMap[htlcOp]
 			if !ok {
-				log.Errorf("ChannelArbitrator(%v) unable to find "+
-					"outgoing resolution: %v",
+				log.Errorf("ChannelArbitrator(%v) "+
+					"unable to find outgoing "+
+					"resolution: %v",
 					c.cfg.ChanPoint, htlcOp)
 
 				continue
 			}
 
@@ -2570,7 +2511,7 @@ func (c *ChannelArbitrator) prepContractResolutions(
 		htlcResolvers = append(htlcResolvers, resolver)
 	}
 
-	return htlcResolvers, msgsToSend, nil
+	return htlcResolvers, nil
 }
 
 // replaceResolver replaces a in the list of active resolvers. If the resolver
@@ -3159,3 +3100,267 @@ func (c *ChannelArbitrator) checkLegacyBreach() (ArbitratorState, error) {
 	// This is a modern breach close with resolvers.
 	return StateContractClosed, nil
 }
+
+// sweepRequest wraps the arguments used when calling `SweepInput`.
+type sweepRequest struct {
+	// input is the input to be swept.
+	input input.Input
+
+	// params holds the sweeping parameters.
+	params sweep.Params
+}
+
+// createSweepRequest creates an anchor sweeping request for a particular
+// version (local/remote/remote pending) of the commitment.
+func (c *ChannelArbitrator) createSweepRequest(
+	anchor *lnwallet.AnchorResolution, htlcs htlcSet, anchorPath string,
+	heightHint uint32) (sweepRequest, error) {
+
+	// Use the chan id as the exclusive group. This prevents any of the
+	// anchors from being batched together.
+	exclusiveGroup := c.cfg.ShortChanID.ToUint64()
+
+	// Find the deadline for this specific anchor.
+	deadline, value, err := c.findCommitmentDeadlineAndValue(
+		heightHint, htlcs,
+	)
+	if err != nil {
+		return sweepRequest{}, err
+	}
+
+	// If we cannot find a deadline, it means there's no HTLCs at stake,
+	// which means we can relax our anchor sweeping conditions as we don't
+	// have any time sensitive outputs to sweep. However we need to
+	// register the anchor output with the sweeper so we are later able to
+	// bump the close fee.
+	if deadline.IsNone() {
+		log.Infof("ChannelArbitrator(%v): no HTLCs at stake, "+
+			"sweeping anchor with default deadline",
+			c.cfg.ChanPoint)
+	}
+
+	witnessType := input.CommitmentAnchor
+
+	// For taproot channels, we need to use the proper witness type.
+	if txscript.IsPayToTaproot(
+		anchor.AnchorSignDescriptor.Output.PkScript,
+	) {
+		witnessType = input.TaprootAnchorSweepSpend
+	}
+
+	// Prepare anchor output for sweeping.
+	anchorInput := input.MakeBaseInput(
+		&anchor.CommitAnchor,
+		witnessType,
+		&anchor.AnchorSignDescriptor,
+		heightHint,
+		&input.TxInfo{
+			Fee:    anchor.CommitFee,
+			Weight: anchor.CommitWeight,
+		},
+	)
+
+	// If we have a deadline, we'll use it to calculate the deadline
+	// height, otherwise default to none.
+	deadlineDesc := "None"
+	deadlineHeight := fn.MapOption(func(d int32) int32 {
+		deadlineDesc = fmt.Sprintf("%d", d)
+
+		return d + int32(heightHint)
+	})(deadline)
+
+	// Calculate the budget based on the value under protection, which is
+	// the sum of all HTLCs on this commitment subtracted by their budgets.
+	// The anchor output in itself has a small output value of 330 sats so
+	// we also include it in the budget to pay for the cpfp transaction.
+	budget := calculateBudget(
+		value, c.cfg.Budget.AnchorCPFPRatio, c.cfg.Budget.AnchorCPFP,
+	) + AnchorOutputValue
+
+	log.Infof("ChannelArbitrator(%v): offering anchor from %s commitment "+
+		"%v to sweeper with deadline=%v, budget=%v", c.cfg.ChanPoint,
+		anchorPath, anchor.CommitAnchor, deadlineDesc, budget)
+
+	// Sweep anchor output with a confirmation target fee preference.
+	// Because this is a cpfp-operation, the anchor will only be attempted
+	// to sweep when the current fee estimate for the confirmation target
+	// exceeds the commit fee rate.
+	return sweepRequest{
+		input: &anchorInput,
+		params: sweep.Params{
+			ExclusiveGroup: &exclusiveGroup,
+			Budget:         budget,
+			DeadlineHeight: deadlineHeight,
+		},
+	}, nil
+}
+
+// prepareAnchorSweeps creates a list of requests to be used by the sweeper for
+// all possible commitment versions.
+func (c *ChannelArbitrator) prepareAnchorSweeps(heightHint uint32,
+	anchors *lnwallet.AnchorResolutions) ([]sweepRequest, error) {
+
+	// requests holds all the possible anchor sweep requests. We can have
+	// up to 3 different versions of commitments (local/remote/remote
+	// dangling) to be CPFPed by the anchors.
+	requests := make([]sweepRequest, 0, 3)
+
+	// remotePendingReq holds the request for sweeping the anchor output on
+	// the remote pending commitment. It's only set when there's an actual
+	// pending remote commitment and it's used to decide whether we need to
+	// update the fee budget when sweeping the anchor output on the local
+	// commitment.
+	remotePendingReq := fn.None[sweepRequest]()
+
+	// First we check on the remote pending commitment and optionally
+	// create an anchor sweeping request.
+	htlcs, ok := c.activeHTLCs[RemotePendingHtlcSet]
+	if ok && anchors.RemotePending != nil {
+		req, err := c.createSweepRequest(
+			anchors.RemotePending, htlcs, "remote pending",
+			heightHint,
+		)
+		if err != nil {
+			return nil, err
+		}
+
+		// Save the request.
+		requests = append(requests, req)
+
+		// Set the optional variable.
+		remotePendingReq = fn.Some(req)
+	}
+
+	// Check the local commitment and optionally create an anchor sweeping
+	// request. The params used in this request will be influenced by the
+	// anchor sweeping request made from the pending remote commitment.
+	htlcs, ok = c.activeHTLCs[LocalHtlcSet]
+	if ok && anchors.Local != nil {
+		req, err := c.createSweepRequest(
+			anchors.Local, htlcs, "local", heightHint,
+		)
+		if err != nil {
+			return nil, err
+		}
+
+		// If there's an anchor sweeping request from the pending
+		// remote commitment, we will compare its budget against the
+		// budget used here and choose the params that has a larger
+		// budget. The deadline when choosing the remote pending budget
+		// instead of the local one will always be earlier or equal to
+		// the local deadline because outgoing HTLCs are resolved on
+		// the local commitment first before they are removed from the
+		// remote one.
+		remotePendingReq.WhenSome(func(s sweepRequest) {
+			if s.params.Budget <= req.params.Budget {
+				return
+			}
+
+			log.Infof("ChannelArbitrator(%v): replaced local "+
+				"anchor(%v) sweep params with pending remote "+
+				"anchor sweep params, \nold:[%v], \nnew:[%v]",
+				c.cfg.ChanPoint, anchors.Local.CommitAnchor,
+				req.params, s.params)
+
+			req.params = s.params
+		})
+
+		// Save the request.
+		requests = append(requests, req)
+	}
+
+	// Check the remote commitment and create an anchor sweeping request if
+	// needed.
+	htlcs, ok = c.activeHTLCs[RemoteHtlcSet]
+	if ok && anchors.Remote != nil {
+		req, err := c.createSweepRequest(
+			anchors.Remote, htlcs, "remote", heightHint,
+		)
+		if err != nil {
+			return nil, err
+		}
+
+		requests = append(requests, req)
+	}
+
+	return requests, nil
+}
+
+// failIncomingDust resolves the incoming dust HTLCs because they do not have
+// an output on the commitment transaction and cannot be resolved onchain. We
+// mark them as failed here.
+func (c *ChannelArbitrator) failIncomingDust(
+	incomingDustHTLCs []channeldb.HTLC) error {
+
+	for _, htlc := range incomingDustHTLCs {
+		if !htlc.Incoming || htlc.OutputIndex >= 0 {
+			return fmt.Errorf("htlc with index %v is not incoming "+
+				"dust", htlc.OutputIndex)
+		}
+
+		key := models.CircuitKey{
+			ChanID: c.cfg.ShortChanID,
+			HtlcID: htlc.HtlcIndex,
+		}
+
+		// Mark this dust htlc as final failed.
+		chainArbCfg := c.cfg.ChainArbitratorConfig
+		err := chainArbCfg.PutFinalHtlcOutcome(
+			key.ChanID, key.HtlcID, false,
+		)
+		if err != nil {
+			return err
+		}
+
+		// Send notification.
+		chainArbCfg.HtlcNotifier.NotifyFinalHtlcEvent(
+			key,
+			channeldb.FinalHtlcInfo{
+				Settled:  false,
+				Offchain: false,
+			},
+		)
+	}
+
+	return nil
+}
+
+// abandonForwards cancels back the incoming HTLCs for their corresponding
+// outgoing HTLCs. We use a set here to avoid sending duplicate failure messages
+// for the same HTLC. This also needs to be done for locally initiated outgoing
+// HTLCs because they are special cased in the switch.
+func (c *ChannelArbitrator) abandonForwards(htlcs fn.Set[uint64]) error {
+	log.Debugf("ChannelArbitrator(%v): cancelling back %v incoming "+
+		"HTLC(s)", c.cfg.ChanPoint, len(htlcs))
+
+	msgsToSend := make([]ResolutionMsg, 0, len(htlcs))
+	failureMsg := &lnwire.FailPermanentChannelFailure{}
+
+	for idx := range htlcs {
+		failMsg := ResolutionMsg{
+			SourceChan: c.cfg.ShortChanID,
+			HtlcIndex:  idx,
+			Failure:    failureMsg,
+		}
+
+		msgsToSend = append(msgsToSend, failMsg)
+	}
+
+	// Send the msgs to the switch, if there are any.
+	if len(msgsToSend) == 0 {
+		return nil
+	}
+
+	log.Debugf("ChannelArbitrator(%v): sending resolution message=%v",
+		c.cfg.ChanPoint, lnutils.SpewLogClosure(msgsToSend))
+
+	err := c.cfg.DeliverResolutionMsg(msgsToSend...)
+	if err != nil {
+		log.Errorf("Unable to send resolution msgs to switch: %v", err)
+		return err
+	}
+
+	return nil
+}
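abandonForwards takes an fn.Set[uint64] precisely because the same outgoing HTLC can appear on several commitment versions (remote and remote pending); collecting the indices into a set first guarantees a single failure message per upstream HTLC. A tiny usage sketch with made-up indices, assuming lnd's fn package:

package main

import (
	"fmt"

	"github.com/lightningnetwork/lnd/fn"
)

func main() {
	// Outgoing HTLC indices seen on the remote and the remote pending
	// commitments; index 7 lives on both.
	remote := []uint64{5, 7}
	remotePending := []uint64{7, 9}

	cancel := fn.NewSet(remote...)
	for _, idx := range remotePending {
		cancel.Add(idx)
	}

	// Only three distinct upstream HTLCs would be failed back.
	fmt.Println(len(cancel)) // 3
}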
@@ -590,7 +590,7 @@ func TestChannelArbitratorRemoteForceClose(t *testing.T) {
 	chanArb.cfg.ChainEvents.RemoteUnilateralClosure <- &RemoteUnilateralCloseInfo{
 		UnilateralCloseSummary: uniClose,
 		CommitSet: CommitSet{
-			ConfCommitKey: &RemoteHtlcSet,
+			ConfCommitKey: fn.Some(RemoteHtlcSet),
 			HtlcSets:      make(map[HtlcSetKey][]channeldb.HTLC),
 		},
 	}
@@ -777,7 +777,7 @@ func TestChannelArbitratorBreachClose(t *testing.T) {
 		},
 		AnchorResolution: anchorRes,
 		CommitSet: CommitSet{
-			ConfCommitKey: &RemoteHtlcSet,
+			ConfCommitKey: fn.Some(RemoteHtlcSet),
 			HtlcSets: map[HtlcSetKey][]channeldb.HTLC{
 				RemoteHtlcSet: {htlc1, htlc2},
 			},
@@ -931,6 +931,24 @@ func TestChannelArbitratorLocalForceClosePendingHtlc(t *testing.T) {
 		t.Fatalf("no response received")
 	}
 
+	// We expect an immediate resolution message for the outgoing dust htlc.
+	// It is not resolvable on-chain and it can be canceled back even before
+	// the commitment transaction confirmed.
+	select {
+	case msgs := <-chanArbCtx.resolutions:
+		if len(msgs) != 1 {
+			t.Fatalf("expected 1 message, instead got %v",
+				len(msgs))
+		}
+
+		if msgs[0].HtlcIndex != outgoingDustHtlc.HtlcIndex {
+			t.Fatalf("wrong htlc index: expected %v, got %v",
+				outgoingDustHtlc.HtlcIndex, msgs[0].HtlcIndex)
+		}
+	case <-time.After(defaultTimeout):
+		t.Fatalf("resolution msgs not sent")
+	}
+
 	// Now notify about the local force close getting confirmed.
 	closeTx := &wire.MsgTx{
 		TxIn: []*wire.TxIn{
@@ -981,7 +999,7 @@ func TestChannelArbitratorLocalForceClosePendingHtlc(t *testing.T) {
 		},
 		ChannelCloseSummary: &channeldb.ChannelCloseSummary{},
 		CommitSet: CommitSet{
-			ConfCommitKey: &LocalHtlcSet,
+			ConfCommitKey: fn.Some(LocalHtlcSet),
 			HtlcSets: map[HtlcSetKey][]channeldb.HTLC{
 				LocalHtlcSet: htlcSet,
 			},
@@ -993,22 +1011,6 @@ func TestChannelArbitratorLocalForceClosePendingHtlc(t *testing.T) {
 		StateWaitingFullResolution,
 	)
 
-	// We expect an immediate resolution message for the outgoing dust htlc.
-	// It is not resolvable on-chain.
-	select {
-	case msgs := <-chanArbCtx.resolutions:
-		if len(msgs) != 1 {
-			t.Fatalf("expected 1 message, instead got %v", len(msgs))
-		}
-
-		if msgs[0].HtlcIndex != outgoingDustHtlc.HtlcIndex {
-			t.Fatalf("wrong htlc index: expected %v, got %v",
-				outgoingDustHtlc.HtlcIndex, msgs[0].HtlcIndex)
-		}
-	case <-time.After(defaultTimeout):
-		t.Fatalf("resolution msgs not sent")
-	}
-
 	// We'll grab the old notifier here as our resolvers are still holding
 	// a reference to this instance, and a new one will be created when we
 	// restart the channel arb below.
@@ -1525,7 +1527,7 @@ func TestChannelArbitratorForceCloseBreachedChannel(t *testing.T) {
 		},
 	}
 	log.commitSet = &CommitSet{
-		ConfCommitKey: &RemoteHtlcSet,
+		ConfCommitKey: fn.Some(RemoteHtlcSet),
 		HtlcSets: map[HtlcSetKey][]channeldb.HTLC{
 			RemoteHtlcSet: {},
 		},
@@ -1952,8 +1954,12 @@ func TestChannelArbitratorDanglingCommitForceClose(t *testing.T) {
 				},
 				ChannelCloseSummary: &channeldb.ChannelCloseSummary{},
 				CommitSet: CommitSet{
-					ConfCommitKey: &testCase.confCommit,
-					HtlcSets: make(map[HtlcSetKey][]channeldb.HTLC),
+					ConfCommitKey: fn.Some(
+						testCase.confCommit,
+					),
+					HtlcSets: make(
+						map[HtlcSetKey][]channeldb.HTLC,
+					),
 				},
 			}
 
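The `fn.Some(...)` rewrites in the hunks above move `ConfCommitKey` from a nullable pointer to an explicit optional value. The toy sketch below shows what that buys, assuming a reduced `option` type of my own rather than lnd's actual `fn` package: the unset case becomes an error value instead of a nil-pointer dereference.

```go
package main

import (
	"errors"
	"fmt"
)

// option is a reduced, illustrative optional type; lnd's fn package
// provides the real one used in this PR.
type option[A any] struct {
	value A
	isSet bool
}

func some[A any](a A) option[A] { return option[A]{value: a, isSet: true} }
func none[A any]() option[A]    { return option[A]{} }

// unwrapOrErr returns the wrapped value, or the supplied error when the
// option is empty, forcing callers to handle the "unset" case explicitly.
func (o option[A]) unwrapOrErr(err error) (A, error) {
	if !o.isSet {
		var zero A
		return zero, err
	}

	return o.value, nil
}

// htlcSetKey is reduced to its two flags for this sketch.
type htlcSetKey struct {
	isRemote, isPending bool
}

func main() {
	// Present vs. absent is now part of the type instead of being
	// encoded as a nil pointer.
	confCommitKey := some(htlcSetKey{isRemote: true})

	key, err := confCommitKey.unwrapOrErr(errors.New("key not set"))
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Printf("conf commit key: %+v\n", key)

	// The unset case surfaces as an error, not a nil dereference.
	_, err = none[htlcSetKey]().unwrapOrErr(errors.New("key not set"))
	fmt.Println(err)
}
```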
@@ -2254,7 +2260,7 @@ func TestFindCommitmentDeadlineAndValue(t *testing.T) {
 	mockPreimageDB := newMockWitnessBeacon()
 	mockPreimageDB.lookupPreimage[rHash] = rHash
 
-	// Attack a mock PreimageDB and Registry to channel arbitrator.
+	// Attach a mock PreimageDB and Registry to channel arbitrator.
 	chanArb := chanArbCtx.chanArb
 	chanArb.cfg.PreimageDB = mockPreimageDB
 	chanArb.cfg.Registry = &mockRegistry{}
@@ -2445,7 +2451,7 @@ func TestSweepAnchors(t *testing.T) {
 	mockPreimageDB := newMockWitnessBeacon()
 	mockPreimageDB.lookupPreimage[rHash] = rHash
 
-	// Attack a mock PreimageDB and Registry to channel arbitrator.
+	// Attach a mock PreimageDB and Registry to channel arbitrator.
 	chanArb := chanArbCtx.chanArb
 	chanArb.cfg.PreimageDB = mockPreimageDB
 	chanArb.cfg.Registry = &mockRegistry{}
@@ -2596,6 +2602,116 @@ func TestSweepAnchors(t *testing.T) {
 	)
 }
 
+// TestSweepLocalAnchor checks that the sweep params used for the local
+// anchor are optionally updated based on the pending remote commit.
+func TestSweepLocalAnchor(t *testing.T) {
+	// Create a testing channel arbitrator.
+	log := &mockArbitratorLog{
+		state:     StateDefault,
+		newStates: make(chan ArbitratorState, 5),
+	}
+	chanArbCtx, err := createTestChannelArbitrator(t, log)
+	require.NoError(t, err, "unable to create ChannelArbitrator")
+
+	// Attach a mock PreimageDB and Registry to channel arbitrator.
+	chanArb := chanArbCtx.chanArb
+	mockPreimageDB := newMockWitnessBeacon()
+	chanArb.cfg.PreimageDB = mockPreimageDB
+	chanArb.cfg.Registry = &mockRegistry{}
+
+	// Set current block height.
+	heightHint := uint32(1000)
+	chanArbCtx.chanArb.blocks <- int32(heightHint)
+
+	htlcIndex := uint64(99)
+	deadlineDelta := uint32(10)
+
+	htlcAmt := lnwire.MilliSatoshi(1_000_000)
+
+	// Create one testing HTLC.
+	deadlineSmallDelta := deadlineDelta + 4
+	htlcSmallExipry := channeldb.HTLC{
+		HtlcIndex:     htlcIndex,
+		RefundTimeout: heightHint + deadlineSmallDelta,
+		Amt:           htlcAmt,
+	}
+
+	// Setup our local HTLC set such that it doesn't have any HTLCs. We
+	// expect an anchor sweeping request to be made using the params
+	// created from sweeping the anchor from the pending remote commit.
+	chanArb.activeHTLCs[LocalHtlcSet] = htlcSet{}
+
+	// Setup our remote HTLC set such that no valid HTLCs can be used, thus
+	// the anchor sweeping is skipped.
+	chanArb.activeHTLCs[RemoteHtlcSet] = htlcSet{}
+
+	// Setup our pending remote HTLC set such that we will use the HTLC's
+	// CLTV from the outgoing HTLC set.
+	// Only half of the deadline is used for the anchor cpfp sweep. The
+	// other half of the deadline is used to sweep the HTLCs at stake.
+	expectedPendingDeadline := heightHint + deadlineSmallDelta/2
+	chanArb.activeHTLCs[RemotePendingHtlcSet] = htlcSet{
+		outgoingHTLCs: map[uint64]channeldb.HTLC{
+			htlcSmallExipry.HtlcIndex: htlcSmallExipry,
+		},
+	}
+
+	// Mock FindOutgoingHTLCDeadline so the pending remote's outgoing HTLC
+	// returns the small expiry value.
+	chanArb.cfg.FindOutgoingHTLCDeadline = func(
+		htlc channeldb.HTLC) fn.Option[int32] {
+
+		if htlc.RHash != htlcSmallExipry.RHash {
+			return fn.None[int32]()
+		}
+
+		return fn.Some(int32(htlcSmallExipry.RefundTimeout))
+	}
+
+	// Create AnchorResolutions.
+	anchors := &lnwallet.AnchorResolutions{
+		Local: &lnwallet.AnchorResolution{
+			AnchorSignDescriptor: input.SignDescriptor{
+				Output: &wire.TxOut{Value: 1},
+			},
+		},
+		Remote: &lnwallet.AnchorResolution{
+			AnchorSignDescriptor: input.SignDescriptor{
+				Output: &wire.TxOut{Value: 1},
+			},
+		},
+		RemotePending: &lnwallet.AnchorResolution{
+			AnchorSignDescriptor: input.SignDescriptor{
+				Output: &wire.TxOut{Value: 1},
+			},
+		},
+	}
+
+	// Sweep anchors and check there's no error.
+	err = chanArb.sweepAnchors(anchors, heightHint)
+	require.NoError(t, err)
+
+	// Verify deadlines are used as expected.
+	deadlines := chanArbCtx.sweeper.deadlines
+
+	// We should see two `SweepInput` calls - one for sweeping the local
+	// anchor, the other for the remote pending anchor.
+	require.Len(t, deadlines, 2)
+
+	// Both deadlines should be the same since the local anchor uses the
+	// parameters from the pending remote commitment.
+	require.EqualValues(
+		t, expectedPendingDeadline, deadlines[0],
+		"local deadline not matched, want %v, got %v",
+		expectedPendingDeadline, deadlines[0],
+	)
+	require.EqualValues(
+		t, expectedPendingDeadline, deadlines[1],
+		"pending remote deadline not matched, want %v, got %v",
+		expectedPendingDeadline, deadlines[1],
+	)
+}
+
 // TestChannelArbitratorAnchors asserts that the commitment tx anchor is swept.
 func TestChannelArbitratorAnchors(t *testing.T) {
 	log := &mockArbitratorLog{
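TestSweepLocalAnchor pins down the deadline arithmetic: with the HTLC expiring `deadlineSmallDelta` blocks away, only half of that delta is granted to the anchor CPFP sweep. The arithmetic sketch below reduces this to a single helper; `anchorSweepDeadline` is a hypothetical reduction of the arbitrator's calculation, not its actual implementation.

```go
package main

import "fmt"

// anchorSweepDeadline reduces the deadline split exercised by
// TestSweepLocalAnchor: of the blocks remaining until the most urgent
// outgoing HTLC expires, only half are granted to the anchor CPFP sweep,
// reserving the rest for sweeping the HTLCs themselves.
func anchorSweepDeadline(heightHint, htlcExpiry uint32) uint32 {
	deadlineDelta := htlcExpiry - heightHint
	return heightHint + deadlineDelta/2
}

func main() {
	heightHint := uint32(1000)
	deadlineDelta := uint32(10)

	// Mirrors htlcSmallExipry in the test: expiry = heightHint + 14.
	htlcExpiry := heightHint + deadlineDelta + 4

	// Prints 1007, matching expectedPendingDeadline above:
	// 1000 + 14/2.
	fmt.Println(anchorSweepDeadline(heightHint, htlcExpiry))
}
```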
@@ -2619,7 +2735,7 @@ func TestChannelArbitratorAnchors(t *testing.T) {
 	mockPreimageDB := newMockWitnessBeacon()
 	mockPreimageDB.lookupPreimage[rHash] = rHash
 
-	// Attack a mock PreimageDB and Registry to channel arbitrator.
+	// Attach a mock PreimageDB and Registry to channel arbitrator.
 	chanArb := chanArbCtx.chanArb
 	chanArb.cfg.PreimageDB = mockPreimageDB
 	chanArb.cfg.Registry = &mockRegistry{}
@@ -2763,7 +2879,7 @@ func TestChannelArbitratorAnchors(t *testing.T) {
 		},
 		ChannelCloseSummary: &channeldb.ChannelCloseSummary{},
 		CommitSet: CommitSet{
-			ConfCommitKey: &LocalHtlcSet,
+			ConfCommitKey: fn.Some(LocalHtlcSet),
 			HtlcSets: map[HtlcSetKey][]channeldb.HTLC{},
 		},
 	}
@@ -461,19 +461,23 @@ func (h *htlcTimeoutResolver) Resolve(
 		return h.claimCleanUp(commitSpend)
 	}
 
-	log.Infof("%T(%v): resolving htlc with incoming fail msg, fully "+
-		"confirmed", h, h.htlcResolution.ClaimOutpoint)
-
 	// At this point, the second-level transaction is sufficiently
 	// confirmed, or a transaction directly spending the output is.
 	// Therefore, we can now send back our clean up message, failing the
 	// HTLC on the incoming link.
+	//
+	// NOTE: This can be called twice if the outgoing resolver restarts
+	// before the second-stage timeout transaction is confirmed.
+	log.Infof("%T(%v): resolving htlc with incoming fail msg, "+
+		"fully confirmed", h, h.htlcResolution.ClaimOutpoint)
+
 	failureMsg := &lnwire.FailPermanentChannelFailure{}
-	if err := h.DeliverResolutionMsg(ResolutionMsg{
+	err = h.DeliverResolutionMsg(ResolutionMsg{
 		SourceChan: h.ShortChanID,
 		HtlcIndex:  h.htlc.HtlcIndex,
 		Failure:    failureMsg,
-	}); err != nil {
+	})
+	if err != nil {
 		return nil, err
 	}
 
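The NOTE added above warns that Resolve can run twice if the resolver restarts before the second-stage timeout transaction confirms, so the same failure may be delivered more than once. The sketch below shows one way a receiving side can stay safe by treating delivery as idempotent, keyed on (channel, HTLC index); the `dedupDeliverer` type is illustrative only, not the switch's actual bookkeeping.

```go
package main

import "fmt"

// resolutionKey identifies a failed-back HTLC by channel and index.
type resolutionKey struct {
	sourceChan string
	htlcIndex  uint64
}

// dedupDeliverer remembers which (channel, htlc index) pairs were already
// handled so a post-restart re-delivery of the same failure is a no-op.
type dedupDeliverer struct {
	seen map[resolutionKey]struct{}
}

func (d *dedupDeliverer) deliver(k resolutionKey) {
	if _, ok := d.seen[k]; ok {
		fmt.Println("duplicate resolution ignored:", k)
		return
	}

	d.seen[k] = struct{}{}
	fmt.Println("htlc failed back:", k)
}

func main() {
	d := &dedupDeliverer{seen: make(map[resolutionKey]struct{})}
	k := resolutionKey{sourceChan: "chan-1", htlcIndex: 7}

	// First delivery fails the HTLC back; the second, simulating a
	// re-delivery after a resolver restart, is ignored.
	d.deliver(k)
	d.deliver(k)
}
```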
@@ -40,6 +40,11 @@
 a graceful shutdown of LND during the main chain backend sync check in certain
 cases.
 
+* [Fixed a bug](https://github.com/lightningnetwork/lnd/pull/9068) where dust
+  HTLCs, which cannot be resolved on-chain, were not canceled back before the
+  commitment transaction was confirmed, potentially causing force closes of
+  the incoming channel.
+
 # New Features
 ## Functional Enhancements
 ## RPC Additions
@@ -1632,7 +1632,7 @@ out:
 			defer s.wg.Done()
 
 			if err := s.FlushForwardingEvents(); err != nil {
-				log.Errorf("unable to flush "+
+				log.Errorf("Unable to flush "+
 					"forwarding events: %v", err)
 			}
 		}()
@@ -250,6 +250,10 @@ func runMultiHopHtlcLocalTimeout(ht *lntest.HarnessTest,
 	op := ht.OutPointFromChannelPoint(bobChanPoint)
 	closeTx := ht.AssertOutpointInMempool(op)
 
+	// Dust HTLCs are immediately canceled backwards as soon as the local
+	// commitment tx is successfully broadcasted to the local mempool.
+	ht.AssertActiveHtlcs(alice, payHash)
+
 	// Bob's anchor output should be offered to his sweep since Bob has
 	// time-sensitive HTLCs - we expect both anchors are offered.
 	ht.AssertNumPendingSweeps(bob, 2)
@@ -257,11 +261,6 @@ func runMultiHopHtlcLocalTimeout(ht *lntest.HarnessTest,
 	// Mine a block to confirm the closing transaction.
 	ht.MineBlocksAndAssertNumTxes(1, 1)
 
-	// At this point, Bob should have canceled backwards the dust HTLC
-	// that we sent earlier. This means Alice should now only have a single
-	// HTLC on her channel.
-	ht.AssertActiveHtlcs(alice, payHash)
-
 	// With the closing transaction confirmed, we should expect Bob's HTLC
 	// timeout transaction to be offered to the sweeper due to the expiry
 	// being reached. We also expect Bob and Carol's anchor sweeps.