multi: enable RBF co-op bumps after reconnection

In this commit, we alter the existing co-op close flow to enable RBF
bumps after reconnection. With the new RBF close flow, it's possible
that after a successful round _and_ a reconnection, either side wants to
do another fee bump. Typically we route these requests through the
switch, but in this case, the link no longer exists in the switch, so
any requests to fee bump again would find that the link doesn't exist.

In this commit, we implement a workaround wherein if we have an RBF
chan closer active, and the link isn't in the switch, then we just route
the request directly to the chan closer via the peer. Once we have the
chan closer, we can use the exact same flow as before.
This commit is contained in:
Olaoluwa Osuntokun 2025-02-07 18:25:49 -08:00
parent 80b48fb49b
commit 3a18bf088c
3 changed files with 192 additions and 9 deletions

View File

@ -5234,3 +5234,62 @@ func (p *Brontide) scaleTimeout(timeout time.Duration) time.Duration {
return timeout
}
// CoopCloseUpdates is a struct used to communicate updates for an active close
// to the caller.
type CoopCloseUpdates struct {
	// UpdateChan is used to send interim close updates to the caller.
	UpdateChan chan interface{}

	// ErrChan is used to send any errors encountered during the close
	// flow to the caller.
	ErrChan chan error
}
// ChanHasRbfCoopCloser returns true if the channel as identified by the
// channel point has an active RBF chan closer.
func (p *Brontide) ChanHasRbfCoopCloser(chanPoint wire.OutPoint) bool {
	chanID := lnwire.NewChanIDFromOutPoint(chanPoint)

	// Only an entry of the RBF variant (the right-hand side of the
	// either type) counts as an active RBF chan closer.
	chanCloser, ok := p.activeChanCloses.Load(chanID)

	return ok && chanCloser.IsRight()
}
// TriggerCoopCloseRbfBump given a chan ID, and the params needed to trigger a
// new RBF co-op close update, a bump is attempted. A channel used for updates,
// along with one used to communicate any errors, is returned. If the RBF close
// flow isn't active for this peer, or the closer can't be started, an error is
// returned instead.
func (p *Brontide) TriggerCoopCloseRbfBump(ctx context.Context,
	chanPoint wire.OutPoint, feeRate chainfee.SatPerKWeight,
	deliveryScript lnwire.DeliveryAddress) (*CoopCloseUpdates, error) {

	// If RBF coop close isn't permitted, then we'll return an error.
	if !p.rbfCoopCloseAllowed() {
		return nil, fmt.Errorf("rbf coop close not enabled for " +
			"channel")
	}

	// Buffer of 1 so a single update/error can be delivered even if the
	// caller isn't yet receiving.
	closeUpdates := &CoopCloseUpdates{
		UpdateChan: make(chan interface{}, 1),
		ErrChan:    make(chan error, 1),
	}

	// We'll re-use the existing switch struct here, even though we're
	// bypassing the switch entirely.
	closeReq := htlcswitch.ChanClose{
		CloseType:      contractcourt.CloseRegular,
		ChanPoint:      &chanPoint,
		TargetFeePerKw: feeRate,
		DeliveryScript: deliveryScript,
		Updates:        closeUpdates.UpdateChan,
		Err:            closeUpdates.ErrChan,
		Ctx:            ctx,
	}

	// Kick off (or re-enter) the RBF chan closer directly, treating the
	// request as if it arrived via RPC.
	err := p.startRbfChanCloser(newRPCShutdownInit(&closeReq), chanPoint)
	if err != nil {
		return nil, err
	}

	return closeUpdates, nil
}

View File

@ -2801,14 +2801,33 @@ func (r *rpcServer) CloseChannel(in *lnrpc.CloseChannelRequest,
}
}
var (
chanInSwitch = true
chanHasRbfCloser = r.server.ChanHasRbfCoopCloser(
channel.IdentityPub, *chanPoint,
)
)
// If the link is not known by the switch, we cannot gracefully close
// the channel.
channelID := lnwire.NewChanIDFromOutPoint(*chanPoint)
if _, err := r.server.htlcSwitch.GetLink(channelID); err != nil {
rpcsLog.Debugf("Trying to non-force close offline channel with "+
"chan_point=%v", chanPoint)
return fmt.Errorf("unable to gracefully close channel while peer "+
"is offline (try force closing it instead): %v", err)
chanInSwitch = false
// The channel isn't in the switch, but if there's an
// active chan closer for the channel, and it's of the
// RBF variant, then we can actually bypass the switch.
// Otherwise, we'll return an error.
if !chanHasRbfCloser {
rpcsLog.Debugf("Trying to non-force close "+
"offline channel with chan_point=%v",
chanPoint)
return fmt.Errorf("unable to gracefully close "+
"channel while peer is offline (try "+
"force closing it instead): %v", err)
}
}
// Keep the old behavior prior to 0.18.0 - when the user
@ -2886,11 +2905,31 @@ func (r *rpcServer) CloseChannel(in *lnrpc.CloseChannelRequest,
feeRate)
}
updateChan, errChan = r.server.htlcSwitch.CloseLink(
updateStream.Context(), chanPoint,
contractcourt.CloseRegular, feeRate, maxFee,
deliveryScript,
)
if chanHasRbfCloser && !chanInSwitch {
rpcsLog.Infof("Bypassing Switch to do fee bump "+
"for ChannelPoint(%v)", chanPoint)
closeUpdates, err := r.server.AttemptRBFCloseUpdate(
updateStream.Context(), *chanPoint, feeRate,
deliveryScript,
)
if err != nil {
return fmt.Errorf("unable to do RBF close "+
"update: %w", err)
}
updateChan = closeUpdates.UpdateChan
errChan = closeUpdates.ErrChan
} else {
maxFee := chainfee.SatPerKVByte(
in.MaxFeePerVbyte * 1000,
).FeePerKWeight()
updateChan, errChan = r.server.htlcSwitch.CloseLink(
updateStream.Context(), chanPoint,
contractcourt.CloseRegular, feeRate, maxFee,
deliveryScript,
)
}
}
// If the user doesn't want to wait for the txid to come back then we

View File

@ -5427,3 +5427,88 @@ func (s *server) getStartingBeat() (*chainio.Beat, error) {
return beat, nil
}
// ChanHasRbfCoopCloser returns true if the channel as identified by the
// channel point has an active RBF chan closer.
func (s *server) ChanHasRbfCoopCloser(peerPub *btcec.PublicKey,
	chanPoint wire.OutPoint) bool {

	// Look up the peer under the read lock, keyed by its serialized
	// compressed public key.
	peerKey := string(peerPub.SerializeCompressed())

	s.mu.RLock()
	connectedPeer, found := s.peersByPub[peerKey]
	s.mu.RUnlock()

	// Without a live p2p connection there can be no active chan closer.
	if !found {
		return false
	}

	// Delegate the actual check to the peer itself.
	return connectedPeer.ChanHasRbfCoopCloser(chanPoint)
}
// attemptCoopRbfFeeBump attempts to look up the active chan closer for a
// channel given the outpoint. If found, we'll attempt to do a fee bump,
// returning channels used for updates. If the channel isn't currently active
// (p2p connection established), then this function will return an error.
func (s *server) attemptCoopRbfFeeBump(ctx context.Context,
	chanPoint wire.OutPoint, feeRate chainfee.SatPerKWeight,
	deliveryScript lnwire.DeliveryAddress) (*peer.CoopCloseUpdates, error) {

	// First, we'll attempt to look up the channel based on its
	// ChannelPoint.
	channel, err := s.chanStateDB.FetchChannel(chanPoint)
	if err != nil {
		return nil, fmt.Errorf("unable to fetch channel: %w", err)
	}

	// From the channel, we can now get the pubkey of the peer, then use
	// that to eventually get the chan closer.
	peerPub := channel.IdentityPub.SerializeCompressed()

	// Now that we have the peer pub, we can look up the peer itself.
	// The map access is guarded by the server's read lock.
	s.mu.RLock()
	targetPeer, ok := s.peersByPub[string(peerPub)]
	s.mu.RUnlock()
	if !ok {
		return nil, fmt.Errorf("peer for ChannelPoint(%v) is "+
			"not online", chanPoint)
	}

	// Hand the bump request directly to the peer, which owns the active
	// RBF chan closer state machine.
	closeUpdates, err := targetPeer.TriggerCoopCloseRbfBump(
		ctx, chanPoint, feeRate, deliveryScript,
	)
	if err != nil {
		return nil, fmt.Errorf("unable to trigger coop rbf fee bump: "+
			"%w", err)
	}

	return closeUpdates, nil
}
// AttemptRBFCloseUpdate attempts to trigger a new RBF iteration for a co-op
// close update. This route is to be used only if the target channel in
// question is no longer active in the link. This can happen when we restart
// while we already have done a single RBF co-op close iteration.
func (s *server) AttemptRBFCloseUpdate(ctx context.Context,
	chanPoint wire.OutPoint, feeRate chainfee.SatPerKWeight,
	deliveryScript lnwire.DeliveryAddress) (*peer.CoopCloseUpdates, error) {

	// If the channel is present in the switch, then the request should flow
	// through the switch instead.
	chanID := lnwire.NewChanIDFromOutPoint(chanPoint)
	if _, err := s.htlcSwitch.GetLink(chanID); err == nil {
		return nil, fmt.Errorf("ChannelPoint(%v) is active in link, "+
			"invalid request", chanPoint)
	}

	// At this point, we know that the channel isn't present in the link, so
	// we'll check to see if we have an entry in the active chan closer map.
	updates, err := s.attemptCoopRbfFeeBump(
		ctx, chanPoint, feeRate, deliveryScript,
	)
	if err != nil {
		// Wrap the underlying error with %w so callers can still
		// inspect the root cause via errors.Is/errors.As; the
		// original code dropped it entirely.
		return nil, fmt.Errorf("unable to attempt coop rbf fee bump "+
			"for ChannelPoint(%v): %w", chanPoint, err)
	}

	return updates, nil
}