From 73e622a31e7aac10e84bad683e395785e70a4fad Mon Sep 17 00:00:00 2001 From: Olaoluwa Osuntokun Date: Thu, 7 Mar 2024 20:08:13 -0800 Subject: [PATCH 1/8] lnwallet/chancloser: add fee rate to ClosePending This'll be useful to communicate what the new fee rate is to an RPC caller. --- lnwallet/chancloser/rbf_coop_states.go | 6 ++++++ lnwallet/chancloser/rbf_coop_transitions.go | 15 ++++++++++++++- 2 files changed, 20 insertions(+), 1 deletion(-) diff --git a/lnwallet/chancloser/rbf_coop_states.go b/lnwallet/chancloser/rbf_coop_states.go index b6728629b..5b8ba8bcc 100644 --- a/lnwallet/chancloser/rbf_coop_states.go +++ b/lnwallet/chancloser/rbf_coop_states.go @@ -663,6 +663,9 @@ type LocalOfferSent struct { // ProposedFee is the fee we proposed to the remote party. ProposedFee btcutil.Amount + // ProposedFeeRate is the fee rate we proposed to the remote party. + ProposedFeeRate chainfee.SatPerVByte + // LocalSig is the signature we sent to the remote party. LocalSig lnwire.Sig } @@ -706,6 +709,9 @@ func (l *LocalOfferSent) IsTerminal() bool { type ClosePending struct { // CloseTx is the pending close transaction. CloseTx *wire.MsgTx + + // FeeRate is the fee rate of the closing transaction. + FeeRate chainfee.SatPerVByte } // String returns the name of the state for ClosePending. diff --git a/lnwallet/chancloser/rbf_coop_transitions.go b/lnwallet/chancloser/rbf_coop_transitions.go index 1c7612541..9dec4ccfb 100644 --- a/lnwallet/chancloser/rbf_coop_transitions.go +++ b/lnwallet/chancloser/rbf_coop_transitions.go @@ -4,6 +4,7 @@ import ( "fmt" "github.com/btcsuite/btcd/btcec/v2" + "github.com/btcsuite/btcd/btcutil" "github.com/btcsuite/btcd/chaincfg" "github.com/btcsuite/btcd/mempool" "github.com/btcsuite/btcd/wire" @@ -14,6 +15,7 @@ import ( "github.com/lightningnetwork/lnd/lntypes" "github.com/lightningnetwork/lnd/lnutils" "github.com/lightningnetwork/lnd/lnwallet" + "github.com/lightningnetwork/lnd/lnwallet/chainfee" "github.com/lightningnetwork/lnd/lnwire" "github.com/lightningnetwork/lnd/protofsm" "github.com/lightningnetwork/lnd/tlv" @@ -171,7 +173,7 @@ func (c *ChannelActive) ProcessEvent(event ProtocolEvent, env *Environment, } chancloserLog.Infof("ChannelPoint(%v): sending shutdown msg, "+ - "delivery_script=%v", env.ChanPoint, shutdownScript) + "delivery_script=%x", env.ChanPoint, shutdownScript) // From here, we'll transition to the shutdown pending state. In // this state we await their shutdown message (self loop), then @@ -730,6 +732,7 @@ func (l *LocalCloseStart) ProcessEvent(event ProtocolEvent, env *Environment, return &CloseStateTransition{ NextState: &LocalOfferSent{ ProposedFee: absoluteFee, + ProposedFeeRate: msg.TargetFeeRate, LocalSig: wireSig, CloseChannelTerms: l.CloseChannelTerms, }, @@ -839,6 +842,7 @@ func (l *LocalOfferSent) ProcessEvent(event ProtocolEvent, env *Environment, return &CloseStateTransition{ NextState: &ClosePending{ CloseTx: closeTx, + FeeRate: l.ProposedFeeRate, }, NewEvents: fn.Some(protofsm.EmittedEvent[ProtocolEvent]{ ExternalEvents: broadcastEvent, @@ -995,11 +999,20 @@ func (l *RemoteCloseStart) ProcessEvent(event ProtocolEvent, env *Environment, sendEvent, broadcastEvent, } + // We'll also compute the final fee rate that the remote party + // paid based off the absolute fee and the size of the closing + // transaction. 
+ vSize := mempool.GetTxVirtualSize(btcutil.NewTx(closeTx)) + feeRate := chainfee.SatPerVByte( + int64(msg.SigMsg.FeeSatoshis) / vSize, + ) + // Now that we've extracted the signature, we'll transition to // the next state where we'll sign+broadcast the sig. return &CloseStateTransition{ NextState: &ClosePending{ CloseTx: closeTx, + FeeRate: feeRate, }, NewEvents: fn.Some(protofsm.EmittedEvent[ProtocolEvent]{ ExternalEvents: daemonEvents, From 7fc62840de3ce4f9aeb97268fc5a8a99061cdf71 Mon Sep 17 00:00:00 2001 From: Olaoluwa Osuntokun Date: Thu, 7 Mar 2024 20:10:27 -0800 Subject: [PATCH 2/8] lnwallet/chancloser: ignore spurious channel flushed events If we go to close while the channel is already flushed, we might get an extra event, so we can safely ignore it and do a self state transition. --- lnwallet/chancloser/rbf_coop_transitions.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/lnwallet/chancloser/rbf_coop_transitions.go b/lnwallet/chancloser/rbf_coop_transitions.go index 9dec4ccfb..2667b7e4a 100644 --- a/lnwallet/chancloser/rbf_coop_transitions.go +++ b/lnwallet/chancloser/rbf_coop_transitions.go @@ -582,6 +582,12 @@ func (c *ClosingNegotiation) ProcessEvent(event ProtocolEvent, env *Environment, // we receive a confirmation event, or we receive a signal to restart // the co-op close process. switch msg := event.(type) { + // Ignore any potential duplicate channel flushed events. + case *ChannelFlushed: + return &CloseStateTransition{ + NextState: c, + }, nil + // If we get a confirmation, then the spend request we issued when we // were leaving the ChannelFlushing state has been confirmed. We'll // now transition to the StateFin state. From b34ff32dda1c90ad954a7c18cd1ec57117594a75 Mon Sep 17 00:00:00 2001 From: Olaoluwa Osuntokun Date: Fri, 7 Feb 2025 18:15:36 -0800 Subject: [PATCH 3/8] lnwire: update closing_complete and closing_sig to latest spec draft Both these messages now carry the address of both parties, so you can update an address without needing to send shutdown again. 
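For illustration, a minimal, self-contained sketch of the receiver-side handling these extra script fields enable, mirroring the validation added later in this series (the function and parameter names below are illustrative only, not lnd's actual API):

    package main

    import (
    	"bytes"
    	"fmt"
    )

    // validateCloseScripts sketches how a receiver can use the scripts now
    // carried in closing_complete/closing_sig: the closee script must equal
    // the script we last sent, while a changed closer script is simply
    // adopted for the next counter-signed close transaction.
    func validateCloseScripts(ourScript, closeeScript, closerScript,
    	knownRemoteScript []byte) ([]byte, error) {

    	// The closee script must match the script we last sent; otherwise
    	// the remote party is signing over the wrong output for us.
    	if !bytes.Equal(ourScript, closeeScript) {
    		return nil, fmt.Errorf("wrong local script: expected %x, "+
    			"got %x", ourScript, closeeScript)
    	}

    	// A changed closer script is adopted directly, with no additional
    	// shutdown exchange required.
    	if !bytes.Equal(knownRemoteScript, closerScript) {
    		return closerScript, nil
    	}

    	return knownRemoteScript, nil
    }

    func main() {
    	our := []byte{0x00, 0x14, 0x01}
    	theirOld := []byte{0x00, 0x14, 0x02}
    	theirNew := []byte{0x00, 0x14, 0x03}

    	updated, err := validateCloseScripts(our, our, theirNew, theirOld)
    	fmt.Println(updated, err)
    }

The design choice this reflects: a mismatched closee script is a protocol violation and is rejected, whereas a new closer script only changes where the sender's own funds go, so it can be accepted unilaterally.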
--- lnwallet/chancloser/rbf_coop_transitions.go | 19 +++++++---- lnwire/closing_complete.go | 20 ++++++++++- lnwire/closing_sig.go | 38 ++++++++++++++++++++- lnwire/lnwire_test.go | 26 ++++++++++++++ 4 files changed, 94 insertions(+), 9 deletions(-) diff --git a/lnwallet/chancloser/rbf_coop_transitions.go b/lnwallet/chancloser/rbf_coop_transitions.go index 2667b7e4a..01ab5c32f 100644 --- a/lnwallet/chancloser/rbf_coop_transitions.go +++ b/lnwallet/chancloser/rbf_coop_transitions.go @@ -722,12 +722,13 @@ func (l *LocalCloseStart) ProcessEvent(event ProtocolEvent, env *Environment, // TODO(roasbeef): type alias for protocol event sendEvent := protofsm.DaemonEventSet{&protofsm.SendMsgEvent[ProtocolEvent]{ //nolint:ll TargetPeer: env.ChanPeer, - // TODO(roasbeef): mew new func Msgs: []lnwire.Message{&lnwire.ClosingComplete{ - ChannelID: env.ChanID, - FeeSatoshis: absoluteFee, - LockTime: env.BlockHeight, - ClosingSigs: closingSigs, + ChannelID: env.ChanID, + CloserScript: l.LocalDeliveryScript, + CloseeScript: l.RemoteDeliveryScript, + FeeSatoshis: absoluteFee, + LockTime: env.BlockHeight, + ClosingSigs: closingSigs, }}, }} @@ -991,8 +992,12 @@ func (l *RemoteCloseStart) ProcessEvent(event ProtocolEvent, env *Environment, sendEvent := &protofsm.SendMsgEvent[ProtocolEvent]{ TargetPeer: env.ChanPeer, Msgs: []lnwire.Message{&lnwire.ClosingSig{ - ChannelID: env.ChanID, - ClosingSigs: closingSigs, + ChannelID: env.ChanID, + CloserScript: l.RemoteDeliveryScript, + CloseeScript: l.LocalDeliveryScript, + FeeSatoshis: msg.SigMsg.FeeSatoshis, + LockTime: msg.SigMsg.LockTime, + ClosingSigs: closingSigs, }}, } broadcastEvent := &protofsm.BroadcastTxn{ diff --git a/lnwire/closing_complete.go b/lnwire/closing_complete.go index c16760a4e..4d390fd6f 100644 --- a/lnwire/closing_complete.go +++ b/lnwire/closing_complete.go @@ -30,6 +30,14 @@ type ClosingComplete struct { // ChannelID serves to identify which channel is to be closed. ChannelID ChannelID + // CloserScript is the script to which the channel funds will be paid + // for the closer (the person sending the ClosingComplete) message. + CloserScript DeliveryAddress + + // CloseeScript is the script to which the channel funds will be paid + // (the person receiving the ClosingComplete message). + CloseeScript DeliveryAddress + // FeeSatoshis is the total fee in satoshis that the party to the // channel would like to propose for the close transaction. FeeSatoshis btcutil.Amount @@ -79,7 +87,10 @@ func decodeClosingSigs(c *ClosingSigs, tlvRecords ExtraOpaqueData) error { // passed io.Reader. func (c *ClosingComplete) Decode(r io.Reader, _ uint32) error { // First, read out all the fields that are hard coded into the message. 
- err := ReadElements(r, &c.ChannelID, &c.FeeSatoshis, &c.LockTime) + err := ReadElements( + r, &c.ChannelID, &c.CloserScript, &c.CloseeScript, + &c.FeeSatoshis, &c.LockTime, + ) if err != nil { return err } @@ -125,6 +136,13 @@ func (c *ClosingComplete) Encode(w *bytes.Buffer, _ uint32) error { return err } + if err := WriteDeliveryAddress(w, c.CloserScript); err != nil { + return err + } + if err := WriteDeliveryAddress(w, c.CloseeScript); err != nil { + return err + } + if err := WriteSatoshi(w, c.FeeSatoshis); err != nil { return err } diff --git a/lnwire/closing_sig.go b/lnwire/closing_sig.go index df160d12e..2c73fa720 100644 --- a/lnwire/closing_sig.go +++ b/lnwire/closing_sig.go @@ -3,6 +3,8 @@ package lnwire import ( "bytes" "io" + + "github.com/btcsuite/btcd/btcutil" ) // ClosingSig is sent in response to a ClosingComplete message. It carries the @@ -11,6 +13,22 @@ type ClosingSig struct { // ChannelID serves to identify which channel is to be closed. ChannelID ChannelID + // CloserScript is the script to which the channel funds will be paid + // for the closer (the person sending the ClosingComplete) message. + CloserScript DeliveryAddress + + // CloseeScript is the script to which the channel funds will be paid + // (the person receiving the ClosingComplete message). + CloseeScript DeliveryAddress + + // FeeSatoshis is the total fee in satoshis that the party to the + // channel proposed for the close transaction. + FeeSatoshis btcutil.Amount + + // LockTime is the locktime number to be used in the input spending the + // funding transaction. + LockTime uint32 + // ClosingSigs houses the 3 possible signatures that can be sent. ClosingSigs @@ -24,7 +42,10 @@ type ClosingSig struct { // io.Reader. func (c *ClosingSig) Decode(r io.Reader, _ uint32) error { // First, read out all the fields that are hard coded into the message. - err := ReadElements(r, &c.ChannelID) + err := ReadElements( + r, &c.ChannelID, &c.CloserScript, &c.CloseeScript, + &c.FeeSatoshis, &c.LockTime, + ) if err != nil { return err } @@ -53,6 +74,21 @@ func (c *ClosingSig) Encode(w *bytes.Buffer, _ uint32) error { return err } + if err := WriteDeliveryAddress(w, c.CloserScript); err != nil { + return err + } + if err := WriteDeliveryAddress(w, c.CloseeScript); err != nil { + return err + } + + if err := WriteSatoshi(w, c.FeeSatoshis); err != nil { + return err + } + + if err := WriteUint32(w, c.LockTime); err != nil { + return err + } + recordProducers := closingSigRecords(&c.ClosingSigs) err := EncodeMessageExtraData(&c.ExtraData, recordProducers...) 
diff --git a/lnwire/lnwire_test.go b/lnwire/lnwire_test.go index db3ffd39d..123689902 100644 --- a/lnwire/lnwire_test.go +++ b/lnwire/lnwire_test.go @@ -1355,6 +1355,18 @@ func TestLightningWireProtocol(t *testing.T) { LockTime: uint32(r.Int63()), ClosingSigs: ClosingSigs{}, } + req.CloserScript, err = randDeliveryAddress(r) + if err != nil { + t.Fatalf("unable to generate delivery "+ + "address: %v", err) + return + } + req.CloseeScript, err = randDeliveryAddress(r) + if err != nil { + t.Fatalf("unable to generate delivery "+ + "address: %v", err) + return + } if r.Intn(2) == 0 { sig := req.CloserNoClosee.Zero() @@ -1403,6 +1415,20 @@ func TestLightningWireProtocol(t *testing.T) { req := ClosingSig{ ChannelID: ChannelID(c), ClosingSigs: ClosingSigs{}, + FeeSatoshis: btcutil.Amount(r.Int63()), + LockTime: uint32(r.Int63()), + } + req.CloserScript, err = randDeliveryAddress(r) + if err != nil { + t.Fatalf("unable to generate delivery "+ + "address: %v", err) + return + } + req.CloseeScript, err = randDeliveryAddress(r) + if err != nil { + t.Fatalf("unable to generate delivery "+ + "address: %v", err) + return } if r.Intn(2) == 0 { From fff99e1b3576627548c05ebd00fe0efa439d4641 Mon Sep 17 00:00:00 2001 From: Olaoluwa Osuntokun Date: Fri, 7 Feb 2025 18:21:06 -0800 Subject: [PATCH 4/8] protofsm: update state machine w/ new spec flow In this commit, we implement the latest version of the RBF loop as described in the spec. We remove the self loop back based on sending or receiving shutdown. Instead, from the ClosePending state, we can trigger a new loop by sending SendOfferEvent (we bump), or OfferReceivedEvent (they bump). We also update the rbf state machine w/ the new close addr logic. This log ensures that the remote party always sends our current address, and that if they send a new address, we'll update our view of it, and counter sign the correct transaction. We also add a CloseErr state. With this new state, we can ensure that we're able to properly report errors back to the RPC client, and also optionally force a reconnection or send a warning to the remote party. --- lnwallet/chancloser/rbf_coop_states.go | 148 ++++++++++- lnwallet/chancloser/rbf_coop_test.go | 276 +++++++++++++------- lnwallet/chancloser/rbf_coop_transitions.go | 196 +++++++++++--- 3 files changed, 491 insertions(+), 129 deletions(-) diff --git a/lnwallet/chancloser/rbf_coop_states.go b/lnwallet/chancloser/rbf_coop_states.go index 5b8ba8bcc..3850abfc9 100644 --- a/lnwallet/chancloser/rbf_coop_states.go +++ b/lnwallet/chancloser/rbf_coop_states.go @@ -49,6 +49,11 @@ var ( // ErrCloserAndClosee is returned when we expect a sig covering both // outputs, it isn't present. ErrCloserAndClosee = fmt.Errorf("expected CloserAndClosee sig") + + // ErrWrongLocalScript is returned when the remote party sends a + // ClosingComplete message that doesn't carry our last local script + // sent. + ErrWrongLocalScript = fmt.Errorf("wrong local script") ) // ProtocolEvent is a special interface used to create the equivalent of a @@ -382,7 +387,7 @@ type AsymmetricPeerState interface { type ProtocolStates interface { ChannelActive | ShutdownPending | ChannelFlushing | ClosingNegotiation | LocalCloseStart | LocalOfferSent | RemoteCloseStart | - ClosePending | CloseFin + ClosePending | CloseFin | CloseErr } // ChannelActive is the base state for the channel closer state machine. In @@ -523,6 +528,13 @@ type ClosingNegotiation struct { // the ShouldRouteTo method to determine which state route incoming // events to. 
PeerState lntypes.Dual[AsymmetricPeerState] + + // CloseChannelTerms is the terms we'll use to close the channel. We + // hold a value here which is pointed to by the various + // AsymmetricPeerState instances. This allows us to update this value if + // the remote peer sends a new address, with each of the state noting + // the new value via a pointer. + *CloseChannelTerms } // String returns the name of the state for ClosingNegotiation. @@ -542,6 +554,56 @@ func (c *ClosingNegotiation) IsTerminal() bool { // protocolSealed indicates that this struct is a ProtocolEvent instance. func (c *ClosingNegotiation) protocolStateSealed() {} +// ErrState can be used to introspect into a benign error related to a state +// transition. +type ErrState interface { + sealed() + + error + + // Err returns an error for the ErrState. + Err() error +} + +// ErrStateCantPayForFee is sent when the local party attempts a fee update +// that they can't actually party for. +type ErrStateCantPayForFee struct { + localBalance btcutil.Amount + + attemptedFee btcutil.Amount +} + +// NewErrStateCantPayForFee returns a new NewErrStateCantPayForFee error. +func NewErrStateCantPayForFee(localBalance, attemptedFee btcutil.Amount, +) *ErrStateCantPayForFee { + + return &ErrStateCantPayForFee{ + localBalance: localBalance, + attemptedFee: attemptedFee, + } +} + +// sealed makes this a sealed interface. +func (e *ErrStateCantPayForFee) sealed() { +} + +// Err returns an error for the ErrState. +func (e *ErrStateCantPayForFee) Err() error { + return fmt.Errorf("cannot pay for fee of %v, only have %v local "+ + "balance", e.attemptedFee, e.localBalance) +} + +// Error returns the error string for the ErrState. +func (e *ErrStateCantPayForFee) Error() string { + return e.Err().Error() +} + +// String returns the string for the ErrStateCantPayForFee. +func (e *ErrStateCantPayForFee) String() string { + return fmt.Sprintf("ErrStateCantPayForFee(local_balance=%v, "+ + "attempted_fee=%v)", e.localBalance, e.attemptedFee) +} + // CloseChannelTerms is a set of terms that we'll use to close the channel. This // includes the balances of the channel, and the scripts we'll use to send each // party's funds to. @@ -553,11 +615,11 @@ type CloseChannelTerms struct { // DeriveCloseTxOuts takes the close terms, and returns the local and remote tx // out for the close transaction. If an output is dust, then it'll be nil. -// -// TODO(roasbeef): add func for w/e heuristic to not manifest own output? func (c *CloseChannelTerms) DeriveCloseTxOuts() (*wire.TxOut, *wire.TxOut) { //nolint:ll deriveTxOut := func(balance btcutil.Amount, pkScript []byte) *wire.TxOut { + // We'll base the existence of the output on our normal dust + // check. dustLimit := lnwallet.DustLimitForSize(len(pkScript)) if balance >= dustLimit { return &wire.TxOut{ @@ -618,7 +680,7 @@ func (c *CloseChannelTerms) RemoteCanPayFees(absoluteFee btcutil.Amount) bool { // input events: // - SendOfferEvent type LocalCloseStart struct { - CloseChannelTerms + *CloseChannelTerms } // String returns the name of the state for LocalCloseStart, including proposed @@ -658,7 +720,7 @@ func (l *LocalCloseStart) protocolStateSealed() {} // input events: // - LocalSigReceived type LocalOfferSent struct { - CloseChannelTerms + *CloseChannelTerms // ProposedFee is the fee we proposed to the remote party. ProposedFee btcutil.Amount @@ -710,13 +772,26 @@ type ClosePending struct { // CloseTx is the pending close transaction. 
CloseTx *wire.MsgTx + *CloseChannelTerms + // FeeRate is the fee rate of the closing transaction. FeeRate chainfee.SatPerVByte + + // Party indicates which party is at this state. This is used to + // implement the state transition properly, based on ShouldRouteTo. + Party lntypes.ChannelParty } // String returns the name of the state for ClosePending. func (c *ClosePending) String() string { - return fmt.Sprintf("ClosePending(txid=%v)", c.CloseTx.TxHash()) + return fmt.Sprintf("ClosePending(txid=%v, party=%v, fee_rate=%v)", + c.CloseTx.TxHash(), c.Party, c.FeeRate) +} + +// isType returns true if the value is of type T. +func isType[T any](value any) bool { + _, ok := value.(T) + return ok } // ShouldRouteTo returns true if the target state should process the target @@ -726,6 +801,17 @@ func (c *ClosePending) ShouldRouteTo(event ProtocolEvent) bool { case *SpendEvent: return true default: + switch { + case c.Party == lntypes.Local && isType[*SendOfferEvent](event): + return true + + case c.Party == lntypes.Remote && isType[*OfferReceivedEvent]( + event, + ): + + return true + } + return false } } @@ -765,7 +851,7 @@ func (c *CloseFin) IsTerminal() bool { // - fromState: ChannelFlushing // - toState: ClosePending type RemoteCloseStart struct { - CloseChannelTerms + *CloseChannelTerms } // String returns the name of the state for RemoteCloseStart. @@ -792,6 +878,54 @@ func (l *RemoteCloseStart) IsTerminal() bool { return false } +// CloseErr is an error state in the protocol. We enter this state when a +// protocol constraint is violated, or an upfront sanity check fails. +type CloseErr struct { + ErrState + + *CloseChannelTerms + + // Party indicates which party is at this state. This is used to + // implement the state transition properly, based on ShouldRouteTo. + Party lntypes.ChannelParty +} + +// String returns the name of the state for CloseErr, including error and party +// details. +func (c *CloseErr) String() string { + return fmt.Sprintf("CloseErr(Party: %v, Error: %v)", c.Party, c.Err()) +} + +// ShouldRouteTo returns true if the target state should process the target +// event. +func (c *CloseErr) ShouldRouteTo(event ProtocolEvent) bool { + switch event.(type) { + case *SpendEvent: + return true + default: + switch { + case c.Party == lntypes.Local && isType[*SendOfferEvent](event): + return true + + case c.Party == lntypes.Remote && isType[*OfferReceivedEvent]( + event, + ): + + return true + } + + return false + } +} + +// protocolStateSealed indicates that this struct is a ProtocolEvent instance. +func (c *CloseErr) protocolStateSealed() {} + +// IsTerminal returns true if the target state is a terminal state. +func (c *CloseErr) IsTerminal() bool { + return true +} + // RbfChanCloser is a state machine that handles the RBF-enabled cooperative // channel close protocol. 
type RbfChanCloser = protofsm.StateMachine[ProtocolEvent, *Environment] diff --git a/lnwallet/chancloser/rbf_coop_test.go b/lnwallet/chancloser/rbf_coop_test.go index d0bd76472..7456025c3 100644 --- a/lnwallet/chancloser/rbf_coop_test.go +++ b/lnwallet/chancloser/rbf_coop_test.go @@ -110,7 +110,10 @@ func assertStateTransitions[Event any, Env protofsm.Environment]( t.Helper() for _, expectedState := range expectedStates { - newState := <-stateSub.NewItemCreated.ChanOut() + newState, err := fn.RecvOrTimeout( + stateSub.NewItemCreated.ChanOut(), 10*time.Millisecond, + ) + require.NoError(t, err, "expected state: %T", expectedState) require.IsType(t, expectedState, newState) } @@ -598,6 +601,8 @@ func (r *rbfCloserTestHarness) assertSingleRbfIteration( // response of the remote party, which completes one iteration localSigEvent := &LocalSigReceived{ SigMsg: lnwire.ClosingSig{ + CloserScript: localAddr, + CloseeScript: remoteAddr, ClosingSigs: lnwire.ClosingSigs{ CloserAndClosee: newSigTlv[tlv.TlvType3]( remoteWireSig, @@ -620,26 +625,16 @@ func (r *rbfCloserTestHarness) assertSingleRbfIteration( } func (r *rbfCloserTestHarness) assertSingleRemoteRbfIteration( - initEvent ProtocolEvent, balanceAfterClose, absoluteFee btcutil.Amount, - sequence uint32, iteration bool) { + initEvent *OfferReceivedEvent, balanceAfterClose, + absoluteFee btcutil.Amount, sequence uint32, iteration bool) { ctx := context.Background() - // If this is an iteration, then we expect some intermediate states, - // before we enter the main RBF/sign loop. - if iteration { - r.expectFeeEstimate(absoluteFee, 1) - - r.assertStateTransitions( - &ChannelActive{}, &ShutdownPending{}, - &ChannelFlushing{}, &ClosingNegotiation{}, - ) - } - // When we receive the signature below, our local state machine should // move to finalize the close. r.expectRemoteCloseFinalized( - &localSig, &remoteSig, localAddr, remoteAddr, + &localSig, &remoteSig, initEvent.SigMsg.CloseeScript, + initEvent.SigMsg.CloserScript, absoluteFee, balanceAfterClose, false, ) @@ -648,6 +643,13 @@ func (r *rbfCloserTestHarness) assertSingleRemoteRbfIteration( // Our outer state should transition to ClosingNegotiation state. r.assertStateTransitions(&ClosingNegotiation{}) + // If this is an iteration, then we'll go from ClosePending -> + // RemoteCloseStart -> ClosePending. So we'll assert an extra transition + // here. + if iteration { + r.assertStateTransitions(&ClosingNegotiation{}) + } + // If we examine the final resting state, we should see that the we're // now in the ClosePending state for the remote peer. currentState := assertStateT[*ClosingNegotiation](r) @@ -1184,9 +1186,10 @@ func TestRbfCloseClosingNegotiationLocal(t *testing.T) { startingState := &ClosingNegotiation{ PeerState: lntypes.Dual[AsymmetricPeerState]{ Local: &LocalCloseStart{ - CloseChannelTerms: *closeTerms, + CloseChannelTerms: closeTerms, }, }, + CloseChannelTerms: closeTerms, } sendOfferEvent := &SendOfferEvent{ @@ -1195,6 +1198,8 @@ func TestRbfCloseClosingNegotiationLocal(t *testing.T) { balanceAfterClose := localBalance.ToSatoshis() - absoluteFee + // TODO(roasbeef): add test case for error state validation, then resume + // In this state, we'll simulate deciding that we need to send a new // offer to the remote party. t.Run("send_offer_iteration_no_dust", func(t *testing.T) { @@ -1231,6 +1236,8 @@ func TestRbfCloseClosingNegotiationLocal(t *testing.T) { // we'll specify 2 signature fields. 
localSigEvent := &LocalSigReceived{ SigMsg: lnwire.ClosingSig{ + CloserScript: localAddr, + CloseeScript: remoteAddr, ClosingSigs: lnwire.ClosingSigs{ CloserNoClosee: newSigTlv[tlv.TlvType1]( remoteWireSig, @@ -1261,9 +1268,10 @@ func TestRbfCloseClosingNegotiationLocal(t *testing.T) { firstState := &ClosingNegotiation{ PeerState: lntypes.Dual[AsymmetricPeerState]{ Local: &LocalCloseStart{ - CloseChannelTerms: newCloseTerms, + CloseChannelTerms: &newCloseTerms, }, }, + CloseChannelTerms: &newCloseTerms, } closeHarness := newCloser(t, &harnessCfg{ @@ -1286,9 +1294,10 @@ func TestRbfCloseClosingNegotiationLocal(t *testing.T) { firstState := &ClosingNegotiation{ PeerState: lntypes.Dual[AsymmetricPeerState]{ Local: &LocalCloseStart{ - CloseChannelTerms: *closeTerms, + CloseChannelTerms: closeTerms, }, }, + CloseChannelTerms: closeTerms, } closeHarness := newCloser(t, &harnessCfg{ @@ -1306,15 +1315,55 @@ func TestRbfCloseClosingNegotiationLocal(t *testing.T) { ) }) + // In this test, we'll assert that we're able to restart the RBF loop + // to trigger additional signature iterations. + t.Run("send_offer_rbf_wrong_local_script", func(t *testing.T) { + firstState := &ClosingNegotiation{ + PeerState: lntypes.Dual[AsymmetricPeerState]{ + Local: &LocalCloseStart{ + CloseChannelTerms: closeTerms, + }, + }, + CloseChannelTerms: closeTerms, + } + + closeHarness := newCloser(t, &harnessCfg{ + initialState: fn.Some[ProtocolState](firstState), + localUpfrontAddr: fn.Some(localAddr), + }) + defer closeHarness.stopAndAssert() + + // The remote party will send a ClosingSig message, but with the + // wrong local script. We should expect an error. + closeHarness.expectFailure(ErrWrongLocalScript) + + // We'll send this message in directly, as we shouldn't get any + // further in the process. + // assuming we start in this negotiation state. + localSigEvent := &LocalSigReceived{ + SigMsg: lnwire.ClosingSig{ + CloserScript: remoteAddr, + CloseeScript: remoteAddr, + ClosingSigs: lnwire.ClosingSigs{ + CloserAndClosee: newSigTlv[tlv.TlvType3]( //nolint:ll + remoteWireSig, + ), + }, + }, + } + closeHarness.chanCloser.SendEvent(ctx, localSigEvent) + }) + // In this test, we'll assert that we're able to restart the RBF loop // to trigger additional signature iterations. t.Run("send_offer_rbf_iteration_loop", func(t *testing.T) { firstState := &ClosingNegotiation{ PeerState: lntypes.Dual[AsymmetricPeerState]{ Local: &LocalCloseStart{ - CloseChannelTerms: *closeTerms, + CloseChannelTerms: closeTerms, }, }, + CloseChannelTerms: closeTerms, } closeHarness := newCloser(t, &harnessCfg{ @@ -1330,44 +1379,18 @@ func TestRbfCloseClosingNegotiationLocal(t *testing.T) { noDustExpect, ) - // Next, we'll send in a new SendShutdown event which simulates - // the user requesting a RBF fee bump. We'll use 10x the fee we - // used in the last iteration. + // Next, we'll send in a new SendOfferEvent event which + // simulates the user requesting a RBF fee bump. We'll use 10x + // the fee we used in the last iteration. rbfFeeBump := chainfee.FeePerKwFloor.FeePerVByte() * 10 - sendShutdown := &SendShutdown{ - IdealFeeRate: rbfFeeBump, + localOffer := &SendOfferEvent{ + TargetFeeRate: rbfFeeBump, } - // We should send shutdown as normal, but skip some other - // checks as we know the close is in progress. 
- closeHarness.expectShutdownEvents(shutdownExpect{ - allowSend: true, - finalBalances: fn.Some(closeTerms.ShutdownBalances), - recvShutdown: true, - }) - closeHarness.expectMsgSent( - singleMsgMatcher[*lnwire.Shutdown](nil), - ) - - closeHarness.chanCloser.SendEvent(ctx, sendShutdown) - - // We should first transition to the Channel Active state - // momentarily, before transitioning to the shutdown pending - // state. - closeHarness.assertStateTransitions( - &ChannelActive{}, &ShutdownPending{}, - ) - - // Next, we'll send in the shutdown received event, which - // should transition us to the channel flushing state. - shutdownEvent := &ShutdownReceived{ - ShutdownScript: remoteAddr, - } - - // Now we expect that aanother full RBF iteration takes place - // (we initiatea a new local sig). + // Now we expect that another full RBF iteration takes place (we + // initiate a new local sig). closeHarness.assertSingleRbfIteration( - shutdownEvent, balanceAfterClose, absoluteFee, + localOffer, balanceAfterClose, absoluteFee, noDustExpect, ) @@ -1403,12 +1426,13 @@ func TestRbfCloseClosingNegotiationRemote(t *testing.T) { startingState := &ClosingNegotiation{ PeerState: lntypes.Dual[AsymmetricPeerState]{ Local: &LocalCloseStart{ - CloseChannelTerms: *closeTerms, + CloseChannelTerms: closeTerms, }, Remote: &RemoteCloseStart{ - CloseChannelTerms: *closeTerms, + CloseChannelTerms: closeTerms, }, }, + CloseChannelTerms: closeTerms, } balanceAfterClose := remoteBalance.ToSatoshis() - absoluteFee @@ -1430,7 +1454,9 @@ func TestRbfCloseClosingNegotiationRemote(t *testing.T) { // be higher than the remote party's balance. feeOffer := &OfferReceivedEvent{ SigMsg: lnwire.ClosingComplete{ - FeeSatoshis: absoluteFee * 10, + CloserScript: remoteAddr, + CloseeScript: localAddr, + FeeSatoshis: absoluteFee * 10, }, } closeHarness.chanCloser.SendEvent(ctx, feeOffer) @@ -1449,12 +1475,13 @@ func TestRbfCloseClosingNegotiationRemote(t *testing.T) { firstState := &ClosingNegotiation{ PeerState: lntypes.Dual[AsymmetricPeerState]{ Local: &LocalCloseStart{ - CloseChannelTerms: closingTerms, + CloseChannelTerms: &closingTerms, }, Remote: &RemoteCloseStart{ - CloseChannelTerms: closingTerms, + CloseChannelTerms: &closingTerms, }, }, + CloseChannelTerms: &closingTerms, } closeHarness := newCloser(t, &harnessCfg{ @@ -1469,7 +1496,9 @@ func TestRbfCloseClosingNegotiationRemote(t *testing.T) { // includes our output. feeOffer := &OfferReceivedEvent{ SigMsg: lnwire.ClosingComplete{ - FeeSatoshis: absoluteFee, + FeeSatoshis: absoluteFee, + CloserScript: remoteAddr, + CloseeScript: localAddr, ClosingSigs: lnwire.ClosingSigs{ CloserAndClosee: newSigTlv[tlv.TlvType3]( //nolint:ll remoteWireSig, @@ -1498,7 +1527,9 @@ func TestRbfCloseClosingNegotiationRemote(t *testing.T) { // signature as it excludes an output. feeOffer := &OfferReceivedEvent{ SigMsg: lnwire.ClosingComplete{ - FeeSatoshis: absoluteFee, + FeeSatoshis: absoluteFee, + CloserScript: remoteAddr, + CloseeScript: localAddr, ClosingSigs: lnwire.ClosingSigs{ CloserNoClosee: newSigTlv[tlv.TlvType1]( //nolint:ll remoteWireSig, @@ -1516,8 +1547,8 @@ func TestRbfCloseClosingNegotiationRemote(t *testing.T) { // loops to enable the remote party to sign.new versions of the co-op // close transaction. t.Run("recv_offer_rbf_loop_iterations", func(t *testing.T) { - // We'll modify our s.t we're unable to pay for fees, but - // aren't yet dust. + // We'll modify our balance s.t we're unable to pay for fees, + // but aren't yet dust. 
closingTerms := *closeTerms closingTerms.ShutdownBalances.LocalBalance = lnwire.NewMSatFromSatoshis( //nolint:ll 9000, @@ -1526,12 +1557,13 @@ func TestRbfCloseClosingNegotiationRemote(t *testing.T) { firstState := &ClosingNegotiation{ PeerState: lntypes.Dual[AsymmetricPeerState]{ Local: &LocalCloseStart{ - CloseChannelTerms: closingTerms, + CloseChannelTerms: &closingTerms, }, Remote: &RemoteCloseStart{ - CloseChannelTerms: closingTerms, + CloseChannelTerms: &closingTerms, }, }, + CloseChannelTerms: &closingTerms, } closeHarness := newCloser(t, &harnessCfg{ @@ -1542,8 +1574,10 @@ func TestRbfCloseClosingNegotiationRemote(t *testing.T) { feeOffer := &OfferReceivedEvent{ SigMsg: lnwire.ClosingComplete{ - FeeSatoshis: absoluteFee, - LockTime: 1, + CloserScript: remoteAddr, + CloseeScript: localAddr, + FeeSatoshis: absoluteFee, + LockTime: 1, ClosingSigs: lnwire.ClosingSigs{ CloserAndClosee: newSigTlv[tlv.TlvType3]( //nolint:ll remoteWireSig, @@ -1560,31 +1594,9 @@ func TestRbfCloseClosingNegotiationRemote(t *testing.T) { false, ) - // At this point, we've completed a single RBF iteration, and - // want to test further iterations, so we'll use a shutdown - // even tot kick it all off. - // - // Before we send the shutdown messages below, we'll mark the - // balances as so we fast track to the negotiation state. - closeHarness.expectShutdownEvents(shutdownExpect{ - allowSend: true, - finalBalances: fn.Some(closingTerms.ShutdownBalances), - recvShutdown: true, - }) - closeHarness.expectMsgSent( - singleMsgMatcher[*lnwire.Shutdown](nil), - ) - - // We'll now simulate the start of the RBF loop, by receiving a - // new Shutdown message from the remote party. This signals - // that they want to obtain a new commit sig. - closeHarness.chanCloser.SendEvent( - ctx, &ShutdownReceived{ShutdownScript: remoteAddr}, - ) - - // Next, we'll receive an offer from the remote party, and - // drive another RBF iteration. This time, we'll increase the - // absolute fee by 1k sats. + // Next, we'll receive an offer from the remote party, and drive + // another RBF iteration. This time, we'll increase the absolute + // fee by 1k sats. feeOffer.SigMsg.FeeSatoshis += 1000 absoluteFee = feeOffer.SigMsg.FeeSatoshis closeHarness.assertSingleRemoteRbfIteration( @@ -1595,5 +1607,85 @@ func TestRbfCloseClosingNegotiationRemote(t *testing.T) { closeHarness.assertNoStateTransitions() }) - // TODO(roasbeef): cross sig case? tested isolation, so wolog? + t.Run("recv_offer_wrong_local_script", func(t *testing.T) { + closeHarness := newCloser(t, &harnessCfg{ + initialState: fn.Some[ProtocolState](startingState), + }) + defer closeHarness.stopAndAssert() + + // The remote party will send a ClosingComplete message, but + // with the wrong local script. We should expect an error. + closeHarness.expectFailure(ErrWrongLocalScript) + + // We'll send our remote addr as the Closee script, which should + // trigger an error. + feeOffer := &OfferReceivedEvent{ + SigMsg: lnwire.ClosingComplete{ + FeeSatoshis: absoluteFee, + CloserScript: remoteAddr, + CloseeScript: remoteAddr, + ClosingSigs: lnwire.ClosingSigs{ + CloserNoClosee: newSigTlv[tlv.TlvType1]( //nolint:ll + remoteWireSig, + ), + }, + }, + } + closeHarness.chanCloser.SendEvent(ctx, feeOffer) + + // We shouldn't have transitioned to a new state. 
+ closeHarness.assertNoStateTransitions() + }) + + t.Run("recv_offer_remote_addr_change", func(t *testing.T) { + closingTerms := *closeTerms + + firstState := &ClosingNegotiation{ + PeerState: lntypes.Dual[AsymmetricPeerState]{ + Local: &LocalCloseStart{ + CloseChannelTerms: &closingTerms, + }, + Remote: &RemoteCloseStart{ + CloseChannelTerms: &closingTerms, + }, + }, + CloseChannelTerms: &closingTerms, + } + + closeHarness := newCloser(t, &harnessCfg{ + initialState: fn.Some[ProtocolState](firstState), + localUpfrontAddr: fn.Some(localAddr), + }) + defer closeHarness.stopAndAssert() + + // This time, the close request sent by the remote party will + // modify their normal remote address. This should cause us to + // recognize this, and counter sign the proper co-op close + // transaction. + newRemoteAddr := lnwire.DeliveryAddress(append( + []byte{txscript.OP_1, txscript.OP_DATA_32}, + bytes.Repeat([]byte{0x03}, 32)..., + )) + feeOffer := &OfferReceivedEvent{ + SigMsg: lnwire.ClosingComplete{ + CloserScript: newRemoteAddr, + CloseeScript: localAddr, + FeeSatoshis: absoluteFee, + LockTime: 1, + ClosingSigs: lnwire.ClosingSigs{ + CloserAndClosee: newSigTlv[tlv.TlvType3]( //nolint:ll + remoteWireSig, + ), + }, + }, + } + + // As we're already in the negotiation phase, we'll now trigger + // a new iteration by having the remote party send a new offer + // sig. + closeHarness.assertSingleRemoteRbfIteration( + feeOffer, balanceAfterClose, absoluteFee, sequence, + false, + ) + }) } diff --git a/lnwallet/chancloser/rbf_coop_transitions.go b/lnwallet/chancloser/rbf_coop_transitions.go index 01ab5c32f..452386825 100644 --- a/lnwallet/chancloser/rbf_coop_transitions.go +++ b/lnwallet/chancloser/rbf_coop_transitions.go @@ -1,6 +1,7 @@ package chancloser import ( + "bytes" "fmt" "github.com/btcsuite/btcd/btcec/v2" @@ -426,9 +427,6 @@ func (c *ChannelFlushing) ProcessEvent(event ProtocolEvent, env *Environment, c.EarlyRemoteOffer = fn.Some(*msg) - // TODO(roasbeef): unit test! - // * actually do this ^ - // We'll perform a noop update so we can wait for the actual // channel flushed event. return &CloseStateTransition{ @@ -469,8 +467,6 @@ func (c *ChannelFlushing) ProcessEvent(event ProtocolEvent, env *Environment, // We'll then use that fee rate to determine the absolute fee // we'd propose. - // - // TODO(roasbeef): need to sign the 3 diff versions of this? localTxOut, remoteTxOut := closeTerms.DeriveCloseTxOuts() absoluteFee := env.FeeEstimator.EstimateFee( env.ChanType, localTxOut, remoteTxOut, @@ -522,12 +518,13 @@ func (c *ChannelFlushing) ProcessEvent(event ProtocolEvent, env *Environment, NextState: &ClosingNegotiation{ PeerState: lntypes.Dual[AsymmetricPeerState]{ Local: &LocalCloseStart{ - CloseChannelTerms: closeTerms, + CloseChannelTerms: &closeTerms, }, Remote: &RemoteCloseStart{ - CloseChannelTerms: closeTerms, + CloseChannelTerms: &closeTerms, }, }, + CloseChannelTerms: &closeTerms, }, NewEvents: newEvents, }, nil @@ -571,6 +568,63 @@ func processNegotiateEvent(c *ClosingNegotiation, event ProtocolEvent, }, nil } +// updateAndValidateCloseTerms is a helper function that validates examines the +// incoming event, and decide if we need to update the remote party's address, +// or reject it if it doesn't include our latest address. 
+func (c *ClosingNegotiation) updateAndValidateCloseTerms(event ProtocolEvent, +) error { + + assertLocalScriptMatches := func(localScriptInMsg []byte) error { + if !bytes.Equal( + c.LocalDeliveryScript, localScriptInMsg, + ) { + + return fmt.Errorf("%w: remote party sent wrong "+ + "script, expected %x, got %x", + ErrWrongLocalScript, c.LocalDeliveryScript, + localScriptInMsg, + ) + } + + return nil + } + + switch msg := event.(type) { + // The remote party is sending us a new request to counter sign their + // version of the commitment transaction. + case *OfferReceivedEvent: + // Make sure that they're sending our local script, and not + // something else. + err := assertLocalScriptMatches(msg.SigMsg.CloseeScript) + if err != nil { + return err + } + + oldRemoteAddr := c.RemoteDeliveryScript + newRemoteAddr := msg.SigMsg.CloserScript + + // If they're sending a new script, then we'll update to the new + // one. + if !bytes.Equal(oldRemoteAddr, newRemoteAddr) { + c.RemoteDeliveryScript = newRemoteAddr + } + + // The remote party responded to our sig request with a signature for + // our version of the commitment transaction. + case *LocalSigReceived: + // Make sure that they're sending our local script, and not + // something else. + err := assertLocalScriptMatches(msg.SigMsg.CloserScript) + if err != nil { + return err + } + + return nil + } + + return nil +} + // ProcessEvent drives forward the composite states for the local and remote // party in response to new events. From this state, we'll continue to drive // forward the local and remote states until we arrive at the StateFin stage, @@ -597,22 +651,13 @@ func (c *ClosingNegotiation) ProcessEvent(event ProtocolEvent, env *Environment, ConfirmedTx: msg.Tx, }, }, nil + } - // Otherwise, if we receive a shutdown, or receive an event to send a - // shutdown, then we'll go back up to the ChannelActive state, and have - // it handle this event by emitting an internal event. - // - // TODO(roasbeef): both will have fee rate specified, so ok? - case *ShutdownReceived, *SendShutdown: - chancloserLog.Infof("ChannelPoint(%v): RBF case triggered, "+ - "restarting negotiation", env.ChanPoint) - - return &CloseStateTransition{ - NextState: &ChannelActive{}, - NewEvents: fn.Some(RbfEvent{ - InternalEvent: []ProtocolEvent{event}, - }), - }, nil + // At this point, we know its a new signature message. We'll validate, + // and maybe update the set of close terms based on what we receive. We + // might update the remote party's address for example. + if err := c.updateAndValidateCloseTerms(event); err != nil { + return nil, fmt.Errorf("event violates close terms: %w", err) } // If we get to this point, then we have an event that'll drive forward @@ -628,14 +673,14 @@ func (c *ClosingNegotiation) ProcessEvent(event ProtocolEvent, env *Environment, case c.PeerState.GetForParty(lntypes.Remote).ShouldRouteTo(event): chancloserLog.Infof("ChannelPoint(%v): routing %T to remote "+ - "chan state", env.ChanPoint, event) + "chan state", env.ChanPoint, event) // Drive forward the remote state based on the next event. 
return processNegotiateEvent(c, event, env, lntypes.Remote) } - return nil, fmt.Errorf("%w: received %T while in ClosingNegotiation", - ErrInvalidStateTransition, event) + return nil, fmt.Errorf("%w: received %T while in %v", + ErrInvalidStateTransition, event, c) } // newSigTlv is a helper function that returns a new optional TLV sig field for @@ -654,14 +699,34 @@ func (l *LocalCloseStart) ProcessEvent(event ProtocolEvent, env *Environment, // rate to generate for the closing transaction with our ideal fee // rate. case *SendOfferEvent: - // First, we'll figure out the absolute fee rate we should pay // given the state of the local/remote outputs. + // First, we'll figure out the absolute fee rate we should pay localTxOut, remoteTxOut := l.DeriveCloseTxOuts() absoluteFee := env.FeeEstimator.EstimateFee( env.ChanType, localTxOut, remoteTxOut, msg.TargetFeeRate.FeePerKWeight(), ) + // If we can't actually pay for fees here, then we'll just do a + // noop back to the same state to await a new fee rate. + if !l.LocalCanPayFees(absoluteFee) { + chancloserLog.Infof("ChannelPoint(%v): unable to pay "+ + "fee=%v with local balance %v, skipping "+ + "closing_complete", env.ChanPoint, absoluteFee, + l.LocalBalance) + + return &CloseStateTransition{ + NextState: &CloseErr{ + CloseChannelTerms: l.CloseChannelTerms, + Party: lntypes.Local, + ErrState: NewErrStateCantPayForFee( + l.LocalBalance.ToSatoshis(), + absoluteFee, + ), + }, + }, nil + } + // Now that we know what fee we want to pay, we'll create a new // signature over our co-op close transaction. For our // proposals, we'll just always use the known RBF sequence @@ -848,8 +913,10 @@ func (l *LocalOfferSent) ProcessEvent(event ProtocolEvent, env *Environment, return &CloseStateTransition{ NextState: &ClosePending{ - CloseTx: closeTx, - FeeRate: l.ProposedFeeRate, + CloseTx: closeTx, + FeeRate: l.ProposedFeeRate, + CloseChannelTerms: l.CloseChannelTerms, + Party: lntypes.Local, }, NewEvents: fn.Some(protofsm.EmittedEvent[ProtocolEvent]{ ExternalEvents: broadcastEvent, @@ -1022,8 +1089,10 @@ func (l *RemoteCloseStart) ProcessEvent(event ProtocolEvent, env *Environment, // the next state where we'll sign+broadcast the sig. return &CloseStateTransition{ NextState: &ClosePending{ - CloseTx: closeTx, - FeeRate: feeRate, + CloseTx: closeTx, + FeeRate: feeRate, + CloseChannelTerms: l.CloseChannelTerms, + Party: lntypes.Remote, }, NewEvents: fn.Some(protofsm.EmittedEvent[ProtocolEvent]{ ExternalEvents: daemonEvents, @@ -1051,6 +1120,32 @@ func (c *ClosePending) ProcessEvent(event ProtocolEvent, env *Environment, }, }, nil + // If we get a send offer event in this state, then we're doing a state + // transition to the LocalCloseStart state, so we can sign a new closing + // tx. + case *SendOfferEvent: + return &CloseStateTransition{ + NextState: &LocalCloseStart{ + CloseChannelTerms: c.CloseChannelTerms, + }, + NewEvents: fn.Some(protofsm.EmittedEvent[ProtocolEvent]{ + InternalEvent: []ProtocolEvent{msg}, + }), + }, nil + + // If we get an offer received event, then we're doing a state + // transition to the RemoteCloseStart, as the remote peer wants to sign + // a new closing tx. 
+ case *OfferReceivedEvent: + return &CloseStateTransition{ + NextState: &RemoteCloseStart{ + CloseChannelTerms: c.CloseChannelTerms, + }, + NewEvents: fn.Some(protofsm.EmittedEvent[ProtocolEvent]{ + InternalEvent: []ProtocolEvent{msg}, + }), + }, nil + default: return &CloseStateTransition{ @@ -1068,3 +1163,44 @@ func (c *CloseFin) ProcessEvent(event ProtocolEvent, env *Environment, NextState: c, }, nil } + +// ProcessEvent is a semi-terminal state in the rbf-coop close state machine. +// In this state, we hit a validation error in an earlier state, so we'll remain +// in this state for the user to examine. We may also process new requests to +// continue the state machine. +func (c *CloseErr) ProcessEvent(event ProtocolEvent, env *Environment, +) (*CloseStateTransition, error) { + + switch msg := event.(type) { + // If we get a send offer event in this state, then we're doing a state + // transition to the LocalCloseStart state, so we can sign a new closing + // tx. + case *SendOfferEvent: + return &CloseStateTransition{ + NextState: &LocalCloseStart{ + CloseChannelTerms: c.CloseChannelTerms, + }, + NewEvents: fn.Some(protofsm.EmittedEvent[ProtocolEvent]{ + InternalEvent: []ProtocolEvent{msg}, + }), + }, nil + + // If we get an offer received event, then we're doing a state + // transition to the RemoteCloseStart, as the remote peer wants to sign + // a new closing tx. + case *OfferReceivedEvent: + return &CloseStateTransition{ + NextState: &RemoteCloseStart{ + CloseChannelTerms: c.CloseChannelTerms, + }, + NewEvents: fn.Some(protofsm.EmittedEvent[ProtocolEvent]{ + InternalEvent: []ProtocolEvent{msg}, + }), + }, nil + + default: + return &CloseStateTransition{ + NextState: c, + }, nil + } +} From 72642f54e94ab86859697cbd627d610dd0781131 Mon Sep 17 00:00:00 2001 From: Olaoluwa Osuntokun Date: Fri, 7 Feb 2025 18:22:15 -0800 Subject: [PATCH 5/8] lnwallet: implement special case for OP_RETURN in rbf-coop In this commit, we implement a special case for OP_RETURN scripts outlined in the spec. If a party decides that its output will be too small even after the dust check, then they can opt to set it to zero by sending an `OP_RETURN` as their script. --- lnwallet/channel.go | 22 ++++++++++--- lnwallet/channel_test.go | 67 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 85 insertions(+), 4 deletions(-) diff --git a/lnwallet/channel.go b/lnwallet/channel.go index aa8e21e76..d7489f942 100644 --- a/lnwallet/channel.go +++ b/lnwallet/channel.go @@ -9269,13 +9269,19 @@ func CreateCooperativeCloseTx(fundingTxIn wire.TxIn, closeTx.LockTime = lockTime }) - // TODO(roasbeef): needs support for dropping inputs - - // Create both cooperative closure outputs, properly respecting the - // dust limits of both parties. + // Create both cooperative closure outputs, properly respecting the dust + // limits of both parties. var localOutputIdx fn.Option[int] haveLocalOutput := ourBalance >= localDust if haveLocalOutput { + // If our script is an OP_RETURN, then we set our balance to + // zero. + if opts.customSequence.IsSome() && + input.ScriptIsOpReturn(ourDeliveryScript) { + + ourBalance = 0 + } + closeTx.AddTxOut(&wire.TxOut{ PkScript: ourDeliveryScript, Value: int64(ourBalance), @@ -9287,6 +9293,14 @@ func CreateCooperativeCloseTx(fundingTxIn wire.TxIn, var remoteOutputIdx fn.Option[int] haveRemoteOutput := theirBalance >= remoteDust if haveRemoteOutput { + // If a party's script is an OP_RETURN, then we set their + // balance to zero. 
+ if opts.customSequence.IsSome() && + input.ScriptIsOpReturn(theirDeliveryScript) { + + theirBalance = 0 + } + closeTx.AddTxOut(&wire.TxOut{ PkScript: theirDeliveryScript, Value: int64(theirBalance), diff --git a/lnwallet/channel_test.go b/lnwallet/channel_test.go index 9da174a3d..0a0ca261c 100644 --- a/lnwallet/channel_test.go +++ b/lnwallet/channel_test.go @@ -20,6 +20,7 @@ import ( "github.com/btcsuite/btcd/btcutil" "github.com/btcsuite/btcd/btcutil/txsort" "github.com/btcsuite/btcd/chaincfg/chainhash" + "github.com/btcsuite/btcd/mempool" "github.com/btcsuite/btcd/txscript" "github.com/btcsuite/btcd/wire" "github.com/davecgh/go-spew/spew" @@ -2450,6 +2451,72 @@ func TestCooperativeCloseDustAdherence(t *testing.T) { } } +// TestCooperativeCloseOpReturn tests that if either party's script is an +// OP_RETURN script, then we'll set their output value as zero on the closing +// transaction. +func TestCooperativeCloseOpReturn(t *testing.T) { + t.Parallel() + + // Create a test channel which will be used for the duration of this + // unittest. The channel will be funded evenly with Alice having 5 BTC, + // and Bob having 5 BTC. + aliceChannel, bobChannel, err := CreateTestChannels( + t, channeldb.SingleFunderTweaklessBit, + ) + require.NoError(t, err, "unable to create test channels") + + // Alice will have a "normal" looking script, while Bob will have a + // script that's just an OP_RETURN. + aliceDeliveryScript := bobsPrivKey + bobDeliveryScript := []byte{txscript.OP_RETURN} + + aliceFeeRate := chainfee.SatPerKWeight( + aliceChannel.channelState.LocalCommitment.FeePerKw, + ) + aliceFee := aliceChannel.CalcFee(aliceFeeRate) + 1000 + + assertBobOpReturn := func(tx *wire.MsgTx) { + // We should still have two outputs on the commitment + // transaction, as Alice's is non-dust. + require.Len(t, tx.TxOut, 2) + + // We should find that Bob's output has a zero value. + bobTxOut := fn.Filter(tx.TxOut, func(txOut *wire.TxOut) bool { + return bytes.Equal(txOut.PkScript, bobDeliveryScript) + }) + require.Len(t, bobTxOut, 1) + + require.True(t, bobTxOut[0].Value == 0) + } + + // Next, we'll make a new co-op close proposal, initiated by Alice. + aliceSig, closeTxAlice, _, err := aliceChannel.CreateCloseProposal( + aliceFee, aliceDeliveryScript, bobDeliveryScript, + // We use a custom sequence as this rule only applies to the RBF + // coop channel type. + WithCustomSequence(mempool.MaxRBFSequence), + ) + require.NoError(t, err, "unable to close channel") + + assertBobOpReturn(closeTxAlice) + + bobSig, _, _, err := bobChannel.CreateCloseProposal( + aliceFee, bobDeliveryScript, aliceDeliveryScript, + WithCustomSequence(mempool.MaxRBFSequence), + ) + require.NoError(t, err, "unable to close channel") + + // We should now be able to complete the cooperative channel closure, + // finding that the close tx still only has a single output. + closeTx, _, err := bobChannel.CompleteCooperativeClose( + bobSig, aliceSig, bobDeliveryScript, aliceDeliveryScript, + aliceFee, WithCustomSequence(mempool.MaxRBFSequence), + ) + require.NoError(t, err, "unable to accept channel close") + + assertBobOpReturn(closeTx) +} + // TestUpdateFeeAdjustments tests that the state machine is able to properly // accept valid fee changes, as well as reject any invalid fee updates. 
func TestUpdateFeeAdjustments(t *testing.T) { From 1353b894a5904c2b765c2583d8a0128686a99b8d Mon Sep 17 00:00:00 2001 From: Olaoluwa Osuntokun Date: Wed, 26 Feb 2025 18:04:16 -0800 Subject: [PATCH 6/8] lnwallet/chancloser: update RBF state machine to handle early offer case In this commit, we update the RBF state machine to handle early offer cases. This can happen if after we send out shutdown (to kick things off), the remote party sends their offer early. This can also happen if their outgoing shutdown (to ACK ours) was delayed for w/e reason, and we get their offer first. The alternative was to modify the state machine itself, but we feel that handling this early case is better in line with the Robustness principle. --- lnwallet/chancloser/rbf_coop_states.go | 5 ++ lnwallet/chancloser/rbf_coop_test.go | 97 +++++++++++++++++++++ lnwallet/chancloser/rbf_coop_transitions.go | 60 ++++++++++--- 3 files changed, 149 insertions(+), 13 deletions(-) diff --git a/lnwallet/chancloser/rbf_coop_states.go b/lnwallet/chancloser/rbf_coop_states.go index 3850abfc9..a195334f2 100644 --- a/lnwallet/chancloser/rbf_coop_states.go +++ b/lnwallet/chancloser/rbf_coop_states.go @@ -454,6 +454,11 @@ type ShutdownPending struct { // IdealFeeRate is the ideal fee rate we'd like to use for the closing // attempt. IdealFeeRate fn.Option[chainfee.SatPerVByte] + + // EarlyRemoteOffer is the offer we received from the remote party + // before we received their shutdown message. We'll stash it to process + // later. + EarlyRemoteOffer fn.Option[OfferReceivedEvent] } // String returns the name of the state for ShutdownPending. diff --git a/lnwallet/chancloser/rbf_coop_test.go b/lnwallet/chancloser/rbf_coop_test.go index 7456025c3..198f1121a 100644 --- a/lnwallet/chancloser/rbf_coop_test.go +++ b/lnwallet/chancloser/rbf_coop_test.go @@ -251,6 +251,8 @@ func (r *rbfCloserTestHarness) assertNoStateTransitions() { } func (r *rbfCloserTestHarness) assertStateTransitions(states ...RbfState) { + r.T.Helper() + assertStateTransitions(r.T, r.stateSub, states) } @@ -1035,6 +1037,101 @@ func TestRbfShutdownPendingTransitions(t *testing.T) { closeHarness.assertStateTransitions(&ChannelFlushing{}) }) + // If we an early offer from the remote party, then we should stash + // that, transition to the channel flushing state. Once there, another + // self transition should emit the stashed offer. + t.Run("early_remote_offer_shutdown_complete", func(t *testing.T) { + firstState := *startingState + firstState.IdealFeeRate = fn.Some( + chainfee.FeePerKwFloor.FeePerVByte(), + ) + firstState.ShutdownScripts = ShutdownScripts{ + LocalDeliveryScript: localAddr, + RemoteDeliveryScript: remoteAddr, + } + + closeHarness := newCloser(t, &harnessCfg{ + initialState: fn.Some[ProtocolState]( + &firstState, + ), + }) + defer closeHarness.stopAndAssert() + + // In this case we're doing the shutdown dance for the first + // time, so we'll mark the channel as not being flushed. + closeHarness.expectFinalBalances(fn.None[ShutdownBalances]()) + + // Before we send the shutdown complete event, we'll send in an + // early offer from the remote party. + closeHarness.chanCloser.SendEvent(ctx, &OfferReceivedEvent{}) + + // This will cause a self transition back to ShutdownPending. + closeHarness.assertStateTransitions(&ShutdownPending{}) + + // Next, we'll send in a shutdown complete event. 
+ closeHarness.chanCloser.SendEvent(ctx, &ShutdownComplete{}) + + // We should transition to the channel flushing state, then the + // self event to have this state cache he early offer should + // follow. + closeHarness.assertStateTransitions( + &ChannelFlushing{}, &ChannelFlushing{}, + ) + + // If we get the current state, we should see that the offer is + // cached. + currentState := assertStateT[*ChannelFlushing](closeHarness) + require.NotNil(t, currentState.EarlyRemoteOffer) + }) + + // If we an early offer from the remote party, then we should stash + // that, transition to the channel flushing state. Once there, another + // self transition should emit the stashed offer. + t.Run("early_remote_offer_shutdown_received", func(t *testing.T) { + firstState := *startingState + firstState.IdealFeeRate = fn.Some( + chainfee.FeePerKwFloor.FeePerVByte(), + ) + firstState.ShutdownScripts = ShutdownScripts{ + LocalDeliveryScript: localAddr, + RemoteDeliveryScript: remoteAddr, + } + + closeHarness := newCloser(t, &harnessCfg{ + initialState: fn.Some[ProtocolState]( + &firstState, + ), + }) + defer closeHarness.stopAndAssert() + + // In this case we're doing the shutdown dance for the first + // time, so we'll mark the channel as not being flushed. + closeHarness.expectFinalBalances(fn.None[ShutdownBalances]()) + closeHarness.expectIncomingAddsDisabled() + + // Before we send the shutdown complete event, we'll send in an + // early offer from the remote party. + closeHarness.chanCloser.SendEvent(ctx, &OfferReceivedEvent{}) + + // This will cause a self transition back to ShutdownPending. + closeHarness.assertStateTransitions(&ShutdownPending{}) + + // Next, we'll send in a shutdown complete event. + closeHarness.chanCloser.SendEvent(ctx, &ShutdownReceived{}) + + // We should transition to the channel flushing state, then the + // self event to have this state cache he early offer should + // follow. + closeHarness.assertStateTransitions( + &ChannelFlushing{}, &ChannelFlushing{}, + ) + + // If we get the current state, we should see that the offer is + // cached. + currentState := assertStateT[*ChannelFlushing](closeHarness) + require.NotNil(t, currentState.EarlyRemoteOffer) + }) + // Any other event should be ignored. assertUnknownEventFail(t, startingState) } diff --git a/lnwallet/chancloser/rbf_coop_transitions.go b/lnwallet/chancloser/rbf_coop_transitions.go index 452386825..b83e0b752 100644 --- a/lnwallet/chancloser/rbf_coop_transitions.go +++ b/lnwallet/chancloser/rbf_coop_transitions.go @@ -291,6 +291,22 @@ func (s *ShutdownPending) ProcessEvent(event ProtocolEvent, env *Environment, }, }, nil + // The remote party sent an offer early. We'll go to the ChannelFlushing + // case, and then emit the offer as a internal event, which'll be + // handled as an early offer. + case *OfferReceivedEvent: + chancloserLog.Infof("ChannelPoint(%v): got an early offer "+ + "in ShutdownPending, emitting as external event", + env.ChanPoint) + + s.EarlyRemoteOffer = fn.Some(*msg) + + // We'll perform a noop update so we can wait for the actual + // channel flushed event. + return &CloseStateTransition{ + NextState: s, + }, nil + // When we receive a shutdown from the remote party, we'll validate the // shutdown message, then transition to the ChannelFlushing state. case *ShutdownReceived: @@ -314,7 +330,7 @@ func (s *ShutdownPending) ProcessEvent(event ProtocolEvent, env *Environment, // If the channel is *already* flushed, and the close is // go straight into negotiation, as this is the RBF loop. 
// already in progress, then we can skip the flushing state and - var eventsToEmit fn.Option[protofsm.EmittedEvent[ProtocolEvent]] + var eventsToEmit []ProtocolEvent finalBalances := env.ChanObserver.FinalBalances().UnwrapOr( unknownBalance, ) @@ -322,11 +338,7 @@ func (s *ShutdownPending) ProcessEvent(event ProtocolEvent, env *Environment, channelFlushed := ProtocolEvent(&ChannelFlushed{ ShutdownBalances: finalBalances, }) - eventsToEmit = fn.Some(RbfEvent{ - InternalEvent: []ProtocolEvent{ - channelFlushed, - }, - }) + eventsToEmit = append(eventsToEmit, channelFlushed) } chancloserLog.Infof("ChannelPoint(%v): disabling incoming adds", @@ -342,6 +354,19 @@ func (s *ShutdownPending) ProcessEvent(event ProtocolEvent, env *Environment, chancloserLog.Infof("ChannelPoint(%v): waiting for channel to "+ "be flushed...", env.ChanPoint) + // If we received a remote offer early from the remote party, + // then we'll add that to the set of internal events to emit. + s.EarlyRemoteOffer.WhenSome(func(offer OfferReceivedEvent) { + eventsToEmit = append(eventsToEmit, &offer) + }) + + var newEvents fn.Option[RbfEvent] + if len(eventsToEmit) > 0 { + newEvents = fn.Some(RbfEvent{ + InternalEvent: eventsToEmit, + }) + } + // We transition to the ChannelFlushing state, where we await // the ChannelFlushed event. return &CloseStateTransition{ @@ -352,7 +377,7 @@ func (s *ShutdownPending) ProcessEvent(event ProtocolEvent, env *Environment, RemoteDeliveryScript: msg.ShutdownScript, //nolint:ll }, }, - NewEvents: eventsToEmit, + NewEvents: newEvents, }, nil // If we get this message, then this means that we were finally able to @@ -365,7 +390,7 @@ func (s *ShutdownPending) ProcessEvent(event ProtocolEvent, env *Environment, // If the channel is *already* flushed, and the close is // already in progress, then we can skip the flushing state and // go straight into negotiation, as this is the RBF loop. - var eventsToEmit fn.Option[protofsm.EmittedEvent[ProtocolEvent]] + var eventsToEmit []ProtocolEvent finalBalances := env.ChanObserver.FinalBalances().UnwrapOr( unknownBalance, ) @@ -373,10 +398,19 @@ func (s *ShutdownPending) ProcessEvent(event ProtocolEvent, env *Environment, channelFlushed := ProtocolEvent(&ChannelFlushed{ ShutdownBalances: finalBalances, }) - eventsToEmit = fn.Some(RbfEvent{ - InternalEvent: []ProtocolEvent{ - channelFlushed, - }, + eventsToEmit = append(eventsToEmit, channelFlushed) + } + + // If we received a remote offer early from the remote party, + // then we'll add that to the set of internal events to emit. 
+ s.EarlyRemoteOffer.WhenSome(func(offer OfferReceivedEvent) { + eventsToEmit = append(eventsToEmit, &offer) + }) + + var newEvents fn.Option[RbfEvent] + if len(eventsToEmit) > 0 { + newEvents = fn.Some(RbfEvent{ + InternalEvent: eventsToEmit, }) } @@ -387,7 +421,7 @@ func (s *ShutdownPending) ProcessEvent(event ProtocolEvent, env *Environment, IdealFeeRate: s.IdealFeeRate, ShutdownScripts: s.ShutdownScripts, }, - NewEvents: eventsToEmit, + NewEvents: newEvents, }, nil // Any other messages in this state will result in an error, as this is From dacc5dfb1f26b2b694ed57e7489603a50be14020 Mon Sep 17 00:00:00 2001 From: Olaoluwa Osuntokun Date: Wed, 26 Feb 2025 19:04:18 -0800 Subject: [PATCH 7/8] lnwallet/chancloser: increase test coverage of state machine --- lnwallet/chancloser/rbf_coop_test.go | 290 +++++++++++++++++++- lnwallet/chancloser/rbf_coop_transitions.go | 25 +- 2 files changed, 302 insertions(+), 13 deletions(-) diff --git a/lnwallet/chancloser/rbf_coop_test.go b/lnwallet/chancloser/rbf_coop_test.go index 198f1121a..33da8b2c3 100644 --- a/lnwallet/chancloser/rbf_coop_test.go +++ b/lnwallet/chancloser/rbf_coop_test.go @@ -157,6 +157,27 @@ func assertUnknownEventFail(t *testing.T, startingState ProtocolState) { }) } +// assertSpendEventCloseFin asserts that the state machine transitions to the +// CloseFin state when a spend event is received. +func assertSpendEventCloseFin(t *testing.T, startingState ProtocolState) { + t.Helper() + + // If a spend event is received, the state machine should transition to + // the CloseFin state. + t.Run("spend_event", func(t *testing.T) { + closeHarness := newCloser(t, &harnessCfg{ + initialState: fn.Some(startingState), + }) + defer closeHarness.stopAndAssert() + + closeHarness.chanCloser.SendEvent( + context.Background(), &SpendEvent{}, + ) + + closeHarness.assertStateTransitions(&CloseFin{}) + }) +} + type harnessCfg struct { initialState fn.Option[ProtocolState] @@ -628,7 +649,8 @@ func (r *rbfCloserTestHarness) assertSingleRbfIteration( func (r *rbfCloserTestHarness) assertSingleRemoteRbfIteration( initEvent *OfferReceivedEvent, balanceAfterClose, - absoluteFee btcutil.Amount, sequence uint32, iteration bool) { + absoluteFee btcutil.Amount, sequence uint32, iteration bool, + sendInit bool) { ctx := context.Background() @@ -640,7 +662,9 @@ func (r *rbfCloserTestHarness) assertSingleRemoteRbfIteration( absoluteFee, balanceAfterClose, false, ) - r.chanCloser.SendEvent(ctx, initEvent) + if sendInit { + r.chanCloser.SendEvent(ctx, initEvent) + } // Our outer state should transition to ClosingNegotiation state. r.assertStateTransitions(&ClosingNegotiation{}) @@ -862,7 +886,28 @@ func TestRbfChannelActiveTransitions(t *testing.T) { closeHarness.waitForMsgSent() }) - // TODO(roasbeef): thaw height fail + // If the remote party attempts to close, and a thaw height is active, + // but not yet met, then we should fail. + t.Run("remote_initiated_thaw_height_close_fail", func(t *testing.T) { + closeHarness := newCloser(t, &harnessCfg{ + localUpfrontAddr: fn.Some(localAddr), + thawHeight: fn.Some(uint32(100000)), + }) + defer closeHarness.stopAndAssert() + + // Next, we'll emit the recv event, with the addr of the remote + // party. + closeHarness.chanCloser.SendEvent( + ctx, &ShutdownReceived{ + ShutdownScript: remoteAddr, + BlockHeight: 1, + }, + ) + + // We expect a failure as the block height is less than the + // start height. 
+ closeHarness.expectFailure(ErrThawHeightNotReached) + }) // When we receive a shutdown, we should transition to the shutdown // pending state, with the local+remote shutdown addrs known. @@ -906,6 +951,9 @@ func TestRbfChannelActiveTransitions(t *testing.T) { // Any other event should be ignored. assertUnknownEventFail(t, &ChannelActive{}) + + // Sending a Spend event should transition to CloseFin. + assertSpendEventCloseFin(t, &ChannelActive{}) } // TestRbfShutdownPendingTransitions tests the transitions of the RBF closer @@ -1134,6 +1182,9 @@ func TestRbfShutdownPendingTransitions(t *testing.T) { // Any other event should be ignored. assertUnknownEventFail(t, startingState) + + // Sending a Spend event should transition to CloseFin. + assertSpendEventCloseFin(t, startingState) } // TestRbfChannelFlushingTransitions tests the transitions of the RBF closer @@ -1241,7 +1292,7 @@ func TestRbfChannelFlushingTransitions(t *testing.T) { closeHarness.expectChanPendingClose() } - // From where, we expect the state transition to go + // From here, we expect the state transition to go // back to closing negotiated, for a ClosingComplete // message to be sent and then for us to terminate at // that state. This is 1/2 of the normal RBF signer @@ -1253,8 +1304,65 @@ func TestRbfChannelFlushingTransitions(t *testing.T) { }) } + // This tests that if we receive an `OfferReceivedEvent` while in the + // flushing state, then we'll cache that, and once we receive + // ChannelFlushed, we'll emit an internal `OfferReceivedEvent` in the + // negotiation state. + t.Run("early_offer", func(t *testing.T) { + firstState := *startingState + + closeHarness := newCloser(t, &harnessCfg{ + initialState: fn.Some[ProtocolState]( + &firstState, + ), + }) + defer closeHarness.stopAndAssert() + + flushEvent := *flushTemplate + + // Set up the fee estimate s.t the local party doesn't have + // balance to close. + closeHarness.expectFeeEstimate(absoluteFee, 1) + + // First, we'll emit an `OfferReceivedEvent` to simulate an + // early offer (async network, they determine the channel is + // "flushed" before we do, and send their offer over). + remoteOffer := &OfferReceivedEvent{ + SigMsg: lnwire.ClosingComplete{ + FeeSatoshis: absoluteFee, + CloserScript: remoteAddr, + CloseeScript: localAddr, + ClosingSigs: lnwire.ClosingSigs{ + CloserAndClosee: newSigTlv[tlv.TlvType3]( //nolint:ll + remoteWireSig, + ), + }, + }, + } + + closeHarness.chanCloser.SendEvent(ctx, remoteOffer) + + // We should do a self transition, and still be in the + // ChannelFlushing state. + closeHarness.assertStateTransitions(&ChannelFlushing{}) + + sequence := uint32(mempool.MaxRBFSequence) + + // Now we'll send in the channel flushed event, and assert that + // this triggers a remote RBF iteration (we process their early + // offer and send our sig). + closeHarness.chanCloser.SendEvent(ctx, &flushEvent) + closeHarness.assertSingleRemoteRbfIteration( + remoteOffer, absoluteFee, absoluteFee, sequence, false, + true, + ) + }) + // Any other event should be ignored. assertUnknownEventFail(t, startingState) + + // Sending a Spend event should transition to CloseFin. + assertSpendEventCloseFin(t, startingState) } // TestRbfCloseClosingNegotiationLocal tests the local portion of the primary @@ -1496,6 +1604,59 @@ func TestRbfCloseClosingNegotiationLocal(t *testing.T) { &ClosingNegotiation{}, ) }) + + // Make sure that we'll go to the error state if we try to try a close + // that we can't pay for. 
+ t.Run("send_offer_cannot_pay_for_fees", func(t *testing.T) { + firstState := &ClosingNegotiation{ + PeerState: lntypes.Dual[AsymmetricPeerState]{ + Local: &LocalCloseStart{ + CloseChannelTerms: closeTerms, + }, + }, + CloseChannelTerms: closeTerms, + } + + closeHarness := newCloser(t, &harnessCfg{ + initialState: fn.Some[ProtocolState](firstState), + localUpfrontAddr: fn.Some(localAddr), + }) + defer closeHarness.stopAndAssert() + + // We'll prep to return an absolute fee that's much higher than + // the amount we have in the channel. + closeHarness.expectFeeEstimate(btcutil.SatoshiPerBitcoin, 1) + + rbfFeeBump := chainfee.FeePerKwFloor.FeePerVByte() + localOffer := &SendOfferEvent{ + TargetFeeRate: rbfFeeBump, + } + + // Next, we'll send in this event, which should fail as we can't + // actually pay for fees. + closeHarness.chanCloser.SendEvent(ctx, localOffer) + + // We should transition to the CloseErr (within + // ClosingNegotiation) state. + closeHarness.assertStateTransitions(&ClosingNegotiation{}) + + // If we get the state, we should see the expected ErrState. + currentState := assertStateT[*ClosingNegotiation](closeHarness) + + closeErrState, ok := currentState.PeerState.GetForParty( + lntypes.Local, + ).(*CloseErr) + require.True(t, ok) + require.IsType( + t, &ErrStateCantPayForFee{}, closeErrState.ErrState, + ) + }) + + // Any other event should be ignored. + assertUnknownEventFail(t, startingState) + + // Sending a Spend event should transition to CloseFin. + assertSpendEventCloseFin(t, startingState) } // TestRbfCloseClosingNegotiationRemote tests that state machine is able to @@ -1503,6 +1664,7 @@ func TestRbfCloseClosingNegotiationLocal(t *testing.T) { // party. func TestRbfCloseClosingNegotiationRemote(t *testing.T) { t.Parallel() + ctx := context.Background() localBalance := lnwire.NewMSatFromSatoshis(40_000) @@ -1533,7 +1695,6 @@ func TestRbfCloseClosingNegotiationRemote(t *testing.T) { } balanceAfterClose := remoteBalance.ToSatoshis() - absoluteFee - sequence := uint32(mempool.MaxRBFSequence) // This case tests that if we receive a signature from the remote @@ -1688,7 +1849,7 @@ func TestRbfCloseClosingNegotiationRemote(t *testing.T) { // sig. closeHarness.assertSingleRemoteRbfIteration( feeOffer, balanceAfterClose, absoluteFee, sequence, - false, + false, true, ) // Next, we'll receive an offer from the remote party, and drive @@ -1698,12 +1859,14 @@ func TestRbfCloseClosingNegotiationRemote(t *testing.T) { absoluteFee = feeOffer.SigMsg.FeeSatoshis closeHarness.assertSingleRemoteRbfIteration( feeOffer, balanceAfterClose, absoluteFee, sequence, - true, + true, true, ) closeHarness.assertNoStateTransitions() }) + // This tests that if we get an offer that has the wrong local script, + // then we'll emit a hard error. t.Run("recv_offer_wrong_local_script", func(t *testing.T) { closeHarness := newCloser(t, &harnessCfg{ initialState: fn.Some[ProtocolState](startingState), @@ -1734,6 +1897,9 @@ func TestRbfCloseClosingNegotiationRemote(t *testing.T) { closeHarness.assertNoStateTransitions() }) + // If we receive an offer from the remote party with a different remote + // script, then this ensures that we'll process that and use that create + // the next offer. t.Run("recv_offer_remote_addr_change", func(t *testing.T) { closingTerms := *closeTerms @@ -1782,7 +1948,115 @@ func TestRbfCloseClosingNegotiationRemote(t *testing.T) { // sig. 
closeHarness.assertSingleRemoteRbfIteration( feeOffer, balanceAfterClose, absoluteFee, sequence, - false, + false, true, ) }) + + // Any other event should be ignored. + assertUnknownEventFail(t, startingState) + + // Sending a Spend event should transition to CloseFin. + assertSpendEventCloseFin(t, startingState) +} + +// TestRbfCloseErr tests that the state machine is able to properly restart +// the state machine if we encounter an error. +func TestRbfCloseErr(t *testing.T) { + localBalance := lnwire.NewMSatFromSatoshis(40_000) + remoteBalance := lnwire.NewMSatFromSatoshis(50_000) + + closeTerms := &CloseChannelTerms{ + ShutdownBalances: ShutdownBalances{ + LocalBalance: localBalance, + RemoteBalance: remoteBalance, + }, + ShutdownScripts: ShutdownScripts{ + LocalDeliveryScript: localAddr, + RemoteDeliveryScript: remoteAddr, + }, + } + startingState := &ClosingNegotiation{ + PeerState: lntypes.Dual[AsymmetricPeerState]{ + Local: &CloseErr{ + CloseChannelTerms: closeTerms, + }, + }, + CloseChannelTerms: closeTerms, + } + + absoluteFee := btcutil.Amount(10_100) + balanceAfterClose := localBalance.ToSatoshis() - absoluteFee + + // From the error state, we should be able to kick off a new iteration + // for a local fee bump. + t.Run("send_offer_restart", func(t *testing.T) { + closeHarness := newCloser(t, &harnessCfg{ + initialState: fn.Some[ProtocolState](startingState), + }) + defer closeHarness.stopAndAssert() + + rbfFeeBump := chainfee.FeePerKwFloor.FeePerVByte() + localOffer := &SendOfferEvent{ + TargetFeeRate: rbfFeeBump, + } + + // Now we expect that another full RBF iteration takes place (we + // initiate a new local sig). + closeHarness.assertSingleRbfIteration( + localOffer, balanceAfterClose, absoluteFee, + noDustExpect, + ) + + // We should terminate in the negotiation state. + closeHarness.assertStateTransitions( + &ClosingNegotiation{}, + ) + }) + + // From the error state, we should be able to handle the remote party + // kicking off a new iteration for a fee bump. + t.Run("recv_offer_restart", func(t *testing.T) { + startingState := &ClosingNegotiation{ + PeerState: lntypes.Dual[AsymmetricPeerState]{ + Remote: &CloseErr{ + CloseChannelTerms: closeTerms, + Party: lntypes.Remote, + }, + }, + CloseChannelTerms: closeTerms, + } + + closeHarness := newCloser(t, &harnessCfg{ + initialState: fn.Some[ProtocolState](startingState), + localUpfrontAddr: fn.Some(localAddr), + }) + defer closeHarness.stopAndAssert() + + feeOffer := &OfferReceivedEvent{ + SigMsg: lnwire.ClosingComplete{ + CloserScript: remoteAddr, + CloseeScript: localAddr, + FeeSatoshis: absoluteFee, + LockTime: 1, + ClosingSigs: lnwire.ClosingSigs{ + CloserAndClosee: newSigTlv[tlv.TlvType3]( //nolint:ll + remoteWireSig, + ), + }, + }, + } + + sequence := uint32(mempool.MaxRBFSequence) + + // As we're already in the negotiation phase, we'll now trigger + // a new iteration by having the remote party send a new offer + // sig. + closeHarness.assertSingleRemoteRbfIteration( + feeOffer, balanceAfterClose, absoluteFee, sequence, + false, true, + ) + }) + + // Sending a Spend event should transition to CloseFin. 
+ assertSpendEventCloseFin(t, startingState) } diff --git a/lnwallet/chancloser/rbf_coop_transitions.go b/lnwallet/chancloser/rbf_coop_transitions.go index b83e0b752..3ddd591ad 100644 --- a/lnwallet/chancloser/rbf_coop_transitions.go +++ b/lnwallet/chancloser/rbf_coop_transitions.go @@ -22,6 +22,12 @@ import ( "github.com/lightningnetwork/lnd/tlv" ) +var ( + // ErrInvalidStateTransition is returned if the remote party tries to + // close, but the thaw height hasn't been matched yet. + ErrThawHeightNotReached = fmt.Errorf("thaw height not reached") +) + // sendShutdownEvents is a helper function that returns a set of daemon events // we need to emit when we decide that we should send a shutdown message. We'll // also mark the channel as borked as well, as at this point, we no longer want @@ -107,11 +113,11 @@ func validateShutdown(chanThawHeight fn.Option[uint32], // reject the shutdown message as we can't yet co-op close the // channel. if msg.BlockHeight < thawHeight { - return fmt.Errorf("initiator attempting to "+ + return fmt.Errorf("%w: initiator attempting to "+ "co-op close frozen ChannelPoint(%v) "+ "(current_height=%v, thaw_height=%v)", - chanPoint, msg.BlockHeight, - thawHeight) + ErrThawHeightNotReached, chanPoint, + msg.BlockHeight, thawHeight) } return nil @@ -694,18 +700,27 @@ func (c *ClosingNegotiation) ProcessEvent(event ProtocolEvent, env *Environment, return nil, fmt.Errorf("event violates close terms: %w", err) } + shouldRouteTo := func(party lntypes.ChannelParty) bool { + state := c.PeerState.GetForParty(party) + if state == nil { + return false + } + + return state.ShouldRouteTo(event) + } + // If we get to this point, then we have an event that'll drive forward // the negotiation process. Based on the event, we'll figure out which // state we'll be modifying. switch { - case c.PeerState.GetForParty(lntypes.Local).ShouldRouteTo(event): + case shouldRouteTo(lntypes.Local): chancloserLog.Infof("ChannelPoint(%v): routing %T to local "+ "chan state", env.ChanPoint, event) // Drive forward the local state based on the next event. return processNegotiateEvent(c, event, env, lntypes.Local) - case c.PeerState.GetForParty(lntypes.Remote).ShouldRouteTo(event): + case shouldRouteTo(lntypes.Remote): chancloserLog.Infof("ChannelPoint(%v): routing %T to remote "+ "chan state", env.ChanPoint, event) From f331e2cc3538e8db8dd13259184ec926f2dc9dc6 Mon Sep 17 00:00:00 2001 From: Olaoluwa Osuntokun Date: Tue, 4 Mar 2025 18:12:41 -0800 Subject: [PATCH 8/8] protofsm: add an upfront check for SendWhen predicates In this commit, we add an upfront check for `SendWhen` predicates before deciding to launch a goroutine. This ensures that when a message comes along that is already ready to send, we do the send in a synchronous manner. --- protofsm/state_machine.go | 24 +++++++++++++----------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git a/protofsm/state_machine.go b/protofsm/state_machine.go index 4d0215b69..c759d061a 100644 --- a/protofsm/state_machine.go +++ b/protofsm/state_machine.go @@ -377,9 +377,18 @@ func (s *StateMachine[Event, Env]) executeDaemonEvent(ctx context.Context, }) } - // If this doesn't have a SendWhen predicate, then we can just - // send it off right away. - if !daemonEvent.SendWhen.IsSome() { + canSend := func() bool { + return fn.MapOptionZ( + daemonEvent.SendWhen, + func(pred SendPredicate) bool { + return pred() + }, + ) + } + + // If this doesn't have a SendWhen predicate, or if it's already + // true, then we can just send it off right away. 
+ if !daemonEvent.SendWhen.IsSome() || canSend() { return sendAndCleanUp() } @@ -397,14 +406,7 @@ func (s *StateMachine[Event, Env]) executeDaemonEvent(ctx context.Context, for { select { case <-predicateTicker.C: - canSend := fn.MapOptionZ( - daemonEvent.SendWhen, - func(pred SendPredicate) bool { - return pred() - }, - ) - - if canSend { + if canSend() { s.log.InfoS(ctx, "Send active predicate") err := sendAndCleanUp()
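
The upfront-check pattern in this last patch stands on its own: evaluate the send predicate once synchronously, and only fall back to a polling goroutine when it is not yet satisfied. Below is a minimal, self-contained Go sketch of that idea; all names are hypothetical and stand in for the actual protofsm StateMachine/DaemonEvent API.

package main

import (
	"fmt"
	"time"
)

// sendWhen models an optional send predicate: nil means the message is
// always ready to go out immediately.
type sendWhen func() bool

// dispatch sends msg synchronously when the predicate is absent or already
// satisfied, and only launches a polling goroutine otherwise.
func dispatch(msg string, pred sendWhen, send func(string)) {
	canSend := func() bool {
		return pred == nil || pred()
	}

	// Upfront check: if the message is already ready, send it inline and
	// skip the goroutine entirely.
	if canSend() {
		send(msg)
		return
	}

	// Otherwise, poll the predicate in the background until it flips.
	go func() {
		ticker := time.NewTicker(100 * time.Millisecond)
		defer ticker.Stop()

		for range ticker.C {
			if canSend() {
				send(msg)
				return
			}
		}
	}()
}

func main() {
	// The predicate is already true, so the send happens synchronously
	// and no polling goroutine is started.
	dispatch("closing_complete", func() bool { return true }, func(m string) {
		fmt.Println("sent:", m)
	})
}

As in the patch above, the point is that a message whose predicate already holds is delivered in the caller's goroutine, which avoids a needless ticker and presumably keeps send ordering more deterministic for callers that expect an immediate send.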