Merge pull request #10052 from guggero/rbf-close-err

chancloser: fix flakes in chancloser tests
Oliver Gugger
2025-07-23 03:47:09 -06:00
committed by GitHub
6 changed files with 109 additions and 99 deletions

View File

@@ -286,6 +286,16 @@ flakehunter-unit:
@$(call print, "Flake hunting unit test.")
scripts/unit-test-flake-hunter.sh ${pkg} ${case}
#? flakehunter-unit-all: Run all unit tests continuously until one fails
flakehunter-unit-all: $(BTCD_BIN)
@$(call print, "Flake hunting unit tests.")
while [ $$? -eq 0 ]; do make unit; done
#? flakehunter-unit-race: Run all unit tests in race detector mode continuously until one fails
flakehunter-unit-race: $(BTCD_BIN)
@$(call print, "Flake hunting unit tests in race detector mode.")
while [ $$? -eq 0 ]; do make unit-race; done
#? flakehunter-itest-parallel: Run the integration tests continuously until one fails, running up to ITEST_PARALLELISM test tranches in parallel (default 4)
flakehunter-itest-parallel:
@$(call print, "Flake hunting ${backend} integration tests in parallel.")

View File

@@ -509,7 +509,10 @@ func TestTaprootFastClose(t *testing.T) {
aliceChan := newMockTaprootChan(t, true)
bobChan := newMockTaprootChan(t, false)
broadcastSignal := make(chan struct{}, 2)
// We'll create two distinct broadcast signals to ensure that each party
// broadcasts at the correct time.
aliceBroadcast := make(chan struct{}, 1)
bobBroadcast := make(chan struct{}, 1)
idealFee := chainfee.SatPerKWeight(506)
@@ -520,7 +523,7 @@ func TestTaprootFastClose(t *testing.T) {
Channel: aliceChan,
MusigSession: newMockMusigSession(),
BroadcastTx: func(_ *wire.MsgTx, _ string) error {
broadcastSignal <- struct{}{}
aliceBroadcast <- struct{}{}
return nil
},
MaxFee: chainfee.SatPerKWeight(1000),
@@ -538,7 +541,7 @@ func TestTaprootFastClose(t *testing.T) {
MusigSession: newMockMusigSession(),
MaxFee: chainfee.SatPerKWeight(1000),
BroadcastTx: func(_ *wire.MsgTx, _ string) error {
broadcastSignal <- struct{}{}
bobBroadcast <- struct{}{}
return nil
},
FeeEstimator: &SimpleCoopFeeEstimator{},
@@ -591,7 +594,7 @@ func TestTaprootFastClose(t *testing.T) {
// At this point, Bob has accepted the offer, so he can broadcast the
// closing transaction, and considers the channel closed.
_, err = lnutils.RecvOrTimeout(broadcastSignal, time.Second*1)
_, err = lnutils.RecvOrTimeout(bobBroadcast, time.Second*1)
require.NoError(t, err)
// Bob's fee proposal should exactly match Alice's initial fee.
@@ -623,7 +626,7 @@ func TestTaprootFastClose(t *testing.T) {
aliceClosingSigned = oClosingSigned.UnwrapOrFail(t)
// Alice should now also broadcast her closing transaction.
_, err = lnutils.RecvOrTimeout(broadcastSignal, time.Second*1)
_, err = lnutils.RecvOrTimeout(aliceBroadcast, time.Second*1)
require.NoError(t, err)
// Finally, Bob will process Alice's echo message, and conclude.
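The flake in TestTaprootFastClose came from Alice and Bob sharing a single broadcastSignal channel, so an assertion meant for Bob's broadcast could be satisfied by Alice's (or vice versa) depending on scheduling. Splitting the signal per party makes each RecvOrTimeout unambiguous. Below is a minimal, self-contained sketch of that pattern, assuming a toy waitForSignal helper in place of lnutils.RecvOrTimeout; the closure names are illustrative, not lnd APIs.

package main

import (
    "errors"
    "time"
)

// waitForSignal blocks until a value arrives on ch or the timeout fires. It
// plays the role lnutils.RecvOrTimeout plays in the test above.
func waitForSignal(ch <-chan struct{}, timeout time.Duration) error {
    select {
    case <-ch:
        return nil
    case <-time.After(timeout):
        return errors.New("timed out waiting for broadcast")
    }
}

func main() {
    // One buffered channel per party: an assertion on Bob's broadcast can
    // never be satisfied by Alice's signal, and vice versa.
    aliceBroadcast := make(chan struct{}, 1)
    bobBroadcast := make(chan struct{}, 1)

    // Each party's BroadcastTx closure signals only its own channel.
    bobBroadcastTx := func() { bobBroadcast <- struct{}{} }
    aliceBroadcastTx := func() { aliceBroadcast <- struct{}{} }

    // Bob broadcasts first; the assertion targets his channel only.
    bobBroadcastTx()
    if err := waitForSignal(bobBroadcast, time.Second); err != nil {
        panic(err)
    }

    // Later, Alice broadcasts her copy of the closing transaction.
    aliceBroadcastTx()
    if err := waitForSignal(aliceBroadcast, time.Second); err != nil {
        panic(err)
    }
}

The buffer of one keeps each BroadcastTx closure from blocking even if the corresponding assertion runs slightly after the broadcast.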

View File

@@ -225,16 +225,16 @@ type CloseSigner interface {
// balance in the created state.
CreateCloseProposal(proposedFee btcutil.Amount,
localDeliveryScript []byte, remoteDeliveryScript []byte,
closeOpt ...lnwallet.ChanCloseOpt,
) (
input.Signature, *wire.MsgTx, btcutil.Amount, error)
closeOpt ...lnwallet.ChanCloseOpt) (input.Signature,
*wire.MsgTx, btcutil.Amount, error)
// CompleteCooperativeClose persistently "completes" the cooperative
// close by producing a fully signed co-op close transaction.
CompleteCooperativeClose(localSig, remoteSig input.Signature,
localDeliveryScript, remoteDeliveryScript []byte,
proposedFee btcutil.Amount, closeOpt ...lnwallet.ChanCloseOpt,
) (*wire.MsgTx, btcutil.Amount, error)
proposedFee btcutil.Amount,
closeOpt ...lnwallet.ChanCloseOpt) (*wire.MsgTx, btcutil.Amount,
error)
}
// ChanStateObserver is an interface used to observe state changes that occur
@@ -579,8 +579,8 @@ type ErrStateCantPayForFee struct {
}
// NewErrStateCantPayForFee returns a new NewErrStateCantPayForFee error.
func NewErrStateCantPayForFee(localBalance, attemptedFee btcutil.Amount,
) *ErrStateCantPayForFee {
func NewErrStateCantPayForFee(localBalance,
attemptedFee btcutil.Amount) *ErrStateCantPayForFee {
return &ErrStateCantPayForFee{
localBalance: localBalance,
@@ -621,8 +621,9 @@ type CloseChannelTerms struct {
// DeriveCloseTxOuts takes the close terms, and returns the local and remote tx
// out for the close transaction. If an output is dust, then it'll be nil.
func (c *CloseChannelTerms) DeriveCloseTxOuts() (*wire.TxOut, *wire.TxOut) {
//nolint:ll
deriveTxOut := func(balance btcutil.Amount, pkScript []byte) *wire.TxOut {
deriveTxOut := func(balance btcutil.Amount,
pkScript []byte) *wire.TxOut {
// We'll base the existence of the output on our normal dust
// check.
dustLimit := lnwallet.DustLimitForSize(len(pkScript))
@@ -710,10 +711,10 @@ func (l *LocalCloseStart) IsTerminal() bool {
return false
}
// protocolStateaSealed indicates that this struct is a ProtocolEvent instance.
// protocolStateSealed indicates that this struct is a ProtocolEvent instance.
func (l *LocalCloseStart) protocolStateSealed() {}
// LocalOfferSent is the state we transition to after we reveiver the
// LocalOfferSent is the state we transition to after we receive the
// SendOfferEvent in the LocalCloseStart state. With this state we send our
// offer to the remote party, then await a sig from them which concludes the
// local cooperative close process.

View File

@@ -49,12 +49,18 @@ var (
remoteSigBytes = fromHex("304502210082235e21a2300022738dabb8e1bbd9d1" +
"9cfb1e7ab8c30a23b0afbb8d178abcf3022024bf68e256c534ddfaf966b" +
"f908deb944305596f7bdcc38d69acad7f9c868724")
remoteSig = sigMustParse(remoteSigBytes)
remoteWireSig = mustWireSig(&remoteSig)
remoteSig = sigMustParse(remoteSigBytes)
remoteWireSig = mustWireSig(&remoteSig)
remoteSigRecordType3 = newSigTlv[tlv.TlvType3](remoteWireSig)
remoteSigRecordType1 = newSigTlv[tlv.TlvType1](remoteWireSig)
localTx = wire.MsgTx{Version: 2}
closeTx = wire.NewMsgTx(2)
defaultTimeout = 500 * time.Millisecond
longTimeout = 3 * time.Second
defaultPoll = 50 * time.Millisecond
)
func sigMustParse(sigBytes []byte) ecdsa.Signature {
@@ -111,7 +117,7 @@ func assertStateTransitions[Event any, Env protofsm.Environment](
for _, expectedState := range expectedStates {
newState, err := fn.RecvOrTimeout(
stateSub.NewItemCreated.ChanOut(), 10*time.Millisecond,
stateSub.NewItemCreated.ChanOut(), defaultTimeout,
)
require.NoError(t, err, "expected state: %T", expectedState)
@@ -122,7 +128,7 @@ func assertStateTransitions[Event any, Env protofsm.Environment](
select {
case newState := <-stateSub.NewItemCreated.ChanOut():
t.Fatalf("unexpected state transition: %v", newState)
default:
case <-time.After(defaultPoll):
}
}
@@ -285,10 +291,12 @@ func (r *rbfCloserTestHarness) assertStartupAssertions() {
}
func (r *rbfCloserTestHarness) assertNoStateTransitions() {
r.T.Helper()
select {
case newState := <-r.stateSub.NewItemCreated.ChanOut():
r.T.Fatalf("unexpected state transition: %T", newState)
case <-time.After(10 * time.Millisecond):
case <-time.After(defaultPoll):
}
}
@@ -436,7 +444,7 @@ func (r *rbfCloserTestHarness) waitForMsgSent() {
err := wait.Predicate(func() bool {
return r.daemonAdapters.msgSent.Load()
}, time.Second*3)
}, longTimeout)
require.NoError(r.T, err)
}
@@ -642,7 +650,7 @@ func (r *rbfCloserTestHarness) assertSingleRbfIteration(
// We'll now send in the send offer event, which should trigger 1/2 of
// the RBF loop, ending us in the LocalOfferSent state.
r.expectHalfSignerIteration(
initEvent, balanceAfterClose, absoluteFee, noDustExpect,
initEvent, balanceAfterClose, absoluteFee, dustExpect,
iteration,
)
@@ -693,15 +701,21 @@ func (r *rbfCloserTestHarness) assertSingleRemoteRbfIteration(
}
// Our outer state should transition to ClosingNegotiation state.
r.assertStateTransitions(&ClosingNegotiation{})
transitions := []RbfState{
&ClosingNegotiation{},
}
// If this is an iteration, then we'll go from ClosePending ->
// RemoteCloseStart -> ClosePending. So we'll assert an extra transition
// here.
if iteration {
r.assertStateTransitions(&ClosingNegotiation{})
transitions = append(transitions, &ClosingNegotiation{})
}
// Now that we know how many state transitions to expect, we'll wait
// for them.
r.assertStateTransitions(transitions...)
// If we examine the final resting state, we should see that we're
// now in the ClosePending state for the remote peer.
currentState := assertStateT[*ClosingNegotiation](r)
@@ -802,7 +816,7 @@ func newRbfCloserTestHarness(t *testing.T,
MsgMapper: fn.Some[protofsm.MsgMapper[ProtocolEvent]](
msgMapper,
),
CustomPollInterval: fn.Some(time.Nanosecond),
CustomPollInterval: fn.Some(defaultPoll),
}
// Before we start we always expect an initial spend event.
@@ -811,10 +825,13 @@ func newRbfCloserTestHarness(t *testing.T,
).Return(nil)
chanCloser := protofsm.NewStateMachine(protoCfg)
chanCloser.Start(ctx)
// We register our subscriber before starting the state machine, to make
// sure we don't miss any events.
harness.stateSub = chanCloser.RegisterStateEvents()
chanCloser.Start(ctx)
harness.chanCloser = &chanCloser
return harness
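The reordering above, registering the state-event subscriber before calling Start, closes a startup race: if the machine starts first, its initial transitions can be emitted before any subscriber exists, and the assertions later in the test then time out waiting for events that were never delivered. A minimal sketch of why the ordering matters, using a toy event bus rather than the protofsm package (bus, Register, and Start here are illustrative names, not lnd APIs):

package main

import "fmt"

// bus is a toy stand-in for the protofsm state machine's event registry; it
// is not an lnd type.
type bus struct {
    subs []chan string
}

// Register adds a subscriber and returns its event channel.
func (b *bus) Register() chan string {
    ch := make(chan string, 8)
    b.subs = append(b.subs, ch)
    return ch
}

// Start emits the initial state to whoever is subscribed at that moment; a
// subscriber registered afterwards has already missed it.
func (b *bus) Start() {
    for _, ch := range b.subs {
        ch <- "initial-state"
    }
}

func main() {
    b := &bus{}

    // Register first, then start, so the initial transition is captured.
    sub := b.Register()
    b.Start()

    fmt.Println(<-sub) // prints: initial-state
}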
@@ -1284,9 +1301,8 @@ func TestRbfChannelFlushingTransitions(t *testing.T) {
// We'll modify the starting balance to be 3x the required fee
// to ensure that we can pay for the fee.
flushEvent.ShutdownBalances.LocalBalance = lnwire.NewMSatFromSatoshis( //nolint:ll
absoluteFee * 3,
)
localBalanceMSat := lnwire.NewMSatFromSatoshis(absoluteFee * 3)
flushEvent.ShutdownBalances.LocalBalance = localBalanceMSat
testName := fmt.Sprintf("local_can_pay_for_fee/"+
"fresh_flush=%v", isFreshFlush)
@@ -1304,7 +1320,8 @@ func TestRbfChannelFlushingTransitions(t *testing.T) {
defer closeHarness.stopAndAssert()
localBalance := flushEvent.ShutdownBalances.LocalBalance
balanceAfterClose := localBalance.ToSatoshis() - absoluteFee //nolint:ll
balanceAfterClose := localBalance.ToSatoshis() -
absoluteFee
// If this is a fresh flush, then we expect the state
// to be marked on disk.
@@ -1353,9 +1370,7 @@ func TestRbfChannelFlushingTransitions(t *testing.T) {
CloserScript: remoteAddr,
CloseeScript: localAddr,
ClosingSigs: lnwire.ClosingSigs{
CloserAndClosee: newSigTlv[tlv.TlvType3]( //nolint:ll
remoteWireSig,
),
CloserAndClosee: remoteSigRecordType3,
},
},
}
@@ -1467,9 +1482,7 @@ func TestRbfCloseClosingNegotiationLocal(t *testing.T) {
CloserNoClosee: newSigTlv[tlv.TlvType1](
remoteWireSig,
),
CloserAndClosee: newSigTlv[tlv.TlvType3]( //nolint:ll
remoteWireSig,
),
CloserAndClosee: remoteSigRecordType3,
},
},
}
@@ -1564,9 +1577,7 @@ func TestRbfCloseClosingNegotiationLocal(t *testing.T) {
CloserScript: remoteAddr,
CloseeScript: remoteAddr,
ClosingSigs: lnwire.ClosingSigs{
CloserAndClosee: newSigTlv[tlv.TlvType3]( //nolint:ll
remoteWireSig,
),
CloserAndClosee: remoteSigRecordType3,
},
},
}
@@ -1732,7 +1743,7 @@ func TestRbfCloseClosingNegotiationRemote(t *testing.T) {
closeHarness.assertNoStateTransitions()
})
// If our balance, is dust, then the remote party should send a
// If our balance is dust, then the remote party should send a
// signature that doesn't include our output.
t.Run("recv_offer_err_closer_no_closee", func(t *testing.T) {
// We'll modify our local balance to be dust.
@@ -1764,9 +1775,7 @@ func TestRbfCloseClosingNegotiationRemote(t *testing.T) {
CloserScript: remoteAddr,
CloseeScript: localAddr,
ClosingSigs: lnwire.ClosingSigs{
CloserAndClosee: newSigTlv[tlv.TlvType3]( //nolint:ll
remoteWireSig,
),
CloserAndClosee: remoteSigRecordType3,
},
},
}
@@ -1792,9 +1801,7 @@ func TestRbfCloseClosingNegotiationRemote(t *testing.T) {
CloserScript: remoteAddr,
CloseeScript: localAddr,
ClosingSigs: lnwire.ClosingSigs{
CloserNoClosee: newSigTlv[tlv.TlvType1]( //nolint:ll
remoteWireSig,
),
CloserNoClosee: remoteSigRecordType1,
},
},
}
@@ -1840,9 +1847,7 @@ func TestRbfCloseClosingNegotiationRemote(t *testing.T) {
FeeSatoshis: absoluteFee,
LockTime: 1,
ClosingSigs: lnwire.ClosingSigs{
CloserAndClosee: newSigTlv[tlv.TlvType3]( //nolint:ll
remoteWireSig,
),
CloserAndClosee: remoteSigRecordType3,
},
},
}
@@ -1886,9 +1891,7 @@ func TestRbfCloseClosingNegotiationRemote(t *testing.T) {
CloserScript: remoteAddr,
CloseeScript: remoteAddr,
ClosingSigs: lnwire.ClosingSigs{
CloserNoClosee: newSigTlv[tlv.TlvType1]( //nolint:ll
remoteWireSig,
),
CloserNoClosee: remoteSigRecordType1,
},
},
}
@@ -1937,9 +1940,7 @@ func TestRbfCloseClosingNegotiationRemote(t *testing.T) {
FeeSatoshis: absoluteFee,
LockTime: 1,
ClosingSigs: lnwire.ClosingSigs{
CloserAndClosee: newSigTlv[tlv.TlvType3]( //nolint:ll
remoteWireSig,
),
CloserAndClosee: remoteSigRecordType3,
},
},
}
@@ -2005,12 +2006,7 @@ func TestRbfCloseErr(t *testing.T) {
// initiate a new local sig).
closeHarness.assertSingleRbfIteration(
localOffer, balanceAfterClose, absoluteFee,
noDustExpect, false,
)
// We should terminate in the negotiation state.
closeHarness.assertStateTransitions(
&ClosingNegotiation{},
noDustExpect, true,
)
})
@@ -2040,9 +2036,7 @@ func TestRbfCloseErr(t *testing.T) {
FeeSatoshis: absoluteFee,
LockTime: 1,
ClosingSigs: lnwire.ClosingSigs{
CloserAndClosee: newSigTlv[tlv.TlvType3]( //nolint:ll
remoteWireSig,
),
CloserAndClosee: remoteSigRecordType3,
},
},
}
@@ -2054,7 +2048,7 @@ func TestRbfCloseErr(t *testing.T) {
// sig.
closeHarness.assertSingleRemoteRbfIteration(
feeOffer, balanceAfterClose, absoluteFee, sequence,
false, true,
true, true,
)
})
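Taken together, the test changes above replace one-shot 10 ms receives and non-blocking default cases with shared defaultTimeout, longTimeout, and defaultPoll constants, and slow the state machine's CustomPollInterval from a nanosecond to defaultPoll, so assertions tolerate scheduler jitter on CI without masking real hangs. A minimal sketch of the two assertion shapes, assuming a generic string-typed events channel; expectTransition and expectNoTransition are illustrative helpers, not part of the actual harness.

package main

import (
    "fmt"
    "time"
)

const (
    defaultTimeout = 500 * time.Millisecond
    defaultPoll    = 50 * time.Millisecond
)

// expectTransition waits up to defaultTimeout for the next state instead of
// a 10 ms window, which flaked when the machine ran on a busy CI host.
func expectTransition(events <-chan string) (string, error) {
    select {
    case s := <-events:
        return s, nil
    case <-time.After(defaultTimeout):
        return "", fmt.Errorf("no transition within %v", defaultTimeout)
    }
}

// expectNoTransition gives the machine a full poll interval to produce an
// unexpected state, rather than sampling the channel exactly once with a
// non-blocking default case.
func expectNoTransition(events <-chan string) error {
    select {
    case s := <-events:
        return fmt.Errorf("unexpected transition: %v", s)
    case <-time.After(defaultPoll):
        return nil
    }
}

func main() {
    events := make(chan string, 1)
    events <- "ClosingNegotiation"

    state, err := expectTransition(events)
    fmt.Println(state, err) // ClosingNegotiation <nil>

    // With the queue drained, no further transitions should appear.
    fmt.Println(expectNoTransition(events)) // <nil>
}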

View File

@@ -23,7 +23,7 @@ import (
)
var (
// ErrInvalidStateTransition is returned if the remote party tries to
// ErrThawHeightNotReached is returned if the remote party tries to
// close, but the thaw height hasn't been matched yet.
ErrThawHeightNotReached = fmt.Errorf("thaw height not reached")
)
@@ -139,8 +139,8 @@ func validateShutdown(chanThawHeight fn.Option[uint32],
// the state. From this state, we can receive two possible incoming events:
// SendShutdown and ShutdownReceived. Both of these will transition us to the
// ChannelFlushing state.
func (c *ChannelActive) ProcessEvent(event ProtocolEvent, env *Environment,
) (*CloseStateTransition, error) {
func (c *ChannelActive) ProcessEvent(event ProtocolEvent,
env *Environment) (*CloseStateTransition, error) {
switch msg := event.(type) {
// If we get a confirmation, then a prior transaction we broadcasted
@@ -284,8 +284,8 @@ func (c *ChannelActive) ProcessEvent(event ProtocolEvent, env *Environment,
// forward once we receive the ShutdownComplete event. Receiving
// ShutdownComplete means that we've sent our shutdown, as this was specified
// as a post send event.
func (s *ShutdownPending) ProcessEvent(event ProtocolEvent, env *Environment,
) (*CloseStateTransition, error) {
func (s *ShutdownPending) ProcessEvent(event ProtocolEvent,
env *Environment) (*CloseStateTransition, error) {
switch msg := event.(type) {
// If we get a confirmation, then a prior transaction we broadcasted
@@ -443,8 +443,8 @@ func (s *ShutdownPending) ProcessEvent(event ProtocolEvent, env *Environment,
// a ShutdownReceived event, then we'll stay in the ChannelFlushing state, as
// we haven't yet fully cleared the channel. Otherwise, we can move to the
// CloseReady state which'll begin the channel closing process.
func (c *ChannelFlushing) ProcessEvent(event ProtocolEvent, env *Environment,
) (*CloseStateTransition, error) {
func (c *ChannelFlushing) ProcessEvent(event ProtocolEvent,
env *Environment) (*CloseStateTransition, error) {
switch msg := event.(type) {
// If we get a confirmation, then a prior transaction we broadcasted
@@ -578,8 +578,8 @@ func (c *ChannelFlushing) ProcessEvent(event ProtocolEvent, env *Environment,
// processNegotiateEvent is a helper function that processes a new event to
// local channel state once we're in the ClosingNegotiation state.
func processNegotiateEvent(c *ClosingNegotiation, event ProtocolEvent,
env *Environment, chanPeer lntypes.ChannelParty,
) (*CloseStateTransition, error) {
env *Environment,
chanPeer lntypes.ChannelParty) (*CloseStateTransition, error) {
targetPeerState := c.PeerState.GetForParty(chanPeer)
@@ -611,8 +611,8 @@ func processNegotiateEvent(c *ClosingNegotiation, event ProtocolEvent,
// updateAndValidateCloseTerms is a helper function that examines the
// incoming event and decides if we need to update the remote party's address,
// or reject it if it doesn't include our latest address.
func (c *ClosingNegotiation) updateAndValidateCloseTerms(event ProtocolEvent,
) error {
func (c *ClosingNegotiation) updateAndValidateCloseTerms(
event ProtocolEvent) error {
assertLocalScriptMatches := func(localScriptInMsg []byte) error {
if !bytes.Equal(
@@ -669,8 +669,8 @@ func (c *ClosingNegotiation) updateAndValidateCloseTerms(event ProtocolEvent,
// party in response to new events. From this state, we'll continue to drive
// forward the local and remote states until we arrive at the StateFin stage,
// or we loop back up to the ShutdownPending state.
func (c *ClosingNegotiation) ProcessEvent(event ProtocolEvent, env *Environment,
) (*CloseStateTransition, error) {
func (c *ClosingNegotiation) ProcessEvent(event ProtocolEvent,
env *Environment) (*CloseStateTransition, error) {
// There're two classes of events that can break us out of this state:
// we receive a confirmation event, or we receive a signal to restart
@@ -722,8 +722,8 @@ func (c *ClosingNegotiation) ProcessEvent(event ProtocolEvent, env *Environment,
case shouldRouteTo(lntypes.Remote):
chancloserLog.Infof("ChannelPoint(%v): routing %T to remote "+
"chan state", env.ChanPoint, event)
// Drive forward the remote state based on the next event.
return processNegotiateEvent(c, event, env, lntypes.Remote)
}
@@ -740,8 +740,8 @@ func newSigTlv[T tlv.TlvType](s lnwire.Sig) tlv.OptionalRecordT[T, lnwire.Sig] {
// ProcessEvent implements the event processing to kick off the process of
// obtaining a new (possibly RBF'd) signature for our commitment transaction.
func (l *LocalCloseStart) ProcessEvent(event ProtocolEvent, env *Environment,
) (*CloseStateTransition, error) {
func (l *LocalCloseStart) ProcessEvent(event ProtocolEvent,
env *Environment) (*CloseStateTransition, error) {
switch msg := event.(type) { //nolint:gocritic
// If we receive a SendOfferEvent, then we'll use the specified fee
@@ -905,8 +905,8 @@ func extractSig(msg lnwire.ClosingSig) fn.Result[lnwire.Sig] {
// LocalOfferSent state. In this state, we'll wait for the remote party to
// send a close_signed message which gives us the ability to broadcast a new
// co-op close transaction.
func (l *LocalOfferSent) ProcessEvent(event ProtocolEvent, env *Environment,
) (*CloseStateTransition, error) {
func (l *LocalOfferSent) ProcessEvent(event ProtocolEvent,
env *Environment) (*CloseStateTransition, error) {
switch msg := event.(type) { //nolint:gocritic
// If we receive a LocalSigReceived event, then we'll attempt to
@@ -981,8 +981,8 @@ func (l *LocalOfferSent) ProcessEvent(event ProtocolEvent, env *Environment,
// RemoteCloseStart. In this state, we'll wait for the remote party to send a
// closing_complete message. Assuming they can pay for the fees, we'll sign it
// ourselves, then transition to the next state of ClosePending.
func (l *RemoteCloseStart) ProcessEvent(event ProtocolEvent, env *Environment,
) (*CloseStateTransition, error) {
func (l *RemoteCloseStart) ProcessEvent(event ProtocolEvent,
env *Environment) (*CloseStateTransition, error) {
switch msg := event.(type) { //nolint:gocritic
// If we receive an OfferReceived event, we'll make sure they can
@@ -1156,8 +1156,8 @@ func (l *RemoteCloseStart) ProcessEvent(event ProtocolEvent, env *Environment,
// ProcessEvent is a semi-terminal state in the rbf-coop close state machine.
// In this state, we're waiting for either a confirmation, or for either side
// to attempt to create a new RBF'd co-op close transaction.
func (c *ClosePending) ProcessEvent(event ProtocolEvent, env *Environment,
) (*CloseStateTransition, error) {
func (c *ClosePending) ProcessEvent(event ProtocolEvent,
_ *Environment) (*CloseStateTransition, error) {
switch msg := event.(type) {
// If we get a spend while waiting for the close, then we'll go to our
@@ -1205,8 +1205,8 @@ func (c *ClosePending) ProcessEvent(event ProtocolEvent, env *Environment,
// ProcessEvent is the event processing for our terminal state. In this state,
// we just keep looping back on ourselves.
func (c *CloseFin) ProcessEvent(event ProtocolEvent, env *Environment,
) (*CloseStateTransition, error) {
func (c *CloseFin) ProcessEvent(_ ProtocolEvent,
_ *Environment) (*CloseStateTransition, error) {
return &CloseStateTransition{
NextState: c,
@@ -1217,8 +1217,8 @@ func (c *CloseFin) ProcessEvent(event ProtocolEvent, env *Environment,
// In this state, we hit a validation error in an earlier state, so we'll remain
// in this state for the user to examine. We may also process new requests to
// continue the state machine.
func (c *CloseErr) ProcessEvent(event ProtocolEvent, env *Environment,
) (*CloseStateTransition, error) {
func (c *CloseErr) ProcessEvent(event ProtocolEvent,
_ *Environment) (*CloseStateTransition, error) {
switch msg := event.(type) {
// If we get a send offer event in this state, then we're doing a state

View File

@@ -105,8 +105,8 @@ type DaemonAdapters interface {
// TODO(roasbeef): could abstract further?
RegisterConfirmationsNtfn(txid *chainhash.Hash, pkScript []byte,
numConfs, heightHint uint32,
opts ...chainntnfs.NotifierOption,
) (*chainntnfs.ConfirmationEvent, error)
opts ...chainntnfs.NotifierOption) (
*chainntnfs.ConfirmationEvent, error)
// RegisterSpendNtfn registers an intent to be notified once the target
// outpoint is successfully spent within a transaction. The script that
@@ -374,7 +374,8 @@ func (s *StateMachine[Event, Env]) executeDaemonEvent(ctx context.Context,
// If a post-send event was specified, then we'll funnel
// that back into the main state machine now as well.
return fn.MapOptionZ(daemonEvent.PostSendEvent, func(event Event) error { //nolint:ll
//nolint:ll
return fn.MapOptionZ(daemonEvent.PostSendEvent, func(event Event) error {
launched := s.gm.Go(
ctx, func(ctx context.Context) {
s.log.DebugS(ctx, "Sending post-send event",
@@ -590,7 +591,7 @@ func (s *StateMachine[Event, Env]) applyEvents(ctx context.Context,
}
newEvents := transition.NewEvents
err = fn.MapOptionZ(newEvents, func(events EmittedEvent[Event]) error { //nolint:ll
err = fn.MapOptionZ(newEvents, func(events EmittedEvent[Event]) error {
// With the event processed, we'll process any
// new daemon events that were emitted as part
// of this new state transition.
@@ -605,8 +606,6 @@ func (s *StateMachine[Event, Env]) applyEvents(ctx context.Context,
// Next, we'll add any new emitted events to our
// event queue.
//
//nolint:ll
for _, inEvent := range events.InternalEvent {
s.log.DebugS(ctx, "Adding new internal event to queue",
"event", lnutils.SpewLogClosure(inEvent))
@@ -692,7 +691,10 @@ func (s *StateMachine[Event, Env]) driveMachine(ctx context.Context) {
// An outside caller is querying our state, so we'll return the
// latest state.
case stateQuery := <-s.stateQuery:
if !fn.SendOrQuit(stateQuery.CurrentState, currentState, s.quit) { //nolint:ll
if !fn.SendOrQuit(
stateQuery.CurrentState, currentState, s.quit,
) {
return
}