From ca770bb4afccee33b142eda028760ebc99073411 Mon Sep 17 00:00:00 2001
From: yyforyongyu
Date: Mon, 30 Jun 2025 20:56:50 +0800
Subject: [PATCH] htlcswitch: notify channel active after reforwarding

The channel should only be considered active when its pending tasks are
finished, which include:
1. sync channel state via reestablish.
2. send previous shutdown msg.
3. reset the mailbox's packets.
4. reforwarding logs loaded from restart.

When the above tasks are finished, the channel can be considered as
fully resumed from its previous disconnection.
---
 htlcswitch/link.go | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/htlcswitch/link.go b/htlcswitch/link.go
index 5398978da..d1f27fa12 100644
--- a/htlcswitch/link.go
+++ b/htlcswitch/link.go
@@ -1307,13 +1307,6 @@ func (l *channelLink) htlcManager(ctx context.Context) {
 	// allow the switch to forward HTLCs in the outbound direction.
 	l.markReestablished()
 
-	// Now that we've received both channel_ready and channel reestablish,
-	// we can go ahead and send the active channel notification. We'll also
-	// defer the inactive notification for when the link exits to ensure
-	// that every active notification is matched by an inactive one.
-	l.cfg.NotifyActiveChannel(l.ChannelPoint())
-	defer l.cfg.NotifyInactiveChannel(l.ChannelPoint())
-
 	// With the channel states synced, we now reset the mailbox to ensure
 	// we start processing all unacked packets in order. This is done here
 	// to ensure that all acknowledgments that occur during channel
@@ -1355,6 +1348,13 @@
 		go l.fwdPkgGarbager()
 	}
 
+	// Now that we've received both channel_ready and channel reestablish,
+	// we can go ahead and send the active channel notification. We'll also
+	// defer the inactive notification for when the link exits to ensure
+	// that every active notification is matched by an inactive one.
+	l.cfg.NotifyActiveChannel(l.ChannelPoint())
+	defer l.cfg.NotifyInactiveChannel(l.ChannelPoint())
+
 	for {
 		// We must always check if we failed at some point processing
 		// the last update before processing the next.