discovery: grab channel mutex before any DB calls

In `handleChanUpdate`, make sure to grab the `channelMtx` lock before
making any DB calls so that the channel state we read from the database
cannot change between the initial staleness check and the later call to
`UpdateEdge`.
Elle Mouton 2025-03-05 07:41:05 +02:00
parent 5e35bd8328
commit 2e85e08556
2 changed files with 23 additions and 20 deletions
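
For illustration, here is a minimal, self-contained sketch of the pattern this
commit enforces. The keyedMutex and graph types and the simplified
handleChanUpdate below are hypothetical stand-ins for the gossiper's
channelMtx, its DB-backed graph, and the real handler; they are not lnd's
actual types.

package main

import "sync"

// keyedMutex hands out one logical mutex per channel ID, similar in
// spirit to the gossiper's channelMtx.
type keyedMutex struct {
	mu    sync.Mutex
	locks map[uint64]*sync.Mutex
}

// get returns the mutex for the given ID, creating it on first use.
func (k *keyedMutex) get(id uint64) *sync.Mutex {
	k.mu.Lock()
	defer k.mu.Unlock()

	if k.locks == nil {
		k.locks = make(map[uint64]*sync.Mutex)
	}
	if k.locks[id] == nil {
		k.locks[id] = &sync.Mutex{}
	}

	return k.locks[id]
}

func (k *keyedMutex) Lock(id uint64)   { k.get(id).Lock() }
func (k *keyedMutex) Unlock(id uint64) { k.get(id).Unlock() }

// graph mimics the DB-backed edge state: the latest update timestamp
// per channel, plus a counter of UpdateEdge-style writes.
type graph struct {
	mu       sync.Mutex
	latest   map[uint64]uint32
	updCalls int
}

// isStaleEdgePolicy reports whether the update is no newer than the
// one already stored.
func (g *graph) isStaleEdgePolicy(scid uint64, ts uint32) bool {
	g.mu.Lock()
	defer g.mu.Unlock()

	return ts <= g.latest[scid]
}

// updateEdge stores the new timestamp and counts the write.
func (g *graph) updateEdge(scid uint64, ts uint32) {
	g.mu.Lock()
	defer g.mu.Unlock()

	g.latest[scid] = ts
	g.updCalls++
}

// handleChanUpdate mirrors the ordering this commit enforces: take the
// channel's mutex before the first DB read, so that the staleness
// check and the later updateEdge operate on a consistent view.
func handleChanUpdate(mtx *keyedMutex, g *graph, scid uint64, ts uint32) {
	mtx.Lock(scid)
	defer mtx.Unlock(scid)

	if g.isStaleEdgePolicy(scid, ts) {
		return
	}

	g.updateEdge(scid, ts)
}

The important property is the ordering: the per-channel mutex is held across
both the read and the write, turning the check-then-act sequence into an
atomic unit for that channel while leaving updates for other channels
unblocked.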

discovery/gossiper.go

@@ -2997,6 +2997,12 @@ func (d *AuthenticatedGossiper) handleChanUpdate(nMsg *networkMsg,
 		graphScid = upd.ShortChannelID
 	}
 
+	// We make sure to obtain the mutex for this channel ID before we access
+	// the database. This ensures the state we read from the database has
+	// not changed between this point and when we call UpdateEdge() later.
+	d.channelMtx.Lock(graphScid.ToUint64())
+	defer d.channelMtx.Unlock(graphScid.ToUint64())
+
 	if d.cfg.Graph.IsStaleEdgePolicy(
 		graphScid, timestamp, upd.ChannelFlags,
 	) {
@@ -3029,14 +3035,6 @@ func (d *AuthenticatedGossiper) handleChanUpdate(nMsg *networkMsg,
 	// Get the node pub key as far since we don't have it in the channel
 	// update announcement message. We'll need this to properly verify the
 	// message's signature.
-	//
-	// We make sure to obtain the mutex for this channel ID before we
-	// access the database. This ensures the state we read from the
-	// database has not changed between this point and when we call
-	// UpdateEdge() later.
-	d.channelMtx.Lock(graphScid.ToUint64())
-	defer d.channelMtx.Unlock(graphScid.ToUint64())
-
 	chanInfo, e1, e2, err := d.cfg.Graph.GetChannelByID(graphScid)
 	switch {
 	// No error, break.
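
The window the reordering closes can be sketched as an interleaving of two
goroutines G1 and G2 handling identical updates for the same channel under the
pre-commit ordering:

	G1: IsStaleEdgePolicy -> false
	G2: IsStaleEdgePolicy -> false   (G1 has not written yet)
	G1: channelMtx.Lock, UpdateEdge, Unlock
	G2: channelMtx.Lock, UpdateEdge, Unlock   (duplicate write)

With the mutex acquired before the staleness check, G2 blocks until G1's
UpdateEdge has completed, so G2's check observes the new state and the
duplicate is dropped. That is exactly what the test change below asserts.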

discovery/gossiper_test.go

@@ -4001,12 +4001,12 @@ func TestBroadcastAnnsAfterGraphSynced(t *testing.T) {
 	assertBroadcast(chanAnn2, true, true)
 }
 
-// TestRateLimitDeDup demonstrates a bug that currently exists in the handling
-// of channel updates. It shows that if two identical channel updates are
-// received in quick succession, then both of them might be counted towards the
-// rate limit, even though only one of them should be.
+// TestRateLimitDeDup tests that if we get the same channel update in very
+// quick succession, then these updates should not be individually considered
+// in our rate limiting logic.
 //
-// NOTE: this will be fixed in an upcoming commit.
+// NOTE: this only tests the deduplication logic. The main rate limiting logic
+// is tested by TestRateLimitChannelUpdates.
 func TestRateLimitDeDup(t *testing.T) {
 	t.Parallel()
 
@@ -4145,11 +4145,12 @@ func TestRateLimitDeDup(t *testing.T) {
 	// Now we can un-pause the thread that grabbed the mutex first.
 	close(pause)
 
-	// Currently, both updates make it to UpdateEdge.
+	// Only 1 call should have made it past the staleness check to the
+	// graph's UpdateEdge call.
 	err = wait.NoError(func() error {
 		count := getUpdateEdgeCount()
-		if count != 4 {
-			return fmt.Errorf("expected 4 calls to UpdateEdge, "+
+		if count != 3 {
+			return fmt.Errorf("expected 3 calls to UpdateEdge, "+
 				"got %v", count)
 		}
 
@@ -4183,10 +4184,14 @@ func TestRateLimitDeDup(t *testing.T) {
 	// Show that the last update was broadcast.
 	assertBroadcast(true)
 
-	// We should be allowed to send another update now since only one of the
-	// above duplicates should count towards the rate limit.
-	// However, this is currently not the case, and so we will be rate
-	// limited early. This will be fixed in an upcoming commit.
+	// We should be allowed to send another update now since the rate limit
+	// has still not been met.
 	refreshUpdate()
 	processUpdate(&update, nodePeer1)
-	assertBroadcast(false)
+	assertBroadcast(true)
+
+	// Our rate limit should be hit now, so a new update should not be
+	// broadcast.
+	refreshUpdate()
+	processUpdate(&update, nodePeer1)
+	assertBroadcast(false)
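
As a toy driver for the sketch above (a second file in the same hypothetical
package), two identical concurrent updates show the behavior the updated test
asserts: only one of the duplicates makes it past the staleness check to the
edge update.

package main

import (
	"fmt"
	"sync"
)

func main() {
	var (
		mtx keyedMutex
		g   = &graph{latest: make(map[uint64]uint32)}
		wg  sync.WaitGroup
	)

	// Two identical updates for the same channel arrive at once.
	for i := 0; i < 2; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			handleChanUpdate(&mtx, g, 123, 1)
		}()
	}
	wg.Wait()

	// Whichever goroutine wins the channel mutex performs the write;
	// the other then sees the same timestamp and treats its copy as
	// stale.
	fmt.Println("updateEdge calls:", g.updCalls) // prints 1
}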