diff --git a/channeldb/channel_test.go b/channeldb/channel_test.go
index 97ab7d5bf..b2e56f83c 100644
--- a/channeldb/channel_test.go
+++ b/channeldb/channel_test.go
@@ -205,7 +205,7 @@ func createTestChannelState(cdb *DB) (*OpenChannel, error) {
 	return &OpenChannel{
 		ChanType:        SingleFunder,
 		ChainHash:       key,
-		FundingOutpoint: *testOutpoint,
+		FundingOutpoint: wire.OutPoint{Hash: key, Index: rand.Uint32()},
 		ShortChannelID:  chanID,
 		IsInitiator:     true,
 		IsPending:       true,
@@ -852,6 +852,79 @@ func TestFetchClosedChannels(t *testing.T) {
 	}
 }
 
+// TestFetchWaitingCloseChannels ensures that the correct set of channels
+// waiting to be closed is returned.
+func TestFetchWaitingCloseChannels(t *testing.T) {
+	t.Parallel()
+
+	const numChannels = 2
+	const broadcastHeight = 99
+	addr := &net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: 18555}
+
+	// We'll start by creating two channels within our test database. One of
+	// them will have its funding transaction confirmed on-chain, while the
+	// other will remain unconfirmed.
+	db, cleanUp, err := makeTestDB()
+	if err != nil {
+		t.Fatalf("unable to make test database: %v", err)
+	}
+	defer cleanUp()
+
+	channels := make([]*OpenChannel, numChannels)
+	for i := 0; i < numChannels; i++ {
+		channel, err := createTestChannelState(db)
+		if err != nil {
+			t.Fatalf("unable to create channel: %v", err)
+		}
+		err = channel.SyncPending(addr, broadcastHeight)
+		if err != nil {
+			t.Fatalf("unable to sync channel: %v", err)
+		}
+		channels[i] = channel
+	}
+
+	// We'll only confirm the first one.
+	channelConf := lnwire.ShortChannelID{
+		BlockHeight: broadcastHeight + 1,
+		TxIndex:     10,
+		TxPosition:  15,
+	}
+	if err := channels[0].MarkAsOpen(channelConf); err != nil {
+		t.Fatalf("unable to mark channel as open: %v", err)
+	}
+
+	// Then, we'll mark the channels as if their commitments were broadcast.
+	// This would happen in the event of a force close and should make the
+	// channels enter a state of waiting close.
+	for _, channel := range channels {
+		if err := channel.MarkCommitmentBroadcasted(); err != nil {
+			t.Fatalf("unable to mark commitment broadcast: %v", err)
+		}
+	}
+
+	// Now, we'll fetch all the channels waiting to be closed from the
+	// database. We should expect to see both channels above, even though
+	// one of them has not had its funding transaction confirmed on-chain.
+	waitingCloseChannels, err := db.FetchWaitingCloseChannels()
+	if err != nil {
+		t.Fatalf("unable to fetch all waiting close channels: %v", err)
+	}
+	if len(waitingCloseChannels) != 2 {
+		t.Fatalf("expected %d channels waiting to be closed, got %d", 2,
+			len(waitingCloseChannels))
+	}
+	expectedChannels := make(map[wire.OutPoint]struct{})
+	for _, channel := range channels {
+		expectedChannels[channel.FundingOutpoint] = struct{}{}
+	}
+	for _, channel := range waitingCloseChannels {
+		if _, ok := expectedChannels[channel.FundingOutpoint]; !ok {
+			t.Fatalf("expected channel %v to be waiting close",
+				channel.FundingOutpoint)
+		}
+	}
+}
+
 // TestRefreshShortChanID asserts that RefreshShortChanID updates the in-memory
 // short channel ID of another OpenChannel to reflect a preceding call to
 // MarkOpen on a different OpenChannel.
diff --git a/channeldb/db.go b/channeldb/db.go
index 0f9183185..d71656a72 100644
--- a/channeldb/db.go
+++ b/channeldb/db.go
@@ -450,9 +450,20 @@ func (d *DB) FetchPendingChannels() ([]*OpenChannel, error) {
 }
 
 // FetchWaitingCloseChannels will return all channels that have been opened,
-// but now is waiting for a closing transaction to be confirmed.
+// but are now waiting for a closing transaction to be confirmed.
+//
+// NOTE: This includes channels that are still pending open.
 func (d *DB) FetchWaitingCloseChannels() ([]*OpenChannel, error) {
-	return fetchChannels(d, false, true)
+	waitingClose, err := fetchChannels(d, false, true)
+	if err != nil {
+		return nil, err
+	}
+	pendingWaitingClose, err := fetchChannels(d, true, true)
+	if err != nil {
+		return nil, err
+	}
+
+	return append(waitingClose, pendingWaitingClose...), nil
 }
 
 // fetchChannels attempts to retrieve channels currently stored in the
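
Not part of the patch itself: a minimal, hypothetical sketch of how a caller might exercise the updated FetchWaitingCloseChannels. The helper name listWaitingCloseChannels and the database path are illustrative assumptions; channeldb.Open, DB.FetchWaitingCloseChannels, and the OpenChannel fields FundingOutpoint and IsPending come from the lnd codebase and this diff.

package main

import (
	"fmt"
	"log"

	"github.com/lightningnetwork/lnd/channeldb"
)

// listWaitingCloseChannels prints every channel waiting for its closing
// transaction to confirm. With the change above, this now also covers
// channels whose funding transaction is still unconfirmed (pending open).
func listWaitingCloseChannels(db *channeldb.DB) error {
	channels, err := db.FetchWaitingCloseChannels()
	if err != nil {
		return err
	}

	for _, channel := range channels {
		fmt.Printf("channel %v is waiting close (pending open: %v)\n",
			channel.FundingOutpoint, channel.IsPending)
	}

	return nil
}

func main() {
	// NOTE: the database path below is purely illustrative.
	db, err := channeldb.Open("/tmp/lnd-example-db")
	if err != nil {
		log.Fatalf("unable to open channeldb: %v", err)
	}
	defer db.Close()

	if err := listWaitingCloseChannels(db); err != nil {
		log.Fatalf("unable to list waiting close channels: %v", err)
	}
}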