From dee8ad3754d15b8cb0a2f3abd9f0f339ba3882d2 Mon Sep 17 00:00:00 2001
From: Boris Nagaev
Date: Tue, 19 Aug 2025 14:54:41 -0300
Subject: [PATCH] multi: context.Background() -> t.Context()

Use the t.Context() helper introduced in Go 1.24 and fix the
corresponding linter warnings.

This change was produced by:
- running golangci-lint run --fix
- sed 's/context.Background/t.Context/' -i `git grep -l context.Background | grep test.go`
- manually fixing broken tests
- itest, lntest: use ht.Context() where ht or hn is available
- in HarnessNode.Stop() we keep using context.Background(), because it
  is called from a cleanup handler, by which point t.Context() has
  already been canceled.
---
 accessman_test.go                             |  5 +-
 autopilot/betweenness_centrality_test.go      |  5 +-
 autopilot/externalscoreattach_test.go         |  3 +-
 autopilot/prefattach_test.go                  |  8 +-
 autopilot/top_centrality_test.go              |  3 +-
 batch/batch_test.go                           |  9 +--
 chanbackup/backup_test.go                     |  5 +-
 chanbackup/pubsub_test.go                     |  7 +-
 channeldb/addr_source_test.go                 |  2 +-
 channeldb/db_test.go                          |  3 +-
 cluster/etcd_elector_test.go                  |  4 +-
 contractcourt/chain_watcher_test.go           |  2 +-
 discovery/gossiper_test.go                    | 54 +++++++-------
 discovery/reliable_sender_test.go             |  4 +-
 discovery/syncer_atomic_test.go               |  2 +-
 discovery/syncer_test.go                      | 28 +++----
 fn/context_guard_test.go                      | 20 ++---
 fn/goroutine_manager_test.go                  |  6 +-
 funding/batch_test.go                         |  2 +-
 graph/builder_test.go                         | 27 ++++---
 graph/db/benchmark_test.go                    | 18 ++---
 graph/db/graph_test.go                        | 74 +++++++++----------
 graph/db/sql_migration_test.go                | 19 +++--
 graph/notifications_test.go                   | 15 ++--
 htlcswitch/link_isolated_test.go              |  2 +-
 htlcswitch/link_test.go                       | 46 ++++++------
 htlcswitch/switch_test.go                     |  3 +-
 invoices/invoiceregistry_test.go              | 37 +++++-----
 invoices/invoices_test.go                     | 43 ++++++-----
 invoices/kv_sql_migration_test.go             |  5 +-
 invoices/sql_migration_test.go                |  3 +-
 itest/lnd_channel_funding_fund_max_test.go    |  7 +-
 ...lnd_channel_funding_utxo_selection_test.go | 15 ++--
 itest/lnd_funding_test.go                     |  3 +-
 itest/lnd_graph_migration_test.go             |  2 +-
 itest/lnd_invoice_acceptor_test.go            |  3 +-
 itest/lnd_misc_test.go                        |  4 +-
 itest/lnd_payment_test.go                     |  2 +-
 itest/lnd_route_blinding_test.go              |  2 +-
 itest/lnd_rpc_middleware_interceptor_test.go  |  8 +-
 itest/lnd_signer_test.go                      |  3 +-
 kvdb/etcd/commit_queue_test.go                |  2 +-
 kvdb/etcd/db_test.go                          |  6 +-
 kvdb/etcd/readwrite_tx_test.go                |  5 +-
 kvdb/etcd/stm_test.go                         | 10 +--
 kvdb/etcd/walletdb_interface_test.go          |  3 +-
 kvdb/go.mod                                   |  2 +-
 kvdb/postgres/db_test.go                      |  3 +-
 kvdb/sqlite/db_test.go                        |  3 +-
 lnrpc/routerrpc/router_backend_test.go        |  3 +-
 lnrpc/routerrpc/router_server_test.go         |  6 +-
 lntest/fee_service.go                         |  3 +-
 lntest/harness.go                             |  2 +-
 lntest/node/harness_node.go                   | 30 ++++----
 lnwallet/chancloser/rbf_coop_test.go          | 22 +++---
 lnwallet/channel_test.go                      |  2 +-
 macaroons/bake_test.go                        |  3 +-
 macaroons/fuzz_test.go                        |  3 +-
 macaroons/service_test.go                     | 11 ++-
 macaroons/store_test.go                       |  2 +-
 msgmux/msg_router_test.go                     |  2 +-
 payments/db/kv_store_test.go                  |  3 +-
 payments/db/payment_test.go                   |  3 +-
 protofsm/state_machine_test.go                | 13 ++--
 routing/localchans/manager_test.go            |  2 +-
 routing/pathfind_test.go                      | 26 +++----
 routing/payment_lifecycle_test.go             | 24 +++---
 routing/router_test.go                        |  6 +-
 sqldb/migrations_test.go                      | 17 ++---
 sqldb/paginate_test.go                        |  8 +-
 walletunlocker/service_test.go                | 16 ++--
 71 files changed, 361 insertions(+), 393 deletions(-)

diff --git a/accessman_test.go b/accessman_test.go
index 06adab1cd..4f0c49276 100644
--- a/accessman_test.go
+++ b/accessman_test.go
@@ -1,7 +1,6 @@
 package lnd
 
 import (
-	"context"
 	"testing"
 
"github.com/btcsuite/btcd/btcec/v2" @@ -490,7 +489,7 @@ func TestAssignPeerPermsBypassExisting(t *testing.T) { func TestHasPeer(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := t.Context() // Create a testing accessMan. a := &accessMan{ @@ -708,7 +707,7 @@ func TestAddPeerAccessOutbound(t *testing.T) { // accessman's internal state based on the peer's status. func TestRemovePeerAccess(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := t.Context() // Create a testing accessMan. a := &accessMan{ diff --git a/autopilot/betweenness_centrality_test.go b/autopilot/betweenness_centrality_test.go index b1de27834..5571a78d2 100644 --- a/autopilot/betweenness_centrality_test.go +++ b/autopilot/betweenness_centrality_test.go @@ -1,7 +1,6 @@ package autopilot import ( - "context" "fmt" "testing" @@ -32,7 +31,7 @@ func TestBetweennessCentralityMetricConstruction(t *testing.T) { // Tests that empty graph results in empty centrality result. func TestBetweennessCentralityEmptyGraph(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := t.Context() centralityMetric, err := NewBetweennessCentralityMetric(1) require.NoError( @@ -64,7 +63,7 @@ func TestBetweennessCentralityEmptyGraph(t *testing.T) { // Test betweenness centrality calculating using an example graph. func TestBetweennessCentralityWithNonEmptyGraph(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := t.Context() workers := []int{1, 3, 9, 100} diff --git a/autopilot/externalscoreattach_test.go b/autopilot/externalscoreattach_test.go index bef50a674..7bf440aa5 100644 --- a/autopilot/externalscoreattach_test.go +++ b/autopilot/externalscoreattach_test.go @@ -1,7 +1,6 @@ package autopilot_test import ( - "context" "testing" "github.com/btcsuite/btcd/btcec/v2" @@ -23,7 +22,7 @@ func randKey() (*btcec.PublicKey, error) { // ExternalScoreAttachment correctly reflects the scores we set last. func TestSetNodeScores(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := t.Context() const name = "externalscore" diff --git a/autopilot/prefattach_test.go b/autopilot/prefattach_test.go index d7a578f7d..2d1c7d4a7 100644 --- a/autopilot/prefattach_test.go +++ b/autopilot/prefattach_test.go @@ -76,7 +76,7 @@ var chanGraphs = []struct { // empty graph, the NodeSores function always returns a score of 0. func TestPrefAttachmentSelectEmptyGraph(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := t.Context() prefAttach := NewPrefAttachment() // Create a random public key, which we will query to get a score for. @@ -116,7 +116,7 @@ func TestPrefAttachmentSelectEmptyGraph(t *testing.T) { // and the funds are appropriately allocated across each peer. func TestPrefAttachmentSelectTwoVertexes(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := t.Context() prand.Seed(time.Now().Unix()) @@ -203,7 +203,7 @@ func TestPrefAttachmentSelectTwoVertexes(t *testing.T) { // allocate all funds to each vertex (up to the max channel size). func TestPrefAttachmentSelectGreedyAllocation(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := t.Context() prand.Seed(time.Now().Unix()) @@ -316,7 +316,7 @@ func TestPrefAttachmentSelectGreedyAllocation(t *testing.T) { // of zero during scoring. 
func TestPrefAttachmentSelectSkipNodes(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := t.Context() prand.Seed(time.Now().Unix()) diff --git a/autopilot/top_centrality_test.go b/autopilot/top_centrality_test.go index e8ca3616d..282e60c36 100644 --- a/autopilot/top_centrality_test.go +++ b/autopilot/top_centrality_test.go @@ -1,7 +1,6 @@ package autopilot import ( - "context" "testing" "github.com/btcsuite/btcd/btcec/v2" @@ -59,7 +58,7 @@ func testTopCentrality(t *testing.T, graph testGraph, // Attempt to get centrality scores and expect // that the result equals with the expected set. scores, err := topCentrality.NodeScores( - context.Background(), graph, channels, chanSize, nodes, + t.Context(), graph, channels, chanSize, nodes, ) require.NoError(t, err) diff --git a/batch/batch_test.go b/batch/batch_test.go index 49ce0eabc..4abbc5a5b 100644 --- a/batch/batch_test.go +++ b/batch/batch_test.go @@ -1,7 +1,6 @@ package batch import ( - "context" "database/sql" "encoding/binary" "errors" @@ -31,7 +30,7 @@ var batchTestIntervals = []time.Duration{ // TestRetry tests the retry logic of the batch scheduler. func TestRetry(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := t.Context() dbDir := t.TempDir() @@ -99,7 +98,7 @@ func TestRetry(t *testing.T) { // and then continue to add a write transaction to the same batch. func TestReadOnly(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := t.Context() t.Run("bbolt-ReadWrite", func(t *testing.T) { db, err := walletdb.Create( @@ -455,7 +454,7 @@ func BenchmarkBoltBatching(b *testing.B) { // batchTest benches the performance of the batch scheduler configured // with/without the LazyAdd option and with the given commit interval. batchTest := func(b *testing.B, lazy bool, interval time.Duration) { - ctx := context.Background() + ctx := b.Context() db := setUpDB(b) @@ -549,7 +548,7 @@ func benchmarkSQLBatching(b *testing.B, sqlite bool) { ) } - ctx := context.Background() + ctx := b.Context() opts := sqldb.WriteTxOpt() // writeRecord is a helper that adds a single new invoice to the diff --git a/chanbackup/backup_test.go b/chanbackup/backup_test.go index d73b3769f..2c324caa7 100644 --- a/chanbackup/backup_test.go +++ b/chanbackup/backup_test.go @@ -121,8 +121,7 @@ func TestFetchBackupForChan(t *testing.T) { } for i, testCase := range testCases { _, err := FetchBackupForChan( - context.Background(), testCase.chanPoint, chanSource, - chanSource, + t.Context(), testCase.chanPoint, chanSource, chanSource, ) switch { // If this is a valid test case, and we failed, then we'll @@ -143,7 +142,7 @@ func TestFetchBackupForChan(t *testing.T) { // channel source for all channels and construct a Single for each channel. func TestFetchStaticChanBackups(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := t.Context() // First, we'll make the set of channels that we want to seed the // channel source with. Both channels will be fully populated in the diff --git a/chanbackup/pubsub_test.go b/chanbackup/pubsub_test.go index 49cfd3fb1..615f170fa 100644 --- a/chanbackup/pubsub_test.go +++ b/chanbackup/pubsub_test.go @@ -81,7 +81,7 @@ func (m *mockChannelNotifier) SubscribeChans(_ context.Context, // channel subscription, then the entire sub-swapper will fail to start. 
func TestNewSubSwapperSubscribeFail(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := t.Context() keyRing := &lnencrypt.MockKeyRing{} @@ -159,7 +159,7 @@ func TestSubSwapperIdempotentStartStop(t *testing.T) { swapper := newMockSwapper(keyRing) subSwapper, err := NewSubSwapper( - context.Background(), nil, &chanNotifier, keyRing, swapper, + t.Context(), nil, &chanNotifier, keyRing, swapper, ) require.NoError(t, err, "unable to init subSwapper") @@ -226,8 +226,7 @@ func TestSubSwapperUpdater(t *testing.T) { // With our channel set created, we'll make a fresh sub swapper // instance to begin our test. subSwapper, err := NewSubSwapper( - context.Background(), initialChanSet, chanNotifier, keyRing, - swapper, + t.Context(), initialChanSet, chanNotifier, keyRing, swapper, ) require.NoError(t, err, "unable to make swapper") if err := subSwapper.Start(); err != nil { diff --git a/channeldb/addr_source_test.go b/channeldb/addr_source_test.go index 7562a2dd1..7a47e9a81 100644 --- a/channeldb/addr_source_test.go +++ b/channeldb/addr_source_test.go @@ -20,7 +20,7 @@ var ( // deduplicates the results of a set of AddrSource implementations. func TestMultiAddrSource(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := t.Context() var pk1 = newTestPubKey(t) diff --git a/channeldb/db_test.go b/channeldb/db_test.go index 5cc1e16f6..2dd799365 100644 --- a/channeldb/db_test.go +++ b/channeldb/db_test.go @@ -1,7 +1,6 @@ package channeldb import ( - "context" "image/color" "math" "math/rand" @@ -181,7 +180,7 @@ func TestFetchClosedChannelForID(t *testing.T) { // channel db and graph db. func TestMultiSourceAddrsForNode(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := t.Context() fullDB, err := MakeTestDB(t) require.NoError(t, err, "unable to make test database") diff --git a/cluster/etcd_elector_test.go b/cluster/etcd_elector_test.go index f7fcebec3..b270ca9d2 100644 --- a/cluster/etcd_elector_test.go +++ b/cluster/etcd_elector_test.go @@ -46,7 +46,7 @@ func TestEtcdElector(t *testing.T) { require.NoError(t, err) t.Cleanup(cleanup) - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer cancel() const ( @@ -70,7 +70,7 @@ func TestEtcdElector(t *testing.T) { ch := make(chan *etcdLeaderElector) wg.Add(2) - ctxb := context.Background() + ctxb := t.Context() go func() { defer wg.Done() diff --git a/contractcourt/chain_watcher_test.go b/contractcourt/chain_watcher_test.go index 0b3e23e71..2dc3605d3 100644 --- a/contractcourt/chain_watcher_test.go +++ b/contractcourt/chain_watcher_test.go @@ -173,7 +173,7 @@ func TestChainWatcherRemoteUnilateralClosePendingCommit(t *testing.T) { // With the HTLC added, we'll now manually initiate a state transition // from Alice to Bob. - testQuit, testQuitFunc := context.WithCancel(context.Background()) + testQuit, testQuitFunc := context.WithCancel(t.Context()) t.Cleanup(testQuitFunc) _, err = aliceChannel.SignNextCommitment(testQuit) require.NoError(t, err) diff --git a/discovery/gossiper_test.go b/discovery/gossiper_test.go index 58c975bd4..2e743257e 100644 --- a/discovery/gossiper_test.go +++ b/discovery/gossiper_test.go @@ -1024,7 +1024,7 @@ func createTestCtx(t *testing.T, startHeight uint32, isChanPeer bool) ( // the router subsystem. 
func TestProcessAnnouncement(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := t.Context() timestamp := testTimestamp tCtx, err := createTestCtx(t, 0, false) @@ -1140,7 +1140,7 @@ func TestProcessAnnouncement(t *testing.T) { // propagated to the router subsystem. func TestPrematureAnnouncement(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := t.Context() timestamp := testTimestamp @@ -1176,7 +1176,7 @@ func TestPrematureAnnouncement(t *testing.T) { // properly processes partial and fully announcement signatures message. func TestSignatureAnnouncementLocalFirst(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := t.Context() tCtx, err := createTestCtx(t, proofMatureDelta, false) require.NoError(t, err, "can't create context") @@ -1355,7 +1355,7 @@ func TestSignatureAnnouncementLocalFirst(t *testing.T) { // processes announcement with unknown channel ids. func TestOrphanSignatureAnnouncement(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := t.Context() tCtx, err := createTestCtx(t, proofMatureDelta, false) require.NoError(t, err, "can't create context") @@ -1547,7 +1547,7 @@ func TestOrphanSignatureAnnouncement(t *testing.T) { // assembled. func TestSignatureAnnouncementRetryAtStartup(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := t.Context() tCtx, err := createTestCtx(t, proofMatureDelta, false) require.NoError(t, err, "can't create context") @@ -1783,7 +1783,7 @@ out: // the full proof (ChannelAnnouncement) to the remote peer. func TestSignatureAnnouncementFullProofWhenRemoteProof(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := t.Context() tCtx, err := createTestCtx(t, proofMatureDelta, false) require.NoError(t, err, "can't create context") @@ -2225,7 +2225,7 @@ func TestDeDuplicatedAnnouncements(t *testing.T) { // announcements for nodes who do not intend to publicly advertise themselves. func TestForwardPrivateNodeAnnouncement(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := t.Context() const ( startingHeight = 100 @@ -2334,7 +2334,7 @@ func TestForwardPrivateNodeAnnouncement(t *testing.T) { // zombie edges. func TestRejectZombieEdge(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := t.Context() // We'll start by creating our test context with a batch of // announcements. @@ -2436,7 +2436,7 @@ func TestRejectZombieEdge(t *testing.T) { // becomes live by receiving a fresh update. func TestProcessZombieEdgeNowLive(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := t.Context() // We'll start by creating our test context with a batch of // announcements. @@ -2594,7 +2594,7 @@ func TestProcessZombieEdgeNowLive(t *testing.T) { // be reprocessed later, after our ChannelAnnouncement. func TestReceiveRemoteChannelUpdateFirst(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := t.Context() tCtx, err := createTestCtx(t, proofMatureDelta, false) require.NoError(t, err, "can't create context") @@ -2798,7 +2798,7 @@ func TestReceiveRemoteChannelUpdateFirst(t *testing.T) { // currently know of. func TestExtraDataChannelAnnouncementValidation(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := t.Context() tCtx, err := createTestCtx(t, 0, false) require.NoError(t, err, "can't create context") @@ -2835,7 +2835,7 @@ func TestExtraDataChannelAnnouncementValidation(t *testing.T) { // know of. 
func TestExtraDataChannelUpdateValidation(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := t.Context() timestamp := testTimestamp tCtx, err := createTestCtx(t, 0, false) @@ -2896,7 +2896,7 @@ func TestExtraDataChannelUpdateValidation(t *testing.T) { // currently know of. func TestExtraDataNodeAnnouncementValidation(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := t.Context() tCtx, err := createTestCtx(t, 0, false) require.NoError(t, err, "can't create context") @@ -2969,7 +2969,7 @@ func assertProcessAnnouncement(t *testing.T, result chan error) { // the retransmit ticker ticks. func TestRetransmit(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := t.Context() tCtx, err := createTestCtx(t, proofMatureDelta, false) require.NoError(t, err, "can't create context") @@ -3079,7 +3079,7 @@ func TestRetransmit(t *testing.T) { // no existing channels in the graph do not get forwarded. func TestNodeAnnouncementNoChannels(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := t.Context() tCtx, err := createTestCtx(t, 0, false) require.NoError(t, err, "can't create context") @@ -3171,7 +3171,7 @@ func TestNodeAnnouncementNoChannels(t *testing.T) { // validate the msg flags and max HTLC field of a ChannelUpdate. func TestOptionalFieldsChannelUpdateValidation(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := t.Context() tCtx, err := createTestCtx(t, 0, false) require.NoError(t, err, "can't create context") @@ -3269,7 +3269,7 @@ func TestOptionalFieldsChannelUpdateValidation(t *testing.T) { // channel is always sent upon the remote party reconnecting. func TestSendChannelUpdateReliably(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := t.Context() // We'll start by creating our test context and a batch of // announcements. @@ -3587,7 +3587,7 @@ func sendRemoteMsg(t *testing.T, ctx *testCtx, msg lnwire.Message, select { case err := <-ctx.gossiper.ProcessRemoteAnnouncement( - context.Background(), msg, remotePeer, + t.Context(), msg, remotePeer, ): if err != nil { t.Fatalf("unable to process channel msg: %v", err) @@ -3718,7 +3718,7 @@ out: // policy of all of them. const newTimeLockDelta = 100 var edgesToUpdate []EdgeWithInfo - err = ctx.router.ForAllOutgoingChannels(context.Background(), func( + err = ctx.router.ForAllOutgoingChannels(t.Context(), func( info *models.ChannelEdgeInfo, edge *models.ChannelEdgePolicy) error { @@ -3986,7 +3986,7 @@ func (m *SyncManager) markGraphSyncing() { // initial historical sync has completed. func TestBroadcastAnnsAfterGraphSynced(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := t.Context() tCtx, err := createTestCtx(t, 10, false) require.NoError(t, err, "can't create context") @@ -4064,7 +4064,7 @@ func TestBroadcastAnnsAfterGraphSynced(t *testing.T) { // is tested by TestRateLimitChannelUpdates. func TestRateLimitDeDup(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := t.Context() // Create our test harness. const blockHeight = 100 @@ -4261,7 +4261,7 @@ func TestRateLimitDeDup(t *testing.T) { // channel updates. func TestRateLimitChannelUpdates(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := t.Context() // Create our test harness. const blockHeight = 100 @@ -4416,7 +4416,7 @@ func TestRateLimitChannelUpdates(t *testing.T) { // about our own channels when coming from a remote peer. 
func TestIgnoreOwnAnnouncement(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := t.Context() tCtx, err := createTestCtx(t, proofMatureDelta, false) require.NoError(t, err, "can't create context") @@ -4563,7 +4563,7 @@ func TestIgnoreOwnAnnouncement(t *testing.T) { // error. func TestRejectCacheChannelAnn(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := t.Context() tCtx, err := createTestCtx(t, proofMatureDelta, false) require.NoError(t, err, "can't create context") @@ -4644,7 +4644,7 @@ func TestFutureMsgCacheEviction(t *testing.T) { // channel announcements are banned properly. func TestChanAnnBanningNonChanPeer(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := t.Context() tCtx, err := createTestCtx(t, 1000, false) require.NoError(t, err, "can't create context") @@ -4739,7 +4739,7 @@ func TestChanAnnBanningNonChanPeer(t *testing.T) { // get disconnected. func TestChanAnnBanningChanPeer(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := t.Context() tCtx, err := createTestCtx(t, 1000, true) require.NoError(t, err, "can't create context") @@ -4832,7 +4832,7 @@ func assertChanChainRejection(t *testing.T, ctx *testCtx, } _, added := ctx.gossiper.handleChanAnnouncement( - context.Background(), nMsg, edge, + t.Context(), nMsg, edge, ) require.False(t, added) diff --git a/discovery/reliable_sender_test.go b/discovery/reliable_sender_test.go index fc94d57f3..32a9908c7 100644 --- a/discovery/reliable_sender_test.go +++ b/discovery/reliable_sender_test.go @@ -71,7 +71,7 @@ func assertMsgsSent(t *testing.T, msgChan chan lnwire.Message, // a peer while taking into account its connection lifecycle works as expected. func TestReliableSenderFlow(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := t.Context() reliableSender := newTestReliableSender(t) @@ -188,7 +188,7 @@ func TestReliableSenderFlow(t *testing.T) { // them as stale. func TestReliableSenderStaleMessages(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := t.Context() reliableSender := newTestReliableSender(t) diff --git a/discovery/syncer_atomic_test.go b/discovery/syncer_atomic_test.go index 9396dbc39..10a6b252c 100644 --- a/discovery/syncer_atomic_test.go +++ b/discovery/syncer_atomic_test.go @@ -15,7 +15,7 @@ import ( // backlog at a time using the atomic flag. func TestGossipSyncerSingleBacklogSend(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := t.Context() // Track how many goroutines are actively sending. var ( diff --git a/discovery/syncer_test.go b/discovery/syncer_test.go index a7d36e964..328b3d944 100644 --- a/discovery/syncer_test.go +++ b/discovery/syncer_test.go @@ -232,7 +232,7 @@ func newTestSyncer(hID lnwire.ShortChannelID, // doesn't have a horizon set, then we won't send any incoming messages to it. func TestGossipSyncerFilterGossipMsgsNoHorizon(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := t.Context() // First, we'll create a GossipSyncer instance with a canned sendToPeer // message to allow us to intercept their potential sends. @@ -278,7 +278,7 @@ func unixStamp(a int64) uint32 { // channel ann that already has a channel update on disk. func TestGossipSyncerFilterGossipMsgsAllInMemory(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := t.Context() // First, we'll create a GossipSyncer instance with a canned sendToPeer // message to allow us to intercept their potential sends. 
@@ -421,7 +421,7 @@ func TestGossipSyncerFilterGossipMsgsAllInMemory(t *testing.T) { // messages which are within their desired time horizon. func TestGossipSyncerApplyNoHistoricalGossipFilter(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := t.Context() // First, we'll create a GossipSyncer instance with a canned sendToPeer // message to allow us to intercept their potential sends. @@ -482,7 +482,7 @@ func TestGossipSyncerApplyNoHistoricalGossipFilter(t *testing.T) { // within their desired time horizon. func TestGossipSyncerApplyGossipFilter(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := t.Context() // First, we'll create a GossipSyncer instance with a canned sendToPeer // message to allow us to intercept their potential sends. @@ -602,7 +602,7 @@ func TestGossipSyncerApplyGossipFilter(t *testing.T) { // channels and complete=0. func TestGossipSyncerQueryChannelRangeWrongChainHash(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := t.Context() // First, we'll create a GossipSyncer instance with a canned sendToPeer // message to allow us to intercept their potential sends. @@ -655,7 +655,7 @@ func TestGossipSyncerQueryChannelRangeWrongChainHash(t *testing.T) { // complete=0. func TestGossipSyncerReplyShortChanIDsWrongChainHash(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := t.Context() // First, we'll create a GossipSyncer instance with a canned sendToPeer // message to allow us to intercept their potential sends. @@ -705,7 +705,7 @@ func TestGossipSyncerReplyShortChanIDsWrongChainHash(t *testing.T) { // announcements, as well as an ending ReplyShortChanIDsEnd message. func TestGossipSyncerReplyShortChanIDs(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := t.Context() // First, we'll create a GossipSyncer instance with a canned sendToPeer // message to allow us to intercept their potential sends. @@ -811,7 +811,7 @@ func TestGossipSyncerReplyShortChanIDs(t *testing.T) { // the remote peer. func TestGossipSyncerReplyChanRangeQuery(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := t.Context() // We'll use a smaller chunk size so we can easily test all the edge // cases. @@ -983,7 +983,7 @@ func TestGossipSyncerReplyChanRangeQuery(t *testing.T) { // executed with the correct block range. func TestGossipSyncerReplyChanRangeQueryBlockRange(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := t.Context() // First create our test gossip syncer that will handle and // respond to the test queries @@ -1097,7 +1097,7 @@ func TestGossipSyncerReplyChanRangeQueryBlockRange(t *testing.T) { // back a single response that signals completion. func TestGossipSyncerReplyChanRangeQueryNoNewChans(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := t.Context() // We'll now create our test gossip syncer that will shortly respond to // our canned query. @@ -1177,7 +1177,7 @@ func TestGossipSyncerReplyChanRangeQueryNoNewChans(t *testing.T) { // channel ID, we properly generate an correct initial channel range response. func TestGossipSyncerGenChanRangeQuery(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := t.Context() // First, we'll create a GossipSyncer instance with a canned sendToPeer // message to allow us to intercept their potential sends. @@ -1238,7 +1238,7 @@ func TestGossipSyncerProcessChanRangeReply(t *testing.T) { // each reply instead. 
func testGossipSyncerProcessChanRangeReply(t *testing.T, legacy bool) { t.Parallel() - ctx := context.Background() + ctx := t.Context() // First, we'll create a GossipSyncer instance with a canned sendToPeer // message to allow us to intercept their potential sends. @@ -1499,7 +1499,7 @@ func TestGossipSyncerSynchronizeChanIDs(t *testing.T) { for i := 0; i < chunkSize*2; i += 2 { // With our set up complete, we'll request a sync of chan ID's. - done := syncer.synchronizeChanIDs(context.Background()) + done := syncer.synchronizeChanIDs(t.Context()) // At this point, we shouldn't yet be done as only 2 items // should have been queried for. @@ -1546,7 +1546,7 @@ func TestGossipSyncerSynchronizeChanIDs(t *testing.T) { } // If we issue another query, the syncer should tell us that it's done. - done := syncer.synchronizeChanIDs(context.Background()) + done := syncer.synchronizeChanIDs(t.Context()) if done { t.Fatalf("syncer should be finished!") } diff --git a/fn/context_guard_test.go b/fn/context_guard_test.go index 576ca5364..10cad41ae 100644 --- a/fn/context_guard_test.go +++ b/fn/context_guard_test.go @@ -20,7 +20,7 @@ func TestContextGuard(t *testing.T) { t.Run("Parent context is cancelled", func(t *testing.T) { t.Parallel() var ( - ctx, cancel = context.WithCancel(context.Background()) + ctx, cancel = context.WithCancel(t.Context()) g = NewContextGuard() ) @@ -42,7 +42,7 @@ func TestContextGuard(t *testing.T) { t.Run("Derived context is cancelled", func(t *testing.T) { t.Parallel() var ( - ctx = context.Background() + ctx = t.Context() g = NewContextGuard() ) @@ -66,7 +66,7 @@ func TestContextGuard(t *testing.T) { t.Parallel() var ( - ctx = context.Background() + ctx = t.Context() g = NewContextGuard() ) ctxc, _ := g.Create(ctx) @@ -87,7 +87,7 @@ func TestContextGuard(t *testing.T) { t.Parallel() var ( - ctx, cancel = context.WithCancel(context.Background()) + ctx, cancel = context.WithCancel(t.Context()) g = NewContextGuard() ) cancel() @@ -107,7 +107,7 @@ func TestContextGuard(t *testing.T) { t.Parallel() var ( - ctx = context.Background() + ctx = t.Context() g = NewContextGuard() ) @@ -130,7 +130,7 @@ func TestContextGuard(t *testing.T) { t.Parallel() var ( - ctx, cancel = context.WithCancel(context.Background()) + ctx, cancel = context.WithCancel(t.Context()) g = NewContextGuard() task = make(chan struct{}) done = make(chan struct{}) @@ -172,7 +172,7 @@ func TestContextGuard(t *testing.T) { t.Parallel() var ( - ctx = context.Background() + ctx = t.Context() g = NewContextGuard() task = make(chan struct{}) done = make(chan struct{}) @@ -218,7 +218,7 @@ func TestContextGuard(t *testing.T) { t.Parallel() var ( - ctx = context.Background() + ctx = t.Context() g = NewContextGuard() task = make(chan struct{}) done = make(chan struct{}) @@ -316,7 +316,7 @@ func TestContextGuard(t *testing.T) { t.Parallel() var ( - ctx = context.Background() + ctx = t.Context() g = NewContextGuard() task = make(chan struct{}) done = make(chan struct{}) @@ -452,7 +452,7 @@ func TestContextGuardCountGoroutines(t *testing.T) { g := NewContextGuard() - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) // Count goroutines before contexts are created. 
count1 := runtime.NumGoroutine() diff --git a/fn/goroutine_manager_test.go b/fn/goroutine_manager_test.go index e39dce995..1c393d7ec 100644 --- a/fn/goroutine_manager_test.go +++ b/fn/goroutine_manager_test.go @@ -18,7 +18,7 @@ func TestGoroutineManager(t *testing.T) { t.Parallel() var ( - ctx = context.Background() + ctx = t.Context() m = NewGoroutineManager() taskChan = make(chan struct{}) ) @@ -61,7 +61,7 @@ func TestGoroutineManager(t *testing.T) { t.Parallel() var ( - ctx = context.Background() + ctx = t.Context() m = NewGoroutineManager() taskChan = make(chan struct{}) ) @@ -114,7 +114,7 @@ func TestGoroutineManager(t *testing.T) { t.Parallel() var ( - ctx = context.Background() + ctx = t.Context() m = NewGoroutineManager() stopChan = make(chan struct{}) ) diff --git a/funding/batch_test.go b/funding/batch_test.go index 9ca522b03..7a674d60b 100644 --- a/funding/batch_test.go +++ b/funding/batch_test.go @@ -359,7 +359,7 @@ func TestBatchFund(t *testing.T) { MinConfs: 1, } updates, err := h.batcher.BatchFund( - context.Background(), req, + t.Context(), req, ) if tc.failUpdate1 || tc.failUpdate2 || tc.failPublish { diff --git a/graph/builder_test.go b/graph/builder_test.go index 4d1b4ef39..82a44300d 100644 --- a/graph/builder_test.go +++ b/graph/builder_test.go @@ -2,7 +2,6 @@ package graph import ( "bytes" - "context" "crypto/sha256" "encoding/hex" "encoding/json" @@ -44,7 +43,7 @@ const ( // info was added to the database. func TestAddProof(t *testing.T) { t.Parallel() - ctxb := context.Background() + ctxb := t.Context() ctx := createTestCtxSingleNode(t, 0) @@ -109,7 +108,7 @@ func TestIgnoreNodeAnnouncement(t *testing.T) { } copy(node.PubKeyBytes[:], pub.SerializeCompressed()) - err := ctx.builder.AddNode(context.Background(), node) + err := ctx.builder.AddNode(t.Context(), node) if !IsError(err, ErrIgnored) { t.Fatalf("expected to get ErrIgnore, instead got: %v", err) } @@ -119,7 +118,7 @@ func TestIgnoreNodeAnnouncement(t *testing.T) { // ignore a channel policy for a channel not in the graph. func TestIgnoreChannelEdgePolicyForUnknownChannel(t *testing.T) { t.Parallel() - ctxb := context.Background() + ctxb := t.Context() const startingBlockHeight = 101 @@ -194,7 +193,7 @@ func TestIgnoreChannelEdgePolicyForUnknownChannel(t *testing.T) { // confirmed on the stale chain, and resync to the main chain. func TestWakeUpOnStaleBranch(t *testing.T) { t.Parallel() - ctxb := context.Background() + ctxb := t.Context() const startingBlockHeight = 101 ctx := createTestCtxSingleNode(t, startingBlockHeight) @@ -354,7 +353,7 @@ func TestWakeUpOnStaleBranch(t *testing.T) { // Give time to process new blocks. time.Sleep(time.Millisecond * 500) - selfNode, err := ctx.graph.SourceNode(context.Background()) + selfNode, err := ctx.graph.SourceNode(t.Context()) require.NoError(t, err) // Create new router with same graph database. @@ -408,7 +407,7 @@ func TestWakeUpOnStaleBranch(t *testing.T) { // it is active. func TestDisconnectedBlocks(t *testing.T) { t.Parallel() - ctxb := context.Background() + ctxb := t.Context() const startingBlockHeight = 101 ctx := createTestCtxSingleNode(t, startingBlockHeight) @@ -609,7 +608,7 @@ func TestDisconnectedBlocks(t *testing.T) { // ChannelRouter, then the channels are properly pruned. 
func TestChansClosedOfflinePruneGraph(t *testing.T) { t.Parallel() - ctxb := context.Background() + ctxb := t.Context() const startingBlockHeight = 101 ctx := createTestCtxSingleNode(t, startingBlockHeight) @@ -1037,7 +1036,7 @@ func testPruneChannelGraphDoubleDisabled(t *testing.T, assumeValid bool) { // node announcements. func TestIsStaleNode(t *testing.T) { t.Parallel() - ctxb := context.Background() + ctxb := t.Context() const startingBlockHeight = 101 ctx := createTestCtxSingleNode(t, startingBlockHeight) @@ -1095,7 +1094,7 @@ func TestIsStaleNode(t *testing.T) { Features: testFeatures, } copy(n1.PubKeyBytes[:], priv1.PubKey().SerializeCompressed()) - if err := ctx.builder.AddNode(context.Background(), n1); err != nil { + if err := ctx.builder.AddNode(t.Context(), n1); err != nil { t.Fatalf("could not add node: %v", err) } @@ -1117,7 +1116,7 @@ func TestIsStaleNode(t *testing.T) { // channel announcements. func TestIsKnownEdge(t *testing.T) { t.Parallel() - ctxb := context.Background() + ctxb := t.Context() const startingBlockHeight = 101 ctx := createTestCtxSingleNode(t, startingBlockHeight) @@ -1167,7 +1166,7 @@ func TestIsKnownEdge(t *testing.T) { // stale channel edge update announcements. func TestIsStaleEdgePolicy(t *testing.T) { t.Parallel() - ctxb := context.Background() + ctxb := t.Context() const startingBlockHeight = 101 ctx := createTestCtxFromFile(t, startingBlockHeight, basicGraphFilePath) @@ -1359,7 +1358,7 @@ func createTestCtxFromFile(t *testing.T, func parseTestGraph(t *testing.T, useCache bool, path string) ( *testGraphInstance, error) { - ctx := context.Background() + ctx := t.Context() graphJSON, err := os.ReadFile(path) if err != nil { @@ -1751,7 +1750,7 @@ func createTestGraphFromChannels(t *testing.T, useCache bool, testChannels []*testChannel, source string) (*testGraphInstance, error) { - ctx := context.Background() + ctx := t.Context() // We'll use this fake address for the IP address of all the nodes in // our tests. This value isn't needed for path finding so it doesn't diff --git a/graph/db/benchmark_test.go b/graph/db/benchmark_test.go index 3d690e357..611653d53 100644 --- a/graph/db/benchmark_test.go +++ b/graph/db/benchmark_test.go @@ -189,7 +189,7 @@ func sqlSQLite(t testing.TB, dbPath, file string) BatchedSQLQueries { // database for testing purposes. func kvdbPostgres(t testing.TB, dsn string) kvdb.Backend { kvStore, err := kvdb.Open( - kvdb.PostgresBackendName, context.Background(), + kvdb.PostgresBackendName, t.Context(), &postgres.Config{ Dsn: dsn, MaxConnections: testMaxPostgresConnections, @@ -217,7 +217,7 @@ func connectKVDBPostgres(t testing.TB, dsn string) V1Store { func kvdbSqlite(t testing.TB, dbPath, fileName string) kvdb.Backend { sqlbase.Init(testMaxSQLiteConnections) kvStore, err := kvdb.Open( - kvdb.SqliteBackendName, context.Background(), + kvdb.SqliteBackendName, t.Context(), &sqlite.Config{ BusyTimeout: testSQLBusyTimeout, MaxConnections: testMaxSQLiteConnections, @@ -273,7 +273,7 @@ func newKVStore(t testing.TB, backend kvdb.Backend) V1Store { // provided sqldb.DB instance. func newSQLExecutor(t testing.TB, db sqldb.DB) BatchedSQLQueries { err := db.ApplyAllMigrations( - context.Background(), sqldb.GetMigrations(), + t.Context(), sqldb.GetMigrations(), ) require.NoError(t, err) @@ -317,7 +317,7 @@ func newSQLStore(t testing.TB, cfg *sqldb.QueryConfig, // very long to sync the graph. 
func TestPopulateDBs(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := t.Context() // NOTE: uncomment the line below to run this test locally, then provide // the desired source database (and make sure the destination Postgres @@ -539,7 +539,7 @@ func TestPopulateViaMigration(t *testing.T) { // Use the graph migration to populate the SQL graph from the // kvdb graph. - ctx := context.Background() + ctx := t.Context() err := dstSQL.ExecTx( ctx, sqldb.WriteTxOpt(), func(queries SQLQueries) error { return MigrateGraphToSQL( @@ -556,7 +556,7 @@ func TestPopulateViaMigration(t *testing.T) { // syncGraph synchronizes the source graph with the destination graph by // copying all nodes and channels from the source to the destination. func syncGraph(t *testing.T, src, dest *ChannelGraph) { - ctx := context.Background() + ctx := t.Context() var ( s = rate.Sometimes{ @@ -683,7 +683,7 @@ func syncGraph(t *testing.T, src, dest *ChannelGraph) { // NOTE: the TestPopulateDBs test helper can be used to populate a set of test // DBs from a single source db. func BenchmarkCacheLoading(b *testing.B) { - ctx := context.Background() + ctx := b.Context() tests := []dbConnection{ kvdbBBoltConn, @@ -723,7 +723,7 @@ func BenchmarkCacheLoading(b *testing.B) { // NOTE: the TestPopulateDBs test helper can be used to populate a set of test // DBs from a single source db. func BenchmarkGraphReadMethods(b *testing.B) { - ctx := context.Background() + ctx := b.Context() backends := []dbConnection{ kvdbBBoltConn, @@ -900,7 +900,7 @@ func BenchmarkFindOptimalSQLQueryConfig(b *testing.B) { for _, size := range testSizes { b.Run(fmt.Sprintf("%s-%s-%d", configOption, dbName, size), func(b *testing.B) { - ctx := context.Background() + ctx := b.Context() cfg := sqldb.DefaultSQLiteConfig() if testPostgres { diff --git a/graph/db/graph_test.go b/graph/db/graph_test.go index b9d972b3b..c158ea5d9 100644 --- a/graph/db/graph_test.go +++ b/graph/db/graph_test.go @@ -100,7 +100,7 @@ func createTestVertex(t testing.TB) *models.LightningNode { // TestNodeInsertionAndDeletion tests the CRUD operations for a LightningNode. func TestNodeInsertionAndDeletion(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := t.Context() graph := MakeTestGraph(t) @@ -276,7 +276,7 @@ func TestNodeInsertionAndDeletion(t *testing.T) { // only the pubkey is known to the database. func TestPartialNode(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := t.Context() graph := MakeTestGraph(t) @@ -344,7 +344,7 @@ func TestPartialNode(t *testing.T) { // TestAliasLookup tests the alias lookup functionality of the graph store. func TestAliasLookup(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := t.Context() graph := MakeTestGraph(t) @@ -375,7 +375,7 @@ func TestAliasLookup(t *testing.T) { // TestSourceNode tests the source node functionality of the graph store. func TestSourceNode(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := t.Context() graph := MakeTestGraph(t) @@ -402,7 +402,7 @@ func TestSourceNode(t *testing.T) { // TestEdgeInsertionDeletion tests the basic CRUD operations for channel edges. func TestEdgeInsertionDeletion(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := t.Context() graph := MakeTestGraph(t) @@ -527,7 +527,7 @@ func createEdge(height, txIndex uint32, txPosition uint16, outPointIndex uint32, // database is what we expect after calling DisconnectBlockAtHeight. 
func TestDisconnectBlockAtHeight(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := t.Context() graph := MakeTestGraph(t) @@ -818,7 +818,7 @@ func createChannelEdge(node1, node2 *models.LightningNode, func TestEdgeInfoUpdates(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := t.Context() graph := MakeTestGraph(t) @@ -914,7 +914,7 @@ func TestEdgeInfoUpdates(t *testing.T) { // TestEdgePolicyCRUD tests basic CRUD operations for edge policies. func TestEdgePolicyCRUD(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := t.Context() graph := MakeTestGraph(t) @@ -1208,7 +1208,7 @@ func newEdgePolicy(chanID uint64, updateTime int64) *models.ChannelEdgePolicy { // TestAddEdgeProof tests the ability to add an edge proof to an existing edge. func TestAddEdgeProof(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := t.Context() graph := MakeTestGraph(t) @@ -1267,7 +1267,7 @@ func TestAddEdgeProof(t *testing.T) { // correctly iterates through the channels of the set source node. func TestForEachSourceNodeChannel(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := t.Context() graph := MakeTestGraph(t) @@ -1356,7 +1356,7 @@ func TestForEachSourceNodeChannel(t *testing.T) { func TestGraphTraversal(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := t.Context() graph := MakeTestGraph(t) @@ -1455,7 +1455,7 @@ func TestGraphTraversal(t *testing.T) { // working correctly. func TestGraphTraversalCacheable(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := t.Context() graph := MakeTestGraph(t) @@ -1582,7 +1582,7 @@ func TestGraphCacheTraversal(t *testing.T) { func fillTestGraph(t testing.TB, graph *ChannelGraph, numNodes, numChannels int) (map[uint64]struct{}, []*models.LightningNode) { - ctx := context.Background() + ctx := t.Context() nodes := make([]*models.LightningNode, numNodes) nodeIndex := map[string]struct{}{} @@ -1694,7 +1694,7 @@ func assertPruneTip(t *testing.T, graph *ChannelGraph, func assertNumChans(t *testing.T, graph *ChannelGraph, n int) { numChans := 0 err := graph.ForEachChannel( - context.Background(), func(*models.ChannelEdgeInfo, + t.Context(), func(*models.ChannelEdgeInfo, *models.ChannelEdgePolicy, *models.ChannelEdgePolicy) error { @@ -1710,7 +1710,7 @@ func assertNumChans(t *testing.T, graph *ChannelGraph, n int) { func assertNumNodes(t *testing.T, graph *ChannelGraph, n int) { numNodes := 0 - err := graph.ForEachNode(context.Background(), + err := graph.ForEachNode(t.Context(), func(_ *models.LightningNode) error { numNodes++ @@ -1772,7 +1772,7 @@ func assertChanViewEqualChanPoints(t *testing.T, a []EdgePoint, func TestGraphPruning(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := t.Context() graph := MakeTestGraph(t) @@ -1963,7 +1963,7 @@ func TestGraphPruning(t *testing.T) { // known channel ID in the database. func TestHighestChanID(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := t.Context() graph := MakeTestGraph(t) @@ -2023,7 +2023,7 @@ func TestHighestChanID(t *testing.T) { // insertion of a new edge, the edge update index is updated properly. func TestChanUpdatesInHorizon(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := t.Context() graph := MakeTestGraph(t) @@ -2181,7 +2181,7 @@ func TestChanUpdatesInHorizon(t *testing.T) { // the most recent node updates within a particular time horizon. 
func TestNodeUpdatesInHorizon(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := t.Context() graph := MakeTestGraph(t) @@ -2352,7 +2352,7 @@ func TestFilterKnownChanIDsZombieRevival(t *testing.T) { // know of on disk. func TestFilterKnownChanIDs(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := t.Context() graph := MakeTestGraph(t) @@ -2535,7 +2535,7 @@ func TestStressTestChannelGraphAPI(t *testing.T) { t.Skipf("Skipping test in short mode") } - ctx := context.Background() + ctx := t.Context() graph := MakeTestGraph(t) @@ -2824,7 +2824,7 @@ func TestStressTestChannelGraphAPI(t *testing.T) { // set of short channel ID's for a given block range. func TestFilterChannelRange(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := t.Context() graph := MakeTestGraph(t) @@ -3043,7 +3043,7 @@ func TestFilterChannelRange(t *testing.T) { // of ChannelEdge structs for a given set of short channel ID's. func TestFetchChanInfos(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := t.Context() graph := MakeTestGraph(t) @@ -3146,7 +3146,7 @@ func TestFetchChanInfos(t *testing.T) { // both sides. func TestIncompleteChannelPolicies(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := t.Context() graph := MakeTestGraph(t) @@ -3239,7 +3239,7 @@ func TestIncompleteChannelPolicies(t *testing.T) { // up. func TestChannelEdgePruningUpdateIndexDeletion(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := t.Context() graph := MakeTestGraph(t) @@ -3387,7 +3387,7 @@ func TestChannelEdgePruningUpdateIndexDeletion(t *testing.T) { // PruneSyncState method. func TestPruneGraphNodes(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := t.Context() graph := MakeTestGraph(t) @@ -3453,7 +3453,7 @@ func TestPruneGraphNodes(t *testing.T) { // database, then shell edges are created for each node if needed. func TestAddChannelEdgeShellNodes(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := t.Context() graph := MakeTestGraph(t) @@ -3492,7 +3492,7 @@ func TestAddChannelEdgeShellNodes(t *testing.T) { // well. func TestNodePruningUpdateIndexDeletion(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := t.Context() graph := MakeTestGraph(t) @@ -3553,7 +3553,7 @@ func nextUpdateTime() time.Time { // public within the network graph. func TestNodeIsPublic(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := t.Context() // We'll start off the test by creating a small network of 3 // participants with the following graph: @@ -3688,7 +3688,7 @@ func TestNodeIsPublic(t *testing.T) { // DisabledChannelIDs is correct. func TestDisabledChannelIDs(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := t.Context() graph := MakeTestGraph(t) @@ -3773,7 +3773,7 @@ func TestDisabledChannelIDs(t *testing.T) { // receive the proper update. func TestEdgePolicyMissingMaxHTLC(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := t.Context() graph := MakeTestGraph(t) @@ -3916,7 +3916,7 @@ func assertNumZombies(t *testing.T, graph *ChannelGraph, expZombies uint64) { // TestGraphZombieIndex ensures that we can mark edges correctly as zombie/live. func TestGraphZombieIndex(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := t.Context() // We'll start by creating our test graph along with a test edge. graph := MakeTestGraph(t) @@ -4103,7 +4103,7 @@ func TestComputeFee(t *testing.T) { // executes multiple AddChannelEdge requests in a single txn. 
func TestBatchedAddChannelEdge(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := t.Context() graph := MakeTestGraph(t) @@ -4180,7 +4180,7 @@ func TestBatchedAddChannelEdge(t *testing.T) { // executes multiple UpdateEdgePolicy requests in a single txn. func TestBatchedUpdateEdgePolicy(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := t.Context() graph := MakeTestGraph(t) @@ -4233,7 +4233,7 @@ func TestBatchedUpdateEdgePolicy(t *testing.T) { // allocations and the total memory consumed by the full graph traversal. func BenchmarkForEachChannel(b *testing.B) { graph := MakeTestGraph(b) - ctx := context.Background() + ctx := b.Context() const numNodes = 100 const numChannels = 4 @@ -4286,7 +4286,7 @@ func BenchmarkForEachChannel(b *testing.B) { // method works as expected, and is able to handle nil self edges. func TestGraphCacheForEachNodeChannel(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := t.Context() graph := MakeTestGraph(t) @@ -4448,7 +4448,7 @@ var testNodeAnn = "01012674c2e7ef68c73a086b7de2603f4ef1567358df84bb4edaa06c" + // that the two messages are equal. func TestLightningNodePersistence(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := t.Context() // Create a new test graph instance. graph := MakeTestGraph(t) diff --git a/graph/db/sql_migration_test.go b/graph/db/sql_migration_test.go index c555e433d..9d13238b7 100644 --- a/graph/db/sql_migration_test.go +++ b/graph/db/sql_migration_test.go @@ -5,7 +5,6 @@ package graphdb import ( "bytes" "cmp" - "context" "crypto/rand" "errors" "fmt" @@ -58,7 +57,7 @@ var ( // and expected to have the exact same data in them. func TestMigrateGraphToSQL(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := t.Context() dbFixture := NewTestDBFixture(t) @@ -393,7 +392,7 @@ func assertInSync(t *testing.T, kvDB *KVStore, sqlDB *SQLStore, func fetchAllNodes(t *testing.T, store V1Store) []*models.LightningNode { nodes := make([]*models.LightningNode, 0) - err := store.ForEachNode(context.Background(), + err := store.ForEachNode(t.Context(), func(node *models.LightningNode) error { // Call PubKey to ensure the objects cached pubkey is set so that @@ -423,7 +422,7 @@ func fetchAllNodes(t *testing.T, store V1Store) []*models.LightningNode { // fetchSourceNode retrieves the source node from the given store. func fetchSourceNode(t *testing.T, store V1Store) *models.LightningNode { - node, err := store.SourceNode(context.Background()) + node, err := store.SourceNode(t.Context()) if errors.Is(err, ErrSourceNodeNotSet) { return nil } else { @@ -461,7 +460,7 @@ func (c chanSet) CountPolicies() int { // fetchAllChannelsAndPolicies retrieves all channels and their policies // from the given store and returns them sorted by their channel ID. 
func fetchAllChannelsAndPolicies(t *testing.T, store V1Store) chanSet { - ctx := context.Background() + ctx := t.Context() channels := make(chanSet, 0) err := store.ForEachChannel(ctx, func(info *models.ChannelEdgeInfo, p1 *models.ChannelEdgePolicy, @@ -506,7 +505,7 @@ func checkKVPruneLogEntries(t *testing.T, kv *KVStore, sql *SQLStore, err := forEachPruneLogEntry( kv.db, func(height uint32, hash *chainhash.Hash) error { sqlHash, err := sql.db.GetPruneHashByHeight( - context.Background(), int64(height), + t.Context(), int64(height), ) require.NoError(t, err) require.Equal(t, hash[:], sqlHash) @@ -736,7 +735,7 @@ func makeTestPolicy(chanID uint64, toNode route.Vertex, isNode1 bool, // and set the "fileName" variable to the name of the channel database file you // want to use for the migration test. func TestMigrationWithChannelDB(t *testing.T) { - ctx := context.Background() + ctx := t.Context() // NOTE: comment this line out to run the test. t.Skipf("skipping test meant for local debugging only") @@ -855,7 +854,7 @@ func TestMigrationWithChannelDB(t *testing.T) { // will differ slightly. func TestSQLMigrationEdgeCases(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := t.Context() var invalidTLVData = []byte{0x01, 0x02, 0x03} @@ -1115,7 +1114,7 @@ func TestSQLMigrationEdgeCases(t *testing.T) { func runTestMigration(t *testing.T, populateKV func(t *testing.T, db *KVStore), expState dbState) { - ctx := context.Background() + ctx := t.Context() // Set up our source kvdb DB. kvDB := setUpKVStore(t) @@ -1208,7 +1207,7 @@ func TestMigrateGraphToSQLRapid(t *testing.T) { func testMigrateGraphToSQLRapidOnce(t *testing.T, rt *rapid.T, dbFixture *sqldb.TestPgFixture, maxNumNodes, maxNumChannels int) { - ctx := context.Background() + ctx := t.Context() // Set up our source kvdb DB. kvDB := setUpKVStore(t) diff --git a/graph/notifications_test.go b/graph/notifications_test.go index 4d847bdd2..fc7d4fb4d 100644 --- a/graph/notifications_test.go +++ b/graph/notifications_test.go @@ -2,7 +2,6 @@ package graph import ( "bytes" - "context" "encoding/hex" "fmt" "image/color" @@ -422,7 +421,7 @@ func (m *mockChainView) FilterBlock(blockHash *chainhash.Hash) (*chainview.Filte // a proper notification is sent of to all registered clients. func TestEdgeUpdateNotification(t *testing.T) { t.Parallel() - ctxb := context.Background() + ctxb := t.Context() ctx := createTestCtxSingleNode(t, 0) @@ -611,7 +610,7 @@ func TestEdgeUpdateNotification(t *testing.T) { // attributes with new data. func TestNodeUpdateNotification(t *testing.T) { t.Parallel() - ctxb := context.Background() + ctxb := t.Context() const startingBlockHeight = 101 ctx := createTestCtxSingleNode(t, startingBlockHeight) @@ -795,7 +794,7 @@ func TestNodeUpdateNotification(t *testing.T) { // when the client wishes to exit. func TestNotificationCancellation(t *testing.T) { t.Parallel() - ctxb := context.Background() + ctxb := t.Context() const startingBlockHeight = 101 ctx := createTestCtxSingleNode(t, startingBlockHeight) @@ -879,7 +878,7 @@ func TestNotificationCancellation(t *testing.T) { // properly dispatched to all registered clients. 
func TestChannelCloseNotification(t *testing.T) { t.Parallel() - ctxb := context.Background() + ctxb := t.Context() const startingBlockHeight = 101 ctx := createTestCtxSingleNode(t, startingBlockHeight) @@ -1067,7 +1066,7 @@ func createTestCtxSingleNode(t *testing.T, sourceNode := createTestNode(t) require.NoError(t, - graph.SetSourceNode(context.Background(), sourceNode), + graph.SetSourceNode(t.Context(), sourceNode), "failed to set source node", ) @@ -1083,7 +1082,7 @@ func createTestCtxSingleNode(t *testing.T, func (c *testCtx) RestartBuilder(t *testing.T) { c.chainView.Reset() - selfNode, err := c.graph.SourceNode(context.Background()) + selfNode, err := c.graph.SourceNode(t.Context()) require.NoError(t, err) // With the chainView reset, we'll now re-create the builder itself, and @@ -1156,7 +1155,7 @@ func createTestCtxFromGraphInstanceAssumeValid(t *testing.T, ConfChan: make(chan *chainntnfs.TxConfirmation), } - selfnode, err := graphInstance.graph.SourceNode(context.Background()) + selfnode, err := graphInstance.graph.SourceNode(t.Context()) require.NoError(t, err) graphBuilder, err := NewBuilder(&Config{ diff --git a/htlcswitch/link_isolated_test.go b/htlcswitch/link_isolated_test.go index 5281cbed7..9e74c4875 100644 --- a/htlcswitch/link_isolated_test.go +++ b/htlcswitch/link_isolated_test.go @@ -95,7 +95,7 @@ func (l *linkTestContext) receiveHtlcAliceToBob() { func (l *linkTestContext) sendCommitSigBobToAlice(expHtlcs int) { l.t.Helper() - testQuit, testQuitFunc := context.WithCancel(context.Background()) + testQuit, testQuitFunc := context.WithCancel(l.t.Context()) defer testQuitFunc() sigs, err := l.bobChannel.SignNextCommitment(testQuit) if err != nil { diff --git a/htlcswitch/link_test.go b/htlcswitch/link_test.go index cdeae6d81..101a47b98 100644 --- a/htlcswitch/link_test.go +++ b/htlcswitch/link_test.go @@ -498,7 +498,7 @@ func TestChannelLinkSingleHopPayment(t *testing.T) { // Check that alice invoice was settled and bandwidth of HTLC // links was changed. invoice, err := receiver.registry.LookupInvoice( - context.Background(), rhash, + t.Context(), rhash, ) require.NoError(t, err, "unable to get invoice") if invoice.State != invpkg.ContractSettled { @@ -618,7 +618,7 @@ func testChannelLinkMultiHopPayment(t *testing.T, // Check that Carol invoice was settled and bandwidth of HTLC // links were changed. invoice, err := receiver.registry.LookupInvoice( - context.Background(), rhash, + t.Context(), rhash, ) require.NoError(t, err, "unable to get invoice") if invoice.State != invpkg.ContractSettled { @@ -811,7 +811,7 @@ func testChannelLinkInboundFee(t *testing.T, //nolint:thelper // Check that Carol invoice was settled and bandwidth of HTLC // links were changed. invoice, err := receiver.registry.LookupInvoice( - context.Background(), rhash, + t.Context(), rhash, ) require.NoError(t, err, "unable to get invoice") require.Equal(t, invpkg.ContractSettled, invoice.State, @@ -929,7 +929,7 @@ func TestChannelLinkCancelFullCommitment(t *testing.T) { // to settle. err = wait.NoError(func() error { return n.bobServer.registry.SettleHodlInvoice( - context.Background(), preimage, + t.Context(), preimage, ) }, time.Minute) if err != nil { @@ -1397,7 +1397,7 @@ func TestUpdateForwardingPolicy(t *testing.T) { // Carol's invoice should now be shown as settled as the payment // succeeded. 
invoice, err := n.carolServer.registry.LookupInvoice( - context.Background(), payResp, + t.Context(), payResp, ) require.NoError(t, err, "unable to get invoice") if invoice.State != invpkg.ContractSettled { @@ -1553,7 +1553,7 @@ func TestChannelLinkMultiHopInsufficientPayment(t *testing.T) { // Check that alice invoice wasn't settled and bandwidth of htlc // links hasn't been changed. invoice, err := receiver.registry.LookupInvoice( - context.Background(), rhash, + t.Context(), rhash, ) require.NoError(t, err, "unable to get invoice") if invoice.State == invpkg.ContractSettled { @@ -1733,7 +1733,7 @@ func TestChannelLinkMultiHopUnknownNextHop(t *testing.T) { // Check that alice invoice wasn't settled and bandwidth of htlc // links hasn't been changed. invoice, err := receiver.registry.LookupInvoice( - context.Background(), rhash, + t.Context(), rhash, ) require.NoError(t, err, "unable to get invoice") if invoice.State == invpkg.ContractSettled { @@ -1843,7 +1843,7 @@ func TestChannelLinkMultiHopDecodeError(t *testing.T) { // Check that alice invoice wasn't settled and bandwidth of htlc // links hasn't been changed. invoice, err := receiver.registry.LookupInvoice( - context.Background(), rhash, + t.Context(), rhash, ) require.NoError(t, err, "unable to get invoice") if invoice.State == invpkg.ContractSettled { @@ -2716,7 +2716,7 @@ func TestChannelLinkBandwidthConsistency(t *testing.T) { ) require.NoError(t, err, "unable to create payment") - ctxb := context.Background() + ctxb := t.Context() // We must add the invoice to the registry, such that Alice expects // this payment. err = coreLink.cfg.Registry.(*mockInvoiceRegistry).AddInvoice( @@ -4067,7 +4067,7 @@ func TestChannelRetransmission(t *testing.T) { // Check that alice invoice wasn't settled and // bandwidth of htlc links hasn't been changed. invoice, err = receiver.registry.LookupInvoice( - context.Background(), rhash, + t.Context(), rhash, ) if err != nil { err = fmt.Errorf( @@ -4552,7 +4552,7 @@ func TestChannelLinkAcceptDuplicatePayment(t *testing.T) { } err = n.carolServer.registry.AddInvoice( - context.Background(), *invoice, htlc.PaymentHash, + t.Context(), *invoice, htlc.PaymentHash, ) require.NoError(t, err, "unable to add invoice in carol registry") @@ -4642,7 +4642,7 @@ func TestChannelLinkAcceptOverpay(t *testing.T) { // Even though we sent 2x what was asked for, Carol should still have // accepted the payment and marked it as settled. invoice, err := receiver.registry.LookupInvoice( - context.Background(), rhash, + t.Context(), rhash, ) require.NoError(t, err, "unable to get invoice") if invoice.State != invpkg.ContractSettled { @@ -4973,7 +4973,7 @@ func generateHtlc(t *testing.T, coreLink *channelLink, // We must add the invoice to the registry, such that Alice // expects this payment. err := coreLink.cfg.Registry.(*mockInvoiceRegistry).AddInvoice( - context.Background(), *invoice, htlc.PaymentHash, + t.Context(), *invoice, htlc.PaymentHash, ) require.NoError(t, err, "unable to add invoice to registry") @@ -5933,7 +5933,7 @@ func TestChannelLinkFail(t *testing.T) { // Sign a commitment that will include // signature for the HTLC just sent. quitCtx, done := c.cg.Create( - context.Background(), + t.Context(), ) defer done() @@ -5982,7 +5982,7 @@ func TestChannelLinkFail(t *testing.T) { // Sign a commitment that will include // signature for the HTLC just sent. 
quitCtx, done := c.cg.Create( - context.Background(), + t.Context(), ) defer done() @@ -6388,7 +6388,7 @@ func TestChannelLinkCanceledInvoice(t *testing.T) { // Cancel the invoice at bob's end. hash := invoice.Terms.PaymentPreimage.Hash() - err = n.bobServer.registry.CancelInvoice(context.Background(), hash) + err = n.bobServer.registry.CancelInvoice(t.Context(), hash) if err != nil { t.Fatal(err) } @@ -6506,7 +6506,7 @@ func TestChannelLinkHoldInvoiceSettle(t *testing.T) { } err = ctx.n.bobServer.registry.SettleHodlInvoice( - context.Background(), ctx.preimage, + t.Context(), ctx.preimage, ) if err != nil { t.Fatal(err) @@ -6552,7 +6552,7 @@ func TestChannelLinkHoldInvoiceCancel(t *testing.T) { } err = ctx.n.bobServer.registry.CancelInvoice( - context.Background(), ctx.hash, + t.Context(), ctx.hash, ) if err != nil { t.Fatal(err) @@ -6608,7 +6608,7 @@ func TestChannelLinkHoldInvoiceRestart(t *testing.T) { // We must add the invoice to the registry, such that Alice // expects this payment. err = registry.AddInvoice( - context.Background(), *invoice, htlc.PaymentHash, + t.Context(), *invoice, htlc.PaymentHash, ) require.NoError(t, err, "unable to add invoice to registry") @@ -6644,7 +6644,7 @@ func TestChannelLinkHoldInvoiceRestart(t *testing.T) { <-registry.settleChan // Settle the invoice with the preimage. - err = registry.SettleHodlInvoice(context.Background(), *preimage) + err = registry.SettleHodlInvoice(t.Context(), *preimage) require.NoError(t, err, "settle hodl invoice") // Expect alice to send a settle and commitsig message to bob. @@ -6703,7 +6703,7 @@ func TestChannelLinkRevocationWindowRegular(t *testing.T) { htlc1, invoice1 := generateHtlcAndInvoice(t, 0) htlc2, invoice2 := generateHtlcAndInvoice(t, 1) - ctxb := context.Background() + ctxb := t.Context() // We must add the invoice to the registry, such that Alice // expects this payment. err = registry.AddInvoice(ctxb, *invoice1, htlc1.PaymentHash) @@ -6789,7 +6789,7 @@ func TestChannelLinkRevocationWindowHodl(t *testing.T) { invoice2.Terms.PaymentPreimage = nil invoice2.HodlInvoice = true - ctxb := context.Background() + ctxb := t.Context() // We must add the invoices to the registry, such that Alice // expects the payments. err = registry.AddInvoice(ctxb, *invoice1, htlc1.PaymentHash) @@ -7106,7 +7106,7 @@ func TestPipelineSettle(t *testing.T) { // Add the invoice to Alice's registry so she expects it. 
aliceReg := alice.coreLink.cfg.Registry.(*mockInvoiceRegistry) err = aliceReg.AddInvoice( - context.Background(), *invoice1, htlc1.PaymentHash, + t.Context(), *invoice1, htlc1.PaymentHash, ) require.NoError(t, err) diff --git a/htlcswitch/switch_test.go b/htlcswitch/switch_test.go index 17b0eef07..e8176aaeb 100644 --- a/htlcswitch/switch_test.go +++ b/htlcswitch/switch_test.go @@ -1,7 +1,6 @@ package htlcswitch import ( - "context" "crypto/rand" "crypto/sha256" "errors" @@ -3547,7 +3546,7 @@ func (n *threeHopNetwork) sendThreeHopPayment(t *testing.T) (*lnwire.UpdateAddHT } err = n.carolServer.registry.AddInvoice( - context.Background(), *invoice, htlc.PaymentHash, + t.Context(), *invoice, htlc.PaymentHash, ) require.NoError(t, err, "unable to add invoice in carol registry") diff --git a/invoices/invoiceregistry_test.go b/invoices/invoiceregistry_test.go index 746a8461d..5e13f8735 100644 --- a/invoices/invoiceregistry_test.go +++ b/invoices/invoiceregistry_test.go @@ -1,7 +1,6 @@ package invoices_test import ( - "context" "crypto/rand" "database/sql" "fmt" @@ -194,7 +193,7 @@ func testSettleInvoice(t *testing.T, ctx := newTestContext(t, nil, makeDB) - ctxb := context.Background() + ctxb := t.Context() allSubscriptions, err := ctx.registry.SubscribeNotifications(ctxb, 0, 0) require.Nil(t, err) defer allSubscriptions.Cancel() @@ -383,7 +382,7 @@ func testCancelInvoiceImpl(t *testing.T, gc bool, cfg.GcCanceledInvoicesOnTheFly = gc ctx := newTestContext(t, &cfg, makeDB) - ctxb := context.Background() + ctxb := t.Context() allSubscriptions, err := ctx.registry.SubscribeNotifications(ctxb, 0, 0) require.Nil(t, err) defer allSubscriptions.Cancel() @@ -547,7 +546,7 @@ func testSettleHoldInvoice(t *testing.T, require.NoError(t, err) defer registry.Stop() - ctxb := context.Background() + ctxb := t.Context() allSubscriptions, err := registry.SubscribeNotifications(ctxb, 0, 0) require.Nil(t, err) defer allSubscriptions.Cancel() @@ -720,7 +719,7 @@ func testCancelHoldInvoice(t *testing.T, require.NoError(t, registry.Stop()) }) - ctxb := context.Background() + ctxb := t.Context() // Add the invoice. invoice := newInvoice(t, true, false) @@ -824,7 +823,7 @@ func testKeySendImpl(t *testing.T, keySendEnabled bool, ctx := newTestContext(t, &cfg, makeDB) allSubscriptions, err := ctx.registry.SubscribeNotifications( - context.Background(), 0, 0, + t.Context(), 0, 0, ) require.NoError(t, err) defer allSubscriptions.Cancel() @@ -959,7 +958,7 @@ func testHoldKeysendImpl(t *testing.T, timeoutKeysend bool, cfg.KeysendHoldTime = holdDuration ctx := newTestContext(t, &cfg, makeDB) - ctxb := context.Background() + ctxb := t.Context() allSubscriptions, err := ctx.registry.SubscribeNotifications(ctxb, 0, 0) require.NoError(t, err) defer allSubscriptions.Cancel() @@ -1047,7 +1046,7 @@ func testMppPayment(t *testing.T, defer timeout()() ctx := newTestContext(t, nil, makeDB) - ctxb := context.Background() + ctxb := t.Context() // Add the invoice. 
testInvoice := newInvoice(t, false, false) @@ -1143,7 +1142,7 @@ func testMppPaymentWithOverpayment(t *testing.T, t.Parallel() - ctxb := context.Background() + ctxb := t.Context() f := func(overpaymentRand uint64) bool { ctx := newTestContext(t, nil, makeDB) @@ -1241,7 +1240,7 @@ func testInvoiceExpiryWithRegistry(t *testing.T, t, testTime, 0, numExpired, numPending, ) - ctxb := context.Background() + ctxb := t.Context() var expectedCancellations []lntypes.Hash expiredInvoices := existingInvoices.expiredInvoices @@ -1351,7 +1350,7 @@ func testOldInvoiceRemovalOnStart(t *testing.T, t, testTime, 0, numExpired, numPending, ) - ctxb := context.Background() + ctxb := t.Context() i := 0 for paymentHash, invoice := range existingInvoices.expiredInvoices { @@ -1443,7 +1442,7 @@ func testHeightExpiryWithRegistryImpl(t *testing.T, numParts int, settle bool, testInvoice.HodlInvoice = true testInvoice.PaymentRequest = []byte{1, 2, 3} - ctxb := context.Background() + ctxb := t.Context() _, err := ctx.registry.AddInvoice( ctxb, testInvoice, testInvoicePaymentHash, ) @@ -1554,7 +1553,7 @@ func testMultipleSetHeightExpiry(t *testing.T, // Add a hold invoice. testInvoice := newInvoice(t, true, false) - ctxb := context.Background() + ctxb := t.Context() _, err := ctx.registry.AddInvoice( ctxb, testInvoice, testInvoicePaymentHash, ) @@ -1646,7 +1645,7 @@ func testSettleInvoicePaymentAddrRequired(t *testing.T, t.Parallel() ctx := newTestContext(t, nil, makeDB) - ctxb := context.Background() + ctxb := t.Context() allSubscriptions, err := ctx.registry.SubscribeNotifications(ctxb, 0, 0) require.NoError(t, err) @@ -1738,7 +1737,7 @@ func testSettleInvoicePaymentAddrRequiredOptionalGrace(t *testing.T, t.Parallel() ctx := newTestContext(t, nil, makeDB) - ctxb := context.Background() + ctxb := t.Context() allSubscriptions, err := ctx.registry.SubscribeNotifications(ctxb, 0, 0) require.NoError(t, err) @@ -1946,7 +1945,7 @@ func testSpontaneousAmpPaymentImpl( cfg := defaultRegistryConfig() cfg.AcceptAMP = ampEnabled ctx := newTestContext(t, &cfg, makeDB) - ctxb := context.Background() + ctxb := t.Context() allSubscriptions, err := ctx.registry.SubscribeNotifications(ctxb, 0, 0) require.Nil(t, err) @@ -2132,7 +2131,7 @@ func testFailPartialMPPPaymentExternal(t *testing.T, // Add an invoice which we are going to pay via a MPP set. testInvoice := newInvoice(t, false, false) - ctxb := context.Background() + ctxb := t.Context() _, err := ctx.registry.AddInvoice( ctxb, testInvoice, testInvoicePaymentHash, ) @@ -2243,7 +2242,7 @@ func testFailPartialAMPPayment(t *testing.T, t.Parallel() ctx := newTestContext(t, nil, makeDB) - ctxb := context.Background() + ctxb := t.Context() const ( expiry = uint32(testCurrentHeight + 20) @@ -2449,7 +2448,7 @@ func testCancelAMPInvoicePendingHTLCs(t *testing.T, t.Parallel() ctx := newTestContext(t, nil, makeDB) - ctxb := context.Background() + ctxb := t.Context() const ( expiry = uint32(testCurrentHeight + 20) diff --git a/invoices/invoices_test.go b/invoices/invoices_test.go index 33e7ebb5b..e4e41423e 100644 --- a/invoices/invoices_test.go +++ b/invoices/invoices_test.go @@ -1,7 +1,6 @@ package invoices_test import ( - "context" "crypto/rand" "database/sql" "fmt" @@ -373,7 +372,7 @@ func testInvoiceWorkflowImpl(t *testing.T, test invWorkflowTest, ref = invpkg.InvoiceRefByHash(payHash) } - ctxb := context.Background() + ctxb := t.Context() // Add the invoice to the database, this should succeed as there aren't // any existing invoices within the database with the same payment // hash. 
@@ -498,7 +497,7 @@ func testAddDuplicatePayAddr(t *testing.T, require.NoError(t, err) invoice2.Terms.PaymentAddr = invoice1.Terms.PaymentAddr - ctxb := context.Background() + ctxb := t.Context() // First insert should succeed. inv1Hash := invoice1.Terms.PaymentPreimage.Hash() @@ -529,7 +528,7 @@ func testAddDuplicateKeysendPayAddr(t *testing.T, require.NoError(t, err) invoice2.Terms.PaymentAddr = invpkg.BlankPayAddr - ctxb := context.Background() + ctxb := t.Context() // Inserting both should succeed without a duplicate payment address // failure. @@ -573,7 +572,7 @@ func testFailInvoiceLookupMPPPayAddrOnly(t *testing.T, payHash := invoice.Terms.PaymentPreimage.Hash() payAddr := invoice.Terms.PaymentAddr - ctxb := context.Background() + ctxb := t.Context() _, err = db.AddInvoice(ctxb, invoice, payHash) require.NoError(t, err) @@ -601,7 +600,7 @@ func testInvRefEquivocation(t *testing.T, invoice1, err := randInvoice(1000) require.NoError(t, err) - ctxb := context.Background() + ctxb := t.Context() inv1Hash := invoice1.Terms.PaymentPreimage.Hash() _, err = db.AddInvoice(ctxb, invoice1, inv1Hash) require.NoError(t, err) @@ -651,7 +650,7 @@ func testInvoiceCancelSingleHtlc(t *testing.T, }, } - ctxb := context.Background() + ctxb := t.Context() if _, err := db.AddInvoice(ctxb, testInvoice, paymentHash); err != nil { t.Fatalf("unable to find invoice: %v", err) } @@ -729,7 +728,7 @@ func testInvoiceCancelSingleHtlcAMP(t *testing.T, // preimages. invoice.Terms.Features = ampFeatures - ctxb := context.Background() + ctxb := t.Context() preimage := *invoice.Terms.PaymentPreimage payHash := preimage.Hash() _, err = db.AddInvoice(ctxb, invoice, payHash) @@ -898,7 +897,7 @@ func testInvoiceAddTimeSeries(t *testing.T, t.Parallel() db := makeDB(t) - ctxb := context.Background() + ctxb := t.Context() _, err := db.InvoicesAddedSince(ctxb, 0) require.NoError(t, err) @@ -1073,7 +1072,7 @@ func testSettleIndexAmpPayments(t *testing.T, // Add the invoice to the DB, we use a dummy payment hash here but the // invoice will have a valid payment address set. - ctxb := context.Background() + ctxb := t.Context() preimage := *testInvoice.Terms.PaymentPreimage payHash := preimage.Hash() _, err = db.AddInvoice(ctxb, testInvoice, payHash) @@ -1236,7 +1235,7 @@ func testFetchPendingInvoices(t *testing.T, t.Parallel() db := makeDB(t) - ctxb := context.Background() + ctxb := t.Context() // Make sure that fetching pending invoices from an empty database // returns an empty result and no errors. 
@@ -1305,7 +1304,7 @@ func testDuplicateSettleInvoice(t *testing.T, payHash := invoice.Terms.PaymentPreimage.Hash() - ctxb := context.Background() + ctxb := t.Context() if _, err := db.AddInvoice(ctxb, invoice, payHash); err != nil { t.Fatalf("unable to add invoice %v", err) } @@ -1372,7 +1371,7 @@ func testQueryInvoices(t *testing.T, pendingInvoices []invpkg.Invoice ) - ctxb := context.Background() + ctxb := t.Context() for i := 1; i <= numInvoices; i++ { amt := lnwire.MilliSatoshi(i) invoice, err := randInvoice(amt) @@ -1813,7 +1812,7 @@ func testCustomRecords(t *testing.T, }, } - ctxb := context.Background() + ctxb := t.Context() if _, err := db.AddInvoice(ctxb, testInvoice, paymentHash); err != nil { t.Fatalf("unable to add invoice: %v", err) } @@ -1893,7 +1892,7 @@ func testInvoiceHtlcAMPFieldsImpl(t *testing.T, isAMP bool, testInvoice.Terms.Features = ampFeatures } - ctxb := context.Background() + ctxb := t.Context() payHash := testInvoice.Terms.PaymentPreimage.Hash() _, err = db.AddInvoice(ctxb, testInvoice, payHash) require.Nil(t, err) @@ -2101,7 +2100,7 @@ func testAddInvoiceWithHTLCs(t *testing.T, testInvoice.Htlcs[key] = &invpkg.InvoiceHTLC{} payHash := testInvoice.Terms.PaymentPreimage.Hash() - _, err = db.AddInvoice(context.Background(), testInvoice, payHash) + _, err = db.AddInvoice(t.Context(), testInvoice, payHash) require.Equal(t, invpkg.ErrInvoiceHasHtlcs, err) } @@ -2121,7 +2120,7 @@ func testSetIDIndex(t *testing.T, makeDB func(t *testing.T) invpkg.InvoiceDB) { // preimages. invoice.Terms.Features = ampFeatures - ctxb := context.Background() + ctxb := t.Context() preimage := *invoice.Terms.PaymentPreimage payHash := preimage.Hash() _, err = db.AddInvoice(ctxb, invoice, payHash) @@ -2460,7 +2459,7 @@ func testUnexpectedInvoicePreimage(t *testing.T, invoice, err := randInvoice(lnwire.MilliSatoshi(100)) require.NoError(t, err) - ctxb := context.Background() + ctxb := t.Context() // Add a random invoice indexed by payment hash and payment addr. paymentHash := invoice.Terms.PaymentPreimage.Hash() @@ -2530,7 +2529,7 @@ func testUpdateHTLCPreimagesImpl(t *testing.T, test updateHTLCPreimageTestCase, // preimages. invoice.Terms.Features = ampFeatures - ctxb := context.Background() + ctxb := t.Context() _, err = db.AddInvoice(ctxb, invoice, payHash) require.Nil(t, err) @@ -2594,7 +2593,7 @@ func testDeleteInvoices(t *testing.T, numInvoices := 3 invoicesToDelete := make([]invpkg.InvoiceDeleteRef, numInvoices) - ctxb := context.Background() + ctxb := t.Context() for i := 0; i < numInvoices; i++ { invoice, err := randInvoice(lnwire.MilliSatoshi(i + 1)) require.NoError(t, err) @@ -2689,7 +2688,7 @@ func testDeleteCanceledInvoices(t *testing.T, } // Test deletion of canceled invoices when there are none. - ctxb := context.Background() + ctxb := t.Context() require.NoError(t, db.DeleteCanceledInvoices(ctxb)) // Add some invoices to the test db. 
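Note for reviewers: the invoices hunks above are the purely mechanical part of the swap. A minimal standalone sketch (not part of this patch; package and test name illustrative) of the Go 1.24 behavior it relies on — t.Context() is live for the whole test body and is canceled just before t.Cleanup callbacks run:

package example

import (
	"context"
	"testing"
)

func TestContextLifecycle(t *testing.T) {
	ctx := t.Context()

	// During the test body the context is live, so it can replace
	// context.Background() in calls like AddInvoice above.
	if err := ctx.Err(); err != nil {
		t.Fatalf("context should be live during the test: %v", err)
	}

	t.Cleanup(func() {
		// By the time cleanup runs, ctx is already canceled, so
		// shutdown paths still need their own context (see the
		// HarnessNode.Stop() comment later in this patch).
		if ctx.Err() == nil {
			t.Error("expected t.Context() to be canceled")
		}
		_ = context.Background() // e.g. basis for a shutdown context
	})
}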
@@ -2750,7 +2749,7 @@ func testAddInvoiceInvalidFeatureDeps(t *testing.T, ) hash := invoice.Terms.PaymentPreimage.Hash() - _, err = db.AddInvoice(context.Background(), invoice, hash) + _, err = db.AddInvoice(t.Context(), invoice, hash) require.Error(t, err, feature.NewErrMissingFeatureDep( lnwire.PaymentAddrOptional, )) diff --git a/invoices/kv_sql_migration_test.go b/invoices/kv_sql_migration_test.go index b4d934b9e..709a3c8e1 100644 --- a/invoices/kv_sql_migration_test.go +++ b/invoices/kv_sql_migration_test.go @@ -1,7 +1,6 @@ package invoices_test import ( - "context" "database/sql" "os" "path" @@ -67,7 +66,7 @@ func TestMigrationWithChannelDB(t *testing.T) { sqlite bool) { sqlInvoiceStore, sqlStore := makeSQLDB(t, sqlite) - ctxb := context.Background() + ctxb := t.Context() const batchSize = 11 err := sqlStore.ExecTx( @@ -163,7 +162,7 @@ func TestMigrationWithChannelDB(t *testing.T) { sqliteBackend, err := kvdb.Open( kvdb.SqliteBackendName, - context.Background(), + t.Context(), sqliteConfig, test.dbPath, lncfg.SqliteChannelDBName, lncfg.NSChannelDB, diff --git a/invoices/sql_migration_test.go b/invoices/sql_migration_test.go index 6277df597..3005909ee 100644 --- a/invoices/sql_migration_test.go +++ b/invoices/sql_migration_test.go @@ -1,7 +1,6 @@ package invoices import ( - "context" crand "crypto/rand" "database/sql" "math/rand" @@ -305,7 +304,7 @@ func TestMigrateSingleInvoiceRapid(t *testing.T) { func testMigrateSingleInvoiceRapid(t *rapid.T, store *SQLStore, mpp bool, amp bool) { - ctxb := context.Background() + ctxb := t.Context() invoices := make(map[lntypes.Hash]*Invoice) for i := 0; i < 100; i++ { diff --git a/itest/lnd_channel_funding_fund_max_test.go b/itest/lnd_channel_funding_fund_max_test.go index 5a19ccdeb..f2c73851d 100644 --- a/itest/lnd_channel_funding_fund_max_test.go +++ b/itest/lnd_channel_funding_fund_max_test.go @@ -1,7 +1,6 @@ package itest import ( - "context" "errors" "testing" @@ -65,7 +64,7 @@ func testChannelFundMaxError(ht *lntest.HarnessTest) { // Calculate reserve amount for one channel. reserveResp, _ := alice.RPC.WalletKit.RequiredReserve( - context.Background(), &walletrpc.RequiredReserveRequest{ + ht.Context(), &walletrpc.RequiredReserveRequest{ AdditionalPublicChannels: 1, }, ) @@ -155,7 +154,7 @@ func testChannelFundMaxWalletAmount(ht *lntest.HarnessTest) { // Calculate reserve amount for one channel. reserveResp, _ := alice.RPC.WalletKit.RequiredReserve( - context.Background(), &walletrpc.RequiredReserveRequest{ + ht.Context(), &walletrpc.RequiredReserveRequest{ AdditionalPublicChannels: 1, }, ) @@ -220,7 +219,7 @@ func testChannelFundMaxAnchorReserve(ht *lntest.HarnessTest) { // Calculate reserve amount for one channel. reserveResp, _ := alice.RPC.WalletKit.RequiredReserve( - context.Background(), &walletrpc.RequiredReserveRequest{ + ht.Context(), &walletrpc.RequiredReserveRequest{ AdditionalPublicChannels: 1, }, ) diff --git a/itest/lnd_channel_funding_utxo_selection_test.go b/itest/lnd_channel_funding_utxo_selection_test.go index c5ee72e6a..32caf2b57 100644 --- a/itest/lnd_channel_funding_utxo_selection_test.go +++ b/itest/lnd_channel_funding_utxo_selection_test.go @@ -1,7 +1,6 @@ package itest import ( - "context" "errors" "fmt" "testing" @@ -104,7 +103,7 @@ func testChannelUtxoSelectionError(ht *lntest.HarnessTest) { // Calculate reserve amount for one channel. 
reserveResp, _ := alice.RPC.WalletKit.RequiredReserve( - context.Background(), &walletrpc.RequiredReserveRequest{ + ht.Context(), &walletrpc.RequiredReserveRequest{ AdditionalPublicChannels: 1, }, ) @@ -198,7 +197,7 @@ func testUtxoSelectionSelectedValidChanSize(ht *lntest.HarnessTest) { // Calculate reserve amount for one channel. reserveResp, _ := alice.RPC.WalletKit.RequiredReserve( - context.Background(), &walletrpc.RequiredReserveRequest{ + ht.Context(), &walletrpc.RequiredReserveRequest{ AdditionalPublicChannels: 1, }, ) @@ -240,7 +239,7 @@ func testUtxoSelectionSelectedValidChanReserve(ht *lntest.HarnessTest) { // Calculate reserve amount for one channel. reserveResp, _ := alice.RPC.WalletKit.RequiredReserve( - context.Background(), &walletrpc.RequiredReserveRequest{ + ht.Context(), &walletrpc.RequiredReserveRequest{ AdditionalPublicChannels: 1, }, ) @@ -281,7 +280,7 @@ func testUtxoSelectionReserveFromSelected(ht *lntest.HarnessTest) { // Calculate reserve amount for one channel. reserveResp, _ := alice.RPC.WalletKit.RequiredReserve( - context.Background(), &walletrpc.RequiredReserveRequest{ + ht.Context(), &walletrpc.RequiredReserveRequest{ AdditionalPublicChannels: 1, }, ) @@ -325,7 +324,7 @@ func testUtxoSelectionFundmax(ht *lntest.HarnessTest) { // Calculate reserve amount for one channel. reserveResp, _ := alice.RPC.WalletKit.RequiredReserve( - context.Background(), &walletrpc.RequiredReserveRequest{ + ht.Context(), &walletrpc.RequiredReserveRequest{ AdditionalPublicChannels: 1, }, ) @@ -364,7 +363,7 @@ func testUtxoSelectionFundmaxReserve(ht *lntest.HarnessTest) { // Calculate reserve amount for one channel. reserveResp, _ := alice.RPC.WalletKit.RequiredReserve( - context.Background(), &walletrpc.RequiredReserveRequest{ + ht.Context(), &walletrpc.RequiredReserveRequest{ AdditionalPublicChannels: 1, }, ) @@ -404,7 +403,7 @@ func testUtxoSelectionReuseUTXO(ht *lntest.HarnessTest) { // Calculate reserve amount for one channel. reserveResp, _ := alice.RPC.WalletKit.RequiredReserve( - context.Background(), &walletrpc.RequiredReserveRequest{ + ht.Context(), &walletrpc.RequiredReserveRequest{ AdditionalPublicChannels: 1, }, ) diff --git a/itest/lnd_funding_test.go b/itest/lnd_funding_test.go index 52f0f7805..b6734e032 100644 --- a/itest/lnd_funding_test.go +++ b/itest/lnd_funding_test.go @@ -1,7 +1,6 @@ package itest import ( - "context" "fmt" "testing" @@ -1164,7 +1163,7 @@ func ensurePolicy(ht *lntest.HarnessTest, alice, peer *node.HarnessNode, channel := ht.AssertChannelExists(peer, chanPoint) policy, err := peer.RPC.LN.GetChanInfo( - context.Background(), &lnrpc.ChanInfoRequest{ + ht.Context(), &lnrpc.ChanInfoRequest{ ChanId: channel.ChanId, }, ) diff --git a/itest/lnd_graph_migration_test.go b/itest/lnd_graph_migration_test.go index 652b1e652..3d1aec544 100644 --- a/itest/lnd_graph_migration_test.go +++ b/itest/lnd_graph_migration_test.go @@ -21,7 +21,7 @@ func testGraphMigration(ht *lntest.HarnessTest) { ht.Skip("not running with test_native_sql tag") } - ctx := context.Background() + ctx := ht.Context() alice := ht.NewNodeWithCoins("Alice", nil) // Make sure we run the test with SQLite or Postgres. 
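Note for reviewers: the itest hunks above follow a single convention: RPCs take ht.Context() (lntest.HarnessTest embeds *testing.T, so this is the test's context) instead of context.Background(). A sketch of the pattern with an illustrative helper that is not itself part of this patch:

package example

import (
	"context"

	"github.com/lightningnetwork/lnd/lnrpc/walletrpc"
	"github.com/lightningnetwork/lnd/lntest"
	"github.com/lightningnetwork/lnd/lntest/node"
	"github.com/lightningnetwork/lnd/lntest/wait"
	"github.com/stretchr/testify/require"
)

// requiredReserve bounds the RPC with a timeout derived from the test
// context, so the call is canceled either on timeout or when the test
// ends, whichever comes first.
func requiredReserve(ht *lntest.HarnessTest,
	alice *node.HarnessNode) *walletrpc.RequiredReserveResponse {

	ctxt, cancel := context.WithTimeout(ht.Context(), wait.DefaultTimeout)
	defer cancel()

	resp, err := alice.RPC.WalletKit.RequiredReserve(
		ctxt, &walletrpc.RequiredReserveRequest{
			AdditionalPublicChannels: 1,
		},
	)
	require.NoError(ht, err)

	return resp
}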
diff --git a/itest/lnd_invoice_acceptor_test.go b/itest/lnd_invoice_acceptor_test.go index 9195b102e..f7c617b25 100644 --- a/itest/lnd_invoice_acceptor_test.go +++ b/itest/lnd_invoice_acceptor_test.go @@ -1,7 +1,6 @@ package itest import ( - "context" "time" "github.com/btcsuite/btcd/btcutil" @@ -47,7 +46,7 @@ func testInvoiceHtlcModifierBasic(ht *lntest.HarnessTest) { // Make sure we get an error if we try to register a second modifier and // then try to use it (the error won't be returned on connect, only on // the first _read_ interaction on the stream). - mod2, err := carol.RPC.Invoice.HtlcModifier(context.Background()) + mod2, err := carol.RPC.Invoice.HtlcModifier(ht.Context()) require.NoError(ht, err) _, err = mod2.Recv() require.ErrorContains( diff --git a/itest/lnd_misc_test.go b/itest/lnd_misc_test.go index 30b6b77f7..138232706 100644 --- a/itest/lnd_misc_test.go +++ b/itest/lnd_misc_test.go @@ -1400,7 +1400,7 @@ func testGRPCNotFound(ht *lntest.HarnessTest) { // during a reorg. A reorg notification is produced after a reorg affects the // block which has produced a spending notification for this registration. func testReorgNotifications(ht *lntest.HarnessTest) { - ctxb := context.Background() + ctxb := ht.Context() const timeout = wait.DefaultTimeout alice := ht.NewNodeWithCoins("Alice", nil) @@ -1599,7 +1599,7 @@ func testReorgNotifications(ht *lntest.HarnessTest) { func testEstimateFee(ht *lntest.HarnessTest) { alice := ht.NewNode("Alice", nil) - ctx := context.Background() + ctx := ht.Context() testCases := []struct { name string diff --git a/itest/lnd_payment_test.go b/itest/lnd_payment_test.go index b25ea6205..37aff0522 100644 --- a/itest/lnd_payment_test.go +++ b/itest/lnd_payment_test.go @@ -1081,7 +1081,7 @@ func sendPaymentInterceptAndCancel(ht *lntest.HarnessTest, // We initiate a payment from Alice and define the payment context // cancellable. - ctx, cancelPaymentContext := context.WithCancel(context.Background()) + ctx, cancelPaymentContext := context.WithCancel(ht.Context()) var paymentStream rpc.PaymentClient go func() { req := &routerrpc.SendPaymentRequest{ diff --git a/itest/lnd_route_blinding_test.go b/itest/lnd_route_blinding_test.go index 0e3c61f61..af2612d24 100644 --- a/itest/lnd_route_blinding_test.go +++ b/itest/lnd_route_blinding_test.go @@ -335,7 +335,7 @@ type blindedForwardTest struct { func newBlindedForwardTest(ht *lntest.HarnessTest) (context.Context, *blindedForwardTest) { - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(ht.Context()) return ctx, &blindedForwardTest{ ht: ht, diff --git a/itest/lnd_rpc_middleware_interceptor_test.go b/itest/lnd_rpc_middleware_interceptor_test.go index 007692334..5b16da015 100644 --- a/itest/lnd_rpc_middleware_interceptor_test.go +++ b/itest/lnd_rpc_middleware_interceptor_test.go @@ -220,7 +220,7 @@ func middlewareInterceptionTest(t *testing.T, // Everything we test here should be executed in a matter of // milliseconds, so we can use one single timeout context for all calls. - ctxb := context.Background() + ctxb := t.Context() ctxc, cancel := context.WithTimeout(ctxb, defaultTimeout) defer cancel() @@ -372,7 +372,7 @@ func middlewareResponseManipulationTest(t *testing.T, // Everything we test here should be executed in a matter of // milliseconds, so we can use one single timeout context for all calls. 
- ctxb := context.Background() + ctxb := t.Context() ctxc, cancel := context.WithTimeout(ctxb, defaultTimeout) defer cancel() @@ -489,7 +489,7 @@ func middlewareRequestManipulationTest(t *testing.T, node *node.HarnessNode, // Everything we test here should be executed in a matter of // milliseconds, so we can use one single timeout context for all calls. - ctxb := context.Background() + ctxb := t.Context() ctxc, cancel := context.WithTimeout(ctxb, defaultTimeout) defer cancel() @@ -582,7 +582,7 @@ func middlewareMandatoryTest(ht *lntest.HarnessTest, node *node.HarnessNode) { err = node.WaitUntilServerActive() require.NoError(ht, err) - ctxb := context.Background() + ctxb := ht.Context() ctxc, cancel := context.WithTimeout(ctxb, defaultTimeout) defer cancel() diff --git a/itest/lnd_signer_test.go b/itest/lnd_signer_test.go index ada753408..8c408aa1e 100644 --- a/itest/lnd_signer_test.go +++ b/itest/lnd_signer_test.go @@ -2,7 +2,6 @@ package itest import ( "bytes" - "context" "crypto/sha256" "github.com/btcsuite/btcd/btcec/v2" @@ -500,7 +499,7 @@ func runSignVerifyMessage(ht *lntest.HarnessTest, alice *node.HarnessNode) { expectedErr := "tag can only be used when the Schnorr signature " + "option is set" - ctxt := context.Background() + ctxt := ht.Context() _, err := alice.RPC.Signer.SignMessage(ctxt, signMsgReq) require.ErrorContains(ht, err, expectedErr) diff --git a/kvdb/etcd/commit_queue_test.go b/kvdb/etcd/commit_queue_test.go index 900b08ca7..95c3d4d55 100644 --- a/kvdb/etcd/commit_queue_test.go +++ b/kvdb/etcd/commit_queue_test.go @@ -40,7 +40,7 @@ func TestCommitQueue(t *testing.T) { } } - ctx := context.Background() + ctx := t.Context() ctx, cancel := context.WithCancel(ctx) q := NewCommitQueue(ctx) defer q.Stop() diff --git a/kvdb/etcd/db_test.go b/kvdb/etcd/db_test.go index 9ef68d9fe..1ccd593d0 100644 --- a/kvdb/etcd/db_test.go +++ b/kvdb/etcd/db_test.go @@ -19,7 +19,7 @@ func TestDump(t *testing.T) { f := NewEtcdTestFixture(t) - db, err := newEtcdBackend(context.Background(), f.BackendConfig()) + db, err := newEtcdBackend(t.Context(), f.BackendConfig()) require.NoError(t, err) err = db.Update(func(tx walletdb.ReadWriteTx) error { @@ -53,7 +53,7 @@ func TestAbortContext(t *testing.T) { f := NewEtcdTestFixture(t) - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) config := f.BackendConfig() @@ -83,7 +83,7 @@ func TestNewEtcdClient(t *testing.T) { f := NewEtcdTestFixture(t) client, ctx, cancel, err := NewEtcdClient( - context.Background(), f.BackendConfig(), + t.Context(), f.BackendConfig(), ) require.NoError(t, err) t.Cleanup(cancel) diff --git a/kvdb/etcd/readwrite_tx_test.go b/kvdb/etcd/readwrite_tx_test.go index d66b2e251..2db544f59 100644 --- a/kvdb/etcd/readwrite_tx_test.go +++ b/kvdb/etcd/readwrite_tx_test.go @@ -4,7 +4,6 @@ package etcd import ( - "context" "testing" "github.com/btcsuite/btcwallet/walletdb" @@ -16,7 +15,7 @@ func TestChangeDuringManualTx(t *testing.T) { f := NewEtcdTestFixture(t) - db, err := newEtcdBackend(context.Background(), f.BackendConfig()) + db, err := newEtcdBackend(t.Context(), f.BackendConfig()) require.NoError(t, err) tx, err := db.BeginReadWriteTx() @@ -44,7 +43,7 @@ func TestChangeDuringUpdate(t *testing.T) { f := NewEtcdTestFixture(t) - db, err := newEtcdBackend(context.Background(), f.BackendConfig()) + db, err := newEtcdBackend(t.Context(), f.BackendConfig()) require.NoError(t, err) count := 0 diff --git a/kvdb/etcd/stm_test.go b/kvdb/etcd/stm_test.go index 614019a17..c1815e5da 100644 
--- a/kvdb/etcd/stm_test.go +++ b/kvdb/etcd/stm_test.go @@ -23,7 +23,7 @@ func TestPutToEmpty(t *testing.T) { t.Parallel() f := NewEtcdTestFixture(t) - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) txQueue := NewCommitQueue(ctx) t.Cleanup(func() { @@ -50,7 +50,7 @@ func TestGetPutDel(t *testing.T) { t.Parallel() f := NewEtcdTestFixture(t) - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) txQueue := NewCommitQueue(ctx) t.Cleanup(func() { @@ -151,7 +151,7 @@ func testFirstLastNextPrev(t *testing.T, prefetchKeys []string, prefetchRange []string, expectedCallCount int) { f := NewEtcdTestFixture(t) - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) txQueue := NewCommitQueue(ctx) t.Cleanup(func() { @@ -325,7 +325,7 @@ func TestCommitError(t *testing.T) { t.Parallel() f := NewEtcdTestFixture(t) - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) txQueue := NewCommitQueue(ctx) t.Cleanup(func() { @@ -374,7 +374,7 @@ func TestManualTxError(t *testing.T) { t.Parallel() f := NewEtcdTestFixture(t) - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) txQueue := NewCommitQueue(ctx) t.Cleanup(func() { diff --git a/kvdb/etcd/walletdb_interface_test.go b/kvdb/etcd/walletdb_interface_test.go index 483becbb2..eaafbbeed 100644 --- a/kvdb/etcd/walletdb_interface_test.go +++ b/kvdb/etcd/walletdb_interface_test.go @@ -4,7 +4,6 @@ package etcd import ( - "context" "testing" "github.com/btcsuite/btcwallet/walletdb/walletdbtest" @@ -15,5 +14,5 @@ import ( func TestWalletDBInterface(t *testing.T) { f := NewEtcdTestFixture(t) cfg := f.BackendConfig() - walletdbtest.TestInterface(t, dbType, context.Background(), &cfg) + walletdbtest.TestInterface(t, dbType, t.Context(), &cfg) } diff --git a/kvdb/go.mod b/kvdb/go.mod index baa995dd8..a46999c24 100644 --- a/kvdb/go.mod +++ b/kvdb/go.mod @@ -15,7 +15,6 @@ require ( go.etcd.io/etcd/client/pkg/v3 v3.5.12 go.etcd.io/etcd/client/v3 v3.5.12 go.etcd.io/etcd/server/v3 v3.5.12 - golang.org/x/net v0.39.0 modernc.org/sqlite v1.29.10 ) @@ -118,6 +117,7 @@ require ( golang.org/x/crypto v0.37.0 // indirect golang.org/x/exp v0.0.0-20240325151524-a685a6edb6d8 // indirect golang.org/x/mod v0.17.0 // indirect + golang.org/x/net v0.39.0 // indirect golang.org/x/sync v0.13.0 // indirect golang.org/x/sys v0.32.0 // indirect golang.org/x/text v0.24.0 // indirect diff --git a/kvdb/postgres/db_test.go b/kvdb/postgres/db_test.go index 0378b4dbb..1f660087c 100644 --- a/kvdb/postgres/db_test.go +++ b/kvdb/postgres/db_test.go @@ -9,7 +9,6 @@ import ( "github.com/btcsuite/btcwallet/walletdb" "github.com/btcsuite/btcwallet/walletdb/walletdbtest" "github.com/stretchr/testify/require" - "golang.org/x/net/context" ) // TestInterface performs all interfaces tests for this database driver. @@ -24,7 +23,7 @@ func TestInterface(t *testing.T) { // dbType is the database type name for this driver. 
const dbType = "postgres" - ctx := context.Background() + ctx := t.Context() cfg := &Config{ Dsn: f.Dsn, } diff --git a/kvdb/sqlite/db_test.go b/kvdb/sqlite/db_test.go index f12acf47f..e444803f2 100644 --- a/kvdb/sqlite/db_test.go +++ b/kvdb/sqlite/db_test.go @@ -9,14 +9,13 @@ import ( "github.com/btcsuite/btcwallet/walletdb/walletdbtest" "github.com/lightningnetwork/lnd/kvdb/sqlbase" "github.com/stretchr/testify/require" - "golang.org/x/net/context" ) // TestInterface performs all interfaces tests for this database driver. func TestInterface(t *testing.T) { // dbType is the database type name for this driver. dir := t.TempDir() - ctx := context.Background() + ctx := t.Context() sqlbase.Init(0) diff --git a/lnrpc/routerrpc/router_backend_test.go b/lnrpc/routerrpc/router_backend_test.go index 080fc7531..b94e7a00b 100644 --- a/lnrpc/routerrpc/router_backend_test.go +++ b/lnrpc/routerrpc/router_backend_test.go @@ -2,7 +2,6 @@ package routerrpc import ( "bytes" - "context" "encoding/hex" "testing" "time" @@ -264,7 +263,7 @@ func testQueryRoutes(t *testing.T, useMissionControl bool, useMsat bool, backend.MaxTotalTimelock = 1000 } - resp, err := backend.QueryRoutes(context.Background(), request) + resp, err := backend.QueryRoutes(t.Context(), request) // If we're using both OutgoingChanId and OutgoingChanIds, we should get // an error. diff --git a/lnrpc/routerrpc/router_server_test.go b/lnrpc/routerrpc/router_server_test.go index 70e3fbda8..477a9b75c 100644 --- a/lnrpc/routerrpc/router_server_test.go +++ b/lnrpc/routerrpc/router_server_test.go @@ -82,7 +82,7 @@ func TestTrackPaymentsReturnsOnCancelContext(t *testing.T) { } towerMock := makeControlTowerMock() - streamCtx, cancelStream := context.WithCancel(context.Background()) + streamCtx, cancelStream := context.WithCancel(t.Context()) stream := makeStreamMock(streamCtx) server := &Server{ @@ -110,7 +110,7 @@ func TestTrackPaymentsInflightUpdates(t *testing.T) { } towerMock := makeControlTowerMock() - streamCtx, cancelStream := context.WithCancel(context.Background()) + streamCtx, cancelStream := context.WithCancel(t.Context()) stream := makeStreamMock(streamCtx) defer cancelStream() @@ -172,7 +172,7 @@ func TestTrackPaymentsNoInflightUpdates(t *testing.T) { } towerMock.queue.Start() - streamCtx, cancelStream := context.WithCancel(context.Background()) + streamCtx, cancelStream := context.WithCancel(t.Context()) stream := makeStreamMock(streamCtx) defer cancelStream() diff --git a/lntest/fee_service.go b/lntest/fee_service.go index b65cdc66a..ee20fdc06 100644 --- a/lntest/fee_service.go +++ b/lntest/fee_service.go @@ -1,7 +1,6 @@ package lntest import ( - "context" "encoding/json" "fmt" "io" @@ -130,7 +129,7 @@ func (f *FeeService) handleRequest(w http.ResponseWriter, _ *http.Request) { // Stop stops the web server. func (f *FeeService) Stop() error { - err := f.srv.Shutdown(context.Background()) + err := f.srv.Shutdown(f.Context()) require.NoError(f, err, "cannot stop fee api") f.wg.Wait() diff --git a/lntest/harness.go b/lntest/harness.go index cf948254b..5c5325220 100644 --- a/lntest/harness.go +++ b/lntest/harness.go @@ -147,7 +147,7 @@ func NewHarnessTest(t *testing.T, lndBinary string, feeService WebFeeService, t.Helper() // Create the run context. 
- ctxt, cancel := context.WithCancel(context.Background()) + ctxt, cancel := context.WithCancel(t.Context()) manager := newNodeManager(lndBinary, dbBackend, nativeSQL) diff --git a/lntest/node/harness_node.go b/lntest/node/harness_node.go index 87dfa9ce6..62419d687 100644 --- a/lntest/node/harness_node.go +++ b/lntest/node/harness_node.go @@ -115,7 +115,7 @@ func NewHarnessNode(t *testing.T, cfg *BaseNodeConfig) (*HarnessNode, error) { var dbName string if cfg.DBBackend == BackendPostgres { var err error - dbName, err = createTempPgDB() + dbName, err = createTempPgDB(t.Context()) if err != nil { return nil, err } @@ -702,6 +702,11 @@ func (hn *HarnessNode) Stop() error { // gets closed before a response is returned. req := lnrpc.StopRequest{} + // We have to use context.Background(), because both hn.Context() + // and hn.runCtx have been canceled by this point. We canceled + // hn.runCtx just a few lines above. hn.Context() is canceled by + // Go before calling cleanup callbacks; HarnessNode.Stop() is + // called from shutdownAllNodes, which is itself called from a cleanup. ctxt, cancel := context.WithCancel(context.Background()) defer cancel() @@ -841,8 +846,8 @@ func (hn *HarnessNode) BackupDB() error { // Backup database. backupDBName := hn.Cfg.postgresDBName + "_backup" err := executePgQuery( - "CREATE DATABASE " + backupDBName + " WITH TEMPLATE " + - hn.Cfg.postgresDBName, + hn.Context(), "CREATE DATABASE "+backupDBName+ + " WITH TEMPLATE "+hn.Cfg.postgresDBName, ) if err != nil { return err } @@ -872,14 +877,14 @@ func (hn *HarnessNode) RestoreDB() error { // Restore database. backupDBName := hn.Cfg.postgresDBName + "_backup" err := executePgQuery( - "DROP DATABASE " + hn.Cfg.postgresDBName, + hn.Context(), "DROP DATABASE "+hn.Cfg.postgresDBName, ) if err != nil { return err } err = executePgQuery( - "ALTER DATABASE " + backupDBName + " RENAME TO " + - hn.Cfg.postgresDBName, + hn.Context(), "ALTER DATABASE "+backupDBName+ + " RENAME TO "+hn.Cfg.postgresDBName, ) if err != nil { return err } @@ -924,7 +929,7 @@ func postgresDatabaseDsn(dbName string) string { } // createTempPgDB creates a temp postgres database. -func createTempPgDB() (string, error) { +func createTempPgDB(ctx context.Context) (string, error) { // Create random database name. randBytes := make([]byte, 8) _, err := rand.Read(randBytes) @@ -934,7 +939,7 @@ dbName := "itest_" + hex.EncodeToString(randBytes) // Create database. - err = executePgQuery("CREATE DATABASE " + dbName) + err = executePgQuery(ctx, "CREATE DATABASE "+dbName) if err != nil { return "", err } @@ -943,17 +948,14 @@ } // executePgQuery executes a SQL statement in a postgres db.
-func executePgQuery(query string) error { - pool, err := pgxpool.Connect( - context.Background(), - postgresDatabaseDsn("postgres"), - ) +func executePgQuery(ctx context.Context, query string) error { + pool, err := pgxpool.Connect(ctx, postgresDatabaseDsn("postgres")) if err != nil { return fmt.Errorf("unable to connect to database: %w", err) } defer pool.Close() - _, err = pool.Exec(context.Background(), query) + _, err = pool.Exec(ctx, query) return err } diff --git a/lnwallet/chancloser/rbf_coop_test.go b/lnwallet/chancloser/rbf_coop_test.go index 485aef17b..088f7f4e1 100644 --- a/lnwallet/chancloser/rbf_coop_test.go +++ b/lnwallet/chancloser/rbf_coop_test.go @@ -153,7 +153,7 @@ func assertUnknownEventFail(t *testing.T, startingState ProtocolState) { defer closeHarness.stopAndAssert() closeHarness.sendEventAndExpectFailure( - context.Background(), &unknownEvent{}, + t.Context(), &unknownEvent{}, ErrInvalidStateTransition, ) }) @@ -173,7 +173,7 @@ func assertSpendEventCloseFin(t *testing.T, startingState ProtocolState) { defer closeHarness.stopAndAssert() closeHarness.chanCloser.SendEvent( - context.Background(), &SpendEvent{}, + t.Context(), &SpendEvent{}, ) closeHarness.assertStateTransitions(&CloseFin{}) @@ -539,7 +539,7 @@ func (r *rbfCloserTestHarness) expectHalfSignerIteration( initEvent ProtocolEvent, balanceAfterClose, absoluteFee btcutil.Amount, dustExpect dustExpectation, iteration bool) { - ctx := context.Background() + ctx := r.T.Context() numFeeCalls := 2 // If we're using the SendOfferEvent as a trigger, we only need to call @@ -645,7 +645,7 @@ func (r *rbfCloserTestHarness) assertSingleRbfIteration( initEvent ProtocolEvent, balanceAfterClose, absoluteFee btcutil.Amount, dustExpect dustExpectation, iteration bool) { - ctx := context.Background() + ctx := r.T.Context() // We'll now send in the send offer event, which should trigger 1/2 of // the RBF loop, ending us in the LocalOfferSent state. @@ -686,7 +686,7 @@ func (r *rbfCloserTestHarness) assertSingleRemoteRbfIteration( absoluteFee btcutil.Amount, sequence uint32, iteration bool, sendInit bool) { - ctx := context.Background() + ctx := r.T.Context() // When we receive the signature below, our local state machine should // move to finalize the close. @@ -744,7 +744,7 @@ func assertStateT[T ProtocolState](h *rbfCloserTestHarness) T { func newRbfCloserTestHarness(t *testing.T, cfg *harnessCfg) *rbfCloserTestHarness { - ctx := context.Background() + ctx := t.Context() startingHeight := 200 @@ -850,7 +850,7 @@ func newCloser(t *testing.T, cfg *harnessCfg) *rbfCloserTestHarness { // TestRbfChannelActiveTransitions tests the transitions of from the // ChannelActive state. func TestRbfChannelActiveTransitions(t *testing.T) { - ctx := context.Background() + ctx := t.Context() localAddr := lnwire.DeliveryAddress(bytes.Repeat([]byte{0x01}, 20)) remoteAddr := lnwire.DeliveryAddress(bytes.Repeat([]byte{0x02}, 20)) @@ -1001,7 +1001,7 @@ func TestRbfChannelActiveTransitions(t *testing.T) { // shutdown ourselves. func TestRbfShutdownPendingTransitions(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := t.Context() startingState := &ShutdownPending{} @@ -1229,7 +1229,7 @@ func TestRbfShutdownPendingTransitions(t *testing.T) { // transition to the negotiation state. 
func TestRbfChannelFlushingTransitions(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := t.Context() localBalance := lnwire.NewMSatFromSatoshis(10_000) remoteBalance := lnwire.NewMSatFromSatoshis(50_000) @@ -1406,7 +1406,7 @@ func TestRbfChannelFlushingTransitions(t *testing.T) { // rate. func TestRbfCloseClosingNegotiationLocal(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := t.Context() localBalance := lnwire.NewMSatFromSatoshis(40_000) remoteBalance := lnwire.NewMSatFromSatoshis(50_000) @@ -1687,7 +1687,7 @@ func TestRbfCloseClosingNegotiationLocal(t *testing.T) { func TestRbfCloseClosingNegotiationRemote(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := t.Context() localBalance := lnwire.NewMSatFromSatoshis(40_000) remoteBalance := lnwire.NewMSatFromSatoshis(50_000) diff --git a/lnwallet/channel_test.go b/lnwallet/channel_test.go index 0316dcc54..6e175ba73 100644 --- a/lnwallet/channel_test.go +++ b/lnwallet/channel_test.go @@ -3836,7 +3836,7 @@ func TestQuitDuringSignNextCommitment(t *testing.T) { ) quitDelay := time.Millisecond * 20 - quit, quitFunc := context.WithCancel(context.Background()) + quit, quitFunc := context.WithCancel(t.Context()) // Alice's channel will be stuck waiting for aux sig job responses until // we send the quit signal. We add an explicit sleep here so that we can diff --git a/macaroons/bake_test.go b/macaroons/bake_test.go index 20c22b647..c018cfdb7 100644 --- a/macaroons/bake_test.go +++ b/macaroons/bake_test.go @@ -1,7 +1,6 @@ package macaroons_test import ( - "context" "encoding/hex" "testing" @@ -49,7 +48,7 @@ func TestBakeFromRootKey(t *testing.T) { md := metadata.New(map[string]string{ "macaroon": hex.EncodeToString(macaroonBytes), }) - macCtx := metadata.NewIncomingContext(context.Background(), md) + macCtx := metadata.NewIncomingContext(t.Context(), md) // The macaroon should be valid for the service, since the root key was // the same. diff --git a/macaroons/fuzz_test.go b/macaroons/fuzz_test.go index defae4143..51bb59f1d 100644 --- a/macaroons/fuzz_test.go +++ b/macaroons/fuzz_test.go @@ -1,7 +1,6 @@ package macaroons import ( - "context" "testing" "gopkg.in/macaroon-bakery.v2/bakery" @@ -17,7 +16,7 @@ func FuzzUnmarshalMacaroon(f *testing.F) { func FuzzAuthChecker(f *testing.F) { rootKeyStore := bakery.NewMemRootKeyStore() - ctx := context.Background() + ctx := f.Context() f.Fuzz(func(t *testing.T, location, entity, action, method string, rootKey, id []byte) { diff --git a/macaroons/service_test.go b/macaroons/service_test.go index 40d164dd4..cb593f0ba 100644 --- a/macaroons/service_test.go +++ b/macaroons/service_test.go @@ -1,7 +1,6 @@ package macaroons_test import ( - "context" "encoding/hex" "path" "testing" @@ -55,7 +54,7 @@ func setupTestRootKeyStorage(t *testing.T) kvdb.Backend { // TestNewService tests the creation of the macaroon service. func TestNewService(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := t.Context() // First, initialize a dummy DB file with a store that the service // can read from. Make sure the file is removed in the end. @@ -109,7 +108,7 @@ func TestNewService(t *testing.T) { // incoming context. func TestValidateMacaroon(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := t.Context() // First, initialize the service and unlock it. db := setupTestRootKeyStorage(t) @@ -157,7 +156,7 @@ func TestValidateMacaroon(t *testing.T) { // TestListMacaroonIDs checks that ListMacaroonIDs returns the expected result. 
func TestListMacaroonIDs(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := t.Context() // First, initialize a dummy DB file with a store that the service // can read from. Make sure the file is removed in the end. @@ -192,7 +191,7 @@ func TestListMacaroonIDs(t *testing.T) { func TestDeleteMacaroonID(t *testing.T) { t.Parallel() - ctxb := context.Background() + ctxb := t.Context() // First, initialize a dummy DB file with a store that the service // can read from. Make sure the file is removed in the end. @@ -308,7 +307,7 @@ func TestCloneMacaroons(t *testing.T) { func TestMacaroonVersionDecode(t *testing.T) { t.Parallel() - ctxb := context.Background() + ctxb := t.Context() // First, initialize a dummy DB file with a store that the service // can read from. Make sure the file is removed in the end. diff --git a/macaroons/store_test.go b/macaroons/store_test.go index 7a2562140..b17044de9 100644 --- a/macaroons/store_test.go +++ b/macaroons/store_test.go @@ -59,7 +59,7 @@ func openTestStore(t *testing.T, tempDir string) *macaroons.RootKeyStorage { // reading keys and closing it. func TestStore(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := t.Context() tempDir, store := newTestStore(t) diff --git a/msgmux/msg_router_test.go b/msgmux/msg_router_test.go index 8ecad778e..a26fe0373 100644 --- a/msgmux/msg_router_test.go +++ b/msgmux/msg_router_test.go @@ -34,7 +34,7 @@ func (m *mockEndpoint) SendMessage(ctx context.Context, msg PeerMsg) bool { // TestMessageRouterOperation tests the basic operation of the message router: // add new endpoints, route to them, remove, them, etc. func TestMessageRouterOperation(t *testing.T) { - ctx := context.Background() + ctx := t.Context() msgRouter := NewMultiMsgRouter() msgRouter.Start(ctx) defer msgRouter.Stop() diff --git a/payments/db/kv_store_test.go b/payments/db/kv_store_test.go index caa46b5d0..73df8ead4 100644 --- a/payments/db/kv_store_test.go +++ b/payments/db/kv_store_test.go @@ -2,7 +2,6 @@ package paymentsdb import ( "bytes" - "context" "math" "reflect" "testing" @@ -932,7 +931,7 @@ func TestQueryPayments(t *testing.T) { t.Run(tt.name, func(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := t.Context() paymentDB := NewKVTestDB(t) diff --git a/payments/db/payment_test.go b/payments/db/payment_test.go index c07d0e9cf..534a1b1e5 100644 --- a/payments/db/payment_test.go +++ b/payments/db/payment_test.go @@ -1,7 +1,6 @@ package paymentsdb import ( - "context" "crypto/rand" "crypto/sha256" "errors" @@ -304,7 +303,7 @@ func assertDBPayments(t *testing.T, paymentDB DB, payments []*payment) { t.Helper() response, err := paymentDB.QueryPayments( - context.Background(), Query{ + t.Context(), Query{ IndexOffset: 0, MaxPayments: uint64(len(payments)), IncludeIncomplete: true, diff --git a/protofsm/state_machine_test.go b/protofsm/state_machine_test.go index 844256fb4..ca060614f 100644 --- a/protofsm/state_machine_test.go +++ b/protofsm/state_machine_test.go @@ -1,7 +1,6 @@ package protofsm import ( - "context" "encoding/hex" "fmt" "sync/atomic" @@ -410,7 +409,7 @@ func (d *dummyAdapters) RegisterSpendNtfn(outpoint *wire.OutPoint, // TestStateMachineOnInitDaemonEvent tests that the state machine will properly // execute any init-level daemon events passed into it. func TestStateMachineOnInitDaemonEvent(t *testing.T) { - ctx := context.Background() + ctx := t.Context() // First, we'll create our state machine given the env, and our // starting state. 
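Note for reviewers: several hunks above (kvdb/etcd, lnwallet/channel_test.go) wrap the base context with WithCancel or WithTimeout. A standalone sketch (not part of this patch; package and test name illustrative) of why deriving from t.Context() is a drop-in replacement for deriving from context.Background():

package example

import (
	"context"
	"testing"
	"time"
)

func TestDerivedContexts(t *testing.T) {
	// Explicit cancellation propagates exactly as it did with
	// context.Background() as the parent.
	ctx, cancel := context.WithCancel(t.Context())
	cancel()
	if ctx.Err() == nil {
		t.Fatal("expected explicit cancel to propagate")
	}

	// Timeouts also behave the same; the only addition is that the
	// child is canceled automatically when the test finishes.
	ctxt, cancelTimeout := context.WithTimeout(
		t.Context(), time.Millisecond,
	)
	defer cancelTimeout()

	<-ctxt.Done()
}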
@@ -464,7 +463,7 @@ func TestStateMachineOnInitDaemonEvent(t *testing.T) { // transition. func TestStateMachineInternalEvents(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := t.Context() // First, we'll create our state machine given the env, and our // starting state. @@ -513,7 +512,7 @@ func TestStateMachineInternalEvents(t *testing.T) { // daemon emitted as part of the state transition process. func TestStateMachineDaemonEvents(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := t.Context() // First, we'll create our state machine given the env, and our // starting state. @@ -598,7 +597,7 @@ func TestStateMachineDaemonEvents(t *testing.T) { // scenario where full block details are requested in the confirmation // notification. func testStateMachineConfMapperImpl(t *testing.T, fullBlock bool) { - ctx := context.Background() + ctx := t.Context() // Create the state machine. env := &dummyEnv{} @@ -722,7 +721,7 @@ func TestStateMachineConfMapper(t *testing.T) { // transition. func TestStateMachineSpendMapper(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := t.Context() // Create the state machine. env := &dummyEnv{} @@ -808,7 +807,7 @@ func (d *dummyMsgMapper) MapMsg(wireMsg msgmux.PeerMsg) fn.Option[dummyEvents] { // TestStateMachineMsgMapper tests that given a message mapper, we can properly // send in wire messages get mapped to FSM events. func TestStateMachineMsgMapper(t *testing.T) { - ctx := context.Background() + ctx := t.Context() // First, we'll create our state machine given the env, and our // starting state. diff --git a/routing/localchans/manager_test.go b/routing/localchans/manager_test.go index 09b4f0bc8..a2e7164b2 100644 --- a/routing/localchans/manager_test.go +++ b/routing/localchans/manager_test.go @@ -317,7 +317,7 @@ func TestManager(t *testing.T) { expectedNumUpdates = test.expectedNumUpdates failedUpdates, err := manager.UpdatePolicy( - context.Background(), + t.Context(), test.newPolicy, test.createMissingEdge, test.specifiedChanPoints...) diff --git a/routing/pathfind_test.go b/routing/pathfind_test.go index 2f746ac9e..6c23f794c 100644 --- a/routing/pathfind_test.go +++ b/routing/pathfind_test.go @@ -184,7 +184,7 @@ func makeTestGraph(t *testing.T, useCache bool) (*graphdb.ChannelGraph, func parseTestGraph(t *testing.T, useCache bool, path string) ( *testGraphInstance, error) { - ctx := context.Background() + ctx := t.Context() graphJSON, err := os.ReadFile(path) if err != nil { @@ -526,7 +526,7 @@ func createTestGraphFromChannels(t *testing.T, useCache bool, testChannels []*testChannel, source string, sourceFeatureBits ...lnwire.FeatureBit) (*testGraphInstance, error) { - ctx := context.Background() + ctx := t.Context() // We'll use this fake address for the IP address of all the nodes in // our tests. 
This value isn't needed for path finding so it doesn't @@ -1090,7 +1090,7 @@ func runBasicGraphPathFinding(t *testing.T, useCache bool) { func testBasicGraphPathFindingCase(t *testing.T, graphInstance *testGraphInstance, test *basicGraphPathFindingTestCase) { - ctx := context.Background() + ctx := t.Context() aliases := graphInstance.aliasMap expectedHops := test.expectedHops expectedHopCount := len(expectedHops) @@ -1235,7 +1235,7 @@ func runPathFindingWithAdditionalEdges(t *testing.T, useCache bool) { graph, err := parseTestGraph(t, useCache, basicGraphFilePath) require.NoError(t, err, "unable to create graph") - ctx := context.Background() + ctx := t.Context() sourceNode, err := graph.graph.SourceNode(ctx) require.NoError(t, err, "unable to fetch source node") @@ -1320,7 +1320,7 @@ func runPathFindingWithBlindedPathDuplicateHop(t *testing.T, useCache bool) { graph, err := parseTestGraph(t, useCache, basicGraphFilePath) require.NoError(t, err, "unable to create graph") - ctx := context.Background() + ctx := t.Context() sourceNode, err := graph.graph.SourceNode(ctx) require.NoError(t, err, "unable to fetch source node") @@ -1807,7 +1807,7 @@ func runPathNotAvailable(t *testing.T, useCache bool) { graph, err := parseTestGraph(t, useCache, basicGraphFilePath) require.NoError(t, err, "unable to create graph") - ctx := context.Background() + ctx := t.Context() sourceNode, err := graph.graph.SourceNode(ctx) require.NoError(t, err, "unable to fetch source node") @@ -1865,7 +1865,7 @@ func runDestTLVGraphFallback(t *testing.T, useCache bool) { ctx := newPathFindingTestContext(t, useCache, testChannels, "roasbeef") - sourceNode, err := ctx.graph.SourceNode(context.Background()) + sourceNode, err := ctx.graph.SourceNode(t.Context()) require.NoError(t, err, "unable to fetch source node") find := func(r *RestrictParams, @@ -2083,7 +2083,7 @@ func runPathInsufficientCapacity(t *testing.T, useCache bool) { graph, err := parseTestGraph(t, useCache, basicGraphFilePath) require.NoError(t, err, "unable to create graph") - ctx := context.Background() + ctx := t.Context() sourceNode, err := graph.graph.SourceNode(ctx) require.NoError(t, err, "unable to fetch source node") @@ -2114,7 +2114,7 @@ func runRouteFailMinHTLC(t *testing.T, useCache bool) { graph, err := parseTestGraph(t, useCache, basicGraphFilePath) require.NoError(t, err, "unable to create graph") - ctx := context.Background() + ctx := t.Context() sourceNode, err := graph.graph.SourceNode(ctx) require.NoError(t, err, "unable to fetch source node") @@ -2179,7 +2179,7 @@ func runRouteFailMaxHTLC(t *testing.T, useCache bool) { midEdge.MessageFlags = 1 midEdge.MaxHTLC = payAmt - 1 midEdge.LastUpdate = midEdge.LastUpdate.Add(time.Second) - err = graph.UpdateEdgePolicy(context.Background(), midEdge) + err = graph.UpdateEdgePolicy(t.Context(), midEdge) require.NoError(t, err) // We'll now attempt to route through that edge with a payment above @@ -2199,7 +2199,7 @@ func runRouteFailDisabledEdge(t *testing.T, useCache bool) { graph, err := parseTestGraph(t, useCache, basicGraphFilePath) require.NoError(t, err, "unable to create graph") - ctx := context.Background() + ctx := t.Context() sourceNode, err := graph.graph.SourceNode(ctx) require.NoError(t, err, "unable to fetch source node") @@ -2268,7 +2268,7 @@ func runPathSourceEdgesBandwidth(t *testing.T, useCache bool) { graph, err := parseTestGraph(t, useCache, basicGraphFilePath) require.NoError(t, err, "unable to create graph") - ctx := context.Background() + ctx := t.Context() sourceNode, err := 
graph.graph.SourceNode(ctx) require.NoError(t, err, "unable to fetch source node") @@ -3202,7 +3202,7 @@ func newPathFindingTestContext(t *testing.T, useCache bool, require.NoError(t, err, "unable to create graph") sourceNode, err := testGraphInstance.graph.SourceNode( - context.Background(), + t.Context(), ) require.NoError(t, err, "unable to fetch source node") diff --git a/routing/payment_lifecycle_test.go b/routing/payment_lifecycle_test.go index 8094f5075..7e94315a7 100644 --- a/routing/payment_lifecycle_test.go +++ b/routing/payment_lifecycle_test.go @@ -198,7 +198,7 @@ func sendPaymentAndAssertSucceeded(t *testing.T, // We now make a call to `resumePayment` and expect it to return the // preimage. go func() { - preimage, _, err := p.resumePayment(context.Background()) + preimage, _, err := p.resumePayment(t.Context()) resultChan <- &resumePaymentResult{ preimage: preimage, err: err, @@ -294,7 +294,7 @@ func TestCheckTimeoutTimedOut(t *testing.T) { t.Parallel() deadline := time.Now().Add(time.Nanosecond) - ctx, cancel := context.WithDeadline(context.Background(), deadline) + ctx, cancel := context.WithDeadline(t.Context(), deadline) defer cancel() p := createTestPaymentLifecycle() @@ -330,7 +330,7 @@ func TestCheckTimeoutTimedOut(t *testing.T) { // Make the timeout happens instantly. deadline = time.Now().Add(time.Nanosecond) - ctx, cancel = context.WithDeadline(context.Background(), deadline) + ctx, cancel = context.WithDeadline(t.Context(), deadline) defer cancel() // Sleep one millisecond to make sure it timed out. @@ -349,7 +349,7 @@ func TestCheckTimeoutTimedOut(t *testing.T) { func TestCheckTimeoutOnRouterQuit(t *testing.T) { t.Parallel() - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer cancel() p := createTestPaymentLifecycle() @@ -795,7 +795,7 @@ func TestResumePaymentFailOnFetchPayment(t *testing.T) { m.control.On("FetchPayment", p.identifier).Return(nil, errDummy) // Send the payment and assert it failed. - sendPaymentAndAssertError(t, context.Background(), p, errDummy) + sendPaymentAndAssertError(t, t.Context(), p, errDummy) // Expected collectResultAsync to not be called. require.Zero(t, m.collectResultsCount) @@ -831,7 +831,7 @@ func TestResumePaymentFailOnTimeout(t *testing.T) { // 3. make the timeout happens instantly and sleep one millisecond to // make sure it timed out. deadline := time.Now().Add(time.Nanosecond) - ctx, cancel := context.WithDeadline(context.Background(), deadline) + ctx, cancel := context.WithDeadline(t.Context(), deadline) defer cancel() time.Sleep(1 * time.Millisecond) @@ -882,7 +882,7 @@ func TestResumePaymentFailOnTimeoutErr(t *testing.T) { // Send the payment and assert it failed when router is shutting down. sendPaymentAndAssertError( - t, context.Background(), p, ErrRouterShuttingDown, + t, t.Context(), p, ErrRouterShuttingDown, ) // Expected collectResultAsync to not be called. @@ -900,7 +900,7 @@ func TestResumePaymentFailContextCancel(t *testing.T) { p, m := setupTestPaymentLifecycle(t) // Create the cancelable payment context. - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) paymentAmt := lnwire.MilliSatoshi(10000) @@ -977,7 +977,7 @@ func TestResumePaymentFailOnStepErr(t *testing.T) { m.payment.On("AllowMoreAttempts").Return(false, errDummy).Once() // Send the payment and assert it failed. 
-	sendPaymentAndAssertError(t, context.Background(), p, errDummy)
+	sendPaymentAndAssertError(t, t.Context(), p, errDummy)
 
 	// Expected collectResultAsync to not be called.
 	require.Zero(t, m.collectResultsCount)
@@ -1021,7 +1021,7 @@ func TestResumePaymentFailOnRequestRouteErr(t *testing.T) {
 	).Return(nil, errDummy).Once()
 
 	// Send the payment and assert it failed.
-	sendPaymentAndAssertError(t, context.Background(), p, errDummy)
+	sendPaymentAndAssertError(t, t.Context(), p, errDummy)
 
 	// Expected collectResultAsync to not be called.
 	require.Zero(t, m.collectResultsCount)
@@ -1081,7 +1081,7 @@ func TestResumePaymentFailOnRegisterAttemptErr(t *testing.T) {
 	).Return(nil, errDummy).Once()
 
 	// Send the payment and assert it failed.
-	sendPaymentAndAssertError(t, context.Background(), p, errDummy)
+	sendPaymentAndAssertError(t, t.Context(), p, errDummy)
 
 	// Expected collectResultAsync to not be called.
 	require.Zero(t, m.collectResultsCount)
@@ -1173,7 +1173,7 @@ func TestResumePaymentFailOnSendAttemptErr(t *testing.T) {
 	).Return(nil, errDummy).Once()
 
 	// Send the payment and assert it failed.
-	sendPaymentAndAssertError(t, context.Background(), p, errDummy)
+	sendPaymentAndAssertError(t, t.Context(), p, errDummy)
 
 	// Expected collectResultAsync to not be called.
 	require.Zero(t, m.collectResultsCount)
diff --git a/routing/router_test.go b/routing/router_test.go
index 702c29db9..acf7a5212 100644
--- a/routing/router_test.go
+++ b/routing/router_test.go
@@ -132,7 +132,7 @@ func createTestCtxFromGraphInstanceAssumeValid(t *testing.T,
 	)
 	require.NoError(t, err)
 
-	sourceNode, err := graphInstance.graph.SourceNode(context.Background())
+	sourceNode, err := graphInstance.graph.SourceNode(t.Context())
 	require.NoError(t, err)
 	sessionSource := &SessionSource{
 		GraphSessionFactory: graphInstance.graph,
@@ -1202,7 +1202,7 @@ func TestFindPathFeeWeighting(t *testing.T) {
 	var preImage [32]byte
 	copy(preImage[:], bytes.Repeat([]byte{9}, 32))
 
-	sourceNode, err := ctx.graph.SourceNode(context.Background())
+	sourceNode, err := ctx.graph.SourceNode(t.Context())
 	require.NoError(t, err, "unable to fetch source node")
 
 	amt := lnwire.MilliSatoshi(100)
@@ -2706,7 +2706,7 @@ func TestNewRouteRequest(t *testing.T) {
 // announcements for the channel vertexes to be able to use the channel.
 func TestAddEdgeUnknownVertexes(t *testing.T) {
 	t.Parallel()
-	ctxb := context.Background()
+	ctxb := t.Context()
 
 	const startingBlockHeight = 101
 	ctx := createTestCtxFromFile(t, startingBlockHeight, basicGraphFilePath)
diff --git a/sqldb/migrations_test.go b/sqldb/migrations_test.go
index 304a61189..063f810b0 100644
--- a/sqldb/migrations_test.go
+++ b/sqldb/migrations_test.go
@@ -1,7 +1,6 @@
 package sqldb
 
 import (
-	"context"
 	"database/sql"
 	"fmt"
 	"path/filepath"
@@ -75,7 +74,7 @@ func TestMigrations(t *testing.T) {
 // 2592000 seconds for AMP invoices.
 func testInvoiceExpiryMigration(t *testing.T, makeDB makeMigrationTestDB) {
 	t.Parallel()
-	ctxb := context.Background()
+	ctxb := t.Context()
 
 	// Create a new database that already has the first version of the
 	// native invoice schema.
@@ -276,7 +275,7 @@ func TestCustomMigration(t *testing.T) {
 		},
 	}
 
-	ctxb := context.Background()
+	ctxb := t.Context()
 	for _, test := range tests {
 		// checkSchemaVersion checks the database schema version against
 		// the expected version.
@@ -391,7 +390,7 @@ func TestCustomMigration(t *testing.T) {
 				"migrations", dbName)
 
 			_, err := fixture.db.ExecContext(
-				context.Background(), "CREATE DATABASE "+dbName,
+				t.Context(), "CREATE DATABASE "+dbName,
 			)
 			require.NoError(t, err)
 
@@ -490,7 +489,7 @@ func TestSchemaMigrationIdempotency(t *testing.T) {
 		)
 	})
 
-	ctxb := context.Background()
+	ctxb := t.Context()
 	require.NoError(
 		t, db.ApplyAllMigrations(ctxb, GetMigrations()),
 	)
@@ -536,7 +535,7 @@ func TestSchemaMigrationIdempotency(t *testing.T) {
 			"migrations", dbName)
 
 		_, err := fixture.db.ExecContext(
-			context.Background(), "CREATE DATABASE "+dbName,
+			t.Context(), "CREATE DATABASE "+dbName,
 		)
 		require.NoError(t, err)
 
@@ -550,7 +549,7 @@ func TestSchemaMigrationIdempotency(t *testing.T) {
 		db, err = NewPostgresStore(cfg)
 		require.NoError(t, err)
 
-		ctxb := context.Background()
+		ctxb := t.Context()
 		require.NoError(
 			t, db.ApplyAllMigrations(ctxb, GetMigrations()),
 		)
@@ -601,7 +600,7 @@ func TestMigrationSucceedsAfterDirtyStateMigrationFailure19RC1(t *testing.T) {
 		failingSchemaVersion = 3
 	)
 
-	ctxb := context.Background()
+	ctxb := t.Context()
 	migrations := GetMigrations()
 	migrations = migrations[:maxSchemaVersionBefore19RC1]
 	lastMigration := migrations[len(migrations)-1]
@@ -692,7 +691,7 @@ func TestMigrationSucceedsAfterDirtyStateMigrationFailure19RC1(t *testing.T) {
 			"migrations", dbName)
 
 		_, err := fixture.db.ExecContext(
-			context.Background(), "CREATE DATABASE "+dbName,
+			t.Context(), "CREATE DATABASE "+dbName,
 		)
 		require.NoError(t, err)
 
diff --git a/sqldb/paginate_test.go b/sqldb/paginate_test.go
index 20abf422e..f229da6f9 100644
--- a/sqldb/paginate_test.go
+++ b/sqldb/paginate_test.go
@@ -18,7 +18,7 @@ import (
 func TestExecuteBatchQuery(t *testing.T) {
 	t.Parallel()
 
-	ctx := context.Background()
+	ctx := t.Context()
 
 	t.Run("empty input returns nil", func(t *testing.T) {
 		var (
@@ -245,7 +245,7 @@
 // executing the query in pages.
 func TestSQLSliceQueries(t *testing.T) {
 	t.Parallel()
-	ctx := context.Background()
+	ctx := t.Context()
 
 	db := NewTestDB(t)
 
@@ -327,7 +327,7 @@ func TestSQLSliceQueries(t *testing.T) {
 // page until all items are processed or an error occurs.
 func TestExecutePaginatedQuery(t *testing.T) {
 	t.Parallel()
-	ctx := context.Background()
+	ctx := t.Context()
 
 	type testItem struct {
 		id int64
@@ -944,7 +944,7 @@ func TestExecuteCollectAndBatchWithSharedDataQuery(t *testing.T) {
 
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			ctx := context.Background()
+			ctx := t.Context()
 			cfg := &QueryConfig{
 				MaxPageSize: tt.maxPageSize,
 			}
diff --git a/walletunlocker/service_test.go b/walletunlocker/service_test.go
index 7de2961cc..3572b2224 100644
--- a/walletunlocker/service_test.go
+++ b/walletunlocker/service_test.go
@@ -161,7 +161,7 @@ func TestGenSeed(t *testing.T) {
 		SeedEntropy: testEntropy[:],
 	}
 
-	ctx := context.Background()
+	ctx := t.Context()
 	seedResp, err := service.GenSeed(ctx, genSeedReq)
 	require.NoError(t, err)
 
@@ -193,7 +193,7 @@ func TestGenSeedGenerateEntropy(t *testing.T) {
 		AezeedPassphrase: aezeedPass,
 	}
 
-	ctx := context.Background()
+	ctx := t.Context()
 	seedResp, err := service.GenSeed(ctx, genSeedReq)
 	require.NoError(t, err)
 
@@ -228,7 +228,7 @@ func TestGenSeedInvalidEntropy(t *testing.T) {
 	}
 
 	// We should get an error now since the entropy source was invalid.
-	ctx := context.Background()
+	ctx := t.Context()
 	_, err := service.GenSeed(ctx, genSeedReq)
 	require.Error(t, err)
 	require.Contains(t, err.Error(), "incorrect entropy length")
@@ -256,7 +256,7 @@ func TestInitWallet(t *testing.T) {
 	// command to the wallet. This should check the validity of the cipher
 	// seed, then send over the initialization information over the init
 	// channel.
-	ctx := context.Background()
+	ctx := t.Context()
 	req := &lnrpc.InitWalletRequest{
 		WalletPassword:     testPassword,
 		CipherSeedMnemonic: mnemonic[:],
@@ -339,7 +339,7 @@ func TestCreateWalletInvalidEntropy(t *testing.T) {
 		AezeedPassphrase: []byte("fake pass"),
 	}
 
-	ctx := context.Background()
+	ctx := t.Context()
 	_, err := service.InitWallet(ctx, req)
 	require.Error(t, err)
 }
@@ -359,7 +359,7 @@ func TestUnlockWallet(t *testing.T) {
 		testNetParams, nil, true, testLoaderOpts(testDir),
 	)
 
-	ctx := context.Background()
+	ctx := t.Context()
 	req := &lnrpc.UnlockWalletRequest{
 		WalletPassword: testPassword,
 		RecoveryWindow: int32(testRecoveryWindow),
@@ -448,7 +448,7 @@ func TestChangeWalletPasswordNewRootKey(t *testing.T) {
 	)
 	service.SetMacaroonDB(store.Backend)
 
-	ctx := context.Background()
+	ctx := t.Context()
 	newPassword := []byte("hunter2???")
 
 	req := &lnrpc.ChangePasswordRequest{
@@ -590,7 +590,7 @@ func TestChangeWalletPasswordStateless(t *testing.T) {
 		NewPassword:        testPassword,
 		NewMacaroonRootKey: true,
 	}
-	ctx := context.Background()
+	ctx := t.Context()
 	_, err = service.ChangePassword(ctx, badReq)
 	require.Error(t, err)
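
For reference, the substitution applied throughout these hunks, shown as a
minimal standalone sketch outside the patch proper. `doWork` is a hypothetical
helper invented for illustration; `testing.T.Context` and the `context`
helpers are the standard library as of Go 1.24.

package example

import (
	"context"
	"testing"
	"time"
)

// doWork stands in for any context-aware code under test.
func doWork(ctx context.Context) error {
	select {
	case <-ctx.Done():
		// The context was canceled or its deadline passed first.
		return ctx.Err()
	case <-time.After(10 * time.Millisecond):
		// Simulated work finished in time.
		return nil
	}
}

// TestWithTestContext shows t.Context() replacing context.Background(),
// both directly and as the parent of derived contexts.
func TestWithTestContext(t *testing.T) {
	t.Parallel()

	// Before: ctx := context.Background()
	ctx := t.Context()
	if err := doWork(ctx); err != nil {
		t.Fatalf("doWork: %v", err)
	}

	// Derived deadline/cancel contexts wrap t.Context() the same way.
	dctx, cancel := context.WithTimeout(t.Context(), time.Second)
	defer cancel()
	if err := doWork(dctx); err != nil {
		t.Fatalf("doWork with deadline: %v", err)
	}

	// Caveat: t.Context() is canceled just before t.Cleanup-registered
	// functions run, so cleanup code must not reuse it; a fresh
	// context.Background() remains appropriate there.
	t.Cleanup(func() {
		if ctx.Err() == nil {
			t.Error("expected t.Context() to be canceled in cleanup")
		}
	})
}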