From 9a10c80bcb48f44e84697eb4d34f3ef12b0d5e70 Mon Sep 17 00:00:00 2001
From: Tommy Volk
Date: Thu, 5 May 2022 20:11:50 +0000
Subject: [PATCH] multi: move many t.Fatalf calls to require.NoError

---
 autopilot/agent_test.go | 33 +-
 autopilot/prefattach_test.go | 5 +-
 brontide/noise_test.go | 65 +-
 chainntnfs/bitcoindnotify/bitcoind_test.go | 61 +-
 chainntnfs/btcdnotify/btcd_test.go | 69 +-
 chainntnfs/height_hint_cache_test.go | 20 +-
 chainntnfs/test/test_interface.go | 264 +--
 chainntnfs/test_utils.go | 25 +-
 chainntnfs/txnotifier_test.go | 396 ++---
 chanbackup/backup_test.go | 21 +-
 chanbackup/backupfile_test.go | 26 +-
 chanbackup/multi_test.go | 10 +-
 chanbackup/pubsub_test.go | 13 +-
 chanbackup/recover_test.go | 9 +-
 chanbackup/single_test.go | 21 +-
 channeldb/channel_test.go | 156 +-
 channeldb/db_test.go | 104 +-
 channeldb/forwarding_log_test.go | 37 +-
 channeldb/forwarding_package_test.go | 4 +-
 channeldb/graph_test.go | 488 ++----
 channeldb/invoice_test.go | 80 +-
 channeldb/meta_test.go | 17 +-
 channeldb/nodes_test.go | 25 +-
 channeldb/payment_control_test.go | 100 +-
 channeldb/payments_test.go | 12 +-
 channeldb/waitingproof_test.go | 9 +-
 channeldb/witness_cache_test.go | 33 +-
 cluster/etcd_elector_test.go | 4 +-
 contractcourt/breacharbiter_test.go | 44 +-
 contractcourt/briefcase_test.go | 97 +-
 contractcourt/chain_arbitrator_test.go | 25 +-
 contractcourt/chain_watcher_test.go | 21 +-
 contractcourt/channel_arbitrator_test.go | 72 +-
 contractcourt/nursery_store_test.go | 41 +-
 contractcourt/utxonursery_test.go | 5 +-
 discovery/gossiper_test.go | 512 ++----
 discovery/message_store_test.go | 21 +-
 discovery/syncer_test.go | 36 +-
 feature/manager_internal_test.go | 5 +-
 funding/manager_test.go | 44 +-
 htlcswitch/circuit_test.go | 85 +-
 htlcswitch/decayedlog_test.go | 73 +-
 htlcswitch/hop/iterator_test.go | 5 +-
 htlcswitch/link_test.go | 460 ++---
 htlcswitch/mailbox_test.go | 16 +-
 htlcswitch/payment_result_test.go | 12 +-
 htlcswitch/switch_test.go | 364 +---
 htlcswitch/test_utils.go | 21 +-
 input/script_utils_test.go | 68 +-
 input/size_test.go | 32 +-
 invoices/invoice_expiry_watcher_test.go | 5 +-
 invoices/invoiceregistry_test.go | 12 +-
 invoices/test_utils_test.go | 8 +-
 keychain/bench_test.go | 4 +-
 lncfg/address_test.go | 4 +-
 lnwallet/chainfee/estimator_test.go | 16 +-
 lnwallet/chancloser/chancloser_test.go | 5 +-
 lnwallet/chanfunding/psbt_assembler_test.go | 64 +-
 lnwallet/channel_test.go | 1548 +++++------------
 lnwallet/chanvalidate/validate_test.go | 5 +-
 lnwallet/test/test_interface.go | 444 ++---
 lnwallet/transactions_test.go | 40 +-
 lnwire/extra_bytes_test.go | 5 +-
 lnwire/netaddress_test.go | 9 +-
 lnwire/onion_error_test.go | 21 +-
 macaroons/constraints_test.go | 16 +-
 macaroons/service_test.go | 48 +-
 netann/chan_status_manager_test.go | 21 +-
 netann/channel_announcement_test.go | 5 +-
 peer/brontide_test.go | 88 +-
 pool/worker_test.go | 9 +-
 routing/chainview/interface_test.go | 217 +--
 routing/control_tower_test.go | 33 +-
 routing/integrated_routing_test.go | 8 +-
 routing/notifications_test.go | 72 +-
 routing/pathfind_test.go | 228 +--
 routing/payment_lifecycle_test.go | 8 +-
 routing/router_test.go | 160 +-
 server_test.go | 33 +-
 shachain/element_test.go | 57 +-
 sweep/store_test.go | 5 +-
 sweep/sweeper_test.go | 4 +-
 sweep/walletsweep_test.go | 5 +-
 tlv/stream_test.go | 4 +-
 tor/cmd_onion_test.go | 8 +-
 watchtower/blob/justice_kit_test.go | 4 +-
 watchtower/lookout/lookout_test.go | 17 +-
 .../wtclient/backup_task_internal_test.go | 9 +-
 .../wtclient/candidate_iterator_test.go | 5 +-
 watchtower/wtclient/client_test.go | 32 +-
 watchtower/wtserver/server_test.go | 13 +-
 zpay32/invoice_internal_test.go | 5 +-
 92 files changed, 1905 insertions(+), 5565 deletions(-)

diff --git a/autopilot/agent_test.go b/autopilot/agent_test.go
index 037a49626..ccae20a14 100644
--- a/autopilot/agent_test.go
+++ b/autopilot/agent_test.go
@@ -11,6 +11,7 @@ import (
 	"github.com/btcsuite/btcd/btcec/v2"
 	"github.com/btcsuite/btcd/btcutil"
 	"github.com/btcsuite/btcd/wire"
+	"github.com/stretchr/testify/require"
 )
 
 type moreChansResp struct {
@@ -160,9 +161,7 @@ func setup(t *testing.T, initialChans []LocalChannel) (*testContext, func()) {
 	// First, we'll create all the dependencies that we'll need in order to
 	// create the autopilot agent.
 	self, err := randKey()
-	if err != nil {
-		t.Fatalf("unable to generate key: %v", err)
-	}
+	require.NoError(t, err, "unable to generate key")
 
 	quit := make(chan struct{})
 	heuristic := &mockHeuristic{
@@ -216,9 +215,7 @@ func setup(t *testing.T, initialChans []LocalChannel) (*testContext, func()) {
 	}
 
 	agent, err := New(testCfg, initialChans)
-	if err != nil {
-		t.Fatalf("unable to create agent: %v", err)
-	}
+	require.NoError(t, err, "unable to create agent")
 	ctx.agent = agent
 
 	// With the autopilot agent and all its dependencies we'll start the
@@ -331,9 +328,7 @@ func TestAgentHeuristicUpdateSignal(t *testing.T) {
 	defer cleanup()
 
 	pub, err := testCtx.graph.addRandNode()
-	if err != nil {
-		t.Fatalf("unable to generate key: %v", err)
-	}
+	require.NoError(t, err, "unable to generate key")
 
 	// We'll send an initial "no" response to advance the agent past its
 	// initial check.
@@ -397,9 +392,7 @@ func TestAgentChannelFailureSignal(t *testing.T) {
 	testCtx.chanController = &mockFailingChanController{}
 
 	node, err := testCtx.graph.addRandNode()
-	if err != nil {
-		t.Fatalf("unable to add node: %v", err)
-	}
+	require.NoError(t, err, "unable to add node")
 
 	// First ensure the agent will attempt to open a new channel. Return
 	// that we need more channels, and have 5BTC to use.
@@ -664,9 +657,7 @@ func TestAgentPendingChannelState(t *testing.T) {
 
 	// We'll only return a single directive for a pre-chosen node.
 	nodeKey, err := testCtx.graph.addRandNode()
-	if err != nil {
-		t.Fatalf("unable to generate key: %v", err)
-	}
+	require.NoError(t, err, "unable to generate key")
 	nodeID := NewNodeID(nodeKey)
 	nodeDirective := &NodeScore{
 		NodeID: nodeID,
@@ -876,9 +867,7 @@ func TestAgentSkipPendingConns(t *testing.T) {
 
 	// We'll only return a single directive for a pre-chosen node.
 	nodeKey, err := testCtx.graph.addRandNode()
-	if err != nil {
-		t.Fatalf("unable to generate key: %v", err)
-	}
+	require.NoError(t, err, "unable to generate key")
 	nodeID := NewNodeID(nodeKey)
 	nodeDirective := &NodeScore{
 		NodeID: nodeID,
@@ -888,9 +877,7 @@ func TestAgentSkipPendingConns(t *testing.T) {
 	// We'll also add a second node to the graph, to keep the first one
 	// company.
 	nodeKey2, err := testCtx.graph.addRandNode()
-	if err != nil {
-		t.Fatalf("unable to generate key: %v", err)
-	}
+	require.NoError(t, err, "unable to generate key")
 	nodeID2 := NewNodeID(nodeKey2)
 
 	// We'll send an initial "yes" response to advance the agent past its
@@ -1062,9 +1049,7 @@ func TestAgentQuitWhenPendingConns(t *testing.T) {
 
 	// We'll only return a single directive for a pre-chosen node.
nodeKey, err := testCtx.graph.addRandNode() - if err != nil { - t.Fatalf("unable to generate key: %v", err) - } + require.NoError(t, err, "unable to generate key") nodeID := NewNodeID(nodeKey) nodeDirective := &NodeScore{ NodeID: nodeID, diff --git a/autopilot/prefattach_test.go b/autopilot/prefattach_test.go index 3b1dcfec3..64a27802b 100644 --- a/autopilot/prefattach_test.go +++ b/autopilot/prefattach_test.go @@ -11,6 +11,7 @@ import ( "github.com/btcsuite/btcd/btcec/v2" "github.com/btcsuite/btcd/btcutil" "github.com/lightningnetwork/lnd/channeldb" + "github.com/stretchr/testify/require" ) type genGraphFunc func() (testGraph, func(), error) @@ -77,9 +78,7 @@ func TestPrefAttachmentSelectEmptyGraph(t *testing.T) { // Create a random public key, which we will query to get a score for. pub, err := randKey() - if err != nil { - t.Fatalf("unable to generate key: %v", err) - } + require.NoError(t, err, "unable to generate key") nodes := map[NodeID]struct{}{ NewNodeID(pub): {}, diff --git a/brontide/noise_test.go b/brontide/noise_test.go index 20c6d6b24..edddbf200 100644 --- a/brontide/noise_test.go +++ b/brontide/noise_test.go @@ -14,6 +14,7 @@ import ( "github.com/lightningnetwork/lnd/keychain" "github.com/lightningnetwork/lnd/lnwire" "github.com/lightningnetwork/lnd/tor" + "github.com/stretchr/testify/require" ) type maybeNetConn struct { @@ -103,9 +104,7 @@ func TestConnectionCorrectness(t *testing.T) { // into local variables. If the initial crypto handshake fails, then // we'll get a non-nil error here. localConn, remoteConn, cleanUp, err := establishTestConnection() - if err != nil { - t.Fatalf("unable to establish test connection: %v", err) - } + require.NoError(t, err, "unable to establish test connection") defer cleanUp() // Test out some message full-message reads. @@ -155,9 +154,7 @@ func TestConnectionCorrectness(t *testing.T) { // stalled. func TestConcurrentHandshakes(t *testing.T) { listener, netAddr, err := makeListener() - if err != nil { - t.Fatalf("unable to create listener connection: %v", err) - } + require.NoError(t, err, "unable to create listener connection") defer listener.Close() const nblocking = 5 @@ -194,9 +191,7 @@ func TestConcurrentHandshakes(t *testing.T) { // Now, construct a new private key and use the brontide dialer to // connect to the listener. remotePriv, err := btcec.NewPrivateKey() - if err != nil { - t.Fatalf("unable to generate private key: %v", err) - } + require.NoError(t, err, "unable to generate private key") remoteKeyECDH := &keychain.PrivKeyECDH{PrivKey: remotePriv} go func() { @@ -210,9 +205,7 @@ func TestConcurrentHandshakes(t *testing.T) { // This connection should be accepted without error, as the brontide // connection should bypass stalled tcp connections. conn, err := listener.Accept() - if err != nil { - t.Fatalf("unable to accept dial: %v", err) - } + require.NoError(t, err, "unable to accept dial") defer conn.Close() result := <-connChan @@ -265,9 +258,7 @@ func TestWriteMessageChunking(t *testing.T) { // into local variables. If the initial crypto handshake fails, then // we'll get a non-nil error here. 
localConn, remoteConn, cleanUp, err := establishTestConnection() - if err != nil { - t.Fatalf("unable to establish test connection: %v", err) - } + require.NoError(t, err, "unable to establish test connection") defer cleanUp() // Attempt to write a message which is over 3x the max allowed payload @@ -322,9 +313,7 @@ func TestBolt0008TestVectors(t *testing.T) { // vectors at the appendix of BOLT-0008 initiatorKeyBytes, err := hex.DecodeString("1111111111111111111111" + "111111111111111111111111111111111111111111") - if err != nil { - t.Fatalf("unable to decode hex: %v", err) - } + require.NoError(t, err, "unable to decode hex") initiatorPriv, _ := btcec.PrivKeyFromBytes( initiatorKeyBytes, ) @@ -333,9 +322,7 @@ func TestBolt0008TestVectors(t *testing.T) { // We'll then do the same for the responder. responderKeyBytes, err := hex.DecodeString("212121212121212121212121" + "2121212121212121212121212121212121212121") - if err != nil { - t.Fatalf("unable to decode hex: %v", err) - } + require.NoError(t, err, "unable to decode hex") responderPriv, responderPub := btcec.PrivKeyFromBytes( responderKeyBytes, ) @@ -382,15 +369,11 @@ func TestBolt0008TestVectors(t *testing.T) { // the payload return is _exactly_ the same as what's specified within // the test vectors. actOne, err := initiator.GenActOne() - if err != nil { - t.Fatalf("unable to generate act one: %v", err) - } + require.NoError(t, err, "unable to generate act one") expectedActOne, err := hex.DecodeString("00036360e856310ce5d294e" + "8be33fc807077dc56ac80d95d9cd4ddbd21325eff73f70df608655115" + "1f58b8afe6c195782c6a") - if err != nil { - t.Fatalf("unable to parse expected act one: %v", err) - } + require.NoError(t, err, "unable to parse expected act one") if !bytes.Equal(expectedActOne, actOne[:]) { t.Fatalf("act one mismatch: expected %x, got %x", expectedActOne, actOne) @@ -407,15 +390,11 @@ func TestBolt0008TestVectors(t *testing.T) { // produce the _exact_ same byte stream as advertised within the spec's // test vectors. actTwo, err := responder.GenActTwo() - if err != nil { - t.Fatalf("unable to generate act two: %v", err) - } + require.NoError(t, err, "unable to generate act two") expectedActTwo, err := hex.DecodeString("0002466d7fcae563e5cb09a0" + "d1870bb580344804617879a14949cf22285f1bae3f276e2470b93aac58" + "3c9ef6eafca3f730ae") - if err != nil { - t.Fatalf("unable to parse expected act two: %v", err) - } + require.NoError(t, err, "unable to parse expected act two") if !bytes.Equal(expectedActTwo, actTwo[:]) { t.Fatalf("act two mismatch: expected %x, got %x", expectedActTwo, actTwo) @@ -430,15 +409,11 @@ func TestBolt0008TestVectors(t *testing.T) { // At the final step, we'll generate the last act from the initiator // and once again verify that it properly matches the test vectors. actThree, err := initiator.GenActThree() - if err != nil { - t.Fatalf("unable to generate act three: %v", err) - } + require.NoError(t, err, "unable to generate act three") expectedActThree, err := hex.DecodeString("00b9e3a702e93e3a9948c2e" + "d6e5fd7590a6e1c3a0344cfc9d5b57357049aa22355361aa02e55a8f" + "c28fef5bd6d71ad0c38228dc68b1c466263b47fdf31e560e139ba") - if err != nil { - t.Fatalf("unable to parse expected act three: %v", err) - } + require.NoError(t, err, "unable to parse expected act three") if !bytes.Equal(expectedActThree, actThree[:]) { t.Fatalf("act three mismatch: expected %x, got %x", expectedActThree, actThree) @@ -454,20 +429,14 @@ func TestBolt0008TestVectors(t *testing.T) { // proper symmetric encryption keys. 
sendingKey, err := hex.DecodeString("969ab31b4d288cedf6218839b27a3e2" + "140827047f2c0f01bf5c04435d43511a9") - if err != nil { - t.Fatalf("unable to parse sending key: %v", err) - } + require.NoError(t, err, "unable to parse sending key") recvKey, err := hex.DecodeString("bb9020b8965f4df047e07f955f3c4b884" + "18984aadc5cdb35096b9ea8fa5c3442") - if err != nil { - t.Fatalf("unable to parse receiving key: %v", err) - } + require.NoError(t, err, "unable to parse receiving key") chainKey, err := hex.DecodeString("919219dbb2920afa8db80f9a51787a840" + "bcf111ed8d588caf9ab4be716e42b01") - if err != nil { - t.Fatalf("unable to parse chaining key: %v", err) - } + require.NoError(t, err, "unable to parse chaining key") if !bytes.Equal(initiator.sendCipher.secretKey[:], sendingKey) { t.Fatalf("sending key mismatch: expected %x, got %x", diff --git a/chainntnfs/bitcoindnotify/bitcoind_test.go b/chainntnfs/bitcoindnotify/bitcoind_test.go index f9511ffe0..39a29e9b3 100644 --- a/chainntnfs/bitcoindnotify/bitcoind_test.go +++ b/chainntnfs/bitcoindnotify/bitcoind_test.go @@ -15,6 +15,7 @@ import ( "github.com/lightningnetwork/lnd/blockcache" "github.com/lightningnetwork/lnd/chainntnfs" "github.com/lightningnetwork/lnd/channeldb" + "github.com/stretchr/testify/require" ) var ( @@ -35,20 +36,14 @@ func initHintCache(t *testing.T) *chainntnfs.HeightHintCache { t.Helper() tempDir, err := ioutil.TempDir("", "kek") - if err != nil { - t.Fatalf("unable to create temp dir: %v", err) - } + require.NoError(t, err, "unable to create temp dir") db, err := channeldb.Open(tempDir) - if err != nil { - t.Fatalf("unable to create db: %v", err) - } + require.NoError(t, err, "unable to create db") testCfg := chainntnfs.CacheConfig{ QueryDisable: false, } hintCache, err := chainntnfs.NewHeightHintCache(testCfg, db.Backend) - if err != nil { - t.Fatalf("unable to create hint cache: %v", err) - } + require.NoError(t, err, "unable to create hint cache") return hintCache } @@ -81,9 +76,7 @@ func syncNotifierWithMiner(t *testing.T, notifier *BitcoindNotifier, t.Helper() _, minerHeight, err := miner.Client.GetBestBlock() - if err != nil { - t.Fatalf("unable to retrieve miner's current height: %v", err) - } + require.NoError(t, err, "unable to retrieve miner's current height") timeout := time.After(10 * time.Second) for { @@ -139,13 +132,9 @@ func testHistoricalConfDetailsTxIndex(t *testing.T, rpcPolling bool) { var unknownHash chainhash.Hash copy(unknownHash[:], bytes.Repeat([]byte{0x10}, 32)) unknownConfReq, err := chainntnfs.NewConfRequest(&unknownHash, testScript) - if err != nil { - t.Fatalf("unable to create conf request: %v", err) - } + require.NoError(t, err, "unable to create conf request") _, txStatus, err := notifier.historicalConfDetails(unknownConfReq, 0, 0) - if err != nil { - t.Fatalf("unable to retrieve historical conf details: %v", err) - } + require.NoError(t, err, "unable to retrieve historical conf details") switch txStatus { case chainntnfs.TxNotFoundIndex: @@ -158,22 +147,16 @@ func testHistoricalConfDetailsTxIndex(t *testing.T, rpcPolling bool) { // Now, we'll create a test transaction, confirm it, and attempt to // retrieve its confirmation details. 
txid, pkScript, err := chainntnfs.GetTestTxidAndScript(miner) - if err != nil { - t.Fatalf("unable to create tx: %v", err) - } + require.NoError(t, err, "unable to create tx") if err := chainntnfs.WaitForMempoolTx(miner, txid); err != nil { t.Fatal(err) } confReq, err := chainntnfs.NewConfRequest(txid, pkScript) - if err != nil { - t.Fatalf("unable to create conf request: %v", err) - } + require.NoError(t, err, "unable to create conf request") // The transaction should be found in the mempool at this point. _, txStatus, err = notifier.historicalConfDetails(confReq, 0, 0) - if err != nil { - t.Fatalf("unable to retrieve historical conf details: %v", err) - } + require.NoError(t, err, "unable to retrieve historical conf details") // Since it has yet to be included in a block, it should have been found // within the mempool. @@ -193,9 +176,7 @@ func testHistoricalConfDetailsTxIndex(t *testing.T, rpcPolling bool) { syncNotifierWithMiner(t, notifier, miner) _, txStatus, err = notifier.historicalConfDetails(confReq, 0, 0) - if err != nil { - t.Fatalf("unable to retrieve historical conf details: %v", err) - } + require.NoError(t, err, "unable to retrieve historical conf details") // Since the backend node's txindex is enabled and the transaction has // confirmed, we should be able to retrieve it using the txindex. @@ -238,16 +219,12 @@ func testHistoricalConfDetailsNoTxIndex(t *testing.T, rpcpolling bool) { var unknownHash chainhash.Hash copy(unknownHash[:], bytes.Repeat([]byte{0x10}, 32)) unknownConfReq, err := chainntnfs.NewConfRequest(&unknownHash, testScript) - if err != nil { - t.Fatalf("unable to create conf request: %v", err) - } + require.NoError(t, err, "unable to create conf request") broadcastHeight := syncNotifierWithMiner(t, notifier, miner) _, txStatus, err := notifier.historicalConfDetails( unknownConfReq, uint32(broadcastHeight), uint32(broadcastHeight), ) - if err != nil { - t.Fatalf("unable to retrieve historical conf details: %v", err) - } + require.NoError(t, err, "unable to retrieve historical conf details") switch txStatus { case chainntnfs.TxNotFoundManually: @@ -267,9 +244,7 @@ func testHistoricalConfDetailsNoTxIndex(t *testing.T, rpcpolling bool) { outpoint, output, privKey := chainntnfs.CreateSpendableOutput(t, miner) spendTx := chainntnfs.CreateSpendTx(t, outpoint, output, privKey) spendTxHash, err := miner.Client.SendRawTransaction(spendTx, true) - if err != nil { - t.Fatalf("unable to broadcast tx: %v", err) - } + require.NoError(t, err, "unable to broadcast tx") if err := chainntnfs.WaitForMempoolTx(miner, spendTxHash); err != nil { t.Fatalf("tx not relayed to miner: %v", err) } @@ -280,16 +255,12 @@ func testHistoricalConfDetailsNoTxIndex(t *testing.T, rpcpolling bool) { // Ensure the notifier and miner are synced to the same height to ensure // we can find the transaction when manually scanning the chain. 
confReq, err := chainntnfs.NewConfRequest(&outpoint.Hash, output.PkScript) - if err != nil { - t.Fatalf("unable to create conf request: %v", err) - } + require.NoError(t, err, "unable to create conf request") currentHeight := syncNotifierWithMiner(t, notifier, miner) _, txStatus, err = notifier.historicalConfDetails( confReq, uint32(broadcastHeight), uint32(currentHeight), ) - if err != nil { - t.Fatalf("unable to retrieve historical conf details: %v", err) - } + require.NoError(t, err, "unable to retrieve historical conf details") // Since the backend node's txindex is disabled and the transaction has // confirmed, we should be able to find it by falling back to scanning diff --git a/chainntnfs/btcdnotify/btcd_test.go b/chainntnfs/btcdnotify/btcd_test.go index 16782d463..4ff3b89d4 100644 --- a/chainntnfs/btcdnotify/btcd_test.go +++ b/chainntnfs/btcdnotify/btcd_test.go @@ -13,6 +13,7 @@ import ( "github.com/lightningnetwork/lnd/blockcache" "github.com/lightningnetwork/lnd/chainntnfs" "github.com/lightningnetwork/lnd/channeldb" + "github.com/stretchr/testify/require" ) var ( @@ -33,20 +34,14 @@ func initHintCache(t *testing.T) *chainntnfs.HeightHintCache { t.Helper() tempDir, err := ioutil.TempDir("", "kek") - if err != nil { - t.Fatalf("unable to create temp dir: %v", err) - } + require.NoError(t, err, "unable to create temp dir") db, err := channeldb.Open(tempDir) - if err != nil { - t.Fatalf("unable to create db: %v", err) - } + require.NoError(t, err, "unable to create db") testCfg := chainntnfs.CacheConfig{ QueryDisable: false, } hintCache, err := chainntnfs.NewHeightHintCache(testCfg, db.Backend) - if err != nil { - t.Fatalf("unable to create hint cache: %v", err) - } + require.NoError(t, err, "unable to create hint cache") return hintCache } @@ -61,9 +56,7 @@ func setUpNotifier(t *testing.T, h *rpctest.Harness) *BtcdNotifier { notifier, err := New( &rpcCfg, chainntnfs.NetParams, hintCache, hintCache, blockCache, ) - if err != nil { - t.Fatalf("unable to create notifier: %v", err) - } + require.NoError(t, err, "unable to create notifier") if err := notifier.Start(); err != nil { t.Fatalf("unable to start notifier: %v", err) } @@ -90,13 +83,9 @@ func TestHistoricalConfDetailsTxIndex(t *testing.T) { var unknownHash chainhash.Hash copy(unknownHash[:], bytes.Repeat([]byte{0x10}, 32)) unknownConfReq, err := chainntnfs.NewConfRequest(&unknownHash, testScript) - if err != nil { - t.Fatalf("unable to create conf request: %v", err) - } + require.NoError(t, err, "unable to create conf request") _, txStatus, err := notifier.historicalConfDetails(unknownConfReq, 0, 0) - if err != nil { - t.Fatalf("unable to retrieve historical conf details: %v", err) - } + require.NoError(t, err, "unable to retrieve historical conf details") switch txStatus { case chainntnfs.TxNotFoundIndex: @@ -109,22 +98,16 @@ func TestHistoricalConfDetailsTxIndex(t *testing.T) { // Now, we'll create a test transaction and attempt to retrieve its // confirmation details. txid, pkScript, err := chainntnfs.GetTestTxidAndScript(harness) - if err != nil { - t.Fatalf("unable to create tx: %v", err) - } + require.NoError(t, err, "unable to create tx") if err := chainntnfs.WaitForMempoolTx(harness, txid); err != nil { t.Fatalf("unable to find tx in the mempool: %v", err) } confReq, err := chainntnfs.NewConfRequest(txid, pkScript) - if err != nil { - t.Fatalf("unable to create conf request: %v", err) - } + require.NoError(t, err, "unable to create conf request") // The transaction should be found in the mempool at this point. 
_, txStatus, err = notifier.historicalConfDetails(confReq, 0, 0) - if err != nil { - t.Fatalf("unable to retrieve historical conf details: %v", err) - } + require.NoError(t, err, "unable to retrieve historical conf details") // Since it has yet to be included in a block, it should have been found // within the mempool. @@ -142,9 +125,7 @@ func TestHistoricalConfDetailsTxIndex(t *testing.T) { } _, txStatus, err = notifier.historicalConfDetails(confReq, 0, 0) - if err != nil { - t.Fatalf("unable to retrieve historical conf details: %v", err) - } + require.NoError(t, err, "unable to retrieve historical conf details") // Since the backend node's txindex is enabled and the transaction has // confirmed, we should be able to retrieve it using the txindex. @@ -174,13 +155,9 @@ func TestHistoricalConfDetailsNoTxIndex(t *testing.T) { var unknownHash chainhash.Hash copy(unknownHash[:], bytes.Repeat([]byte{0x10}, 32)) unknownConfReq, err := chainntnfs.NewConfRequest(&unknownHash, testScript) - if err != nil { - t.Fatalf("unable to create conf request: %v", err) - } + require.NoError(t, err, "unable to create conf request") _, txStatus, err := notifier.historicalConfDetails(unknownConfReq, 0, 0) - if err != nil { - t.Fatalf("unable to retrieve historical conf details: %v", err) - } + require.NoError(t, err, "unable to retrieve historical conf details") switch txStatus { case chainntnfs.TxNotFoundManually: @@ -194,26 +171,18 @@ func TestHistoricalConfDetailsNoTxIndex(t *testing.T) { // confirmation details. We'll note its broadcast height to use as the // height hint when manually scanning the chain. _, currentHeight, err := harness.Client.GetBestBlock() - if err != nil { - t.Fatalf("unable to retrieve current height: %v", err) - } + require.NoError(t, err, "unable to retrieve current height") txid, pkScript, err := chainntnfs.GetTestTxidAndScript(harness) - if err != nil { - t.Fatalf("unable to create tx: %v", err) - } + require.NoError(t, err, "unable to create tx") if err := chainntnfs.WaitForMempoolTx(harness, txid); err != nil { t.Fatalf("unable to find tx in the mempool: %v", err) } confReq, err := chainntnfs.NewConfRequest(txid, pkScript) - if err != nil { - t.Fatalf("unable to create conf request: %v", err) - } + require.NoError(t, err, "unable to create conf request") _, txStatus, err = notifier.historicalConfDetails(confReq, 0, 0) - if err != nil { - t.Fatalf("unable to retrieve historical conf details: %v", err) - } + require.NoError(t, err, "unable to retrieve historical conf details") // Since it has yet to be included in a block, it should have been found // within the mempool. 
@@ -231,9 +200,7 @@ func TestHistoricalConfDetailsNoTxIndex(t *testing.T) { _, txStatus, err = notifier.historicalConfDetails( confReq, uint32(currentHeight), uint32(currentHeight)+1, ) - if err != nil { - t.Fatalf("unable to retrieve historical conf details: %v", err) - } + require.NoError(t, err, "unable to retrieve historical conf details") // Since the backend node's txindex is disabled and the transaction has // confirmed, we should be able to find it by falling back to scanning diff --git a/chainntnfs/height_hint_cache_test.go b/chainntnfs/height_hint_cache_test.go index ad2f48426..f984b871d 100644 --- a/chainntnfs/height_hint_cache_test.go +++ b/chainntnfs/height_hint_cache_test.go @@ -25,17 +25,11 @@ func initHintCacheWithConfig(t *testing.T, cfg CacheConfig) *HeightHintCache { t.Helper() tempDir, err := ioutil.TempDir("", "kek") - if err != nil { - t.Fatalf("unable to create temp dir: %v", err) - } + require.NoError(t, err, "unable to create temp dir") db, err := channeldb.Open(tempDir) - if err != nil { - t.Fatalf("unable to create db: %v", err) - } + require.NoError(t, err, "unable to create db") hintCache, err := NewHeightHintCache(cfg, db.Backend) - if err != nil { - t.Fatalf("unable to create hint cache: %v", err) - } + require.NoError(t, err, "unable to create hint cache") return hintCache } @@ -69,9 +63,7 @@ func TestHeightHintCacheConfirms(t *testing.T) { } err = hintCache.CommitConfirmHint(height, confRequests...) - if err != nil { - t.Fatalf("unable to add entries to cache: %v", err) - } + require.NoError(t, err, "unable to add entries to cache") // With the hashes committed, we'll now query the cache to ensure that // we're able to properly retrieve the confirm hints. @@ -130,9 +122,7 @@ func TestHeightHintCacheSpends(t *testing.T) { } err = hintCache.CommitSpendHint(height, spendRequests...) - if err != nil { - t.Fatalf("unable to add entries to cache: %v", err) - } + require.NoError(t, err, "unable to add entries to cache") // With the outpoints committed, we'll now query the cache to ensure // that we're able to properly retrieve the confirm hints. diff --git a/chainntnfs/test/test_interface.go b/chainntnfs/test/test_interface.go index 291d72a56..7faf468b1 100644 --- a/chainntnfs/test/test_interface.go +++ b/chainntnfs/test/test_interface.go @@ -39,17 +39,13 @@ func testSingleConfirmationNotification(miner *rpctest.Harness, // We're spending from a coinbase output here, so we use the dedicated // function. txid, pkScript, err := chainntnfs.GetTestTxidAndScript(miner) - if err != nil { - t.Fatalf("unable to create test tx: %v", err) - } + require.NoError(t, err, "unable to create test tx") if err := chainntnfs.WaitForMempoolTx(miner, txid); err != nil { t.Fatalf("tx not relayed to miner: %v", err) } _, currentHeight, err := miner.Client.GetBestBlock() - if err != nil { - t.Fatalf("unable to get current height: %v", err) - } + require.NoError(t, err, "unable to get current height") // Now that we have a txid, register a confirmation notification with // the chainntfn source. @@ -64,16 +60,12 @@ func testSingleConfirmationNotification(miner *rpctest.Harness, txid, pkScript, numConfs, uint32(currentHeight), ) } - if err != nil { - t.Fatalf("unable to register ntfn: %v", err) - } + require.NoError(t, err, "unable to register ntfn") // Now generate a single block, the transaction should be included which // should trigger a notification event. 
blockHash, err := miner.Client.Generate(1) - if err != nil { - t.Fatalf("unable to generate single block: %v", err) - } + require.NoError(t, err, "unable to generate single block") select { case confInfo := <-confIntent.Confirmed: @@ -113,17 +105,13 @@ func testMultiConfirmationNotification(miner *rpctest.Harness, // Again, we'll begin by creating a fresh transaction, so we can obtain // a fresh txid. txid, pkScript, err := chainntnfs.GetTestTxidAndScript(miner) - if err != nil { - t.Fatalf("unable to create test addr: %v", err) - } + require.NoError(t, err, "unable to create test addr") if err := chainntnfs.WaitForMempoolTx(miner, txid); err != nil { t.Fatalf("tx not relayed to miner: %v", err) } _, currentHeight, err := miner.Client.GetBestBlock() - if err != nil { - t.Fatalf("unable to get current height: %v", err) - } + require.NoError(t, err, "unable to get current height") numConfs := uint32(6) var confIntent *chainntnfs.ConfirmationEvent @@ -136,9 +124,7 @@ func testMultiConfirmationNotification(miner *rpctest.Harness, txid, pkScript, numConfs, uint32(currentHeight), ) } - if err != nil { - t.Fatalf("unable to register ntfn: %v", err) - } + require.NoError(t, err, "unable to register ntfn") // Now generate a six blocks. The transaction should be included in the // first block, which will be built upon by the other 5 blocks. @@ -167,9 +153,7 @@ func testBatchConfirmationNotification(miner *rpctest.Harness, confIntents := make([]*chainntnfs.ConfirmationEvent, len(confSpread)) _, currentHeight, err := miner.Client.GetBestBlock() - if err != nil { - t.Fatalf("unable to get current height: %v", err) - } + require.NoError(t, err, "unable to get current height") // Create a new txid spending miner coins for each confirmation entry // in confSpread, we collect each conf intent into a slice so we can @@ -279,9 +263,7 @@ func testSpendNotification(miner *rpctest.Harness, outpoint, output, privKey := chainntnfs.CreateSpendableOutput(t, miner) _, currentHeight, err := miner.Client.GetBestBlock() - if err != nil { - t.Fatalf("unable to get current height: %v", err) - } + require.NoError(t, err, "unable to get current height") // Now that we have an output index and the pkScript, register for a // spentness notification for the newly created output with multiple @@ -312,9 +294,7 @@ func testSpendNotification(miner *rpctest.Harness, // Broadcast our spending transaction. spenderSha, err := miner.Client.SendRawTransaction(spendingTx, true) - if err != nil { - t.Fatalf("unable to broadcast tx: %v", err) - } + require.NoError(t, err, "unable to broadcast tx") if err := chainntnfs.WaitForMempoolTx(miner, spenderSha); err != nil { t.Fatalf("tx not relayed to miner: %v", err) @@ -354,9 +334,7 @@ func testSpendNotification(miner *rpctest.Harness, outpoint, output.PkScript, uint32(currentHeight), ) } - if err != nil { - t.Fatalf("unable to register for spend ntfn: %v", err) - } + require.NoError(t, err, "unable to register for spend ntfn") select { case <-spentIntent.Spend: @@ -373,9 +351,7 @@ func testSpendNotification(miner *rpctest.Harness, } _, currentHeight, err = miner.Client.GetBestBlock() - if err != nil { - t.Fatalf("unable to get current height: %v", err) - } + require.NoError(t, err, "unable to get current height") for _, c := range spendClients { select { @@ -464,9 +440,7 @@ func testMultiClientConfirmationNotification(miner *rpctest.Harness, // We'd like to test the case of a multiple clients registered to // receive a confirmation notification for the same transaction. 
txid, pkScript, err := chainntnfs.GetTestTxidAndScript(miner) - if err != nil { - t.Fatalf("unable to create test tx: %v", err) - } + require.NoError(t, err, "unable to create test tx") if err := chainntnfs.WaitForMempoolTx(miner, txid); err != nil { t.Fatalf("tx not relayed to miner: %v", err) } @@ -478,9 +452,7 @@ func testMultiClientConfirmationNotification(miner *rpctest.Harness, ) _, currentHeight, err := miner.Client.GetBestBlock() - if err != nil { - t.Fatalf("unable to get current height: %v", err) - } + require.NoError(t, err, "unable to get current height") // Register for a conf notification for the above generated txid with // numConfsClients distinct clients. @@ -535,9 +507,7 @@ func testTxConfirmedBeforeNtfnRegistration(miner *rpctest.Harness, // spending from a coinbase output here, so we use the dedicated // function. txid3, pkScript3, err := chainntnfs.GetTestTxidAndScript(miner) - if err != nil { - t.Fatalf("unable to create test tx: %v", err) - } + require.NoError(t, err, "unable to create test tx") if err := chainntnfs.WaitForMempoolTx(miner, txid3); err != nil { t.Fatalf("tx not relayed to miner: %v", err) } @@ -548,36 +518,26 @@ func testTxConfirmedBeforeNtfnRegistration(miner *rpctest.Harness, // that the TXID hasn't already been included in the chain, otherwise the // notification will never be sent. _, err = miner.Client.Generate(1) - if err != nil { - t.Fatalf("unable to generate block: %v", err) - } + require.NoError(t, err, "unable to generate block") txid1, pkScript1, err := chainntnfs.GetTestTxidAndScript(miner) - if err != nil { - t.Fatalf("unable to create test tx: %v", err) - } + require.NoError(t, err, "unable to create test tx") if err := chainntnfs.WaitForMempoolTx(miner, txid1); err != nil { t.Fatalf("tx not relayed to miner: %v", err) } txid2, pkScript2, err := chainntnfs.GetTestTxidAndScript(miner) - if err != nil { - t.Fatalf("unable to create test tx: %v", err) - } + require.NoError(t, err, "unable to create test tx") if err := chainntnfs.WaitForMempoolTx(miner, txid2); err != nil { t.Fatalf("tx not relayed to miner: %v", err) } _, currentHeight, err := miner.Client.GetBestBlock() - if err != nil { - t.Fatalf("unable to get current height: %v", err) - } + require.NoError(t, err, "unable to get current height") // Now generate another block containing txs 1 & 2. blockHash, err := miner.Client.Generate(1) - if err != nil { - t.Fatalf("unable to generate block: %v", err) - } + require.NoError(t, err, "unable to generate block") // Register a confirmation notification with the chainntfn source for tx2, // which is included in the last block. The height hint is the height before @@ -593,9 +553,7 @@ func testTxConfirmedBeforeNtfnRegistration(miner *rpctest.Harness, txid1, pkScript1, 1, uint32(currentHeight), ) } - if err != nil { - t.Fatalf("unable to register ntfn: %v", err) - } + require.NoError(t, err, "unable to register ntfn") select { case confInfo := <-ntfn1.Confirmed: @@ -639,15 +597,11 @@ func testTxConfirmedBeforeNtfnRegistration(miner *rpctest.Harness, txid2, pkScript2, 3, uint32(currentHeight), ) } - if err != nil { - t.Fatalf("unable to register ntfn: %v", err) - } + require.NoError(t, err, "unable to register ntfn") // Fully confirm tx3. 
_, err = miner.Client.Generate(2) - if err != nil { - t.Fatalf("unable to generate block: %v", err) - } + require.NoError(t, err, "unable to generate block") select { case <-ntfn2.Confirmed: @@ -674,9 +628,7 @@ func testTxConfirmedBeforeNtfnRegistration(miner *rpctest.Harness, txid3, pkScript3, 1, uint32(currentHeight-1), ) } - if err != nil { - t.Fatalf("unable to register ntfn: %v", err) - } + require.NoError(t, err, "unable to register ntfn") // We'll also register for a confirmation notification with the pkscript // of a different transaction. This notification shouldn't fire since we @@ -685,9 +637,7 @@ func testTxConfirmedBeforeNtfnRegistration(miner *rpctest.Harness, ntfn4, err = notifier.RegisterConfirmationsNtfn( txid3, pkScript2, 1, uint32(currentHeight-1), ) - if err != nil { - t.Fatalf("unable to register ntfn: %v", err) - } + require.NoError(t, err, "unable to register ntfn") select { case <-ntfn3.Confirmed: @@ -725,17 +675,13 @@ func testLazyNtfnConsumer(miner *rpctest.Harness, // Create a transaction to be notified about. We'll register for // notifications on this transaction but won't be prompt in checking them txid, pkScript, err := chainntnfs.GetTestTxidAndScript(miner) - if err != nil { - t.Fatalf("unable to create test tx: %v", err) - } + require.NoError(t, err, "unable to create test tx") if err := chainntnfs.WaitForMempoolTx(miner, txid); err != nil { t.Fatalf("tx not relayed to miner: %v", err) } _, currentHeight, err := miner.Client.GetBestBlock() - if err != nil { - t.Fatalf("unable to get current height: %v", err) - } + require.NoError(t, err, "unable to get current height") numConfs := uint32(3) @@ -755,9 +701,7 @@ func testLazyNtfnConsumer(miner *rpctest.Harness, txid, pkScript, numConfs, uint32(currentHeight), ) } - if err != nil { - t.Fatalf("unable to register ntfn: %v", err) - } + require.NoError(t, err, "unable to register ntfn") // Generate another 2 blocks, this should dispatch the confirm notification if _, err := miner.Client.Generate(2); err != nil { @@ -768,17 +712,13 @@ func testLazyNtfnConsumer(miner *rpctest.Harness, // if the first transaction has confirmed doesn't mean that we shouldn't // be able to see if this transaction confirms first txid, pkScript, err = chainntnfs.GetTestTxidAndScript(miner) - if err != nil { - t.Fatalf("unable to create test tx: %v", err) - } + require.NoError(t, err, "unable to create test tx") if err := chainntnfs.WaitForMempoolTx(miner, txid); err != nil { t.Fatalf("tx not relayed to miner: %v", err) } _, currentHeight, err = miner.Client.GetBestBlock() - if err != nil { - t.Fatalf("unable to get current height: %v", err) - } + require.NoError(t, err, "unable to get current height") numConfs = 1 var secondConfIntent *chainntnfs.ConfirmationEvent @@ -791,9 +731,7 @@ func testLazyNtfnConsumer(miner *rpctest.Harness, txid, pkScript, numConfs, uint32(currentHeight), ) } - if err != nil { - t.Fatalf("unable to register ntfn: %v", err) - } + require.NoError(t, err, "unable to register ntfn") if _, err := miner.Client.Generate(1); err != nil { t.Fatalf("unable to generate blocks: %v", err) @@ -829,16 +767,12 @@ func testSpendBeforeNtfnRegistration(miner *rpctest.Harness, outpoint, output, privKey := chainntnfs.CreateSpendableOutput(t, miner) _, heightHint, err := miner.Client.GetBestBlock() - if err != nil { - t.Fatalf("unable to get current height: %v", err) - } + require.NoError(t, err, "unable to get current height") // We'll then spend this output and broadcast the spend transaction. 
spendingTx := chainntnfs.CreateSpendTx(t, outpoint, output, privKey) spenderSha, err := miner.Client.SendRawTransaction(spendingTx, true) - if err != nil { - t.Fatalf("unable to broadcast tx: %v", err) - } + require.NoError(t, err, "unable to broadcast tx") if err := chainntnfs.WaitForMempoolTx(miner, spenderSha); err != nil { t.Fatalf("tx not relayed to miner: %v", err) } @@ -846,18 +780,14 @@ func testSpendBeforeNtfnRegistration(miner *rpctest.Harness, // We create an epoch client we can use to make sure the notifier is // caught up to the mining node's chain. epochClient, err := notifier.RegisterBlockEpochNtfn(nil) - if err != nil { - t.Fatalf("unable to register for block epoch: %v", err) - } + require.NoError(t, err, "unable to register for block epoch") // Now we mine an additional block, which should include our spend. if _, err := miner.Client.Generate(1); err != nil { t.Fatalf("unable to generate single block: %v", err) } _, spendHeight, err := miner.Client.GetBestBlock() - if err != nil { - t.Fatalf("unable to get current height: %v", err) - } + require.NoError(t, err, "unable to get current height") // checkSpends registers two clients to be notified of a spend that has // already happened. The notifier should dispatch a spend notification @@ -948,9 +878,7 @@ func testCancelSpendNtfn(node *rpctest.Harness, outpoint, output, privKey := chainntnfs.CreateSpendableOutput(t, node) _, currentHeight, err := node.Client.GetBestBlock() - if err != nil { - t.Fatalf("unable to get current height: %v", err) - } + require.NoError(t, err, "unable to get current height") // Create two clients that each registered to the spend notification. // We'll cancel the notification for the first client and leave the @@ -984,9 +912,7 @@ func testCancelSpendNtfn(node *rpctest.Harness, // Broadcast our spending transaction. spenderSha, err := node.Client.SendRawTransaction(spendingTx, true) - if err != nil { - t.Fatalf("unable to broadcast tx: %v", err) - } + require.NoError(t, err, "unable to broadcast tx") if err := chainntnfs.WaitForMempoolTx(node, spenderSha); err != nil { t.Fatalf("tx not relayed to miner: %v", err) @@ -1092,9 +1018,7 @@ func testReorgConf(miner *rpctest.Harness, miner2, err := rpctest.New( chainntnfs.NetParams, nil, []string{"--txindex"}, "", ) - if err != nil { - t.Fatalf("unable to create mining node: %v", err) - } + require.NoError(t, err, "unable to create mining node") if err := miner2.SetUp(false, 0); err != nil { t.Fatalf("unable to set up mining node: %v", err) } @@ -1129,22 +1053,16 @@ func testReorgConf(miner *rpctest.Harness, // We disconnect the two nodes, such that we can start mining on them // individually without the other one learning about the new blocks. err = miner.Client.AddNode(miner2.P2PAddress(), rpcclient.ANRemove) - if err != nil { - t.Fatalf("unable to remove node: %v", err) - } + require.NoError(t, err, "unable to remove node") txid, pkScript, err := chainntnfs.GetTestTxidAndScript(miner) - if err != nil { - t.Fatalf("unable to create test tx: %v", err) - } + require.NoError(t, err, "unable to create test tx") if err := chainntnfs.WaitForMempoolTx(miner, txid); err != nil { t.Fatalf("tx not relayed to miner: %v", err) } _, currentHeight, err := miner.Client.GetBestBlock() - if err != nil { - t.Fatalf("unable to get current height: %v", err) - } + require.NoError(t, err, "unable to get current height") // Now that we have a txid, register a confirmation notification with // the chainntfn source. 
@@ -1159,15 +1077,11 @@ func testReorgConf(miner *rpctest.Harness, txid, pkScript, numConfs, uint32(currentHeight), ) } - if err != nil { - t.Fatalf("unable to register ntfn: %v", err) - } + require.NoError(t, err, "unable to register ntfn") // Now generate a single block, the transaction should be included. _, err = miner.Client.Generate(1) - if err != nil { - t.Fatalf("unable to generate single block: %v", err) - } + require.NoError(t, err, "unable to generate single block") // Transaction only has one confirmation, and the notification is registered // with 2 confirmations, so we should not be notified yet. @@ -1219,22 +1133,16 @@ func testReorgConf(miner *rpctest.Harness, // Now confirm the transaction on the longest chain and verify that we // receive the notification. tx, err := miner.Client.GetRawTransaction(txid) - if err != nil { - t.Fatalf("unable to get raw tx: %v", err) - } + require.NoError(t, err, "unable to get raw tx") txid, err = miner2.Client.SendRawTransaction(tx.MsgTx(), false) - if err != nil { - t.Fatalf("unable to get send tx: %v", err) - } + require.NoError(t, err, "unable to get send tx") if err := chainntnfs.WaitForMempoolTx(miner, txid); err != nil { t.Fatalf("tx not relayed to miner: %v", err) } _, err = miner.Client.Generate(3) - if err != nil { - t.Fatalf("unable to generate single block: %v", err) - } + require.NoError(t, err, "unable to generate single block") select { case <-confIntent.Confirmed: @@ -1253,9 +1161,7 @@ func testReorgSpend(miner *rpctest.Harness, // notification for it. outpoint, output, privKey := chainntnfs.CreateSpendableOutput(t, miner) _, heightHint, err := miner.Client.GetBestBlock() - if err != nil { - t.Fatalf("unable to retrieve current height: %v", err) - } + require.NoError(t, err, "unable to retrieve current height") var spendIntent *chainntnfs.SpendEvent if scriptDispatch { @@ -1267,17 +1173,13 @@ func testReorgSpend(miner *rpctest.Harness, outpoint, output.PkScript, uint32(heightHint), ) } - if err != nil { - t.Fatalf("unable to register for spend: %v", err) - } + require.NoError(t, err, "unable to register for spend") // Set up a new miner that we can use to cause a reorg. miner2, err := rpctest.New( chainntnfs.NetParams, nil, []string{"--txindex"}, "", ) - if err != nil { - t.Fatalf("unable to create mining node: %v", err) - } + require.NoError(t, err, "unable to create mining node") if err := miner2.SetUp(false, 0); err != nil { t.Fatalf("unable to set up mining node: %v", err) } @@ -1294,13 +1196,9 @@ func testReorgSpend(miner *rpctest.Harness, t.Fatalf("unable to sync miners: %v", err) } _, minerHeight1, err := miner.Client.GetBestBlock() - if err != nil { - t.Fatalf("unable to get miner1's current height: %v", err) - } + require.NoError(t, err, "unable to get miner1's current height") _, minerHeight2, err := miner2.Client.GetBestBlock() - if err != nil { - t.Fatalf("unable to get miner2's current height: %v", err) - } + require.NoError(t, err, "unable to get miner2's current height") if minerHeight1 != minerHeight2 { t.Fatalf("expected both miners to be on the same height: "+ "%v vs %v", minerHeight1, minerHeight2) @@ -1309,17 +1207,13 @@ func testReorgSpend(miner *rpctest.Harness, // We disconnect the two nodes, such that we can start mining on them // individually without the other one learning about the new blocks. 
err = miner.Client.AddNode(miner2.P2PAddress(), rpcclient.ANRemove) - if err != nil { - t.Fatalf("unable to disconnect miners: %v", err) - } + require.NoError(t, err, "unable to disconnect miners") // Craft the spending transaction for the outpoint created above and // confirm it under the chain of the original miner. spendTx := chainntnfs.CreateSpendTx(t, outpoint, output, privKey) spendTxHash, err := miner.Client.SendRawTransaction(spendTx, true) - if err != nil { - t.Fatalf("unable to broadcast spend tx: %v", err) - } + require.NoError(t, err, "unable to broadcast spend tx") if err := chainntnfs.WaitForMempoolTx(miner, spendTxHash); err != nil { t.Fatalf("spend tx not relayed to miner: %v", err) } @@ -1328,9 +1222,7 @@ func testReorgSpend(miner *rpctest.Harness, t.Fatalf("unable to generate blocks: %v", err) } _, spendHeight, err := miner.Client.GetBestBlock() - if err != nil { - t.Fatalf("unable to get spend height: %v", err) - } + require.NoError(t, err, "unable to get spend height") // We should see a spend notification dispatched with the correct spend // details. @@ -1356,13 +1248,9 @@ func testReorgSpend(miner *rpctest.Harness, t.Fatalf("unable to sync miners: %v", err) } _, minerHeight1, err = miner.Client.GetBestBlock() - if err != nil { - t.Fatalf("unable to get miner1's current height: %v", err) - } + require.NoError(t, err, "unable to get miner1's current height") _, minerHeight2, err = miner2.Client.GetBestBlock() - if err != nil { - t.Fatalf("unable to get miner2's current height: %v", err) - } + require.NoError(t, err, "unable to get miner2's current height") if minerHeight1 != minerHeight2 { t.Fatalf("expected both miners to be on the same height: "+ "%v vs %v", minerHeight1, minerHeight2) @@ -1391,9 +1279,7 @@ func testReorgSpend(miner *rpctest.Harness, t.Fatalf("unable to generate single block: %v", err) } _, spendHeight, err = miner.Client.GetBestBlock() - if err != nil { - t.Fatalf("unable to retrieve current height: %v", err) - } + require.NoError(t, err, "unable to retrieve current height") select { case spendDetails := <-spendIntent.Spend: @@ -1416,9 +1302,7 @@ func testCatchUpClientOnMissedBlocks(miner *rpctest.Harness, var wg sync.WaitGroup outdatedHash, outdatedHeight, err := miner.Client.GetBestBlock() - if err != nil { - t.Fatalf("unable to retrieve current height: %v", err) - } + require.NoError(t, err, "unable to retrieve current height") // This function is used by UnsafeStart to ensure all notifications // are fully drained before clients register for notifications. @@ -1432,9 +1316,7 @@ func testCatchUpClientOnMissedBlocks(miner *rpctest.Harness, // client may not receive all historical notifications. bestHeight := outdatedHeight + numBlocks err = notifier.UnsafeStart(bestHeight, nil, bestHeight, generateBlocks) - if err != nil { - t.Fatalf("unable to unsafe start the notifier: %v", err) - } + require.NoError(t, err, "unable to unsafe start the notifier") defer notifier.Stop() // Create numClients clients whose best known block is 10 blocks behind @@ -1523,9 +1405,7 @@ func testCatchUpOnMissedBlocks(miner *rpctest.Harness, err = notifier.UnsafeStart( bestHeight, nil, bestHeight+numBlocks, generateBlocks, ) - if err != nil { - t.Fatalf("unable to unsafe start the notifier: %v", err) - } + require.NoError(t, err, "unable to unsafe start the notifier") defer notifier.Stop() // Create numClients clients who will listen for block notifications. 
@@ -1622,9 +1502,7 @@ func testCatchUpOnMissedBlocksWithReorg(miner1 *rpctest.Harness, miner2, err := rpctest.New( chainntnfs.NetParams, nil, []string{"--txindex"}, "", ) - if err != nil { - t.Fatalf("unable to create mining node: %v", err) - } + require.NoError(t, err, "unable to create mining node") if err := miner2.SetUp(false, 0); err != nil { t.Fatalf("unable to set up mining node: %v", err) } @@ -1659,22 +1537,16 @@ func testCatchUpOnMissedBlocksWithReorg(miner1 *rpctest.Harness, // We disconnect the two nodes, such that we can start mining on them // individually without the other one learning about the new blocks. err = miner1.Client.AddNode(miner2.P2PAddress(), rpcclient.ANRemove) - if err != nil { - t.Fatalf("unable to remove node: %v", err) - } + require.NoError(t, err, "unable to remove node") // Now mine on each chain separately blocks, err := miner1.Client.Generate(numBlocks) - if err != nil { - t.Fatalf("unable to generate single block: %v", err) - } + require.NoError(t, err, "unable to generate single block") // We generate an extra block on miner 2's chain to ensure it is the // longer chain. _, err = miner2.Client.Generate(numBlocks + 1) - if err != nil { - t.Fatalf("unable to generate single block: %v", err) - } + require.NoError(t, err, "unable to generate single block") // Sync the two chains to ensure they will sync to miner2's chain. if err := rpctest.ConnectNode(miner1, miner2); err != nil { @@ -1717,9 +1589,7 @@ func testCatchUpOnMissedBlocksWithReorg(miner1 *rpctest.Harness, err = notifier.UnsafeStart( nodeHeight1+numBlocks, blocks[numBlocks-1], syncHeight, nil, ) - if err != nil { - t.Fatalf("Unable to unsafe start the notifier: %v", err) - } + require.NoError(t, err, "Unable to unsafe start the notifier") defer notifier.Stop() // Create numClients clients who will listen for block notifications. @@ -1745,9 +1615,7 @@ func testCatchUpOnMissedBlocksWithReorg(miner1 *rpctest.Harness, // Generate a single block, which should trigger the notifier to rewind // to the common ancestor and dispatch notifications from there. _, err = miner2.Client.Generate(1) - if err != nil { - t.Fatalf("unable to generate single block: %v", err) - } + require.NoError(t, err, "unable to generate single block") // If the chain backend to the notifier stores information about reorged // blocks, the notifier is able to rewind the chain to the common diff --git a/chainntnfs/test_utils.go b/chainntnfs/test_utils.go index 65eaeb072..2898b30cc 100644 --- a/chainntnfs/test_utils.go +++ b/chainntnfs/test_utils.go @@ -27,6 +27,7 @@ import ( "github.com/lightninglabs/neutrino" "github.com/lightningnetwork/lnd/kvdb" "github.com/lightningnetwork/lnd/lntest/wait" + "github.com/stretchr/testify/require" ) var ( @@ -127,14 +128,10 @@ func CreateSpendableOutput(t *testing.T, // Create a transaction that only has one output, the one destined for // the recipient. pkScript, privKey, err := randPubKeyHashScript() - if err != nil { - t.Fatalf("unable to generate pkScript: %v", err) - } + require.NoError(t, err, "unable to generate pkScript") output := &wire.TxOut{Value: 2e8, PkScript: pkScript} txid, err := miner.SendOutputsWithoutChange([]*wire.TxOut{output}, 10) - if err != nil { - t.Fatalf("unable to create tx: %v", err) - } + require.NoError(t, err, "unable to create tx") // Mine the transaction to mark the output as spendable. 
if err := WaitForMempoolTx(miner, txid); err != nil { @@ -161,9 +158,7 @@ func CreateSpendTx(t *testing.T, prevOutPoint *wire.OutPoint, spendingTx, 0, prevOutput.PkScript, txscript.SigHashAll, privKey, true, ) - if err != nil { - t.Fatalf("unable to sign tx: %v", err) - } + require.NoError(t, err, "unable to sign tx") spendingTx.TxIn[0].SignatureScript = sigScript return spendingTx @@ -181,9 +176,7 @@ func NewMiner(t *testing.T, extraArgs []string, createChain bool, extraArgs = append(extraArgs, trickle) node, err := rpctest.New(NetParams, nil, extraArgs, "") - if err != nil { - t.Fatalf("unable to create backend node: %v", err) - } + require.NoError(t, err, "unable to create backend node") if err := node.SetUp(createChain, spendableOutputs); err != nil { node.TearDown() t.Fatalf("unable to set up backend node: %v", err) @@ -204,9 +197,7 @@ func NewBitcoindBackend(t *testing.T, minerAddr string, txindex, t.Helper() tempBitcoindDir, err := ioutil.TempDir("", "bitcoind") - if err != nil { - t.Fatalf("unable to create temp dir: %v", err) - } + require.NoError(t, err, "unable to create temp dir") rpcPort := rand.Intn(65536-1024) + 1024 zmqBlockHost := "ipc:///" + tempBitcoindDir + "/blocks.socket" @@ -289,9 +280,7 @@ func NewNeutrinoBackend(t *testing.T, minerAddr string) (*neutrino.ChainService, t.Helper() spvDir, err := ioutil.TempDir("", "neutrino") - if err != nil { - t.Fatalf("unable to create temp dir: %v", err) - } + require.NoError(t, err, "unable to create temp dir") dbName := filepath.Join(spvDir, "neutrino.db") spvDatabase, err := walletdb.Create( diff --git a/chainntnfs/txnotifier_test.go b/chainntnfs/txnotifier_test.go index 801b2a1f9..85c502c4f 100644 --- a/chainntnfs/txnotifier_test.go +++ b/chainntnfs/txnotifier_test.go @@ -224,17 +224,13 @@ func TestTxNotifierFutureConfDispatch(t *testing.T) { tx1.AddTxOut(&wire.TxOut{PkScript: testRawScript}) tx1Hash := tx1.TxHash() ntfn1, err := n.RegisterConf(&tx1Hash, testRawScript, tx1NumConfs, 1) - if err != nil { - t.Fatalf("unable to register ntfn: %v", err) - } + require.NoError(t, err, "unable to register ntfn") tx2 := wire.MsgTx{Version: 2} tx2.AddTxOut(&wire.TxOut{PkScript: testRawScript}) tx2Hash := tx2.TxHash() ntfn2, err := n.RegisterConf(&tx2Hash, testRawScript, tx2NumConfs, 1) - if err != nil { - t.Fatalf("unable to register ntfn: %v", err) - } + require.NoError(t, err, "unable to register ntfn") // We should not receive any notifications from both transactions // since they have not been included in a block yet. @@ -261,9 +257,7 @@ func TestTxNotifierFutureConfDispatch(t *testing.T) { }) err = n.ConnectTip(block1.Hash(), 11, block1.Transactions()) - if err != nil { - t.Fatalf("Failed to connect block: %v", err) - } + require.NoError(t, err, "Failed to connect block") if err := n.NotifyHeight(11); err != nil { t.Fatalf("unable to dispatch notifications: %v", err) } @@ -323,9 +317,7 @@ func TestTxNotifierFutureConfDispatch(t *testing.T) { // This should confirm tx2. block2 := btcutil.NewBlock(&wire.MsgBlock{}) err = n.ConnectTip(block2.Hash(), 12, block2.Transactions()) - if err != nil { - t.Fatalf("Failed to connect block: %v", err) - } + require.NoError(t, err, "Failed to connect block") if err := n.NotifyHeight(12); err != nil { t.Fatalf("unable to dispatch notifications: %v", err) } @@ -396,15 +388,11 @@ func TestTxNotifierHistoricalConfDispatch(t *testing.T) { // starting height so that they are confirmed once registering them. 
tx1Hash := tx1.TxHash() ntfn1, err := n.RegisterConf(&tx1Hash, testRawScript, tx1NumConfs, 1) - if err != nil { - t.Fatalf("unable to register ntfn: %v", err) - } + require.NoError(t, err, "unable to register ntfn") tx2Hash := tx2.TxHash() ntfn2, err := n.RegisterConf(&tx2Hash, testRawScript, tx2NumConfs, 1) - if err != nil { - t.Fatalf("unable to register ntfn: %v", err) - } + require.NoError(t, err, "unable to register ntfn") // Update tx1 with its confirmation details. We should only receive one // update since it only requires one confirmation and it already met it. @@ -415,9 +403,7 @@ func TestTxNotifierHistoricalConfDispatch(t *testing.T) { Tx: &tx1, } err = n.UpdateConfDetails(ntfn1.HistoricalDispatch.ConfRequest, &txConf1) - if err != nil { - t.Fatalf("unable to update conf details: %v", err) - } + require.NoError(t, err, "unable to update conf details") select { case numConfsLeft := <-ntfn1.Event.Updates: const expected = 0 @@ -449,9 +435,7 @@ func TestTxNotifierHistoricalConfDispatch(t *testing.T) { Tx: &tx2, } err = n.UpdateConfDetails(ntfn2.HistoricalDispatch.ConfRequest, &txConf2) - if err != nil { - t.Fatalf("unable to update conf details: %v", err) - } + require.NoError(t, err, "unable to update conf details") select { case numConfsLeft := <-ntfn2.Event.Updates: const expected = 1 @@ -477,9 +461,7 @@ func TestTxNotifierHistoricalConfDispatch(t *testing.T) { }) err = n.ConnectTip(block.Hash(), 11, block.Transactions()) - if err != nil { - t.Fatalf("Failed to connect block: %v", err) - } + require.NoError(t, err, "Failed to connect block") if err := n.NotifyHeight(11); err != nil { t.Fatalf("unable to dispatch notifications: %v", err) } @@ -532,9 +514,7 @@ func TestTxNotifierFutureSpendDispatch(t *testing.T) { // outpoint. op := wire.OutPoint{Index: 1} ntfn, err := n.RegisterSpend(&op, testRawScript, 1) - if err != nil { - t.Fatalf("unable to register spend ntfn: %v", err) - } + require.NoError(t, err, "unable to register spend ntfn") // We should not receive a notification as the outpoint has not been // spent yet. 
@@ -557,9 +537,7 @@ func TestTxNotifierFutureSpendDispatch(t *testing.T) {
 		Transactions: []*wire.MsgTx{spendTx},
 	})
 	err = n.ConnectTip(block.Hash(), 11, block.Transactions())
-	if err != nil {
-		t.Fatalf("unable to connect block: %v", err)
-	}
+	require.NoError(t, err, "unable to connect block")
 	if err := n.NotifyHeight(11); err != nil {
 		t.Fatalf("unable to dispatch notifications: %v", err)
 	}
@@ -592,9 +570,7 @@ func TestTxNotifierFutureSpendDispatch(t *testing.T) {
 		Transactions: []*wire.MsgTx{spendOfSpend},
 	})
 	err = n.ConnectTip(block.Hash(), 12, block.Transactions())
-	if err != nil {
-		t.Fatalf("unable to connect block: %v", err)
-	}
+	require.NoError(t, err, "unable to connect block")
 	if err := n.NotifyHeight(12); err != nil {
 		t.Fatalf("unable to dispatch notifications: %v", err)
 	}
@@ -626,21 +602,15 @@ func TestTxNotifierFutureConfDispatchReuseSafe(t *testing.T) {
 	tx1.AddTxOut(&wire.TxOut{PkScript: testRawScript})
 	tx1Hash := tx1.TxHash()
 	ntfn1, err := n.RegisterConf(&tx1Hash, testRawScript, 1, 1)
-	if err != nil {
-		t.Fatalf("unable to register ntfn: %v", err)
-	}
+	require.NoError(t, err, "unable to register ntfn")
 	scriptNtfn1, err := n.RegisterConf(nil, testRawScript, 1, 1)
-	if err != nil {
-		t.Fatalf("unable to register ntfn: %v", err)
-	}
+	require.NoError(t, err, "unable to register ntfn")
 	block := btcutil.NewBlock(&wire.MsgBlock{
 		Transactions: []*wire.MsgTx{&tx1},
 	})
 	currentBlock++
 	err = n.ConnectTip(block.Hash(), currentBlock, block.Transactions())
-	if err != nil {
-		t.Fatalf("unable to connect block: %v", err)
-	}
+	require.NoError(t, err, "unable to connect block")
 	if err := n.NotifyHeight(currentBlock); err != nil {
 		t.Fatalf("unable to dispatch notifications: %v", err)
 	}
@@ -685,21 +655,15 @@ func TestTxNotifierFutureConfDispatchReuseSafe(t *testing.T) {
 	tx2.AddTxOut(&wire.TxOut{PkScript: testRawScript})
 	tx2Hash := tx2.TxHash()
 	ntfn2, err := n.RegisterConf(&tx2Hash, testRawScript, 1, 1)
-	if err != nil {
-		t.Fatalf("unable to register ntfn: %v", err)
-	}
+	require.NoError(t, err, "unable to register ntfn")
 	scriptNtfn2, err := n.RegisterConf(nil, testRawScript, 1, 1)
-	if err != nil {
-		t.Fatalf("unable to register ntfn: %v", err)
-	}
+	require.NoError(t, err, "unable to register ntfn")
 	block2 := btcutil.NewBlock(&wire.MsgBlock{
 		Transactions: []*wire.MsgTx{&tx2},
 	})
 	currentBlock++
 	err = n.ConnectTip(block2.Hash(), currentBlock, block2.Transactions())
-	if err != nil {
-		t.Fatalf("unable to connect block: %v", err)
-	}
+	require.NoError(t, err, "unable to connect block")
 	if err := n.NotifyHeight(currentBlock); err != nil {
 		t.Fatalf("unable to dispatch notifications: %v", err)
 	}
@@ -800,9 +764,7 @@ func TestTxNotifierHistoricalSpendDispatch(t *testing.T) {
 	// We'll register for a spend notification of the outpoint and ensure
 	// that a notification isn't dispatched.
 	ntfn, err := n.RegisterSpend(&spentOutpoint, testRawScript, 1)
-	if err != nil {
-		t.Fatalf("unable to register spend ntfn: %v", err)
-	}
+	require.NoError(t, err, "unable to register spend ntfn")

 	select {
 	case <-ntfn.Event.Spend:
@@ -817,9 +779,7 @@ func TestTxNotifierHistoricalSpendDispatch(t *testing.T) {
 	err = n.UpdateSpendDetails(
 		ntfn.HistoricalDispatch.SpendRequest, expectedSpendDetails,
 	)
-	if err != nil {
-		t.Fatalf("unable to update spend details: %v", err)
-	}
+	require.NoError(t, err, "unable to update spend details")

 	// Now that we have the spending details, we should receive a spend
 	// notification. We'll ensure that the details match as intended.
@@ -842,9 +802,7 @@ func TestTxNotifierHistoricalSpendDispatch(t *testing.T) {
 		Transactions: []*wire.MsgTx{spendOfSpend},
 	})
 	err = n.ConnectTip(block.Hash(), startingHeight+1, block.Transactions())
-	if err != nil {
-		t.Fatalf("unable to connect block: %v", err)
-	}
+	require.NoError(t, err, "unable to connect block")
 	if err := n.NotifyHeight(startingHeight + 1); err != nil {
 		t.Fatalf("unable to dispatch notifications: %v", err)
 	}
@@ -872,9 +830,7 @@ func TestTxNotifierMultipleHistoricalConfRescans(t *testing.T) {
 	// request a historical confirmation rescan as it does not have a
 	// historical view of the chain.
 	ntfn1, err := n.RegisterConf(&chainntnfs.ZeroHash, testRawScript, 1, 1)
-	if err != nil {
-		t.Fatalf("unable to register spend ntfn: %v", err)
-	}
+	require.NoError(t, err, "unable to register conf ntfn")
 	if ntfn1.HistoricalDispatch == nil {
 		t.Fatal("expected to receive historical dispatch request")
 	}
@@ -883,9 +839,7 @@ func TestTxNotifierMultipleHistoricalConfRescans(t *testing.T) {
 	// transaction. This should not request a historical confirmation rescan
 	// since the first one is still pending.
 	ntfn2, err := n.RegisterConf(&chainntnfs.ZeroHash, testRawScript, 1, 1)
-	if err != nil {
-		t.Fatalf("unable to register spend ntfn: %v", err)
-	}
+	require.NoError(t, err, "unable to register conf ntfn")
 	if ntfn2.HistoricalDispatch != nil {
 		t.Fatal("received unexpected historical rescan request")
 	}
@@ -898,14 +852,10 @@ func TestTxNotifierMultipleHistoricalConfRescans(t *testing.T) {
 		BlockHeight: startingHeight - 1,
 	}
 	err = n.UpdateConfDetails(ntfn1.HistoricalDispatch.ConfRequest, confDetails)
-	if err != nil {
-		t.Fatalf("unable to update conf details: %v", err)
-	}
+	require.NoError(t, err, "unable to update conf details")

 	ntfn3, err := n.RegisterConf(&chainntnfs.ZeroHash, testRawScript, 1, 1)
-	if err != nil {
-		t.Fatalf("unable to register spend ntfn: %v", err)
-	}
+	require.NoError(t, err, "unable to register conf ntfn")
 	if ntfn3.HistoricalDispatch != nil {
 		t.Fatal("received unexpected historical rescan request")
 	}
@@ -928,9 +878,7 @@ func TestTxNotifierMultipleHistoricalSpendRescans(t *testing.T) {
 	// the chain.
 	op := wire.OutPoint{Index: 1}
 	ntfn1, err := n.RegisterSpend(&op, testRawScript, 1)
-	if err != nil {
-		t.Fatalf("unable to register spend ntfn: %v", err)
-	}
+	require.NoError(t, err, "unable to register spend ntfn")
 	if ntfn1.HistoricalDispatch == nil {
 		t.Fatal("expected to receive historical dispatch request")
 	}
@@ -939,9 +887,7 @@ func TestTxNotifierMultipleHistoricalSpendRescans(t *testing.T) {
 	// should not request a historical spend rescan since the first one is
 	// still pending.
 	ntfn2, err := n.RegisterSpend(&op, testRawScript, 1)
-	if err != nil {
-		t.Fatalf("unable to register spend ntfn: %v", err)
-	}
+	require.NoError(t, err, "unable to register spend ntfn")
 	if ntfn2.HistoricalDispatch != nil {
 		t.Fatal("received unexpected historical rescan request")
 	}
@@ -960,14 +906,10 @@ func TestTxNotifierMultipleHistoricalSpendRescans(t *testing.T) {
 	err = n.UpdateSpendDetails(
 		ntfn1.HistoricalDispatch.SpendRequest, spendDetails,
 	)
-	if err != nil {
-		t.Fatalf("unable to update spend details: %v", err)
-	}
+	require.NoError(t, err, "unable to update spend details")

 	ntfn3, err := n.RegisterSpend(&op, testRawScript, 1)
-	if err != nil {
-		t.Fatalf("unable to register spend ntfn: %v", err)
-	}
+	require.NoError(t, err, "unable to register spend ntfn")
 	if ntfn3.HistoricalDispatch != nil {
 		t.Fatal("received unexpected historical rescan request")
 	}
@@ -1026,9 +968,7 @@ func TestTxNotifierMultipleHistoricalNtfns(t *testing.T) {
 	err := n.UpdateConfDetails(
 		confNtfns[0].HistoricalDispatch.ConfRequest, expectedConfDetails,
 	)
-	if err != nil {
-		t.Fatalf("unable to update conf details: %v", err)
-	}
+	require.NoError(t, err, "unable to update conf details")

 	// With the confirmation details retrieved, each client should now have
 	// been notified of the confirmation.
@@ -1047,9 +987,7 @@ func TestTxNotifierMultipleHistoricalNtfns(t *testing.T) {
 	// see a historical rescan request and the confirmation notification
 	// should come through immediately.
 	extraConfNtfn, err := n.RegisterConf(&txid, testRawScript, 1, 1)
-	if err != nil {
-		t.Fatalf("unable to register conf ntfn: %v", err)
-	}
+	require.NoError(t, err, "unable to register conf ntfn")
 	if extraConfNtfn.HistoricalDispatch != nil {
 		t.Fatal("received unexpected historical rescan request")
 	}
@@ -1095,9 +1033,7 @@ func TestTxNotifierMultipleHistoricalNtfns(t *testing.T) {
 	err = n.UpdateSpendDetails(
 		spendNtfns[0].HistoricalDispatch.SpendRequest, expectedSpendDetails,
 	)
-	if err != nil {
-		t.Fatalf("unable to update spend details: %v", err)
-	}
+	require.NoError(t, err, "unable to update spend details")

 	// With the spend details retrieved, each client should now have been
 	// notified of the spend.
@@ -1116,9 +1052,7 @@ func TestTxNotifierMultipleHistoricalNtfns(t *testing.T) {
 	// should not see a historical rescan request and the spend notification
 	// should come through immediately.
 	extraSpendNtfn, err := n.RegisterSpend(&op, testRawScript, 1)
-	if err != nil {
-		t.Fatalf("unable to register spend ntfn: %v", err)
-	}
+	require.NoError(t, err, "unable to register spend ntfn")
 	if extraSpendNtfn.HistoricalDispatch != nil {
 		t.Fatal("received unexpected historical rescan request")
 	}
@@ -1146,27 +1080,19 @@ func TestTxNotifierCancelConf(t *testing.T) {
 	tx1.AddTxOut(&wire.TxOut{PkScript: testRawScript})
 	tx1Hash := tx1.TxHash()
 	ntfn1, err := n.RegisterConf(&tx1Hash, testRawScript, 1, 1)
-	if err != nil {
-		t.Fatalf("unable to register spend ntfn: %v", err)
-	}
+	require.NoError(t, err, "unable to register conf ntfn")

 	tx2 := wire.NewMsgTx(2)
 	tx2.AddTxOut(&wire.TxOut{PkScript: testRawScript})
 	tx2Hash := tx2.TxHash()
 	ntfn2, err := n.RegisterConf(&tx2Hash, testRawScript, 1, 1)
-	if err != nil {
-		t.Fatalf("unable to register spend ntfn: %v", err)
-	}
+	require.NoError(t, err, "unable to register conf ntfn")
 	ntfn3, err := n.RegisterConf(&tx2Hash, testRawScript, 1, 1)
-	if err != nil {
-		t.Fatalf("unable to register spend ntfn: %v", err)
-	}
+	require.NoError(t, err, "unable to register conf ntfn")

 	// This request will have a three block num confs.
 	ntfn4, err := n.RegisterConf(&tx2Hash, testRawScript, 3, 1)
-	if err != nil {
-		t.Fatalf("unable to register spend ntfn: %v", err)
-	}
+	require.NoError(t, err, "unable to register conf ntfn")

 	// Extend the chain with a block that will confirm both transactions.
 	// This will queue confirmation notifications to dispatch once their
@@ -1185,9 +1111,7 @@ func TestTxNotifierCancelConf(t *testing.T) {
 	ntfn2.Event.Cancel()

 	err = n.ConnectTip(block.Hash(), startingHeight+1, block.Transactions())
-	if err != nil {
-		t.Fatalf("unable to connect block: %v", err)
-	}
+	require.NoError(t, err, "unable to connect block")

 	// Cancel the third notification before notifying to ensure its queued
 	// confirmation notification gets removed as well.
@@ -1232,9 +1156,7 @@ func TestTxNotifierCancelConf(t *testing.T) {
 	})

 	err = n.ConnectTip(block1.Hash(), startingHeight+2, block1.Transactions())
-	if err != nil {
-		t.Fatalf("unable to connect block: %v", err)
-	}
+	require.NoError(t, err, "unable to connect block")

 	if err := n.NotifyHeight(startingHeight + 2); err != nil {
 		t.Fatalf("unable to dispatch notifications: %v", err)
@@ -1266,9 +1188,7 @@ func TestTxNotifierCancelConf(t *testing.T) {
 	})

 	err = n.ConnectTip(block2.Hash(), startingHeight+3, block2.Transactions())
-	if err != nil {
-		t.Fatalf("unable to connect block: %v", err)
-	}
+	require.NoError(t, err, "unable to connect block")

 	if err := n.NotifyHeight(startingHeight + 3); err != nil {
 		t.Fatalf("unable to dispatch notifications: %v", err)
@@ -1291,15 +1211,11 @@ func TestTxNotifierCancelSpend(t *testing.T) {
 	// canceled.
 	op1 := wire.OutPoint{Index: 1}
 	ntfn1, err := n.RegisterSpend(&op1, testRawScript, 1)
-	if err != nil {
-		t.Fatalf("unable to register spend ntfn: %v", err)
-	}
+	require.NoError(t, err, "unable to register spend ntfn")

 	op2 := wire.OutPoint{Index: 2}
 	ntfn2, err := n.RegisterSpend(&op2, testRawScript, 1)
-	if err != nil {
-		t.Fatalf("unable to register spend ntfn: %v", err)
-	}
+	require.NoError(t, err, "unable to register spend ntfn")

 	// Construct the spending details of the outpoint and create a dummy
 	// block containing it.
@@ -1326,9 +1242,7 @@ func TestTxNotifierCancelSpend(t *testing.T) {
 	n.CancelSpend(ntfn2.HistoricalDispatch.SpendRequest, 2)

 	err = n.ConnectTip(block.Hash(), startingHeight+1, block.Transactions())
-	if err != nil {
-		t.Fatalf("unable to connect block: %v", err)
-	}
+	require.NoError(t, err, "unable to connect block")
 	if err := n.NotifyHeight(startingHeight + 1); err != nil {
 		t.Fatalf("unable to dispatch notifications: %v", err)
 	}
@@ -1377,42 +1291,30 @@ func TestTxNotifierConfReorg(t *testing.T) {
 	tx1.AddTxOut(&wire.TxOut{PkScript: testRawScript})
 	tx1Hash := tx1.TxHash()
 	ntfn1, err := n.RegisterConf(&tx1Hash, testRawScript, tx1NumConfs, 1)
-	if err != nil {
-		t.Fatalf("unable to register ntfn: %v", err)
-	}
+	require.NoError(t, err, "unable to register ntfn")

 	err = n.UpdateConfDetails(ntfn1.HistoricalDispatch.ConfRequest, nil)
-	if err != nil {
-		t.Fatalf("unable to deliver conf details: %v", err)
-	}
+	require.NoError(t, err, "unable to deliver conf details")

 	// Tx 2 will be confirmed in block 10 and requires 1 conf.
 	tx2 := wire.MsgTx{Version: 2}
 	tx2.AddTxOut(&wire.TxOut{PkScript: testRawScript})
 	tx2Hash := tx2.TxHash()
 	ntfn2, err := n.RegisterConf(&tx2Hash, testRawScript, tx2NumConfs, 1)
-	if err != nil {
-		t.Fatalf("unable to register ntfn: %v", err)
-	}
+	require.NoError(t, err, "unable to register ntfn")

 	err = n.UpdateConfDetails(ntfn2.HistoricalDispatch.ConfRequest, nil)
-	if err != nil {
-		t.Fatalf("unable to deliver conf details: %v", err)
-	}
+	require.NoError(t, err, "unable to deliver conf details")

 	// Tx 3 will be confirmed in block 10 and requires 2 confs.
 	tx3 := wire.MsgTx{Version: 3}
 	tx3.AddTxOut(&wire.TxOut{PkScript: testRawScript})
 	tx3Hash := tx3.TxHash()
 	ntfn3, err := n.RegisterConf(&tx3Hash, testRawScript, tx3NumConfs, 1)
-	if err != nil {
-		t.Fatalf("unable to register ntfn: %v", err)
-	}
+	require.NoError(t, err, "unable to register ntfn")

 	err = n.UpdateConfDetails(ntfn3.HistoricalDispatch.ConfRequest, nil)
-	if err != nil {
-		t.Fatalf("unable to deliver conf details: %v", err)
-	}
+	require.NoError(t, err, "unable to deliver conf details")

 	// Sync chain to block 10. Txs 1 & 2 should be confirmed.
 	block1 := btcutil.NewBlock(&wire.MsgBlock{
@@ -1555,17 +1457,13 @@ func TestTxNotifierConfReorg(t *testing.T) {
 	block4 := btcutil.NewBlock(&wire.MsgBlock{})

 	err = n.ConnectTip(block3.Hash(), 12, block3.Transactions())
-	if err != nil {
-		t.Fatalf("Failed to connect block: %v", err)
-	}
+	require.NoError(t, err, "Failed to connect block")
 	if err := n.NotifyHeight(12); err != nil {
 		t.Fatalf("unable to dispatch notifications: %v", err)
 	}

 	err = n.ConnectTip(block4.Hash(), 13, block4.Transactions())
-	if err != nil {
-		t.Fatalf("Failed to connect block: %v", err)
-	}
+	require.NoError(t, err, "Failed to connect block")
 	if err := n.NotifyHeight(13); err != nil {
 		t.Fatalf("unable to dispatch notifications: %v", err)
 	}
@@ -1692,14 +1590,10 @@ func TestTxNotifierSpendReorg(t *testing.T) {

 	// We'll register for a spend notification for each outpoint above.
 	ntfn1, err := n.RegisterSpend(&op1, testRawScript, 1)
-	if err != nil {
-		t.Fatalf("unable to register spend ntfn: %v", err)
-	}
+	require.NoError(t, err, "unable to register spend ntfn")

 	ntfn2, err := n.RegisterSpend(&op2, testRawScript, 1)
-	if err != nil {
-		t.Fatalf("unable to register spend ntfn: %v", err)
-	}
+	require.NoError(t, err, "unable to register spend ntfn")

 	// We'll extend the chain by connecting a new block at tip. This block
 	// will only contain the spending transaction of the first outpoint.
@@ -1707,9 +1601,7 @@ func TestTxNotifierSpendReorg(t *testing.T) {
 		Transactions: []*wire.MsgTx{spendTx1},
 	})
 	err = n.ConnectTip(block1.Hash(), startingHeight+1, block1.Transactions())
-	if err != nil {
-		t.Fatalf("unable to connect block: %v", err)
-	}
+	require.NoError(t, err, "unable to connect block")
 	if err := n.NotifyHeight(startingHeight + 1); err != nil {
 		t.Fatalf("unable to dispatch notifications: %v", err)
 	}
@@ -1737,9 +1629,7 @@ func TestTxNotifierSpendReorg(t *testing.T) {
 		Transactions: []*wire.MsgTx{spendTx2},
 	})
 	err = n.ConnectTip(block2.Hash(), startingHeight+2, block2.Transactions())
-	if err != nil {
-		t.Fatalf("unable to connect block: %v", err)
-	}
+	require.NoError(t, err, "unable to connect block")
 	if err := n.NotifyHeight(startingHeight + 2); err != nil {
 		t.Fatalf("unable to dispatch notifications: %v", err)
 	}
@@ -1793,9 +1683,7 @@ func TestTxNotifierSpendReorg(t *testing.T) {
 	err = n.ConnectTip(
 		emptyBlock.Hash(), startingHeight+2, emptyBlock.Transactions(),
 	)
-	if err != nil {
-		t.Fatalf("unable to disconnect block: %v", err)
-	}
+	require.NoError(t, err, "unable to connect block")
 	if err := n.NotifyHeight(startingHeight + 2); err != nil {
 		t.Fatalf("unable to dispatch notifications: %v", err)
 	}
@@ -1818,9 +1706,7 @@ func TestTxNotifierSpendReorg(t *testing.T) {
 	err = n.ConnectTip(
 		block2.Hash(), startingHeight+3, block2.Transactions(),
 	)
-	if err != nil {
-		t.Fatalf("unable to connect block: %v", err)
-	}
+	require.NoError(t, err, "unable to connect block")
 	if err := n.NotifyHeight(startingHeight + 3); err != nil {
 		t.Fatalf("unable to dispatch notifications: %v", err)
 	}
@@ -1882,9 +1768,7 @@ func TestTxNotifierSpendReorgMissed(t *testing.T) {
 		Transactions: []*wire.MsgTx{spendTx},
 	})
 	err := n.ConnectTip(block.Hash(), startingHeight+1, block.Transactions())
-	if err != nil {
-		t.Fatalf("unable to connect block: %v", err)
-	}
+	require.NoError(t, err, "unable to connect block")
 	if err := n.NotifyHeight(startingHeight + 1); err != nil {
 		t.Fatalf("unable to dispatch notifications: %v", err)
 	}
@@ -1892,9 +1776,7 @@ func TestTxNotifierSpendReorgMissed(t *testing.T) {
 	// We register for the spend now and will not get a spend notification
 	// until we call UpdateSpendDetails.
 	ntfn, err := n.RegisterSpend(&op, testRawScript, 1)
-	if err != nil {
-		t.Fatalf("unable to register spend: %v", err)
-	}
+	require.NoError(t, err, "unable to register spend")

 	// Assert that the HistoricalDispatch variable is non-nil. We'll use
 	// the SpendRequest member to update the spend details.
@@ -1962,17 +1844,13 @@ func TestTxNotifierConfirmHintCache(t *testing.T) {
 	tx1.AddTxOut(&wire.TxOut{PkScript: testRawScript})
 	tx1Hash := tx1.TxHash()
 	ntfn1, err := n.RegisterConf(&tx1Hash, testRawScript, 1, 1)
-	if err != nil {
-		t.Fatalf("unable to register tx1: %v", err)
-	}
+	require.NoError(t, err, "unable to register tx1")

 	tx2 := wire.MsgTx{Version: 2}
 	tx2.AddTxOut(&wire.TxOut{PkScript: testRawScript})
 	tx2Hash := tx2.TxHash()
 	ntfn2, err := n.RegisterConf(&tx2Hash, testRawScript, 2, 1)
-	if err != nil {
-		t.Fatalf("unable to register tx2: %v", err)
-	}
+	require.NoError(t, err, "unable to register tx2")

 	// Both transactions should not have a height hint set, as RegisterConf
 	// should not alter the cache state.
@@ -1998,9 +1876,7 @@ func TestTxNotifierConfirmHintCache(t *testing.T) {
 	})

 	err = n.ConnectTip(block1.Hash(), txDummyHeight, block1.Transactions())
-	if err != nil {
-		t.Fatalf("Failed to connect block: %v", err)
-	}
+	require.NoError(t, err, "Failed to connect block")
 	if err := n.NotifyHeight(txDummyHeight); err != nil {
 		t.Fatalf("unable to dispatch notifications: %v", err)
 	}
@@ -2026,13 +1902,9 @@ func TestTxNotifierConfirmHintCache(t *testing.T) {
 	// Now, update the conf details reporting that the neither txn was found
 	// in the historical dispatch.
 	err = n.UpdateConfDetails(ntfn1.HistoricalDispatch.ConfRequest, nil)
-	if err != nil {
-		t.Fatalf("unable to update conf details: %v", err)
-	}
+	require.NoError(t, err, "unable to update conf details")
 	err = n.UpdateConfDetails(ntfn2.HistoricalDispatch.ConfRequest, nil)
-	if err != nil {
-		t.Fatalf("unable to update conf details: %v", err)
-	}
+	require.NoError(t, err, "unable to update conf details")

 	// We'll create another block that will include the first transaction
 	// and extend the chain.
@@ -2041,9 +1913,7 @@ func TestTxNotifierConfirmHintCache(t *testing.T) {
 	})

 	err = n.ConnectTip(block2.Hash(), tx1Height, block2.Transactions())
-	if err != nil {
-		t.Fatalf("Failed to connect block: %v", err)
-	}
+	require.NoError(t, err, "Failed to connect block")
 	if err := n.NotifyHeight(tx1Height); err != nil {
 		t.Fatalf("unable to dispatch notifications: %v", err)
 	}
@@ -2052,18 +1922,14 @@ func TestTxNotifierConfirmHintCache(t *testing.T) {
 	// they should have their height hints updated to the latest block
 	// height.
 	hint, err = hintCache.QueryConfirmHint(ntfn1.HistoricalDispatch.ConfRequest)
-	if err != nil {
-		t.Fatalf("unable to query for hint: %v", err)
-	}
+	require.NoError(t, err, "unable to query for hint")
 	if hint != tx1Height {
 		t.Fatalf("expected hint %d, got %d", tx1Height, hint)
 	}

 	hint, err = hintCache.QueryConfirmHint(ntfn2.HistoricalDispatch.ConfRequest)
-	if err != nil {
-		t.Fatalf("unable to query for hint: %v", err)
-	}
+	require.NoError(t, err, "unable to query for hint")
 	if hint != tx1Height {
 		t.Fatalf("expected hint %d, got %d", tx2Height, hint)
@@ -2076,18 +1942,14 @@ func TestTxNotifierConfirmHintCache(t *testing.T) {
 	})

 	err = n.ConnectTip(block3.Hash(), tx2Height, block3.Transactions())
-	if err != nil {
-		t.Fatalf("Failed to connect block: %v", err)
-	}
+	require.NoError(t, err, "Failed to connect block")
 	if err := n.NotifyHeight(tx2Height); err != nil {
 		t.Fatalf("unable to dispatch notifications: %v", err)
 	}

 	// The height hint for the first transaction should remain the same.
 	hint, err = hintCache.QueryConfirmHint(ntfn1.HistoricalDispatch.ConfRequest)
-	if err != nil {
-		t.Fatalf("unable to query for hint: %v", err)
-	}
+	require.NoError(t, err, "unable to query for hint")
 	if hint != tx1Height {
 		t.Fatalf("expected hint %d, got %d", tx1Height, hint)
@@ -2096,9 +1958,7 @@ func TestTxNotifierConfirmHintCache(t *testing.T) {
 	// The height hint for the second transaction should now be updated to
 	// reflect its confirmation.
 	hint, err = hintCache.QueryConfirmHint(ntfn2.HistoricalDispatch.ConfRequest)
-	if err != nil {
-		t.Fatalf("unable to query for hint: %v", err)
-	}
+	require.NoError(t, err, "unable to query for hint")
 	if hint != tx2Height {
 		t.Fatalf("expected hint %d, got %d", tx2Height, hint)
@@ -2113,9 +1973,7 @@ func TestTxNotifierConfirmHintCache(t *testing.T) {
 	// This should update the second transaction's height hint within the
 	// cache to the previous height.
 	hint, err = hintCache.QueryConfirmHint(ntfn2.HistoricalDispatch.ConfRequest)
-	if err != nil {
-		t.Fatalf("unable to query for hint: %v", err)
-	}
+	require.NoError(t, err, "unable to query for hint")
 	if hint != tx1Height {
 		t.Fatalf("expected hint %d, got %d", tx1Height, hint)
@@ -2124,9 +1982,7 @@ func TestTxNotifierConfirmHintCache(t *testing.T) {
 	// The first transaction's height hint should remain at the original
 	// confirmation height.
 	hint, err = hintCache.QueryConfirmHint(ntfn2.HistoricalDispatch.ConfRequest)
-	if err != nil {
-		t.Fatalf("unable to query for hint: %v", err)
-	}
+	require.NoError(t, err, "unable to query for hint")
 	if hint != tx1Height {
 		t.Fatalf("expected hint %d, got %d", tx1Height, hint)
@@ -2158,14 +2014,10 @@ func TestTxNotifierSpendHintCache(t *testing.T) {
 	// Create two test outpoints and register them for spend notifications.
 	op1 := wire.OutPoint{Index: 1}
 	ntfn1, err := n.RegisterSpend(&op1, testRawScript, 1)
-	if err != nil {
-		t.Fatalf("unable to register spend for op1: %v", err)
-	}
+	require.NoError(t, err, "unable to register spend for op1")

 	op2 := wire.OutPoint{Index: 2}
 	ntfn2, err := n.RegisterSpend(&op2, testRawScript, 1)
-	if err != nil {
-		t.Fatalf("unable to register spend for op2: %v", err)
-	}
+	require.NoError(t, err, "unable to register spend for op2")

 	// Both outpoints should not have a spend hint set upon registration, as
 	// we must first determine whether they have already been spent in the
@@ -2188,9 +2040,7 @@ func TestTxNotifierSpendHintCache(t *testing.T) {
 	err = n.ConnectTip(
 		emptyBlock.Hash(), dummyHeight, emptyBlock.Transactions(),
 	)
-	if err != nil {
-		t.Fatalf("unable to connect block: %v", err)
-	}
+	require.NoError(t, err, "unable to connect block")
 	if err := n.NotifyHeight(dummyHeight); err != nil {
 		t.Fatalf("unable to dispatch notifications: %v", err)
 	}
@@ -2215,13 +2065,9 @@ func TestTxNotifierSpendHintCache(t *testing.T) {
 	// calling UpdateSpendDetails. This should allow their spend hints to be
 	// updated upon every block connected/disconnected.
 	err = n.UpdateSpendDetails(ntfn1.HistoricalDispatch.SpendRequest, nil)
-	if err != nil {
-		t.Fatalf("unable to update spend details: %v", err)
-	}
+	require.NoError(t, err, "unable to update spend details")
 	err = n.UpdateSpendDetails(ntfn2.HistoricalDispatch.SpendRequest, nil)
-	if err != nil {
-		t.Fatalf("unable to update spend details: %v", err)
-	}
+	require.NoError(t, err, "unable to update spend details")

 	// We'll create a new block that only contains the spending transaction
 	// of the first outpoint.
@@ -2234,9 +2080,7 @@ func TestTxNotifierSpendHintCache(t *testing.T) {
 		Transactions: []*wire.MsgTx{spendTx1},
 	})
 	err = n.ConnectTip(block1.Hash(), op1Height, block1.Transactions())
-	if err != nil {
-		t.Fatalf("unable to connect block: %v", err)
-	}
+	require.NoError(t, err, "unable to connect block")
 	if err := n.NotifyHeight(op1Height); err != nil {
 		t.Fatalf("unable to dispatch notifications: %v", err)
 	}
@@ -2245,16 +2089,12 @@ func TestTxNotifierSpendHintCache(t *testing.T) {
 	// the new block being connected due to the first outpoint being spent
 	// at this height, and the second outpoint still being unspent.
 	op1Hint, err := hintCache.QuerySpendHint(ntfn1.HistoricalDispatch.SpendRequest)
-	if err != nil {
-		t.Fatalf("unable to query for spend hint of op1: %v", err)
-	}
+	require.NoError(t, err, "unable to query for spend hint of op1")
 	if op1Hint != op1Height {
 		t.Fatalf("expected hint %d, got %d", op1Height, op1Hint)
 	}

 	op2Hint, err := hintCache.QuerySpendHint(ntfn2.HistoricalDispatch.SpendRequest)
-	if err != nil {
-		t.Fatalf("unable to query for spend hint of op2: %v", err)
-	}
+	require.NoError(t, err, "unable to query for spend hint of op2")
 	if op2Hint != op1Height {
 		t.Fatalf("expected hint %d, got %d", op1Height, op2Hint)
 	}
@@ -2269,9 +2109,7 @@ func TestTxNotifierSpendHintCache(t *testing.T) {
 		Transactions: []*wire.MsgTx{spendTx2},
 	})
 	err = n.ConnectTip(block2.Hash(), op2Height, block2.Transactions())
-	if err != nil {
-		t.Fatalf("unable to connect block: %v", err)
-	}
+	require.NoError(t, err, "unable to connect block")
 	if err := n.NotifyHeight(op2Height); err != nil {
 		t.Fatalf("unable to dispatch notifications: %v", err)
 	}
@@ -2280,16 +2118,12 @@ func TestTxNotifierSpendHintCache(t *testing.T) {
 	// being spent within the new block. The first outpoint's spend hint
 	// should remain the same as it's already been spent before.
 	op1Hint, err = hintCache.QuerySpendHint(ntfn1.HistoricalDispatch.SpendRequest)
-	if err != nil {
-		t.Fatalf("unable to query for spend hint of op1: %v", err)
-	}
+	require.NoError(t, err, "unable to query for spend hint of op1")
 	if op1Hint != op1Height {
 		t.Fatalf("expected hint %d, got %d", op1Height, op1Hint)
 	}

 	op2Hint, err = hintCache.QuerySpendHint(ntfn2.HistoricalDispatch.SpendRequest)
-	if err != nil {
-		t.Fatalf("unable to query for spend hint of op2: %v", err)
-	}
+	require.NoError(t, err, "unable to query for spend hint of op2")
 	if op2Hint != op2Height {
 		t.Fatalf("expected hint %d, got %d", op2Height, op2Hint)
 	}
@@ -2305,16 +2139,12 @@ func TestTxNotifierSpendHintCache(t *testing.T) {
 	// included in within the chain. The first outpoint's spend hint should
 	// remain the same.
 	op1Hint, err = hintCache.QuerySpendHint(ntfn1.HistoricalDispatch.SpendRequest)
-	if err != nil {
-		t.Fatalf("unable to query for spend hint of op1: %v", err)
-	}
+	require.NoError(t, err, "unable to query for spend hint of op1")
 	if op1Hint != op1Height {
 		t.Fatalf("expected hint %d, got %d", op1Height, op1Hint)
 	}

 	op2Hint, err = hintCache.QuerySpendHint(ntfn2.HistoricalDispatch.SpendRequest)
-	if err != nil {
-		t.Fatalf("unable to query for spend hint of op2: %v", err)
-	}
+	require.NoError(t, err, "unable to query for spend hint of op2")
 	if op2Hint != op1Height {
 		t.Fatalf("expected hint %d, got %d", op1Height, op2Hint)
 	}
@@ -2340,9 +2170,7 @@ func TestTxNotifierSpendDuringHistoricalRescan(t *testing.T) {
 	// Create a test outpoint and register it for spend notifications.
 	op1 := wire.OutPoint{Index: 1}
 	ntfn1, err := n.RegisterSpend(&op1, testRawScript, 1)
-	if err != nil {
-		t.Fatalf("unable to register spend for op1: %v", err)
-	}
+	require.NoError(t, err, "unable to register spend for op1")

 	// A historical rescan should be initiated from the height hint to the
 	// current height.
@@ -2369,9 +2197,7 @@ func TestTxNotifierSpendDuringHistoricalRescan(t *testing.T) {
 	err = n.ConnectTip(
 		emptyBlock.Hash(), height, emptyBlock.Transactions(),
 	)
-	if err != nil {
-		t.Fatalf("unable to connect block: %v", err)
-	}
+	require.NoError(t, err, "unable to connect block")
 	if err := n.NotifyHeight(height); err != nil {
 		t.Fatalf("unable to dispatch notifications: %v", err)
 	}
@@ -2426,9 +2252,7 @@ func TestTxNotifierSpendDuringHistoricalRescan(t *testing.T) {
 	op1Hint, err := hintCache.QuerySpendHint(
 		ntfn1.HistoricalDispatch.SpendRequest,
 	)
-	if err != nil {
-		t.Fatalf("unable to query for spend hint of op1: %v", err)
-	}
+	require.NoError(t, err, "unable to query for spend hint of op1")
 	if op1Hint != spendHeight {
 		t.Fatalf("expected hint %d, got %d", spendHeight, op1Hint)
 	}
@@ -2445,16 +2269,12 @@ func TestTxNotifierSpendDuringHistoricalRescan(t *testing.T) {
 	// tip while the rescan was in progress, the height hint should not be
 	// updated to the latest height, but stay at the spend height.
 	err = n.UpdateSpendDetails(ntfn1.HistoricalDispatch.SpendRequest, nil)
-	if err != nil {
-		t.Fatalf("unable to update spend details: %v", err)
-	}
+	require.NoError(t, err, "unable to update spend details")

 	op1Hint, err = hintCache.QuerySpendHint(
 		ntfn1.HistoricalDispatch.SpendRequest,
 	)
-	if err != nil {
-		t.Fatalf("unable to query for spend hint of op1: %v", err)
-	}
+	require.NoError(t, err, "unable to query for spend hint of op1")
 	if op1Hint != spendHeight {
 		t.Fatalf("expected hint %d, got %d", spendHeight, op1Hint)
 	}
@@ -2471,9 +2291,7 @@ func TestTxNotifierSpendDuringHistoricalRescan(t *testing.T) {
 		Transactions: []*wire.MsgTx{spendTx2},
 	})
 	err = n.ConnectTip(block2.Hash(), height, block2.Transactions())
-	if err != nil {
-		t.Fatalf("unable to connect block: %v", err)
-	}
+	require.NoError(t, err, "unable to connect block")
 	if err := n.NotifyHeight(height); err != nil {
 		t.Fatalf("unable to dispatch notifications: %v", err)
 	}
@@ -2481,9 +2299,7 @@ func TestTxNotifierSpendDuringHistoricalRescan(t *testing.T) {
 	// The outpoint's spend hint should remain the same as it's already
 	// been spent before.
 	op1Hint, err = hintCache.QuerySpendHint(ntfn1.HistoricalDispatch.SpendRequest)
-	if err != nil {
-		t.Fatalf("unable to query for spend hint of op1: %v", err)
-	}
+	require.NoError(t, err, "unable to query for spend hint of op1")
 	if op1Hint != spendHeight {
 		t.Fatalf("expected hint %d, got %d", spendHeight, op1Hint)
 	}
@@ -2516,9 +2332,7 @@ func TestTxNotifierSpendDuringHistoricalRescan(t *testing.T) {

 	// Finally, check that the height hint is still there, unchanged.
 	op1Hint, err = hintCache.QuerySpendHint(ntfn1.HistoricalDispatch.SpendRequest)
-	if err != nil {
-		t.Fatalf("unable to query for spend hint of op1: %v", err)
-	}
+	require.NoError(t, err, "unable to query for spend hint of op1")
 	if op1Hint != spendHeight {
 		t.Fatalf("expected hint %d, got %d", spendHeight, op1Hint)
 	}
@@ -2537,13 +2351,9 @@ func TestTxNotifierNtfnDone(t *testing.T) {

 	// We'll start by creating two notification requests: one confirmation
 	// and one spend.
 	confNtfn, err := n.RegisterConf(&chainntnfs.ZeroHash, testRawScript, 1, 1)
-	if err != nil {
-		t.Fatalf("unable to register conf ntfn: %v", err)
-	}
+	require.NoError(t, err, "unable to register conf ntfn")
 	spendNtfn, err := n.RegisterSpend(&chainntnfs.ZeroOutPoint, testRawScript, 1)
-	if err != nil {
-		t.Fatalf("unable to register spend: %v", err)
-	}
+	require.NoError(t, err, "unable to register spend")

 	// We'll create two transactions that will satisfy the notification
 	// requests above and include them in the next block of the chain.
@@ -2559,9 +2369,7 @@ func TestTxNotifierNtfnDone(t *testing.T) {
 	})

 	err = n.ConnectTip(block.Hash(), 11, block.Transactions())
-	if err != nil {
-		t.Fatalf("unable to connect block: %v", err)
-	}
+	require.NoError(t, err, "unable to connect block")
 	if err := n.NotifyHeight(11); err != nil {
 		t.Fatalf("unable to dispatch notifications: %v", err)
 	}
@@ -2611,9 +2419,7 @@ func TestTxNotifierNtfnDone(t *testing.T) {
 	// We'll reconnect the block that satisfies both of these requests.
 	// We should see notifications dispatched for both once again.
 	err = n.ConnectTip(block.Hash(), 11, block.Transactions())
-	if err != nil {
-		t.Fatalf("unable to connect block: %v", err)
-	}
+	require.NoError(t, err, "unable to connect block")
 	if err := n.NotifyHeight(11); err != nil {
 		t.Fatalf("unable to dispatch notifications: %v", err)
 	}
@@ -2667,13 +2473,9 @@ func TestTxNotifierTearDown(t *testing.T) {
 	// To begin the test, we'll register for a confirmation and spend
 	// notification.
 	confNtfn, err := n.RegisterConf(&chainntnfs.ZeroHash, testRawScript, 1, 1)
-	if err != nil {
-		t.Fatalf("unable to register conf ntfn: %v", err)
-	}
+	require.NoError(t, err, "unable to register conf ntfn")
 	spendNtfn, err := n.RegisterSpend(&chainntnfs.ZeroOutPoint, testRawScript, 1)
-	if err != nil {
-		t.Fatalf("unable to register spend ntfn: %v", err)
-	}
+	require.NoError(t, err, "unable to register spend ntfn")

 	// With the notifications registered, we'll now tear down the notifier.
 	// The notification channels should be closed for notifications, whether
diff --git a/chanbackup/backup_test.go b/chanbackup/backup_test.go
index 0c0e79c06..511b1081d 100644
--- a/chanbackup/backup_test.go
+++ b/chanbackup/backup_test.go
@@ -9,6 +9,7 @@ import (
 	"github.com/btcsuite/btcd/wire"
 	"github.com/lightningnetwork/lnd/channeldb"
 	"github.com/lightningnetwork/lnd/kvdb"
+	"github.com/stretchr/testify/require"
 )

 type mockChannelSource struct {
@@ -86,13 +87,9 @@ func TestFetchBackupForChan(t *testing.T) {
 	// First, we'll make two channels, only one of them will have all the
 	// information we need to construct set of backups for them.
 	randomChan1, err := genRandomOpenChannelShell()
-	if err != nil {
-		t.Fatalf("unable to generate chan: %v", err)
-	}
+	require.NoError(t, err, "unable to generate chan")
 	randomChan2, err := genRandomOpenChannelShell()
-	if err != nil {
-		t.Fatalf("unable to generate chan: %v", err)
-	}
+	require.NoError(t, err, "unable to generate chan")

 	chanSource := newMockChannelSource()
 	chanSource.chans[randomChan1.FundingOutpoint] = randomChan1
@@ -152,13 +149,9 @@ func TestFetchStaticChanBackups(t *testing.T) {
 	// channel source.
 	const numChans = 2
 	randomChan1, err := genRandomOpenChannelShell()
-	if err != nil {
-		t.Fatalf("unable to generate chan: %v", err)
-	}
+	require.NoError(t, err, "unable to generate chan")
 	randomChan2, err := genRandomOpenChannelShell()
-	if err != nil {
-		t.Fatalf("unable to generate chan: %v", err)
-	}
+	require.NoError(t, err, "unable to generate chan")

 	chanSource := newMockChannelSource()
 	chanSource.chans[randomChan1.FundingOutpoint] = randomChan1
@@ -170,9 +163,7 @@ func TestFetchStaticChanBackups(t *testing.T) {
 	// of backups for all the channels. This should succeed, as all items
 	// are populated within the channel source.
 	backups, err := FetchStaticChanBackups(chanSource, chanSource)
-	if err != nil {
-		t.Fatalf("unable to create chan back ups: %v", err)
-	}
+	require.NoError(t, err, "unable to create chan back ups")
 	if len(backups) != numChans {
 		t.Fatalf("expected %v chans, instead got %v", numChans,
diff --git a/chanbackup/backupfile_test.go b/chanbackup/backupfile_test.go
index 19733c360..4992b1c68 100644
--- a/chanbackup/backupfile_test.go
+++ b/chanbackup/backupfile_test.go
@@ -8,6 +8,8 @@ import (
 	"os"
 	"path/filepath"
 	"testing"
+
+	"github.com/stretchr/testify/require"
 )

 func makeFakePackedMulti() (PackedMulti, error) {
@@ -25,9 +27,7 @@ func assertBackupMatches(t *testing.T, filePath string,
 	t.Helper()

 	packedBackup, err := ioutil.ReadFile(filePath)
-	if err != nil {
-		t.Fatalf("unable to test file: %v", err)
-	}
+	require.NoError(t, err, "unable to test file")

 	if !bytes.Equal(packedBackup, currentBackup) {
 		t.Fatalf("backups don't match after first swap: "+
@@ -53,9 +53,7 @@ func TestUpdateAndSwap(t *testing.T) {
 	t.Parallel()

 	tempTestDir, err := ioutil.TempDir("", "")
-	if err != nil {
-		t.Fatalf("unable to make temp dir: %v", err)
-	}
+	require.NoError(t, err, "unable to make temp dir")
 	defer os.Remove(tempTestDir)

 	testCases := []struct {
@@ -193,9 +191,7 @@ func TestExtractMulti(t *testing.T) {
 	// First, as prep, we'll create a single chan backup, then pack that
 	// fully into a multi backup.
 	channel, err := genRandomOpenChannelShell()
-	if err != nil {
-		t.Fatalf("unable to gen chan: %v", err)
-	}
+	require.NoError(t, err, "unable to gen chan")

 	singleBackup := NewSingle(channel, nil)

@@ -204,24 +200,18 @@ func TestExtractMulti(t *testing.T) {
 		StaticBackups: []Single{singleBackup},
 	}
 	err = unpackedMulti.PackToWriter(&b, keyRing)
-	if err != nil {
-		t.Fatalf("unable to pack to writer: %v", err)
-	}
+	require.NoError(t, err, "unable to pack to writer")

 	packedMulti := PackedMulti(b.Bytes())

 	// Finally, we'll make a new temporary file, then write out the packed
 	// multi directly to to it.
 	tempFile, err := ioutil.TempFile("", "")
-	if err != nil {
-		t.Fatalf("unable to create temp file: %v", err)
-	}
+	require.NoError(t, err, "unable to create temp file")
 	defer os.Remove(tempFile.Name())

 	_, err = tempFile.Write(packedMulti)
-	if err != nil {
-		t.Fatalf("unable to write temp file: %v", err)
-	}
+	require.NoError(t, err, "unable to write temp file")
 	if err := tempFile.Sync(); err != nil {
 		t.Fatalf("unable to sync temp file: %v", err)
 	}
diff --git a/chanbackup/multi_test.go b/chanbackup/multi_test.go
index a6317e09e..0881be60a 100644
--- a/chanbackup/multi_test.go
+++ b/chanbackup/multi_test.go
@@ -4,6 +4,8 @@ import (
 	"bytes"
 	"net"
 	"testing"
+
+	"github.com/stretchr/testify/require"
 )

 // TestMultiPackUnpack...
@@ -126,9 +128,7 @@ func TestPackedMultiUnpack(t *testing.T) {
 	// First, we'll make a new unpacked multi with a random channel.
 	testChannel, err := genRandomOpenChannelShell()
-	if err != nil {
-		t.Fatalf("unable to gen random channel: %v", err)
-	}
+	require.NoError(t, err, "unable to gen random channel")

 	var multi Multi
 	multi.StaticBackups = append(
 		multi.StaticBackups, NewSingle(testChannel, nil),
@@ -143,9 +143,7 @@ func TestPackedMultiUnpack(t *testing.T) {
 	// We should be able to properly unpack this typed packed multi.
 	packedMulti := PackedMulti(b.Bytes())
 	unpackedMulti, err := packedMulti.Unpack(keyRing)
-	if err != nil {
-		t.Fatalf("unable to unpack multi: %v", err)
-	}
+	require.NoError(t, err, "unable to unpack multi")

 	// Finally, the versions should match, and the unpacked singles also
 	// identical.
diff --git a/chanbackup/pubsub_test.go b/chanbackup/pubsub_test.go
index aca52df2d..0f7207e6c 100644
--- a/chanbackup/pubsub_test.go
+++ b/chanbackup/pubsub_test.go
@@ -7,6 +7,7 @@ import (

 	"github.com/btcsuite/btcd/wire"
 	"github.com/lightningnetwork/lnd/keychain"
+	"github.com/stretchr/testify/require"
 )

 type mockSwapper struct {
@@ -157,9 +158,7 @@ func TestSubSwapperIdempotentStartStop(t *testing.T) {
 	swapper := newMockSwapper(keyRing)

 	subSwapper, err := NewSubSwapper(nil, &chanNotifier, keyRing, swapper)
-	if err != nil {
-		t.Fatalf("unable to init subSwapper: %v", err)
-	}
+	require.NoError(t, err, "unable to init subSwapper")

 	if err := subSwapper.Start(); err != nil {
 		t.Fatalf("unable to start swapper: %v", err)
@@ -226,9 +225,7 @@ func TestSubSwapperUpdater(t *testing.T) {
 	subSwapper, err := NewSubSwapper(
 		initialChanSet, chanNotifier, keyRing, swapper,
 	)
-	if err != nil {
-		t.Fatalf("unable to make swapper: %v", err)
-	}
+	require.NoError(t, err, "unable to make swapper")
 	if err := subSwapper.Start(); err != nil {
 		t.Fatalf("unable to start sub swapper: %v", err)
 	}
@@ -241,9 +238,7 @@ func TestSubSwapperUpdater(t *testing.T) {
 	// Now that the sub-swapper is active, we'll notify to add a brand new
 	// channel to the channel state.
 	newChannel, err := genRandomOpenChannelShell()
-	if err != nil {
-		t.Fatalf("unable to create new chan: %v", err)
-	}
+	require.NoError(t, err, "unable to create new chan")

 	// With the new channel created, we'll send a new update to the main
 	// goroutine telling it about this new channel.
diff --git a/chanbackup/recover_test.go b/chanbackup/recover_test.go
index 73dcbf587..12a94a773 100644
--- a/chanbackup/recover_test.go
+++ b/chanbackup/recover_test.go
@@ -7,6 +7,7 @@ import (
 	"testing"

 	"github.com/btcsuite/btcd/btcec/v2"
+	"github.com/stretchr/testify/require"
 )

 type mockChannelRestorer struct {
@@ -108,9 +109,7 @@ func TestUnpackAndRecoverSingles(t *testing.T) {
 	err = UnpackAndRecoverSingles(
 		packedBackups, keyRing, &chanRestorer, &peerConnector,
 	)
-	if err != nil {
-		t.Fatalf("unable to recover chans: %v", err)
-	}
+	require.NoError(t, err, "unable to recover chans")

 	// Both the restorer, and connector should have been called 10 times,
 	// once for each backup.
@@ -204,9 +203,7 @@ func TestUnpackAndRecoverMulti(t *testing.T) {
 	err = UnpackAndRecoverMulti(
 		packedMulti, keyRing, &chanRestorer, &peerConnector,
 	)
-	if err != nil {
-		t.Fatalf("unable to recover chans: %v", err)
-	}
+	require.NoError(t, err, "unable to recover chans")

 	// Both the restorer, and connector should have been called 10 times,
 	// once for each backup.
diff --git a/chanbackup/single_test.go b/chanbackup/single_test.go
index 8bb8d824e..2f0818096 100644
--- a/chanbackup/single_test.go
+++ b/chanbackup/single_test.go
@@ -16,6 +16,7 @@ import (
 	"github.com/lightningnetwork/lnd/keychain"
 	"github.com/lightningnetwork/lnd/lnwire"
 	"github.com/lightningnetwork/lnd/shachain"
+	"github.com/stretchr/testify/require"
 )

 var (
@@ -202,9 +203,7 @@ func TestSinglePackUnpack(t *testing.T) {
 	// contains all the information we need to create a static channel
 	// backup.
 	channel, err := genRandomOpenChannelShell()
-	if err != nil {
-		t.Fatalf("unable to gen open channel: %v", err)
-	}
+	require.NoError(t, err, "unable to gen open channel")

 	singleChanBackup := NewSingle(channel, []net.Addr{addr1, addr2})

@@ -340,9 +339,7 @@ func TestPackedSinglesUnpack(t *testing.T) {
 	// With all singles packed, we'll create the grouped type and attempt
 	// to Unpack all of them in a single go.
 	freshSingles, err := PackedSingles(packedSingles).Unpack(keyRing)
-	if err != nil {
-		t.Fatalf("unable to unpack singles: %v", err)
-	}
+	require.NoError(t, err, "unable to unpack singles")

 	// The set of freshly unpacked singles should exactly match the initial
 	// set of singles that we packed before.
@@ -386,9 +383,7 @@ func TestSinglePackStaticChanBackups(t *testing.T) {
 	// Now that we have all of our singles are created, we'll attempt to
 	// pack them all in a single batch.
 	packedSingleMap, err := PackStaticChanBackups(unpackedSingles, keyRing)
-	if err != nil {
-		t.Fatalf("unable to pack backups: %v", err)
-	}
+	require.NoError(t, err, "unable to pack backups")

 	// With our packed singles obtained, we'll ensure that each of them
 	// match their unpacked counterparts after they themselves have been
@@ -432,9 +427,7 @@ func TestSingleUnconfirmedChannel(t *testing.T) {
 	// we need to create a static channel backup but simulate an
 	// unconfirmed channel by setting the block height to 0.
 	channel, err := genRandomOpenChannelShell()
-	if err != nil {
-		t.Fatalf("unable to gen open channel: %v", err)
-	}
+	require.NoError(t, err, "unable to gen open channel")
 	channel.ShortChannelID.BlockHeight = 0
 	channel.FundingBroadcastHeight = fundingBroadcastHeight

@@ -450,9 +443,7 @@ func TestSingleUnconfirmedChannel(t *testing.T) {
 	}
 	var unpackedSingle Single
 	err = unpackedSingle.UnpackFromReader(&b, keyRing)
-	if err != nil {
-		t.Fatalf("unable to unpack single: %v", err)
-	}
+	require.NoError(t, err, "unable to unpack single")
 	if unpackedSingle.ShortChannelID.BlockHeight != fundingBroadcastHeight {
 		t.Fatalf("invalid block height. got %d expected %d.",
 			unpackedSingle.ShortChannelID.BlockHeight,
diff --git a/channeldb/channel_test.go b/channeldb/channel_test.go
index 35607fd55..3deed674a 100644
--- a/channeldb/channel_test.go
+++ b/channeldb/channel_test.go
@@ -203,9 +203,7 @@ func createTestChannel(t *testing.T, cdb *ChannelStateDB,
 	// Mark the channel as open with the short channel id provided.
 	err = params.channel.MarkAsOpen(params.channel.ShortChannelID)
-	if err != nil {
-		t.Fatalf("unable to mark channel open: %v", err)
-	}
+	require.NoError(t, err, "unable to mark channel open")

 	return params.channel
 }
@@ -213,9 +211,7 @@ func createTestChannelState(t *testing.T, cdb *ChannelStateDB) *OpenChannel {
 	// Simulate 1000 channel updates.
 	producer, err := shachain.NewRevocationProducerFromBytes(key[:])
-	if err != nil {
-		t.Fatalf("could not get producer: %v", err)
-	}
+	require.NoError(t, err, "could not get producer")
 	store := shachain.NewRevocationStore()
 	for i := 0; i < 1; i++ {
 		preImage, err := producer.AtIndex(uint64(i))
@@ -351,9 +347,7 @@ func TestOpenChannelPutGetDelete(t *testing.T) {
 	t.Parallel()

 	fullDB, cleanUp, err := MakeTestDB()
-	if err != nil {
-		t.Fatalf("unable to make test database: %v", err)
-	}
+	require.NoError(t, err, "unable to make test database")
 	defer cleanUp()

 	cdb := fullDB.ChannelStateDB()
@@ -388,9 +382,7 @@ func TestOpenChannelPutGetDelete(t *testing.T) {
 	)

 	openChannels, err := cdb.FetchOpenChannels(state.IdentityPub)
-	if err != nil {
-		t.Fatalf("unable to fetch open channel: %v", err)
-	}
+	require.NoError(t, err, "unable to fetch open channel")

 	newState := openChannels[0]

@@ -405,17 +397,13 @@ func TestOpenChannelPutGetDelete(t *testing.T) {
 	// next revocation for the state machine. This tests the initial
 	// post-funding revocation exchange.
 	nextRevKey, err := btcec.NewPrivateKey()
-	if err != nil {
-		t.Fatalf("unable to create new private key: %v", err)
-	}
+	require.NoError(t, err, "unable to create new private key")
 	if err := state.InsertNextRevocation(nextRevKey.PubKey()); err != nil {
 		t.Fatalf("unable to update revocation: %v", err)
 	}

 	openChannels, err = cdb.FetchOpenChannels(state.IdentityPub)
-	if err != nil {
-		t.Fatalf("unable to fetch open channel: %v", err)
-	}
+	require.NoError(t, err, "unable to fetch open channel")
 	updatedChan := openChannels[0]

 	// Ensure that the revocation was set properly.
@@ -442,9 +430,7 @@ func TestOpenChannelPutGetDelete(t *testing.T) {
 	// As the channel is now closed, attempting to fetch all open channels
 	// for our fake node ID should return an empty slice.
 	openChans, err := cdb.FetchOpenChannels(state.IdentityPub)
-	if err != nil {
-		t.Fatalf("unable to fetch open channels: %v", err)
-	}
+	require.NoError(t, err, "unable to fetch open channels")
 	if len(openChans) != 0 {
 		t.Fatalf("all channels not deleted, found %v", len(openChans))
 	}
@@ -587,9 +573,7 @@ func TestChannelStateTransition(t *testing.T) {
 	t.Parallel()

 	fullDB, cleanUp, err := MakeTestDB()
-	if err != nil {
-		t.Fatalf("unable to make test database: %v", err)
-	}
+	require.NoError(t, err, "unable to make test database")
 	defer cleanUp()

 	cdb := fullDB.ChannelStateDB()
@@ -661,15 +645,11 @@ func TestChannelStateTransition(t *testing.T) {
 	}

 	err = channel.UpdateCommitment(&commitment, unsignedAckedUpdates)
-	if err != nil {
-		t.Fatalf("unable to update commitment: %v", err)
-	}
+	require.NoError(t, err, "unable to update commitment")

 	// Assert that update is correctly written to the database.
 	dbUnsignedAckedUpdates, err := channel.UnsignedAckedUpdates()
-	if err != nil {
-		t.Fatalf("unable to fetch dangling remote updates: %v", err)
-	}
+	require.NoError(t, err, "unable to fetch dangling remote updates")
 	if len(dbUnsignedAckedUpdates) != 1 {
 		t.Fatalf("unexpected number of dangling remote updates")
 	}
@@ -686,14 +666,10 @@ func TestChannelStateTransition(t *testing.T) {
 	// commitment transaction along with the modified signature should all
 	// have been updated.
 	updatedChannel, err := cdb.FetchOpenChannels(channel.IdentityPub)
-	if err != nil {
-		t.Fatalf("unable to fetch updated channel: %v", err)
-	}
+	require.NoError(t, err, "unable to fetch updated channel")
 	assertCommitmentEqual(t, &commitment, &updatedChannel[0].LocalCommitment)
 	numDiskUpdates, err := updatedChannel[0].CommitmentHeight()
-	if err != nil {
-		t.Fatalf("unable to read commitment height from disk: %v", err)
-	}
+	require.NoError(t, err, "unable to read commitment height from disk")
 	if numDiskUpdates != uint64(commitment.CommitHeight) {
 		t.Fatalf("num disk updates doesn't match: %v vs %v",
 			numDiskUpdates, commitment.CommitHeight)
@@ -757,9 +733,7 @@ func TestChannelStateTransition(t *testing.T) {
 	// The commitment tip should now match the commitment that we just
 	// inserted.
 	diskCommitDiff, err := channel.RemoteCommitChainTip()
-	if err != nil {
-		t.Fatalf("unable to fetch commit diff: %v", err)
-	}
+	require.NoError(t, err, "unable to fetch commit diff")
 	if !reflect.DeepEqual(commitDiff, diskCommitDiff) {
 		t.Fatalf("commit diffs don't match: %v vs %v",
 			spew.Sdump(remoteCommit), spew.Sdump(diskCommitDiff))
@@ -775,9 +749,7 @@ func TestChannelStateTransition(t *testing.T) {
 	// by the remote party.
 	channel.RemoteCurrentRevocation = channel.RemoteNextRevocation
 	newPriv, err := btcec.NewPrivateKey()
-	if err != nil {
-		t.Fatalf("unable to generate key: %v", err)
-	}
+	require.NoError(t, err, "unable to generate key")
 	channel.RemoteNextRevocation = newPriv.PubKey()

 	fwdPkg := NewFwdPkg(channel.ShortChanID(), oldRemoteCommit.CommitHeight,
@@ -786,9 +758,7 @@ func TestChannelStateTransition(t *testing.T) {
 	err = channel.AdvanceCommitChainTail(
 		fwdPkg, nil, dummyLocalOutputIndex, dummyRemoteOutIndex,
 	)
-	if err != nil {
-		t.Fatalf("unable to append to revocation log: %v", err)
-	}
+	require.NoError(t, err, "unable to append to revocation log")

 	// At this point, the remote commit chain should be nil, and the posted
 	// remote commitment should match the one we added as a diff above.
@@ -801,9 +771,7 @@ func TestChannelStateTransition(t *testing.T) {
 	diskPrevCommit, _, err := channel.FindPreviousState(
 		oldRemoteCommit.CommitHeight,
 	)
-	if err != nil {
-		t.Fatalf("unable to fetch past delta: %v", err)
-	}
+	require.NoError(t, err, "unable to fetch past delta")

 	// Check the output indexes are saved as expected.
 	require.EqualValues(
@@ -820,9 +788,7 @@ func TestChannelStateTransition(t *testing.T) {
 	// The state number recovered from the tail of the revocation log
 	// should be identical to this current state.
 	logTailHeight, err := channel.revocationLogTailCommitHeight()
-	if err != nil {
-		t.Fatalf("unable to retrieve log: %v", err)
-	}
+	require.NoError(t, err, "unable to retrieve log")
 	if logTailHeight != oldRemoteCommit.CommitHeight {
 		t.Fatal("update number doesn't match")
 	}
@@ -844,17 +810,13 @@ func TestChannelStateTransition(t *testing.T) {
 	err = channel.AdvanceCommitChainTail(
 		fwdPkg, nil, dummyLocalOutputIndex, dummyRemoteOutIndex,
 	)
-	if err != nil {
-		t.Fatalf("unable to append to revocation log: %v", err)
-	}
+	require.NoError(t, err, "unable to append to revocation log")

 	// Once again, fetch the state and ensure it has been properly updated.
 	prevCommit, _, err := channel.FindPreviousState(
 		oldRemoteCommit.CommitHeight,
 	)
-	if err != nil {
-		t.Fatalf("unable to fetch past delta: %v", err)
-	}
+	require.NoError(t, err, "unable to fetch past delta")

 	// Check the output indexes are saved as expected.
 	require.EqualValues(
@@ -869,18 +831,14 @@ func TestChannelStateTransition(t *testing.T) {
 	// Once again, state number recovered from the tail of the revocation
 	// log should be identical to this current state.
 	logTailHeight, err = channel.revocationLogTailCommitHeight()
-	if err != nil {
-		t.Fatalf("unable to retrieve log: %v", err)
-	}
+	require.NoError(t, err, "unable to retrieve log")
 	if logTailHeight != oldRemoteCommit.CommitHeight {
 		t.Fatal("update number doesn't match")
 	}

 	// The revocation state stored on-disk should now also be identical.
 	updatedChannel, err = cdb.FetchOpenChannels(channel.IdentityPub)
-	if err != nil {
-		t.Fatalf("unable to fetch updated channel: %v", err)
-	}
+	require.NoError(t, err, "unable to fetch updated channel")
 	if !channel.RemoteCurrentRevocation.IsEqual(updatedChannel[0].RemoteCurrentRevocation) {
 		t.Fatalf("revocation state was not synced")
 	}
@@ -908,9 +866,7 @@ func TestChannelStateTransition(t *testing.T) {
 	// If we attempt to fetch the target channel again, it shouldn't be
 	// found.
 	channels, err := cdb.FetchOpenChannels(channel.IdentityPub)
-	if err != nil {
-		t.Fatalf("unable to fetch updated channels: %v", err)
-	}
+	require.NoError(t, err, "unable to fetch updated channels")
 	if len(channels) != 0 {
 		t.Fatalf("%v channels, found, but none should be",
 			len(channels))
@@ -934,9 +890,7 @@ func TestFetchPendingChannels(t *testing.T) {
 	t.Parallel()

 	fullDB, cleanUp, err := MakeTestDB()
-	if err != nil {
-		t.Fatalf("unable to make test database: %v", err)
-	}
+	require.NoError(t, err, "unable to make test database")
 	defer cleanUp()

 	cdb := fullDB.ChannelStateDB()
@@ -946,9 +900,7 @@ func TestFetchPendingChannels(t *testing.T) {
 	createTestChannel(t, cdb, pendingHeightOption(broadcastHeight))

 	pendingChannels, err := cdb.FetchPendingChannels()
-	if err != nil {
-		t.Fatalf("unable to list pending channels: %v", err)
-	}
+	require.NoError(t, err, "unable to list pending channels")

 	if len(pendingChannels) != 1 {
 		t.Fatalf("incorrect number of pending channels: expecting %v,"+
@@ -969,9 +921,7 @@ func TestFetchPendingChannels(t *testing.T) {
 		TxPosition: 15,
 	}
 	err = pendingChannels[0].MarkAsOpen(chanOpenLoc)
-	if err != nil {
-		t.Fatalf("unable to mark channel as open: %v", err)
-	}
+	require.NoError(t, err, "unable to mark channel as open")

 	if pendingChannels[0].IsPending {
 		t.Fatalf("channel marked open should no longer be pending")
@@ -986,9 +936,7 @@ func TestFetchPendingChannels(t *testing.T) {
 	// Next, we'll re-fetch the channel to ensure that the open height was
 	// properly set.
 	openChans, err := cdb.FetchAllChannels()
-	if err != nil {
-		t.Fatalf("unable to fetch channels: %v", err)
-	}
+	require.NoError(t, err, "unable to fetch channels")
 	if openChans[0].ShortChanID() != chanOpenLoc {
 		t.Fatalf("channel opening heights don't match: expected %v, "+
 			"got %v", spew.Sdump(openChans[0].ShortChanID()),
@@ -1001,9 +949,7 @@ func TestFetchPendingChannels(t *testing.T) {
 	}

 	pendingChannels, err = cdb.FetchPendingChannels()
-	if err != nil {
-		t.Fatalf("unable to list pending channels: %v", err)
-	}
+	require.NoError(t, err, "unable to list pending channels")

 	if len(pendingChannels) != 0 {
 		t.Fatalf("incorrect number of pending channels: expecting %v,"+
@@ -1015,9 +961,7 @@ func TestFetchClosedChannels(t *testing.T) {
 	t.Parallel()

 	fullDB, cleanUp, err := MakeTestDB()
-	if err != nil {
-		t.Fatalf("unable to make test database: %v", err)
-	}
+	require.NoError(t, err, "unable to make test database")
 	defer cleanUp()

 	cdb := fullDB.ChannelStateDB()
@@ -1046,9 +990,7 @@ func TestFetchClosedChannels(t *testing.T) {
 	// closed. We should get the same result whether querying for pending
 	// channels only, or not.
 	pendingClosed, err := cdb.FetchClosedChannels(true)
-	if err != nil {
-		t.Fatalf("failed fetching closed channels: %v", err)
-	}
+	require.NoError(t, err, "failed fetching closed channels")
 	if len(pendingClosed) != 1 {
 		t.Fatalf("incorrect number of pending closed channels: expecting %v,"+
 			"got %v", 1, len(pendingClosed))
 	}
 	if !reflect.DeepEqual(summary, pendingClosed[0]) {
 		t.Fatalf("database summaries don't match: expected %v got %v",
 			spew.Sdump(summary), spew.Sdump(pendingClosed[0]))
 	}
 	closed, err := cdb.FetchClosedChannels(false)
-	if err != nil {
-		t.Fatalf("failed fetching all closed channels: %v", err)
-	}
+	require.NoError(t, err, "failed fetching all closed channels")
 	if len(closed) != 1 {
 		t.Fatalf("incorrect number of closed channels: expecting %v, "+
 			"got %v", 1, len(closed))
@@ -1072,24 +1012,18 @@ func TestFetchClosedChannels(t *testing.T) {

 	// Mark the channel as fully closed.
 	err = cdb.MarkChanFullyClosed(&state.FundingOutpoint)
-	if err != nil {
-		t.Fatalf("failed fully closing channel: %v", err)
-	}
+	require.NoError(t, err, "failed fully closing channel")

 	// The channel should no longer be considered pending, but should still
 	// be retrieved when fetching all the closed channels.
 	closed, err = cdb.FetchClosedChannels(false)
-	if err != nil {
-		t.Fatalf("failed fetching closed channels: %v", err)
-	}
+	require.NoError(t, err, "failed fetching closed channels")
 	if len(closed) != 1 {
 		t.Fatalf("incorrect number of closed channels: expecting %v, "+
 			"got %v", 1, len(closed))
 	}
 	pendingClose, err := cdb.FetchClosedChannels(true)
-	if err != nil {
-		t.Fatalf("failed fetching channels pending close: %v", err)
-	}
+	require.NoError(t, err, "failed fetching channels pending close")
 	if len(pendingClose) != 0 {
 		t.Fatalf("incorrect number of closed channels: expecting %v, "+
 			"got %v", 0, len(closed))
@@ -1108,9 +1042,7 @@ func TestFetchWaitingCloseChannels(t *testing.T) {
 	// them will have their funding transaction confirmed on-chain, while
 	// the other one will remain unconfirmed.
 	fullDB, cleanUp, err := MakeTestDB()
-	if err != nil {
-		t.Fatalf("unable to make test database: %v", err)
-	}
+	require.NoError(t, err, "unable to make test database")
 	defer cleanUp()

 	cdb := fullDB.ChannelStateDB()
@@ -1172,9 +1104,7 @@ func TestFetchWaitingCloseChannels(t *testing.T) {
 	// database. We should expect to see both channels above, even if any of
 	// them haven't had their funding transaction confirm on-chain.
 	waitingCloseChannels, err := cdb.FetchWaitingCloseChannels()
-	if err != nil {
-		t.Fatalf("unable to fetch all waiting close channels: %v", err)
-	}
+	require.NoError(t, err, "unable to fetch all waiting close channels")
 	if len(waitingCloseChannels) != numChannels {
 		t.Fatalf("expected %d channels waiting to be closed, got %d", 2,
			len(waitingCloseChannels))
@@ -1225,9 +1155,7 @@ func TestRefreshShortChanID(t *testing.T) {
 	t.Parallel()

 	fullDB, cleanUp, err := MakeTestDB()
-	if err != nil {
-		t.Fatalf("unable to make test database: %v", err)
-	}
+	require.NoError(t, err, "unable to make test database")
 	defer cleanUp()

 	cdb := fullDB.ChannelStateDB()
@@ -1262,9 +1190,7 @@ func TestRefreshShortChanID(t *testing.T) {
 	}

 	err = state.MarkAsOpen(chanOpenLoc)
-	if err != nil {
-		t.Fatalf("unable to mark channel open: %v", err)
-	}
+	require.NoError(t, err, "unable to mark channel open")

 	// The short_chan_id of the receiver to MarkAsOpen should reflect the
 	// open location, but the other pending channel should remain unchanged.
@@ -1285,9 +1211,7 @@ func TestRefreshShortChanID(t *testing.T) {

 	// Now, refresh the short channel ID of the pending channel.
 	err = pendingChannel.RefreshShortChanID()
-	if err != nil {
-		t.Fatalf("unable to refresh short_chan_id: %v", err)
-	}
+	require.NoError(t, err, "unable to refresh short_chan_id")

 	// This should result in both OpenChannel's now having the same
 	// ShortChanID.
@@ -1447,9 +1371,7 @@ func TestCloseChannelStatus(t *testing.T) {
 	histChan, err := channel.Db.FetchHistoricalChannel(
 		&channel.FundingOutpoint,
 	)
-	if err != nil {
-		t.Fatalf("unexpected error: %v", err)
-	}
+	require.NoError(t, err, "unexpected error")

 	if !histChan.HasChanStatus(ChanStatusRemoteCloseInitiator) {
 		t.Fatalf("channel should have status")
diff --git a/channeldb/db_test.go b/channeldb/db_test.go
index b741a2ad7..0a6f2a9ff 100644
--- a/channeldb/db_test.go
+++ b/channeldb/db_test.go
@@ -33,23 +33,17 @@ func TestOpenWithCreate(t *testing.T) {
 	// First, create a temporary directory to be used for the duration of
 	// this test.
 	tempDirName, err := ioutil.TempDir("", "channeldb")
-	if err != nil {
-		t.Fatalf("unable to create temp dir: %v", err)
-	}
+	require.NoError(t, err, "unable to create temp dir")
 	defer os.RemoveAll(tempDirName)

 	// Next, open thereby creating channeldb for the first time.
 	dbPath := filepath.Join(tempDirName, "cdb")
 	backend, cleanup, err := kvdb.GetTestBackend(dbPath, "cdb")
-	if err != nil {
-		t.Fatalf("unable to get test db backend: %v", err)
-	}
+	require.NoError(t, err, "unable to get test db backend")
 	defer cleanup()

 	cdb, err := CreateWithBackend(backend)
-	if err != nil {
-		t.Fatalf("unable to create channeldb: %v", err)
-	}
+	require.NoError(t, err, "unable to create channeldb")
 	if err := cdb.Close(); err != nil {
 		t.Fatalf("unable to close channeldb: %v", err)
 	}
@@ -62,9 +56,7 @@ func TestOpenWithCreate(t *testing.T) {
 	// Now, reopen the same db in dry run migration mode. Since we have not
 	// applied any migrations, this should ignore the flag and not fail.
 	cdb, err = Open(dbPath, OptionDryRunMigration(true))
-	if err != nil {
-		t.Fatalf("unable to create channeldb: %v", err)
-	}
+	require.NoError(t, err, "unable to create channeldb")
 	if err := cdb.Close(); err != nil {
 		t.Fatalf("unable to close channeldb: %v", err)
 	}
@@ -79,23 +71,17 @@ func TestWipe(t *testing.T) {
 	// First, create a temporary directory to be used for the duration of
 	// this test.
 	tempDirName, err := ioutil.TempDir("", "channeldb")
-	if err != nil {
-		t.Fatalf("unable to create temp dir: %v", err)
-	}
+	require.NoError(t, err, "unable to create temp dir")
 	defer os.RemoveAll(tempDirName)

 	// Next, open thereby creating channeldb for the first time.
 	dbPath := filepath.Join(tempDirName, "cdb")
 	backend, cleanup, err := kvdb.GetTestBackend(dbPath, "cdb")
-	if err != nil {
-		t.Fatalf("unable to get test db backend: %v", err)
-	}
+	require.NoError(t, err, "unable to get test db backend")
 	defer cleanup()

 	fullDB, err := CreateWithBackend(backend)
-	if err != nil {
-		t.Fatalf("unable to create channeldb: %v", err)
-	}
+	require.NoError(t, err, "unable to create channeldb")
 	defer fullDB.Close()

 	if err := fullDB.Wipe(); err != nil {
@@ -122,9 +108,7 @@ func TestFetchClosedChannelForID(t *testing.T) {
 	const numChans = 101

 	fullDB, cleanUp, err := MakeTestDB()
-	if err != nil {
-		t.Fatalf("unable to make test database: %v", err)
-	}
+	require.NoError(t, err, "unable to make test database")
 	defer cleanUp()

 	cdb := fullDB.ChannelStateDB()
@@ -195,9 +179,7 @@ func TestAddrsForNode(t *testing.T) {
 	t.Parallel()

 	fullDB, cleanUp, err := MakeTestDB()
-	if err != nil {
-		t.Fatalf("unable to make test database: %v", err)
-	}
+	require.NoError(t, err, "unable to make test database")
 	defer cleanUp()

 	graph := fullDB.ChannelGraph()
@@ -206,9 +188,7 @@ func TestAddrsForNode(t *testing.T) {
 	// node, but this node will only have half the number of addresses it
 	// usually does.
 	testNode, err := createTestVertex(fullDB)
-	if err != nil {
-		t.Fatalf("unable to create test node: %v", err)
-	}
+	require.NoError(t, err, "unable to create test node")
 	testNode.Addresses = []net.Addr{testAddr}
 	if err := graph.SetSourceNode(testNode); err != nil {
 		t.Fatalf("unable to set source node: %v", err)
@@ -217,9 +197,7 @@ func TestAddrsForNode(t *testing.T) {
 	// Next, we'll make a link node with the same pubkey, but with an
 	// additional address.
 	nodePub, err := testNode.PubKey()
-	if err != nil {
-		t.Fatalf("unable to recv node pub: %v", err)
-	}
+	require.NoError(t, err, "unable to recv node pub")
 	linkNode := NewLinkNode(
 		fullDB.channelStateDB.linkNodeDB, wire.MainNet, nodePub,
 		anotherAddr,
 	)
@@ -231,9 +209,7 @@ func TestAddrsForNode(t *testing.T) {
 	// Now that we've created a link node, as well as a vertex for the
 	// node, we'll query for all its addresses.
 	nodeAddrs, err := fullDB.AddrsForNode(nodePub)
-	if err != nil {
-		t.Fatalf("unable to obtain node addrs: %v", err)
-	}
+	require.NoError(t, err, "unable to obtain node addrs")

 	expectedAddrs := make(map[string]struct{})
 	expectedAddrs[testAddr.String()] = struct{}{}
@@ -257,9 +233,7 @@ func TestFetchChannel(t *testing.T) {
 	t.Parallel()

 	fullDB, cleanUp, err := MakeTestDB()
-	if err != nil {
-		t.Fatalf("unable to make test database: %v", err)
-	}
+	require.NoError(t, err, "unable to make test database")
 	defer cleanUp()

 	cdb := fullDB.ChannelStateDB()
@@ -269,9 +243,7 @@ func TestFetchChannel(t *testing.T) {
 	// Next, attempt to fetch the channel by its chan point.
 	dbChannel, err := cdb.FetchChannel(nil, channelState.FundingOutpoint)
-	if err != nil {
-		t.Fatalf("unable to fetch channel: %v", err)
-	}
+	require.NoError(t, err, "unable to fetch channel")

 	// The decoded channel state should be identical to what we stored
 	// above.
@@ -283,9 +255,7 @@ func TestFetchChannel(t *testing.T) {
 	// If we attempt to query for a non-exist ante channel, then we should
 	// get an error.
 	channelState2 := createTestChannelState(t, cdb)
-	if err != nil {
-		t.Fatalf("unable to create channel state: %v", err)
-	}
+	require.NoError(t, err, "unable to create channel state")
 	channelState2.FundingOutpoint.Index ^= 1

 	_, err = cdb.FetchChannel(nil, channelState2.FundingOutpoint)
@@ -361,9 +331,7 @@ func TestRestoreChannelShells(t *testing.T) {
 	t.Parallel()

 	fullDB, cleanUp, err := MakeTestDB()
-	if err != nil {
-		t.Fatalf("unable to make test database: %v", err)
-	}
+	require.NoError(t, err, "unable to make test database")
 	defer cleanUp()

 	cdb := fullDB.ChannelStateDB()
@@ -372,9 +340,7 @@ func TestRestoreChannelShells(t *testing.T) {
 	// amount of information required for us to initiate the data loss
 	// protection feature.
 	channelShell, err := genRandomChannelShell()
-	if err != nil {
-		t.Fatalf("unable to gen channel shell: %v", err)
-	}
+	require.NoError(t, err, "unable to gen channel shell")

 	// With the channel shell constructed, we'll now insert it into the
 	// database with the restoration method.
@@ -388,9 +354,7 @@ func TestRestoreChannelShells(t *testing.T) {
 	// First, we'll attempt to query for all channels that we have with the
 	// node public key that was restored.
 	nodeChans, err := cdb.FetchOpenChannels(channelShell.Chan.IdentityPub)
-	if err != nil {
-		t.Fatalf("unable find channel: %v", err)
-	}
+	require.NoError(t, err, "unable to find channel")

 	// We should now find a single channel from the database.
 	if len(nodeChans) != 1 {
@@ -432,18 +396,14 @@ func TestRestoreChannelShells(t *testing.T) {
 	// We should also be able to find the channel if we query for it
 	// directly.
 	_, err = cdb.FetchChannel(nil, channelShell.Chan.FundingOutpoint)
-	if err != nil {
-		t.Fatalf("unable to fetch channel: %v", err)
-	}
+	require.NoError(t, err, "unable to fetch channel")

 	// We should also be able to find the link node that was inserted by
 	// its public key.
 	linkNode, err := fullDB.channelStateDB.linkNodeDB.FetchLinkNode(
 		channelShell.Chan.IdentityPub,
 	)
-	if err != nil {
-		t.Fatalf("unable to fetch link node: %v", err)
-	}
+	require.NoError(t, err, "unable to fetch link node")

 	// The node should have the same address, as specified in the channel
 	// shell.
@@ -461,9 +421,7 @@ func TestAbandonChannel(t *testing.T) {
 	t.Parallel()

 	fullDB, cleanUp, err := MakeTestDB()
-	if err != nil {
-		t.Fatalf("unable to make test database: %v", err)
-	}
+	require.NoError(t, err, "unable to make test database")
 	defer cleanUp()

 	cdb := fullDB.ChannelStateDB()
@@ -483,9 +441,7 @@ func TestAbandonChannel(t *testing.T) {
 	// We should now be able to abandon the channel without any errors.
 	closeHeight := uint32(11)
 	err = cdb.AbandonChannel(&chanState.FundingOutpoint, closeHeight)
-	if err != nil {
-		t.Fatalf("unable to abandon channel: %v", err)
-	}
+	require.NoError(t, err, "unable to abandon channel")

 	// At this point, the channel should no longer be found in the set of
 	// open channels.
@@ -497,16 +453,12 @@ func TestAbandonChannel(t *testing.T) {
 	// However we should be able to retrieve a close channel summary for
 	// the channel.
 	_, err = cdb.FetchClosedChannel(&chanState.FundingOutpoint)
-	if err != nil {
-		t.Fatalf("unable to fetch closed channel: %v", err)
-	}
+	require.NoError(t, err, "unable to fetch closed channel")

 	// Finally, if we attempt to abandon the channel again, we should get a
 	// nil error as the channel has already been abandoned.
 	err = cdb.AbandonChannel(&chanState.FundingOutpoint, closeHeight)
-	if err != nil {
-		t.Fatalf("unable to abandon channel: %v", err)
-	}
+	require.NoError(t, err, "unable to abandon channel")
 }

 // TestFetchChannels tests the filtering of open channels in fetchChannels.
@@ -707,9 +659,7 @@ func TestFetchChannels(t *testing.T) {
 // TestFetchHistoricalChannel tests lookup of historical channels.
 func TestFetchHistoricalChannel(t *testing.T) {
 	fullDB, cleanUp, err := MakeTestDB()
-	if err != nil {
-		t.Fatalf("unable to make test database: %v", err)
-	}
+	require.NoError(t, err, "unable to make test database")
 	defer cleanUp()

 	cdb := fullDB.ChannelStateDB()
@@ -737,9 +687,7 @@ func TestFetchHistoricalChannel(t *testing.T) {
 	}

 	histChannel, err := cdb.FetchHistoricalChannel(&channel.FundingOutpoint)
-	if err != nil {
-		t.Fatalf("unexpected error getting channel: %v", err)
-	}
+	require.NoError(t, err, "unexpected error getting channel")

 	// FetchHistoricalChannel will attach the cdb to channel.Db, we set it
 	// here so that we can check that all other fields on the channel equal
diff --git a/channeldb/forwarding_log_test.go b/channeldb/forwarding_log_test.go
index cd21f12e2..db55b8438 100644
--- a/channeldb/forwarding_log_test.go
+++ b/channeldb/forwarding_log_test.go
@@ -9,6 +9,7 @@ import (
 	"github.com/davecgh/go-spew/spew"
 	"github.com/lightningnetwork/lnd/lnwire"
 	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
 )

 // TestForwardingLogBasicStorageAndQuery tests that we're able to store and
@@ -20,9 +21,7 @@ func TestForwardingLogBasicStorageAndQuery(t *testing.T) {
 	// forwarding event log that we'll be using for the duration of the
 	// test.
 	db, cleanUp, err := MakeTestDB()
-	if err != nil {
-		t.Fatalf("unable to make test db: %v", err)
-	}
+	require.NoError(t, err, "unable to make test db")
 	defer cleanUp()

 	log := ForwardingLog{
@@ -63,9 +62,7 @@ func TestForwardingLogBasicStorageAndQuery(t *testing.T) {
 		NumMaxEvents: 1000,
 	}
 	timeSlice, err := log.Query(eventQuery)
-	if err != nil {
-		t.Fatalf("unable to query for events: %v", err)
-	}
+	require.NoError(t, err, "unable to query for events")

 	// The set of returned events should match identically, as they should
 	// be returned in sorted order.
@@ -93,9 +90,7 @@ func TestForwardingLogQueryOptions(t *testing.T) {
 	// forwarding event log that we'll be using for the duration of the
 	// test.
 	db, cleanUp, err := MakeTestDB()
-	if err != nil {
-		t.Fatalf("unable to make test db: %v", err)
-	}
+	require.NoError(t, err, "unable to make test db")
 	defer cleanUp()

 	log := ForwardingLog{
@@ -136,9 +131,7 @@ func TestForwardingLogQueryOptions(t *testing.T) {
 		NumMaxEvents: 10,
 	}
 	timeSlice, err := log.Query(eventQuery)
-	if err != nil {
-		t.Fatalf("unable to query for events: %v", err)
-	}
+	require.NoError(t, err, "unable to query for events")

 	// We should get exactly 10 events back.
 	if len(timeSlice.ForwardingEvents) != 10 {
@@ -164,9 +157,7 @@ func TestForwardingLogQueryOptions(t *testing.T) {
 	// more events, that are the last 10 events we wrote.
 	eventQuery.IndexOffset = 10
 	timeSlice, err = log.Query(eventQuery)
-	if err != nil {
-		t.Fatalf("unable to query for events: %v", err)
-	}
+	require.NoError(t, err, "unable to query for events")

 	// We should get exactly 10 events back once again.
 	if len(timeSlice.ForwardingEvents) != 10 {
@@ -199,9 +190,7 @@ func TestForwardingLogQueryLimit(t *testing.T) {
 	// forwarding event log that we'll be using for the duration of the
 	// test.
 	db, cleanUp, err := MakeTestDB()
-	if err != nil {
-		t.Fatalf("unable to make test db: %v", err)
-	}
+	require.NoError(t, err, "unable to make test db")
 	defer cleanUp()

 	log := ForwardingLog{
@@ -242,9 +231,7 @@ func TestForwardingLogQueryLimit(t *testing.T) {
 		NumMaxEvents: 100,
 	}
 	timeSlice, err := log.Query(eventQuery)
-	if err != nil {
-		t.Fatalf("unable to query for events: %v", err)
-	}
+	require.NoError(t, err, "unable to query for events")

 	// We should get exactly 100 events back.
 	if len(timeSlice.ForwardingEvents) != 100 {
@@ -315,9 +302,7 @@ func TestForwardingLogStoreEvent(t *testing.T) {
 	// forwarding event log that we'll be using for the duration of the
 	// test.
 	db, cleanUp, err := MakeTestDB()
-	if err != nil {
-		t.Fatalf("unable to make test db: %v", err)
-	}
+	require.NoError(t, err, "unable to make test db")
 	defer cleanUp()

 	log := ForwardingLog{
@@ -362,9 +347,7 @@ func TestForwardingLogStoreEvent(t *testing.T) {
 		NumMaxEvents: uint32(numEvents * 3),
 	}
 	timeSlice, err := log.Query(eventQuery)
-	if err != nil {
-		t.Fatalf("unable to query for events: %v", err)
-	}
+	require.NoError(t, err, "unable to query for events")

 	// We should get exactly 40 events back.
 	if len(timeSlice.ForwardingEvents) != numEvents*2 {
diff --git a/channeldb/forwarding_package_test.go b/channeldb/forwarding_package_test.go
index a1a22fdf6..ddfb0a706 100644
--- a/channeldb/forwarding_package_test.go
+++ b/channeldb/forwarding_package_test.go
@@ -855,9 +855,7 @@ func makeFwdPkgDB(t *testing.T, path string) kvdb.Backend { // nolint:unparam
 	bdb, err := kvdb.Create(
 		kvdb.BoltBackendName, path, true, kvdb.DefaultDBTimeout,
 	)
-	if err != nil {
-		t.Fatalf("unable to open boltdb: %v", err)
-	}
+	require.NoError(t, err, "unable to open boltdb")

 	return bdb
 }
diff --git a/channeldb/graph_test.go b/channeldb/graph_test.go
index 72032ebfd..d640c1ddb 100644
--- a/channeldb/graph_test.go
+++ b/channeldb/graph_test.go
@@ -129,9 +129,7 @@ func TestNodeInsertionAndDeletion(t *testing.T) {
 	graph, cleanUp, err := MakeTestGraph()
 	defer cleanUp()
-	if err != nil {
-		t.Fatalf("unable to make test database: %v", err)
-	}
+	require.NoError(t, err, "unable to make test database")

 	// We'd like to test basic insertion/deletion for vertexes from the
 	// graph, so we'll create a test vertex to start with.
@@ -158,9 +156,7 @@ func TestNodeInsertionAndDeletion(t *testing.T) {
 	// Next, fetch the node from the database to ensure everything was
 	// serialized properly.
 	dbNode, err := graph.FetchLightningNode(testPub)
-	if err != nil {
-		t.Fatalf("unable to locate node: %v", err)
-	}
+	require.NoError(t, err, "unable to locate node")

 	if _, exists, err := graph.HasLightningNode(dbNode.PubKeyBytes); err != nil {
 		t.Fatalf("unable to query for node: %v", err)
@@ -195,9 +191,7 @@ func TestPartialNode(t *testing.T) {
 	graph, cleanUp, err := MakeTestGraph()
 	defer cleanUp()
-	if err != nil {
-		t.Fatalf("unable to make test database: %v", err)
-	}
+	require.NoError(t, err, "unable to make test database")

 	// We want to be able to insert nodes into the graph that only has the
 	// PubKey set.
@@ -214,9 +208,7 @@ func TestPartialNode(t *testing.T) {
 	// Next, fetch the node from the database to ensure everything was
 	// serialized properly.
 	dbNode, err := graph.FetchLightningNode(testPub)
-	if err != nil {
-		t.Fatalf("unable to locate node: %v", err)
-	}
+	require.NoError(t, err, "unable to locate node")

 	if _, exists, err := graph.HasLightningNode(dbNode.PubKeyBytes); err != nil {
 		t.Fatalf("unable to query for node: %v", err)
@@ -257,16 +249,12 @@ func TestAliasLookup(t *testing.T) {
 	graph, cleanUp, err := MakeTestGraph()
 	defer cleanUp()
-	if err != nil {
-		t.Fatalf("unable to make test database: %v", err)
-	}
+	require.NoError(t, err, "unable to make test database")

 	// We'd like to test the alias index within the database, so first
 	// create a new test node.
 	testNode, err := createTestVertex(graph.db)
-	if err != nil {
-		t.Fatalf("unable to create test node: %v", err)
-	}
+	require.NoError(t, err, "unable to create test node")

 	// Add the node to the graph's database, this should also insert an
 	// entry into the alias index for this node.
@@ -277,13 +265,9 @@ func TestAliasLookup(t *testing.T) {
 	// Next, attempt to lookup the alias. The alias should exactly match
 	// the one which the test node was assigned.
 	nodePub, err := testNode.PubKey()
-	if err != nil {
-		t.Fatalf("unable to generate pubkey: %v", err)
-	}
+	require.NoError(t, err, "unable to generate pubkey")
 	dbAlias, err := graph.LookupAlias(nodePub)
-	if err != nil {
-		t.Fatalf("unable to find alias: %v", err)
-	}
+	require.NoError(t, err, "unable to find alias")
 	if dbAlias != testNode.Alias {
 		t.Fatalf("aliases don't match, expected %v got %v",
			testNode.Alias, dbAlias)
@@ -291,13 +275,9 @@ func TestAliasLookup(t *testing.T) {
 	// Ensure that looking up a non-existent alias results in an error.
 	node, err := createTestVertex(graph.db)
-	if err != nil {
-		t.Fatalf("unable to create test node: %v", err)
-	}
+	require.NoError(t, err, "unable to create test node")
 	nodePub, err = node.PubKey()
-	if err != nil {
-		t.Fatalf("unable to generate pubkey: %v", err)
-	}
+	require.NoError(t, err, "unable to generate pubkey")
 	_, err = graph.LookupAlias(nodePub)
 	if err != ErrNodeAliasNotFound {
 		t.Fatalf("alias lookup should fail for non-existent pubkey")
@@ -308,17 +288,13 @@ func TestSourceNode(t *testing.T) {
 	t.Parallel()

 	graph, cleanUp, err := MakeTestGraph()
-	if err != nil {
-		t.Fatalf("unable to make test database: %v", err)
-	}
+	require.NoError(t, err, "unable to make test database")
 	defer cleanUp()

 	// We'd like to test the setting/getting of the source node, so we
 	// first create a fake node to use within the test.
 	testNode, err := createTestVertex(graph.db)
-	if err != nil {
-		t.Fatalf("unable to create test node: %v", err)
-	}
+	require.NoError(t, err, "unable to create test node")

 	// Attempt to fetch the source node, this should return an error as the
 	// source node hasn't yet been set.
@@ -335,9 +311,7 @@ func TestSourceNode(t *testing.T) {
 	// Retrieve the source node from the database, it should exactly match
 	// the one we set above.
 	sourceNode, err := graph.SourceNode()
-	if err != nil {
-		t.Fatalf("unable to fetch source node: %v", err)
-	}
+	require.NoError(t, err, "unable to fetch source node")
 	if err := compareNodes(testNode, sourceNode); err != nil {
 		t.Fatalf("nodes don't match: %v", err)
 	}
@@ -348,20 +322,14 @@ func TestEdgeInsertionDeletion(t *testing.T) {
 	graph, cleanUp, err := MakeTestGraph()
 	defer cleanUp()
-	if err != nil {
-		t.Fatalf("unable to make test database: %v", err)
-	}
+	require.NoError(t, err, "unable to make test database")

 	// We'd like to test the insertion/deletion of edges, so we create two
 	// vertexes to connect.
 	node1, err := createTestVertex(graph.db)
-	if err != nil {
-		t.Fatalf("unable to create test node: %v", err)
-	}
+	require.NoError(t, err, "unable to create test node")
 	node2, err := createTestVertex(graph.db)
-	if err != nil {
-		t.Fatalf("unable to create test node: %v", err)
-	}
+	require.NoError(t, err, "unable to create test node")

 	// In addition to the fake vertexes we create some fake channel
 	// identifiers.
@@ -374,13 +342,9 @@ func TestEdgeInsertionDeletion(t *testing.T) {
 	// Add the new edge to the database, this should proceed without any
 	// errors.
 	node1Pub, err := node1.PubKey()
-	if err != nil {
-		t.Fatalf("unable to generate node key: %v", err)
-	}
+	require.NoError(t, err, "unable to generate node key")
 	node2Pub, err := node2.PubKey()
-	if err != nil {
-		t.Fatalf("unable to generate node key: %v", err)
-	}
+	require.NoError(t, err, "unable to generate node key")
 	edgeInfo := ChannelEdgeInfo{
 		ChannelID: chanID,
 		ChainHash: key,
@@ -483,14 +447,10 @@ func TestDisconnectBlockAtHeight(t *testing.T) {
 	graph, cleanUp, err := MakeTestGraph()
 	defer cleanUp()
-	if err != nil {
-		t.Fatalf("unable to make test database: %v", err)
-	}
+	require.NoError(t, err, "unable to make test database")

 	sourceNode, err := createTestVertex(graph.db)
-	if err != nil {
-		t.Fatalf("unable to create source node: %v", err)
-	}
+	require.NoError(t, err, "unable to create source node")
 	if err := graph.SetSourceNode(sourceNode); err != nil {
 		t.Fatalf("unable to set source node: %v", err)
 	}
@@ -498,13 +458,9 @@ func TestDisconnectBlockAtHeight(t *testing.T) {
 	// We'd like to test the insertion/deletion of edges, so we create two
 	// vertexes to connect.
 	node1, err := createTestVertex(graph.db)
-	if err != nil {
-		t.Fatalf("unable to create test node: %v", err)
-	}
+	require.NoError(t, err, "unable to create test node")
 	node2, err := createTestVertex(graph.db)
-	if err != nil {
-		t.Fatalf("unable to create test node: %v", err)
-	}
+	require.NoError(t, err, "unable to create test node")

 	// In addition to the fake vertexes we create some fake channel
 	// identifiers.
@@ -515,16 +471,12 @@ func TestDisconnectBlockAtHeight(t *testing.T) {
 	// Prune the graph a few times to make sure we have entries in the
 	// prune log.
 	_, err = graph.PruneGraph(spendOutputs, &blockHash, 155)
-	if err != nil {
-		t.Fatalf("unable to prune graph: %v", err)
-	}
+	require.NoError(t, err, "unable to prune graph")
 	var blockHash2 chainhash.Hash
 	copy(blockHash2[:], bytes.Repeat([]byte{2}, 32))

 	_, err = graph.PruneGraph(spendOutputs, &blockHash2, 156)
-	if err != nil {
-		t.Fatalf("unable to prune graph: %v", err)
-	}
+	require.NoError(t, err, "unable to prune graph")

 	// We'll create 3 almost identical edges, so first create a helper
 	// method containing all logic for doing so.
@@ -585,9 +537,7 @@ func TestDisconnectBlockAtHeight(t *testing.T) {
 	// The two first edges should be removed from the db.
 	_, _, has, isZombie, err := graph.HasChannelEdge(edgeInfo.ChannelID)
-	if err != nil {
-		t.Fatalf("unable to query for edge: %v", err)
-	}
+	require.NoError(t, err, "unable to query for edge")
 	if has {
 		t.Fatalf("edge1 was not pruned from the graph")
 	}
@@ -595,9 +545,7 @@ func TestDisconnectBlockAtHeight(t *testing.T) {
 		t.Fatal("reorged edge1 should not be marked as zombie")
 	}
 	_, _, has, isZombie, err = graph.HasChannelEdge(edgeInfo2.ChannelID)
-	if err != nil {
-		t.Fatalf("unable to query for edge: %v", err)
-	}
+	require.NoError(t, err, "unable to query for edge")
 	if has {
 		t.Fatalf("edge2 was not pruned from the graph")
 	}
@@ -607,9 +555,7 @@ func TestDisconnectBlockAtHeight(t *testing.T) {
 	// Edge 3 should not be removed.
 	_, _, has, isZombie, err = graph.HasChannelEdge(edgeInfo3.ChannelID)
-	if err != nil {
-		t.Fatalf("unable to query for edge: %v", err)
-	}
+	require.NoError(t, err, "unable to query for edge")
 	if !has {
 		t.Fatalf("edge3 was pruned from the graph")
 	}
@@ -620,9 +566,7 @@ func TestDisconnectBlockAtHeight(t *testing.T) {
 	// PruneTip should be set to the blockHash we specified for the block
 	// at height 155.
 	hash, h, err := graph.PruneTip()
-	if err != nil {
-		t.Fatalf("unable to get prune tip: %v", err)
-	}
+	require.NoError(t, err, "unable to get prune tip")
 	if !blockHash.IsEqual(hash) {
 		t.Fatalf("expected best block to be %x, was %x", blockHash, hash)
 	}
@@ -775,24 +719,18 @@ func TestEdgeInfoUpdates(t *testing.T) {
 	graph, cleanUp, err := MakeTestGraph()
 	defer cleanUp()
-	if err != nil {
-		t.Fatalf("unable to make test database: %v", err)
-	}
+	require.NoError(t, err, "unable to make test database")

 	// We'd like to test the update of edges inserted into the database, so
 	// we create two vertexes to connect.
 	node1, err := createTestVertex(graph.db)
-	if err != nil {
-		t.Fatalf("unable to create test node: %v", err)
-	}
+	require.NoError(t, err, "unable to create test node")
 	if err := graph.AddLightningNode(node1); err != nil {
 		t.Fatalf("unable to add node: %v", err)
 	}
 	assertNodeInCache(t, graph, node1, testFeatures)
 	node2, err := createTestVertex(graph.db)
-	if err != nil {
-		t.Fatalf("unable to create test node: %v", err)
-	}
+	require.NoError(t, err, "unable to create test node")
 	if err := graph.AddLightningNode(node2); err != nil {
 		t.Fatalf("unable to add node: %v", err)
 	}
 	assertNodeInCache(t, graph, node2, testFeatures)
@@ -831,9 +769,7 @@ func TestEdgeInfoUpdates(t *testing.T) {
 	// Check for existence of the edge within the database, it should be
 	// found.
 	_, _, found, isZombie, err := graph.HasChannelEdge(chanID)
-	if err != nil {
-		t.Fatalf("unable to query for edge: %v", err)
-	}
+	require.NoError(t, err, "unable to query for edge")
 	if !found {
 		t.Fatalf("graph should have of inserted edge")
 	}
@@ -844,9 +780,7 @@ func TestEdgeInfoUpdates(t *testing.T) {
 	// We should also be able to retrieve the channelID only knowing the
 	// channel point of the channel.
 	dbChanID, err := graph.ChannelID(&outpoint)
-	if err != nil {
-		t.Fatalf("unable to retrieve channel ID: %v", err)
-	}
+	require.NoError(t, err, "unable to retrieve channel ID")
 	if dbChanID != chanID {
 		t.Fatalf("chan ID's mismatch, expected %v got %v",
			dbChanID, chanID)
@@ -855,9 +789,7 @@ func TestEdgeInfoUpdates(t *testing.T) {
 	// With the edges inserted, perform some queries to ensure that they've
 	// been inserted properly.
 	dbEdgeInfo, dbEdge1, dbEdge2, err := graph.FetchChannelEdgesByID(chanID)
-	if err != nil {
-		t.Fatalf("unable to fetch channel by ID: %v", err)
-	}
+	require.NoError(t, err, "unable to fetch channel by ID")
 	if err := compareEdgePolicies(dbEdge1, edge1); err != nil {
 		t.Fatalf("edge doesn't match: %v", err)
 	}
@@ -869,9 +801,7 @@ func TestEdgeInfoUpdates(t *testing.T) {
 	// Next, attempt to query the channel edges according to the outpoint
 	// of the channel.
 	dbEdgeInfo, dbEdge1, dbEdge2, err = graph.FetchChannelEdgesByOutpoint(&outpoint)
-	if err != nil {
-		t.Fatalf("unable to fetch channel by ID: %v", err)
-	}
+	require.NoError(t, err, "unable to fetch channel by ID")
 	if err := compareEdgePolicies(dbEdge1, edge1); err != nil {
 		t.Fatalf("edge doesn't match: %v", err)
 	}
@@ -1094,9 +1024,7 @@ func TestGraphTraversal(t *testing.T) {
 	graph, cleanUp, err := MakeTestGraph()
 	defer cleanUp()
-	if err != nil {
-		t.Fatalf("unable to make test database: %v", err)
-	}
+	require.NoError(t, err, "unable to make test database")

 	// We'd like to test some of the graph traversal capabilities within
 	// the DB, so we'll create a series of fake nodes to insert into the
@@ -1184,9 +1112,7 @@ func TestGraphTraversalCacheable(t *testing.T) {
 	graph, cleanUp, err := MakeTestGraph()
 	defer cleanUp()
-	if err != nil {
-		t.Fatalf("unable to make test database: %v", err)
-	}
+	require.NoError(t, err, "unable to make test database")

 	// We'd like to test some of the graph traversal capabilities within
 	// the DB, so we'll create a series of fake nodes to insert into the
@@ -1488,14 +1414,10 @@ func TestGraphPruning(t *testing.T) {
 	graph, cleanUp, err := MakeTestGraph()
 	defer cleanUp()
-	if err != nil {
-		t.Fatalf("unable to make test database: %v", err)
-	}
+	require.NoError(t, err, "unable to make test database")

 	sourceNode, err := createTestVertex(graph.db)
-	if err != nil {
-		t.Fatalf("unable to create source node: %v", err)
-	}
+	require.NoError(t, err, "unable to create source node")
 	if err := graph.SetSourceNode(sourceNode); err != nil {
 		t.Fatalf("unable to set source node: %v", err)
 	}
@@ -1587,9 +1509,7 @@ func TestGraphPruning(t *testing.T) {
 	// With all the channel points added, we'll consult the graph to ensure
 	// it has the same channel view as the one we just constructed.
 	channelView, err := graph.ChannelView()
-	if err != nil {
-		t.Fatalf("unable to get graph channel view: %v", err)
-	}
+	require.NoError(t, err, "unable to get graph channel view")
 	assertChanViewEqual(t, channelView, edgePoints)

 	// Now with our test graph created, we can test the pruning
@@ -1602,9 +1522,7 @@ func TestGraphPruning(t *testing.T) {
 	blockHeight := uint32(1)
 	block := channelPoints[:2]
 	prunedChans, err := graph.PruneGraph(block, &blockHash, blockHeight)
-	if err != nil {
-		t.Fatalf("unable to prune graph: %v", err)
-	}
+	require.NoError(t, err, "unable to prune graph")
 	if len(prunedChans) != 2 {
 		t.Fatalf("incorrect number of channels pruned: "+
			"expected %v, got %v", 2, prunedChans)
 	}
@@ -1619,9 +1537,7 @@ func TestGraphPruning(t *testing.T) {
 	// Those channels should also be missing from the channel view.
 	channelView, err = graph.ChannelView()
-	if err != nil {
-		t.Fatalf("unable to get graph channel view: %v", err)
-	}
+	require.NoError(t, err, "unable to get graph channel view")
 	assertChanViewEqualChanPoints(t, channelView, channelPoints[2:])

 	// Next we'll create a block that doesn't close any channels within the
@@ -1636,9 +1552,7 @@ func TestGraphPruning(t *testing.T) {
 	prunedChans, err = graph.PruneGraph(
 		[]*wire.OutPoint{nonChannel}, &blockHash, blockHeight,
 	)
-	if err != nil {
-		t.Fatalf("unable to prune graph: %v", err)
-	}
+	require.NoError(t, err, "unable to prune graph")

 	// No channels should have been detected as pruned.
 	if len(prunedChans) != 0 {
@@ -1658,9 +1572,7 @@ func TestGraphPruning(t *testing.T) {
 	prunedChans, err = graph.PruneGraph(
 		channelPoints[2:], &blockHash, blockHeight,
 	)
-	if err != nil {
-		t.Fatalf("unable to prune graph: %v", err)
-	}
+	require.NoError(t, err, "unable to prune graph")

 	// The remainder of the channels should have been pruned from the
 	// graph.
@@ -1679,9 +1591,7 @@ func TestGraphPruning(t *testing.T) {
 	// completely empty. Those channels should also be missing from the
 	// channel view.
 	channelView, err = graph.ChannelView()
-	if err != nil {
-		t.Fatalf("unable to get graph channel view: %v", err)
-	}
+	require.NoError(t, err, "unable to get graph channel view")
 	if len(channelView) != 0 {
 		t.Fatalf("channel view should be empty, instead have: %v",
			channelView)
@@ -1695,16 +1605,12 @@ func TestHighestChanID(t *testing.T) {
 	graph, cleanUp, err := MakeTestGraph()
 	defer cleanUp()
-	if err != nil {
-		t.Fatalf("unable to make test database: %v", err)
-	}
+	require.NoError(t, err, "unable to make test database")

 	// If we don't yet have any channels in the database, then we should
 	// get a channel ID of zero if we ask for the highest channel ID.
 	bestID, err := graph.HighestChanID()
-	if err != nil {
-		t.Fatalf("unable to get highest ID: %v", err)
-	}
+	require.NoError(t, err, "unable to get highest ID")
 	if bestID != 0 {
 		t.Fatalf("best ID w/ no chan should be zero, is instead: %v",
			bestID)
@@ -1713,13 +1619,9 @@ func TestHighestChanID(t *testing.T) {
 	// Next, we'll insert two channels into the database, with each channel
 	// connecting the same two nodes.
 	node1, err := createTestVertex(graph.db)
-	if err != nil {
-		t.Fatalf("unable to create test node: %v", err)
-	}
+	require.NoError(t, err, "unable to create test node")
 	node2, err := createTestVertex(graph.db)
-	if err != nil {
-		t.Fatalf("unable to create test node: %v", err)
-	}
+	require.NoError(t, err, "unable to create test node")

 	// The first channel with be at height 10, while the other will be at
 	// height 100.
@@ -1736,9 +1638,7 @@ func TestHighestChanID(t *testing.T) {
 	// Now that the edges has been inserted, we'll query for the highest
 	// known channel ID in the database.
 	bestID, err = graph.HighestChanID()
-	if err != nil {
-		t.Fatalf("unable to get highest ID: %v", err)
-	}
+	require.NoError(t, err, "unable to get highest ID")

 	if bestID != chanID2.ToUint64() {
 		t.Fatalf("expected %v got %v for best chan ID: ",
@@ -1752,9 +1652,7 @@ func TestHighestChanID(t *testing.T) {
 		t.Fatalf("unable to create channel edge: %v", err)
 	}
 	bestID, err = graph.HighestChanID()
-	if err != nil {
-		t.Fatalf("unable to get highest ID: %v", err)
-	}
+	require.NoError(t, err, "unable to get highest ID")

 	if bestID != chanID3.ToUint64() {
 		t.Fatalf("expected %v got %v for best chan ID: ",
@@ -1770,18 +1668,14 @@ func TestChanUpdatesInHorizon(t *testing.T) {
 	graph, cleanUp, err := MakeTestGraph()
 	defer cleanUp()
-	if err != nil {
-		t.Fatalf("unable to make test database: %v", err)
-	}
+	require.NoError(t, err, "unable to make test database")

 	// If we issue an arbitrary query before any channel updates are
 	// inserted in the database, we should get zero results.
 	chanUpdates, err := graph.ChanUpdatesInHorizon(
 		time.Unix(999, 0), time.Unix(9999, 0),
 	)
-	if err != nil {
-		t.Fatalf("unable to updates for updates: %v", err)
-	}
+	require.NoError(t, err, "unable to query for updates")
 	if len(chanUpdates) != 0 {
 		t.Fatalf("expected 0 chan updates, instead got %v",
			len(chanUpdates))
@@ -1789,16 +1683,12 @@ func TestChanUpdatesInHorizon(t *testing.T) {
 	// We'll start by creating two nodes which will seed our test graph.
 	node1, err := createTestVertex(graph.db)
-	if err != nil {
-		t.Fatalf("unable to create test node: %v", err)
-	}
+	require.NoError(t, err, "unable to create test node")
 	if err := graph.AddLightningNode(node1); err != nil {
 		t.Fatalf("unable to add node: %v", err)
 	}
 	node2, err := createTestVertex(graph.db)
-	if err != nil {
-		t.Fatalf("unable to create test node: %v", err)
-	}
+	require.NoError(t, err, "unable to create test node")
 	if err := graph.AddLightningNode(node2); err != nil {
 		t.Fatalf("unable to add node: %v", err)
 	}
@@ -1938,9 +1828,7 @@ func TestNodeUpdatesInHorizon(t *testing.T) {
 	graph, cleanUp, err := MakeTestGraph()
 	defer cleanUp()
-	if err != nil {
-		t.Fatalf("unable to make test database: %v", err)
-	}
+	require.NoError(t, err, "unable to make test database")

 	startTime := time.Unix(1234, 0)
 	endTime := startTime
@@ -1950,9 +1838,7 @@ func TestNodeUpdatesInHorizon(t *testing.T) {
 	nodeUpdates, err := graph.NodeUpdatesInHorizon(
 		time.Unix(999, 0), time.Unix(9999, 0),
 	)
-	if err != nil {
-		t.Fatalf("unable to query for node updates: %v", err)
-	}
+	require.NoError(t, err, "unable to query for node updates")
 	if len(nodeUpdates) != 0 {
 		t.Fatalf("expected 0 node updates, instead got %v",
			len(nodeUpdates))
@@ -2059,33 +1945,25 @@ func TestFilterKnownChanIDs(t *testing.T) {
 	graph, cleanUp, err := MakeTestGraph()
 	defer cleanUp()
-	if err != nil {
-		t.Fatalf("unable to make test database: %v", err)
-	}
+	require.NoError(t, err, "unable to make test database")

 	// If we try to filter out a set of channel ID's before we even know of
 	// any channels, then we should get the entire set back.
 	preChanIDs := []uint64{1, 2, 3, 4}
 	filteredIDs, err := graph.FilterKnownChanIDs(preChanIDs)
-	if err != nil {
-		t.Fatalf("unable to filter chan IDs: %v", err)
-	}
+	require.NoError(t, err, "unable to filter chan IDs")
 	if !reflect.DeepEqual(preChanIDs, filteredIDs) {
 		t.Fatalf("chan IDs shouldn't have been filtered!")
 	}

 	// We'll start by creating two nodes which will seed our test graph.
 	node1, err := createTestVertex(graph.db)
-	if err != nil {
-		t.Fatalf("unable to create test node: %v", err)
-	}
+	require.NoError(t, err, "unable to create test node")
 	if err := graph.AddLightningNode(node1); err != nil {
 		t.Fatalf("unable to add node: %v", err)
 	}
 	node2, err := createTestVertex(graph.db)
-	if err != nil {
-		t.Fatalf("unable to create test node: %v", err)
-	}
+	require.NoError(t, err, "unable to create test node")
 	if err := graph.AddLightningNode(node2); err != nil {
 		t.Fatalf("unable to add node: %v", err)
 	}
@@ -2174,23 +2052,17 @@ func TestFilterChannelRange(t *testing.T) {
 	graph, cleanUp, err := MakeTestGraph()
 	defer cleanUp()
-	if err != nil {
-		t.Fatalf("unable to make test database: %v", err)
-	}
+	require.NoError(t, err, "unable to make test database")

 	// We'll first populate our graph with two nodes. All channels created
 	// below will be made between these two nodes.
 	node1, err := createTestVertex(graph.db)
-	if err != nil {
-		t.Fatalf("unable to create test node: %v", err)
-	}
+	require.NoError(t, err, "unable to create test node")
 	if err := graph.AddLightningNode(node1); err != nil {
 		t.Fatalf("unable to add node: %v", err)
 	}
 	node2, err := createTestVertex(graph.db)
-	if err != nil {
-		t.Fatalf("unable to create test node: %v", err)
-	}
+	require.NoError(t, err, "unable to create test node")
 	if err := graph.AddLightningNode(node2); err != nil {
 		t.Fatalf("unable to add node: %v", err)
 	}
@@ -2198,9 +2070,7 @@ func TestFilterChannelRange(t *testing.T) {
 	// If we try to filter a channel range before we have any channels
 	// inserted, we should get an empty slice of results.
 	resp, err := graph.FilterChannelRange(10, 100)
-	if err != nil {
-		t.Fatalf("unable to filter channels: %v", err)
-	}
+	require.NoError(t, err, "unable to filter channels")
 	if len(resp) != 0 {
 		t.Fatalf("expected zero chans, instead got %v", len(resp))
 	}
@@ -2306,23 +2176,17 @@ func TestFetchChanInfos(t *testing.T) {
 	graph, cleanUp, err := MakeTestGraph()
 	defer cleanUp()
-	if err != nil {
-		t.Fatalf("unable to make test database: %v", err)
-	}
+	require.NoError(t, err, "unable to make test database")

 	// We'll first populate our graph with two nodes. All channels created
 	// below will be made between these two nodes.
 	node1, err := createTestVertex(graph.db)
-	if err != nil {
-		t.Fatalf("unable to create test node: %v", err)
-	}
+	require.NoError(t, err, "unable to create test node")
 	if err := graph.AddLightningNode(node1); err != nil {
 		t.Fatalf("unable to add node: %v", err)
 	}
 	node2, err := createTestVertex(graph.db)
-	if err != nil {
-		t.Fatalf("unable to create test node: %v", err)
-	}
+	require.NoError(t, err, "unable to create test node")
 	if err := graph.AddLightningNode(node2); err != nil {
 		t.Fatalf("unable to add node: %v", err)
 	}
@@ -2388,18 +2252,14 @@ func TestFetchChanInfos(t *testing.T) {
 		t.Fatalf("unable to create channel edge: %v", err)
 	}
 	err = graph.DeleteChannelEdges(false, zombieChan.ChannelID)
-	if err != nil {
-		t.Fatalf("unable to delete and mark edge zombie: %v", err)
-	}
+	require.NoError(t, err, "unable to delete and mark edge zombie")
 	edgeQuery = append(edgeQuery, zombieChanID.ToUint64())

 	// We'll now attempt to query for the range of channel ID's we just
 	// inserted into the database. We should get the exact same set of
 	// edges back.
 	resp, err := graph.FetchChanInfos(edgeQuery)
-	if err != nil {
-		t.Fatalf("unable to fetch chan edges: %v", err)
-	}
+	require.NoError(t, err, "unable to fetch chan edges")
 	if len(resp) != len(edges) {
 		t.Fatalf("expected %v edges, instead got %v",
			len(edges), len(resp))
@@ -2426,22 +2286,16 @@ func TestIncompleteChannelPolicies(t *testing.T) {
 	graph, cleanUp, err := MakeTestGraph()
 	defer cleanUp()
-	if err != nil {
-		t.Fatalf("unable to make test database: %v", err)
-	}
+	require.NoError(t, err, "unable to make test database")

 	// Create two nodes.
 	node1, err := createTestVertex(graph.db)
-	if err != nil {
-		t.Fatalf("unable to create test node: %v", err)
-	}
+	require.NoError(t, err, "unable to create test node")
 	if err := graph.AddLightningNode(node1); err != nil {
 		t.Fatalf("unable to add node: %v", err)
 	}
 	node2, err := createTestVertex(graph.db)
-	if err != nil {
-		t.Fatalf("unable to create test node: %v", err)
-	}
+	require.NoError(t, err, "unable to create test node")
 	if err := graph.AddLightningNode(node2); err != nil {
 		t.Fatalf("unable to add node: %v", err)
 	}
@@ -2532,14 +2386,10 @@ func TestChannelEdgePruningUpdateIndexDeletion(t *testing.T) {
 	graph, cleanUp, err := MakeTestGraph()
 	defer cleanUp()
-	if err != nil {
-		t.Fatalf("unable to make test database: %v", err)
-	}
+	require.NoError(t, err, "unable to make test database")

 	sourceNode, err := createTestVertex(graph.db)
-	if err != nil {
-		t.Fatalf("unable to create source node: %v", err)
-	}
+	require.NoError(t, err, "unable to create source node")
 	if err := graph.SetSourceNode(sourceNode); err != nil {
 		t.Fatalf("unable to set source node: %v", err)
 	}
@@ -2547,16 +2397,12 @@ func TestChannelEdgePruningUpdateIndexDeletion(t *testing.T) {
 	// We'll first populate our graph with two nodes. All channels created
 	// below will be made between these two nodes.
 	node1, err := createTestVertex(graph.db)
-	if err != nil {
-		t.Fatalf("unable to create test node: %v", err)
-	}
+	require.NoError(t, err, "unable to create test node")
 	if err := graph.AddLightningNode(node1); err != nil {
 		t.Fatalf("unable to add node: %v", err)
 	}
 	node2, err := createTestVertex(graph.db)
-	if err != nil {
-		t.Fatalf("unable to create test node: %v", err)
-	}
+	require.NoError(t, err, "unable to create test node")
 	if err := graph.AddLightningNode(node2); err != nil {
 		t.Fatalf("unable to add node: %v", err)
 	}
@@ -2669,9 +2515,7 @@ func TestChannelEdgePruningUpdateIndexDeletion(t *testing.T) {
 	_, err = graph.PruneGraph(
 		[]*wire.OutPoint{&edgeInfo.ChannelPoint}, &blockHash, 101,
 	)
-	if err != nil {
-		t.Fatalf("unable to prune graph: %v", err)
-	}
+	require.NoError(t, err, "unable to prune graph")

 	// Finally, we'll check the database state one last time to conclude
 	// that we should no longer be able to locate _any_ entries within the
@@ -2686,16 +2530,12 @@ func TestPruneGraphNodes(t *testing.T) {
 	graph, cleanUp, err := MakeTestGraph()
 	defer cleanUp()
-	if err != nil {
-		t.Fatalf("unable to make test database: %v", err)
-	}
+	require.NoError(t, err, "unable to make test database")

 	// We'll start off by inserting our source node, to ensure that it's
 	// the only node left after we prune the graph.
 	sourceNode, err := createTestVertex(graph.db)
-	if err != nil {
-		t.Fatalf("unable to create source node: %v", err)
-	}
+	require.NoError(t, err, "unable to create source node")
 	if err := graph.SetSourceNode(sourceNode); err != nil {
 		t.Fatalf("unable to set source node: %v", err)
 	}
@@ -2704,23 +2544,17 @@ func TestPruneGraphNodes(t *testing.T) {
 	// channel graph, at the end of the scenario, only two of these nodes
 	// should still be in the graph.
 	node1, err := createTestVertex(graph.db)
-	if err != nil {
-		t.Fatalf("unable to create test node: %v", err)
-	}
+	require.NoError(t, err, "unable to create test node")
 	if err := graph.AddLightningNode(node1); err != nil {
 		t.Fatalf("unable to add node: %v", err)
 	}
 	node2, err := createTestVertex(graph.db)
-	if err != nil {
-		t.Fatalf("unable to create test node: %v", err)
-	}
+	require.NoError(t, err, "unable to create test node")
 	if err := graph.AddLightningNode(node2); err != nil {
 		t.Fatalf("unable to add node: %v", err)
 	}
 	node3, err := createTestVertex(graph.db)
-	if err != nil {
-		t.Fatalf("unable to create test node: %v", err)
-	}
+	require.NoError(t, err, "unable to create test node")
 	if err := graph.AddLightningNode(node3); err != nil {
 		t.Fatalf("unable to add node: %v", err)
 	}
@@ -2769,23 +2603,17 @@ func TestAddChannelEdgeShellNodes(t *testing.T) {
 	graph, cleanUp, err := MakeTestGraph()
 	defer cleanUp()
-	if err != nil {
-		t.Fatalf("unable to make test database: %v", err)
-	}
+	require.NoError(t, err, "unable to make test database")

 	// To start, we'll create two nodes, and only add one of them to the
 	// channel graph.
 	node1, err := createTestVertex(graph.db)
-	if err != nil {
-		t.Fatalf("unable to create test node: %v", err)
-	}
+	require.NoError(t, err, "unable to create test node")
 	if err := graph.AddLightningNode(node1); err != nil {
 		t.Fatalf("unable to add node: %v", err)
 	}
 	node2, err := createTestVertex(graph.db)
-	if err != nil {
-		t.Fatalf("unable to create test node: %v", err)
-	}
+	require.NoError(t, err, "unable to create test node")

 	// We'll now create an edge between the two nodes, as a result, node2
 	// should be inserted into the database as a shell node.
@@ -2797,17 +2625,13 @@ func TestAddChannelEdgeShellNodes(t *testing.T) {
 	// Ensure that node1 was inserted as a full node, while node2 only has
 	// a shell node present.
 	node1, err = graph.FetchLightningNode(node1.PubKeyBytes)
-	if err != nil {
-		t.Fatalf("unable to fetch node1: %v", err)
-	}
+	require.NoError(t, err, "unable to fetch node1")
 	if !node1.HaveNodeAnnouncement {
 		t.Fatalf("have shell announcement for node1, shouldn't")
 	}

 	node2, err = graph.FetchLightningNode(node2.PubKeyBytes)
-	if err != nil {
-		t.Fatalf("unable to fetch node2: %v", err)
-	}
+	require.NoError(t, err, "unable to fetch node2")
 	if node2.HaveNodeAnnouncement {
 		t.Fatalf("should have shell announcement for node2, but is full")
 	}
@@ -2821,16 +2645,12 @@ func TestNodePruningUpdateIndexDeletion(t *testing.T) {
 	graph, cleanUp, err := MakeTestGraph()
 	defer cleanUp()
-	if err != nil {
-		t.Fatalf("unable to make test database: %v", err)
-	}
+	require.NoError(t, err, "unable to make test database")

 	// We'll first populate our graph with a single node that will be
 	// removed shortly.
 	node1, err := createTestVertex(graph.db)
-	if err != nil {
-		t.Fatalf("unable to create test node: %v", err)
-	}
+	require.NoError(t, err, "unable to create test node")
 	if err := graph.AddLightningNode(node1); err != nil {
 		t.Fatalf("unable to add node: %v", err)
 	}
@@ -2841,9 +2661,7 @@ func TestNodePruningUpdateIndexDeletion(t *testing.T) {
 	startTime := time.Unix(9, 0)
 	endTime := node1.LastUpdate.Add(time.Minute)
 	nodesInHorizon, err := graph.NodeUpdatesInHorizon(startTime, endTime)
-	if err != nil {
-		t.Fatalf("unable to fetch nodes in horizon: %v", err)
-	}
+	require.NoError(t, err, "unable to fetch nodes in horizon")

 	// We should only have a single node, and that node should exactly
 	// match the node we just inserted.
@@ -2864,9 +2682,7 @@ func TestNodePruningUpdateIndexDeletion(t *testing.T) {
 	// Now that the node has been deleted, we'll again query the nodes in
 	// the horizon. This time we should have no nodes at all.
 	nodesInHorizon, err = graph.NodeUpdatesInHorizon(startTime, endTime)
-	if err != nil {
-		t.Fatalf("unable to fetch nodes in horizon: %v", err)
-	}
+	require.NoError(t, err, "unable to fetch nodes in horizon")

 	if len(nodesInHorizon) != 0 {
 		t.Fatalf("should have zero nodes instead have: %v",
@@ -2889,39 +2705,27 @@ func TestNodeIsPublic(t *testing.T) {
 	// some graphs but not others, etc.).
 	aliceGraph, cleanUp, err := MakeTestGraph()
 	defer cleanUp()
-	if err != nil {
-		t.Fatalf("unable to make test database: %v", err)
-	}
+	require.NoError(t, err, "unable to make test database")
 	aliceNode, err := createTestVertex(aliceGraph.db)
-	if err != nil {
-		t.Fatalf("unable to create test node: %v", err)
-	}
+	require.NoError(t, err, "unable to create test node")
 	if err := aliceGraph.SetSourceNode(aliceNode); err != nil {
 		t.Fatalf("unable to set source node: %v", err)
 	}

 	bobGraph, cleanUp, err := MakeTestGraph()
 	defer cleanUp()
-	if err != nil {
-		t.Fatalf("unable to make test database: %v", err)
-	}
+	require.NoError(t, err, "unable to make test database")
 	bobNode, err := createTestVertex(bobGraph.db)
-	if err != nil {
-		t.Fatalf("unable to create test node: %v", err)
-	}
+	require.NoError(t, err, "unable to create test node")
 	if err := bobGraph.SetSourceNode(bobNode); err != nil {
 		t.Fatalf("unable to set source node: %v", err)
 	}

 	carolGraph, cleanUp, err := MakeTestGraph()
 	defer cleanUp()
-	if err != nil {
-		t.Fatalf("unable to make test database: %v", err)
-	}
+	require.NoError(t, err, "unable to make test database")
 	carolNode, err := createTestVertex(carolGraph.db)
-	if err != nil {
-		t.Fatalf("unable to create test node: %v", err)
-	}
+	require.NoError(t, err, "unable to create test node")
 	if err := carolGraph.SetSourceNode(carolNode); err != nil {
 		t.Fatalf("unable to set source node: %v", err)
 	}
@@ -3034,25 +2838,19 @@ func TestDisabledChannelIDs(t *testing.T) {
 	t.Parallel()

 	graph, cleanUp, err := MakeTestGraph()
-	if err != nil {
-		t.Fatalf("unable to make test database: %v", err)
-	}
+	require.NoError(t, err, "unable to make test database")
 	defer cleanUp()

 	// Create first node and add it to the graph.
 	node1, err := createTestVertex(graph.db)
-	if err != nil {
-		t.Fatalf("unable to create test node: %v", err)
-	}
+	require.NoError(t, err, "unable to create test node")
 	if err := graph.AddLightningNode(node1); err != nil {
 		t.Fatalf("unable to add node: %v", err)
 	}

 	// Create second node and add it to the graph.
 	node2, err := createTestVertex(graph.db)
-	if err != nil {
-		t.Fatalf("unable to create test node: %v", err)
-	}
+	require.NoError(t, err, "unable to create test node")
 	if err := graph.AddLightningNode(node2); err != nil {
 		t.Fatalf("unable to add node: %v", err)
 	}
@@ -3069,9 +2867,7 @@ func TestDisabledChannelIDs(t *testing.T) {
 	// Ensure no disabled channels exist in the bucket on start.
 	disabledChanIds, err := graph.DisabledChannelIDs()
-	if err != nil {
-		t.Fatalf("unable to get disabled channel ids: %v", err)
-	}
+	require.NoError(t, err, "unable to get disabled channel ids")
 	if len(disabledChanIds) > 0 {
 		t.Fatalf("expected empty disabled channels, got %v disabled channels",
			len(disabledChanIds))
@@ -3084,9 +2880,7 @@ func TestDisabledChannelIDs(t *testing.T) {
 		t.Fatalf("unable to update edge: %v", err)
 	}
 	disabledChanIds, err = graph.DisabledChannelIDs()
-	if err != nil {
-		t.Fatalf("unable to get disabled channel ids: %v", err)
-	}
+	require.NoError(t, err, "unable to get disabled channel ids")
 	if len(disabledChanIds) > 0 {
 		t.Fatalf("expected empty disabled channels, got %v disabled channels",
			len(disabledChanIds))
@@ -3099,9 +2893,7 @@ func TestDisabledChannelIDs(t *testing.T) {
 		t.Fatalf("unable to update edge: %v", err)
 	}
 	disabledChanIds, err = graph.DisabledChannelIDs()
-	if err != nil {
-		t.Fatalf("unable to get disabled channel ids: %v", err)
-	}
+	require.NoError(t, err, "unable to get disabled channel ids")
 	if len(disabledChanIds) != 1 || disabledChanIds[0] != edgeInfo.ChannelID {
 		t.Fatalf("expected disabled channel with id %v, "+
			"got %v", edgeInfo.ChannelID, disabledChanIds)
@@ -3112,9 +2904,7 @@ func TestDisabledChannelIDs(t *testing.T) {
 		t.Fatalf("unable to delete channel edge: %v", err)
 	}
 	disabledChanIds, err = graph.DisabledChannelIDs()
-	if err != nil {
-		t.Fatalf("unable to get disabled channel ids: %v", err)
-	}
+	require.NoError(t, err, "unable to get disabled channel ids")
 	if len(disabledChanIds) > 0 {
 		t.Fatalf("expected empty disabled channels, got %v disabled channels",
			len(disabledChanIds))
@@ -3131,23 +2921,17 @@ func TestEdgePolicyMissingMaxHtcl(t *testing.T) {
 	graph, cleanUp, err := MakeTestGraph()
 	defer cleanUp()
-	if err != nil {
-		t.Fatalf("unable to make test database: %v", err)
-	}
+	require.NoError(t, err, "unable to make test database")

 	// We'd like to test the update of edges inserted into the database, so
 	// we create two vertexes to connect.
 	node1, err := createTestVertex(graph.db)
-	if err != nil {
-		t.Fatalf("unable to create test node: %v", err)
-	}
+	require.NoError(t, err, "unable to create test node")
 	if err := graph.AddLightningNode(node1); err != nil {
 		t.Fatalf("unable to add node: %v", err)
 	}
 	node2, err := createTestVertex(graph.db)
-	if err != nil {
-		t.Fatalf("unable to create test node: %v", err)
-	}
+	require.NoError(t, err, "unable to create test node")

 	edgeInfo, edge1, edge2 := createChannelEdge(graph.db, node1, node2)
 	if err := graph.AddLightningNode(node2); err != nil {
@@ -3204,9 +2988,7 @@ func TestEdgePolicyMissingMaxHtcl(t *testing.T) {
 		return nil
 	}, func() {})
-	if err != nil {
-		t.Fatalf("error reading db: %v", err)
-	}
+	require.NoError(t, err, "error reading db")

 	// Put the stripped bytes in the DB.
 	err = kvdb.Update(graph.db, func(tx kvdb.RwTx) error {
@@ -3240,9 +3022,7 @@ func TestEdgePolicyMissingMaxHtcl(t *testing.T) {
 		return edges.Put(edgeKey[:], stripped)
 	}, func() {})
-	if err != nil {
-		t.Fatalf("error writing db: %v", err)
-	}
+	require.NoError(t, err, "error writing db")

 	// And add the second, unmodified edge.
 	if err := graph.UpdateEdgePolicy(edge2); err != nil {
@@ -3254,9 +3034,7 @@ func TestEdgePolicyMissingMaxHtcl(t *testing.T) {
 	// are not aware of the policy (indicated by the policy returned being
 	// nil)
 	dbEdgeInfo, dbEdge1, dbEdge2, err := graph.FetchChannelEdgesByID(chanID)
-	if err != nil {
-		t.Fatalf("unable to fetch channel by ID: %v", err)
-	}
+	require.NoError(t, err, "unable to fetch channel by ID")

 	// The first edge should have a nil-policy returned
 	if dbEdge1 != nil {
@@ -3274,9 +3052,7 @@ func TestEdgePolicyMissingMaxHtcl(t *testing.T) {
 	}

 	dbEdgeInfo, dbEdge1, dbEdge2, err = graph.FetchChannelEdgesByID(chanID)
-	if err != nil {
-		t.Fatalf("unable to fetch channel by ID: %v", err)
-	}
+	require.NoError(t, err, "unable to fetch channel by ID")
 	if err := compareEdgePolicies(dbEdge1, edge1); err != nil {
 		t.Fatalf("edge doesn't match: %v", err)
 	}
@@ -3292,9 +3068,7 @@ func assertNumZombies(t *testing.T, graph *ChannelGraph, expZombies uint64) {
 	t.Helper()

 	numZombies, err := graph.NumZombies()
-	if err != nil {
-		t.Fatalf("unable to query number of zombies: %v", err)
-	}
+	require.NoError(t, err, "unable to query number of zombies")

 	if numZombies != expZombies {
 		t.Fatalf("expected %d zombies, found %d",
@@ -3309,18 +3083,12 @@ func TestGraphZombieIndex(t *testing.T) {
 	// We'll start by creating our test graph along with a test edge.
 	graph, cleanUp, err := MakeTestGraph()
 	defer cleanUp()
-	if err != nil {
-		t.Fatalf("unable to create test database: %v", err)
-	}
+	require.NoError(t, err, "unable to create test database")

 	node1, err := createTestVertex(graph.db)
-	if err != nil {
-		t.Fatalf("unable to create test vertex: %v", err)
-	}
+	require.NoError(t, err, "unable to create test vertex")
 	node2, err := createTestVertex(graph.db)
-	if err != nil {
-		t.Fatalf("unable to create test vertex: %v", err)
-	}
+	require.NoError(t, err, "unable to create test vertex")

 	// Swap the nodes if the second's pubkey is smaller than the first.
 	// Without this, the comparisons at the end will fail probabilistically.
@@ -3344,9 +3112,7 @@ func TestGraphZombieIndex(t *testing.T) {
 	// If we delete the edge and mark it as a zombie, then we should expect
 	// to see it within the index.
 	err = graph.DeleteChannelEdges(false, edge.ChannelID)
-	if err != nil {
-		t.Fatalf("unable to mark edge as zombie: %v", err)
-	}
+	require.NoError(t, err, "unable to mark edge as zombie")
 	isZombie, pubKey1, pubKey2 := graph.IsZombieEdge(edge.ChannelID)
 	if !isZombie {
 		t.Fatal("expected edge to be marked as zombie")
@@ -3377,9 +3143,7 @@ func TestGraphZombieIndex(t *testing.T) {
 	err = graph.MarkEdgeZombie(
 		edge.ChannelID, node1.PubKeyBytes, node2.PubKeyBytes,
 	)
-	if err != nil {
-		t.Fatalf("unable to mark edge as zombie: %v", err)
-	}
+	require.NoError(t, err, "unable to mark edge as zombie")
 	isZombie, _, _ = graph.IsZombieEdge(edge.ChannelID)
 	if !isZombie {
 		t.Fatal("expected edge to be marked as zombie")
@@ -3494,9 +3258,7 @@ func TestLightningNodeSigVerification(t *testing.T) {
 	// Create private key and sign the data with it.
 	priv, err := btcec.NewPrivateKey()
-	if err != nil {
-		t.Fatalf("unable to crete priv key: %v", err)
-	}
+	require.NoError(t, err, "unable to create priv key")

 	sign := ecdsa.Sign(priv, data[:])

@@ -3507,22 +3269,16 @@ func TestLightningNodeSigVerification(t *testing.T) {
 	// Create a LightningNode from the same private key.
 	graph, cleanUp, err := MakeTestGraph()
-	if err != nil {
-		t.Fatalf("unable to make test database: %v", err)
-	}
+	require.NoError(t, err, "unable to make test database")
 	defer cleanUp()

 	node, err := createLightningNode(graph.db, priv)
-	if err != nil {
-		t.Fatalf("unable to create node: %v", err)
-	}
+	require.NoError(t, err, "unable to create node")

 	// And finally check that we can verify the same signature from the
 	// pubkey returned from the lightning node.
 	nodePub, err := node.PubKey()
-	if err != nil {
-		t.Fatalf("unable to get pubkey: %v", err)
-	}
+	require.NoError(t, err, "unable to get pubkey")

 	if !sign.Verify(data[:], nodePub) {
 		t.Fatalf("unable to verify sig")
diff --git a/channeldb/invoice_test.go b/channeldb/invoice_test.go
index c235899ce..cade00203 100644
--- a/channeldb/invoice_test.go
+++ b/channeldb/invoice_test.go
@@ -151,16 +151,12 @@ func TestInvoiceWorkflow(t *testing.T) {
 func testInvoiceWorkflow(t *testing.T, test invWorkflowTest) {
 	db, cleanUp, err := MakeTestDB()
 	defer cleanUp()
-	if err != nil {
-		t.Fatalf("unable to make test db: %v", err)
-	}
+	require.NoError(t, err, "unable to make test db")

 	// Create a fake invoice which we'll use several times in the tests
 	// below.
 	fakeInvoice, err := randInvoice(10000)
-	if err != nil {
-		t.Fatalf("unable to create invoice: %v", err)
-	}
+	require.NoError(t, err, "unable to create invoice")
 	invPayHash := fakeInvoice.Terms.PaymentPreimage.Hash()

 	// Select the payment hash and payment address we will use to lookup or
@@ -216,13 +212,9 @@ func testInvoiceWorkflow(t *testing.T, test invWorkflowTest) {
 	// SettledDate
 	payAmt := fakeInvoice.Terms.Value * 2
 	_, err = db.UpdateInvoice(ref, nil, getUpdateInvoice(payAmt))
-	if err != nil {
-		t.Fatalf("unable to settle invoice: %v", err)
-	}
+	require.NoError(t, err, "unable to settle invoice")
 	dbInvoice2, err := db.LookupInvoice(ref)
-	if err != nil {
-		t.Fatalf("unable to fetch invoice: %v", err)
-	}
+	require.NoError(t, err, "unable to fetch invoice")
 	if dbInvoice2.State != ContractSettled {
 		t.Fatalf("invoice should now be settled but isn't")
 	}
@@ -284,9 +276,7 @@ func testInvoiceWorkflow(t *testing.T, test invWorkflowTest) {
 	}

 	response, err := db.QueryInvoices(query)
-	if err != nil {
-		t.Fatalf("invoice query failed: %v", err)
-	}
+	require.NoError(t, err, "invoice query failed")

 	// The retrieve list of invoices should be identical as since we're
 	// using big endian, the invoices should be retrieved in ascending
@@ -443,9 +433,7 @@ func TestInvoiceCancelSingleHtlc(t *testing.T) {
 	db, cleanUp, err := MakeTestDB()
 	defer cleanUp()
-	if err != nil {
-		t.Fatalf("unable to make test db: %v", err)
-	}
+	require.NoError(t, err, "unable to make test db")

 	preimage := lntypes.Preimage{1}
 	paymentHash := preimage.Hash()
@@ -479,9 +467,7 @@ func TestInvoiceCancelSingleHtlc(t *testing.T) {
 			},
 		}, nil
 	})
-	if err != nil {
-		t.Fatalf("unable to add invoice htlc: %v", err)
-	}
+	require.NoError(t, err, "unable to add invoice htlc")
 	if len(invoice.Htlcs) != 1 {
 		t.Fatalf("expected the htlc to be added")
 	}
@@ -498,9 +484,7 @@ func TestInvoiceCancelSingleHtlc(t *testing.T) {
 			},
 		}, nil
 	})
-	if err != nil {
-		t.Fatalf("unable to cancel htlc: %v", err)
-	}
+	require.NoError(t, err, "unable to cancel htlc")
 	if len(invoice.Htlcs) != 1 {
 		t.Fatalf("expected the htlc to be present")
 	}
@@ -571,9 +555,7 @@ func TestInvoiceCancelSingleHtlcAMP(t *testing.T) {
 			SetID: (*SetID)(setID1),
 		}, nil
 	})
-	if err != nil {
-		t.Fatalf("unable to cancel htlc: %v", err)
-	}
+	require.NoError(t, err, "unable to cancel htlc")
freshInvoice, err := db.LookupInvoice(ref) require.Nil(t, err) @@ -623,9 +605,7 @@ func TestInvoiceCancelSingleHtlcAMP(t *testing.T) { SetID: (*SetID)(setID2), }, nil }) - if err != nil { - t.Fatalf("unable to cancel htlc: %v", err) - } + require.NoError(t, err, "unable to cancel htlc") freshInvoice, err = db.LookupInvoice(ref) require.Nil(t, err) @@ -653,9 +633,7 @@ func TestInvoiceCancelSingleHtlcAMP(t *testing.T) { SetID: (*SetID)(setID2), }, nil }) - if err != nil { - t.Fatalf("unable to cancel htlc: %v", err) - } + require.NoError(t, err, "unable to cancel htlc") freshInvoice, err = db.LookupInvoice(ref) require.Nil(t, err) @@ -680,9 +658,7 @@ func TestInvoiceAddTimeSeries(t *testing.T) { db, cleanUp, err := MakeTestDB(OptionClock(testClock)) defer cleanUp() - if err != nil { - t.Fatalf("unable to make test db: %v", err) - } + require.NoError(t, err, "unable to make test db") _, err = db.InvoicesAddedSince(0) require.NoError(t, err) @@ -995,9 +971,7 @@ func TestScanInvoices(t *testing.T) { db, cleanup, err := MakeTestDB() defer cleanup() - if err != nil { - t.Fatalf("unable to make test db: %v", err) - } + require.NoError(t, err, "unable to make test db") var invoices map[lntypes.Hash]*Invoice callCount := 0 @@ -1056,16 +1030,12 @@ func TestDuplicateSettleInvoice(t *testing.T) { db, cleanUp, err := MakeTestDB(OptionClock(testClock)) defer cleanUp() - if err != nil { - t.Fatalf("unable to make test db: %v", err) - } + require.NoError(t, err, "unable to make test db") // We'll start out by creating an invoice and writing it to the DB. amt := lnwire.NewMSatFromSatoshis(1000) invoice, err := randInvoice(amt) - if err != nil { - t.Fatalf("unable to create invoice: %v", err) - } + require.NoError(t, err, "unable to create invoice") payHash := invoice.Terms.PaymentPreimage.Hash() @@ -1076,9 +1046,7 @@ func TestDuplicateSettleInvoice(t *testing.T) { // With the invoice in the DB, we'll now attempt to settle the invoice. ref := InvoiceRefByHash(payHash) dbInvoice, err := db.UpdateInvoice(ref, nil, getUpdateInvoice(amt)) - if err != nil { - t.Fatalf("unable to settle invoice: %v", err) - } + require.NoError(t, err, "unable to settle invoice") // We'll update what we expect the settle invoice to be so that our // comparison below has the correct assumption. @@ -1121,9 +1089,7 @@ func TestQueryInvoices(t *testing.T) { db, cleanUp, err := MakeTestDB(OptionClock(testClock)) defer cleanUp() - if err != nil { - t.Fatalf("unable to make test db: %v", err) - } + require.NoError(t, err, "unable to make test db") // To begin the test, we'll add 50 invoices to the database. We'll // assume that the index of the invoice within the database is the same @@ -1436,9 +1402,7 @@ func TestCustomRecords(t *testing.T) { db, cleanUp, err := MakeTestDB() defer cleanUp() - if err != nil { - t.Fatalf("unable to make test db: %v", err) - } + require.NoError(t, err, "unable to make test db") preimage := lntypes.Preimage{1} paymentHash := preimage.Hash() @@ -1477,16 +1441,12 @@ func TestCustomRecords(t *testing.T) { }, nil }, ) - if err != nil { - t.Fatalf("unable to add invoice htlc: %v", err) - } + require.NoError(t, err, "unable to add invoice htlc") // Retrieve the invoice from that database and verify that the custom // records are present. 
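Note that pre-existing require.Nil(t, err) calls like the ones above are left untouched by this patch. Both assertions are fatal, so behavior is unchanged either way; the difference is readability of the failure output. A tiny sketch, not part of this patch:

    package sketch

    import (
        "testing"

        "github.com/stretchr/testify/require"
    )

    func TestNilVsNoError(t *testing.T) {
        var err error // nil here, so both assertions pass

        // Both call t.FailNow on a non-nil error; only the output
        // differs. require.Nil reports "Expected nil, but got: ...",
        // while require.NoError reports "Received unexpected error:
        // ...", which reads as intent when the value is an error.
        require.Nil(t, err)
        require.NoError(t, err)
    }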
dbInvoice, err := db.LookupInvoice(ref) - if err != nil { - t.Fatalf("unable to lookup invoice: %v", err) - } + require.NoError(t, err, "unable to lookup invoice") if len(dbInvoice.Htlcs) != 1 { t.Fatalf("expected the htlc to be added") diff --git a/channeldb/meta_test.go b/channeldb/meta_test.go index faf9bae5f..a2366cfc3 100644 --- a/channeldb/meta_test.go +++ b/channeldb/meta_test.go @@ -8,6 +8,7 @@ import ( "github.com/go-errors/errors" "github.com/lightningnetwork/lnd/kvdb" + "github.com/stretchr/testify/require" ) // applyMigration is a helper test function that encapsulates the general steps @@ -425,14 +426,10 @@ func TestMigrationReversion(t *testing.T) { defer func() { os.RemoveAll(tempDirName) }() - if err != nil { - t.Fatalf("unable to create temp dir: %v", err) - } + require.NoError(t, err, "unable to create temp dir") backend, cleanup, err := kvdb.GetTestBackend(tempDirName, "cdb") - if err != nil { - t.Fatalf("unable to get test db backend: %v", err) - } + require.NoError(t, err, "unable to get test db backend") cdb, err := CreateWithBackend(backend) if err != nil { @@ -454,14 +451,10 @@ func TestMigrationReversion(t *testing.T) { cdb.Close() cleanup() - if err != nil { - t.Fatalf("unable to increase db version: %v", err) - } + require.NoError(t, err, "unable to increase db version") backend, cleanup, err = kvdb.GetTestBackend(tempDirName, "cdb") - if err != nil { - t.Fatalf("unable to get test db backend: %v", err) - } + require.NoError(t, err, "unable to get test db backend") defer cleanup() _, err = CreateWithBackend(backend) diff --git a/channeldb/nodes_test.go b/channeldb/nodes_test.go index b10d147a6..030cfb70f 100644 --- a/channeldb/nodes_test.go +++ b/channeldb/nodes_test.go @@ -8,15 +8,14 @@ import ( "github.com/btcsuite/btcd/btcec/v2" "github.com/btcsuite/btcd/wire" + "github.com/stretchr/testify/require" ) func TestLinkNodeEncodeDecode(t *testing.T) { t.Parallel() fullDB, cleanUp, err := MakeTestDB() - if err != nil { - t.Fatalf("unable to make test database: %v", err) - } + require.NoError(t, err, "unable to make test database") defer cleanUp() cdb := fullDB.ChannelStateDB() @@ -26,13 +25,9 @@ func TestLinkNodeEncodeDecode(t *testing.T) { _, pub1 := btcec.PrivKeyFromBytes(key[:]) _, pub2 := btcec.PrivKeyFromBytes(rev[:]) addr1, err := net.ResolveTCPAddr("tcp", "10.0.0.1:9000") - if err != nil { - t.Fatalf("unable to create test addr: %v", err) - } + require.NoError(t, err, "unable to create test addr") addr2, err := net.ResolveTCPAddr("tcp", "10.0.0.2:9000") - if err != nil { - t.Fatalf("unable to create test addr: %v", err) - } + require.NoError(t, err, "unable to create test addr") // Create two fresh link node instances with the above dummy data, then // fully sync both instances to disk. @@ -49,9 +44,7 @@ func TestLinkNodeEncodeDecode(t *testing.T) { // match the two created above. originalNodes := []*LinkNode{node2, node1} linkNodes, err := cdb.linkNodeDB.FetchAllLinkNodes() - if err != nil { - t.Fatalf("unable to fetch nodes: %v", err) - } + require.NoError(t, err, "unable to fetch nodes") for i, node := range linkNodes { if originalNodes[i].Network != node.Network { t.Fatalf("node networks don't match: expected %v, got %v", @@ -85,9 +78,7 @@ func TestLinkNodeEncodeDecode(t *testing.T) { // Fetch the same node from the database according to its public key. 
node1DB, err := cdb.linkNodeDB.FetchLinkNode(pub1) - if err != nil { - t.Fatalf("unable to find node: %v", err) - } + require.NoError(t, err, "unable to find node") // Both the last seen timestamp and the list of reachable addresses for // the node should be updated. @@ -113,9 +104,7 @@ func TestDeleteLinkNode(t *testing.T) { t.Parallel() fullDB, cleanUp, err := MakeTestDB() - if err != nil { - t.Fatalf("unable to make test database: %v", err) - } + require.NoError(t, err, "unable to make test database") defer cleanUp() cdb := fullDB.ChannelStateDB() diff --git a/channeldb/payment_control_test.go b/channeldb/payment_control_test.go index b67be2e00..c79151cb2 100644 --- a/channeldb/payment_control_test.go +++ b/channeldb/payment_control_test.go @@ -56,22 +56,16 @@ func TestPaymentControlSwitchFail(t *testing.T) { db, cleanup, err := MakeTestDB() defer cleanup() - if err != nil { - t.Fatalf("unable to init db: %v", err) - } + require.NoError(t, err, "unable to init db") pControl := NewPaymentControl(db) info, attempt, preimg, err := genInfo() - if err != nil { - t.Fatalf("unable to generate htlc message: %v", err) - } + require.NoError(t, err, "unable to generate htlc message") // Sends base htlc message which initiate StatusInFlight. err = pControl.InitPayment(info.PaymentIdentifier, info) - if err != nil { - t.Fatalf("unable to send htlc message: %v", err) - } + require.NoError(t, err, "unable to send htlc message") assertPaymentIndex(t, pControl, info.PaymentIdentifier) assertPaymentStatus(t, pControl, info.PaymentIdentifier, StatusInFlight) @@ -82,9 +76,7 @@ func TestPaymentControlSwitchFail(t *testing.T) { // Fail the payment, which should moved it to Failed. failReason := FailureReasonNoRoute _, err = pControl.Fail(info.PaymentIdentifier, failReason) - if err != nil { - t.Fatalf("unable to fail payment hash: %v", err) - } + require.NoError(t, err, "unable to fail payment hash") // Verify the status is indeed Failed. assertPaymentStatus(t, pControl, info.PaymentIdentifier, StatusFailed) @@ -100,9 +92,7 @@ func TestPaymentControlSwitchFail(t *testing.T) { // Sends the htlc again, which should succeed since the prior payment // failed. err = pControl.InitPayment(info.PaymentIdentifier, info) - if err != nil { - t.Fatalf("unable to send htlc message: %v", err) - } + require.NoError(t, err, "unable to send htlc message") // Check that our index has been updated, and the old index has been // removed. @@ -118,9 +108,7 @@ func TestPaymentControlSwitchFail(t *testing.T) { // However, this is not communicated to control tower in the current // implementation. It only registers the initiation of the attempt. _, err = pControl.RegisterAttempt(info.PaymentIdentifier, attempt) - if err != nil { - t.Fatalf("unable to register attempt: %v", err) - } + require.NoError(t, err, "unable to register attempt") htlcReason := HTLCFailUnreadable _, err = pControl.FailAttempt( @@ -144,9 +132,7 @@ func TestPaymentControlSwitchFail(t *testing.T) { // Record another attempt. 
attempt.AttemptID = 1 _, err = pControl.RegisterAttempt(info.PaymentIdentifier, attempt) - if err != nil { - t.Fatalf("unable to send htlc message: %v", err) - } + require.NoError(t, err, "unable to send htlc message") assertPaymentStatus(t, pControl, info.PaymentIdentifier, StatusInFlight) htlc = &htlcStatus{ @@ -165,9 +151,7 @@ func TestPaymentControlSwitchFail(t *testing.T) { Preimage: preimg, }, ) - if err != nil { - t.Fatalf("error shouldn't have been received, got: %v", err) - } + require.NoError(t, err, "error shouldn't have been received") if len(payment.HTLCs) != 2 { t.Fatalf("payment should have two htlcs, got: %d", @@ -204,23 +188,17 @@ func TestPaymentControlSwitchDoubleSend(t *testing.T) { db, cleanup, err := MakeTestDB() defer cleanup() - if err != nil { - t.Fatalf("unable to init db: %v", err) - } + require.NoError(t, err, "unable to init db") pControl := NewPaymentControl(db) info, attempt, preimg, err := genInfo() - if err != nil { - t.Fatalf("unable to generate htlc message: %v", err) - } + require.NoError(t, err, "unable to generate htlc message") // Sends base htlc message which initiate base status and move it to // StatusInFlight and verifies that it was changed. err = pControl.InitPayment(info.PaymentIdentifier, info) - if err != nil { - t.Fatalf("unable to send htlc message: %v", err) - } + require.NoError(t, err, "unable to send htlc message") assertPaymentIndex(t, pControl, info.PaymentIdentifier) assertPaymentStatus(t, pControl, info.PaymentIdentifier, StatusInFlight) @@ -239,9 +217,7 @@ func TestPaymentControlSwitchDoubleSend(t *testing.T) { // Record an attempt. _, err = pControl.RegisterAttempt(info.PaymentIdentifier, attempt) - if err != nil { - t.Fatalf("unable to send htlc message: %v", err) - } + require.NoError(t, err, "unable to send htlc message") assertPaymentStatus(t, pControl, info.PaymentIdentifier, StatusInFlight) htlc := &htlcStatus{ @@ -265,9 +241,7 @@ func TestPaymentControlSwitchDoubleSend(t *testing.T) { Preimage: preimg, }, ) - if err != nil { - t.Fatalf("error shouldn't have been received, got: %v", err) - } + require.NoError(t, err, "error shouldn't have been received") assertPaymentStatus(t, pControl, info.PaymentIdentifier, StatusSucceeded) htlc.settle = &preimg @@ -287,16 +261,12 @@ func TestPaymentControlSuccessesWithoutInFlight(t *testing.T) { db, cleanup, err := MakeTestDB() defer cleanup() - if err != nil { - t.Fatalf("unable to init db: %v", err) - } + require.NoError(t, err, "unable to init db") pControl := NewPaymentControl(db) info, _, preimg, err := genInfo() - if err != nil { - t.Fatalf("unable to generate htlc message: %v", err) - } + require.NoError(t, err, "unable to generate htlc message") // Attempt to complete the payment should fail. _, err = pControl.SettleAttempt( @@ -320,16 +290,12 @@ func TestPaymentControlFailsWithoutInFlight(t *testing.T) { db, cleanup, err := MakeTestDB() defer cleanup() - if err != nil { - t.Fatalf("unable to init db: %v", err) - } + require.NoError(t, err, "unable to init db") pControl := NewPaymentControl(db) info, _, _, err := genInfo() - if err != nil { - t.Fatalf("unable to generate htlc message: %v", err) - } + require.NoError(t, err, "unable to generate htlc message") // Calling Fail should return an error.
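One wrinkle in the conversion: the old Fatalf messages often end in formatting tails such as ": %v" or ", got: %v". Those tails have to be trimmed, since require appends the error on its own; a message that still needs real formatting should use the f-variant. A short sketch, with the attempt ID and message invented for illustration:

    package sketch

    import (
        "testing"

        "github.com/stretchr/testify/require"
    )

    func TestMessageTails(t *testing.T) {
        attemptID := 7
        var err error // nil here, so both assertions pass

        // Plain message: testify appends the error itself on failure,
        // so the old ": %v" tail is dropped.
        require.NoError(t, err, "unable to register attempt")

        // When the message genuinely needs formatting, use the
        // f-variant rather than leaving stray verbs in the string.
        require.NoErrorf(t, err, "unable to register attempt %d", attemptID)
    }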
_, err = pControl.Fail(info.PaymentIdentifier, FailureReasonNoRoute) @@ -348,9 +314,7 @@ func TestPaymentControlDeleteNonInFligt(t *testing.T) { db, cleanup, err := MakeTestDB() defer cleanup() - if err != nil { - t.Fatalf("unable to init db: %v", err) - } + require.NoError(t, err, "unable to init db") // Create a sequence number for duplicate payments that will not collide // with the sequence numbers for the payments we create. These values @@ -963,22 +927,16 @@ func TestPaymentControlMPPRecordValidation(t *testing.T) { db, cleanup, err := MakeTestDB() defer cleanup() - if err != nil { - t.Fatalf("unable to init db: %v", err) - } + require.NoError(t, err, "unable to init db") pControl := NewPaymentControl(db) info, attempt, _, err := genInfo() - if err != nil { - t.Fatalf("unable to generate htlc message: %v", err) - } + require.NoError(t, err, "unable to generate htlc message") // Init the payment. err = pControl.InitPayment(info.PaymentIdentifier, info) - if err != nil { - t.Fatalf("unable to send htlc message: %v", err) - } + require.NoError(t, err, "unable to send htlc message") // Create three unique attempts we'll use for the test, and // register them with the payment control. We set each @@ -991,9 +949,7 @@ func TestPaymentControlMPPRecordValidation(t *testing.T) { ) _, err = pControl.RegisterAttempt(info.PaymentIdentifier, attempt) - if err != nil { - t.Fatalf("unable to send htlc message: %v", err) - } + require.NoError(t, err, "unable to send htlc message") // Now try to register a non-MPP attempt, which should fail. b := *attempt @@ -1025,20 +981,14 @@ func TestPaymentControlMPPRecordValidation(t *testing.T) { // Create and init a new payment. This time we'll check that we cannot // register an MPP attempt if we already registered a non-MPP one. info, attempt, _, err = genInfo() - if err != nil { - t.Fatalf("unable to generate htlc message: %v", err) - } + require.NoError(t, err, "unable to generate htlc message") err = pControl.InitPayment(info.PaymentIdentifier, info) - if err != nil { - t.Fatalf("unable to send htlc message: %v", err) - } + require.NoError(t, err, "unable to send htlc message") attempt.Route.FinalHop().MPP = nil _, err = pControl.RegisterAttempt(info.PaymentIdentifier, attempt) - if err != nil { - t.Fatalf("unable to send htlc message: %v", err) - } + require.NoError(t, err, "unable to send htlc message") // Attempt to register an MPP attempt, which should fail. 
b = *attempt diff --git a/channeldb/payments_test.go b/channeldb/payments_test.go index c0f6cfebb..a2102bf3f 100644 --- a/channeldb/payments_test.go +++ b/channeldb/payments_test.go @@ -87,9 +87,7 @@ func TestSentPaymentSerialization(t *testing.T) { } newCreationInfo, err := deserializePaymentCreationInfo(&b) - if err != nil { - t.Fatalf("unable to deserialize creation info: %v", err) - } + require.NoError(t, err, "unable to deserialize creation info") if !reflect.DeepEqual(c, newCreationInfo) { t.Fatalf("Payments do not match after "+ @@ -104,9 +102,7 @@ func TestSentPaymentSerialization(t *testing.T) { } newWireInfo, err := deserializeHTLCAttemptInfo(&b) - if err != nil { - t.Fatalf("unable to deserialize info: %v", err) - } + require.NoError(t, err, "unable to deserialize info") newWireInfo.AttemptID = s.AttemptID // First we verify all the records match up porperly, as they aren't @@ -673,9 +669,7 @@ func appendDuplicatePayment(t *testing.T, db *DB, paymentHash lntypes.Hash, return nil }, func() {}) - if err != nil { - t.Fatalf("could not create payment: %v", err) - } + require.NoError(t, err, "could not create payment") } // putDuplicatePayment creates a duplicate payment in the duplicates bucket diff --git a/channeldb/waitingproof_test.go b/channeldb/waitingproof_test.go index 4fd49a991..7432fbf61 100644 --- a/channeldb/waitingproof_test.go +++ b/channeldb/waitingproof_test.go @@ -7,6 +7,7 @@ import ( "github.com/davecgh/go-spew/spew" "github.com/go-errors/errors" "github.com/lightningnetwork/lnd/lnwire" + "github.com/stretchr/testify/require" ) // TestWaitingProofStore tests add/get/remove functions of the waiting proof @@ -15,9 +16,7 @@ func TestWaitingProofStore(t *testing.T) { t.Parallel() db, cleanup, err := MakeTestDB() - if err != nil { - t.Fatalf("failed to make test database: %s", err) - } + require.NoError(t, err, "failed to make test database") defer cleanup() proof1 := NewWaitingProof(true, &lnwire.AnnounceSignatures{ @@ -37,9 +36,7 @@ func TestWaitingProofStore(t *testing.T) { } proof2, err := store.Get(proof1.Key()) - if err != nil { - t.Fatalf("unable retrieve proof from storage: %v", err) - } + require.NoError(t, err, "unable to retrieve proof from storage") if !reflect.DeepEqual(proof1, proof2) { t.Fatalf("wrong proof retrieved: expected %v, got %v", spew.Sdump(proof1), spew.Sdump(proof2)) diff --git a/channeldb/witness_cache_test.go b/channeldb/witness_cache_test.go index fb6c9683a..c756bd3c9 100644 --- a/channeldb/witness_cache_test.go +++ b/channeldb/witness_cache_test.go @@ -5,6 +5,7 @@ import ( "testing" "github.com/lightningnetwork/lnd/lntypes" + "github.com/stretchr/testify/require" ) // TestWitnessCacheSha256Retrieval tests that we're able to add and lookup new @@ -13,9 +14,7 @@ func TestWitnessCacheSha256Retrieval(t *testing.T) { t.Parallel() cdb, cleanUp, err := MakeTestDB() - if err != nil { - t.Fatalf("unable to make test database: %v", err) - } + require.NoError(t, err, "unable to make test database") defer cleanUp() wCache := cdb.NewWitnessCache() @@ -30,9 +29,7 @@ func TestWitnessCacheSha256Retrieval(t *testing.T) { // First, we'll attempt to add the preimages to the database. err = wCache.AddSha256Witnesses(preimages...) - if err != nil { - t.Fatalf("unable to add witness: %v", err) - } + require.NoError(t, err, "unable to add witness") // With the preimages stored, we'll now attempt to look them up.
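The reflect.DeepEqual blocks with spew.Sdump output, like the one left in TestWaitingProofStore above, are natural candidates for a follow-up pass: require.Equal performs the same deep comparison and renders both values plus a diff on failure, so the manual dumps become unnecessary. A sketch under that assumption, with waitingProofSketch invented as a stand-in type:

    package sketch

    import (
        "testing"

        "github.com/stretchr/testify/require"
    )

    type waitingProofSketch struct {
        IsRemote bool
    }

    func TestDeepEqualSketch(t *testing.T) {
        want := waitingProofSketch{IsRemote: true}
        got := waitingProofSketch{IsRemote: true}

        // require.Equal compares with reflect.DeepEqual semantics and,
        // on mismatch, prints both values and a readable diff, which
        // replaces the t.Fatalf(..., spew.Sdump(a), spew.Sdump(b)) idiom.
        require.Equal(t, want, got, "wrong proof retrieved")
    }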
for i, hash := range hashes { @@ -58,9 +55,7 @@ func TestWitnessCacheSha256Deletion(t *testing.T) { t.Parallel() cdb, cleanUp, err := MakeTestDB() - if err != nil { - t.Fatalf("unable to make test database: %v", err) - } + require.NoError(t, err, "unable to make test database") defer cleanUp() wCache := cdb.NewWitnessCache() @@ -83,9 +78,7 @@ func TestWitnessCacheSha256Deletion(t *testing.T) { // We'll now delete the first preimage. If we attempt to look it up, we // should get ErrNoWitnesses. err = wCache.DeleteSha256Witness(hash1) - if err != nil { - t.Fatalf("unable to delete witness: %v", err) - } + require.NoError(t, err, "unable to delete witness") _, err = wCache.LookupSha256Witness(hash1) if err != ErrNoWitnesses { t.Fatalf("expected ErrNoWitnesses instead got: %v", err) @@ -109,9 +102,7 @@ func TestWitnessCacheUnknownWitness(t *testing.T) { t.Parallel() cdb, cleanUp, err := MakeTestDB() - if err != nil { - t.Fatalf("unable to make test database: %v", err) - } + require.NoError(t, err, "unable to make test database") defer cleanUp() wCache := cdb.NewWitnessCache() @@ -128,9 +119,7 @@ func TestWitnessCacheUnknownWitness(t *testing.T) { // identically to the insertion via the generalized interface. func TestAddSha256Witnesses(t *testing.T) { cdb, cleanUp, err := MakeTestDB() - if err != nil { - t.Fatalf("unable to make test database: %v", err) - } + require.NoError(t, err, "unable to make test database") defer cleanUp() wCache := cdb.NewWitnessCache() @@ -152,9 +141,7 @@ func TestAddSha256Witnesses(t *testing.T) { ) err = wCache.legacyAddWitnesses(Sha256HashWitness, witnesses...) - if err != nil { - t.Fatalf("unable to add witness: %v", err) - } + require.NoError(t, err, "unable to add witness") for i, hash := range hashes { preimage := preimages[i] @@ -181,9 +168,7 @@ func TestAddSha256Witnesses(t *testing.T) { // Now, add the same witnesses using the type-safe interface for // lntypes.Preimages.. err = wCache.AddSha256Witnesses(preimages...) - if err != nil { - t.Fatalf("unable to add sha256 preimage: %v", err) - } + require.NoError(t, err, "unable to add sha256 preimage") // Finally, iterate over the keys and assert that the returned witnesses // match the original witnesses. This asserts that the specialized diff --git a/cluster/etcd_elector_test.go b/cluster/etcd_elector_test.go index 858c3f8e0..c10137045 100644 --- a/cluster/etcd_elector_test.go +++ b/cluster/etcd_elector_test.go @@ -42,9 +42,7 @@ func TestEtcdElector(t *testing.T) { defer guard() tmpDir, err := ioutil.TempDir("", "etcd") - if err != nil { - t.Fatalf("unable to create temp dir: %v", err) - } + require.NoError(t, err, "unable to create temp dir") etcdCfg, cleanup, err := etcd.NewEmbeddedEtcdInstance(tmpDir, 0, 0, "") require.NoError(t, err) diff --git a/contractcourt/breacharbiter_test.go b/contractcourt/breacharbiter_test.go index 63161d6e5..be2e27805 100644 --- a/contractcourt/breacharbiter_test.go +++ b/contractcourt/breacharbiter_test.go @@ -711,9 +711,7 @@ func countRetributions(t *testing.T, rs RetributionStorer) int { }, func() { count = 0 }) - if err != nil { - t.Fatalf("unable to list retributions in db: %v", err) - } + require.NoError(t, err, "unable to list retributions in db") return count } @@ -979,9 +977,7 @@ func initBreachedState(t *testing.T) (*BreachArbiter, // a spend of the funding transaction. Alice's channel will be the on // observing a breach. 
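The sentinel comparisons in the witness cache tests above, such as "if err != ErrNoWitnesses", also have a direct testify counterpart: require.ErrorIs, which goes through errors.Is and therefore keeps matching if the sentinel is ever wrapped. A sketch with errNoWitnesses and lookupWitness invented as stand-ins:

    package sketch

    import (
        "errors"
        "fmt"
        "testing"

        "github.com/stretchr/testify/require"
    )

    // errNoWitnesses stands in for the package's real sentinel error.
    var errNoWitnesses = errors.New("no witnesses")

    func lookupWitness() error {
        // Stand-in for a cache lookup; fails with a wrapped sentinel to
        // show that errors.Is still matches through the wrapping.
        return fmt.Errorf("cache miss: %w", errNoWitnesses)
    }

    func TestSentinelSketch(t *testing.T) {
        err := lookupWitness()

        // require.ErrorIs matches both the bare sentinel and wrapped
        // forms, unlike a direct != comparison.
        require.ErrorIs(t, err, errNoWitnesses)
    }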
alice, bob, cleanUpChans, err := createInitChannels(1) - if err != nil { - t.Fatalf("unable to create test channels: %v", err) - } + require.NoError(t, err, "unable to create test channels") // Instantiate a breach arbiter to handle the breach of alice's channel. contractBreaches := make(chan *ContractBreachEvent) @@ -989,9 +985,7 @@ func initBreachedState(t *testing.T) (*BreachArbiter, brar, cleanUpArb, err := createTestArbiter( t, contractBreaches, alice.State().Db.GetParentDB(), ) - if err != nil { - t.Fatalf("unable to initialize test breach arbiter: %v", err) - } + require.NoError(t, err, "unable to initialize test breach arbiter") // Send one HTLC to Bob and perform a state transition to lock it in. htlcAmount := lnwire.NewMSatFromSatoshis(20000) @@ -1009,9 +1003,7 @@ func initBreachedState(t *testing.T) (*BreachArbiter, // Generate the force close summary at this point in time, this will // serve as the old state bob will broadcast. bobClose, err := bob.ForceClose() - if err != nil { - t.Fatalf("unable to force close bob's channel: %v", err) - } + require.NoError(t, err, "unable to force close bob's channel") // Now send another HTLC and perform a state transition, this ensures // Alice is ahead of the state Bob will broadcast. @@ -1166,9 +1158,7 @@ func TestBreachHandoffFail(t *testing.T) { brar, cleanUpArb, err := createTestArbiter( t, contractBreaches, alice.State().Db.GetParentDB(), ) - if err != nil { - t.Fatalf("unable to initialize test breach arbiter: %v", err) - } + require.NoError(t, err, "unable to initialize test breach arbiter") defer cleanUpArb() // Signal a spend of the funding transaction and wait for the close @@ -1623,9 +1613,7 @@ func testBreachSpends(t *testing.T, test breachTest) { retribution, err := lnwallet.NewBreachRetribution( alice.State(), height, 1, forceCloseTx, ) - if err != nil { - t.Fatalf("unable to create breach retribution: %v", err) - } + require.NoError(t, err, "unable to create breach retribution") processACK := make(chan error) breach := &ContractBreachEvent{ @@ -1664,9 +1652,7 @@ func testBreachSpends(t *testing.T, test breachTest) { RemoteNextRevocation: state.RemoteNextRevocation, LocalChanConfig: state.LocalChanCfg, }) - if err != nil { - t.Fatalf("unable to close channel: %v", err) - } + require.NoError(t, err, "unable to close channel") // After exiting, the breach arbiter should have persisted the // retribution information and the channel should be shown as pending @@ -1839,9 +1825,7 @@ func TestBreachDelayedJusticeConfirmation(t *testing.T) { retribution, err := lnwallet.NewBreachRetribution( alice.State(), height, uint32(blockHeight), forceCloseTx, ) - if err != nil { - t.Fatalf("unable to create breach retribution: %v", err) - } + require.NoError(t, err, "unable to create breach retribution") processACK := make(chan error, 1) breach := &ContractBreachEvent{ @@ -1881,9 +1865,7 @@ func TestBreachDelayedJusticeConfirmation(t *testing.T) { RemoteNextRevocation: state.RemoteNextRevocation, LocalChanConfig: state.LocalChanCfg, }) - if err != nil { - t.Fatalf("unable to close channel: %v", err) - } + require.NoError(t, err, "unable to close channel") // After exiting, the breach arbiter should have persisted the // retribution information and the channel should be shown as pending @@ -2126,9 +2108,7 @@ func assertPendingClosed(t *testing.T, c *lnwallet.LightningChannel) { t.Helper() closedChans, err := c.State().Db.FetchClosedChannels(true) - if err != nil { - t.Fatalf("unable to load pending closed channels: %v", err) - } + 
require.NoError(t, err, "unable to load pending closed channels") for _, chanSummary := range closedChans { if chanSummary.ChanPoint == *c.ChanPoint { @@ -2145,9 +2125,7 @@ func assertNotPendingClosed(t *testing.T, c *lnwallet.LightningChannel) { t.Helper() closedChans, err := c.State().Db.FetchClosedChannels(true) - if err != nil { - t.Fatalf("unable to load pending closed channels: %v", err) - } + require.NoError(t, err, "unable to load pending closed channels") for _, chanSummary := range closedChans { if chanSummary.ChanPoint == *c.ChanPoint { diff --git a/contractcourt/briefcase_test.go b/contractcourt/briefcase_test.go index b6f1ec295..1c780443b 100644 --- a/contractcourt/briefcase_test.go +++ b/contractcourt/briefcase_test.go @@ -20,6 +20,7 @@ import ( "github.com/lightningnetwork/lnd/kvdb" "github.com/lightningnetwork/lnd/lntest/channels" "github.com/lightningnetwork/lnd/lnwallet" + "github.com/stretchr/testify/require" ) var ( @@ -306,9 +307,7 @@ func TestContractInsertionRetrieval(t *testing.T) { testLog, cleanUp, err := newTestBoltArbLog( testChainHash, testChanPoint1, ) - if err != nil { - t.Fatalf("unable to create test log: %v", err) - } + require.NoError(t, err, "unable to create test log") defer cleanUp() // The log created, we'll create a series of resolvers, each properly @@ -386,17 +385,13 @@ func TestContractInsertionRetrieval(t *testing.T) { // Now, we'll insert the resolver into the log, we do not need to apply // any closures, so we will pass in nil. err = testLog.InsertUnresolvedContracts(nil, resolvers...) - if err != nil { - t.Fatalf("unable to insert resolvers: %v", err) - } + require.NoError(t, err, "unable to insert resolvers") // With the resolvers inserted, we'll now attempt to retrieve them from // the database, so we can compare them to the versions we created // above. diskResolvers, err := testLog.FetchUnresolvedContracts() - if err != nil { - t.Fatalf("unable to retrieve resolvers: %v", err) - } + require.NoError(t, err, "unable to retrieve resolvers") if len(diskResolvers) != len(resolvers) { t.Fatalf("expected %v got resolvers, instead got %v: %#v", @@ -423,9 +418,7 @@ func TestContractInsertionRetrieval(t *testing.T) { t.Fatalf("unable to wipe log: %v", err) } diskResolvers, err = testLog.FetchUnresolvedContracts() - if err != nil { - t.Fatalf("unable to fetch unresolved contracts: %v", err) - } + require.NoError(t, err, "unable to fetch unresolved contracts") if len(diskResolvers) != 0 { t.Fatalf("no resolvers should be found, instead %v were", len(diskResolvers)) @@ -442,9 +435,7 @@ func TestContractResolution(t *testing.T) { testLog, cleanUp, err := newTestBoltArbLog( testChainHash, testChanPoint1, ) - if err != nil { - t.Fatalf("unable to create test log: %v", err) - } + require.NoError(t, err, "unable to create test log") defer cleanUp() // We'll now create a timeout resolver that we'll be using for the @@ -469,13 +460,9 @@ func TestContractResolution(t *testing.T) { // we get the same resolver out the other side. We do not need to apply // any closures. 
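Helpers such as assertPendingClosed and assertNotPendingClosed keep their t.Helper() call through the conversion, and that call is doing real work: it makes a failing require.NoError report the test's line number rather than the helper's. The pattern in isolation, with assertNoErr invented for illustration:

    package sketch

    import (
        "testing"

        "github.com/stretchr/testify/require"
    )

    // assertNoErr is a minimal require-based assertion helper.
    func assertNoErr(t *testing.T, err error, msg string) {
        // t.Helper makes a failure here report the caller's file and
        // line, not this helper's require.NoError line.
        t.Helper()
        require.NoError(t, err, msg)
    }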
err = testLog.InsertUnresolvedContracts(nil, timeoutResolver) - if err != nil { - t.Fatalf("unable to insert contract into db: %v", err) - } + require.NoError(t, err, "unable to insert contract into db") dbContracts, err := testLog.FetchUnresolvedContracts() - if err != nil { - t.Fatalf("unable to fetch contracts from db: %v", err) - } + require.NoError(t, err, "unable to fetch contracts from db") assertResolversEqual(t, timeoutResolver, dbContracts[0]) // Now, we'll mark the contract as resolved within the database. @@ -485,9 +472,7 @@ func TestContractResolution(t *testing.T) { // At this point, no contracts should exist within the log. dbContracts, err = testLog.FetchUnresolvedContracts() - if err != nil { - t.Fatalf("unable to fetch contracts from db: %v", err) - } + require.NoError(t, err, "unable to fetch contracts from db") if len(dbContracts) != 0 { t.Fatalf("no contract should be from in the db, instead %v "+ "were", len(dbContracts)) @@ -504,9 +489,7 @@ func TestContractSwapping(t *testing.T) { testLog, cleanUp, err := newTestBoltArbLog( testChainHash, testChanPoint1, ) - if err != nil { - t.Fatalf("unable to create test log: %v", err) - } + require.NoError(t, err, "unable to create test log") defer cleanUp() // We'll create two resolvers, a regular timeout resolver, and the @@ -533,23 +516,17 @@ func TestContractSwapping(t *testing.T) { // We'll first insert the contest resolver into the log with no // additional updates. err = testLog.InsertUnresolvedContracts(nil, contestResolver) - if err != nil { - t.Fatalf("unable to insert contract into db: %v", err) - } + require.NoError(t, err, "unable to insert contract into db") // With the resolver inserted, we'll now attempt to atomically swap it // for its underlying timeout resolver. err = testLog.SwapContract(contestResolver, timeoutResolver) - if err != nil { - t.Fatalf("unable to swap contracts: %v", err) - } + require.NoError(t, err, "unable to swap contracts") // At this point, there should now only be a single contract in the // database. dbContracts, err := testLog.FetchUnresolvedContracts() - if err != nil { - t.Fatalf("unable to fetch contracts from db: %v", err) - } + require.NoError(t, err, "unable to fetch contracts from db") if len(dbContracts) != 1 { t.Fatalf("one contract should be from in the db, instead %v "+ "were", len(dbContracts)) @@ -569,9 +546,7 @@ func TestContractResolutionsStorage(t *testing.T) { testLog, cleanUp, err := newTestBoltArbLog( testChainHash, testChanPoint1, ) - if err != nil { - t.Fatalf("unable to create test log: %v", err) - } + require.NoError(t, err, "unable to create test log") defer cleanUp() // With the test log created, we'll now craft a contact resolution that @@ -661,9 +636,7 @@ func TestContractResolutionsStorage(t *testing.T) { t.Fatalf("unable to insert resolutions into db: %v", err) } diskRes, err := testLog.FetchContractResolutions() - if err != nil { - t.Fatalf("unable to read resolution from db: %v", err) - } + require.NoError(t, err, "unable to read resolution from db") if !reflect.DeepEqual(&res, diskRes) { t.Fatalf("resolution mismatch: expected %v\n, got %v", @@ -689,16 +662,12 @@ func TestStateMutation(t *testing.T) { testLog, cleanUp, err := newTestBoltArbLog( testChainHash, testChanPoint1, ) - if err != nil { - t.Fatalf("unable to create test log: %v", err) - } + require.NoError(t, err, "unable to create test log") defer cleanUp() // The default state of an arbitrator should be StateDefault. 
arbState, err := testLog.CurrentState(nil) - if err != nil { - t.Fatalf("unable to read arb state: %v", err) - } + require.NoError(t, err, "unable to read arb state") if arbState != StateDefault { t.Fatalf("state mismatch: expected %v, got %v", StateDefault, arbState) @@ -710,9 +679,7 @@ func TestStateMutation(t *testing.T) { t.Fatalf("unable to write state: %v", err) } arbState, err = testLog.CurrentState(nil) - if err != nil { - t.Fatalf("unable to read arb state: %v", err) - } + require.NoError(t, err, "unable to read arb state") if arbState != StateFullyResolved { t.Fatalf("state mismatch: expected %v, got %v", StateFullyResolved, arbState) @@ -721,16 +688,12 @@ func TestStateMutation(t *testing.T) { // Next, we'll wipe our state and ensure that if we try to query for // the current state, we get the proper error. err = testLog.WipeHistory() - if err != nil { - t.Fatalf("unable to wipe history: %v", err) - } + require.NoError(t, err, "unable to wipe history") // If we try to query for the state again, we should get the default // state again. arbState, err = testLog.CurrentState(nil) - if err != nil { - t.Fatalf("unable to query current state: %v", err) - } + require.NoError(t, err, "unable to query current state") if arbState != StateDefault { t.Fatalf("state mismatch: expected %v, got %v", StateDefault, arbState) @@ -747,17 +710,13 @@ func TestScopeIsolation(t *testing.T) { testLog1, cleanUp1, err := newTestBoltArbLog( testChainHash, testChanPoint1, ) - if err != nil { - t.Fatalf("unable to create test log: %v", err) - } + require.NoError(t, err, "unable to create test log") defer cleanUp1() testLog2, cleanUp2, err := newTestBoltArbLog( testChainHash, testChanPoint2, ) - if err != nil { - t.Fatalf("unable to create test log: %v", err) - } + require.NoError(t, err, "unable to create test log") defer cleanUp2() // We'll now update the current state of both the logs to a unique @@ -772,13 +731,9 @@ func TestScopeIsolation(t *testing.T) { // Querying each log, the states should be the prior one we set, and be // disjoint. log1State, err := testLog1.CurrentState(nil) - if err != nil { - t.Fatalf("unable to read arb state: %v", err) - } + require.NoError(t, err, "unable to read arb state") log2State, err := testLog2.CurrentState(nil) - if err != nil { - t.Fatalf("unable to read arb state: %v", err) - } + require.NoError(t, err, "unable to read arb state") if log1State == log2State { t.Fatalf("log states are the same: %v", log1State) @@ -802,9 +757,7 @@ func TestCommitSetStorage(t *testing.T) { testLog, cleanUp, err := newTestBoltArbLog( testChainHash, testChanPoint1, ) - if err != nil { - t.Fatalf("unable to create test log: %v", err) - } + require.NoError(t, err, "unable to create test log") defer cleanUp() activeHTLCs := []channeldb.HTLC{ diff --git a/contractcourt/chain_arbitrator_test.go b/contractcourt/chain_arbitrator_test.go index cb1648065..9eb2b90fd 100644 --- a/contractcourt/chain_arbitrator_test.go +++ b/contractcourt/chain_arbitrator_test.go @@ -13,6 +13,7 @@ import ( "github.com/lightningnetwork/lnd/clock" "github.com/lightningnetwork/lnd/lntest/mock" "github.com/lightningnetwork/lnd/lnwallet" + "github.com/stretchr/testify/require" ) // TestChainArbitratorRepulishCloses tests that the chain arbitrator will @@ -145,14 +146,10 @@ func TestResolveContract(t *testing.T) { // To start with, we'll create a new temp DB for the duration of this // test. 
tempPath, err := ioutil.TempDir("", "testdb") - if err != nil { - t.Fatalf("unable to make temp dir: %v", err) - } + require.NoError(t, err, "unable to make temp dir") defer os.RemoveAll(tempPath) db, err := channeldb.Open(tempPath) - if err != nil { - t.Fatalf("unable to open db: %v", err) - } + require.NoError(t, err, "unable to open db") defer db.Close() // With the DB created, we'll make a new channel, and mark it as @@ -160,9 +157,7 @@ func TestResolveContract(t *testing.T) { newChannel, _, cleanup, err := lnwallet.CreateTestChannels( channeldb.SingleFunderTweaklessBit, ) - if err != nil { - t.Fatalf("unable to make new test channel: %v", err) - } + require.NoError(t, err, "unable to make new test channel") defer cleanup() channel := newChannel.State() channel.Db = db.ChannelStateDB() @@ -206,17 +201,13 @@ func TestResolveContract(t *testing.T) { // While the resolver are active, we'll now remove the channel from the // database (mark is as closed). err = db.ChannelStateDB().AbandonChannel(&channel.FundingOutpoint, 4) - if err != nil { - t.Fatalf("unable to remove channel: %v", err) - } + require.NoError(t, err, "unable to remove channel") // With the channel removed, we'll now manually call ResolveContract. // This stimulates needing to remove a channel from the chain arb due // to any possible external consistency issues. err = chainArb.ResolveContract(channel.FundingOutpoint) - if err != nil { - t.Fatalf("unable to resolve contract: %v", err) - } + require.NoError(t, err, "unable to resolve contract") // The shouldn't be an active chain watcher or channel arb for this // channel. @@ -240,7 +231,5 @@ func TestResolveContract(t *testing.T) { // If we attempt to call this method again, then we should get a nil // error, as there is no more state to be cleaned up. 
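The fixture code above still follows the ioutil.TempDir plus defer os.RemoveAll pattern, and several tests in this patch defer cleanup before the setup error is even checked. A possible follow-up, assuming Go 1.15+ and the channeldb.Open API already shown in this file, is to lean on the testing package's built-ins; setupTestDB below is an invented name:

    package sketch

    import (
        "testing"

        "github.com/lightningnetwork/lnd/channeldb"
        "github.com/stretchr/testify/require"
    )

    func setupTestDB(t *testing.T) *channeldb.DB {
        // t.TempDir is removed automatically when the test ends, so no
        // defer os.RemoveAll is needed.
        tempPath := t.TempDir()

        db, err := channeldb.Open(tempPath)
        require.NoError(t, err, "unable to open db")

        // t.Cleanup runs after the test completes, even if a later
        // setup step fails the test first, so an early failure never
        // skips the teardown.
        t.Cleanup(func() { require.NoError(t, db.Close()) })

        return db
    }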
err = chainArb.ResolveContract(channel.FundingOutpoint) - if err != nil { - t.Fatalf("second resolve call shouldn't fail: %v", err) - } + require.NoError(t, err, "second resolve call shouldn't fail") } diff --git a/contractcourt/chain_watcher_test.go b/contractcourt/chain_watcher_test.go index 4078f8a5b..6e9b0acd6 100644 --- a/contractcourt/chain_watcher_test.go +++ b/contractcourt/chain_watcher_test.go @@ -14,6 +14,7 @@ import ( "github.com/lightningnetwork/lnd/lntest/mock" "github.com/lightningnetwork/lnd/lnwallet" "github.com/lightningnetwork/lnd/lnwire" + "github.com/stretchr/testify/require" ) // TestChainWatcherRemoteUnilateralClose tests that the chain watcher is able @@ -27,9 +28,7 @@ func TestChainWatcherRemoteUnilateralClose(t *testing.T) { aliceChannel, bobChannel, cleanUp, err := lnwallet.CreateTestChannels( channeldb.SingleFunderTweaklessBit, ) - if err != nil { - t.Fatalf("unable to create test channels: %v", err) - } + require.NoError(t, err, "unable to create test channels") defer cleanUp() // With the channels created, we'll now create a chain watcher instance @@ -45,13 +44,9 @@ func TestChainWatcherRemoteUnilateralClose(t *testing.T) { signer: aliceChannel.Signer, extractStateNumHint: lnwallet.GetStateNumHint, }) - if err != nil { - t.Fatalf("unable to create chain watcher: %v", err) - } + require.NoError(t, err, "unable to create chain watcher") err = aliceChainWatcher.Start() - if err != nil { - t.Fatalf("unable to start chain watcher: %v", err) - } + require.NoError(t, err, "unable to start chain watcher") defer aliceChainWatcher.Stop() // We'll request a new channel event subscription from Alice's chain @@ -118,9 +113,7 @@ func TestChainWatcherRemoteUnilateralClosePendingCommit(t *testing.T) { aliceChannel, bobChannel, cleanUp, err := lnwallet.CreateTestChannels( channeldb.SingleFunderTweaklessBit, ) - if err != nil { - t.Fatalf("unable to create test channels: %v", err) - } + require.NoError(t, err, "unable to create test channels") defer cleanUp() // With the channels created, we'll now create a chain watcher instance @@ -136,9 +129,7 @@ func TestChainWatcherRemoteUnilateralClosePendingCommit(t *testing.T) { signer: aliceChannel.Signer, extractStateNumHint: lnwallet.GetStateNumHint, }) - if err != nil { - t.Fatalf("unable to create chain watcher: %v", err) - } + require.NoError(t, err, "unable to create chain watcher") if err := aliceChainWatcher.Start(); err != nil { t.Fatalf("unable to start chain watcher: %v", err) } diff --git a/contractcourt/channel_arbitrator_test.go b/contractcourt/channel_arbitrator_test.go index 0c7dc7078..5f789f121 100644 --- a/contractcourt/channel_arbitrator_test.go +++ b/contractcourt/channel_arbitrator_test.go @@ -462,9 +462,7 @@ func TestChannelArbitratorCooperativeClose(t *testing.T) { } chanArbCtx, err := createTestChannelArbitrator(t, log) - if err != nil { - t.Fatalf("unable to create ChannelArbitrator: %v", err) - } + require.NoError(t, err, "unable to create ChannelArbitrator") if err := chanArbCtx.chanArb.Start(nil); err != nil { t.Fatalf("unable to start ChannelArbitrator: %v", err) @@ -523,9 +521,7 @@ func TestChannelArbitratorRemoteForceClose(t *testing.T) { } chanArbCtx, err := createTestChannelArbitrator(t, log) - if err != nil { - t.Fatalf("unable to create ChannelArbitrator: %v", err) - } + require.NoError(t, err, "unable to create ChannelArbitrator") chanArb := chanArbCtx.chanArb if err := chanArb.Start(nil); err != nil { @@ -578,9 +574,7 @@ func TestChannelArbitratorLocalForceClose(t *testing.T) { } chanArbCtx, err := 
createTestChannelArbitrator(t, log) - if err != nil { - t.Fatalf("unable to create ChannelArbitrator: %v", err) - } + require.NoError(t, err, "unable to create ChannelArbitrator") chanArb := chanArbCtx.chanArb if err := chanArb.Start(nil); err != nil { @@ -686,9 +680,7 @@ func TestChannelArbitratorBreachClose(t *testing.T) { } chanArbCtx, err := createTestChannelArbitrator(t, log) - if err != nil { - t.Fatalf("unable to create ChannelArbitrator: %v", err) - } + require.NoError(t, err, "unable to create ChannelArbitrator") chanArb := chanArbCtx.chanArb chanArb.cfg.PreimageDB = newMockWitnessBeacon() chanArb.cfg.Registry = &mockRegistry{} @@ -817,9 +809,7 @@ func TestChannelArbitratorLocalForceClosePendingHtlc(t *testing.T) { // a real DB will be created. We need this for our test as we want to // test proper restart recovery and resolver population. chanArbCtx, err := createTestChannelArbitrator(t, nil) - if err != nil { - t.Fatalf("unable to create ChannelArbitrator: %v", err) - } + require.NoError(t, err, "unable to create ChannelArbitrator") chanArb := chanArbCtx.chanArb chanArb.cfg.PreimageDB = newMockWitnessBeacon() chanArb.cfg.Registry = &mockRegistry{} @@ -990,9 +980,7 @@ func TestChannelArbitratorLocalForceClosePendingHtlc(t *testing.T) { // We'll no re-create the resolver, notice that we use the existing // arbLog so it carries over the same on-disk state. chanArbCtxNew, err := chanArbCtx.Restart(nil) - if err != nil { - t.Fatalf("unable to create ChannelArbitrator: %v", err) - } + require.NoError(t, err, "unable to create ChannelArbitrator") chanArb = chanArbCtxNew.chanArb defer chanArbCtxNew.CleanUp() @@ -1088,9 +1076,7 @@ func TestChannelArbitratorLocalForceCloseRemoteConfirmed(t *testing.T) { } chanArbCtx, err := createTestChannelArbitrator(t, log) - if err != nil { - t.Fatalf("unable to create ChannelArbitrator: %v", err) - } + require.NoError(t, err, "unable to create ChannelArbitrator") chanArb := chanArbCtx.chanArb if err := chanArb.Start(nil); err != nil { @@ -1197,9 +1183,7 @@ func TestChannelArbitratorLocalForceDoubleSpend(t *testing.T) { } chanArbCtx, err := createTestChannelArbitrator(t, log) - if err != nil { - t.Fatalf("unable to create ChannelArbitrator: %v", err) - } + require.NoError(t, err, "unable to create ChannelArbitrator") chanArb := chanArbCtx.chanArb if err := chanArb.Start(nil); err != nil { @@ -1305,9 +1289,7 @@ func TestChannelArbitratorPersistence(t *testing.T) { } chanArbCtx, err := createTestChannelArbitrator(t, log) - if err != nil { - t.Fatalf("unable to create ChannelArbitrator: %v", err) - } + require.NoError(t, err, "unable to create ChannelArbitrator") chanArb := chanArbCtx.chanArb if err := chanArb.Start(nil); err != nil { @@ -1340,9 +1322,7 @@ func TestChannelArbitratorPersistence(t *testing.T) { // Restart the channel arb, this'll use the same long and prior // context. chanArbCtx, err = chanArbCtx.Restart(nil) - if err != nil { - t.Fatalf("unable to restart channel arb: %v", err) - } + require.NoError(t, err, "unable to restart channel arb") chanArb = chanArbCtx.chanArb // Again, it should start up in the default state. @@ -1371,9 +1351,7 @@ func TestChannelArbitratorPersistence(t *testing.T) { // Restart once again to simulate yet another restart. chanArbCtx, err = chanArbCtx.Restart(nil) - if err != nil { - t.Fatalf("unable to restart channel arb: %v", err) - } + require.NoError(t, err, "unable to restart channel arb") chanArb = chanArbCtx.chanArb // Starts out in StateDefault. 
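The patch deliberately leaves the compound form alone: "if err := chanArb.Start(nil); err != nil { t.Fatalf(...) }" stays as-is throughout this file. It converts just as mechanically once the initializer is split out of the if statement; a sketch with a local closure standing in for Start:

    package sketch

    import (
        "testing"

        "github.com/stretchr/testify/require"
    )

    func TestSplitInitSketch(t *testing.T) {
        // start stands in for chanArb.Start(nil).
        start := func() error { return nil }

        // Before: initializer and error check fused into one statement,
        // the shape this patch does not touch.
        if err := start(); err != nil {
            t.Fatalf("unable to start ChannelArbitrator: %v", err)
        }

        // After: split the call out so the error can feed require directly.
        err := start()
        require.NoError(t, err, "unable to start ChannelArbitrator")
    }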
@@ -1400,9 +1378,7 @@ func TestChannelArbitratorPersistence(t *testing.T) { // Create a new arbitrator, and now make fetching resolutions succeed. log.failFetch = nil chanArbCtx, err = chanArbCtx.Restart(nil) - if err != nil { - t.Fatalf("unable to restart channel arb: %v", err) - } + require.NoError(t, err, "unable to restart channel arb") defer chanArbCtx.CleanUp() // Finally it should advance to StateFullyResolved. @@ -1431,9 +1407,7 @@ func TestChannelArbitratorForceCloseBreachedChannel(t *testing.T) { } chanArbCtx, err := createTestChannelArbitrator(t, log) - if err != nil { - t.Fatalf("unable to create ChannelArbitrator: %v", err) - } + require.NoError(t, err, "unable to create ChannelArbitrator") chanArb := chanArbCtx.chanArb if err := chanArb.Start(nil); err != nil { @@ -1515,9 +1489,7 @@ func TestChannelArbitratorForceCloseBreachedChannel(t *testing.T) { c.chanArb.cfg.ClosingHeight = 100 c.chanArb.cfg.CloseType = channeldb.BreachClose }) - if err != nil { - t.Fatalf("unable to create ChannelArbitrator: %v", err) - } + require.NoError(t, err, "unable to create ChannelArbitrator") defer chanArbCtx.CleanUp() // We should transition to StateContractClosed. @@ -1699,9 +1671,7 @@ func TestChannelArbitratorEmptyResolutions(t *testing.T) { } chanArbCtx, err := createTestChannelArbitrator(t, log) - if err != nil { - t.Fatalf("unable to create ChannelArbitrator: %v", err) - } + require.NoError(t, err, "unable to create ChannelArbitrator") chanArb := chanArbCtx.chanArb chanArb.cfg.IsPendingClose = true @@ -1736,9 +1706,7 @@ func TestChannelArbitratorAlreadyForceClosed(t *testing.T) { state: StateCommitmentBroadcasted, } chanArbCtx, err := createTestChannelArbitrator(t, log) - if err != nil { - t.Fatalf("unable to create ChannelArbitrator: %v", err) - } + require.NoError(t, err, "unable to create ChannelArbitrator") chanArb := chanArbCtx.chanArb if err := chanArb.Start(nil); err != nil { t.Fatalf("unable to start ChannelArbitrator: %v", err) @@ -2009,9 +1977,7 @@ func TestChannelArbitratorPendingExpiredHTLC(t *testing.T) { resolvers: make(map[ContractResolver]struct{}), } chanArbCtx, err := createTestChannelArbitrator(t, log) - if err != nil { - t.Fatalf("unable to create ChannelArbitrator: %v", err) - } + require.NoError(t, err, "unable to create ChannelArbitrator") chanArb := chanArbCtx.chanArb // We'll inject a test clock implementation so we can control the uptime. @@ -2504,9 +2470,7 @@ func TestChannelArbitratorAnchors(t *testing.T) { } chanArbCtx, err := createTestChannelArbitrator(t, log) - if err != nil { - t.Fatalf("unable to create ChannelArbitrator: %v", err) - } + require.NoError(t, err, "unable to create ChannelArbitrator") // Replace our mocked put report function with one which will push // reports into a channel for us to consume. We update this function diff --git a/contractcourt/nursery_store_test.go b/contractcourt/nursery_store_test.go index 4f14b6c60..5ea542c19 100644 --- a/contractcourt/nursery_store_test.go +++ b/contractcourt/nursery_store_test.go @@ -10,6 +10,7 @@ import ( "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcd/wire" "github.com/lightningnetwork/lnd/channeldb" + "github.com/stretchr/testify/require" ) type incubateTest struct { @@ -53,15 +54,11 @@ func initIncubateTests() { // any modifying calls are made. 
func TestNurseryStoreInit(t *testing.T) { cdb, cleanUp, err := channeldb.MakeTestDB() - if err != nil { - t.Fatalf("unable to open channel db: %v", err) - } + require.NoError(t, err, "unable to open channel db") defer cleanUp() ns, err := NewNurseryStore(&chainHash, cdb) - if err != nil { - t.Fatalf("unable to open nursery store: %v", err) - } + require.NoError(t, err, "unable to open nursery store") assertNumChannels(t, ns, 0) assertNumPreschools(t, ns, 0) @@ -73,15 +70,11 @@ func TestNurseryStoreInit(t *testing.T) { // intermediate states. func TestNurseryStoreIncubate(t *testing.T) { cdb, cleanUp, err := channeldb.MakeTestDB() - if err != nil { - t.Fatalf("unable to open channel db: %v", err) - } + require.NoError(t, err, "unable to open channel db") defer cleanUp() ns, err := NewNurseryStore(&chainHash, cdb) - if err != nil { - t.Fatalf("unable to open nursery store: %v", err) - } + require.NoError(t, err, "unable to open nursery store") for i, test := range incubateTests { // At the beginning of each test, we do not expect to the @@ -314,15 +307,11 @@ func TestNurseryStoreIncubate(t *testing.T) { // purged height is set appropriately. func TestNurseryStoreGraduate(t *testing.T) { cdb, cleanUp, err := channeldb.MakeTestDB() - if err != nil { - t.Fatalf("unable to open channel db: %v", err) - } + require.NoError(t, err, "unable to open channel db") defer cleanUp() ns, err := NewNurseryStore(&chainHash, cdb) - if err != nil { - t.Fatalf("unable to open nursery store: %v", err) - } + require.NoError(t, err, "unable to open nursery store") kid := &kidOutputs[3] @@ -333,16 +322,12 @@ func TestNurseryStoreGraduate(t *testing.T) { // First, add a commitment output to the nursery store, which is // initially inserted in the preschool bucket. err = ns.Incubate([]kidOutput{*kid}, nil) - if err != nil { - t.Fatalf("unable to incubate commitment output: %v", err) - } + require.NoError(t, err, "unable to incubate commitment output") // Then, move the commitment output to the kindergarten bucket, such // that it resides in the height index at its maturity height. err = ns.PreschoolToKinder(kid, 0) - if err != nil { - t.Fatalf("unable to move pscl output to kndr: %v", err) - } + require.NoError(t, err, "unable to move pscl output to kndr") // Now, iteratively purge all height below the target maturity height, // checking that each class is now empty, and that the last purged @@ -394,9 +379,7 @@ func assertNumChanOutputs(t *testing.T, ns NurseryStorer, // matches the expected number. 
func assertNumPreschools(t *testing.T, ns NurseryStorer, expected int) { psclOutputs, err := ns.FetchPreschools() - if err != nil { - t.Fatalf("unable to retrieve preschool outputs: %v", err) - } + require.NoError(t, err, "unable to retrieve preschool outputs") if len(psclOutputs) != expected { t.Fatalf("expected number of pscl outputs to be %d, got %v", @@ -534,9 +517,7 @@ func assertChannelMaturity(t *testing.T, ns NurseryStorer, chanPoint *wire.OutPoint, expectedMaturity bool) { isMature, err := ns.IsMatureChannel(chanPoint) - if err != nil { - t.Fatalf("unable to fetch channel maturity: %v", err) - } + require.NoError(t, err, "unable to fetch channel maturity") if isMature != expectedMaturity { t.Fatalf("expected channel maturity: %v, actual: %v", diff --git a/contractcourt/utxonursery_test.go b/contractcourt/utxonursery_test.go index bb8d78af9..4563a8daa 100644 --- a/contractcourt/utxonursery_test.go +++ b/contractcourt/utxonursery_test.go @@ -24,6 +24,7 @@ import ( "github.com/lightningnetwork/lnd/lntest/mock" "github.com/lightningnetwork/lnd/lnwallet" "github.com/lightningnetwork/lnd/sweep" + "github.com/stretchr/testify/require" ) var ( @@ -419,9 +420,7 @@ func createNurseryTestContext(t *testing.T, // still considerable logic in the store. cdb, cleanup, err := channeldb.MakeTestDB() - if err != nil { - t.Fatalf("unable to open channeldb: %v", err) - } + require.NoError(t, err, "unable to open channeldb") store, err := NewNurseryStore(&chainhash.Hash{}, cdb) if err != nil { diff --git a/discovery/gossiper_test.go b/discovery/gossiper_test.go index cffe80536..01c418ded 100644 --- a/discovery/gossiper_test.go +++ b/discovery/gossiper_test.go @@ -809,9 +809,7 @@ func TestProcessAnnouncement(t *testing.T) { timestamp := testTimestamp ctx, cleanup, err := createTestCtx(0) - if err != nil { - t.Fatalf("can't create context: %v", err) - } + require.NoError(t, err, "can't create context") defer cleanup() assertSenderExistence := func(sender *btcec.PublicKey, msg msgWithSenders) { @@ -826,18 +824,14 @@ func TestProcessAnnouncement(t *testing.T) { // First, we'll craft a valid remote channel announcement and send it to // the gossiper so that it can be processed. ca, err := createRemoteChannelAnnouncement(0) - if err != nil { - t.Fatalf("can't create channel announcement: %v", err) - } + require.NoError(t, err, "can't create channel announcement") select { case err = <-ctx.gossiper.ProcessRemoteAnnouncement(ca, nodePeer): case <-time.After(2 * time.Second): t.Fatal("remote announcement not processed") } - if err != nil { - t.Fatalf("can't process remote announcement: %v", err) - } + require.NoError(t, err, "can't process remote announcement") // The announcement should be broadcast and included in our local view // of the graph. @@ -855,18 +849,14 @@ func TestProcessAnnouncement(t *testing.T) { // We'll then craft the channel policy of the remote party and also send // it to the gossiper. ua, err := createUpdateAnnouncement(0, 0, remoteKeyPriv1, timestamp) - if err != nil { - t.Fatalf("can't create update announcement: %v", err) - } + require.NoError(t, err, "can't create update announcement") select { case err = <-ctx.gossiper.ProcessRemoteAnnouncement(ua, nodePeer): case <-time.After(2 * time.Second): t.Fatal("remote announcement not processed") } - if err != nil { - t.Fatalf("can't process remote announcement: %v", err) - } + require.NoError(t, err, "can't process remote announcement") // The channel policy should be broadcast to the rest of the network. 
 	select {
@@ -882,18 +872,14 @@ func TestProcessAnnouncement(t *testing.T) {
 
 	// Finally, we'll craft the remote party's node announcement.
 	na, err := createNodeAnnouncement(remoteKeyPriv1, timestamp)
-	if err != nil {
-		t.Fatalf("can't create node announcement: %v", err)
-	}
+	require.NoError(t, err, "can't create node announcement")
 
 	select {
 	case err = <-ctx.gossiper.ProcessRemoteAnnouncement(na, nodePeer):
 	case <-time.After(2 * time.Second):
 		t.Fatal("remote announcement not processed")
 	}
-	if err != nil {
-		t.Fatalf("can't process remote announcement: %v", err)
-	}
+	require.NoError(t, err, "can't process remote announcement")
 
 	// It should also be broadcast to the network and included in our local
 	// view of the graph.
@@ -917,15 +903,11 @@ func TestPrematureAnnouncement(t *testing.T) {
 	timestamp := testTimestamp
 
 	ctx, cleanup, err := createTestCtx(0)
-	if err != nil {
-		t.Fatalf("can't create context: %v", err)
-	}
+	require.NoError(t, err, "can't create context")
 	defer cleanup()
 
 	_, err = createNodeAnnouncement(remoteKeyPriv1, timestamp)
-	if err != nil {
-		t.Fatalf("can't create node announcement: %v", err)
-	}
+	require.NoError(t, err, "can't create node announcement")
 
 	nodePeer := &mockPeer{remoteKeyPriv1.PubKey(), nil, nil}
@@ -934,9 +916,7 @@ func TestPrematureAnnouncement(t *testing.T) {
 	// highest know to us, for that reason it should be ignored and not
 	// added to the router.
 	ca, err := createRemoteChannelAnnouncement(1)
-	if err != nil {
-		t.Fatalf("can't create channel announcement: %v", err)
-	}
+	require.NoError(t, err, "can't create channel announcement")
 
 	select {
 	case <-ctx.gossiper.ProcessRemoteAnnouncement(ca, nodePeer):
@@ -955,9 +935,7 @@ func TestSignatureAnnouncementLocalFirst(t *testing.T) {
 	t.Parallel()
 
 	ctx, cleanup, err := createTestCtx(uint32(proofMatureDelta))
-	if err != nil {
-		t.Fatalf("can't create context: %v", err)
-	}
+	require.NoError(t, err, "can't create context")
 	defer cleanup()
 
 	// Set up a channel that we can use to inspect the messages sent
@@ -975,14 +953,10 @@ func TestSignatureAnnouncementLocalFirst(t *testing.T) {
 	}
 
 	batch, err := createLocalAnnouncements(0)
-	if err != nil {
-		t.Fatalf("can't generate announcements: %v", err)
-	}
+	require.NoError(t, err, "can't generate announcements")
 
 	remoteKey, err := btcec.ParsePubKey(batch.nodeAnn2.NodeID[:])
-	if err != nil {
-		t.Fatalf("unable to parse pubkey: %v", err)
-	}
+	require.NoError(t, err, "unable to parse pubkey")
 	remotePeer := &mockPeer{remoteKey, sentMsgs, ctx.gossiper.quit}
 
 	// Recreate lightning network topology. Initialize router with channel
@@ -992,9 +966,7 @@ func TestSignatureAnnouncementLocalFirst(t *testing.T) {
 	case <-time.After(2 * time.Second):
 		t.Fatal("did not process local announcement")
 	}
-	if err != nil {
-		t.Fatalf("unable to process channel ann: %v", err)
-	}
+	require.NoError(t, err, "unable to process channel ann")
 	select {
 	case <-ctx.broadcastedMessage:
 		t.Fatal("channel announcement was broadcast")
@@ -1006,9 +978,7 @@ func TestSignatureAnnouncementLocalFirst(t *testing.T) {
 	case <-time.After(2 * time.Second):
 		t.Fatal("did not process local announcement")
 	}
-	if err != nil {
-		t.Fatalf("unable to process channel update: %v", err)
-	}
+	require.NoError(t, err, "unable to process channel update")
 	select {
 	case <-ctx.broadcastedMessage:
 		t.Fatal("channel update announcement was broadcast")
@@ -1020,9 +990,7 @@ func TestSignatureAnnouncementLocalFirst(t *testing.T) {
 	case <-time.After(2 * time.Second):
 		t.Fatal("did not process local announcement")
 	}
-	if err != nil {
-		t.Fatalf("unable to process node ann: %v", err)
-	}
+	require.NoError(t, err, "unable to process node ann")
 	select {
 	case <-ctx.broadcastedMessage:
 		t.Fatal("node announcement was broadcast")
@@ -1046,9 +1014,7 @@ func TestSignatureAnnouncementLocalFirst(t *testing.T) {
 	case <-time.After(2 * time.Second):
 		t.Fatal("did not process remote announcement")
 	}
-	if err != nil {
-		t.Fatalf("unable to process channel update: %v", err)
-	}
+	require.NoError(t, err, "unable to process channel update")
 	select {
 	case <-ctx.broadcastedMessage:
 		t.Fatal("channel update announcement was broadcast")
@@ -1062,9 +1028,7 @@ func TestSignatureAnnouncementLocalFirst(t *testing.T) {
 	case <-time.After(2 * time.Second):
 		t.Fatal("did not process remote announcement")
 	}
-	if err != nil {
-		t.Fatalf("unable to process node ann: %v", err)
-	}
+	require.NoError(t, err, "unable to process node ann")
 	select {
 	case <-ctx.broadcastedMessage:
 		t.Fatal("node announcement was broadcast")
@@ -1078,9 +1042,7 @@ func TestSignatureAnnouncementLocalFirst(t *testing.T) {
 	case <-time.After(2 * time.Second):
 		t.Fatal("did not process remote announcement")
 	}
-	if err != nil {
-		t.Fatalf("unable to process local proof: %v", err)
-	}
+	require.NoError(t, err, "unable to process local proof")
 
 	select {
 	case <-ctx.broadcastedMessage:
@@ -1112,9 +1074,7 @@ func TestSignatureAnnouncementLocalFirst(t *testing.T) {
 	case <-time.After(2 * time.Second):
 		t.Fatal("did not process remote announcement")
 	}
-	if err != nil {
-		t.Fatalf("unable to process remote proof: %v", err)
-	}
+	require.NoError(t, err, "unable to process remote proof")
 
 	for i := 0; i < 5; i++ {
 		select {
@@ -1148,9 +1108,7 @@ func TestOrphanSignatureAnnouncement(t *testing.T) {
 	t.Parallel()
 
 	ctx, cleanup, err := createTestCtx(uint32(proofMatureDelta))
-	if err != nil {
-		t.Fatalf("can't create context: %v", err)
-	}
+	require.NoError(t, err, "can't create context")
 	defer cleanup()
 
 	// Set up a channel that we can use to inspect the messages sent
@@ -1168,14 +1126,10 @@ func TestOrphanSignatureAnnouncement(t *testing.T) {
 	}
 
 	batch, err := createLocalAnnouncements(0)
-	if err != nil {
-		t.Fatalf("can't generate announcements: %v", err)
-	}
+	require.NoError(t, err, "can't generate announcements")
 
 	remoteKey, err := btcec.ParsePubKey(batch.nodeAnn2.NodeID[:])
-	if err != nil {
-		t.Fatalf("unable to parse pubkey: %v", err)
-	}
+	require.NoError(t, err, "unable to parse pubkey")
 	remotePeer := &mockPeer{remoteKey, sentMsgs, ctx.gossiper.quit}
 
 	// Pretending that we receive local channel announcement from funding
 	// manager, thereby kick off the announcement exchange process.
@@ -1188,9 +1142,7 @@ func TestOrphanSignatureAnnouncement(t *testing.T) {
 	case <-time.After(2 * time.Second):
 		t.Fatal("did not process remote announcement")
 	}
-	if err != nil {
-		t.Fatalf("unable to proceed announcement: %v", err)
-	}
+	require.NoError(t, err, "unable to process announcement")
 
 	number := 0
 	if err := ctx.gossiper.cfg.WaitingProofStore.ForAll(
@@ -1217,9 +1169,7 @@ func TestOrphanSignatureAnnouncement(t *testing.T) {
 		t.Fatal("did not process local announcement")
 	}
 
-	if err != nil {
-		t.Fatalf("unable to process: %v", err)
-	}
+	require.NoError(t, err, "unable to process")
 
 	select {
 	case <-ctx.broadcastedMessage:
@@ -1232,9 +1182,7 @@ func TestOrphanSignatureAnnouncement(t *testing.T) {
 	case <-time.After(2 * time.Second):
 		t.Fatal("did not process local announcement")
 	}
-	if err != nil {
-		t.Fatalf("unable to process: %v", err)
-	}
+	require.NoError(t, err, "unable to process")
 
 	select {
 	case <-ctx.broadcastedMessage:
@@ -1247,9 +1195,7 @@ func TestOrphanSignatureAnnouncement(t *testing.T) {
 	case <-time.After(2 * time.Second):
 		t.Fatal("did not process local announcement")
 	}
-	if err != nil {
-		t.Fatalf("unable to process node ann: %v", err)
-	}
+	require.NoError(t, err, "unable to process node ann")
 	select {
 	case <-ctx.broadcastedMessage:
 		t.Fatal("node announcement was broadcast")
@@ -1272,9 +1218,7 @@ func TestOrphanSignatureAnnouncement(t *testing.T) {
 	case <-time.After(2 * time.Second):
 		t.Fatal("did not process remote announcement")
 	}
-	if err != nil {
-		t.Fatalf("unable to process node ann: %v", err)
-	}
+	require.NoError(t, err, "unable to process node ann")
 	select {
 	case <-ctx.broadcastedMessage:
 		t.Fatal("channel update announcement was broadcast")
@@ -1288,9 +1232,7 @@ func TestOrphanSignatureAnnouncement(t *testing.T) {
 	case <-time.After(2 * time.Second):
 		t.Fatal("did not process remote announcement")
 	}
-	if err != nil {
-		t.Fatalf("unable to process: %v", err)
-	}
+	require.NoError(t, err, "unable to process")
 	select {
 	case <-ctx.broadcastedMessage:
 		t.Fatal("node announcement announcement was broadcast")
@@ -1304,9 +1246,7 @@ func TestOrphanSignatureAnnouncement(t *testing.T) {
 	case <-time.After(2 * time.Second):
 		t.Fatal("did not process remote announcement")
 	}
-	if err != nil {
-		t.Fatalf("unable to process: %v", err)
-	}
+	require.NoError(t, err, "unable to process")
 
 	// The local proof should be sent to the remote peer.
 	select {
@@ -1352,20 +1292,14 @@ func TestSignatureAnnouncementRetryAtStartup(t *testing.T) {
 	t.Parallel()
 
 	ctx, cleanup, err := createTestCtx(uint32(proofMatureDelta))
-	if err != nil {
-		t.Fatalf("can't create context: %v", err)
-	}
+	require.NoError(t, err, "can't create context")
	defer cleanup()
 
 	batch, err := createLocalAnnouncements(0)
-	if err != nil {
-		t.Fatalf("can't generate announcements: %v", err)
-	}
+	require.NoError(t, err, "can't generate announcements")
 
 	remoteKey, err := btcec.ParsePubKey(batch.nodeAnn2.NodeID[:])
-	if err != nil {
-		t.Fatalf("unable to parse pubkey: %v", err)
-	}
+	require.NoError(t, err, "unable to parse pubkey")
 
 	// Set up a channel to intercept the messages sent to the remote peer.
 	sentToPeer := make(chan lnwire.Message, 1)
@@ -1388,9 +1322,7 @@ func TestSignatureAnnouncementRetryAtStartup(t *testing.T) {
 	case <-time.After(2 * time.Second):
 		t.Fatal("did not process local announcement")
 	}
-	if err != nil {
-		t.Fatalf("unable to process channel ann: %v", err)
-	}
+	require.NoError(t, err, "unable to process channel ann")
 	select {
 	case <-ctx.broadcastedMessage:
 		t.Fatal("channel announcement was broadcast")
@@ -1477,9 +1409,7 @@ func TestSignatureAnnouncementRetryAtStartup(t *testing.T) {
 		PubKey:     ctx.gossiper.selfKey,
 		KeyLocator: ctx.gossiper.selfKeyLoc,
 	})
-	if err != nil {
-		t.Fatalf("unable to recreate gossiper: %v", err)
-	}
+	require.NoError(t, err, "unable to recreate gossiper")
 	if err := gossiper.Start(); err != nil {
 		t.Fatalf("unable to start recreated gossiper: %v", err)
 	}
@@ -1569,20 +1499,14 @@ func TestSignatureAnnouncementFullProofWhenRemoteProof(t *testing.T) {
 	t.Parallel()
 
 	ctx, cleanup, err := createTestCtx(uint32(proofMatureDelta))
-	if err != nil {
-		t.Fatalf("can't create context: %v", err)
-	}
+	require.NoError(t, err, "can't create context")
 	defer cleanup()
 
 	batch, err := createLocalAnnouncements(0)
-	if err != nil {
-		t.Fatalf("can't generate announcements: %v", err)
-	}
+	require.NoError(t, err, "can't generate announcements")
 
 	remoteKey, err := btcec.ParsePubKey(batch.nodeAnn2.NodeID[:])
-	if err != nil {
-		t.Fatalf("unable to parse pubkey: %v", err)
-	}
+	require.NoError(t, err, "unable to parse pubkey")
 
 	// Set up a channel we can use to inspect messages sent by the
 	// gossiper to the remote peer.
@@ -1606,9 +1530,7 @@ func TestSignatureAnnouncementFullProofWhenRemoteProof(t *testing.T) {
 	case <-time.After(2 * time.Second):
 		t.Fatal("did not process local announcement")
 	}
-	if err != nil {
-		t.Fatalf("unable to process channel ann: %v", err)
-	}
+	require.NoError(t, err, "unable to process channel ann")
 	select {
 	case <-ctx.broadcastedMessage:
 		t.Fatal("channel announcement was broadcast")
@@ -1622,9 +1544,7 @@ func TestSignatureAnnouncementFullProofWhenRemoteProof(t *testing.T) {
 	case <-time.After(2 * time.Second):
 		t.Fatal("did not process local announcement")
 	}
-	if err != nil {
-		t.Fatalf("unable to process channel update: %v", err)
-	}
+	require.NoError(t, err, "unable to process channel update")
 	select {
 	case <-ctx.broadcastedMessage:
 		t.Fatal("channel update announcement was broadcast")
@@ -1661,9 +1581,7 @@ func TestSignatureAnnouncementFullProofWhenRemoteProof(t *testing.T) {
 	case <-time.After(2 * time.Second):
 		t.Fatal("did not process remote announcement")
 	}
-	if err != nil {
-		t.Fatalf("unable to process channel update: %v", err)
-	}
+	require.NoError(t, err, "unable to process channel update")
 	select {
 	case <-ctx.broadcastedMessage:
 		t.Fatal("channel update announcement was broadcast")
@@ -1677,9 +1595,7 @@ func TestSignatureAnnouncementFullProofWhenRemoteProof(t *testing.T) {
 	case <-time.After(2 * time.Second):
 		t.Fatal("did not process remote announcement")
 	}
-	if err != nil {
-		t.Fatalf("unable to process node ann: %v", err)
-	}
+	require.NoError(t, err, "unable to process node ann")
 	select {
 	case <-ctx.broadcastedMessage:
 		t.Fatal("node announcement was broadcast")
@@ -1695,9 +1611,7 @@ func TestSignatureAnnouncementFullProofWhenRemoteProof(t *testing.T) {
 	case <-time.After(2 * time.Second):
 		t.Fatal("did not process local announcement")
 	}
-	if err != nil {
-		t.Fatalf("unable to process local proof: %v", err)
-	}
+	require.NoError(t, err, "unable to process local proof")
 
 	select {
 	case err = <-ctx.gossiper.ProcessRemoteAnnouncement(
@@ -1706,9 +1620,7 @@ func TestSignatureAnnouncementFullProofWhenRemoteProof(t *testing.T) {
 	case <-time.After(2 * time.Second):
 		t.Fatal("did not process local announcement")
 	}
-	if err != nil {
-		t.Fatalf("unable to process remote proof: %v", err)
-	}
+	require.NoError(t, err, "unable to process remote proof")
 
 	// We expect the gossiper to send this message to the remote peer.
 	select {
@@ -1753,9 +1665,7 @@ func TestSignatureAnnouncementFullProofWhenRemoteProof(t *testing.T) {
 	case <-time.After(2 * time.Second):
 		t.Fatal("did not process local announcement")
 	}
-	if err != nil {
-		t.Fatalf("unable to process remote proof: %v", err)
-	}
+	require.NoError(t, err, "unable to process remote proof")
 
 	// We expect the gossiper to send this message to the remote peer.
 	select {
@@ -1793,9 +1703,7 @@ func TestDeDuplicatedAnnouncements(t *testing.T) {
 	// Ensure that remote channel announcements are properly stored
 	// and de-duplicated.
 	ca, err := createRemoteChannelAnnouncement(0)
-	if err != nil {
-		t.Fatalf("can't create remote channel announcement: %v", err)
-	}
+	require.NoError(t, err, "can't create remote channel announcement")
 	nodePeer := &mockPeer{bitcoinKeyPub2, nil, nil}
 	announcements.AddMsgs(networkMsg{
@@ -1811,9 +1719,7 @@ func TestDeDuplicatedAnnouncements(t *testing.T) {
 	// same channel ID. Adding this shouldn't cause an increase in the
 	// number of items as they should be de-duplicated.
 	ca2, err := createRemoteChannelAnnouncement(0)
-	if err != nil {
-		t.Fatalf("can't create remote channel announcement: %v", err)
-	}
+	require.NoError(t, err, "can't create remote channel announcement")
 	announcements.AddMsgs(networkMsg{
 		msg:  ca2,
 		peer: nodePeer,
@@ -1827,9 +1733,7 @@ func TestDeDuplicatedAnnouncements(t *testing.T) {
 	// stored and de-duplicated. We do this by creating two updates
 	// announcements with the same short ID and flag.
 	ua, err := createUpdateAnnouncement(0, 0, remoteKeyPriv1, timestamp)
-	if err != nil {
-		t.Fatalf("can't create update announcement: %v", err)
-	}
+	require.NoError(t, err, "can't create update announcement")
 	announcements.AddMsgs(networkMsg{
 		msg:  ua,
 		peer: nodePeer,
@@ -1842,9 +1746,7 @@ func TestDeDuplicatedAnnouncements(t *testing.T) {
 	// Adding the very same announcement shouldn't cause an increase in the
 	// number of ChannelUpdate announcements stored.
 	ua2, err := createUpdateAnnouncement(0, 0, remoteKeyPriv1, timestamp)
-	if err != nil {
-		t.Fatalf("can't create update announcement: %v", err)
-	}
+	require.NoError(t, err, "can't create update announcement")
 	announcements.AddMsgs(networkMsg{
 		msg:  ua2,
 		peer: nodePeer,
@@ -1857,9 +1759,7 @@ func TestDeDuplicatedAnnouncements(t *testing.T) {
 	// Adding an announcement with a later timestamp should replace the
 	// stored one.
 	ua3, err := createUpdateAnnouncement(0, 0, remoteKeyPriv1, timestamp+1)
-	if err != nil {
-		t.Fatalf("can't create update announcement: %v", err)
-	}
+	require.NoError(t, err, "can't create update announcement")
 	announcements.AddMsgs(networkMsg{
 		msg:  ua3,
 		peer: nodePeer,
@@ -1891,9 +1791,7 @@ func TestDeDuplicatedAnnouncements(t *testing.T) {
 
 	// Adding a channel update with an earlier timestamp should NOT
 	// replace the one stored.
 	ua4, err := createUpdateAnnouncement(0, 0, remoteKeyPriv1, timestamp)
-	if err != nil {
-		t.Fatalf("can't create update announcement: %v", err)
-	}
+	require.NoError(t, err, "can't create update announcement")
 	announcements.AddMsgs(networkMsg{
 		msg:  ua4,
 		peer: nodePeer,
@@ -1907,9 +1805,7 @@ func TestDeDuplicatedAnnouncements(t *testing.T) {
 	// Next well ensure that node announcements are properly de-duplicated.
 	// We'll first add a single instance with a node's private key.
 	na, err := createNodeAnnouncement(remoteKeyPriv1, timestamp)
-	if err != nil {
-		t.Fatalf("can't create node announcement: %v", err)
-	}
+	require.NoError(t, err, "can't create node announcement")
 	announcements.AddMsgs(networkMsg{
 		msg:  na,
 		peer: nodePeer,
@@ -1921,9 +1817,7 @@ func TestDeDuplicatedAnnouncements(t *testing.T) {
 
 	// We'll now add another node to the batch.
 	na2, err := createNodeAnnouncement(remoteKeyPriv2, timestamp)
-	if err != nil {
-		t.Fatalf("can't create node announcement: %v", err)
-	}
+	require.NoError(t, err, "can't create node announcement")
 	announcements.AddMsgs(networkMsg{
 		msg:  na2,
 		peer: nodePeer,
@@ -1936,9 +1830,7 @@ func TestDeDuplicatedAnnouncements(t *testing.T) {
 	// Adding a new instance of the _same_ node shouldn't increase the size
 	// of the node ann batch.
 	na3, err := createNodeAnnouncement(remoteKeyPriv2, timestamp)
-	if err != nil {
-		t.Fatalf("can't create node announcement: %v", err)
-	}
+	require.NoError(t, err, "can't create node announcement")
 	announcements.AddMsgs(networkMsg{
 		msg:  na3,
 		peer: nodePeer,
@@ -1952,9 +1844,7 @@ func TestDeDuplicatedAnnouncements(t *testing.T) {
 	// key is still de-duplicated.
 	newNodeKeyPointer := remoteKeyPriv2
 	na4, err := createNodeAnnouncement(newNodeKeyPointer, timestamp)
-	if err != nil {
-		t.Fatalf("can't create node announcement: %v", err)
-	}
+	require.NoError(t, err, "can't create node announcement")
 	announcements.AddMsgs(networkMsg{
 		msg:  na4,
 		peer: nodePeer,
@@ -1967,9 +1857,7 @@ func TestDeDuplicatedAnnouncements(t *testing.T) {
 	// Ensure that node announcement with increased timestamp replaces
 	// what is currently stored.
 	na5, err := createNodeAnnouncement(remoteKeyPriv2, timestamp+1)
-	if err != nil {
-		t.Fatalf("can't create node announcement: %v", err)
-	}
+	require.NoError(t, err, "can't create node announcement")
 	announcements.AddMsgs(networkMsg{
 		msg:  na5,
 		peer: nodePeer,
@@ -2044,9 +1932,7 @@ func TestForwardPrivateNodeAnnouncement(t *testing.T) {
 	)
 
 	ctx, cleanup, err := createTestCtx(startingHeight)
-	if err != nil {
-		t.Fatalf("can't create context: %v", err)
-	}
+	require.NoError(t, err, "can't create context")
 	defer cleanup()
 
 	// We'll start off by processing a channel announcement without a proof
@@ -2075,9 +1961,7 @@ func TestForwardPrivateNodeAnnouncement(t *testing.T) {
 	}
 
 	nodeAnn, err := createNodeAnnouncement(remoteKeyPriv1, timestamp)
-	if err != nil {
-		t.Fatalf("unable to create node announcement: %v", err)
-	}
+	require.NoError(t, err, "unable to create node announcement")
 
 	select {
 	case err := <-ctx.gossiper.ProcessLocalAnnouncement(nodeAnn):
@@ -2101,9 +1985,7 @@ func TestForwardPrivateNodeAnnouncement(t *testing.T) {
 	// ChannelAnnouncement and hand it off to the gossiper in order to
 	// process it.
 	remoteChanAnn, err := createRemoteChannelAnnouncement(startingHeight - 1)
-	if err != nil {
-		t.Fatalf("unable to create remote channel announcement: %v", err)
-	}
+	require.NoError(t, err, "unable to create remote channel announcement")
 	peer := &mockPeer{pubKey, nil, nil}
 
 	select {
@@ -2124,9 +2006,7 @@ func TestForwardPrivateNodeAnnouncement(t *testing.T) {
 	// We'll recreate the NodeAnnouncement with an updated timestamp to
 	// prevent a stale update. The NodeAnnouncement should now be forwarded.
 	nodeAnn, err = createNodeAnnouncement(remoteKeyPriv1, timestamp+1)
-	if err != nil {
-		t.Fatalf("unable to create node announcement: %v", err)
-	}
+	require.NoError(t, err, "unable to create node announcement")
 
 	select {
 	case err := <-ctx.gossiper.ProcessRemoteAnnouncement(nodeAnn, peer):
@@ -2152,15 +2032,11 @@ func TestRejectZombieEdge(t *testing.T) {
 	// We'll start by creating our test context with a batch of
 	// announcements.
 	ctx, cleanup, err := createTestCtx(0)
-	if err != nil {
-		t.Fatalf("unable to create test context: %v", err)
-	}
+	require.NoError(t, err, "unable to create test context")
 	defer cleanup()
 
 	batch, err := createRemoteAnnouncements(0)
-	if err != nil {
-		t.Fatalf("unable to create announcements: %v", err)
-	}
+	require.NoError(t, err, "unable to create announcements")
 	remotePeer := &mockPeer{pk: remoteKeyPriv2.PubKey()}
 
 	// processAnnouncements is a helper closure we'll use to test that we
@@ -2258,15 +2134,11 @@ func TestProcessZombieEdgeNowLive(t *testing.T) {
 	// We'll start by creating our test context with a batch of
 	// announcements.
 	ctx, cleanup, err := createTestCtx(0)
-	if err != nil {
-		t.Fatalf("unable to create test context: %v", err)
-	}
+	require.NoError(t, err, "unable to create test context")
 	defer cleanup()
 
 	batch, err := createRemoteAnnouncements(0)
-	if err != nil {
-		t.Fatalf("unable to create announcements: %v", err)
-	}
+	require.NoError(t, err, "unable to create announcements")
 
 	remotePeer := &mockPeer{pk: remoteKeyPriv1.PubKey()}
@@ -2420,20 +2292,14 @@ func TestReceiveRemoteChannelUpdateFirst(t *testing.T) {
 	t.Parallel()
 
 	ctx, cleanup, err := createTestCtx(uint32(proofMatureDelta))
-	if err != nil {
-		t.Fatalf("can't create context: %v", err)
-	}
+	require.NoError(t, err, "can't create context")
 	defer cleanup()
 
 	batch, err := createLocalAnnouncements(0)
-	if err != nil {
-		t.Fatalf("can't generate announcements: %v", err)
-	}
+	require.NoError(t, err, "can't generate announcements")
 
 	remoteKey, err := btcec.ParsePubKey(batch.nodeAnn2.NodeID[:])
-	if err != nil {
-		t.Fatalf("unable to parse pubkey: %v", err)
-	}
+	require.NoError(t, err, "unable to parse pubkey")
 
 	// Set up a channel that we can use to inspect the messages sent
 	// directly from the gossiper.
@@ -2461,9 +2327,7 @@ func TestReceiveRemoteChannelUpdateFirst(t *testing.T) {
 	}
 
 	err = <-ctx.gossiper.ProcessRemoteAnnouncement(batch.nodeAnn2, remotePeer)
-	if err != nil {
-		t.Fatalf("unable to process node ann: %v", err)
-	}
+	require.NoError(t, err, "unable to process node ann")
 	select {
 	case <-ctx.broadcastedMessage:
 		t.Fatal("node announcement was broadcast")
@@ -2545,9 +2409,7 @@ func TestReceiveRemoteChannelUpdateFirst(t *testing.T) {
 	chanInfo, e1, e2, err = ctx.router.GetChannelByID(
 		batch.chanUpdAnn1.ShortChannelID,
 	)
-	if err != nil {
-		t.Fatalf("unable to get channel from router: %v", err)
-	}
+	require.NoError(t, err, "unable to get channel from router")
 	if chanInfo == nil {
 		t.Fatalf("chanInfo was nil")
 	}
@@ -2628,9 +2490,7 @@ func TestExtraDataChannelAnnouncementValidation(t *testing.T) {
 	t.Parallel()
 
 	ctx, cleanup, err := createTestCtx(0)
-	if err != nil {
-		t.Fatalf("can't create context: %v", err)
-	}
+	require.NoError(t, err, "can't create context")
 	defer cleanup()
 
 	remotePeer := &mockPeer{remoteKeyPriv1.PubKey(), nil, nil}
@@ -2640,9 +2500,7 @@ func TestExtraDataChannelAnnouncementValidation(t *testing.T) {
 	// final signature check.
 	extraBytes := []byte("gotta validate this still!")
 	ca, err := createRemoteChannelAnnouncement(0, extraBytes)
-	if err != nil {
-		t.Fatalf("can't create channel announcement: %v", err)
-	}
+	require.NoError(t, err, "can't create channel announcement")
 
 	// We'll now send the announcement to the main gossiper. We should be
 	// able to validate this announcement to problem.
@@ -2664,9 +2522,7 @@ func TestExtraDataChannelUpdateValidation(t *testing.T) {
 	timestamp := testTimestamp
 
 	ctx, cleanup, err := createTestCtx(0)
-	if err != nil {
-		t.Fatalf("can't create context: %v", err)
-	}
+	require.NoError(t, err, "can't create context")
 	defer cleanup()
 
 	remotePeer := &mockPeer{remoteKeyPriv1.PubKey(), nil, nil}
@@ -2675,23 +2531,17 @@ func TestExtraDataChannelUpdateValidation(t *testing.T) {
 	// channel announcement, and another channel update announcement, that
 	// has additional data that we won't be interpreting.
 	chanAnn, err := createRemoteChannelAnnouncement(0)
-	if err != nil {
-		t.Fatalf("unable to create chan ann: %v", err)
-	}
+	require.NoError(t, err, "unable to create chan ann")
 	chanUpdAnn1, err := createUpdateAnnouncement(
 		0, 0, remoteKeyPriv1, timestamp,
 		[]byte("must also validate"),
 	)
-	if err != nil {
-		t.Fatalf("unable to create chan up: %v", err)
-	}
+	require.NoError(t, err, "unable to create chan up")
 	chanUpdAnn2, err := createUpdateAnnouncement(
 		0, 1, remoteKeyPriv2, timestamp,
 		[]byte("must also validate"),
 	)
-	if err != nil {
-		t.Fatalf("unable to create chan up: %v", err)
-	}
+	require.NoError(t, err, "unable to create chan up")
 
 	// We should be able to properly validate all three messages without
 	// any issue.
@@ -2700,27 +2550,21 @@ func TestExtraDataChannelUpdateValidation(t *testing.T) {
 	case <-time.After(2 * time.Second):
 		t.Fatal("did not process remote announcement")
 	}
-	if err != nil {
-		t.Fatalf("unable to process announcement: %v", err)
-	}
+	require.NoError(t, err, "unable to process announcement")
 
 	select {
 	case err = <-ctx.gossiper.ProcessRemoteAnnouncement(chanUpdAnn1, remotePeer):
 	case <-time.After(2 * time.Second):
 		t.Fatal("did not process remote announcement")
 	}
-	if err != nil {
-		t.Fatalf("unable to process announcement: %v", err)
-	}
+	require.NoError(t, err, "unable to process announcement")
 
 	select {
 	case err = <-ctx.gossiper.ProcessRemoteAnnouncement(chanUpdAnn2, remotePeer):
 	case <-time.After(2 * time.Second):
 		t.Fatal("did not process remote announcement")
 	}
-	if err != nil {
-		t.Fatalf("unable to process announcement: %v", err)
-	}
+	require.NoError(t, err, "unable to process announcement")
 }
 
 // TestExtraDataNodeAnnouncementValidation tests that we're able to properly
@@ -2730,9 +2574,7 @@ func TestExtraDataNodeAnnouncementValidation(t *testing.T) {
 	t.Parallel()
 
 	ctx, cleanup, err := createTestCtx(0)
-	if err != nil {
-		t.Fatalf("can't create context: %v", err)
-	}
+	require.NoError(t, err, "can't create context")
 	defer cleanup()
 
 	remotePeer := &mockPeer{remoteKeyPriv1.PubKey(), nil, nil}
@@ -2744,18 +2586,14 @@ func TestExtraDataNodeAnnouncementValidation(t *testing.T) {
 	nodeAnn, err := createNodeAnnouncement(
 		remoteKeyPriv1, timestamp, []byte("gotta validate"),
 	)
-	if err != nil {
-		t.Fatalf("can't create node announcement: %v", err)
-	}
+	require.NoError(t, err, "can't create node announcement")
 
 	select {
 	case err = <-ctx.gossiper.ProcessRemoteAnnouncement(nodeAnn, remotePeer):
 	case <-time.After(2 * time.Second):
 		t.Fatal("did not process remote announcement")
 	}
-	if err != nil {
-		t.Fatalf("unable to process announcement: %v", err)
-	}
+	require.NoError(t, err, "unable to process announcement")
 }
 
 // assertBroadcast checks that num messages are being broadcasted from the
@@ -2805,20 +2643,14 @@ func TestRetransmit(t *testing.T) {
 	t.Parallel()
 
 	ctx, cleanup, err := createTestCtx(proofMatureDelta)
-	if err != nil {
-		t.Fatalf("can't create context: %v", err)
-	}
+	require.NoError(t, err, "can't create context")
 	defer cleanup()
 
 	batch, err := createLocalAnnouncements(0)
-	if err != nil {
-		t.Fatalf("can't generate announcements: %v", err)
-	}
+	require.NoError(t, err, "can't generate announcements")
 
 	remoteKey, err := btcec.ParsePubKey(batch.nodeAnn2.NodeID[:])
-	if err != nil {
-		t.Fatalf("unable to parse pubkey: %v", err)
-	}
+	require.NoError(t, err, "unable to parse pubkey")
 	remotePeer := &mockPeer{remoteKey, nil, nil}
 
 	// Process a local channel announcement, channel update and node
@@ -2918,20 +2750,14 @@ func TestNodeAnnouncementNoChannels(t *testing.T) {
 	t.Parallel()
 
 	ctx, cleanup, err := createTestCtx(0)
-	if err != nil {
-		t.Fatalf("can't create context: %v", err)
-	}
+	require.NoError(t, err, "can't create context")
 	defer cleanup()
 
 	batch, err := createRemoteAnnouncements(0)
-	if err != nil {
-		t.Fatalf("can't generate announcements: %v", err)
-	}
+	require.NoError(t, err, "can't generate announcements")
 
 	remoteKey, err := btcec.ParsePubKey(batch.nodeAnn2.NodeID[:])
-	if err != nil {
-		t.Fatalf("unable to parse pubkey: %v", err)
-	}
+	require.NoError(t, err, "unable to parse pubkey")
 	remotePeer := &mockPeer{remoteKey, nil, nil}
 
 	// Process the remote node announcement.
@@ -2941,9 +2767,7 @@ func TestNodeAnnouncementNoChannels(t *testing.T) {
 	case <-time.After(2 * time.Second):
 		t.Fatal("did not process remote announcement")
 	}
-	if err != nil {
-		t.Fatalf("unable to process announcement: %v", err)
-	}
+	require.NoError(t, err, "unable to process announcement")
 
 	// Since no channels or node announcements were already in the graph,
 	// the node announcement should be ignored, and not forwarded.
@@ -2961,9 +2785,7 @@ func TestNodeAnnouncementNoChannels(t *testing.T) {
 	case <-time.After(2 * time.Second):
 		t.Fatal("did not process remote announcement")
 	}
-	if err != nil {
-		t.Fatalf("unable to process announcement: %v", err)
-	}
+	require.NoError(t, err, "unable to process announcement")
 
 	select {
 	case err = <-ctx.gossiper.ProcessRemoteAnnouncement(batch.chanUpdAnn2,
@@ -2971,9 +2793,7 @@ func TestNodeAnnouncementNoChannels(t *testing.T) {
 	case <-time.After(2 * time.Second):
 		t.Fatal("did not process remote announcement")
 	}
-	if err != nil {
-		t.Fatalf("unable to process announcement: %v", err)
-	}
+	require.NoError(t, err, "unable to process announcement")
 
 	// Now process the node announcement again.
 	select {
@@ -2981,9 +2801,7 @@ func TestNodeAnnouncementNoChannels(t *testing.T) {
 	case <-time.After(2 * time.Second):
 		t.Fatal("did not process remote announcement")
 	}
-	if err != nil {
-		t.Fatalf("unable to process announcement: %v", err)
-	}
+	require.NoError(t, err, "unable to process announcement")
 
 	// This time the node announcement should be forwarded. The same should
 	// the channel announcement and update be.
@@ -3003,9 +2821,7 @@ func TestNodeAnnouncementNoChannels(t *testing.T) {
 	case <-time.After(2 * time.Second):
 		t.Fatal("did not process remote announcement")
 	}
-	if err != nil {
-		t.Fatalf("unable to process announcement: %v", err)
-	}
+	require.NoError(t, err, "unable to process announcement")
 
 	select {
 	case <-ctx.broadcastedMessage:
@@ -3020,9 +2836,7 @@ func TestOptionalFieldsChannelUpdateValidation(t *testing.T) {
 	t.Parallel()
 
 	ctx, cleanup, err := createTestCtx(0)
-	if err != nil {
-		t.Fatalf("can't create context: %v", err)
-	}
+	require.NoError(t, err, "can't create context")
 	defer cleanup()
 
 	chanUpdateHeight := uint32(0)
@@ -3032,25 +2846,19 @@ func TestOptionalFieldsChannelUpdateValidation(t *testing.T) {
 	// In this scenario, we'll test whether the message flags field in a channel
 	// update is properly handled.
 	chanAnn, err := createRemoteChannelAnnouncement(chanUpdateHeight)
-	if err != nil {
-		t.Fatalf("can't create channel announcement: %v", err)
-	}
+	require.NoError(t, err, "can't create channel announcement")
 
 	select {
 	case err = <-ctx.gossiper.ProcessRemoteAnnouncement(chanAnn, nodePeer):
 	case <-time.After(2 * time.Second):
 		t.Fatal("did not process remote announcement")
 	}
-	if err != nil {
-		t.Fatalf("unable to process announcement: %v", err)
-	}
+	require.NoError(t, err, "unable to process announcement")
 
 	// The first update should fail from an invalid max HTLC field, which is
 	// less than the min HTLC.
 	chanUpdAnn, err := createUpdateAnnouncement(0, 0, remoteKeyPriv1, timestamp)
-	if err != nil {
-		t.Fatalf("unable to create channel update: %v", err)
-	}
+	require.NoError(t, err, "unable to create channel update")
 
 	chanUpdAnn.HtlcMinimumMsat = 5000
 	chanUpdAnn.HtlcMaximumMsat = 4000
@@ -3096,9 +2904,7 @@ func TestOptionalFieldsChannelUpdateValidation(t *testing.T) {
 	case <-time.After(2 * time.Second):
 		t.Fatal("did not process remote announcement")
 	}
-	if err != nil {
-		t.Fatalf("unable to process announcement: %v", err)
-	}
+	require.NoError(t, err, "unable to process announcement")
 }
 
 // TestSendChannelUpdateReliably ensures that the latest channel update for a
@@ -3109,23 +2915,17 @@ func TestSendChannelUpdateReliably(t *testing.T) {
 	// We'll start by creating our test context and a batch of
 	// announcements.
 	ctx, cleanup, err := createTestCtx(uint32(proofMatureDelta))
-	if err != nil {
-		t.Fatalf("unable to create test context: %v", err)
-	}
+	require.NoError(t, err, "unable to create test context")
 	defer cleanup()
 
 	batch, err := createLocalAnnouncements(0)
-	if err != nil {
-		t.Fatalf("can't generate announcements: %v", err)
-	}
+	require.NoError(t, err, "can't generate announcements")
 
 	// We'll also create two keys, one for ourselves and another for the
 	// remote party.
 	remoteKey, err := btcec.ParsePubKey(batch.nodeAnn2.NodeID[:])
-	if err != nil {
-		t.Fatalf("unable to parse pubkey: %v", err)
-	}
+	require.NoError(t, err, "unable to parse pubkey")
 
 	// Set up a channel we can use to inspect messages sent by the
 	// gossiper to the remote peer.
@@ -3172,9 +2972,7 @@ func TestSendChannelUpdateReliably(t *testing.T) {
 	case <-time.After(2 * time.Second):
 		t.Fatal("did not process local channel announcement")
 	}
-	if err != nil {
-		t.Fatalf("unable to process local channel announcement: %v", err)
-	}
+	require.NoError(t, err, "unable to process local channel announcement")
 
 	// It should not be broadcast due to not having an announcement proof.
 	select {
@@ -3189,9 +2987,7 @@ func TestSendChannelUpdateReliably(t *testing.T) {
 	case <-time.After(2 * time.Second):
 		t.Fatal("did not process local channel update")
 	}
-	if err != nil {
-		t.Fatalf("unable to process local channel update: %v", err)
-	}
+	require.NoError(t, err, "unable to process local channel update")
 
 	// It should also not be broadcast due to the announcement not having an
 	// announcement proof.
@@ -3251,9 +3047,7 @@ func TestSendChannelUpdateReliably(t *testing.T) {
 	case <-time.After(2 * time.Second):
 		t.Fatal("did not process local channel update")
 	}
-	if err != nil {
-		t.Fatalf("unable to process local channel update: %v", err)
-	}
+	require.NoError(t, err, "unable to process local channel update")
 
 	// It should also not be broadcast due to the announcement not having an
 	// announcement proof.
@@ -3292,9 +3086,7 @@ func TestSendChannelUpdateReliably(t *testing.T) {
 	case <-time.After(2 * time.Second):
 		t.Fatal("did not process local channel proof")
 	}
-	if err != nil {
-		t.Fatalf("unable to process local channel proof: %v", err)
-	}
+	require.NoError(t, err, "unable to process local channel proof")
 
 	// No messages should be broadcast as we don't have the full proof yet.
 	select {
@@ -3313,9 +3105,7 @@ func TestSendChannelUpdateReliably(t *testing.T) {
 	case <-time.After(2 * time.Second):
 		t.Fatal("did not process remote channel proof")
 	}
-	if err != nil {
-		t.Fatalf("unable to process remote channel proof: %v", err)
-	}
+	require.NoError(t, err, "unable to process remote channel proof")
 
 	// Now that we've constructed our full proof, we can assert that the
 	// channel has been announced.
@@ -3349,9 +3139,7 @@ func TestSendChannelUpdateReliably(t *testing.T) {
 	case <-time.After(2 * time.Second):
 		t.Fatal("did not process local channel update")
 	}
-	if err != nil {
-		t.Fatalf("unable to process local channel update: %v", err)
-	}
+	require.NoError(t, err, "unable to process local channel update")
 	select {
 	case <-ctx.broadcastedMessage:
 	case <-time.After(2 * trickleDelay):
@@ -3429,9 +3217,7 @@ func sendLocalMsg(t *testing.T, ctx *testCtx, msg lnwire.Message,
 	case <-time.After(2 * time.Second):
 		t.Fatal("did not process local announcement")
 	}
-	if err != nil {
-		t.Fatalf("unable to process channel msg: %v", err)
-	}
+	require.NoError(t, err, "unable to process channel msg")
 }
 
 func sendRemoteMsg(t *testing.T, ctx *testCtx, msg lnwire.Message,
@@ -3482,9 +3268,7 @@ func TestPropagateChanPolicyUpdate(t *testing.T) {
 	// graph.
 	startingHeight := uint32(10)
 	ctx, cleanup, err := createTestCtx(startingHeight)
-	if err != nil {
-		t.Fatalf("unable to create test context: %v", err)
-	}
+	require.NoError(t, err, "unable to create test context")
 	defer cleanup()
 
 	const numChannels = 3
@@ -3587,9 +3371,7 @@ out:
 	}
 
 	err = ctx.gossiper.PropagateChanPolicyUpdate(edgesToUpdate)
-	if err != nil {
-		t.Fatalf("unable to chan policies: %v", err)
-	}
+	require.NoError(t, err, "unable to update chan policies")
 
 	// Two channel updates should now be broadcast, with neither of them
 	// being the channel our first private channel.
@@ -3664,9 +3446,7 @@ func TestProcessChannelAnnouncementOptionalMsgFields(t *testing.T) {
 	// We'll start by creating our test context and a set of test channel
 	// announcements.
 	ctx, cleanup, err := createTestCtx(0)
-	if err != nil {
-		t.Fatalf("unable to create test context: %v", err)
-	}
+	require.NoError(t, err, "unable to create test context")
 	defer cleanup()
 
 	chanAnn1 := createAnnouncementWithoutProof(
@@ -3830,9 +3610,7 @@ func TestBroadcastAnnsAfterGraphSynced(t *testing.T) {
 	t.Parallel()
 
 	ctx, cleanup, err := createTestCtx(10)
-	if err != nil {
-		t.Fatalf("can't create context: %v", err)
-	}
+	require.NoError(t, err, "can't create context")
 	defer cleanup()
 
 	// We'll mark the graph as not synced. This should prevent us from
@@ -3880,17 +3658,13 @@ func TestBroadcastAnnsAfterGraphSynced(t *testing.T) {
 	// A remote channel announcement should not be broadcast since the graph
 	// has not yet been synced.
 	chanAnn1, err := createRemoteChannelAnnouncement(0)
-	if err != nil {
-		t.Fatalf("unable to create channel announcement: %v", err)
-	}
+	require.NoError(t, err, "unable to create channel announcement")
 	assertBroadcast(chanAnn1, true, false)
 
 	// A local channel announcement should be broadcast though, regardless
 	// of whether we've synced our graph or not.
 	chanUpd, err := createUpdateAnnouncement(0, 0, remoteKeyPriv1, 1)
-	if err != nil {
-		t.Fatalf("unable to create channel announcement: %v", err)
-	}
+	require.NoError(t, err, "unable to create channel announcement")
 	assertBroadcast(chanUpd, false, true)
 
 	// Mark the graph as synced, which should allow the channel announcement
@@ -3898,9 +3672,7 @@ func TestBroadcastAnnsAfterGraphSynced(t *testing.T) {
 	ctx.gossiper.syncMgr.markGraphSynced()
 
 	chanAnn2, err := createRemoteChannelAnnouncement(1)
-	if err != nil {
-		t.Fatalf("unable to create channel announcement: %v", err)
-	}
+	require.NoError(t, err, "unable to create channel announcement")
 	assertBroadcast(chanAnn2, true, true)
 }
 
@@ -3912,9 +3684,7 @@ func TestRateLimitChannelUpdates(t *testing.T) {
 	// Create our test harness.
 	const blockHeight = 100
 	ctx, cleanup, err := createTestCtx(blockHeight)
-	if err != nil {
-		t.Fatalf("can't create context: %v", err)
-	}
+	require.NoError(t, err, "can't create context")
 	defer cleanup()
 	ctx.gossiper.cfg.RebroadcastInterval = time.Hour
 	ctx.gossiper.cfg.MaxChannelUpdateBurst = 5
@@ -4061,20 +3831,14 @@ func TestIgnoreOwnAnnouncement(t *testing.T) {
 	t.Parallel()
 
 	ctx, cleanup, err := createTestCtx(proofMatureDelta)
-	if err != nil {
-		t.Fatalf("can't create context: %v", err)
-	}
+	require.NoError(t, err, "can't create context")
 	defer cleanup()
 
 	batch, err := createLocalAnnouncements(0)
-	if err != nil {
-		t.Fatalf("can't generate announcements: %v", err)
-	}
+	require.NoError(t, err, "can't generate announcements")
 
 	remoteKey, err := btcec.ParsePubKey(batch.nodeAnn2.NodeID[:])
-	if err != nil {
-		t.Fatalf("unable to parse pubkey: %v", err)
-	}
+	require.NoError(t, err, "unable to parse pubkey")
 	remotePeer := &mockPeer{remoteKey, nil, nil}
 
 	// Try to let the remote peer tell us about the channel we are part of.
@@ -4099,9 +3863,7 @@ func TestIgnoreOwnAnnouncement(t *testing.T) {
 	case <-time.After(2 * time.Second):
 		t.Fatal("did not process local announcement")
 	}
-	if err != nil {
-		t.Fatalf("unable to process channel ann: %v", err)
-	}
+	require.NoError(t, err, "unable to process channel ann")
 	select {
 	case <-ctx.broadcastedMessage:
 		t.Fatal("channel announcement was broadcast")
@@ -4113,9 +3875,7 @@ func TestIgnoreOwnAnnouncement(t *testing.T) {
 	case <-time.After(2 * time.Second):
 		t.Fatal("did not process local announcement")
 	}
-	if err != nil {
-		t.Fatalf("unable to process channel update: %v", err)
-	}
+	require.NoError(t, err, "unable to process channel update")
 	select {
 	case <-ctx.broadcastedMessage:
 		t.Fatal("channel update announcement was broadcast")
@@ -4127,9 +3887,7 @@ func TestIgnoreOwnAnnouncement(t *testing.T) {
 	case <-time.After(2 * time.Second):
 		t.Fatal("did not process local announcement")
 	}
-	if err != nil {
-		t.Fatalf("unable to process node ann: %v", err)
-	}
+	require.NoError(t, err, "unable to process node ann")
 	select {
 	case <-ctx.broadcastedMessage:
 		t.Fatal("node announcement was broadcast")
@@ -4144,9 +3902,7 @@ func TestIgnoreOwnAnnouncement(t *testing.T) {
 	case <-time.After(2 * time.Second):
 		t.Fatal("did not process remote announcement")
 	}
-	if err != nil {
-		t.Fatalf("unable to process channel update: %v", err)
-	}
+	require.NoError(t, err, "unable to process channel update")
 	select {
 	case <-ctx.broadcastedMessage:
 		t.Fatal("channel update announcement was broadcast")
@@ -4160,9 +3916,7 @@ func TestIgnoreOwnAnnouncement(t *testing.T) {
 	case <-time.After(2 * time.Second):
 		t.Fatal("did not process remote announcement")
 	}
-	if err != nil {
-		t.Fatalf("unable to process node ann: %v", err)
-	}
+	require.NoError(t, err, "unable to process node ann")
 	select {
 	case <-ctx.broadcastedMessage:
 		t.Fatal("node announcement was broadcast")
@@ -4176,9 +3930,7 @@ func TestIgnoreOwnAnnouncement(t *testing.T) {
 	case <-time.After(2 * time.Second):
 		t.Fatal("did not process remote announcement")
 	}
-	if err != nil {
-		t.Fatalf("unable to process local proof: %v", err)
-	}
+	require.NoError(t, err, "unable to process local proof")
 
 	select {
 	case <-ctx.broadcastedMessage:
@@ -4193,9 +3945,7 @@ func TestIgnoreOwnAnnouncement(t *testing.T) {
 	case <-time.After(2 * time.Second):
 		t.Fatal("did not process remote announcement")
 	}
-	if err != nil {
-		t.Fatalf("unable to process remote proof: %v", err)
-	}
+	require.NoError(t, err, "unable to process remote proof")
 
 	for i := 0; i < 5; i++ {
 		select {
@@ -4226,22 +3976,16 @@ func TestRejectCacheChannelAnn(t *testing.T) {
 	t.Parallel()
 
 	ctx, cleanup, err := createTestCtx(proofMatureDelta)
-	if err != nil {
-		t.Fatalf("can't create context: %v", err)
-	}
+	require.NoError(t, err, "can't create context")
 	defer cleanup()
 
 	// First, we create a channel announcement to send over to our test
 	// peer.
 	batch, err := createRemoteAnnouncements(0)
-	if err != nil {
-		t.Fatalf("can't generate announcements: %v", err)
-	}
+	require.NoError(t, err, "can't generate announcements")
 
 	remoteKey, err := btcec.ParsePubKey(batch.nodeAnn2.NodeID[:])
-	if err != nil {
-		t.Fatalf("unable to parse pubkey: %v", err)
-	}
+	require.NoError(t, err, "unable to parse pubkey")
 	remotePeer := &mockPeer{remoteKey, nil, nil}
 
 	// Before sending over the announcement, we'll modify it such that we
diff --git a/discovery/message_store_test.go b/discovery/message_store_test.go
index 7542f82c6..4bb13c18c 100644
--- a/discovery/message_store_test.go
+++ b/discovery/message_store_test.go
@@ -13,15 +13,14 @@ import (
 	"github.com/lightningnetwork/lnd/channeldb"
 	"github.com/lightningnetwork/lnd/kvdb"
 	"github.com/lightningnetwork/lnd/lnwire"
+	"github.com/stretchr/testify/require"
 )
 
 func createTestMessageStore(t *testing.T) (*MessageStore, func()) {
 	t.Helper()
 
 	tempDir, err := ioutil.TempDir("", "channeldb")
-	if err != nil {
-		t.Fatalf("unable to create temp dir: %v", err)
-	}
+	require.NoError(t, err, "unable to create temp dir")
 	db, err := channeldb.Open(tempDir)
 	if err != nil {
 		os.RemoveAll(tempDir)
@@ -44,9 +43,7 @@ func createTestMessageStore(t *testing.T) (*MessageStore, func()) {
 
 func randPubKey(t *testing.T) *btcec.PublicKey {
 	priv, err := btcec.NewPrivateKey()
-	if err != nil {
-		t.Fatalf("unable to create private key: %v", err)
-	}
+	require.NoError(t, err, "unable to create private key")
 
 	return priv.PubKey()
 }
@@ -242,24 +239,18 @@ func TestMessageStoreUnsupportedMessage(t *testing.T) {
 		messageStore := tx.ReadWriteBucket(messageStoreBucket)
 		return messageStore.Put(msgKey, rawMsg.Bytes())
 	}, func() {})
-	if err != nil {
-		t.Fatalf("unable to add unsupported message to store: %v", err)
-	}
+	require.NoError(t, err, "unable to add unsupported message to store")
 
 	// Finally, we'll check that the store can properly filter out messages
 	// that are currently unknown to it. We'll make sure this is done for
 	// both Messages and MessagesForPeer.
 	totalMsgs, err := msgStore.Messages()
-	if err != nil {
-		t.Fatalf("unable to retrieve messages: %v", err)
-	}
+	require.NoError(t, err, "unable to retrieve messages")
 	if len(totalMsgs) != 0 {
 		t.Fatalf("expected to filter out unsupported message")
 	}
 	peerMsgs, err := msgStore.MessagesForPeer(peer)
-	if err != nil {
-		t.Fatalf("unable to retrieve peer messages: %v", err)
-	}
+	require.NoError(t, err, "unable to retrieve peer messages")
 	if len(peerMsgs) != 0 {
 		t.Fatalf("expected to filter out unsupported message")
 	}
diff --git a/discovery/syncer_test.go b/discovery/syncer_test.go
index 01afaf391..000438921 100644
--- a/discovery/syncer_test.go
+++ b/discovery/syncer_test.go
@@ -489,9 +489,7 @@ func TestGossipSyncerApplyGossipFilter(t *testing.T) {
 
 	// We'll now attempt to apply the gossip filter for the remote peer.
 	err := syncer.ApplyGossipFilter(remoteHorizon)
-	if err != nil {
-		t.Fatalf("unable to apply filter: %v", err)
-	}
+	require.NoError(t, err, "unable to apply filter")
 
 	// There should be no messages in the message queue as we didn't send
 	// the syncer and messages within the horizon.
@@ -539,9 +537,7 @@ func TestGossipSyncerApplyGossipFilter(t *testing.T) {
 		}
 	}()
 	err = syncer.ApplyGossipFilter(remoteHorizon)
-	if err != nil {
-		t.Fatalf("unable to apply filter: %v", err)
-	}
+	require.NoError(t, err, "unable to apply filter")
 
 	// We should get back the exact same message.
 	select {
@@ -587,9 +583,7 @@ func TestGossipSyncerQueryChannelRangeWrongChainHash(t *testing.T) {
 		NumBlocks:        math.MaxUint32,
 	}
 	err := syncer.replyChanRangeQuery(query)
-	if err != nil {
-		t.Fatalf("unable to process short chan ID's: %v", err)
-	}
+	require.NoError(t, err, "unable to process short chan ID's")
 
 	select {
 	case <-time.After(time.Second * 15):
@@ -638,9 +632,7 @@ func TestGossipSyncerReplyShortChanIDsWrongChainHash(t *testing.T) {
 	err := syncer.replyShortChanIDs(&lnwire.QueryShortChanIDs{
 		ChainHash: *chaincfg.SimNetParams.GenesisHash,
 	})
-	if err != nil {
-		t.Fatalf("unable to process short chan ID's: %v", err)
-	}
+	require.NoError(t, err, "unable to process short chan ID's")
 
 	select {
 	case <-time.After(time.Second * 15):
@@ -729,9 +721,7 @@ func TestGossipSyncerReplyShortChanIDs(t *testing.T) {
 	err := syncer.replyShortChanIDs(&lnwire.QueryShortChanIDs{
 		ShortChanIDs: queryChanIDs,
 	})
-	if err != nil {
-		t.Fatalf("unable to query for chan IDs: %v", err)
-	}
+	require.NoError(t, err, "unable to query for chan IDs")
 
 	for i := 0; i < len(queryReply)+1; i++ {
 		select {
@@ -1157,9 +1147,7 @@ func TestGossipSyncerGenChanRangeQuery(t *testing.T) {
 	// should return a start height that's back chanRangeQueryBuffer
 	// blocks.
 	rangeQuery, err := syncer.genChanRangeQuery(false)
-	if err != nil {
-		t.Fatalf("unable to resp: %v", err)
-	}
+	require.NoError(t, err, "unable to resp")
 
 	firstHeight := uint32(startingHeight - chanRangeQueryBuffer)
 	if rangeQuery.FirstBlockHeight != firstHeight {
@@ -1175,9 +1163,7 @@ func TestGossipSyncerGenChanRangeQuery(t *testing.T) {
 	// Generating a historical range query should result in a start height
 	// of 0.
 	rangeQuery, err = syncer.genChanRangeQuery(true)
-	if err != nil {
-		t.Fatalf("unable to resp: %v", err)
-	}
+	require.NoError(t, err, "unable to resp")
 	if rangeQuery.FirstBlockHeight != 0 {
 		t.Fatalf("incorrect chan range query: expected %v, %v", 0,
 			rangeQuery.FirstBlockHeight)
@@ -1221,9 +1207,7 @@ func testGossipSyncerProcessChanRangeReply(t *testing.T, legacy bool) {
 	startingState := syncer.state
 
 	query, err := syncer.genChanRangeQuery(true)
-	if err != nil {
-		t.Fatalf("unable to generate channel range query: %v", err)
-	}
+	require.NoError(t, err, "unable to generate channel range query")
 
 	// When interpreting block ranges, the first reply should start from
 	// our requested first block, and the last should end at our requested
@@ -1431,9 +1415,7 @@ func TestGossipSyncerSynchronizeChanIDs(t *testing.T) {
 
 	// If we issue another query, the syncer should tell us that it's done.
 	done, err := syncer.synchronizeChanIDs()
-	if err != nil {
-		t.Fatalf("unable to sync chan IDs: %v", err)
-	}
+	require.NoError(t, err, "unable to sync chan IDs")
 	if done {
 		t.Fatalf("syncer should be finished!")
 	}
diff --git a/feature/manager_internal_test.go b/feature/manager_internal_test.go
index 2778a86d9..8debddcbe 100644
--- a/feature/manager_internal_test.go
+++ b/feature/manager_internal_test.go
@@ -5,6 +5,7 @@ import (
 	"testing"
 
 	"github.com/lightningnetwork/lnd/lnwire"
+	"github.com/stretchr/testify/require"
 )
 
 type managerTest struct {
@@ -73,9 +74,7 @@ func TestManager(t *testing.T) {
 
 func testManager(t *testing.T, test managerTest) {
 	m, err := newManager(test.cfg, testSetDesc)
-	if err != nil {
-		t.Fatalf("unable to create feature manager: %v", err)
-	}
+	require.NoError(t, err, "unable to create feature manager")
 
 	sets := []Set{
 		SetInit,
diff --git a/funding/manager_test.go b/funding/manager_test.go
index 459ea8cf2..d11e40797 100644
--- a/funding/manager_test.go
+++ b/funding/manager_test.go
@@ -349,9 +349,7 @@ func createTestFundingManager(t *testing.T, privKey *btcec.PrivateKey,
 		cdb, netParams, chainNotifier, wc, signer, keyRing, bio, estimator,
 	)
-	if err != nil {
-		t.Fatalf("unable to create test ln wallet: %v", err)
-	}
+	require.NoError(t, err, "unable to create test ln wallet")
 
 	var chanIDSeed [32]byte
 
@@ -459,9 +457,7 @@ func createTestFundingManager(t *testing.T, privKey *btcec.PrivateKey,
 	}
 
 	f, err := NewFundingManager(fundingCfg)
-	if err != nil {
-		t.Fatalf("failed creating fundingManager: %v", err)
-	}
+	require.NoError(t, err, "failed creating fundingManager")
 	if err = f.Start(); err != nil {
 		t.Fatalf("failed starting fundingManager: %v", err)
 	}
@@ -557,9 +553,7 @@ func recreateAliceFundingManager(t *testing.T, alice *testNode) {
 		ReservationTimeout:   oldCfg.ReservationTimeout,
 		OpenChannelPredicate: chainedAcceptor,
 	})
-	if err != nil {
-		t.Fatalf("failed recreating aliceFundingManager: %v", err)
-	}
+	require.NoError(t, err, "failed recreating aliceFundingManager")
 
 	alice.fundingMgr = f
 	alice.msgChan = aliceMsgChan
@@ -578,28 +572,20 @@ func setupFundingManagers(t *testing.T,
 	options ...cfgOption) (*testNode, *testNode) {
 
 	aliceTestDir, err := ioutil.TempDir("", "alicelnwallet")
-	if err != nil {
-		t.Fatalf("unable to create temp directory: %v", err)
-	}
+	require.NoError(t, err, "unable to create temp directory")
 
 	alice, err := createTestFundingManager(
 		t, alicePrivKey, aliceAddr, aliceTestDir, options...,
 	)
-	if err != nil {
-		t.Fatalf("failed creating fundingManager: %v", err)
-	}
+	require.NoError(t, err, "failed creating fundingManager")
 
 	bobTestDir, err := ioutil.TempDir("", "boblnwallet")
-	if err != nil {
-		t.Fatalf("unable to create temp directory: %v", err)
-	}
+	require.NoError(t, err, "unable to create temp directory")
 
 	bob, err := createTestFundingManager(
 		t, bobPrivKey, bobAddr, bobTestDir, options...,
 	)
-	if err != nil {
-		t.Fatalf("failed creating fundingManager: %v", err)
-	}
+	require.NoError(t, err, "failed creating fundingManager")
 
 	// With the funding manager's created, we'll now attempt to mimic a
 	// connection pipe between them. In order to intercept the messages
@@ -1968,9 +1954,7 @@ func TestFundingManagerFundingTimeout(t *testing.T) {
 
 	// Bob will at this point be waiting for the funding transaction to be
 	// confirmed, so the channel should be considered pending.
 	pendingChannels, err := bob.fundingMgr.cfg.Wallet.Cfg.Database.FetchPendingChannels()
-	if err != nil {
-		t.Fatalf("unable to fetch pending channels: %v", err)
-	}
+	require.NoError(t, err, "unable to fetch pending channels")
 	if len(pendingChannels) != 1 {
 		t.Fatalf("Expected Bob to have 1 pending channel, had %v",
 			len(pendingChannels))
@@ -2014,9 +1998,7 @@ func TestFundingManagerFundingNotTimeoutInitiator(t *testing.T) {
 
 	// Alice will at this point be waiting for the funding transaction to be
 	// confirmed, so the channel should be considered pending.
 	pendingChannels, err := alice.fundingMgr.cfg.Wallet.Cfg.Database.FetchPendingChannels()
-	if err != nil {
-		t.Fatalf("unable to fetch pending channels: %v", err)
-	}
+	require.NoError(t, err, "unable to fetch pending channels")
 	if len(pendingChannels) != 1 {
 		t.Fatalf("Expected Alice to have 1 pending channel, had %v",
 			len(pendingChannels))
@@ -2766,9 +2748,7 @@ func TestFundingManagerCustomChannelParameters(t *testing.T) {
 	// Check that the custom channel parameters were properly set in the
 	// channel reservation.
 	resCtx, err := alice.fundingMgr.getReservationCtx(bobPubKey, chanID)
-	if err != nil {
-		t.Fatalf("unable to find ctx: %v", err)
-	}
+	require.NoError(t, err, "unable to find ctx")
 
 	// Alice's CSV delay should be 4 since Bob sent the default value, and
 	// Bob's should be 67 since Alice sent the custom value.
@@ -2792,9 +2772,7 @@ func TestFundingManagerCustomChannelParameters(t *testing.T) {
 
 	// Also make sure the parameters are properly set on Bob's end.
 	resCtx, err = bob.fundingMgr.getReservationCtx(alicePubKey, chanID)
-	if err != nil {
-		t.Fatalf("unable to find ctx: %v", err)
-	}
+	require.NoError(t, err, "unable to find ctx")
 
 	if err := assertDelay(resCtx, csvDelay, 4); err != nil {
 		t.Fatal(err)
diff --git a/htlcswitch/circuit_test.go b/htlcswitch/circuit_test.go
index d9e9f4542..256aa4aa7 100644
--- a/htlcswitch/circuit_test.go
+++ b/htlcswitch/circuit_test.go
@@ -16,6 +16,7 @@ import (
 	"github.com/lightningnetwork/lnd/htlcswitch/hop"
 	"github.com/lightningnetwork/lnd/keychain"
 	"github.com/lightningnetwork/lnd/lnwire"
+	"github.com/stretchr/testify/require"
 )
 
 var (
@@ -126,9 +127,7 @@ func newCircuitMap(t *testing.T, resMsg bool) (*htlcswitch.CircuitMapConfig,
 	}
 
 	circuitMap, err := htlcswitch.NewCircuitMap(circuitMapCfg)
-	if err != nil {
-		t.Fatalf("unable to create persistent circuit map: %v", err)
-	}
+	require.NoError(t, err, "unable to create persistent circuit map")
 
 	return circuitMapCfg, circuitMap
 }
@@ -489,9 +488,7 @@ func TestCircuitMapPersistence(t *testing.T) {
 
 	// Test removing circuits and the subsequent lookups.
 	err = circuitMap.DeleteCircuits(circuit1.Incoming)
-	if err != nil {
-		t.Fatalf("Remove returned unexpected error: %v", err)
-	}
+	require.NoError(t, err, "Remove returned unexpected error")
 
 	// There should be exactly one remaining circuit with hash1, and it
 	// should be circuit4.
@@ -514,9 +511,7 @@ func TestCircuitMapPersistence(t *testing.T) {
 
 	// Remove last remaining circuit with payment hash hash1.
 	err = circuitMap.DeleteCircuits(circuit4.Incoming)
-	if err != nil {
-		t.Fatalf("Remove returned unexpected error: %v", err)
-	}
+	require.NoError(t, err, "Remove returned unexpected error")
 
 	assertNumCircuitsWithHash(t, circuitMap, hash1, 0)
 	assertNumCircuitsWithHash(t, circuitMap, hash2, 1)
@@ -528,9 +523,7 @@ func TestCircuitMapPersistence(t *testing.T) {
 
 	// Remove last remaining circuit with payment hash hash2.
 	err = circuitMap.DeleteCircuits(circuit2.Incoming)
-	if err != nil {
-		t.Fatalf("Remove returned unexpected error: %v", err)
-	}
+	require.NoError(t, err, "Remove returned unexpected error")
 
 	// There should now only be one remaining circuit, with hash3.
 	assertNumCircuitsWithHash(t, circuitMap, hash2, 0)
@@ -639,9 +632,7 @@ func makeCircuitDB(t *testing.T, path string) *channeldb.DB {
 	}
 
 	db, err := channeldb.Open(path)
-	if err != nil {
-		t.Fatalf("unable to open channel db: %v", err)
-	}
+	require.NoError(t, err, "unable to open channel db")
 
 	return db
 }
@@ -666,9 +657,7 @@ func restartCircuitMap(t *testing.T, cfg *htlcswitch.CircuitMapConfig) (
 		CheckResolutionMsg: cfg.CheckResolutionMsg,
 	}
 	cm2, err := htlcswitch.NewCircuitMap(cfg2)
-	if err != nil {
-		t.Fatalf("unable to recreate persistent circuit map: %v", err)
-	}
+	require.NoError(t, err, "unable to recreate persistent circuit map")
 
 	return cfg2, cm2
 }
@@ -699,9 +688,7 @@ func TestCircuitMapCommitCircuits(t *testing.T) {
 	// First we will try to add an new circuit to the circuit map, this
 	// should succeed.
 	actions, err := circuitMap.CommitCircuits(circuit)
-	if err != nil {
-		t.Fatalf("failed to commit circuits: %v", err)
-	}
+	require.NoError(t, err, "failed to commit circuits")
 	if len(actions.Drops) > 0 {
 		t.Fatalf("new circuit should not have been dropped")
 	}
@@ -723,9 +710,7 @@ func TestCircuitMapCommitCircuits(t *testing.T) {
 	// in the circuit being dropped. This can happen if the incoming link
 	// flaps.
 	actions, err = circuitMap.CommitCircuits(circuit)
-	if err != nil {
-		t.Fatalf("failed to commit circuits: %v", err)
-	}
+	require.NoError(t, err, "failed to commit circuits")
 	if len(actions.Adds) > 0 {
 		t.Fatalf("duplicate circuit should not have been added to circuit map")
 	}
@@ -744,9 +729,7 @@ func TestCircuitMapCommitCircuits(t *testing.T) {
 	_, circuitMap = restartCircuitMap(t, cfg)
 
 	actions, err = circuitMap.CommitCircuits(circuit)
-	if err != nil {
-		t.Fatalf("failed to commit circuits: %v", err)
-	}
+	require.NoError(t, err, "failed to commit circuits")
 	if len(actions.Adds) > 0 {
 		t.Fatalf("duplicate circuit with incomplete forwarding " +
 			"decision should not have been added to circuit map")
@@ -795,9 +778,7 @@ func TestCircuitMapOpenCircuits(t *testing.T) {
 	// First we will try to add an new circuit to the circuit map, this
 	// should succeed.
 	_, err = circuitMap.CommitCircuits(circuit)
-	if err != nil {
-		t.Fatalf("failed to commit circuits: %v", err)
-	}
+	require.NoError(t, err, "failed to commit circuits")
 
 	keystone := htlcswitch.Keystone{
 		InKey: circuit.Incoming,
@@ -809,9 +790,7 @@ func TestCircuitMapOpenCircuits(t *testing.T) {
 
 	// Open the circuit for the first time.
 	err = circuitMap.OpenCircuits(keystone)
-	if err != nil {
-		t.Fatalf("failed to open circuits: %v", err)
-	}
+	require.NoError(t, err, "failed to open circuits")
 
 	// Check that we can retrieve the open circuit if the circuit map before
 	// the circuit map is restarted.
@@ -841,9 +820,7 @@ func TestCircuitMapOpenCircuits(t *testing.T) {
 	// flaps OR the switch is entirely restarted and the outgoing link has
 	// not received a response.
 	actions, err := circuitMap.CommitCircuits(circuit)
-	if err != nil {
-		t.Fatalf("failed to commit circuits: %v", err)
-	}
+	require.NoError(t, err, "failed to commit circuits")
 	if len(actions.Adds) > 0 {
 		t.Fatalf("duplicate circuit should not have been added to circuit map")
 	}
@@ -882,9 +859,7 @@ func TestCircuitMapOpenCircuits(t *testing.T) {
 	// if the incoming link flaps OR the switch is entirely restarted and
 	// the outgoing link has not received a response.
 	actions, err = circuitMap.CommitCircuits(circuit)
-	if err != nil {
-		t.Fatalf("failed to commit circuits: %v", err)
-	}
+	require.NoError(t, err, "failed to commit circuits")
 	if len(actions.Adds) > 0 {
 		t.Fatalf("duplicate circuit should not have been added to circuit map")
 	}
@@ -1012,9 +987,7 @@ func TestCircuitMapTrimOpenCircuits(t *testing.T) {
 	// First we will try to add an new circuit to the circuit map, this
 	// should succeed.
 	_, err = circuitMap.CommitCircuits(circuits...)
-	if err != nil {
-		t.Fatalf("failed to commit circuits: %v", err)
-	}
+	require.NoError(t, err, "failed to commit circuits")
 
 	// Now create a list of the keystones that we will use to preemptively
 	// open the circuits. We set the index as the outgoing HtlcID to i
@@ -1032,9 +1005,7 @@ func TestCircuitMapTrimOpenCircuits(t *testing.T) {
 
 	// Open the circuits for the first time.
 	err = circuitMap.OpenCircuits(keystones...)
-	if err != nil {
-		t.Fatalf("failed to open circuits: %v", err)
-	}
+	require.NoError(t, err, "failed to open circuits")
 
 	// Check that all circuits are marked open.
 	assertCircuitsOpenedPreRestart(t, circuitMap, circuits, keystones)
@@ -1152,9 +1123,7 @@ func TestCircuitMapCloseOpenCircuits(t *testing.T) {
 	// First we will try to add an new circuit to the circuit map, this
 	// should succeed.
 	_, err = circuitMap.CommitCircuits(circuit)
-	if err != nil {
-		t.Fatalf("failed to commit circuits: %v", err)
-	}
+	require.NoError(t, err, "failed to commit circuits")
 
 	keystone := htlcswitch.Keystone{
 		InKey: circuit.Incoming,
@@ -1166,9 +1135,7 @@ func TestCircuitMapCloseOpenCircuits(t *testing.T) {
 
 	// Open the circuit for the first time.
 	err = circuitMap.OpenCircuits(keystone)
-	if err != nil {
-		t.Fatalf("failed to open circuits: %v", err)
-	}
+	require.NoError(t, err, "failed to open circuits")
 
 	// Check that we can retrieve the open circuit if the circuit map before
 	// the circuit map is restarted.
@@ -1243,9 +1210,7 @@ func TestCircuitMapCloseUnopenedCircuit(t *testing.T) {
 	// First we will try to add an new circuit to the circuit map, this
 	// should succeed.
 	_, err = circuitMap.CommitCircuits(circuit)
-	if err != nil {
-		t.Fatalf("failed to commit circuits: %v", err)
-	}
+	require.NoError(t, err, "failed to commit circuits")
 
 	// Close the open circuit for the first time, which should succeed.
 	_, err = circuitMap.FailCircuit(circuit.Incoming)
@@ -1300,9 +1265,7 @@ func TestCircuitMapDeleteUnopenedCircuit(t *testing.T) {
 	// First we will try to add an new circuit to the circuit map, this
 	// should succeed.
 	_, err = circuitMap.CommitCircuits(circuit)
-	if err != nil {
-		t.Fatalf("failed to commit circuits: %v", err)
-	}
+	require.NoError(t, err, "failed to commit circuits")
 
 	// Close the open circuit for the first time, which should succeed.
 	_, err = circuitMap.FailCircuit(circuit.Incoming)
@@ -1359,9 +1322,7 @@ func TestCircuitMapDeleteOpenCircuit(t *testing.T) {
 	// First we will try to add an new circuit to the circuit map, this
 	// should succeed.
 	_, err = circuitMap.CommitCircuits(circuit)
-	if err != nil {
-		t.Fatalf("failed to commit circuits: %v", err)
-	}
+	require.NoError(t, err, "failed to commit circuits")
 
 	keystone := htlcswitch.Keystone{
 		InKey: circuit.Incoming,
@@ -1373,9 +1334,7 @@ func TestCircuitMapDeleteOpenCircuit(t *testing.T) {
 
 	// Open the circuit for the first time.
 	err = circuitMap.OpenCircuits(keystone)
-	if err != nil {
-		t.Fatalf("failed to open circuits: %v", err)
-	}
+	require.NoError(t, err, "failed to open circuits")
 
 	// Close the open circuit for the first time, which should succeed.
 	_, err = circuitMap.FailCircuit(circuit.Incoming)
diff --git a/htlcswitch/decayedlog_test.go b/htlcswitch/decayedlog_test.go
index e7b7e6e58..7dd90d3e6 100644
--- a/htlcswitch/decayedlog_test.go
+++ b/htlcswitch/decayedlog_test.go
@@ -12,6 +12,7 @@ import (
 	"github.com/lightningnetwork/lnd/chainntnfs"
 	"github.com/lightningnetwork/lnd/kvdb"
 	"github.com/lightningnetwork/lnd/lntest/mock"
+	"github.com/stretchr/testify/require"
 )
 
 const (
@@ -22,9 +23,7 @@ const (
 // decayed log instance.
 func tempDecayedLogPath(t *testing.T) string {
 	dir, err := ioutil.TempDir("", "decayedlog")
-	if err != nil {
-		t.Fatalf("unable to create temporary decayed log dir: %v", err)
-	}
+	require.NoError(t, err, "unable to create temporary decayed log dir")
 
 	return dir
 }
@@ -99,16 +98,12 @@ func TestDecayedLogGarbageCollector(t *testing.T) {
 	dbPath := tempDecayedLogPath(t)
 
 	d, notifier, hashedSecret, _, err := startup(dbPath, true)
-	if err != nil {
-		t.Fatalf("Unable to start up DecayedLog: %v", err)
-	}
+	require.NoError(t, err, "Unable to start up DecayedLog")
 	defer shutdown(dbPath, d)
 
 	// Store in the sharedHashBucket.
 	err = d.Put(hashedSecret, cltv)
-	if err != nil {
-		t.Fatalf("Unable to store in channeldb: %v", err)
-	}
+	require.NoError(t, err, "Unable to store in channeldb")
 
 	// Wait for database write (GC is in a goroutine)
 	time.Sleep(500 * time.Millisecond)
@@ -123,9 +118,7 @@ func TestDecayedLogGarbageCollector(t *testing.T) {
 
 	// Assert that hashedSecret is still in the sharedHashBucket
 	val, err := d.Get(hashedSecret)
-	if err != nil {
-		t.Fatalf("Get failed - received an error upon Get: %v", err)
-	}
+	require.NoError(t, err, "Get failed - received an error upon Get")
 
 	if val != cltv {
 		t.Fatalf("GC incorrectly deleted CLTV")
@@ -160,9 +153,7 @@ func TestDecayedLogPersistentGarbageCollector(t *testing.T) {
 	dbPath := tempDecayedLogPath(t)
 
 	d, _, hashedSecret, stop, err := startup(dbPath, true)
-	if err != nil {
-		t.Fatalf("Unable to start up DecayedLog: %v", err)
-	}
+	require.NoError(t, err, "Unable to start up DecayedLog")
 	defer shutdown(dbPath, d)
 
 	// Store in the sharedHashBucket
@@ -180,9 +171,7 @@ func TestDecayedLogPersistentGarbageCollector(t *testing.T) {
 	stop()
 
 	d2, notifier2, _, _, err := startup(dbPath, true)
-	if err != nil {
-		t.Fatalf("Unable to restart DecayedLog: %v", err)
-	}
+	require.NoError(t, err, "Unable to restart DecayedLog")
 	defer shutdown(dbPath, d2)
 
 	// Check that the hash prefix still exists in the new db instance.
@@ -216,22 +205,16 @@ func TestDecayedLogInsertionAndDeletion(t *testing.T) {
 	dbPath := tempDecayedLogPath(t)
 
 	d, _, hashedSecret, _, err := startup(dbPath, false)
-	if err != nil {
-		t.Fatalf("Unable to start up DecayedLog: %v", err)
-	}
+	require.NoError(t, err, "Unable to start up DecayedLog")
 	defer shutdown(dbPath, d)
 
 	// Store in the sharedHashBucket.
err = d.Put(hashedSecret, cltv) - if err != nil { - t.Fatalf("Unable to store in channeldb: %v", err) - } + require.NoError(t, err, "Unable to store in channeldb") // Delete hashedSecret from the sharedHashBucket. err = d.Delete(hashedSecret) - if err != nil { - t.Fatalf("Unable to delete from channeldb: %v", err) - } + require.NoError(t, err, "Unable to delete from channeldb") // Assert that hashedSecret is not in the sharedHashBucket _, err = d.Get(hashedSecret) @@ -254,31 +237,23 @@ func TestDecayedLogStartAndStop(t *testing.T) { dbPath := tempDecayedLogPath(t) d, _, hashedSecret, stop, err := startup(dbPath, false) - if err != nil { - t.Fatalf("Unable to start up DecayedLog: %v", err) - } + require.NoError(t, err, "Unable to start up DecayedLog") defer shutdown(dbPath, d) // Store in the sharedHashBucket. err = d.Put(hashedSecret, cltv) - if err != nil { - t.Fatalf("Unable to store in channeldb: %v", err) - } + require.NoError(t, err, "Unable to store in channeldb") // Shutdown the DecayedLog's channeldb stop() d2, _, hashedSecret2, stop, err := startup(dbPath, false) - if err != nil { - t.Fatalf("Unable to restart DecayedLog: %v", err) - } + require.NoError(t, err, "Unable to restart DecayedLog") defer shutdown(dbPath, d2) // Retrieve the stored cltv value given the hashedSecret key. value, err := d2.Get(hashedSecret) - if err != nil { - t.Fatalf("Unable to retrieve from channeldb: %v", err) - } + require.NoError(t, err, "Unable to retrieve from channeldb") // Check that the original cltv value matches the retrieved cltv // value. @@ -288,17 +263,13 @@ func TestDecayedLogStartAndStop(t *testing.T) { // Delete hashedSecret from sharedHashBucket err = d2.Delete(hashedSecret2) - if err != nil { - t.Fatalf("Unable to delete from channeldb: %v", err) - } + require.NoError(t, err, "Unable to delete from channeldb") // Shutdown the DecayedLog's channeldb stop() d3, _, hashedSecret3, _, err := startup(dbPath, false) - if err != nil { - t.Fatalf("Unable to restart DecayedLog: %v", err) - } + require.NoError(t, err, "Unable to restart DecayedLog") defer shutdown(dbPath, d3) // Assert that hashedSecret is not in the sharedHashBucket @@ -320,22 +291,16 @@ func TestDecayedLogStorageAndRetrieval(t *testing.T) { dbPath := tempDecayedLogPath(t) d, _, hashedSecret, _, err := startup(dbPath, false) - if err != nil { - t.Fatalf("Unable to start up DecayedLog: %v", err) - } + require.NoError(t, err, "Unable to start up DecayedLog") defer shutdown(dbPath, d) // Store in the sharedHashBucket err = d.Put(hashedSecret, cltv) - if err != nil { - t.Fatalf("Unable to store in channeldb: %v", err) - } + require.NoError(t, err, "Unable to store in channeldb") // Retrieve the stored cltv value given the hashedSecret key. value, err := d.Get(hashedSecret) - if err != nil { - t.Fatalf("Unable to retrieve from channeldb: %v", err) - } + require.NoError(t, err, "Unable to retrieve from channeldb") // If the original cltv value does not match the value retrieved, // then the test failed. 
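The choice of require over assert preserves the control flow of the deleted t.Fatalf calls: a failed require ends the test immediately, while assert only records the failure and continues. A sketch of the difference, per testify's documented behaviour:

	// require stops the test on failure, as t.Fatalf did, so code after
	// the check may safely use the values it guards.
	require.NoError(t, err, "Unable to store in channeldb") // t.FailNow()

	// assert would mark the test failed but keep executing, potentially
	// operating on zero values further down.
	assert.NoError(t, err, "Unable to store in channeldb") // t.Errorf()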
diff --git a/htlcswitch/hop/iterator_test.go b/htlcswitch/hop/iterator_test.go index 20c5632b2..524b70df0 100644 --- a/htlcswitch/hop/iterator_test.go +++ b/htlcswitch/hop/iterator_test.go @@ -10,6 +10,7 @@ import ( "github.com/lightningnetwork/lnd/lnwire" "github.com/lightningnetwork/lnd/record" "github.com/lightningnetwork/lnd/tlv" + "github.com/stretchr/testify/require" ) // TestSphinxHopIteratorForwardingInstructions tests that we're able to @@ -44,9 +45,7 @@ func TestSphinxHopIteratorForwardingInstructions(t *testing.T) { record.NewNextHopIDRecord(&nextAddrInt), } tlvStream, err := tlv.NewStream(tlvRecords...) - if err != nil { - t.Fatalf("unable to create stream: %v", err) - } + require.NoError(t, err, "unable to create stream") if err := tlvStream.Encode(&b); err != nil { t.Fatalf("unable to encode stream: %v", err) } diff --git a/htlcswitch/link_test.go b/htlcswitch/link_test.go index 5320336ec..478ab29a5 100644 --- a/htlcswitch/link_test.go +++ b/htlcswitch/link_test.go @@ -436,9 +436,7 @@ func TestChannelLinkSingleHopPayment(t *testing.T) { alice, bob, cleanUp, err := createTwoClusterChannels( btcutil.SatoshiPerBitcoin*3, btcutil.SatoshiPerBitcoin*5) - if err != nil { - t.Fatalf("unable to create channel: %v", err) - } + require.NoError(t, err, "unable to create channel") defer cleanUp() n := newTwoHopNetwork( @@ -479,9 +477,7 @@ func TestChannelLinkSingleHopPayment(t *testing.T) { n.aliceServer, receiver, firstHop, hops, amount, htlcAmt, totalTimelock, ).Wait(30 * time.Second) - if err != nil { - t.Fatalf("unable to make the payment: %v", err) - } + require.NoError(t, err, "unable to make the payment") // Wait for Alice to receive the revocation. // @@ -491,9 +487,7 @@ func TestChannelLinkSingleHopPayment(t *testing.T) { // Check that alice invoice was settled and bandwidth of HTLC // links was changed. invoice, err := receiver.registry.LookupInvoice(rhash) - if err != nil { - t.Fatalf("unable to get invoice: %v", err) - } + require.NoError(t, err, "unable to get invoice") if invoice.State != channeldb.ContractSettled { t.Fatal("alice invoice wasn't settled") } @@ -542,9 +536,7 @@ func testChannelLinkMultiHopPayment(t *testing.T, channels, cleanUp, _, err := createClusterChannels( btcutil.SatoshiPerBitcoin*3, btcutil.SatoshiPerBitcoin*5) - if err != nil { - t.Fatalf("unable to create channel: %v", err) - } + require.NoError(t, err, "unable to create channel") defer cleanUp() n := newThreeHopNetwork(t, channels.aliceToBob, channels.bobToAlice, @@ -606,9 +598,7 @@ func testChannelLinkMultiHopPayment(t *testing.T, n.aliceServer, n.carolServer, firstHop, hops, amount, htlcAmt, totalTimelock, ).Wait(30 * time.Second) - if err != nil { - t.Fatalf("unable to send payment: %v", err) - } + require.NoError(t, err, "unable to send payment") // Wait for Alice and Bob's second link to receive the revocation. time.Sleep(2 * time.Second) @@ -616,9 +606,7 @@ func testChannelLinkMultiHopPayment(t *testing.T, // Check that Carol invoice was settled and bandwidth of HTLC // links were changed. 
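As with decayedlog_test.go, iterator_test.go gains the single testify import above; the link_test.go hunks that follow show no import change, presumably because that file already imports the package. A sketch of the resulting import block (the other imports vary per file):

	import (
		"testing"

		"github.com/stretchr/testify/require"
	)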
invoice, err := receiver.registry.LookupInvoice(rhash) - if err != nil { - t.Fatalf("unable to get invoice: %v", err) - } + require.NoError(t, err, "unable to get invoice") if invoice.State != channeldb.ContractSettled { t.Fatal("carol invoice haven't been settled") } @@ -656,9 +644,7 @@ func TestChannelLinkCancelFullCommitment(t *testing.T) { channels, cleanUp, _, err := createClusterChannels( btcutil.SatoshiPerBitcoin*3, btcutil.SatoshiPerBitcoin*5) - if err != nil { - t.Fatalf("unable to create channel: %v", err) - } + require.NoError(t, err, "unable to create channel") defer cleanUp() n := newTwoHopNetwork( @@ -759,9 +745,7 @@ func TestExitNodeTimelockPayloadMismatch(t *testing.T) { channels, cleanUp, _, err := createClusterChannels( btcutil.SatoshiPerBitcoin*5, btcutil.SatoshiPerBitcoin*5) - if err != nil { - t.Fatalf("unable to create channel: %v", err) - } + require.NoError(t, err, "unable to create channel") defer cleanUp() n := newThreeHopNetwork(t, channels.aliceToBob, channels.bobToAlice, @@ -812,9 +796,7 @@ func TestExitNodeAmountPayloadMismatch(t *testing.T) { channels, cleanUp, _, err := createClusterChannels( btcutil.SatoshiPerBitcoin*5, btcutil.SatoshiPerBitcoin*5) - if err != nil { - t.Fatalf("unable to create channel: %v", err) - } + require.NoError(t, err, "unable to create channel") defer cleanUp() n := newThreeHopNetwork(t, channels.aliceToBob, channels.bobToAlice, @@ -853,9 +835,7 @@ func TestLinkForwardTimelockPolicyMismatch(t *testing.T) { channels, cleanUp, _, err := createClusterChannels( btcutil.SatoshiPerBitcoin*5, btcutil.SatoshiPerBitcoin*5) - if err != nil { - t.Fatalf("unable to create channel: %v", err) - } + require.NoError(t, err, "unable to create channel") defer cleanUp() n := newThreeHopNetwork(t, channels.aliceToBob, channels.bobToAlice, @@ -911,9 +891,7 @@ func TestLinkForwardFeePolicyMismatch(t *testing.T) { channels, cleanUp, _, err := createClusterChannels( btcutil.SatoshiPerBitcoin*3, btcutil.SatoshiPerBitcoin*5) - if err != nil { - t.Fatalf("unable to create channel: %v", err) - } + require.NoError(t, err, "unable to create channel") defer cleanUp() n := newThreeHopNetwork(t, channels.aliceToBob, channels.bobToAlice, @@ -969,9 +947,7 @@ func TestLinkForwardMinHTLCPolicyMismatch(t *testing.T) { channels, cleanUp, _, err := createClusterChannels( btcutil.SatoshiPerBitcoin*5, btcutil.SatoshiPerBitcoin*5) - if err != nil { - t.Fatalf("unable to create channel: %v", err) - } + require.NoError(t, err, "unable to create channel") defer cleanUp() n := newThreeHopNetwork(t, channels.aliceToBob, channels.bobToAlice, @@ -1027,9 +1003,7 @@ func TestLinkForwardMaxHTLCPolicyMismatch(t *testing.T) { channels, cleanUp, _, err := createClusterChannels( btcutil.SatoshiPerBitcoin*5, btcutil.SatoshiPerBitcoin*5, ) - if err != nil { - t.Fatalf("unable to create channel: %v", err) - } + require.NoError(t, err, "unable to create channel") defer cleanUp() n := newThreeHopNetwork( @@ -1095,9 +1069,7 @@ func TestUpdateForwardingPolicy(t *testing.T) { channels, cleanUp, _, err := createClusterChannels( btcutil.SatoshiPerBitcoin*5, btcutil.SatoshiPerBitcoin*5) - if err != nil { - t.Fatalf("unable to create channel: %v", err) - } + require.NoError(t, err, "unable to create channel") defer cleanUp() n := newThreeHopNetwork(t, channels.aliceToBob, channels.bobToAlice, @@ -1124,16 +1096,12 @@ func TestUpdateForwardingPolicy(t *testing.T) { n.aliceServer, n.carolServer, firstHop, hops, amountNoFee, htlcAmt, htlcExpiry, ).Wait(30 * time.Second) - if err != nil { - 
t.Fatalf("unable to send payment: %v", err) - } + require.NoError(t, err, "unable to send payment") // Carol's invoice should now be shown as settled as the payment // succeeded. invoice, err := n.carolServer.registry.LookupInvoice(payResp) - if err != nil { - t.Fatalf("unable to get invoice: %v", err) - } + require.NoError(t, err, "unable to get invoice") if invoice.State != channeldb.ContractSettled { t.Fatal("carol invoice haven't been settled") } @@ -1198,9 +1166,7 @@ func TestUpdateForwardingPolicy(t *testing.T) { n.aliceServer, n.carolServer, firstHop, hops, amountNoFee, htlcAmt, htlcExpiry, ).Wait(30 * time.Second) - if err != nil { - t.Fatalf("unable to send payment: %v", err) - } + require.NoError(t, err, "unable to send payment") // Now we'll update Bob's policy to lower his max HTLC to an extent // that'll cause him to reject the same HTLC that we just sent. @@ -1242,9 +1208,7 @@ func TestChannelLinkMultiHopInsufficientPayment(t *testing.T) { channels, cleanUp, _, err := createClusterChannels( btcutil.SatoshiPerBitcoin*3, btcutil.SatoshiPerBitcoin*5) - if err != nil { - t.Fatalf("unable to create channel: %v", err) - } + require.NoError(t, err, "unable to create channel") defer cleanUp() n := newThreeHopNetwork(t, channels.aliceToBob, channels.bobToAlice, @@ -1292,9 +1256,7 @@ func TestChannelLinkMultiHopInsufficientPayment(t *testing.T) { // Check that alice invoice wasn't settled and bandwidth of htlc // links hasn't been changed. invoice, err := receiver.registry.LookupInvoice(rhash) - if err != nil { - t.Fatalf("unable to get invoice: %v", err) - } + require.NoError(t, err, "unable to get invoice") if invoice.State == channeldb.ContractSettled { t.Fatal("carol invoice have been settled") } @@ -1328,9 +1290,7 @@ func TestChannelLinkMultiHopUnknownPaymentHash(t *testing.T) { channels, cleanUp, _, err := createClusterChannels( btcutil.SatoshiPerBitcoin*5, btcutil.SatoshiPerBitcoin*5) - if err != nil { - t.Fatalf("unable to create channel: %v", err) - } + require.NoError(t, err, "unable to create channel") defer cleanUp() n := newThreeHopNetwork(t, channels.aliceToBob, channels.bobToAlice, @@ -1368,16 +1328,12 @@ func TestChannelLinkMultiHopUnknownPaymentHash(t *testing.T) { err = n.aliceServer.htlcSwitch.SendHTLC( n.firstBobChannelLink.ShortChanID(), pid, htlc, ) - if err != nil { - t.Fatalf("unable to get send payment: %v", err) - } + require.NoError(t, err, "unable to get send payment") resultChan, err := n.aliceServer.htlcSwitch.GetPaymentResult( pid, htlc.PaymentHash, newMockDeobfuscator(), ) - if err != nil { - t.Fatalf("unable to get payment result: %v", err) - } + require.NoError(t, err, "unable to get payment result") var result *PaymentResult var ok bool @@ -1427,9 +1383,7 @@ func TestChannelLinkMultiHopUnknownNextHop(t *testing.T) { channels, cleanUp, _, err := createClusterChannels( btcutil.SatoshiPerBitcoin*5, btcutil.SatoshiPerBitcoin*5) - if err != nil { - t.Fatalf("unable to create channel: %v", err) - } + require.NoError(t, err, "unable to create channel") defer cleanUp() n := newThreeHopNetwork(t, channels.aliceToBob, channels.bobToAlice, @@ -1482,9 +1436,7 @@ func TestChannelLinkMultiHopUnknownNextHop(t *testing.T) { // Check that alice invoice wasn't settled and bandwidth of htlc // links hasn't been changed. 
invoice, err := receiver.registry.LookupInvoice(rhash) - if err != nil { - t.Fatalf("unable to get invoice: %v", err) - } + require.NoError(t, err, "unable to get invoice") if invoice.State == channeldb.ContractSettled { t.Fatal("carol invoice have been settled") } @@ -1513,9 +1465,7 @@ func TestChannelLinkMultiHopUnknownNextHop(t *testing.T) { // should have been rejected by the switch, and the AddRef in this link // should be acked by the failed payment. bobInFwdPkgs, err := channels.bobToAlice.State().LoadFwdPkgs() - if err != nil { - t.Fatalf("unable to load bob's fwd pkgs: %v", err) - } + require.NoError(t, err, "unable to load bob's fwd pkgs") // There should be exactly two forward packages, as a full state // transition requires two commitment dances. @@ -1543,9 +1493,7 @@ func TestChannelLinkMultiHopDecodeError(t *testing.T) { channels, cleanUp, _, err := createClusterChannels( btcutil.SatoshiPerBitcoin*3, btcutil.SatoshiPerBitcoin*5) - if err != nil { - t.Fatalf("unable to create channel: %v", err) - } + require.NoError(t, err, "unable to create channel") defer cleanUp() n := newThreeHopNetwork(t, channels.aliceToBob, channels.bobToAlice, @@ -1597,9 +1545,7 @@ func TestChannelLinkMultiHopDecodeError(t *testing.T) { // Check that alice invoice wasn't settled and bandwidth of htlc // links hasn't been changed. invoice, err := receiver.registry.LookupInvoice(rhash) - if err != nil { - t.Fatalf("unable to get invoice: %v", err) - } + require.NoError(t, err, "unable to get invoice") if invoice.State == channeldb.ContractSettled { t.Fatal("carol invoice have been settled") } @@ -1636,9 +1582,7 @@ func TestChannelLinkExpiryTooSoonExitNode(t *testing.T) { channels, cleanUp, _, err := createClusterChannels( btcutil.SatoshiPerBitcoin*3, btcutil.SatoshiPerBitcoin*5) - if err != nil { - t.Fatalf("unable to create channel: %v", err) - } + require.NoError(t, err, "unable to create channel") defer cleanUp() const startingHeight = 200 @@ -1697,9 +1641,7 @@ func TestChannelLinkExpiryTooSoonMidNode(t *testing.T) { channels, cleanUp, _, err := createClusterChannels( btcutil.SatoshiPerBitcoin*3, btcutil.SatoshiPerBitcoin*5) - if err != nil { - t.Fatalf("unable to create channel: %v", err) - } + require.NoError(t, err, "unable to create channel") defer cleanUp() const startingHeight = 200 @@ -1758,9 +1700,7 @@ func TestChannelLinkSingleHopMessageOrdering(t *testing.T) { channels, cleanUp, _, err := createClusterChannels( btcutil.SatoshiPerBitcoin*3, btcutil.SatoshiPerBitcoin*5) - if err != nil { - t.Fatalf("unable to create channel: %v", err) - } + require.NoError(t, err, "unable to create channel") defer cleanUp() n := newThreeHopNetwork(t, channels.aliceToBob, channels.bobToAlice, @@ -1827,9 +1767,7 @@ func TestChannelLinkSingleHopMessageOrdering(t *testing.T) { n.aliceServer, n.bobServer, firstHop, hops, amount, htlcAmt, totalTimelock, ).Wait(30 * time.Second) - if err != nil { - t.Fatalf("unable to make the payment: %v", err) - } + require.NoError(t, err, "unable to make the payment") } type mockPeer struct { @@ -2179,9 +2117,7 @@ func TestChannelLinkBandwidthConsistency(t *testing.T) { aliceLink, bobChannel, tmr, start, cleanUp, _, err := newSingleLinkTestHarness(chanAmt, 0) - if err != nil { - t.Fatalf("unable to create link: %v", err) - } + require.NoError(t, err, "unable to create link") defer cleanUp() if err := start(); err != nil { @@ -2204,9 +2140,7 @@ func TestChannelLinkBandwidthConsistency(t *testing.T) { estimator := chainfee.NewStaticEstimator(6000, 0) feePerKw, err := 
estimator.EstimateFeePerKW(1) - if err != nil { - t.Fatalf("unable to query fee estimator: %v", err) - } + require.NoError(t, err, "unable to query fee estimator") htlcFee := lnwire.NewMSatFromSatoshis( feePerKw.FeeForWeight(input.HTLCWeight), ) @@ -2226,9 +2160,7 @@ func TestChannelLinkBandwidthConsistency(t *testing.T) { invoice, htlc, _, err := generatePayment( htlcAmt, htlcAmt, 5, mockBlob, ) - if err != nil { - t.Fatalf("unable to create payment: %v", err) - } + require.NoError(t, err, "unable to create payment") addPkt := htlcPacket{ htlc: htlc, incomingChanID: hop.Source, @@ -2238,9 +2170,7 @@ func TestChannelLinkBandwidthConsistency(t *testing.T) { circuit := makePaymentCircuit(&htlc.PaymentHash, &addPkt) _, err = coreLink.cfg.Switch.commitCircuits(&circuit) - if err != nil { - t.Fatalf("unable to commit circuit: %v", err) - } + require.NoError(t, err, "unable to commit circuit") addPkt.circuit = &circuit if err := aliceLink.handleSwitchPacket(&addPkt); err != nil { @@ -2266,9 +2196,7 @@ func TestChannelLinkBandwidthConsistency(t *testing.T) { } bobIndex, err := bobChannel.ReceiveHTLC(addHtlc) - if err != nil { - t.Fatalf("bob failed receiving htlc: %v", err) - } + require.NoError(t, err, "bob failed receiving htlc") // Lock in the HTLC. if err := updateState(tmr, coreLink, bobChannel, true); err != nil { @@ -2281,9 +2209,7 @@ func TestChannelLinkBandwidthConsistency(t *testing.T) { // then the bandwidth should remain unchanged as the remote party will // gain additional channel balance. err = bobChannel.SettleHTLC(*invoice.Terms.PaymentPreimage, bobIndex, nil, nil, nil) - if err != nil { - t.Fatalf("unable to settle htlc: %v", err) - } + require.NoError(t, err, "unable to settle htlc") htlcSettle := &lnwire.UpdateFulfillHTLC{ ID: 0, PaymentPreimage: *invoice.Terms.PaymentPreimage, @@ -2306,9 +2232,7 @@ func TestChannelLinkBandwidthConsistency(t *testing.T) { // Next, we'll add another HTLC initiated by the switch (of the same // amount as the prior one). _, htlc, _, err = generatePayment(htlcAmt, htlcAmt, 5, mockBlob) - if err != nil { - t.Fatalf("unable to create payment: %v", err) - } + require.NoError(t, err, "unable to create payment") addPkt = htlcPacket{ htlc: htlc, incomingChanID: hop.Source, @@ -2318,9 +2242,7 @@ func TestChannelLinkBandwidthConsistency(t *testing.T) { circuit = makePaymentCircuit(&htlc.PaymentHash, &addPkt) _, err = coreLink.cfg.Switch.commitCircuits(&circuit) - if err != nil { - t.Fatalf("unable to commit circuit: %v", err) - } + require.NoError(t, err, "unable to commit circuit") addPkt.circuit = &circuit if err := aliceLink.handleSwitchPacket(&addPkt); err != nil { @@ -2344,9 +2266,7 @@ func TestChannelLinkBandwidthConsistency(t *testing.T) { } bobIndex, err = bobChannel.ReceiveHTLC(addHtlc) - if err != nil { - t.Fatalf("bob failed receiving htlc: %v", err) - } + require.NoError(t, err, "bob failed receiving htlc") // Lock in the HTLC, which should not affect the bandwidth. if err := updateState(tmr, coreLink, bobChannel, true); err != nil { @@ -2359,9 +2279,7 @@ func TestChannelLinkBandwidthConsistency(t *testing.T) { // remote peer) to cancel the HTLC we just added. This should return us // back to the bandwidth of the link right before the HTLC was sent. 
err = bobChannel.FailHTLC(bobIndex, []byte("nop"), nil, nil, nil) - if err != nil { - t.Fatalf("unable to fail htlc: %v", err) - } + require.NoError(t, err, "unable to fail htlc") failMsg := &lnwire.UpdateFailHTLC{ ID: 1, Reason: lnwire.OpaqueReason([]byte("nop")), @@ -2388,30 +2306,22 @@ func TestChannelLinkBandwidthConsistency(t *testing.T) { htlcAmt, totalTimelock, hops := generateHops(htlcAmt, testStartingHeight, coreLink) blob, err := generateRoute(hops...) - if err != nil { - t.Fatalf("unable to gen route: %v", err) - } + require.NoError(t, err, "unable to gen route") invoice, htlc, _, err = generatePayment( htlcAmt, htlcAmt, totalTimelock, blob, ) - if err != nil { - t.Fatalf("unable to create payment: %v", err) - } + require.NoError(t, err, "unable to create payment") // We must add the invoice to the registry, such that Alice expects // this payment. err = coreLink.cfg.Registry.(*mockInvoiceRegistry).AddInvoice( *invoice, htlc.PaymentHash, ) - if err != nil { - t.Fatalf("unable to add invoice to registry: %v", err) - } + require.NoError(t, err, "unable to add invoice to registry") htlc.ID = 0 _, err = bobChannel.AddHTLC(htlc, nil) - if err != nil { - t.Fatalf("unable to add htlc: %v", err) - } + require.NoError(t, err, "unable to add htlc") aliceLink.HandleChannelUpdate(htlc) // Alice's balance remains unchanged until this HTLC is locked in. @@ -2435,17 +2345,13 @@ func TestChannelLinkBandwidthConsistency(t *testing.T) { circuit = makePaymentCircuit(&htlc.PaymentHash, &addPkt) _, err = coreLink.cfg.Switch.commitCircuits(&circuit) - if err != nil { - t.Fatalf("unable to commit circuit: %v", err) - } + require.NoError(t, err, "unable to commit circuit") addPkt.outgoingChanID = carolChanID addPkt.outgoingHTLCID = 0 err = coreLink.cfg.Circuits.OpenCircuits(addPkt.keystone()) - if err != nil { - t.Fatalf("unable to set keystone: %v", err) - } + require.NoError(t, err, "unable to set keystone") // Next, we'll settle the HTLC with our knowledge of the pre-image that // we eventually learn (simulating a multi-hop payment). The bandwidth @@ -2482,9 +2388,7 @@ func TestChannelLinkBandwidthConsistency(t *testing.T) { t.Fatalf("expected UpdateFulfillHTLC, got %T", msg) } err = bobChannel.ReceiveHTLCSettle(settleMsg.PaymentPreimage, settleMsg.ID) - if err != nil { - t.Fatalf("failed receiving fail htlc: %v", err) - } + require.NoError(t, err, "failed receiving fail htlc") // After failing an HTLC, the link will automatically trigger // a state update. @@ -2497,21 +2401,15 @@ func TestChannelLinkBandwidthConsistency(t *testing.T) { htlcAmt, totalTimelock, hops = generateHops(htlcAmt, testStartingHeight, coreLink) blob, err = generateRoute(hops...) - if err != nil { - t.Fatalf("unable to gen route: %v", err) - } + require.NoError(t, err, "unable to gen route") invoice, htlc, _, err = generatePayment( htlcAmt, htlcAmt, totalTimelock, blob, ) - if err != nil { - t.Fatalf("unable to create payment: %v", err) - } + require.NoError(t, err, "unable to create payment") err = coreLink.cfg.Registry.(*mockInvoiceRegistry).AddInvoice( *invoice, htlc.PaymentHash, ) - if err != nil { - t.Fatalf("unable to add invoice to registry: %v", err) - } + require.NoError(t, err, "unable to add invoice to registry") // Since we are not using the link to handle HTLC IDs for the // remote channel, we must set this manually. This is the second @@ -2519,9 +2417,7 @@ func TestChannelLinkBandwidthConsistency(t *testing.T) { // link will set this automatically for her side). 
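Likewise, t.Fatalf calls whose format verbs interpolate something other than a trailing err, such as the "got %T" messages in these hunks, cannot be converted one for one. If they were converted in a later pass, testify's *f variants and type helper would fit; a sketch, where `attempt` is a made-up stand-in value:

	// require.NoErrorf keeps printf-style messages for the error case.
	require.NoErrorf(t, err, "failed receiving htlc on attempt %d", attempt)

	// require.IsType covers the "expected ..., got %T" shape directly.
	require.IsType(t, &lnwire.UpdateFulfillHTLC{}, msg, "unexpected message")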
htlc.ID = 1 _, err = bobChannel.AddHTLC(htlc, nil) - if err != nil { - t.Fatalf("unable to add htlc: %v", err) - } + require.NoError(t, err, "unable to add htlc") aliceLink.HandleChannelUpdate(htlc) time.Sleep(time.Millisecond * 500) @@ -2543,17 +2439,13 @@ func TestChannelLinkBandwidthConsistency(t *testing.T) { circuit = makePaymentCircuit(&htlc.PaymentHash, &addPkt) _, err = coreLink.cfg.Switch.commitCircuits(&circuit) - if err != nil { - t.Fatalf("unable to commit circuit: %v", err) - } + require.NoError(t, err, "unable to commit circuit") addPkt.outgoingChanID = carolChanID addPkt.outgoingHTLCID = 1 err = coreLink.cfg.Circuits.OpenCircuits(addPkt.keystone()) - if err != nil { - t.Fatalf("unable to set keystone: %v", err) - } + require.NoError(t, err, "unable to set keystone") failPkt := htlcPacket{ incomingChanID: aliceLink.ShortChanID(), @@ -2586,9 +2478,7 @@ func TestChannelLinkBandwidthConsistency(t *testing.T) { t.Fatalf("expected UpdateFailHTLC, got %T", msg) } err = bobChannel.ReceiveFailHTLC(failMsg.ID, []byte("fail")) - if err != nil { - t.Fatalf("failed receiving fail htlc: %v", err) - } + require.NoError(t, err, "failed receiving fail htlc") // After failing an HTLC, the link will automatically trigger // a state update. @@ -2640,9 +2530,7 @@ func TestChannelLinkTrimCircuitsPending(t *testing.T) { // state is unnecessary. aliceLink, _, batchTicker, start, cleanUp, restore, err := newSingleLinkTestHarness(chanAmt, 0) - if err != nil { - t.Fatalf("unable to create link: %v", err) - } + require.NoError(t, err, "unable to create link") defer cleanUp() if err := start(); err != nil { @@ -2657,9 +2545,7 @@ func TestChannelLinkTrimCircuitsPending(t *testing.T) { // correctness of Alice's bandwidth when forwarding HTLCs. estimator := chainfee.NewStaticEstimator(6000, 0) feePerKw, err := estimator.EstimateFeePerKW(1) - if err != nil { - t.Fatalf("unable to query fee estimator: %v", err) - } + require.NoError(t, err, "unable to query fee estimator") defaultCommitFee := alice.channel.StateSnapshot().CommitFee htlcFee := lnwire.NewMSatFromSatoshis( @@ -2683,9 +2569,7 @@ func TestChannelLinkTrimCircuitsPending(t *testing.T) { var mockBlob [lnwire.OnionPacketSize]byte htlcAmt := lnwire.NewMSatFromSatoshis(btcutil.SatoshiPerBitcoin) _, htlc, _, err := generatePayment(htlcAmt, htlcAmt, 5, mockBlob) - if err != nil { - t.Fatalf("unable to create payment: %v", err) - } + require.NoError(t, err, "unable to create payment") // Create `numHtlc` htlcPackets and payment circuits that will be used // to drive the test. All of the packets will use the same dummy HTLC. @@ -2916,9 +2800,7 @@ func TestChannelLinkTrimCircuitsNoCommit(t *testing.T) { // state is unnecessary. aliceLink, _, batchTicker, start, cleanUp, restore, err := newSingleLinkTestHarness(chanAmt, 0) - if err != nil { - t.Fatalf("unable to create link: %v", err) - } + require.NoError(t, err, "unable to create link") defer cleanUp() if err := start(); err != nil { @@ -2938,9 +2820,7 @@ func TestChannelLinkTrimCircuitsNoCommit(t *testing.T) { // correctness of Alice's bandwidth when forwarding HTLCs. 
estimator := chainfee.NewStaticEstimator(6000, 0) feePerKw, err := estimator.EstimateFeePerKW(1) - if err != nil { - t.Fatalf("unable to query fee estimator: %v", err) - } + require.NoError(t, err, "unable to query fee estimator") defaultCommitFee := alice.channel.StateSnapshot().CommitFee htlcFee := lnwire.NewMSatFromSatoshis( @@ -2964,9 +2844,7 @@ func TestChannelLinkTrimCircuitsNoCommit(t *testing.T) { var mockBlob [lnwire.OnionPacketSize]byte htlcAmt := lnwire.NewMSatFromSatoshis(btcutil.SatoshiPerBitcoin) _, htlc, _, err := generatePayment(htlcAmt, htlcAmt, 5, mockBlob) - if err != nil { - t.Fatalf("unable to create payment: %v", err) - } + require.NoError(t, err, "unable to create payment") // Create `numHtlc` htlcPackets and payment circuits that will be used // to drive the test. All of the packets will use the same dummy HTLC. @@ -3180,9 +3058,7 @@ func TestChannelLinkTrimCircuitsRemoteCommit(t *testing.T) { // We'll start by creating a new link with our chanAmt (5 BTC). aliceLink, bobChan, batchTicker, start, cleanUp, restore, err := newSingleLinkTestHarness(chanAmt, 0) - if err != nil { - t.Fatalf("unable to create link: %v", err) - } + require.NoError(t, err, "unable to create link") if err := start(); err != nil { t.Fatalf("unable to start test harness: %v", err) @@ -3197,9 +3073,7 @@ func TestChannelLinkTrimCircuitsRemoteCommit(t *testing.T) { // correctness of Alice's bandwidth when forwarding HTLCs. estimator := chainfee.NewStaticEstimator(6000, 0) feePerKw, err := estimator.EstimateFeePerKW(1) - if err != nil { - t.Fatalf("unable to query fee estimator: %v", err) - } + require.NoError(t, err, "unable to query fee estimator") defaultCommitFee := alice.channel.StateSnapshot().CommitFee htlcFee := lnwire.NewMSatFromSatoshis( @@ -3223,9 +3097,7 @@ func TestChannelLinkTrimCircuitsRemoteCommit(t *testing.T) { var mockBlob [lnwire.OnionPacketSize]byte htlcAmt := lnwire.NewMSatFromSatoshis(btcutil.SatoshiPerBitcoin) _, htlc, _, err := generatePayment(htlcAmt, htlcAmt, 5, mockBlob) - if err != nil { - t.Fatalf("unable to create payment: %v", err) - } + require.NoError(t, err, "unable to create payment") // Create `numHtlc` htlcPackets and payment circuits that will be used // to drive the test. All of the packets will use the same dummy HTLC. @@ -3297,14 +3169,10 @@ func TestChannelLinkTrimCircuitsRemoteCommit(t *testing.T) { // Next, revoke Bob's current commitment and send it to Alice so that we // can test that Alice's circuits aren't trimmed. rev, _, err := bobChan.RevokeCurrentCommitment() - if err != nil { - t.Fatalf("unable to revoke current commitment: %v", err) - } + require.NoError(t, err, "unable to revoke current commitment") _, _, _, _, err = alice.channel.ReceiveRevocation(rev) - if err != nil { - t.Fatalf("unable to receive revocation: %v", err) - } + require.NoError(t, err, "unable to receive revocation") // Restart Alice's link, which simulates a disconnection with the remote // peer. 
@@ -3333,9 +3201,7 @@ func TestChannelLinkBandwidthChanReserve(t *testing.T) { const chanReserve = btcutil.SatoshiPerBitcoin * 1 aliceLink, bobChannel, batchTimer, start, cleanUp, _, err := newSingleLinkTestHarness(chanAmt, chanReserve) - if err != nil { - t.Fatalf("unable to create link: %v", err) - } + require.NoError(t, err, "unable to create link") defer cleanUp() if err := start(); err != nil { @@ -3353,9 +3219,7 @@ func TestChannelLinkBandwidthChanReserve(t *testing.T) { estimator := chainfee.NewStaticEstimator(6000, 0) feePerKw, err := estimator.EstimateFeePerKW(1) - if err != nil { - t.Fatalf("unable to query fee estimator: %v", err) - } + require.NoError(t, err, "unable to query fee estimator") htlcFee := lnwire.NewMSatFromSatoshis( feePerKw.FeeForWeight(input.HTLCWeight), ) @@ -3372,9 +3236,7 @@ func TestChannelLinkBandwidthChanReserve(t *testing.T) { // now be decremented to reflect the new HTLC. htlcAmt := lnwire.NewMSatFromSatoshis(3 * btcutil.SatoshiPerBitcoin) invoice, htlc, _, err := generatePayment(htlcAmt, htlcAmt, 5, mockBlob) - if err != nil { - t.Fatalf("unable to create payment: %v", err) - } + require.NoError(t, err, "unable to create payment") addPkt := &htlcPacket{ htlc: htlc, @@ -3382,9 +3244,7 @@ func TestChannelLinkBandwidthChanReserve(t *testing.T) { } circuit := makePaymentCircuit(&htlc.PaymentHash, addPkt) _, err = coreLink.cfg.Switch.commitCircuits(&circuit) - if err != nil { - t.Fatalf("unable to commit circuit: %v", err) - } + require.NoError(t, err, "unable to commit circuit") _ = aliceLink.handleSwitchPacket(addPkt) time.Sleep(time.Millisecond * 100) @@ -3404,9 +3264,7 @@ func TestChannelLinkBandwidthChanReserve(t *testing.T) { } bobIndex, err := bobChannel.ReceiveHTLC(addHtlc) - if err != nil { - t.Fatalf("bob failed receiving htlc: %v", err) - } + require.NoError(t, err, "bob failed receiving htlc") // Lock in the HTLC. if err := updateState(batchTimer, coreLink, bobChannel, true); err != nil { @@ -3419,9 +3277,7 @@ func TestChannelLinkBandwidthChanReserve(t *testing.T) { // then the bandwidth should remain unchanged as the remote party will // gain additional channel balance. 
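One caveat that carries over unchanged from t.Fatalf: FailNow, and therefore require.NoError, must run on the goroutine executing the test. These link tests drive much of their traffic from background goroutines, so errors produced there still have to be funnelled back first; a sketch, using the settle call that follows:

	// Collect errors from background goroutines on a channel and assert
	// on the test goroutine, never inside the goroutine itself.
	errChan := make(chan error, 1)
	go func() {
		errChan <- bobChannel.SettleHTLC(
			*invoice.Terms.PaymentPreimage, bobIndex, nil, nil, nil,
		)
	}()
	require.NoError(t, <-errChan, "unable to settle htlc")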
err = bobChannel.SettleHTLC(*invoice.Terms.PaymentPreimage, bobIndex, nil, nil, nil) - if err != nil { - t.Fatalf("unable to settle htlc: %v", err) - } + require.NoError(t, err, "unable to settle htlc") htlcSettle := &lnwire.UpdateFulfillHTLC{ ID: bobIndex, PaymentPreimage: *invoice.Terms.PaymentPreimage, @@ -3449,9 +3305,7 @@ func TestChannelLinkBandwidthChanReserve(t *testing.T) { const bobChanReserve = btcutil.SatoshiPerBitcoin * 1.5 bobLink, _, _, start, bobCleanUp, _, err := newSingleLinkTestHarness(bobChanAmt, bobChanReserve) - if err != nil { - t.Fatalf("unable to create link: %v", err) - } + require.NoError(t, err, "unable to create link") defer bobCleanUp() if err := start(); err != nil { @@ -3860,9 +3714,7 @@ func TestChannelLinkShutdownDuringForward(t *testing.T) { channels, cleanUp, _, err := createClusterChannels( btcutil.SatoshiPerBitcoin*3, btcutil.SatoshiPerBitcoin*5) - if err != nil { - t.Fatalf("unable to create channel: %v", err) - } + require.NoError(t, err, "unable to create channel") defer cleanUp() n := newThreeHopNetwork(t, channels.aliceToBob, channels.bobToAlice, @@ -3986,9 +3838,7 @@ func TestChannelLinkUpdateCommitFee(t *testing.T) { channels, cleanUp, _, err := createClusterChannels( aliceInitialBalance, btcutil.SatoshiPerBitcoin*5, ) - if err != nil { - t.Fatalf("unable to create channel: %v", err) - } + require.NoError(t, err, "unable to create channel") defer cleanUp() n := newThreeHopNetwork(t, channels.aliceToBob, channels.bobToAlice, @@ -4130,9 +3980,7 @@ func TestChannelLinkAcceptDuplicatePayment(t *testing.T) { channels, cleanUp, _, err := createClusterChannels( btcutil.SatoshiPerBitcoin*3, btcutil.SatoshiPerBitcoin*5) - if err != nil { - t.Fatalf("unable to create channel: %v", err) - } + require.NoError(t, err, "unable to create channel") defer cleanUp() n := newThreeHopNetwork(t, channels.aliceToBob, channels.bobToAlice, @@ -4160,25 +4008,19 @@ func TestChannelLinkAcceptDuplicatePayment(t *testing.T) { } err = n.carolServer.registry.AddInvoice(*invoice, htlc.PaymentHash) - if err != nil { - t.Fatalf("unable to add invoice in carol registry: %v", err) - } + require.NoError(t, err, "unable to add invoice in carol registry") // With the invoice now added to Carol's registry, we'll send the // payment. err = n.aliceServer.htlcSwitch.SendHTLC( n.firstBobChannelLink.ShortChanID(), pid, htlc, ) - if err != nil { - t.Fatalf("unable to send payment to carol: %v", err) - } + require.NoError(t, err, "unable to send payment to carol") resultChan, err := n.aliceServer.htlcSwitch.GetPaymentResult( pid, htlc.PaymentHash, newMockDeobfuscator(), ) - if err != nil { - t.Fatalf("unable to get payment result: %v", err) - } + require.NoError(t, err, "unable to get payment result") // Now, if we attempt to send the payment *again* it should be rejected // as it's a duplicate request. 
@@ -4216,9 +4058,7 @@ func TestChannelLinkAcceptOverpay(t *testing.T) { channels, cleanUp, _, err := createClusterChannels( btcutil.SatoshiPerBitcoin*3, btcutil.SatoshiPerBitcoin*5) - if err != nil { - t.Fatalf("unable to create channel: %v", err) - } + require.NoError(t, err, "unable to create channel") defer cleanUp() n := newThreeHopNetwork(t, channels.aliceToBob, channels.bobToAlice, @@ -4249,9 +4089,7 @@ func TestChannelLinkAcceptOverpay(t *testing.T) { n.aliceServer, n.carolServer, firstHop, hops, amount/2, htlcAmt, totalTimelock, ).Wait(30 * time.Second) - if err != nil { - t.Fatalf("unable to send payment: %v", err) - } + require.NoError(t, err, "unable to send payment") // Wait for Alice and Bob's second link to receive the revocation. time.Sleep(2 * time.Second) @@ -4259,9 +4097,7 @@ func TestChannelLinkAcceptOverpay(t *testing.T) { // Even though we sent 2x what was asked for, Carol should still have // accepted the payment and marked it as settled. invoice, err := receiver.registry.LookupInvoice(rhash) - if err != nil { - t.Fatalf("unable to get invoice: %v", err) - } + require.NoError(t, err, "unable to get invoice") if invoice.State != channeldb.ContractSettled { t.Fatal("carol invoice haven't been settled") } @@ -4573,9 +4409,7 @@ func generateHtlc(t *testing.T, coreLink *channelLink, err := coreLink.cfg.Registry.(*mockInvoiceRegistry).AddInvoice( *invoice, htlc.PaymentHash, ) - if err != nil { - t.Fatalf("unable to add invoice to registry: %v", err) - } + require.NoError(t, err, "unable to add invoice to registry") return htlc } @@ -4598,16 +4432,12 @@ func generateHtlcAndInvoice(t *testing.T, }), } blob, err := generateRoute(hops...) - if err != nil { - t.Fatalf("unable to generate route: %v", err) - } + require.NoError(t, err, "unable to generate route") invoice, htlc, _, err := generatePayment( htlcAmt, htlcAmt, uint32(htlcExpiry), blob, ) - if err != nil { - t.Fatalf("unable to create payment: %v", err) - } + require.NoError(t, err, "unable to create payment") htlc.ID = id @@ -4623,9 +4453,7 @@ func TestChannelLinkNoMoreUpdates(t *testing.T) { const chanReserve = btcutil.SatoshiPerBitcoin * 1 aliceLink, bobChannel, _, start, cleanUp, _, err := newSingleLinkTestHarness(chanAmt, chanReserve) - if err != nil { - t.Fatalf("unable to create link: %v", err) - } + require.NoError(t, err, "unable to create link") defer cleanUp() if err := start(); err != nil { @@ -4736,9 +4564,7 @@ func checkHasPreimages(t *testing.T, coreLink *channelLink, return nil }, 5*time.Second) - if err != nil { - t.Fatalf("unable to find preimages: %v", err) - } + require.NoError(t, err, "unable to find preimages") } // TestChannelLinkWaitForRevocation tests that we will keep accepting updates @@ -4751,9 +4577,7 @@ func TestChannelLinkWaitForRevocation(t *testing.T) { const chanReserve = btcutil.SatoshiPerBitcoin * 1 aliceLink, bobChannel, _, start, cleanUp, _, err := newSingleLinkTestHarness(chanAmt, chanReserve) - if err != nil { - t.Fatalf("unable to create link: %v", err) - } + require.NoError(t, err, "unable to create link") defer cleanUp() if err := start(); err != nil { @@ -4872,9 +4696,7 @@ func TestChannelLinkNoEmptySig(t *testing.T) { const chanReserve = btcutil.SatoshiPerBitcoin * 1 aliceLink, bobChannel, batchTicker, start, cleanUp, _, err := newSingleLinkTestHarness(chanAmt, chanReserve) - if err != nil { - t.Fatalf("unable to create link: %v", err) - } + require.NoError(t, err, "unable to create link") defer cleanUp() if err := start(); err != nil { @@ -4934,9 +4756,7 @@ func 
TestChannelLinkNoEmptySig(t *testing.T) { err = bobChannel.ReceiveNewCommitment( commitSigAlice.CommitSig, commitSigAlice.HtlcSigs, ) - if err != nil { - t.Fatalf("bob failed receiving commitment: %v", err) - } + require.NoError(t, err, "bob failed receiving commitment") // Both Alice and Bob revoke their previous commitment txes. ctx.receiveRevAndAckAliceToBob() @@ -4981,9 +4801,7 @@ func testChannelLinkBatchPreimageWrite(t *testing.T, disconnect bool) { const chanReserve = btcutil.SatoshiPerBitcoin * 1 aliceLink, bobChannel, batchTicker, startUp, cleanUp, _, err := newSingleLinkTestHarness(chanAmt, chanReserve) - if err != nil { - t.Fatalf("unable to create link: %v", err) - } + require.NoError(t, err, "unable to create link") defer cleanUp() if err := startUp(); err != nil { @@ -5092,9 +4910,7 @@ func TestChannelLinkCleanupSpuriousResponses(t *testing.T) { const chanReserve = btcutil.SatoshiPerBitcoin * 1 aliceLink, bobChannel, _, start, cleanUp, _, err := newSingleLinkTestHarness(chanAmt, chanReserve) - if err != nil { - t.Fatalf("unable to create link: %v", err) - } + require.NoError(t, err, "unable to create link") defer cleanUp() if err := start(); err != nil { @@ -5146,9 +4962,7 @@ func TestChannelLinkCleanupSpuriousResponses(t *testing.T) { time.Sleep(time.Second) aliceFwdPkgs, err := coreLink.channel.LoadFwdPkgs() - if err != nil { - t.Fatalf("unable to load alice's fwdpkgs: %v", err) - } + require.NoError(t, err, "unable to load alice's fwdpkgs") // Alice should have exactly one forwarding package. if len(aliceFwdPkgs) != 1 { @@ -5193,9 +5007,7 @@ func TestChannelLinkCleanupSpuriousResponses(t *testing.T) { ctx.receiveCommitSigAliceToBob(1) aliceFwdPkgs, err = coreLink.channel.LoadFwdPkgs() - if err != nil { - t.Fatalf("unable to load alice's fwdpkgs: %v", err) - } + require.NoError(t, err, "unable to load alice's fwdpkgs") // Alice should still only have one fwdpkg, as she hasn't yet received // another revocation from Bob. @@ -5253,9 +5065,7 @@ func TestChannelLinkCleanupSpuriousResponses(t *testing.T) { ctx.receiveCommitSigAliceToBob(0) aliceFwdPkgs, err = coreLink.channel.LoadFwdPkgs() - if err != nil { - t.Fatalf("unable to load alice's fwdpkgs: %v", err) - } + require.NoError(t, err, "unable to load alice's fwdpkgs") // Now that another commitment dance has completed, Alice should have 2 // forwarding packages. @@ -5313,9 +5123,7 @@ func TestChannelLinkCleanupSpuriousResponses(t *testing.T) { } aliceFwdPkgs, err = coreLink.channel.LoadFwdPkgs() - if err != nil { - t.Fatalf("unable to load alice's fwdpkgs: %v", err) - } + require.NoError(t, err, "unable to load alice's fwdpkgs") // Alice should now have 3 forwarding packages, and the latest should be // empty. @@ -5368,9 +5176,7 @@ func TestChannelLinkCleanupSpuriousResponses(t *testing.T) { } aliceFwdPkgs, err = coreLink.channel.LoadFwdPkgs() - if err != nil { - t.Fatalf("unable to load alice's fwdpkgs: %v", err) - } + require.NoError(t, err, "unable to load alice's fwdpkgs") // Since no state transitions have been performed for the duplicate // packets, Alice should still have the same 3 forwarding packages. 
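The forwarding-package count checks above are another shape left untouched by this pass. Converted, they would pair naturally with require.Len; a hypothetical follow-up:

	aliceFwdPkgs, err := coreLink.channel.LoadFwdPkgs()
	require.NoError(t, err, "unable to load alice's fwdpkgs")
	require.Len(t, aliceFwdPkgs, 1,
		"alice should have exactly one forwarding package")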
@@ -5720,9 +5526,7 @@ func TestForwardingAsymmetricTimeLockPolicies(t *testing.T) { btcutil.SatoshiPerBitcoin*3, btcutil.SatoshiPerBitcoin*5, ) - if err != nil { - t.Fatalf("unable to create channel: %v", err) - } + require.NoError(t, err, "unable to create channel") defer cleanUp() n := newThreeHopNetwork( @@ -5755,9 +5559,7 @@ func TestForwardingAsymmetricTimeLockPolicies(t *testing.T) { n.aliceServer, n.carolServer, firstHop, hops, amount, htlcAmt, totalTimelock, ).Wait(30 * time.Second) - if err != nil { - t.Fatalf("unable to send payment: %v", err) - } + require.NoError(t, err, "unable to send payment") } // TestCheckHtlcForward tests that a link is properly enforcing the HTLC @@ -5864,9 +5666,7 @@ func TestChannelLinkCanceledInvoice(t *testing.T) { alice, bob, cleanUp, err := createTwoClusterChannels( btcutil.SatoshiPerBitcoin*3, btcutil.SatoshiPerBitcoin*5) - if err != nil { - t.Fatalf("unable to create channel: %v", err) - } + require.NoError(t, err, "unable to create channel") defer cleanUp() n := newTwoHopNetwork(t, alice.channel, bob.channel, testStartingHeight) @@ -5886,9 +5686,7 @@ func TestChannelLinkCanceledInvoice(t *testing.T) { n.aliceServer, n.bobServer, firstHop, hops, amount, htlcAmt, totalTimelock, ) - if err != nil { - t.Fatalf("unable to prepare the payment: %v", err) - } + require.NoError(t, err, "unable to prepare the payment") // Cancel the invoice at bob's end. hash := invoice.Terms.PaymentPreimage.Hash() @@ -5932,9 +5730,7 @@ func newHodlInvoiceTestCtx(t *testing.T) (*hodlInvoiceTestCtx, error) { btcutil.SatoshiPerBitcoin*3, btcutil.SatoshiPerBitcoin*5, ) - if err != nil { - t.Fatalf("unable to create channel: %v", err) - } + require.NoError(t, err, "unable to create channel") n := newTwoHopNetwork(t, alice.channel, bob.channel, testStartingHeight) if err := n.start(); err != nil { @@ -6095,9 +5891,7 @@ func TestChannelLinkHoldInvoiceRestart(t *testing.T) { // state is unnecessary. aliceLink, bobChannel, _, start, cleanUp, restore, err := newSingleLinkTestHarness(chanAmt, 0) - if err != nil { - t.Fatalf("unable to create link: %v", err) - } + require.NoError(t, err, "unable to create link") defer cleanUp() alice := newPersistentLinkHarness( @@ -6127,9 +5921,7 @@ func TestChannelLinkHoldInvoiceRestart(t *testing.T) { err = registry.AddInvoice( *invoice, htlc.PaymentHash, ) - if err != nil { - t.Fatalf("unable to add invoice to registry: %v", err) - } + require.NoError(t, err, "unable to add invoice to registry") ctx := linkTestContext{ t: t, @@ -6163,9 +5955,7 @@ func TestChannelLinkHoldInvoiceRestart(t *testing.T) { // Settle the invoice with the preimage. err = registry.SettleHodlInvoice(*preimage) - if err != nil { - t.Fatalf("settle hodl invoice: %v", err) - } + require.NoError(t, err, "settle hodl invoice") // Expect alice to send a settle and commitsig message to bob. ctx.receiveSettleAliceToBob() @@ -6196,9 +5986,7 @@ func TestChannelLinkRevocationWindowRegular(t *testing.T) { // state is unnecessary. aliceLink, bobChannel, _, start, cleanUp, _, err := newSingleLinkTestHarness(chanAmt, 0) - if err != nil { - t.Fatalf("unable to create link: %v", err) - } + require.NoError(t, err, "unable to create link") defer cleanUp() if err := start(); err != nil { @@ -6227,13 +6015,9 @@ func TestChannelLinkRevocationWindowRegular(t *testing.T) { // We must add the invoice to the registry, such that Alice // expects this payment. 
err = registry.AddInvoice(*invoice1, htlc1.PaymentHash) - if err != nil { - t.Fatalf("unable to add invoice to registry: %v", err) - } + require.NoError(t, err, "unable to add invoice to registry") err = registry.AddInvoice(*invoice2, htlc2.PaymentHash) - if err != nil { - t.Fatalf("unable to add invoice to registry: %v", err) - } + require.NoError(t, err, "unable to add invoice to registry") // Lock in htlc 1 on both sides. ctx.sendHtlcBobToAlice(htlc1) @@ -6285,9 +6069,7 @@ func TestChannelLinkRevocationWindowHodl(t *testing.T) { // state is unnecessary. aliceLink, bobChannel, batchTicker, start, cleanUp, _, err := newSingleLinkTestHarness(chanAmt, 0) - if err != nil { - t.Fatalf("unable to create link: %v", err) - } + require.NoError(t, err, "unable to create link") defer cleanUp() if err := start(); err != nil { @@ -6318,13 +6100,9 @@ func TestChannelLinkRevocationWindowHodl(t *testing.T) { // We must add the invoices to the registry, such that Alice // expects the payments. err = registry.AddInvoice(*invoice1, htlc1.PaymentHash) - if err != nil { - t.Fatalf("unable to add invoice to registry: %v", err) - } + require.NoError(t, err, "unable to add invoice to registry") err = registry.AddInvoice(*invoice2, htlc2.PaymentHash) - if err != nil { - t.Fatalf("unable to add invoice to registry: %v", err) - } + require.NoError(t, err, "unable to add invoice to registry") ctx := linkTestContext{ t: t, @@ -6363,9 +6141,7 @@ func TestChannelLinkRevocationWindowHodl(t *testing.T) { // Settle invoice 1 with the preimage. err = registry.SettleHodlInvoice(*preimage1) - if err != nil { - t.Fatalf("settle hodl invoice: %v", err) - } + require.NoError(t, err, "settle hodl invoice") // Expect alice to send a settle and commitsig message to bob. Bob does // not yet send the revocation. @@ -6374,9 +6150,7 @@ func TestChannelLinkRevocationWindowHodl(t *testing.T) { // Settle invoice 2 with the preimage. err = registry.SettleHodlInvoice(*preimage2) - if err != nil { - t.Fatalf("settle hodl invoice: %v", err) - } + require.NoError(t, err, "settle hodl invoice") // Expect alice to send a settle for htlc 2. ctx.receiveSettleAliceToBob() @@ -6434,9 +6208,7 @@ func TestChannelLinkReceiveEmptySig(t *testing.T) { const chanReserve = btcutil.SatoshiPerBitcoin * 1 aliceLink, bobChannel, batchTicker, start, cleanUp, _, err := newSingleLinkTestHarness(chanAmt, chanReserve) - if err != nil { - t.Fatalf("unable to create link: %v", err) - } + require.NoError(t, err, "unable to create link") defer cleanUp() if err := start(); err != nil { @@ -6504,9 +6276,7 @@ func TestPendingCommitTicker(t *testing.T) { const chanReserve = btcutil.SatoshiPerBitcoin * 1 aliceLink, bobChannel, batchTicker, start, cleanUp, _, err := newSingleLinkTestHarness(chanAmt, chanReserve) - if err != nil { - t.Fatalf("unable to create link: %v", err) - } + require.NoError(t, err, "unable to create link") var ( coreLink = aliceLink.(*channelLink) diff --git a/htlcswitch/mailbox_test.go b/htlcswitch/mailbox_test.go index a65eb9983..a55dac2fc 100644 --- a/htlcswitch/mailbox_test.go +++ b/htlcswitch/mailbox_test.go @@ -122,13 +122,9 @@ func TestMailBoxCouriers(t *testing.T) { // With the packets drained and partially acked, we reset the mailbox, // simulating a link shutting down and then coming back up. 
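Finally, the message argument carried over from each t.Fatalf is optional: require.NoError with only the error still reports the failure location and the error text. Keeping the old strings, as this patch does, preserves greppable failure messages; a bare call, as in the reset checks that follow, would also be valid:

	err := ctx.mailbox.ResetMessages()
	require.NoError(t, err) // prints "Received unexpected error: <err>"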
err := ctx.mailbox.ResetMessages() - if err != nil { - t.Fatalf("unable to reset messages: %v", err) - } + require.NoError(t, err, "unable to reset messages") err = ctx.mailbox.ResetPackets() - if err != nil { - t.Fatalf("unable to reset packets: %v", err) - } + require.NoError(t, err, "unable to reset packets") // Now, we'll use the same alternating strategy to read from our // mailbox. All wire messages are dropped on startup, but any unacked @@ -348,9 +344,7 @@ func TestMailBoxFailAdd(t *testing.T) { // the link flapping and coming back up before the second batch's // expiries have elapsed. We should see no failures sent back. err := ctx.mailbox.ResetPackets() - if err != nil { - t.Fatalf("unable to reset packets: %v", err) - } + require.NoError(t, err, "unable to reset packets") ctx.checkFails(nil) // Redeliver the second batch to the link and hold them there. @@ -369,9 +363,7 @@ func TestMailBoxFailAdd(t *testing.T) { // Finally, reset the link which should cause the second batch to be // cancelled immediately. err = ctx.mailbox.ResetPackets() - if err != nil { - t.Fatalf("unable to reset packets: %v", err) - } + require.NoError(t, err, "unable to reset packets") ctx.checkFails(secondBatch) } diff --git a/htlcswitch/payment_result_test.go b/htlcswitch/payment_result_test.go index 828c21231..96d25bec4 100644 --- a/htlcswitch/payment_result_test.go +++ b/htlcswitch/payment_result_test.go @@ -155,9 +155,7 @@ func TestNetworkResultStore(t *testing.T) { // Let the third one subscribe now. THe result should be received // immediately. sub, err := store.subscribeResult(2) - if err != nil { - t.Fatalf("unable to subscribe: %v", err) - } + require.NoError(t, err, "unable to subscribe") select { case <-sub: case <-time.After(1 * time.Second): @@ -173,14 +171,10 @@ func TestNetworkResultStore(t *testing.T) { // Add the result and try again. err = store.storeResult(3, results[3]) - if err != nil { - t.Fatalf("unable to store result: %v", err) - } + require.NoError(t, err, "unable to store result") _, err = store.getResult(3) - if err != nil { - t.Fatalf("unable to get result: %v", err) - } + require.NoError(t, err, "unable to get result") // Since we don't delete results from the store (yet), make sure we // will get subscriptions for all of them. diff --git a/htlcswitch/switch_test.go b/htlcswitch/switch_test.go index 39ff5c2b0..d2d80ee9e 100644 --- a/htlcswitch/switch_test.go +++ b/htlcswitch/switch_test.go @@ -44,14 +44,10 @@ func TestSwitchAddDuplicateLink(t *testing.T) { alicePeer, err := newMockServer( t, "alice", testStartingHeight, nil, testDefaultDelta, ) - if err != nil { - t.Fatalf("unable to create alice server: %v", err) - } + require.NoError(t, err, "unable to create alice server") s, err := initSwitchWithDB(testStartingHeight, nil) - if err != nil { - t.Fatalf("unable to init switch: %v", err) - } + require.NoError(t, err, "unable to init switch") if err := s.Start(); err != nil { t.Fatalf("unable to start switch: %v", err) } @@ -76,9 +72,7 @@ func TestSwitchAddDuplicateLink(t *testing.T) { // Update the short chan id of the channel, so that the link goes live. aliceChannelLink.setLiveShortChanID(aliceChanID) err = s.UpdateShortChanID(chanID1) - if err != nil { - t.Fatalf("unable to update alice short_chan_id: %v", err) - } + require.NoError(t, err, "unable to update alice short_chan_id") // Alice should have a live link, adding again should fail. 
if err := s.AddLink(aliceChannelLink); err == nil { @@ -104,14 +98,10 @@ func TestSwitchHasActiveLink(t *testing.T) { alicePeer, err := newMockServer( t, "alice", testStartingHeight, nil, testDefaultDelta, ) - if err != nil { - t.Fatalf("unable to create alice server: %v", err) - } + require.NoError(t, err, "unable to create alice server") s, err := initSwitchWithDB(testStartingHeight, nil) - if err != nil { - t.Fatalf("unable to init switch: %v", err) - } + require.NoError(t, err, "unable to init switch") if err := s.Start(); err != nil { t.Fatalf("unable to start switch: %v", err) } @@ -138,9 +128,7 @@ func TestSwitchHasActiveLink(t *testing.T) { // Update the short chan id of the channel, so that the link goes live. aliceChannelLink.setLiveShortChanID(aliceChanID) err = s.UpdateShortChanID(chanID1) - if err != nil { - t.Fatalf("unable to update alice short_chan_id: %v", err) - } + require.NoError(t, err, "unable to update alice short_chan_id") // UpdateShortChanID will cause the mock link to become eligible to // forward. However, we can simulate the event where the short chan id @@ -174,21 +162,15 @@ func TestSwitchSendPending(t *testing.T) { alicePeer, err := newMockServer( t, "alice", testStartingHeight, nil, testDefaultDelta, ) - if err != nil { - t.Fatalf("unable to create alice server: %v", err) - } + require.NoError(t, err, "unable to create alice server") bobPeer, err := newMockServer( t, "bob", testStartingHeight, nil, testDefaultDelta, ) - if err != nil { - t.Fatalf("unable to create bob server: %v", err) - } + require.NoError(t, err, "unable to create bob server") s, err := initSwitchWithDB(testStartingHeight, nil) - if err != nil { - t.Fatalf("unable to init switch: %v", err) - } + require.NoError(t, err, "unable to init switch") if err := s.Start(); err != nil { t.Fatalf("unable to start switch: %v", err) } @@ -215,9 +197,7 @@ func TestSwitchSendPending(t *testing.T) { // Create request which should is being forwarded from Bob channel // link to Alice channel link. preimage, err := genPreimage() - if err != nil { - t.Fatalf("unable to generate preimage: %v", err) - } + require.NoError(t, err, "unable to generate preimage") rhash := sha256.Sum256(preimage[:]) packet := &htlcPacket{ incomingChanID: bobChanID, @@ -269,9 +249,7 @@ func TestSwitchSendPending(t *testing.T) { // move the link to the live state. aliceChannelLink.setLiveShortChanID(aliceChanID) err = s.UpdateShortChanID(chanID1) - if err != nil { - t.Fatalf("unable to update alice short_chan_id: %v", err) - } + require.NoError(t, err, "unable to update alice short_chan_id") // Increment the packet's HTLC index, so that it does not collide with // the prior attempt. 
@@ -298,20 +276,14 @@ func TestSwitchForward(t *testing.T) {
 	alicePeer, err := newMockServer(
 		t, "alice", testStartingHeight, nil, testDefaultDelta,
 	)
-	if err != nil {
-		t.Fatalf("unable to create alice server: %v", err)
-	}
+	require.NoError(t, err, "unable to create alice server")
 	bobPeer, err := newMockServer(
 		t, "bob", testStartingHeight, nil, testDefaultDelta,
 	)
-	if err != nil {
-		t.Fatalf("unable to create bob server: %v", err)
-	}
+	require.NoError(t, err, "unable to create bob server")
 
 	s, err := initSwitchWithDB(testStartingHeight, nil)
-	if err != nil {
-		t.Fatalf("unable to init switch: %v", err)
-	}
+	require.NoError(t, err, "unable to init switch")
 	if err := s.Start(); err != nil {
 		t.Fatalf("unable to start switch: %v", err)
 	}
@@ -335,9 +307,7 @@ func TestSwitchForward(t *testing.T) {
 	// Create request which should be forwarded from Alice channel link to
 	// bob channel link.
 	preimage, err := genPreimage()
-	if err != nil {
-		t.Fatalf("unable to generate preimage: %v", err)
-	}
+	require.NoError(t, err, "unable to generate preimage")
 	rhash := sha256.Sum256(preimage[:])
 	packet := &htlcPacket{
 		incomingChanID: aliceChannelLink.ShortChanID(),
@@ -411,30 +381,20 @@ func TestSwitchForwardFailAfterFullAdd(t *testing.T) {
 	alicePeer, err := newMockServer(
 		t, "alice", testStartingHeight, nil, testDefaultDelta,
 	)
-	if err != nil {
-		t.Fatalf("unable to create alice server: %v", err)
-	}
+	require.NoError(t, err, "unable to create alice server")
 	bobPeer, err := newMockServer(
 		t, "bob", testStartingHeight, nil, testDefaultDelta,
 	)
-	if err != nil {
-		t.Fatalf("unable to create bob server: %v", err)
-	}
+	require.NoError(t, err, "unable to create bob server")
 
 	tempPath, err := ioutil.TempDir("", "circuitdb")
-	if err != nil {
-		t.Fatalf("unable to temporary path: %v", err)
-	}
+	require.NoError(t, err, "unable to create temporary path")
 
 	cdb, err := channeldb.Open(tempPath)
-	if err != nil {
-		t.Fatalf("unable to open channeldb: %v", err)
-	}
+	require.NoError(t, err, "unable to open channeldb")
 
 	s, err := initSwitchWithDB(testStartingHeight, cdb)
-	if err != nil {
-		t.Fatalf("unable to init switch: %v", err)
-	}
+	require.NoError(t, err, "unable to init switch")
 	if err := s.Start(); err != nil {
 		t.Fatalf("unable to start switch: %v", err)
 	}
@@ -522,14 +482,10 @@ func TestSwitchForwardFailAfterFullAdd(t *testing.T) {
 	}
 
 	cdb2, err := channeldb.Open(tempPath)
-	if err != nil {
-		t.Fatalf("unable to reopen channeldb: %v", err)
-	}
+	require.NoError(t, err, "unable to reopen channeldb")
 
 	s2, err := initSwitchWithDB(testStartingHeight, cdb2)
-	if err != nil {
-		t.Fatalf("unable reinit switch: %v", err)
-	}
+	require.NoError(t, err, "unable to reinit switch")
 	if err := s2.Start(); err != nil {
 		t.Fatalf("unable to restart switch: %v", err)
 	}
@@ -610,30 +566,20 @@ func TestSwitchForwardSettleAfterFullAdd(t *testing.T) {
 	alicePeer, err := newMockServer(
 		t, "alice", testStartingHeight, nil, testDefaultDelta,
 	)
-	if err != nil {
-		t.Fatalf("unable to create alice server: %v", err)
-	}
+	require.NoError(t, err, "unable to create alice server")
 	bobPeer, err := newMockServer(
 		t, "bob", testStartingHeight, nil, testDefaultDelta,
 	)
-	if err != nil {
-		t.Fatalf("unable to create bob server: %v", err)
-	}
+	require.NoError(t, err, "unable to create bob server")
 
 	tempPath, err := ioutil.TempDir("", "circuitdb")
-	if err != nil {
-		t.Fatalf("unable to temporary path: %v", err)
-	}
+	require.NoError(t, err, "unable to create temporary path")
 
 	cdb, err := channeldb.Open(tempPath)
-	if err != nil {
-		t.Fatalf("unable to open channeldb: %v", err)
-	}
+	require.NoError(t, err, "unable to open channeldb")
 
 	s, err := initSwitchWithDB(testStartingHeight, cdb)
-	if err != nil {
-		t.Fatalf("unable to init switch: %v", err)
-	}
+	require.NoError(t, err, "unable to init switch")
 	if err := s.Start(); err != nil {
 		t.Fatalf("unable to start switch: %v", err)
 	}
@@ -721,14 +667,10 @@ func TestSwitchForwardSettleAfterFullAdd(t *testing.T) {
 	}
 
 	cdb2, err := channeldb.Open(tempPath)
-	if err != nil {
-		t.Fatalf("unable to reopen channeldb: %v", err)
-	}
+	require.NoError(t, err, "unable to reopen channeldb")
 
 	s2, err := initSwitchWithDB(testStartingHeight, cdb2)
-	if err != nil {
-		t.Fatalf("unable reinit switch: %v", err)
-	}
+	require.NoError(t, err, "unable to reinit switch")
 	if err := s2.Start(); err != nil {
 		t.Fatalf("unable to restart switch: %v", err)
 	}
@@ -812,30 +754,20 @@ func TestSwitchForwardDropAfterFullAdd(t *testing.T) {
 	alicePeer, err := newMockServer(
 		t, "alice", testStartingHeight, nil, testDefaultDelta,
 	)
-	if err != nil {
-		t.Fatalf("unable to create alice server: %v", err)
-	}
+	require.NoError(t, err, "unable to create alice server")
 	bobPeer, err := newMockServer(
 		t, "bob", testStartingHeight, nil, testDefaultDelta,
 	)
-	if err != nil {
-		t.Fatalf("unable to create bob server: %v", err)
-	}
+	require.NoError(t, err, "unable to create bob server")
 
 	tempPath, err := ioutil.TempDir("", "circuitdb")
-	if err != nil {
-		t.Fatalf("unable to temporary path: %v", err)
-	}
+	require.NoError(t, err, "unable to create temporary path")
 
 	cdb, err := channeldb.Open(tempPath)
-	if err != nil {
-		t.Fatalf("unable to open channeldb: %v", err)
-	}
+	require.NoError(t, err, "unable to open channeldb")
 
 	s, err := initSwitchWithDB(testStartingHeight, cdb)
-	if err != nil {
-		t.Fatalf("unable to init switch: %v", err)
-	}
+	require.NoError(t, err, "unable to init switch")
 	if err := s.Start(); err != nil {
 		t.Fatalf("unable to start switch: %v", err)
 	}
@@ -915,14 +847,10 @@ func TestSwitchForwardDropAfterFullAdd(t *testing.T) {
 	}
 
 	cdb2, err := channeldb.Open(tempPath)
-	if err != nil {
-		t.Fatalf("unable to reopen channeldb: %v", err)
-	}
+	require.NoError(t, err, "unable to reopen channeldb")
 
 	s2, err := initSwitchWithDB(testStartingHeight, cdb2)
-	if err != nil {
-		t.Fatalf("unable reinit switch: %v", err)
-	}
+	require.NoError(t, err, "unable to reinit switch")
 	if err := s2.Start(); err != nil {
 		t.Fatalf("unable to restart switch: %v", err)
 	}
@@ -977,30 +905,20 @@ func TestSwitchForwardFailAfterHalfAdd(t *testing.T) {
 	alicePeer, err := newMockServer(
 		t, "alice", testStartingHeight, nil, testDefaultDelta,
 	)
-	if err != nil {
-		t.Fatalf("unable to create alice server: %v", err)
-	}
+	require.NoError(t, err, "unable to create alice server")
 	bobPeer, err := newMockServer(
 		t, "bob", testStartingHeight, nil, testDefaultDelta,
 	)
-	if err != nil {
-		t.Fatalf("unable to create bob server: %v", err)
-	}
+	require.NoError(t, err, "unable to create bob server")
 
 	tempPath, err := ioutil.TempDir("", "circuitdb")
-	if err != nil {
-		t.Fatalf("unable to temporary path: %v", err)
-	}
+	require.NoError(t, err, "unable to create temporary path")
 
 	cdb, err := channeldb.Open(tempPath)
-	if err != nil {
-		t.Fatalf("unable to open channeldb: %v", err)
-	}
+	require.NoError(t, err, "unable to open channeldb")
 
 	s, err := initSwitchWithDB(testStartingHeight, cdb)
-	if err != nil {
-		t.Fatalf("unable to init switch: %v", err)
-	}
+	require.NoError(t, err, "unable to init switch")
 	if err := s.Start(); err != nil {
 		t.Fatalf("unable to start switch: %v", err)
 	}
@@ -1075,14 +993,10 @@ func TestSwitchForwardFailAfterHalfAdd(t *testing.T) {
 	}
 
 	cdb2, err := channeldb.Open(tempPath)
-	if err != nil {
-		t.Fatalf("unable to reopen channeldb: %v", err)
-	}
+	require.NoError(t, err, "unable to reopen channeldb")
 
 	s2, err := initSwitchWithDB(testStartingHeight, cdb2)
-	if err != nil {
-		t.Fatalf("unable reinit switch: %v", err)
-	}
+	require.NoError(t, err, "unable to reinit switch")
 	if err := s2.Start(); err != nil {
 		t.Fatalf("unable to restart switch: %v", err)
 	}
@@ -1143,30 +1057,20 @@ func TestSwitchForwardCircuitPersistence(t *testing.T) {
 	alicePeer, err := newMockServer(
 		t, "alice", testStartingHeight, nil, testDefaultDelta,
 	)
-	if err != nil {
-		t.Fatalf("unable to create alice server: %v", err)
-	}
+	require.NoError(t, err, "unable to create alice server")
 	bobPeer, err := newMockServer(
 		t, "bob", testStartingHeight, nil, testDefaultDelta,
 	)
-	if err != nil {
-		t.Fatalf("unable to create bob server: %v", err)
-	}
+	require.NoError(t, err, "unable to create bob server")
 
 	tempPath, err := ioutil.TempDir("", "circuitdb")
-	if err != nil {
-		t.Fatalf("unable to temporary path: %v", err)
-	}
+	require.NoError(t, err, "unable to create temporary path")
 
 	cdb, err := channeldb.Open(tempPath)
-	if err != nil {
-		t.Fatalf("unable to open channeldb: %v", err)
-	}
+	require.NoError(t, err, "unable to open channeldb")
 
 	s, err := initSwitchWithDB(testStartingHeight, cdb)
-	if err != nil {
-		t.Fatalf("unable to init switch: %v", err)
-	}
+	require.NoError(t, err, "unable to init switch")
 	if err := s.Start(); err != nil {
 		t.Fatalf("unable to start switch: %v", err)
 	}
@@ -1240,14 +1144,10 @@ func TestSwitchForwardCircuitPersistence(t *testing.T) {
 	}
 
 	cdb2, err := channeldb.Open(tempPath)
-	if err != nil {
-		t.Fatalf("unable to reopen channeldb: %v", err)
-	}
+	require.NoError(t, err, "unable to reopen channeldb")
 
 	s2, err := initSwitchWithDB(testStartingHeight, cdb2)
-	if err != nil {
-		t.Fatalf("unable reinit switch: %v", err)
-	}
+	require.NoError(t, err, "unable to reinit switch")
 	if err := s2.Start(); err != nil {
 		t.Fatalf("unable to restart switch: %v", err)
 	}
@@ -1333,14 +1233,10 @@ func TestSwitchForwardCircuitPersistence(t *testing.T) {
 	}
 
 	cdb3, err := channeldb.Open(tempPath)
-	if err != nil {
-		t.Fatalf("unable to reopen channeldb: %v", err)
-	}
+	require.NoError(t, err, "unable to reopen channeldb")
 
 	s3, err := initSwitchWithDB(testStartingHeight, cdb3)
-	if err != nil {
-		t.Fatalf("unable reinit switch: %v", err)
-	}
+	require.NoError(t, err, "unable to reinit switch")
 	if err := s3.Start(); err != nil {
 		t.Fatalf("unable to restart switch: %v", err)
 	}
@@ -1617,20 +1513,14 @@ func testSkipIneligibleLinksMultiHopForward(t *testing.T,
 	alicePeer, err := newMockServer(
 		t, "alice", testStartingHeight, nil, testDefaultDelta,
 	)
-	if err != nil {
-		t.Fatalf("unable to create alice server: %v", err)
-	}
+	require.NoError(t, err, "unable to create alice server")
 	bobPeer, err := newMockServer(
 		t, "bob", testStartingHeight, nil, testDefaultDelta,
 	)
-	if err != nil {
-		t.Fatalf("unable to create bob server: %v", err)
-	}
+	require.NoError(t, err, "unable to create bob server")
 
 	s, err := initSwitchWithDB(testStartingHeight, nil)
-	if err != nil {
-		t.Fatalf("unable to init switch: %v", err)
-	}
+	require.NoError(t, err, "unable to init switch")
 	if err := s.Start(); err != nil {
 		t.Fatalf("unable to start switch: %v", err)
 	}
@@ -1747,14 +1637,10 @@ func testSkipLinkLocalForward(t *testing.T, eligible bool,
 	alicePeer, err := newMockServer(
 		t, "alice", testStartingHeight, nil, testDefaultDelta,
 	)
-	if err != nil {
-		t.Fatalf("unable to create alice server: %v", err)
-	}
+	require.NoError(t, err, "unable to create alice server")
 
 	s, err := initSwitchWithDB(testStartingHeight, nil)
-	if err != nil {
-		t.Fatalf("unable to init switch: %v", err)
-	}
+	require.NoError(t, err, "unable to init switch")
 	if err := s.Start(); err != nil {
 		t.Fatalf("unable to start switch: %v", err)
 	}
@@ -1773,9 +1659,7 @@ func testSkipLinkLocalForward(t *testing.T, eligible bool,
 	}
 
 	preimage, err := genPreimage()
-	if err != nil {
-		t.Fatalf("unable to generate preimage: %v", err)
-	}
+	require.NoError(t, err, "unable to generate preimage")
 	rhash := sha256.Sum256(preimage[:])
 	addMsg := &lnwire.UpdateAddHTLC{
 		PaymentHash: rhash,
@@ -1803,20 +1687,14 @@ func TestSwitchCancel(t *testing.T) {
 	alicePeer, err := newMockServer(
 		t, "alice", testStartingHeight, nil, testDefaultDelta,
 	)
-	if err != nil {
-		t.Fatalf("unable to create alice server: %v", err)
-	}
+	require.NoError(t, err, "unable to create alice server")
 	bobPeer, err := newMockServer(
 		t, "bob", testStartingHeight, nil, testDefaultDelta,
 	)
-	if err != nil {
-		t.Fatalf("unable to create bob server: %v", err)
-	}
+	require.NoError(t, err, "unable to create bob server")
 
 	s, err := initSwitchWithDB(testStartingHeight, nil)
-	if err != nil {
-		t.Fatalf("unable to init switch: %v", err)
-	}
+	require.NoError(t, err, "unable to init switch")
 	if err := s.Start(); err != nil {
 		t.Fatalf("unable to start switch: %v", err)
 	}
@@ -1840,9 +1718,7 @@ func TestSwitchCancel(t *testing.T) {
 	// Create request which should be forwarder from alice channel link
 	// to bob channel link.
 	preimage, err := genPreimage()
-	if err != nil {
-		t.Fatalf("unable to generate preimage: %v", err)
-	}
+	require.NoError(t, err, "unable to generate preimage")
 	rhash := sha256.Sum256(preimage[:])
 	request := &htlcPacket{
 		incomingChanID: aliceChannelLink.ShortChanID(),
@@ -1920,20 +1796,14 @@ func TestSwitchAddSamePayment(t *testing.T) {
 	alicePeer, err := newMockServer(
 		t, "alice", testStartingHeight, nil, testDefaultDelta,
 	)
-	if err != nil {
-		t.Fatalf("unable to create alice server: %v", err)
-	}
+	require.NoError(t, err, "unable to create alice server")
 	bobPeer, err := newMockServer(
 		t, "bob", testStartingHeight, nil, testDefaultDelta,
 	)
-	if err != nil {
-		t.Fatalf("unable to create bob server: %v", err)
-	}
+	require.NoError(t, err, "unable to create bob server")
 
 	s, err := initSwitchWithDB(testStartingHeight, nil)
-	if err != nil {
-		t.Fatalf("unable to init switch: %v", err)
-	}
+	require.NoError(t, err, "unable to init switch")
 	if err := s.Start(); err != nil {
 		t.Fatalf("unable to start switch: %v", err)
 	}
@@ -1955,9 +1825,7 @@ func TestSwitchAddSamePayment(t *testing.T) {
 	// Create request which should be forwarder from alice channel link
 	// to bob channel link.
 	preimage, err := genPreimage()
-	if err != nil {
-		t.Fatalf("unable to generate preimage: %v", err)
-	}
+	require.NoError(t, err, "unable to generate preimage")
 	rhash := sha256.Sum256(preimage[:])
 	request := &htlcPacket{
 		incomingChanID: aliceChannelLink.ShortChanID(),
@@ -2083,14 +1951,10 @@ func TestSwitchSendPayment(t *testing.T) {
 	alicePeer, err := newMockServer(
 		t, "alice", testStartingHeight, nil, testDefaultDelta,
 	)
-	if err != nil {
-		t.Fatalf("unable to create alice server: %v", err)
-	}
+	require.NoError(t, err, "unable to create alice server")
 
 	s, err := initSwitchWithDB(testStartingHeight, nil)
-	if err != nil {
-		t.Fatalf("unable to init switch: %v", err)
-	}
+	require.NoError(t, err, "unable to init switch")
 	if err := s.Start(); err != nil {
 		t.Fatalf("unable to start switch: %v", err)
 	}
@@ -2108,9 +1972,7 @@ func TestSwitchSendPayment(t *testing.T) {
 	// Create request which should be forwarder from alice channel link
 	// to bob channel link.
 	preimage, err := genPreimage()
-	if err != nil {
-		t.Fatalf("unable to generate preimage: %v", err)
-	}
+	require.NoError(t, err, "unable to generate preimage")
 	rhash := sha256.Sum256(preimage[:])
 	update := &lnwire.UpdateAddHTLC{
 		PaymentHash: rhash,
@@ -2183,9 +2045,7 @@ func TestSwitchSendPayment(t *testing.T) {
 	obfuscator := NewMockObfuscator()
 	failure := lnwire.NewFailIncorrectDetails(update.Amount, 100)
 	reason, err := obfuscator.EncryptFirstHop(failure)
-	if err != nil {
-		t.Fatalf("unable obfuscate failure: %v", err)
-	}
+	require.NoError(t, err, "unable to obfuscate failure")
 
 	if s.IsForwardedHTLC(aliceChannelLink.ShortChanID(), update.ID) {
 		t.Fatal("htlc should be identified as not forwarded")
@@ -2224,9 +2084,7 @@ func TestLocalPaymentNoForwardingEvents(t *testing.T) {
 	channels, cleanUp, _, err := createClusterChannels(
 		btcutil.SatoshiPerBitcoin*3,
 		btcutil.SatoshiPerBitcoin*5)
-	if err != nil {
-		t.Fatalf("unable to create channel: %v", err)
-	}
+	require.NoError(t, err, "unable to create channel")
 	defer cleanUp()
 
 	n := newThreeHopNetwork(t, channels.aliceToBob, channels.bobToAlice,
@@ -2250,9 +2108,7 @@ func TestLocalPaymentNoForwardingEvents(t *testing.T) {
 	err = n.makePayment(
 		n.aliceServer, receiver, firstHop, hops, amount,
 		htlcAmt, totalTimelock,
 	).Wait(30 * time.Second)
-	if err != nil {
-		t.Fatalf("unable to make the payment: %v", err)
-	}
+	require.NoError(t, err, "unable to make the payment")
 
 	// At this point, we'll forcibly stop the three hop network. Doing
 	// this will cause any pending forwarding events to be flushed by the
@@ -2286,9 +2142,7 @@ func TestMultiHopPaymentForwardingEvents(t *testing.T) {
 	channels, cleanUp, _, err := createClusterChannels(
 		btcutil.SatoshiPerBitcoin*3,
 		btcutil.SatoshiPerBitcoin*5)
-	if err != nil {
-		t.Fatalf("unable to create channel: %v", err)
-	}
+	require.NoError(t, err, "unable to create channel")
 	defer cleanUp()
 
 	n := newThreeHopNetwork(t, channels.aliceToBob, channels.bobToAlice,
@@ -2443,9 +2297,7 @@ func TestUpdateFailMalformedHTLCErrorConversion(t *testing.T) {
 	channels, cleanUp, _, err := createClusterChannels(
 		btcutil.SatoshiPerBitcoin*3, btcutil.SatoshiPerBitcoin*5,
 	)
-	if err != nil {
-		t.Fatalf("unable to create channel: %v", err)
-	}
+	require.NoError(t, err, "unable to create channel")
 	defer cleanUp()
 
 	n := newThreeHopNetwork(
@@ -2518,9 +2370,7 @@ func TestSwitchGetPaymentResult(t *testing.T) {
 	preimg[0] = 3
 
 	s, err := initSwitchWithDB(testStartingHeight, nil)
-	if err != nil {
-		t.Fatalf("unable to init switch: %v", err)
-	}
+	require.NoError(t, err, "unable to init switch")
 	if err := s.Start(); err != nil {
 		t.Fatalf("unable to start switch: %v", err)
 	}
@@ -2549,9 +2399,7 @@ func TestSwitchGetPaymentResult(t *testing.T) {
 	resultChan, err := s.GetPaymentResult(
 		paymentID, lntypes.Hash{}, newMockDeobfuscator(),
 	)
-	if err != nil {
-		t.Fatalf("unable to get payment result: %v", err)
-	}
+	require.NoError(t, err, "unable to get payment result")
 
 	// Add the result to the store.
 	n := &networkResult{
 		msg: &lnwire.UpdateFulfillHTLC{
 			PaymentPreimage: preimg,
 		},
 		unencrypted:  true,
 		isResolution: false,
 	}
 
 	err = s.networkResults.storeResult(paymentID, n)
-	if err != nil {
-		t.Fatalf("unable to store result: %v", err)
-	}
+	require.NoError(t, err, "unable to store result")
 
 	// The result should be available.
 	select {
@@ -2594,9 +2440,7 @@ func TestSwitchGetPaymentResult(t *testing.T) {
 	resultChan, err = s.GetPaymentResult(
 		paymentID, lntypes.Hash{}, newMockDeobfuscator(),
 	)
-	if err != nil {
-		t.Fatalf("unable to get payment result: %v", err)
-	}
+	require.NoError(t, err, "unable to get payment result")
 
 	select {
 	case res, ok := <-resultChan:
@@ -2626,14 +2470,10 @@ func TestInvalidFailure(t *testing.T) {
 	alicePeer, err := newMockServer(
 		t, "alice", testStartingHeight, nil, testDefaultDelta,
 	)
-	if err != nil {
-		t.Fatalf("unable to create alice server: %v", err)
-	}
+	require.NoError(t, err, "unable to create alice server")
 
 	s, err := initSwitchWithDB(testStartingHeight, nil)
-	if err != nil {
-		t.Fatalf("unable to init switch: %v", err)
-	}
+	require.NoError(t, err, "unable to init switch")
 	if err := s.Start(); err != nil {
 		t.Fatalf("unable to start switch: %v", err)
 	}
@@ -2651,9 +2491,7 @@ func TestInvalidFailure(t *testing.T) {
 	// Create a request which should be forwarded to the mock channel link.
 	preimage, err := genPreimage()
-	if err != nil {
-		t.Fatalf("unable to generate preimage: %v", err)
-	}
+	require.NoError(t, err, "unable to generate preimage")
 	rhash := sha256.Sum256(preimage[:])
 	update := &lnwire.UpdateAddHTLC{
 		PaymentHash: rhash,
 	}
 
 	err = s.SendHTLC(
 		aliceChannelLink.ShortChanID(), paymentID, update,
 	)
-	if err != nil {
-		t.Fatalf("unable to send payment: %v", err)
-	}
+	require.NoError(t, err, "unable to send payment")
 
 	// Catch the packet and complete the circuit so that the switch is ready
 	// for a response.
@@ -2860,9 +2696,7 @@ func testHtcNotifier(t *testing.T, testOpts []serverOption,
 	iterations int,
 	channels, cleanUp, _, err := createClusterChannels(
 		btcutil.SatoshiPerBitcoin*3,
 		btcutil.SatoshiPerBitcoin*5)
-	if err != nil {
-		t.Fatalf("unable to create channel: %v", err)
-	}
+	require.NoError(t, err, "unable to create channel")
 	defer cleanUp()
 
 	// Mock time so that all events are reported with a static timestamp.
@@ -3009,9 +2843,7 @@ func (n *threeHopNetwork) sendThreeHopPayment(t *testing.T) (*lnwire.UpdateAddHT
 	}
 
 	err = n.carolServer.registry.AddInvoice(*invoice, htlc.PaymentHash)
-	if err != nil {
-		t.Fatalf("unable to add invoice in carol registry: %v", err)
-	}
+	require.NoError(t, err, "unable to add invoice in carol registry")
 
 	if err := n.aliceServer.htlcSwitch.SendHTLC(
 		n.firstBobChannelLink.ShortChanID(), pid, htlc,
@@ -3219,30 +3051,20 @@ func TestSwitchHoldForward(t *testing.T) {
 	alicePeer, err := newMockServer(
 		t, "alice", testStartingHeight, nil, testDefaultDelta,
 	)
-	if err != nil {
-		t.Fatalf("unable to create alice server: %v", err)
-	}
+	require.NoError(t, err, "unable to create alice server")
 	bobPeer, err := newMockServer(
 		t, "bob", testStartingHeight, nil, testDefaultDelta,
 	)
-	if err != nil {
-		t.Fatalf("unable to create bob server: %v", err)
-	}
+	require.NoError(t, err, "unable to create bob server")
 
 	tempPath, err := ioutil.TempDir("", "circuitdb")
-	if err != nil {
-		t.Fatalf("unable to temporary path: %v", err)
-	}
+	require.NoError(t, err, "unable to create temporary path")
 
 	cdb, err := channeldb.Open(tempPath)
-	if err != nil {
-		t.Fatalf("unable to open channeldb: %v", err)
-	}
+	require.NoError(t, err, "unable to open channeldb")
 
 	s, err := initSwitchWithDB(testStartingHeight, cdb)
-	if err != nil {
-		t.Fatalf("unable to init switch: %v", err)
-	}
+	require.NoError(t, err, "unable to init switch")
 	if err := s.Start(); err != nil {
 		t.Fatalf("unable to start switch: %v", err)
 	}
@@ -3320,9 +3142,7 @@ func TestSwitchHoldForward(t *testing.T) {
 	packet.incomingTimeout = testStartingHeight + cltvRejectDelta - 1
 
 	err = switchForwardInterceptor.ForwardPackets(linkQuit, false, packet)
-	if err != nil {
-		t.Fatalf("can't forward htlc packet: %v", err)
-	}
+	require.NoError(t, err, "can't forward htlc packet")
 	assertOutgoingLinkReceive(t, bobChannelLink, false)
 	assertOutgoingLinkReceiveIntercepted(t, aliceChannelLink)
 	assertNumCircuits(t, s, 0, 0)
@@ -3340,9 +3160,7 @@ func TestSwitchHoldForward(t *testing.T) {
 	}
 
 	err = switchForwardInterceptor.ForwardPackets(linkQuit, false, packet)
-	if err != nil {
-		t.Fatalf("can't forward htlc packet: %v", err)
-	}
+	require.NoError(t, err, "can't forward htlc packet")
 	receivedPkt := assertOutgoingLinkReceive(t, bobChannelLink, true)
 	assertNumCircuits(t, s, 1, 1)
diff --git a/htlcswitch/test_utils.go b/htlcswitch/test_utils.go
index 7dfd1599c..7000c119e 100644
--- a/htlcswitch/test_utils.go
+++ b/htlcswitch/test_utils.go
@@ -39,6 +39,7 @@ import (
 	"github.com/lightningnetwork/lnd/lnwire"
 	"github.com/lightningnetwork/lnd/shachain"
 	"github.com/lightningnetwork/lnd/ticker"
+	"github.com/stretchr/testify/require"
 )
 
 var (
@@ -966,21 +967,15 @@ func newThreeHopNetwork(t testing.TB, aliceChannel, firstBobChannel,
 	aliceServer, err := newMockServer(
 		t, "alice", startingHeight, aliceDb, hopNetwork.defaultDelta,
 	)
-	if err != nil {
-		t.Fatalf("unable to create alice server: %v", err)
-	}
+	require.NoError(t, err, "unable to create alice server")
 	bobServer, err := newMockServer(
 		t, "bob", startingHeight, bobDb, hopNetwork.defaultDelta,
 	)
-	if err != nil {
-		t.Fatalf("unable to create bob server: %v", err)
-	}
+	require.NoError(t, err, "unable to create bob server")
 	carolServer, err := newMockServer(
 		t, "carol", startingHeight, carolDb, hopNetwork.defaultDelta,
 	)
-	if err != nil {
-		t.Fatalf("unable to create carol server: %v", err)
-	}
+	require.NoError(t, err, "unable to create carol server")
 
 	// Apply all additional functional options to the servers before
 	// creating any links.
@@ -1231,15 +1226,11 @@ func newTwoHopNetwork(t testing.TB,
 	aliceServer, err := newMockServer(
 		t, "alice", startingHeight, aliceDb, hopNetwork.defaultDelta,
 	)
-	if err != nil {
-		t.Fatalf("unable to create alice server: %v", err)
-	}
+	require.NoError(t, err, "unable to create alice server")
 	bobServer, err := newMockServer(
 		t, "bob", startingHeight, bobDb, hopNetwork.defaultDelta,
 	)
-	if err != nil {
-		t.Fatalf("unable to create bob server: %v", err)
-	}
+	require.NoError(t, err, "unable to create bob server")
 
 	// Create mock decoder instead of sphinx one in order to mock the route
 	// which htlc should follow.
diff --git a/input/script_utils_test.go b/input/script_utils_test.go
index a99873af8..5327a7ba7 100644
--- a/input/script_utils_test.go
+++ b/input/script_utils_test.go
@@ -28,9 +28,7 @@ func assertEngineExecution(t *testing.T, testNum int, valid bool,
 	// Get a new VM to execute.
 	vm, err := newEngine()
-	if err != nil {
-		t.Fatalf("unable to create engine: %v", err)
-	}
+	require.NoError(t, err, "unable to create engine")
 
 	// Execute the VM, only go on to the step-by-step execution if
 	// it doesn't validate as expected.
@@ -42,9 +40,7 @@ func assertEngineExecution(t *testing.T, testNum int, valid bool,
 	// Now that the execution didn't match what we expected, fetch a new VM
 	// to step through.
 	vm, err = newEngine()
-	if err != nil {
-		t.Fatalf("unable to create engine: %v", err)
-	}
+	require.NoError(t, err, "unable to create engine")
 
 	// This buffer will trace execution of the Script, dumping out
 	// to stdout.
@@ -178,9 +174,7 @@ func TestHTLCSenderSpendValidation(t *testing.T) {
 	// doesn't need to exist, as we'll only be validating spending from the
 	// transaction that references this.
 	txid, err := chainhash.NewHash(testHdSeed.CloneBytes())
-	if err != nil {
-		t.Fatalf("unable to create txid: %v", err)
-	}
+	require.NoError(t, err, "unable to create txid")
 	fundingOut := &wire.OutPoint{
 		Hash:  *txid,
 		Index: 50,
 	}
@@ -580,9 +574,7 @@ func TestHTLCReceiverSpendValidation(t *testing.T) {
 	// doesn't need to exist, as we'll only be validating spending from the
 	// transaction that references this.
 	txid, err := chainhash.NewHash(testHdSeed.CloneBytes())
-	if err != nil {
-		t.Fatalf("unable to create txid: %v", err)
-	}
+	require.NoError(t, err, "unable to create txid")
 	fundingOut := &wire.OutPoint{
 		Hash:  *txid,
 		Index: 50,
 	}
@@ -1009,9 +1001,7 @@ func TestSecondLevelHtlcSpends(t *testing.T) {
 	// Next, craft a fake HTLC outpoint that we'll use to generate the
 	// sweeping transaction using.
 	txid, err := chainhash.NewHash(testHdSeed.CloneBytes())
-	if err != nil {
-		t.Fatalf("unable to create txid: %v", err)
-	}
+	require.NoError(t, err, "unable to create txid")
 	htlcOutPoint := &wire.OutPoint{
 		Hash:  *txid,
 		Index: 0,
 	}
@@ -1039,13 +1029,9 @@ func TestSecondLevelHtlcSpends(t *testing.T) {
 	// sweep the output after a particular delay.
 	htlcWitnessScript, err := SecondLevelHtlcScript(revocationKey, delayKey,
 		claimDelay)
-	if err != nil {
-		t.Fatalf("unable to create htlc script: %v", err)
-	}
+	require.NoError(t, err, "unable to create htlc script")
 	htlcPkScript, err := WitnessScriptHash(htlcWitnessScript)
-	if err != nil {
-		t.Fatalf("unable to create htlc output: %v", err)
-	}
+	require.NoError(t, err, "unable to create htlc output")
 
 	htlcOutput := &wire.TxOut{
 		PkScript: htlcPkScript,
@@ -1644,21 +1630,15 @@ func TestCommitSpendToRemoteConfirmed(t *testing.T) {
 	aliceKeyPriv, aliceKeyPub := btcec.PrivKeyFromBytes(testWalletPrivKey)
 
 	txid, err := chainhash.NewHash(testHdSeed.CloneBytes())
-	if err != nil {
-		t.Fatalf("unable to create txid: %v", err)
-	}
+	require.NoError(t, err, "unable to create txid")
 	commitOut := &wire.OutPoint{
 		Hash:  *txid,
 		Index: 0,
 	}
 	commitScript, err := CommitScriptToRemoteConfirmed(aliceKeyPub)
-	if err != nil {
-		t.Fatalf("unable to create htlc script: %v", err)
-	}
+	require.NoError(t, err, "unable to create htlc script")
 	commitPkScript, err := WitnessScriptHash(commitScript)
-	if err != nil {
-		t.Fatalf("unable to create htlc output: %v", err)
-	}
+	require.NoError(t, err, "unable to create htlc output")
 
 	commitOutput := &wire.TxOut{
 		PkScript: commitPkScript,
@@ -1902,9 +1882,7 @@ func TestSpendAnchor(t *testing.T) {
 	// Create a fake anchor outpoint that we'll use to generate the
 	// sweeping transaction.
 	txid, err := chainhash.NewHash(testHdSeed.CloneBytes())
-	if err != nil {
-		t.Fatalf("unable to create txid: %v", err)
-	}
+	require.NoError(t, err, "unable to create txid")
 	anchorOutPoint := &wire.OutPoint{
 		Hash:  *txid,
 		Index: 0,
 	}
@@ -1922,13 +1900,9 @@ func TestSpendAnchor(t *testing.T) {
 	// Generate the anchor script that can be spent by Alice immediately,
 	// or by anyone after 16 blocks.
 	anchorScript, err := CommitScriptAnchor(aliceKeyPub)
-	if err != nil {
-		t.Fatalf("unable to create htlc script: %v", err)
-	}
+	require.NoError(t, err, "unable to create htlc script")
 	anchorPkScript, err := WitnessScriptHash(anchorScript)
-	if err != nil {
-		t.Fatalf("unable to create htlc output: %v", err)
-	}
+	require.NoError(t, err, "unable to create htlc output")
 
 	anchorOutput := &wire.TxOut{
 		PkScript: anchorPkScript,
@@ -2011,21 +1985,13 @@ func TestSpecificationKeyDerivation(t *testing.T) {
 	)
 
 	baseSecret, err := privkeyFromHex(baseSecretHex)
-	if err != nil {
-		t.Fatalf("Failed to parse serialized privkey: %v", err)
-	}
+	require.NoError(t, err, "Failed to parse serialized privkey")
 	perCommitmentSecret, err := privkeyFromHex(perCommitmentSecretHex)
-	if err != nil {
-		t.Fatalf("Failed to parse serialized privkey: %v", err)
-	}
+	require.NoError(t, err, "Failed to parse serialized privkey")
 	basePoint, err := pubkeyFromHex(basePointHex)
-	if err != nil {
-		t.Fatalf("Failed to parse serialized pubkey: %v", err)
-	}
+	require.NoError(t, err, "Failed to parse serialized pubkey")
 	perCommitmentPoint, err := pubkeyFromHex(perCommitmentPointHex)
-	if err != nil {
-		t.Fatalf("Failed to parse serialized pubkey: %v", err)
-	}
+	require.NoError(t, err, "Failed to parse serialized pubkey")
 
 	// name: derivation of key from basepoint and per_commitment_point
 	const expectedLocalKeyHex = "0235f2dbfaa89b57ec7b055afe29849ef7ddfeb1cefdb9ebdc43f5494984db29e5"
diff --git a/input/size_test.go b/input/size_test.go
index c259284e3..be7e33459 100644
--- a/input/size_test.go
+++ b/input/size_test.go
@@ -56,42 +56,26 @@ func TestTxWeightEstimator(t *testing.T) {
 	p2pkhAddr, err := btcutil.NewAddressPubKeyHash(
 		make([]byte, 20), netParams)
-	if err != nil {
-		t.Fatalf("Failed to generate address: %v", err)
-	}
+	require.NoError(t, err, "Failed to generate address")
 	p2pkhScript, err := txscript.PayToAddrScript(p2pkhAddr)
-	if err != nil {
-		t.Fatalf("Failed to generate scriptPubKey: %v", err)
-	}
+	require.NoError(t, err, "Failed to generate scriptPubKey")
 
 	p2wkhAddr, err := btcutil.NewAddressWitnessPubKeyHash(
 		make([]byte, 20), netParams)
-	if err != nil {
-		t.Fatalf("Failed to generate address: %v", err)
-	}
+	require.NoError(t, err, "Failed to generate address")
 	p2wkhScript, err := txscript.PayToAddrScript(p2wkhAddr)
-	if err != nil {
-		t.Fatalf("Failed to generate scriptPubKey: %v", err)
-	}
+	require.NoError(t, err, "Failed to generate scriptPubKey")
 
 	p2wshAddr, err := btcutil.NewAddressWitnessScriptHash(
 		make([]byte, 32), netParams)
-	if err != nil {
-		t.Fatalf("Failed to generate address: %v", err)
-	}
+	require.NoError(t, err, "Failed to generate address")
 	p2wshScript, err := txscript.PayToAddrScript(p2wshAddr)
-	if err != nil {
-		t.Fatalf("Failed to generate scriptPubKey: %v", err)
-	}
+	require.NoError(t, err, "Failed to generate scriptPubKey")
 
 	p2shAddr, err := btcutil.NewAddressScriptHash([]byte{0}, netParams)
-	if err != nil {
-		t.Fatalf("Failed to generate address: %v", err)
-	}
+	require.NoError(t, err, "Failed to generate address")
 	p2shScript, err := txscript.PayToAddrScript(p2shAddr)
-	if err != nil {
-		t.Fatalf("Failed to generate scriptPubKey: %v", err)
-	}
+	require.NoError(t, err, "Failed to generate scriptPubKey")
 
 	testCases := []struct {
 		numP2PKHInputs int
diff --git a/invoices/invoice_expiry_watcher_test.go b/invoices/invoice_expiry_watcher_test.go
index 63ddfb922..9f32ecd48 100644
--- a/invoices/invoice_expiry_watcher_test.go
+++ b/invoices/invoice_expiry_watcher_test.go
@@ -9,6 +9,7 @@ import (
"github.com/lightningnetwork/lnd/channeldb" "github.com/lightningnetwork/lnd/clock" "github.com/lightningnetwork/lnd/lntypes" + "github.com/stretchr/testify/require" ) // invoiceExpiryWatcherTest holds a test fixture and implements checks @@ -72,9 +73,7 @@ func newInvoiceExpiryWatcherTest(t *testing.T, now time.Time, return nil }) - if err != nil { - t.Fatalf("cannot start InvoiceExpiryWatcher: %v", err) - } + require.NoError(t, err, "cannot start InvoiceExpiryWatcher") return test } diff --git a/invoices/invoiceregistry_test.go b/invoices/invoiceregistry_test.go index dff66a7ce..0c2b0c2c5 100644 --- a/invoices/invoiceregistry_test.go +++ b/invoices/invoiceregistry_test.go @@ -132,9 +132,7 @@ func TestSettleInvoice(t *testing.T) { testInvoicePaymentHash, amtPaid, testHtlcExpiry, testCurrentHeight, getCircuitKey(0), hodlChan, testPayload, ) - if err != nil { - t.Fatalf("unexpected NotifyExitHopHtlc error: %v", err) - } + require.NoError(t, err, "unexpected NotifyExitHopHtlc error") require.NotNil(t, resolution) settleResolution = checkSettleResolution( t, resolution, testInvoicePreimage, @@ -148,9 +146,7 @@ func TestSettleInvoice(t *testing.T) { testInvoicePaymentHash, amtPaid+600, testHtlcExpiry, testCurrentHeight, getCircuitKey(1), hodlChan, testPayload, ) - if err != nil { - t.Fatalf("unexpected NotifyExitHopHtlc error: %v", err) - } + require.NoError(t, err, "unexpected NotifyExitHopHtlc error") require.NotNil(t, resolution) settleResolution = checkSettleResolution( t, resolution, testInvoicePreimage, @@ -163,9 +159,7 @@ func TestSettleInvoice(t *testing.T) { testInvoicePaymentHash, amtPaid-600, testHtlcExpiry, testCurrentHeight, getCircuitKey(2), hodlChan, testPayload, ) - if err != nil { - t.Fatalf("unexpected NotifyExitHopHtlc error: %v", err) - } + require.NoError(t, err, "unexpected NotifyExitHopHtlc error") require.NotNil(t, resolution) checkFailResolution(t, resolution, ResultAmountTooLow) diff --git a/invoices/test_utils_test.go b/invoices/test_utils_test.go index 3d8842cb2..fec6f4e83 100644 --- a/invoices/test_utils_test.go +++ b/invoices/test_utils_test.go @@ -272,15 +272,11 @@ func newTestInvoice(t *testing.T, preimage lntypes.Preimage, zpay32.Expiry(expiry), zpay32.PaymentAddr(payAddr), ) - if err != nil { - t.Fatalf("Error while creating new invoice: %v", err) - } + require.NoError(t, err, "Error while creating new invoice") paymentRequest, err := rawInvoice.Encode(testMessageSigner) - if err != nil { - t.Fatalf("Error while encoding payment request: %v", err) - } + require.NoError(t, err, "Error while encoding payment request") return &channeldb.Invoice{ Terms: channeldb.ContractTerm{ diff --git a/keychain/bench_test.go b/keychain/bench_test.go index bd0442ad7..e6746fcbb 100644 --- a/keychain/bench_test.go +++ b/keychain/bench_test.go @@ -11,9 +11,7 @@ func BenchmarkDerivePrivKey(t *testing.B) { cleanUp, wallet, err := createTestBtcWallet( CoinTypeBitcoin, ) - if err != nil { - t.Fatalf("unable to create wallet: %v", err) - } + require.NoError(t, err, "unable to create wallet") keyRing := NewBtcWalletKeyRing(wallet, CoinTypeBitcoin) diff --git a/lncfg/address_test.go b/lncfg/address_test.go index cb3634a5f..a738523a8 100644 --- a/lncfg/address_test.go +++ b/lncfg/address_test.go @@ -202,9 +202,7 @@ func testLNAddress(t *testing.T, test lnAddressCase) { lnAddr, err := ParseLNAddressString( test.lnAddress, defaultTestPort, net.ResolveTCPAddr, ) - if err != nil { - t.Fatalf("unable to parse ln address: %v", err) - } + require.NoError(t, err, "unable to parse ln address") // 
Assert that the public key matches the expected public key. pkBytes := lnAddr.IdentityKey.SerializeCompressed() diff --git a/lnwallet/chainfee/estimator_test.go b/lnwallet/chainfee/estimator_test.go index d898030c2..d7169b114 100644 --- a/lnwallet/chainfee/estimator_test.go +++ b/lnwallet/chainfee/estimator_test.go @@ -99,9 +99,7 @@ func TestStaticFeeEstimator(t *testing.T) { defer feeEstimator.Stop() feeRate, err := feeEstimator.EstimateFeePerKW(6) - if err != nil { - t.Fatalf("unable to get fee rate: %v", err) - } + require.NoError(t, err, "unable to get fee rate") if feeRate != feePerKw { t.Fatalf("expected fee rate %v, got %v", feePerKw, feeRate) @@ -130,16 +128,12 @@ func TestSparseConfFeeSource(t *testing.T) { } testJSON := map[string]map[uint32]uint32{"fee_by_block_target": testFees} jsonResp, err := json.Marshal(testJSON) - if err != nil { - t.Fatalf("unable to marshal JSON API response: %v", err) - } + require.NoError(t, err, "unable to marshal JSON API response") reader := bytes.NewReader(jsonResp) // Finally, ensure the expected map is returned without error. fees, err := feeSource.ParseResponse(reader) - if err != nil { - t.Fatalf("unable to parse API response: %v", err) - } + require.NoError(t, err, "unable to parse API response") if !reflect.DeepEqual(fees, testFees) { t.Fatalf("expected %v, got %v", testFees, fees) } @@ -148,9 +142,7 @@ func TestSparseConfFeeSource(t *testing.T) { badFees := map[string]uint32{"hi": 12345, "hello": 42, "satoshi": 54321} badJSON := map[string]map[string]uint32{"fee_by_block_target": badFees} jsonResp, err = json.Marshal(badJSON) - if err != nil { - t.Fatalf("unable to marshal JSON API response: %v", err) - } + require.NoError(t, err, "unable to marshal JSON API response") reader = bytes.NewReader(jsonResp) // Finally, ensure the improperly formatted fees error. diff --git a/lnwallet/chancloser/chancloser_test.go b/lnwallet/chancloser/chancloser_test.go index 5d5173784..66ac38c40 100644 --- a/lnwallet/chancloser/chancloser_test.go +++ b/lnwallet/chancloser/chancloser_test.go @@ -5,6 +5,7 @@ import ( "testing" "github.com/lightningnetwork/lnd/lnwire" + "github.com/stretchr/testify/require" ) // randDeliveryAddress generates a random delivery address for testing. @@ -13,9 +14,7 @@ func randDeliveryAddress(t *testing.T) lnwire.DeliveryAddress { da := lnwire.DeliveryAddress(make([]byte, 34)) _, err := rand.Read(da) - if err != nil { - t.Fatalf("cannot generate random address: %v", err) - } + require.NoError(t, err, "cannot generate random address") return da } diff --git a/lnwallet/chanfunding/psbt_assembler_test.go b/lnwallet/chanfunding/psbt_assembler_test.go index d646bd4a2..461b83f5a 100644 --- a/lnwallet/chanfunding/psbt_assembler_test.go +++ b/lnwallet/chanfunding/psbt_assembler_test.go @@ -38,9 +38,7 @@ func TestPsbtIntent(t *testing.T) { // the funding intent. 
 	a := NewPsbtAssembler(chanCapacity, nil, &params, true)
 	intent, err := a.ProvisionChannel(&Request{LocalAmt: chanCapacity})
-	if err != nil {
-		t.Fatalf("error provisioning channel: %v", err)
-	}
+	require.NoError(t, err, "error provisioning channel")
 	psbtIntent, ok := intent.(*PsbtIntent)
 	if !ok {
 		t.Fatalf("intent was not a PsbtIntent")
@@ -69,20 +67,14 @@ func TestPsbtIntent(t *testing.T) {
 		localPubkey.SerializeCompressed(),
 		remotePubkey.SerializeCompressed(), int64(chanCapacity),
 	)
-	if err != nil {
-		t.Fatalf("error calculating script: %v", err)
-	}
+	require.NoError(t, err, "error calculating script")
 	witnessScriptHash := sha256.Sum256(script)
 	addr, err := btcutil.NewAddressWitnessScriptHash(
 		witnessScriptHash[:], &params,
 	)
-	if err != nil {
-		t.Fatalf("unable to encode address: %v", err)
-	}
+	require.NoError(t, err, "unable to encode address")
 	fundingAddr, amt, pendingPsbt, err := psbtIntent.FundingParams()
-	if err != nil {
-		t.Fatalf("unable to get funding params: %v", err)
-	}
+	require.NoError(t, err, "unable to get funding params")
 	if addr.EncodeAddress() != fundingAddr.EncodeAddress() {
 		t.Fatalf("unexpected address. got %s wanted %s", fundingAddr,
 			addr)
@@ -120,9 +112,7 @@ func TestPsbtIntent(t *testing.T) {
 	// Verify the dummy PSBT with the intent.
 	err = psbtIntent.Verify(pendingPsbt, false)
-	if err != nil {
-		t.Fatalf("error verifying pending PSBT: %v", err)
-	}
+	require.NoError(t, err, "error verifying pending PSBT")
 	if psbtIntent.State != PsbtVerified {
 		t.Fatalf("unexpected state. got %d wanted %d", psbtIntent.State,
 			PsbtVerified)
 	}
@@ -154,16 +144,12 @@ func TestPsbtIntent(t *testing.T) {
 		}
 	}()
 	err = psbtIntent.Finalize(pendingPsbt)
-	if err != nil {
-		t.Fatalf("error finalizing pending PSBT: %v", err)
-	}
+	require.NoError(t, err, "error finalizing pending PSBT")
 	wg.Wait()
 
 	// We should have a nil error in our channel now.
 	err = <-errChan
-	if err != nil {
-		t.Fatalf("unexpected error after finalize: %v", err)
-	}
+	require.NoError(t, err, "unexpected error after finalize")
 	if psbtIntent.State != PsbtFinalized {
 		t.Fatalf("unexpected state. got %d wanted %d", psbtIntent.State,
 			PsbtFinalized)
@@ -171,9 +157,7 @@ func TestPsbtIntent(t *testing.T) {
 	// Make sure the funding transaction can be compiled.
 	_, err = psbtIntent.CompileFundingTx()
-	if err != nil {
-		t.Fatalf("error compiling funding TX from PSBT: %v", err)
-	}
+	require.NoError(t, err, "error compiling funding TX from PSBT")
 	if psbtIntent.State != PsbtFundingTxCompiled {
 		t.Fatalf("unexpected state. got %d wanted %d", psbtIntent.State,
 			PsbtFundingTxCompiled)
@@ -204,24 +188,18 @@ func TestPsbtIntentBasePsbt(t *testing.T) {
 		localPubkey.SerializeCompressed(),
 		remotePubkey.SerializeCompressed(), int64(chanCapacity),
 	)
-	if err != nil {
-		t.Fatalf("error calculating script: %v", err)
-	}
+	require.NoError(t, err, "error calculating script")
 	witnessScriptHash := sha256.Sum256(script)
 	addr, err := btcutil.NewAddressWitnessScriptHash(
 		witnessScriptHash[:], &params,
 	)
-	if err != nil {
-		t.Fatalf("unable to encode address: %v", err)
-	}
+	require.NoError(t, err, "unable to encode address")
 
 	// Now as the next step, create a new assembler/intent pair with a base
 	// PSBT to see that we can add an additional output to it.
 	a := NewPsbtAssembler(chanCapacity, pendingPsbt, &params, true)
 	intent, err := a.ProvisionChannel(&Request{LocalAmt: chanCapacity})
-	if err != nil {
-		t.Fatalf("error provisioning channel: %v", err)
-	}
+	require.NoError(t, err, "error provisioning channel")
 	psbtIntent, ok := intent.(*PsbtIntent)
 	if !ok {
 		t.Fatalf("intent was not a PsbtIntent")
@@ -230,9 +208,7 @@ func TestPsbtIntentBasePsbt(t *testing.T) {
 		&keychain.KeyDescriptor{PubKey: localPubkey}, remotePubkey,
 	)
 	newAddr, amt, twoOutPsbt, err := psbtIntent.FundingParams()
-	if err != nil {
-		t.Fatalf("unable to get funding params: %v", err)
-	}
+	require.NoError(t, err, "unable to get funding params")
 	if addr.EncodeAddress() != newAddr.EncodeAddress() {
 		t.Fatalf("unexpected address. got %s wanted %s", newAddr,
 			addr)
@@ -468,9 +444,7 @@ func TestPsbtVerify(t *testing.T) {
 	// the funding intent.
 	a := NewPsbtAssembler(chanCapacity, nil, &params, true)
 	intent, err := a.ProvisionChannel(&Request{LocalAmt: chanCapacity})
-	if err != nil {
-		t.Fatalf("error provisioning channel: %v", err)
-	}
+	require.NoError(t, err, "error provisioning channel")
 	psbtIntent := intent.(*PsbtIntent)
 
 	// Bind our test keys to get the funding parameters.
@@ -636,9 +610,7 @@ func TestPsbtFinalize(t *testing.T) {
 	// the funding intent.
 	a := NewPsbtAssembler(chanCapacity, nil, &params, true)
 	intent, err := a.ProvisionChannel(&Request{LocalAmt: chanCapacity})
-	if err != nil {
-		t.Fatalf("error provisioning channel: %v", err)
-	}
+	require.NoError(t, err, "error provisioning channel")
 	psbtIntent := intent.(*PsbtIntent)
 
 	// Bind our test keys to get the funding parameters.
@@ -792,12 +764,8 @@ func TestVerifyAllInputsSegWit(t *testing.T) {
 func clonePsbt(t *testing.T, p *psbt.Packet) *psbt.Packet {
 	var buf bytes.Buffer
 	err := p.Serialize(&buf)
-	if err != nil {
-		t.Fatalf("error serializing PSBT: %v", err)
-	}
+	require.NoError(t, err, "error serializing PSBT")
 	newPacket, err := psbt.NewFromRawBytes(&buf, false)
-	if err != nil {
-		t.Fatalf("error unserializing PSBT: %v", err)
-	}
+	require.NoError(t, err, "error unserializing PSBT")
 	return newPacket
 }
diff --git a/lnwallet/channel_test.go b/lnwallet/channel_test.go
index 812eb4b0e..25aa1aa0b 100644
--- a/lnwallet/channel_test.go
+++ b/lnwallet/channel_test.go
@@ -69,9 +69,7 @@ func testAddSettleWorkflow(t *testing.T, tweakless bool) {
 	}
 
 	aliceChannel, bobChannel, cleanUp, err := CreateTestChannels(chanType)
-	if err != nil {
-		t.Fatalf("unable to create test channels: %v", err)
-	}
+	require.NoError(t, err, "unable to create test channels")
 	defer cleanUp()
 
 	paymentPreimage := bytes.Repeat([]byte{1}, 32)
@@ -87,55 +85,41 @@ func testAddSettleWorkflow(t *testing.T, tweakless bool) {
 	// update log. Then Alice sends this wire message over to Bob who adds
 	// this htlc to his remote state update log.
 	aliceHtlcIndex, err := aliceChannel.AddHTLC(htlc, nil)
-	if err != nil {
-		t.Fatalf("unable to add htlc: %v", err)
-	}
+	require.NoError(t, err, "unable to add htlc")
 
 	bobHtlcIndex, err := bobChannel.ReceiveHTLC(htlc)
-	if err != nil {
-		t.Fatalf("unable to recv htlc: %v", err)
-	}
+	require.NoError(t, err, "unable to recv htlc")
 
 	// Next alice commits this change by sending a signature message. Since
 	// we expect the messages to be ordered, Bob will receive the HTLC we
 	// just sent before he receives this signature, so the signature will
 	// cover the HTLC.
 	aliceSig, aliceHtlcSigs, _, err := aliceChannel.SignNextCommitment()
-	if err != nil {
-		t.Fatalf("alice unable to sign commitment: %v", err)
-	}
+	require.NoError(t, err, "alice unable to sign commitment")
 
 	// Bob receives this signature message, and checks that this covers the
 	// state he has in his remote log. This includes the HTLC just sent
 	// from Alice.
 	err = bobChannel.ReceiveNewCommitment(aliceSig, aliceHtlcSigs)
-	if err != nil {
-		t.Fatalf("bob unable to process alice's new commitment: %v", err)
-	}
+	require.NoError(t, err, "bob unable to process alice's new commitment")
 
 	// Bob revokes his prior commitment given to him by Alice, since he now
 	// has a valid signature for a newer commitment.
 	bobRevocation, _, err := bobChannel.RevokeCurrentCommitment()
-	if err != nil {
-		t.Fatalf("unable to generate bob revocation: %v", err)
-	}
+	require.NoError(t, err, "unable to generate bob revocation")
 
 	// Bob finally send a signature for Alice's commitment transaction.
 	// This signature will cover the HTLC, since Bob will first send the
 	// revocation just created. The revocation also acks every received
 	// HTLC up to the point where Alice sent here signature.
 	bobSig, bobHtlcSigs, _, err := bobChannel.SignNextCommitment()
-	if err != nil {
-		t.Fatalf("bob unable to sign alice's commitment: %v", err)
-	}
+	require.NoError(t, err, "bob unable to sign alice's commitment")
 
 	// Alice then processes this revocation, sending her own revocation for
 	// her prior commitment transaction. Alice shouldn't have any HTLCs to
 	// forward since she's sending an outgoing HTLC.
 	fwdPkg, _, _, _, err := aliceChannel.ReceiveRevocation(bobRevocation)
-	if err != nil {
-		t.Fatalf("alice unable to process bob's revocation: %v", err)
-	}
+	require.NoError(t, err, "alice unable to process bob's revocation")
 	if len(fwdPkg.Adds) != 0 {
 		t.Fatalf("alice forwards %v add htlcs, should forward none",
 			len(fwdPkg.Adds))
@@ -149,24 +133,18 @@ func testAddSettleWorkflow(t *testing.T, tweakless bool) {
 	// the revocation, she expect this signature to cover everything up to
 	// the point where she sent her signature, including the HTLC.
 	err = aliceChannel.ReceiveNewCommitment(bobSig, bobHtlcSigs)
-	if err != nil {
-		t.Fatalf("alice unable to process bob's new commitment: %v", err)
-	}
+	require.NoError(t, err, "alice unable to process bob's new commitment")
 
 	// Alice then generates a revocation for bob.
 	aliceRevocation, _, err := aliceChannel.RevokeCurrentCommitment()
-	if err != nil {
-		t.Fatalf("unable to revoke alice channel: %v", err)
-	}
+	require.NoError(t, err, "unable to revoke alice channel")
 
 	// Finally Bob processes Alice's revocation, at this point the new HTLC
 	// is fully locked in within both commitment transactions. Bob should
 	// also be able to forward an HTLC now that the HTLC has been locked
 	// into both commitment transactions.
 	fwdPkg, _, _, _, err = bobChannel.ReceiveRevocation(aliceRevocation)
-	if err != nil {
-		t.Fatalf("bob unable to process alice's revocation: %v", err)
-	}
+	require.NoError(t, err, "bob unable to process alice's revocation")
 	if len(fwdPkg.Adds) != 1 {
 		t.Fatalf("bob forwards %v add htlcs, should only forward one",
 			len(fwdPkg.Adds))
@@ -231,9 +209,7 @@ func testAddSettleWorkflow(t *testing.T, tweakless bool) {
 	var preimage [32]byte
 	copy(preimage[:], paymentPreimage)
 	err = bobChannel.SettleHTLC(preimage, bobHtlcIndex, nil, nil, nil)
-	if err != nil {
-		t.Fatalf("bob unable to settle inbound htlc: %v", err)
-	}
+	require.NoError(t, err, "bob unable to settle inbound htlc")
 
 	err = aliceChannel.ReceiveHTLCSettle(preimage, aliceHtlcIndex)
 	if err != nil {
@@ -241,27 +217,17 @@ func testAddSettleWorkflow(t *testing.T, tweakless bool) {
 	}
 
 	bobSig2, bobHtlcSigs2, _, err := bobChannel.SignNextCommitment()
-	if err != nil {
-		t.Fatalf("bob unable to sign settle commitment: %v", err)
-	}
+	require.NoError(t, err, "bob unable to sign settle commitment")
 	err = aliceChannel.ReceiveNewCommitment(bobSig2, bobHtlcSigs2)
-	if err != nil {
-		t.Fatalf("alice unable to process bob's new commitment: %v", err)
-	}
+	require.NoError(t, err, "alice unable to process bob's new commitment")
 
 	aliceRevocation2, _, err := aliceChannel.RevokeCurrentCommitment()
-	if err != nil {
-		t.Fatalf("alice unable to generate revocation: %v", err)
-	}
+	require.NoError(t, err, "alice unable to generate revocation")
 	aliceSig2, aliceHtlcSigs2, _, err := aliceChannel.SignNextCommitment()
-	if err != nil {
-		t.Fatalf("alice unable to sign new commitment: %v", err)
-	}
+	require.NoError(t, err, "alice unable to sign new commitment")
 
 	fwdPkg, _, _, _, err = bobChannel.ReceiveRevocation(aliceRevocation2)
-	if err != nil {
-		t.Fatalf("bob unable to process alice's revocation: %v", err)
-	}
+	require.NoError(t, err, "bob unable to process alice's revocation")
 	if len(fwdPkg.Adds) != 0 {
 		t.Fatalf("bob forwards %v add htlcs, should forward none",
 			len(fwdPkg.Adds))
@@ -272,18 +238,12 @@ func testAddSettleWorkflow(t *testing.T, tweakless bool) {
 	}
 
 	err = bobChannel.ReceiveNewCommitment(aliceSig2, aliceHtlcSigs2)
-	if err != nil {
-		t.Fatalf("bob unable to process alice's new commitment: %v", err)
-	}
+	require.NoError(t, err, "bob unable to process alice's new commitment")
 
 	bobRevocation2, _, err := bobChannel.RevokeCurrentCommitment()
-	if err != nil {
-		t.Fatalf("bob unable to revoke commitment: %v", err)
-	}
+	require.NoError(t, err, "bob unable to revoke commitment")
 	fwdPkg, _, _, _, err = aliceChannel.ReceiveRevocation(bobRevocation2)
-	if err != nil {
-		t.Fatalf("alice unable to process bob's revocation: %v", err)
-	}
+	require.NoError(t, err, "alice unable to process bob's revocation")
 	if len(fwdPkg.Adds) != 0 {
 		// Alice should now be able to forward the settlement HTLC to
 		// any down stream peers.
@@ -501,9 +461,7 @@ func TestCheckCommitTxSize(t *testing.T) {
 	aliceChannel, bobChannel, cleanUp, err := CreateTestChannels(
 		channeldb.SingleFunderTweaklessBit,
 	)
-	if err != nil {
-		t.Fatalf("unable to create test channels: %v", err)
-	}
+	require.NoError(t, err, "unable to create test channels")
 	defer cleanUp()
 
 	// Check that weight estimation of the commitment transaction without
@@ -610,32 +568,22 @@ func testCommitHTLCSigTieBreak(t *testing.T, restart bool) {
 	// signed by Alice because received HTLC scripts commit to the CLTV
 	// directly, so the outputs will have different scriptPubkeys.
 	aliceSig, aliceHtlcSigs, _, err := aliceChannel.SignNextCommitment()
-	if err != nil {
-		t.Fatalf("unable to sign alice's commitment: %v", err)
-	}
+	require.NoError(t, err, "unable to sign alice's commitment")
 
 	err = bobChannel.ReceiveNewCommitment(aliceSig, aliceHtlcSigs)
-	if err != nil {
-		t.Fatalf("unable to receive alice's commitment: %v", err)
-	}
+	require.NoError(t, err, "unable to receive alice's commitment")
 
 	bobRevocation, _, err := bobChannel.RevokeCurrentCommitment()
-	if err != nil {
-		t.Fatalf("unable to revoke bob's commitment: %v", err)
-	}
+	require.NoError(t, err, "unable to revoke bob's commitment")
 	_, _, _, _, err = aliceChannel.ReceiveRevocation(bobRevocation)
-	if err != nil {
-		t.Fatalf("unable to receive bob's revocation: %v", err)
-	}
+	require.NoError(t, err, "unable to receive bob's revocation")
 
 	// Now have Bob initiate the second half of the commitment dance. Here
 	// the offered HTLC scripts he adds for Alice will need to have the
 	// tie-breaking applied because the CLTV is not committed, but instead
 	// implicit via the construction of the second-level transactions.
 	bobSig, bobHtlcSigs, bobHtlcs, err := bobChannel.SignNextCommitment()
-	if err != nil {
-		t.Fatalf("unable to sign bob's commitment: %v", err)
-	}
+	require.NoError(t, err, "unable to sign bob's commitment")
 
 	if len(bobHtlcs) != numHtlcs {
 		t.Fatalf("expected %d htlcs, got: %v", numHtlcs,
 			len(bobHtlcs))
@@ -681,9 +629,7 @@ func testCommitHTLCSigTieBreak(t *testing.T, restart bool) {
 	// Finally, have Alice validate the signatures to ensure that she is
 	// expecting the signatures in the proper order.
 	err = aliceChannel.ReceiveNewCommitment(bobSig, bobHtlcSigs)
-	if err != nil {
-		t.Fatalf("unable to receive bob's commitment: %v", err)
-	}
+	require.NoError(t, err, "unable to receive bob's commitment")
 }
 
 // TestCooperativeChannelClosure checks that the coop close process finishes
@@ -718,9 +664,7 @@ func testCoopClose(t *testing.T, testCase *coopCloseTestCase) {
 	aliceChannel, bobChannel, cleanUp, err := CreateTestChannels(
 		testCase.chanType,
 	)
-	if err != nil {
-		t.Fatalf("unable to create test channels: %v", err)
-	}
+	require.NoError(t, err, "unable to create test channels")
 	defer cleanUp()
 
 	aliceDeliveryScript := bobsPrivKey[:]
@@ -739,17 +683,13 @@ func testCoopClose(t *testing.T, testCase *coopCloseTestCase) {
 	aliceSig, _, _, err := aliceChannel.CreateCloseProposal(
 		aliceFee, aliceDeliveryScript, bobDeliveryScript,
 	)
-	if err != nil {
-		t.Fatalf("unable to create alice coop close proposal: %v", err)
-	}
+	require.NoError(t, err, "unable to create alice coop close proposal")
 
 	bobFee := bobChannel.CalcFee(bobFeeRate)
 	bobSig, _, _, err := bobChannel.CreateCloseProposal(
 		bobFee, bobDeliveryScript, aliceDeliveryScript,
 	)
-	if err != nil {
-		t.Fatalf("unable to create bob coop close proposal: %v", err)
-	}
+	require.NoError(t, err, "unable to create bob coop close proposal")
 
 	// With the proposals created, both sides should be able to properly
 	// process the other party's signature. This indicates that the
 	aliceCloseTx, bobTxBalance, err := bobChannel.CompleteCooperativeClose(
 		bobSig, aliceSig, bobDeliveryScript,
 		aliceDeliveryScript, bobFee,
 	)
-	if err != nil {
-		t.Fatalf("unable to complete alice cooperative close: %v", err)
-	}
+	require.NoError(t, err, "unable to complete alice cooperative close")
 	bobCloseSha := aliceCloseTx.TxHash()
 
 	bobCloseTx, aliceTxBalance, err := aliceChannel.CompleteCooperativeClose(
 		aliceSig, bobSig, aliceDeliveryScript,
 		bobDeliveryScript, aliceFee,
 	)
-	if err != nil {
-		t.Fatalf("unable to complete bob cooperative close: %v", err)
-	}
+	require.NoError(t, err, "unable to complete bob cooperative close")
 	aliceCloseSha := bobCloseTx.TxHash()
 
 	if bobCloseSha != aliceCloseSha {
@@ -840,9 +776,7 @@ func testForceClose(t *testing.T, testCase *forceCloseTestCase) {
 	aliceChannel, bobChannel, cleanUp, err := CreateTestChannels(
 		testCase.chanType,
 	)
-	if err != nil {
-		t.Fatalf("unable to create test channels: %v", err)
-	}
+	require.NoError(t, err, "unable to create test channels")
 	defer cleanUp()
 
 	bobAmount := bobChannel.channelState.LocalCommitment.LocalBalance
@@ -882,9 +816,7 @@ func testForceClose(t *testing.T, testCase *forceCloseTestCase) {
 	// With the cache populated, we'll now attempt the force close
 	// initiated by Alice.
 	closeSummary, err := aliceChannel.ForceClose()
-	if err != nil {
-		t.Fatalf("unable to force close channel: %v", err)
-	}
+	require.NoError(t, err, "unable to force close channel")
 
 	// Alice should detect that she can sweep the outgoing HTLC after a
 	// timeout, but also that she's able to sweep in incoming HTLC Bob sent
@@ -999,9 +931,7 @@ func testForceClose(t *testing.T, testCase *forceCloseTestCase) {
 			senderHtlcPkScript, int64(htlcAmount.ToSatoshis()),
 		),
 	)
-	if err != nil {
-		t.Fatalf("unable to create engine: %v", err)
-	}
+	require.NoError(t, err, "unable to create engine")
 	if err := vm.Execute(); err != nil {
 		t.Fatalf("htlc timeout spend is invalid: %v", err)
 	}
@@ -1023,9 +953,7 @@ func testForceClose(t *testing.T, testCase *forceCloseTestCase) {
 	sweepTx.TxIn[0].Witness, err = input.HtlcSpendSuccess(aliceChannel.Signer,
 		&htlcResolution.SweepSignDesc, sweepTx,
 		uint32(aliceChannel.channelState.LocalChanCfg.CsvDelay))
-	if err != nil {
-		t.Fatalf("unable to gen witness for timeout output: %v", err)
-	}
+	require.NoError(t, err, "unable to gen witness for timeout output")
 
 	// With the witness fully populated for the success spend from the
 	// second-level transaction, we ensure that the scripts properly
@@ -1039,9 +967,7 @@ func testForceClose(t *testing.T, testCase *forceCloseTestCase) {
 			htlcResolution.SweepSignDesc.Output.Value,
 		),
 	)
-	if err != nil {
-		t.Fatalf("unable to create engine: %v", err)
-	}
+	require.NoError(t, err, "unable to create engine")
 	if err := vm.Execute(); err != nil {
 		t.Fatalf("htlc timeout spend is invalid: %v", err)
 	}
@@ -1075,9 +1001,7 @@ func testForceClose(t *testing.T, testCase *forceCloseTestCase) {
 			receiverHtlcScript, int64(htlcAmount.ToSatoshis()),
 		),
 	)
-	if err != nil {
-		t.Fatalf("unable to create engine: %v", err)
-	}
+	require.NoError(t, err, "unable to create engine")
 	if err := vm.Execute(); err != nil {
 		t.Fatalf("htlc success spend is invalid: %v", err)
 	}
@@ -1096,9 +1020,7 @@ func testForceClose(t *testing.T, testCase *forceCloseTestCase) {
 	sweepTx.TxIn[0].Witness, err = input.HtlcSpendSuccess(aliceChannel.Signer,
 		&inHtlcResolution.SweepSignDesc, sweepTx,
 		uint32(aliceChannel.channelState.LocalChanCfg.CsvDelay))
-	if err != nil {
-		t.Fatalf("unable to gen witness for timeout output: %v", err)
-	}
+	require.NoError(t, err, "unable to gen witness for timeout output")
 
 	// The spend we create above spending the second level HTLC output
 	// should validate without any issues.
@@ -1111,18 +1033,14 @@ func testForceClose(t *testing.T, testCase *forceCloseTestCase) {
 			inHtlcResolution.SweepSignDesc.Output.Value,
 		),
 	)
-	if err != nil {
-		t.Fatalf("unable to create engine: %v", err)
-	}
+	require.NoError(t, err, "unable to create engine")
 	if err := vm.Execute(); err != nil {
 		t.Fatalf("htlc timeout spend is invalid: %v", err)
 	}
 
 	// Check the same for Bob's ForceCloseSummary.
 	closeSummary, err = bobChannel.ForceClose()
-	if err != nil {
-		t.Fatalf("unable to force close channel: %v", err)
-	}
+	require.NoError(t, err, "unable to force close channel")
 	bobCommitResolution := closeSummary.CommitResolution
 	if bobCommitResolution == nil {
 		t.Fatalf("bob fails to include to-self output in ForceCloseSummary")
@@ -1185,9 +1103,7 @@ func TestForceCloseDustOutput(t *testing.T) {
 	aliceChannel, bobChannel, cleanUp, err := CreateTestChannels(
 		channeldb.SingleFunderTweaklessBit,
 	)
-	if err != nil {
-		t.Fatalf("unable to create test channels: %v", err)
-	}
+	require.NoError(t, err, "unable to create test channels")
 	defer cleanUp()
 
 	// We set both node's channel reserves to 0, to make sure
@@ -1207,22 +1123,16 @@ func TestForceCloseDustOutput(t *testing.T) {
 	// ForceCloseSummary again on both peers.
 	htlc, preimage := createHTLC(0, bobAmount-htlcAmount)
 	bobHtlcIndex, err := bobChannel.AddHTLC(htlc, nil)
-	if err != nil {
-		t.Fatalf("alice unable to add htlc: %v", err)
-	}
+	require.NoError(t, err, "alice unable to add htlc")
 	aliceHtlcIndex, err := aliceChannel.ReceiveHTLC(htlc)
-	if err != nil {
-		t.Fatalf("bob unable to receive htlc: %v", err)
-	}
+	require.NoError(t, err, "bob unable to receive htlc")
 	if err := ForceStateTransition(bobChannel, aliceChannel); err != nil {
 		t.Fatalf("Can't update the channel state: %v", err)
 	}
 
 	// Settle HTLC and sign new commitment.
 	err = aliceChannel.SettleHTLC(preimage, aliceHtlcIndex, nil, nil, nil)
-	if err != nil {
-		t.Fatalf("bob unable to settle inbound htlc: %v", err)
-	}
+	require.NoError(t, err, "bob unable to settle inbound htlc")
 	err = bobChannel.ReceiveHTLCSettle(preimage, bobHtlcIndex)
 	if err != nil {
 		t.Fatalf("alice unable to accept settle of outbound htlc: %v", err)
@@ -1235,9 +1145,7 @@ func TestForceCloseDustOutput(t *testing.T) {
 	bobAmount = bobChannel.channelState.LocalCommitment.RemoteBalance
 
 	closeSummary, err := aliceChannel.ForceClose()
-	if err != nil {
-		t.Fatalf("unable to force close channel: %v", err)
-	}
+	require.NoError(t, err, "unable to force close channel")
 
 	// Alice's to-self output should still be in the commitment
 	// transaction.
@@ -1274,9 +1182,7 @@ func TestForceCloseDustOutput(t *testing.T) {
 	}
 
 	closeSummary, err = bobChannel.ForceClose()
-	if err != nil {
-		t.Fatalf("unable to force close channel: %v", err)
-	}
+	require.NoError(t, err, "unable to force close channel")
 
 	// Bob's to-self output is below Bob's dust value and should be
 	// reflected in the ForceCloseSummary.
@@ -1305,9 +1211,7 @@ func TestDustHTLCFees(t *testing.T) { aliceChannel, bobChannel, cleanUp, err := CreateTestChannels( channeldb.SingleFunderTweaklessBit, ) - if err != nil { - t.Fatalf("unable to create test channels: %v", err) - } + require.NoError(t, err, "unable to create test channels") defer cleanUp() aliceStartingBalance := aliceChannel.channelState.LocalCommitment.LocalBalance @@ -1384,9 +1288,7 @@ func TestHTLCDustLimit(t *testing.T) { aliceChannel, bobChannel, cleanUp, err := CreateTestChannels( channeldb.SingleFunderTweaklessBit, ) - if err != nil { - t.Fatalf("unable to create test channels: %v", err) - } + require.NoError(t, err, "unable to create test channels") defer cleanUp() // The amount of the HTLC should be above Alice's dust limit and below @@ -1401,13 +1303,9 @@ func TestHTLCDustLimit(t *testing.T) { htlc, preimage := createHTLC(0, htlcAmount) aliceHtlcIndex, err := aliceChannel.AddHTLC(htlc, nil) - if err != nil { - t.Fatalf("alice unable to add htlc: %v", err) - } + require.NoError(t, err, "alice unable to add htlc") bobHtlcIndex, err := bobChannel.ReceiveHTLC(htlc) - if err != nil { - t.Fatalf("bob unable to receive htlc: %v", err) - } + require.NoError(t, err, "bob unable to receive htlc") if err := ForceStateTransition(aliceChannel, bobChannel); err != nil { t.Fatalf("Can't update the channel state: %v", err) } @@ -1435,9 +1333,7 @@ func TestHTLCDustLimit(t *testing.T) { // Settle HTLC and create a new commitment state. err = bobChannel.SettleHTLC(preimage, bobHtlcIndex, nil, nil, nil) - if err != nil { - t.Fatalf("bob unable to settle inbound htlc: %v", err) - } + require.NoError(t, err, "bob unable to settle inbound htlc") err = aliceChannel.ReceiveHTLCSettle(preimage, aliceHtlcIndex) if err != nil { t.Fatalf("alice unable to accept settle of outbound htlc: %v", err) @@ -1498,9 +1394,7 @@ func TestHTLCSigNumber(t *testing.T) { // Calculate two values that will be below and above Bob's dust limit. estimator := chainfee.NewStaticEstimator(6000, 0) feePerKw, err := estimator.EstimateFeePerKW(1) - if err != nil { - t.Fatalf("unable to get fee: %v", err) - } + require.NoError(t, err, "unable to get fee") belowDust := btcutil.Amount(500) + HtlcTimeoutFee( channeldb.SingleFunderTweaklessBit, feePerKw, @@ -1518,9 +1412,7 @@ func TestHTLCSigNumber(t *testing.T) { defer cleanUp() aliceSig, aliceHtlcSigs, _, err := aliceChannel.SignNextCommitment() - if err != nil { - t.Fatalf("Error signing next commitment: %v", err) - } + require.NoError(t, err, "Error signing next commitment") if len(aliceHtlcSigs) != 2 { t.Fatalf("expected 2 htlc sig, instead got %v", @@ -1542,9 +1434,7 @@ func TestHTLCSigNumber(t *testing.T) { defer cleanUp() aliceSig, aliceHtlcSigs, _, err = aliceChannel.SignNextCommitment() - if err != nil { - t.Fatalf("Error signing next commitment: %v", err) - } + require.NoError(t, err, "Error signing next commitment") if len(aliceHtlcSigs) != 1 { t.Fatalf("expected 1 htlc sig, instead got %v", @@ -1565,9 +1455,7 @@ func TestHTLCSigNumber(t *testing.T) { defer cleanUp() aliceSig, aliceHtlcSigs, _, err = aliceChannel.SignNextCommitment() - if err != nil { - t.Fatalf("Error signing next commitment: %v", err) - } + require.NoError(t, err, "Error signing next commitment") // Since the HTLC is below Bob's dust limit, Alice won't need to send // any signatures for this HTLC. 
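The signature-count checks in TestHTLCSigNumber (len(aliceHtlcSigs) != 2 and friends) keep their t.Fatalf form in this patch. A sketch of how a follow-up could tighten them with the same testify package, shown here only as an assumption about a possible later cleanup:

	// require.Len reports both the expected length and the actual
	// contents of the slice on failure.
	require.Len(t, aliceHtlcSigs, 2, "wrong number of htlc sigs")

	// The scoped-error state transitions could take the same one-line
	// shape, since ForceStateTransition returns a plain error.
	require.NoError(t, ForceStateTransition(aliceChannel, bobChannel),
		"can't update the channel state")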
@@ -1577,9 +1465,7 @@ func TestHTLCSigNumber(t *testing.T) { } err = bobChannel.ReceiveNewCommitment(aliceSig, aliceHtlcSigs) - if err != nil { - t.Fatalf("Bob failed receiving commitment: %v", err) - } + require.NoError(t, err, "Bob failed receiving commitment") // ================================================================ // Test that sigs are correctly returned for HTLCs above dust limit. @@ -1588,9 +1474,7 @@ func TestHTLCSigNumber(t *testing.T) { defer cleanUp() aliceSig, aliceHtlcSigs, _, err = aliceChannel.SignNextCommitment() - if err != nil { - t.Fatalf("Error signing next commitment: %v", err) - } + require.NoError(t, err, "Error signing next commitment") // Since the HTLC is above Bob's dust limit, Alice should send a // signature for this HTLC. @@ -1600,9 +1484,7 @@ func TestHTLCSigNumber(t *testing.T) { } err = bobChannel.ReceiveNewCommitment(aliceSig, aliceHtlcSigs) - if err != nil { - t.Fatalf("Bob failed receiving commitment: %v", err) - } + require.NoError(t, err, "Bob failed receiving commitment") // ==================================================================== // Test that Bob will not validate a received commitment if Alice sends @@ -1615,9 +1497,7 @@ func TestHTLCSigNumber(t *testing.T) { // Alice should produce only one signature, since one HTLC is below // dust. aliceSig, aliceHtlcSigs, _, err = aliceChannel.SignNextCommitment() - if err != nil { - t.Fatalf("Error signing next commitment: %v", err) - } + require.NoError(t, err, "Error signing next commitment") if len(aliceHtlcSigs) != 1 { t.Fatalf("expected 1 htlc sig, instead got %v", @@ -1651,9 +1531,7 @@ func TestChannelBalanceDustLimit(t *testing.T) { aliceChannel, bobChannel, cleanUp, err := CreateTestChannels( channeldb.SingleFunderTweaklessBit, ) - if err != nil { - t.Fatalf("unable to create test channels: %v", err) - } + require.NoError(t, err, "unable to create test channels") defer cleanUp() // To allow Alice's balance to get beneath her dust limit, set the @@ -1678,20 +1556,14 @@ func TestChannelBalanceDustLimit(t *testing.T) { htlc, preimage := createHTLC(0, htlcAmount) aliceHtlcIndex, err := aliceChannel.AddHTLC(htlc, nil) - if err != nil { - t.Fatalf("alice unable to add htlc: %v", err) - } + require.NoError(t, err, "alice unable to add htlc") bobHtlcIndex, err := bobChannel.ReceiveHTLC(htlc) - if err != nil { - t.Fatalf("bob unable to receive htlc: %v", err) - } + require.NoError(t, err, "bob unable to receive htlc") if err := ForceStateTransition(aliceChannel, bobChannel); err != nil { t.Fatalf("state transition error: %v", err) } err = bobChannel.SettleHTLC(preimage, bobHtlcIndex, nil, nil, nil) - if err != nil { - t.Fatalf("bob unable to settle inbound htlc: %v", err) - } + require.NoError(t, err, "bob unable to settle inbound htlc") err = aliceChannel.ReceiveHTLCSettle(preimage, aliceHtlcIndex) if err != nil { t.Fatalf("alice unable to accept settle of outbound htlc: %v", err) @@ -1724,9 +1596,7 @@ func TestStateUpdatePersistence(t *testing.T) { aliceChannel, bobChannel, cleanUp, err := CreateTestChannels( channeldb.SingleFunderTweaklessBit, ) - if err != nil { - t.Fatalf("unable to create test channels: %v", err) - } + require.NoError(t, err, "unable to create test channels") defer cleanUp() htlcAmt := lnwire.NewMSatFromSatoshis(5000) @@ -1860,28 +1730,20 @@ func TestStateUpdatePersistence(t *testing.T) { aliceChannels, err := aliceChannel.channelState.Db.FetchOpenChannels( alicePub, ) - if err != nil { - t.Fatalf("unable to fetch channel: %v", err) - } + require.NoError(t, err, 
"unable to fetch channel") bobPub := bobChannel.channelState.IdentityPub bobChannels, err := bobChannel.channelState.Db.FetchOpenChannels(bobPub) - if err != nil { - t.Fatalf("unable to fetch channel: %v", err) - } + require.NoError(t, err, "unable to fetch channel") aliceChannelNew, err := NewLightningChannel( aliceChannel.Signer, aliceChannels[0], aliceChannel.sigPool, ) - if err != nil { - t.Fatalf("unable to create new channel: %v", err) - } + require.NoError(t, err, "unable to create new channel") bobChannelNew, err := NewLightningChannel( bobChannel.Signer, bobChannels[0], bobChannel.sigPool, ) - if err != nil { - t.Fatalf("unable to create new channel: %v", err) - } + require.NoError(t, err, "unable to create new channel") // The state update logs of the new channels and the old channels // should now be identical other than the height the HTLCs were added. @@ -2000,13 +1862,9 @@ func TestStateUpdatePersistence(t *testing.T) { } } err = aliceChannelNew.SettleHTLC(bobPreimage, 0, nil, nil, nil) - if err != nil { - t.Fatalf("unable to settle htlc: %v", err) - } + require.NoError(t, err, "unable to settle htlc") err = bobChannelNew.ReceiveHTLCSettle(bobPreimage, 0) - if err != nil { - t.Fatalf("unable to settle htlc: %v", err) - } + require.NoError(t, err, "unable to settle htlc") // Similar to the two transitions above, as both Bob and Alice added // entries to the update log before a state transition was initiated by @@ -2043,16 +1901,12 @@ func TestStateUpdatePersistence(t *testing.T) { // it should have an index of 3. If we instruct Bob to do the // same, it should have an index of 1. aliceHtlcIndex, err := aliceChannel.AddHTLC(bobh, nil) - if err != nil { - t.Fatalf("unable to add htlc: %v", err) - } + require.NoError(t, err, "unable to add htlc") if aliceHtlcIndex != 3 { t.Fatalf("wrong htlc index: expected %v, got %v", 3, aliceHtlcIndex) } bobHtlcIndex, err := bobChannel.AddHTLC(bobh, nil) - if err != nil { - t.Fatalf("unable to add htlc: %v", err) - } + require.NoError(t, err, "unable to add htlc") if bobHtlcIndex != 1 { t.Fatalf("wrong htlc index: expected %v, got %v", 1, aliceHtlcIndex) } @@ -2067,9 +1921,7 @@ func TestCancelHTLC(t *testing.T) { aliceChannel, bobChannel, cleanUp, err := CreateTestChannels( channeldb.SingleFunderTweaklessBit, ) - if err != nil { - t.Fatalf("unable to create test channels: %v", err) - } + require.NoError(t, err, "unable to create test channels") defer cleanUp() // Add a new HTLC from Alice to Bob, then trigger a new state @@ -2085,13 +1937,9 @@ func TestCancelHTLC(t *testing.T) { } aliceHtlcIndex, err := aliceChannel.AddHTLC(htlc, nil) - if err != nil { - t.Fatalf("unable to add alice htlc: %v", err) - } + require.NoError(t, err, "unable to add alice htlc") bobHtlcIndex, err := bobChannel.ReceiveHTLC(htlc) - if err != nil { - t.Fatalf("unable to add bob htlc: %v", err) - } + require.NoError(t, err, "unable to add bob htlc") if err := ForceStateTransition(aliceChannel, bobChannel); err != nil { t.Fatalf("unable to create new commitment state: %v", err) } @@ -2110,13 +1958,9 @@ func TestCancelHTLC(t *testing.T) { // Now, with the HTLC committed on both sides, trigger a cancellation // from Bob to Alice, removing the HTLC. 
err = bobChannel.FailHTLC(bobHtlcIndex, []byte("failreason"), nil, nil, nil) - if err != nil { - t.Fatalf("unable to cancel HTLC: %v", err) - } + require.NoError(t, err, "unable to cancel HTLC") err = aliceChannel.ReceiveFailHTLC(aliceHtlcIndex, []byte("bad")) - if err != nil { - t.Fatalf("unable to recv htlc cancel: %v", err) - } + require.NoError(t, err, "unable to recv htlc cancel") // Now trigger another state transition, the HTLC should now be removed // from both sides, with balances reflected. @@ -2184,9 +2028,7 @@ func TestCooperativeCloseDustAdherence(t *testing.T) { aliceChannel, bobChannel, cleanUp, err := CreateTestChannels( channeldb.SingleFunderTweaklessBit, ) - if err != nil { - t.Fatalf("unable to create test channels: %v", err) - } + require.NoError(t, err, "unable to create test channels") defer cleanUp() aliceFeeRate := chainfee.SatPerKWeight( @@ -2230,25 +2072,19 @@ func TestCooperativeCloseDustAdherence(t *testing.T) { aliceSig, _, _, err := aliceChannel.CreateCloseProposal( aliceFee, aliceDeliveryScript, bobDeliveryScript, ) - if err != nil { - t.Fatalf("unable to close channel: %v", err) - } + require.NoError(t, err, "unable to close channel") bobFee := btcutil.Amount(bobChannel.CalcFee(bobFeeRate)) + 1000 bobSig, _, _, err := bobChannel.CreateCloseProposal( bobFee, bobDeliveryScript, aliceDeliveryScript, ) - if err != nil { - t.Fatalf("unable to close channel: %v", err) - } + require.NoError(t, err, "unable to close channel") closeTx, _, err := bobChannel.CompleteCooperativeClose( bobSig, aliceSig, bobDeliveryScript, aliceDeliveryScript, bobFee, ) - if err != nil { - t.Fatalf("unable to accept channel close: %v", err) - } + require.NoError(t, err, "unable to accept channel close") // The closure transaction should have exactly two outputs. if len(closeTx.TxOut) != 2 { @@ -2270,24 +2106,18 @@ func TestCooperativeCloseDustAdherence(t *testing.T) { aliceSig, _, _, err = aliceChannel.CreateCloseProposal( aliceFee, aliceDeliveryScript, bobDeliveryScript, ) - if err != nil { - t.Fatalf("unable to close channel: %v", err) - } + require.NoError(t, err, "unable to close channel") bobSig, _, _, err = bobChannel.CreateCloseProposal( bobFee, bobDeliveryScript, aliceDeliveryScript, ) - if err != nil { - t.Fatalf("unable to close channel: %v", err) - } + require.NoError(t, err, "unable to close channel") closeTx, _, err = bobChannel.CompleteCooperativeClose( bobSig, aliceSig, bobDeliveryScript, aliceDeliveryScript, bobFee, ) - if err != nil { - t.Fatalf("unable to accept channel close: %v", err) - } + require.NoError(t, err, "unable to accept channel close") // The closure transaction should only have a single output, and that // output should be Alice's balance. 
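TestCooperativeCloseDustAdherence runs the propose/propose/complete sequence three times with identical error handling. A hypothetical helper (not part of this patch; the helper name and the *wire.MsgTx return type are assumptions inferred from the calls visible in the hunks) shows how far require can compress that repetition:

	func coopClose(t *testing.T, alice, bob *LightningChannel,
		aliceFee, bobFee btcutil.Amount,
		aliceScript, bobScript []byte) *wire.MsgTx {

		t.Helper()

		// Each side proposes a close using its own fee and scripts.
		aliceSig, _, _, err := alice.CreateCloseProposal(
			aliceFee, aliceScript, bobScript,
		)
		require.NoError(t, err, "unable to close channel")

		bobSig, _, _, err := bob.CreateCloseProposal(
			bobFee, bobScript, aliceScript,
		)
		require.NoError(t, err, "unable to close channel")

		// Bob assembles the final cooperative close transaction from
		// both signatures.
		closeTx, _, err := bob.CompleteCooperativeClose(
			bobSig, aliceSig, bobScript, aliceScript, bobFee,
		)
		require.NoError(t, err, "unable to accept channel close")

		return closeTx
	}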
@@ -2328,24 +2158,18 @@ func TestCooperativeCloseDustAdherence(t *testing.T) { aliceSig, _, _, err = aliceChannel.CreateCloseProposal( aliceFee, aliceDeliveryScript, bobDeliveryScript, ) - if err != nil { - t.Fatalf("unable to close channel: %v", err) - } + require.NoError(t, err, "unable to close channel") bobSig, _, _, err = bobChannel.CreateCloseProposal( bobFee, bobDeliveryScript, aliceDeliveryScript, ) - if err != nil { - t.Fatalf("unable to close channel: %v", err) - } + require.NoError(t, err, "unable to close channel") closeTx, _, err = bobChannel.CompleteCooperativeClose( bobSig, aliceSig, bobDeliveryScript, aliceDeliveryScript, bobFee, ) - if err != nil { - t.Fatalf("unable to accept channel close: %v", err) - } + require.NoError(t, err, "unable to accept channel close") // The closure transaction should only have a single output, and that // output should be Bob's balance. @@ -2367,9 +2191,7 @@ func TestUpdateFeeAdjustments(t *testing.T) { aliceChannel, bobChannel, cleanUp, err := CreateTestChannels( channeldb.SingleFunderTweaklessBit, ) - if err != nil { - t.Fatalf("unable to create test channels: %v", err) - } + require.NoError(t, err, "unable to create test channels") defer cleanUp() // First, we'll grab the current base fee rate as we'll be using this @@ -2424,9 +2246,7 @@ func TestUpdateFeeFail(t *testing.T) { aliceChannel, bobChannel, cleanUp, err := CreateTestChannels( channeldb.SingleFunderTweaklessBit, ) - if err != nil { - t.Fatalf("unable to create test channels: %v", err) - } + require.NoError(t, err, "unable to create test channels") defer cleanUp() // Bob receives the update, which will apply to his commitment @@ -2438,9 +2258,7 @@ func TestUpdateFeeFail(t *testing.T) { // Alice sends signature for commitment that does not cover any fee // update. aliceSig, aliceHtlcSigs, _, err := aliceChannel.SignNextCommitment() - if err != nil { - t.Fatalf("alice unable to sign commitment: %v", err) - } + require.NoError(t, err, "alice unable to sign commitment") // Bob verifies this commit, meaning that he checks that it is // consistent with everything he has received. This should fail, since he got @@ -2460,9 +2278,7 @@ func TestUpdateFeeConcurrentSig(t *testing.T) { aliceChannel, bobChannel, cleanUp, err := CreateTestChannels( channeldb.SingleFunderTweaklessBit, ) - if err != nil { - t.Fatalf("unable to create test channels: %v", err) - } + require.NoError(t, err, "unable to create test channels") defer cleanUp() paymentPreimage := bytes.Repeat([]byte{1}, 32) @@ -2491,21 +2307,15 @@ func TestUpdateFeeConcurrentSig(t *testing.T) { // Alice signs a commitment, and sends this to bob. aliceSig, aliceHtlcSigs, _, err := aliceChannel.SignNextCommitment() - if err != nil { - t.Fatalf("alice unable to sign commitment: %v", err) - } + require.NoError(t, err, "alice unable to sign commitment") // At the same time, Bob signs a commitment. bobSig, bobHtlcSigs, _, err := bobChannel.SignNextCommitment() - if err != nil { - t.Fatalf("bob unable to sign alice's commitment: %v", err) - } + require.NoError(t, err, "bob unable to sign alice's commitment") // ...that Alice receives. err = aliceChannel.ReceiveNewCommitment(bobSig, bobHtlcSigs) - if err != nil { - t.Fatalf("alice unable to process bob's new commitment: %v", err) - } + require.NoError(t, err, "alice unable to process bob's new commitment") // Now let Bob receive the fee update + commitment that Alice sent. 
if err := bobChannel.ReceiveUpdateFee(fee); err != nil { @@ -2516,9 +2326,7 @@ func TestUpdateFeeConcurrentSig(t *testing.T) { // consistent with the state he had for Alice, including the received // HTLC and fee update. err = bobChannel.ReceiveNewCommitment(aliceSig, aliceHtlcSigs) - if err != nil { - t.Fatalf("bob unable to process alice's new commitment: %v", err) - } + require.NoError(t, err, "bob unable to process alice's new commitment") if chainfee.SatPerKWeight(bobChannel.channelState.LocalCommitment.FeePerKw) == fee { t.Fatalf("bob's feePerKw was unexpectedly locked in") @@ -2527,9 +2335,7 @@ func TestUpdateFeeConcurrentSig(t *testing.T) { // Bob can revoke the prior commitment he had. This should lock in the // fee update for him. _, _, err = bobChannel.RevokeCurrentCommitment() - if err != nil { - t.Fatalf("unable to generate bob revocation: %v", err) - } + require.NoError(t, err, "unable to generate bob revocation") if chainfee.SatPerKWeight(bobChannel.channelState.LocalCommitment.FeePerKw) != fee { t.Fatalf("bob's feePerKw was not locked in") @@ -2548,9 +2354,7 @@ func TestUpdateFeeSenderCommits(t *testing.T) { aliceChannel, bobChannel, cleanUp, err := CreateTestChannels( channeldb.SingleFunderTweaklessBit, ) - if err != nil { - t.Fatalf("unable to create test channels: %v", err) - } + require.NoError(t, err, "unable to create test channels") defer cleanUp() paymentPreimage := bytes.Repeat([]byte{1}, 32) @@ -2580,17 +2384,13 @@ func TestUpdateFeeSenderCommits(t *testing.T) { // (the HTLC and the fee update), and everything acked by Bob (nothing // so far). aliceSig, aliceHtlcSigs, _, err := aliceChannel.SignNextCommitment() - if err != nil { - t.Fatalf("alice unable to sign commitment: %v", err) - } + require.NoError(t, err, "alice unable to sign commitment") // Bob receives this signature message, and verifies that it is // consistent with the state he had for Alice, including the received // HTLC and fee update. err = bobChannel.ReceiveNewCommitment(aliceSig, aliceHtlcSigs) - if err != nil { - t.Fatalf("bob unable to process alice's new commitment: %v", err) - } + require.NoError(t, err, "bob unable to process alice's new commitment") if chainfee.SatPerKWeight( bobChannel.channelState.LocalCommitment.FeePerKw, @@ -2602,9 +2402,7 @@ func TestUpdateFeeSenderCommits(t *testing.T) { // Bob can revoke the prior commitment he had. This should lock in the // fee update for him. bobRevocation, _, err := bobChannel.RevokeCurrentCommitment() - if err != nil { - t.Fatalf("unable to generate bob revocation: %v", err) - } + require.NoError(t, err, "unable to generate bob revocation") if chainfee.SatPerKWeight( bobChannel.channelState.LocalCommitment.FeePerKw, @@ -2616,24 +2414,18 @@ func TestUpdateFeeSenderCommits(t *testing.T) { // Bob commits to all updates he has received from Alice. This includes // the HTLC he received, and the fee update. bobSig, bobHtlcSigs, _, err := bobChannel.SignNextCommitment() - if err != nil { - t.Fatalf("bob unable to sign alice's commitment: %v", err) - } + require.NoError(t, err, "bob unable to sign alice's commitment") // Alice receives the revocation of the old one, and can now assume // that Bob's received everything up to the signature she sent, // including the HTLC and fee update. 
_, _, _, _, err = aliceChannel.ReceiveRevocation(bobRevocation) - if err != nil { - t.Fatalf("alice unable to process bob's revocation: %v", err) - } + require.NoError(t, err, "alice unable to process bob's revocation") // Alice receives new signature from Bob, and assumes this covers the // changes. err = aliceChannel.ReceiveNewCommitment(bobSig, bobHtlcSigs) - if err != nil { - t.Fatalf("alice unable to process bob's new commitment: %v", err) - } + require.NoError(t, err, "alice unable to process bob's new commitment") if chainfee.SatPerKWeight( aliceChannel.channelState.LocalCommitment.FeePerKw, @@ -2645,9 +2437,7 @@ func TestUpdateFeeSenderCommits(t *testing.T) { // Alice can revoke the old commitment, which will lock in the fee // update. aliceRevocation, _, err := aliceChannel.RevokeCurrentCommitment() - if err != nil { - t.Fatalf("unable to revoke alice channel: %v", err) - } + require.NoError(t, err, "unable to revoke alice channel") if chainfee.SatPerKWeight( aliceChannel.channelState.LocalCommitment.FeePerKw, @@ -2658,9 +2448,7 @@ func TestUpdateFeeSenderCommits(t *testing.T) { // Bob receives revocation from Alice. _, _, _, _, err = bobChannel.ReceiveRevocation(aliceRevocation) - if err != nil { - t.Fatalf("bob unable to process alice's revocation: %v", err) - } + require.NoError(t, err, "bob unable to process alice's revocation") } @@ -2676,9 +2464,7 @@ func TestUpdateFeeReceiverCommits(t *testing.T) { aliceChannel, bobChannel, cleanUp, err := CreateTestChannels( channeldb.SingleFunderTweaklessBit, ) - if err != nil { - t.Fatalf("unable to create test channels: %v", err) - } + require.NoError(t, err, "unable to create test channels") defer cleanUp() paymentPreimage := bytes.Repeat([]byte{1}, 32) @@ -2708,45 +2494,33 @@ func TestUpdateFeeReceiverCommits(t *testing.T) { // does not commit to the received HTLC and fee update, since Alice // cannot know if he has received them. bobSig, bobHtlcSigs, _, err := bobChannel.SignNextCommitment() - if err != nil { - t.Fatalf("alice unable to sign commitment: %v", err) - } + require.NoError(t, err, "alice unable to sign commitment") // Alice receives this signature message, and verifies that it is // consistent with the remote state, not including any of the updates. err = aliceChannel.ReceiveNewCommitment(bobSig, bobHtlcSigs) - if err != nil { - t.Fatalf("bob unable to process alice's new commitment: %v", err) - } + require.NoError(t, err, "bob unable to process alice's new commitment") // Alice can revoke the prior commitment she had, this will ack // everything received before last commitment signature, but in this // case that is nothing. aliceRevocation, _, err := aliceChannel.RevokeCurrentCommitment() - if err != nil { - t.Fatalf("unable to generate bob revocation: %v", err) - } + require.NoError(t, err, "unable to generate bob revocation") // Bob receives the revocation of the old commitment _, _, _, _, err = bobChannel.ReceiveRevocation(aliceRevocation) - if err != nil { - t.Fatalf("alice unable to process bob's revocation: %v", err) - } + require.NoError(t, err, "alice unable to process bob's revocation") // Alice will sign next commitment. Since she sent the revocation, she // also ack'ed everything received, but in this case this is nothing. // Since she sent the two updates, this signature will cover those two. 
aliceSig, aliceHtlcSigs, _, err := aliceChannel.SignNextCommitment() - if err != nil { - t.Fatalf("bob unable to sign alice's commitment: %v", err) - } + require.NoError(t, err, "bob unable to sign alice's commitment") // Bob gets the signature for the new commitment from Alice. He assumes // this covers everything received from alice, including the two updates. err = bobChannel.ReceiveNewCommitment(aliceSig, aliceHtlcSigs) - if err != nil { - t.Fatalf("alice unable to process bob's new commitment: %v", err) - } + require.NoError(t, err, "alice unable to process bob's new commitment") if chainfee.SatPerKWeight( bobChannel.channelState.LocalCommitment.FeePerKw, @@ -2759,9 +2533,7 @@ func TestUpdateFeeReceiverCommits(t *testing.T) { // received, including the HTLC and fee update. This will lock in the // fee update for bob. bobRevocation, _, err := bobChannel.RevokeCurrentCommitment() - if err != nil { - t.Fatalf("unable to revoke alice channel: %v", err) - } + require.NoError(t, err, "unable to revoke alice channel") if chainfee.SatPerKWeight( bobChannel.channelState.LocalCommitment.FeePerKw, @@ -2773,23 +2545,17 @@ func TestUpdateFeeReceiverCommits(t *testing.T) { // Bob will send a new signature, which will cover what he just acked: // the HTLC and fee update. bobSig, bobHtlcSigs, _, err = bobChannel.SignNextCommitment() - if err != nil { - t.Fatalf("alice unable to sign commitment: %v", err) - } + require.NoError(t, err, "alice unable to sign commitment") // Alice receives revocation from Bob, and can now be sure that Bob // received the two updates, and they are considered locked in. _, _, _, _, err = aliceChannel.ReceiveRevocation(bobRevocation) - if err != nil { - t.Fatalf("bob unable to process alice's revocation: %v", err) - } + require.NoError(t, err, "bob unable to process alice's revocation") // Alice will receive the signature from Bob, which will cover what was // just acked by his revocation. err = aliceChannel.ReceiveNewCommitment(bobSig, bobHtlcSigs) - if err != nil { - t.Fatalf("alice unable to process bob's new commitment: %v", err) - } + require.NoError(t, err, "alice unable to process bob's new commitment") if chainfee.SatPerKWeight( aliceChannel.channelState.LocalCommitment.FeePerKw, @@ -2801,9 +2567,7 @@ func TestUpdateFeeReceiverCommits(t *testing.T) { // After Alice now revokes her old commitment, the fee update should // lock in. aliceRevocation, _, err = aliceChannel.RevokeCurrentCommitment() - if err != nil { - t.Fatalf("unable to generate bob revocation: %v", err) - } + require.NoError(t, err, "unable to generate bob revocation") if chainfee.SatPerKWeight( aliceChannel.channelState.LocalCommitment.FeePerKw, @@ -2814,9 +2578,7 @@ func TestUpdateFeeReceiverCommits(t *testing.T) { // Bob receives revocation from Alice. 
_, _, _, _, err = bobChannel.ReceiveRevocation(aliceRevocation) - if err != nil { - t.Fatalf("bob unable to process alice's revocation: %v", err) - } + require.NoError(t, err, "bob unable to process alice's revocation") } // TestUpdateFeeReceiverSendsUpdate tests that receiving a fee update as channel @@ -2831,9 +2593,7 @@ func TestUpdateFeeReceiverSendsUpdate(t *testing.T) { aliceChannel, bobChannel, cleanUp, err := CreateTestChannels( channeldb.SingleFunderTweaklessBit, ) - if err != nil { - t.Fatalf("unable to create test channels: %v", err) - } + require.NoError(t, err, "unable to create test channels") defer cleanUp() // Since Alice is the channel initiator, she should fail when receiving @@ -2862,9 +2622,7 @@ func TestUpdateFeeMultipleUpdates(t *testing.T) { aliceChannel, bobChannel, cleanUp, err := CreateTestChannels( channeldb.SingleFunderTweaklessBit, ) - if err != nil { - t.Fatalf("unable to create test channels: %v", err) - } + require.NoError(t, err, "unable to create test channels") defer cleanUp() // Simulate Alice sending update fee message to bob. @@ -2879,9 +2637,7 @@ func TestUpdateFeeMultipleUpdates(t *testing.T) { // (the HTLC and the fee update), and everything acked by Bob (nothing // so far). aliceSig, aliceHtlcSigs, _, err := aliceChannel.SignNextCommitment() - if err != nil { - t.Fatalf("alice unable to sign commitment: %v", err) - } + require.NoError(t, err, "alice unable to sign commitment") bobChannel.ReceiveUpdateFee(fee1) bobChannel.ReceiveUpdateFee(fee2) @@ -2891,9 +2647,7 @@ func TestUpdateFeeMultipleUpdates(t *testing.T) { // consistent with the state he had for Alice, including the received // HTLC and fee update. err = bobChannel.ReceiveNewCommitment(aliceSig, aliceHtlcSigs) - if err != nil { - t.Fatalf("bob unable to process alice's new commitment: %v", err) - } + require.NoError(t, err, "bob unable to process alice's new commitment") if chainfee.SatPerKWeight( bobChannel.channelState.LocalCommitment.FeePerKw, @@ -2917,9 +2671,7 @@ func TestUpdateFeeMultipleUpdates(t *testing.T) { // Bob can revoke the prior commitment he had. This should lock in the // fee update for him. bobRevocation, _, err := bobChannel.RevokeCurrentCommitment() - if err != nil { - t.Fatalf("unable to generate bob revocation: %v", err) - } + require.NoError(t, err, "unable to generate bob revocation") if chainfee.SatPerKWeight( bobChannel.channelState.LocalCommitment.FeePerKw, @@ -2931,17 +2683,13 @@ func TestUpdateFeeMultipleUpdates(t *testing.T) { // Bob commits to all updates he has received from Alice. This includes // the HTLC he received, and the fee update. bobSig, bobHtlcSigs, _, err := bobChannel.SignNextCommitment() - if err != nil { - t.Fatalf("bob unable to sign alice's commitment: %v", err) - } + require.NoError(t, err, "bob unable to sign alice's commitment") // Alice receives the revocation of the old one, and can now assume that // Bob's received everything up to the signature she sent, including the // HTLC and fee update. _, _, _, _, err = aliceChannel.ReceiveRevocation(bobRevocation) - if err != nil { - t.Fatalf("alice unable to process bob's revocation: %v", err) - } + require.NoError(t, err, "alice unable to process bob's revocation") // Alice receives new signature from Bob, and assumes this covers the // changes. @@ -2959,9 +2707,7 @@ func TestUpdateFeeMultipleUpdates(t *testing.T) { // Alice can revoke the old commitment, which will lock in the fee // update. 
aliceRevocation, _, err := aliceChannel.RevokeCurrentCommitment() - if err != nil { - t.Fatalf("unable to revoke alice channel: %v", err) - } + require.NoError(t, err, "unable to revoke alice channel") if chainfee.SatPerKWeight( aliceChannel.channelState.LocalCommitment.FeePerKw, @@ -2972,9 +2718,7 @@ func TestUpdateFeeMultipleUpdates(t *testing.T) { // Bob receives revocation from Alice. _, _, _, _, err = bobChannel.ReceiveRevocation(aliceRevocation) - if err != nil { - t.Fatalf("bob unable to process alice's revocation: %v", err) - } + require.NoError(t, err, "bob unable to process alice's revocation") } // TestAddHTLCNegativeBalance tests that if enough HTLC's are added to the @@ -2988,9 +2732,7 @@ func TestAddHTLCNegativeBalance(t *testing.T) { aliceChannel, _, cleanUp, err := CreateTestChannels( channeldb.SingleFunderTweaklessBit, ) - if err != nil { - t.Fatalf("unable to create test channels: %v", err) - } + require.NoError(t, err, "unable to create test channels") defer cleanUp() // We set the channel reserve to 0, such that we can add HTLCs all the @@ -3071,9 +2813,7 @@ func TestChanSyncFullySynced(t *testing.T) { aliceChannel, bobChannel, cleanUp, err := CreateTestChannels( channeldb.SingleFunderTweaklessBit, ) - if err != nil { - t.Fatalf("unable to create test channels: %v", err) - } + require.NoError(t, err, "unable to create test channels") defer cleanUp() // If we exchange channel sync messages from the get-go, then both @@ -3091,13 +2831,9 @@ func TestChanSyncFullySynced(t *testing.T) { Expiry: uint32(5), } aliceHtlcIndex, err := aliceChannel.AddHTLC(htlc, nil) - if err != nil { - t.Fatalf("unable to add htlc: %v", err) - } + require.NoError(t, err, "unable to add htlc") bobHtlcIndex, err := bobChannel.ReceiveHTLC(htlc) - if err != nil { - t.Fatalf("unable to recv htlc: %v", err) - } + require.NoError(t, err, "unable to recv htlc") // Then we'll initiate a state transition to lock in this new HTLC. if err := ForceStateTransition(aliceChannel, bobChannel); err != nil { @@ -3111,13 +2847,9 @@ func TestChanSyncFullySynced(t *testing.T) { // If bob settles the HTLC, and then initiates a state transition, they // should both still think that they're in sync. err = bobChannel.SettleHTLC(paymentPreimage, bobHtlcIndex, nil, nil, nil) - if err != nil { - t.Fatalf("unable to settle htlc: %v", err) - } + require.NoError(t, err, "unable to settle htlc") err = aliceChannel.ReceiveHTLCSettle(paymentPreimage, aliceHtlcIndex) - if err != nil { - t.Fatalf("unable to settle htlc: %v", err) - } + require.NoError(t, err, "unable to settle htlc") // Next, we'll complete Bob's state transition, and assert again that // they think they're fully synced. 
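The fetch-channels-and-rebuild dance converted in TestStateUpdatePersistence above reappears in the next hunk for TestChanSyncFullySynced. It is essentially what the existing restartChannel helper does elsewhere in this file; a sketch of the same idea expressed with require (the helper name and the single-open-channel assumption are illustrative, not part of this patch):

	func reloadChannel(t *testing.T,
		channel *LightningChannel) *LightningChannel {

		t.Helper()

		// Re-fetch the channel's persisted state from disk...
		nodePub := channel.channelState.IdentityPub
		nodeChans, err := channel.channelState.Db.FetchOpenChannels(
			nodePub,
		)
		require.NoError(t, err, "unable to fetch channel")
		require.Len(t, nodeChans, 1, "expected one open channel")

		// ...and rebuild the in-memory channel on top of it.
		newChannel, err := NewLightningChannel(
			channel.Signer, nodeChans[0], channel.sigPool,
		)
		require.NoError(t, err, "unable to create new channel")

		return newChannel
	}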
@@ -3132,27 +2864,19 @@ func TestChanSyncFullySynced(t *testing.T) { aliceChannels, err := aliceChannel.channelState.Db.FetchOpenChannels( alicePub, ) - if err != nil { - t.Fatalf("unable to fetch channel: %v", err) - } + require.NoError(t, err, "unable to fetch channel") bobPub := bobChannel.channelState.IdentityPub bobChannels, err := bobChannel.channelState.Db.FetchOpenChannels(bobPub) - if err != nil { - t.Fatalf("unable to fetch channel: %v", err) - } + require.NoError(t, err, "unable to fetch channel") aliceChannelNew, err := NewLightningChannel( aliceChannel.Signer, aliceChannels[0], aliceChannel.sigPool, ) - if err != nil { - t.Fatalf("unable to create new channel: %v", err) - } + require.NoError(t, err, "unable to create new channel") bobChannelNew, err := NewLightningChannel( bobChannel.Signer, bobChannels[0], bobChannel.sigPool, ) - if err != nil { - t.Fatalf("unable to create new channel: %v", err) - } + require.NoError(t, err, "unable to create new channel") assertNoChanSyncNeeded(t, aliceChannelNew, bobChannelNew) } @@ -3193,9 +2917,7 @@ func TestChanSyncOweCommitment(t *testing.T) { aliceChannel, bobChannel, cleanUp, err := CreateTestChannels( channeldb.SingleFunderTweaklessBit, ) - if err != nil { - t.Fatalf("unable to create test channels: %v", err) - } + require.NoError(t, err, "unable to create test channels") defer cleanUp() var fakeOnionBlob [lnwire.OnionPacketSize]byte @@ -3262,33 +2984,23 @@ func TestChanSyncOweCommitment(t *testing.T) { ExtraData: make([]byte, 0), } aliceHtlcIndex, err := aliceChannel.AddHTLC(aliceHtlc, nil) - if err != nil { - t.Fatalf("unable to add alice's htlc: %v", err) - } + require.NoError(t, err, "unable to add alice's htlc") bobHtlcIndex, err := bobChannel.ReceiveHTLC(aliceHtlc) - if err != nil { - t.Fatalf("unable to recv alice's htlc: %v", err) - } + require.NoError(t, err, "unable to recv alice's htlc") // Now we'll begin the core of the test itself. Alice will extend a new // commitment to Bob, but the connection drops before Bob can process // it. aliceSig, aliceHtlcSigs, _, err := aliceChannel.SignNextCommitment() - if err != nil { - t.Fatalf("unable to sign commitment: %v", err) - } + require.NoError(t, err, "unable to sign commitment") // Bob doesn't get this message so upon reconnection, they need to // synchronize. Alice should conclude that she owes Bob a commitment, // while Bob should think he's properly synchronized. aliceSyncMsg, err := aliceChannel.channelState.ChanSyncMsg() - if err != nil { - t.Fatalf("unable to produce chan sync msg: %v", err) - } + require.NoError(t, err, "unable to produce chan sync msg") bobSyncMsg, err := bobChannel.channelState.ChanSyncMsg() - if err != nil { - t.Fatalf("unable to produce chan sync msg: %v", err) - } + require.NoError(t, err, "unable to produce chan sync msg") // This is a helper function that asserts Alice concludes that she // needs to retransmit the exact commitment that we failed to send @@ -3371,9 +3083,7 @@ func TestChanSyncOweCommitment(t *testing.T) { // From Bob's Pov he has nothing else to send, so he should conclude he // has no further action remaining. 
bobMsgsToSend, _, _, err := bobChannel.ProcessChanSyncMsg(aliceSyncMsg) - if err != nil { - t.Fatalf("unable to process chan sync msg: %v", err) - } + require.NoError(t, err, "unable to process chan sync msg") if len(bobMsgsToSend) != 0 { t.Fatalf("expected bob to send %v messages instead will "+ "send %v: %v", 0, len(bobMsgsToSend), @@ -3383,9 +3093,7 @@ func TestChanSyncOweCommitment(t *testing.T) { // If we restart Alice, she should still conclude that she needs to // send the exact same set of messages. aliceChannel, err = restartChannel(aliceChannel) - if err != nil { - t.Fatalf("unable to restart alice: %v", err) - } + require.NoError(t, err, "unable to restart alice") assertAliceCommitRetransmit() // TODO(roasbeef): restart bob as well??? @@ -3394,33 +3102,19 @@ func TestChanSyncOweCommitment(t *testing.T) { // without any issues, resulting in Alice settling the 3 htlc's, and // adding one of her own. err = bobChannel.ReceiveNewCommitment(aliceSig, aliceHtlcSigs) - if err != nil { - t.Fatalf("bob unable to process alice's commitment: %v", err) - } + require.NoError(t, err, "bob unable to process alice's commitment") bobRevocation, _, err := bobChannel.RevokeCurrentCommitment() - if err != nil { - t.Fatalf("unable to revoke bob commitment: %v", err) - } + require.NoError(t, err, "unable to revoke bob commitment") bobSig, bobHtlcSigs, _, err := bobChannel.SignNextCommitment() - if err != nil { - t.Fatalf("bob unable to sign commitment: %v", err) - } + require.NoError(t, err, "bob unable to sign commitment") _, _, _, _, err = aliceChannel.ReceiveRevocation(bobRevocation) - if err != nil { - t.Fatalf("alice unable to recv revocation: %v", err) - } + require.NoError(t, err, "alice unable to recv revocation") err = aliceChannel.ReceiveNewCommitment(bobSig, bobHtlcSigs) - if err != nil { - t.Fatalf("alice unable to rev bob's commitment: %v", err) - } + require.NoError(t, err, "alice unable to rev bob's commitment") aliceRevocation, _, err := aliceChannel.RevokeCurrentCommitment() - if err != nil { - t.Fatalf("alice unable to revoke commitment: %v", err) - } + require.NoError(t, err, "alice unable to revoke commitment") _, _, _, _, err = bobChannel.ReceiveRevocation(aliceRevocation) - if err != nil { - t.Fatalf("bob unable to recv revocation: %v", err) - } + require.NoError(t, err, "bob unable to recv revocation") // At this point, we'll now assert that their log states are what we // expect. @@ -3466,13 +3160,9 @@ func TestChanSyncOweCommitment(t *testing.T) { // We'll conclude the test by having Bob settle Alice's HTLC, then // initiate a state transition. 
err = bobChannel.SettleHTLC(alicePreimage, bobHtlcIndex, nil, nil, nil) - if err != nil { - t.Fatalf("unable to settle htlc: %v", err) - } + require.NoError(t, err, "unable to settle htlc") err = aliceChannel.ReceiveHTLCSettle(alicePreimage, aliceHtlcIndex) - if err != nil { - t.Fatalf("unable to settle htlc: %v", err) - } + require.NoError(t, err, "unable to settle htlc") if err := ForceStateTransition(bobChannel, aliceChannel); err != nil { t.Fatalf("unable to complete bob's state transition: %v", err) } @@ -3508,9 +3198,7 @@ func TestChanSyncOweCommitmentPendingRemote(t *testing.T) { aliceChannel, bobChannel, cleanUp, err := CreateTestChannels( channeldb.SingleFunderTweaklessBit, ) - if err != nil { - t.Fatalf("unable to create test channels: %v", err) - } + require.NoError(t, err, "unable to create test channels") defer cleanUp() var fakeOnionBlob [lnwire.OnionPacketSize]byte @@ -3590,15 +3278,11 @@ func TestChanSyncOweCommitmentPendingRemote(t *testing.T) { // We restart Bob. This should have no impact on further messages that // are generated. bobChannel, err = restartChannel(bobChannel) - if err != nil { - t.Fatalf("unable to restart bob: %v", err) - } + require.NoError(t, err, "unable to restart bob") // Bob signs the commitment he owes. bobCommit, bobHtlcSigs, _, err := bobChannel.SignNextCommitment() - if err != nil { - t.Fatalf("unable to sign commitment: %v", err) - } + require.NoError(t, err, "unable to sign commitment") // This commitment is expected to contain no htlcs anymore. if len(bobHtlcSigs) != 0 { @@ -3633,9 +3317,7 @@ func TestChanSyncOweRevocation(t *testing.T) { aliceChannel, bobChannel, cleanUp, err := CreateTestChannels( channeldb.SingleFunderTweaklessBit, ) - if err != nil { - t.Fatalf("unable to create test channels: %v", err) - } + require.NoError(t, err, "unable to create test channels") defer cleanUp() chanID := lnwire.NewChanIDFromOutPoint( @@ -3655,13 +3337,9 @@ func TestChanSyncOweRevocation(t *testing.T) { Expiry: uint32(10), } bobHtlcIndex, err := bobChannel.AddHTLC(bobHtlc, nil) - if err != nil { - t.Fatalf("unable to add bob's htlc: %v", err) - } + require.NoError(t, err, "unable to add bob's htlc") aliceHtlcIndex, err := aliceChannel.ReceiveHTLC(bobHtlc) - if err != nil { - t.Fatalf("unable to recv bob's htlc: %v", err) - } + require.NoError(t, err, "unable to recv bob's htlc") if err := ForceStateTransition(bobChannel, aliceChannel); err != nil { t.Fatalf("unable to complete bob's state transition: %v", err) } @@ -3669,13 +3347,9 @@ func TestChanSyncOweRevocation(t *testing.T) { // Next, Alice will settle that single HTLC, then _begin_ a // state transition. err = aliceChannel.SettleHTLC(bobPreimage, aliceHtlcIndex, nil, nil, nil) - if err != nil { - t.Fatalf("unable to settle htlc: %v", err) - } + require.NoError(t, err, "unable to settle htlc") err = bobChannel.ReceiveHTLCSettle(bobPreimage, bobHtlcIndex) - if err != nil { - t.Fatalf("unable to settle htlc: %v", err) - } + require.NoError(t, err, "unable to settle htlc") // We'll model the state transition right up until Alice needs to send // her revocation message to complete the state transition. // Alice signs the next state, then Bob receives and sends his // revocation message. 
aliceSig, aliceHtlcSigs, _, err := aliceChannel.SignNextCommitment() - if err != nil { - t.Fatalf("unable to sign commitment: %v", err) - } + require.NoError(t, err, "unable to sign commitment") err = bobChannel.ReceiveNewCommitment(aliceSig, aliceHtlcSigs) - if err != nil { - t.Fatalf("bob unable to process alice's commitment: %v", err) - } + require.NoError(t, err, "bob unable to process alice's commitment") bobRevocation, _, err := bobChannel.RevokeCurrentCommitment() - if err != nil { - t.Fatalf("unable to revoke bob commitment: %v", err) - } + require.NoError(t, err, "unable to revoke bob commitment") bobSig, bobHtlcSigs, _, err := bobChannel.SignNextCommitment() - if err != nil { - t.Fatalf("bob unable to sign commitment: %v", err) - } + require.NoError(t, err, "bob unable to sign commitment") _, _, _, _, err = aliceChannel.ReceiveRevocation(bobRevocation) - if err != nil { - t.Fatalf("alice unable to recv revocation: %v", err) - } + require.NoError(t, err, "alice unable to recv revocation") err = aliceChannel.ReceiveNewCommitment(bobSig, bobHtlcSigs) - if err != nil { - t.Fatalf("alice unable to rev bob's commitment: %v", err) - } + require.NoError(t, err, "alice unable to rev bob's commitment") // At this point, we'll simulate the connection breaking down by Bob's // lack of knowledge of the revocation message that Alice just sent. aliceRevocation, _, err := aliceChannel.RevokeCurrentCommitment() - if err != nil { - t.Fatalf("alice unable to revoke commitment: %v", err) - } + require.NoError(t, err, "alice unable to revoke commitment") // If we fetch the channel sync messages at this state, then Alice // should report that she owes Bob a revocation message, while Bob // thinks they're fully in sync. aliceSyncMsg, err := aliceChannel.channelState.ChanSyncMsg() - if err != nil { - t.Fatalf("unable to produce chan sync msg: %v", err) - } + require.NoError(t, err, "unable to produce chan sync msg") bobSyncMsg, err := bobChannel.channelState.ChanSyncMsg() - if err != nil { - t.Fatalf("unable to produce chan sync msg: %v", err) - } + require.NoError(t, err, "unable to produce chan sync msg") assertAliceOwesRevoke := func() { aliceMsgsToSend, _, _, err := aliceChannel.ProcessChanSyncMsg(bobSyncMsg) @@ -3759,9 +3415,7 @@ func TestChanSyncOweRevocation(t *testing.T) { // From Bob's PoV he shouldn't think that he owes Alice any messages. bobMsgsToSend, _, _, err := bobChannel.ProcessChanSyncMsg(aliceSyncMsg) - if err != nil { - t.Fatalf("unable to process chan sync msg: %v", err) - } + require.NoError(t, err, "unable to process chan sync msg") if len(bobMsgsToSend) != 0 { t.Fatalf("expected bob to not retransmit, instead has: %v", spew.Sdump(bobMsgsToSend)) } @@ -3774,18 +3428,14 @@ func TestChanSyncOweRevocation(t *testing.T) { // If we restart Alice, then she should still decide that she owes a // revocation message to Bob. aliceChannel, err = restartChannel(aliceChannel) - if err != nil { - t.Fatalf("unable to restart alice: %v", err) - } + require.NoError(t, err, "unable to restart alice") assertAliceOwesRevoke() // TODO(roasbeef): restart bob too??? // We'll continue by allowing Bob to process Alice's revocation message. _, _, _, _, err = bobChannel.ReceiveRevocation(aliceRevocation) - if err != nil { - t.Fatalf("bob unable to recv revocation: %v", err) - } + require.NoError(t, err, "bob unable to recv revocation") // Finally, Alice will add an HTLC of her own such that we assert the // channel can continue to receive updates. 
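The sync tests above and below all spell out the commitment dance step by step, because each test needs to interrupt it at a different point. For the uninterrupted halves, a small helper in the spirit of the existing ForceStateTransition could absorb the recurring two-step pattern (a sketch only; this helper is not part of the patch):

	func signAndReceive(t *testing.T, from, to *LightningChannel) {
		t.Helper()

		// One direction of the dance: sign the next commitment and
		// have the remote party process it.
		sig, htlcSigs, _, err := from.SignNextCommitment()
		require.NoError(t, err, "unable to sign commitment")

		err = to.ReceiveNewCommitment(sig, htlcSigs)
		require.NoError(t, err, "unable to process commitment")
	}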
@@ -3825,9 +3475,7 @@ func TestChanSyncOweRevocationAndCommit(t *testing.T) { aliceChannel, bobChannel, cleanUp, err := CreateTestChannels( channeldb.SingleFunderTweaklessBit, ) - if err != nil { - t.Fatalf("unable to create test channels: %v", err) - } + require.NoError(t, err, "unable to create test channels") defer cleanUp() htlcAmt := lnwire.NewMSatFromSatoshis(20000) @@ -3843,13 +3491,9 @@ func TestChanSyncOweRevocationAndCommit(t *testing.T) { Expiry: uint32(10), } bobHtlcIndex, err := bobChannel.AddHTLC(bobHtlc, nil) - if err != nil { - t.Fatalf("unable to add bob's htlc: %v", err) - } + require.NoError(t, err, "unable to add bob's htlc") aliceHtlcIndex, err := aliceChannel.ReceiveHTLC(bobHtlc) - if err != nil { - t.Fatalf("unable to recv bob's htlc: %v", err) - } + require.NoError(t, err, "unable to recv bob's htlc") if err := ForceStateTransition(bobChannel, aliceChannel); err != nil { t.Fatalf("unable to complete bob's state transition: %v", err) } @@ -3857,52 +3501,34 @@ func TestChanSyncOweRevocationAndCommit(t *testing.T) { // Next, Alice will settle that incoming HTLC, then we'll start the // core of the test itself. err = aliceChannel.SettleHTLC(bobPreimage, aliceHtlcIndex, nil, nil, nil) - if err != nil { - t.Fatalf("unable to settle htlc: %v", err) - } + require.NoError(t, err, "unable to settle htlc") err = bobChannel.ReceiveHTLCSettle(bobPreimage, bobHtlcIndex) - if err != nil { - t.Fatalf("unable to settle htlc: %v", err) - } + require.NoError(t, err, "unable to settle htlc") // Progressing the exchange: Alice will send her signature, Bob will // receive, send a revocation and also a signature for Alice's state. aliceSig, aliceHtlcSigs, _, err := aliceChannel.SignNextCommitment() - if err != nil { - t.Fatalf("unable to sign commitment: %v", err) - } + require.NoError(t, err, "unable to sign commitment") err = bobChannel.ReceiveNewCommitment(aliceSig, aliceHtlcSigs) - if err != nil { - t.Fatalf("bob unable to process alice's commitment: %v", err) - } + require.NoError(t, err, "bob unable to process alice's commitment") // Bob generates the revoke and sig message, but the messages don't // reach Alice before the connection dies. bobRevocation, _, err := bobChannel.RevokeCurrentCommitment() - if err != nil { - t.Fatalf("unable to revoke bob commitment: %v", err) - } + require.NoError(t, err, "unable to revoke bob commitment") bobSig, bobHtlcSigs, _, err := bobChannel.SignNextCommitment() - if err != nil { - t.Fatalf("bob unable to sign commitment: %v", err) - } + require.NoError(t, err, "bob unable to sign commitment") // If we now attempt to resync, then Alice should conclude that she // doesn't need any further updates, while Bob concludes that he needs // to re-send both his revocation and commit sig message. 
aliceSyncMsg, err := aliceChannel.channelState.ChanSyncMsg() - if err != nil { - t.Fatalf("unable to produce chan sync msg: %v", err) - } + require.NoError(t, err, "unable to produce chan sync msg") bobSyncMsg, err := bobChannel.channelState.ChanSyncMsg() - if err != nil { - t.Fatalf("unable to produce chan sync msg: %v", err) - } + require.NoError(t, err, "unable to produce chan sync msg") aliceMsgsToSend, _, _, err := aliceChannel.ProcessChanSyncMsg(bobSyncMsg) - if err != nil { - t.Fatalf("unable to process chan sync msg: %v", err) - } + require.NoError(t, err, "unable to process chan sync msg") if len(aliceMsgsToSend) != 0 { t.Fatalf("expected alice to not retransmit, instead she's "+ "sending: %v", spew.Sdump(aliceMsgsToSend)) @@ -3956,29 +3582,19 @@ func TestChanSyncOweRevocationAndCommit(t *testing.T) { // At this point we simulate the connection failing with a restart from // Bob. He should still re-send the exact same set of messages. bobChannel, err = restartChannel(bobChannel) - if err != nil { - t.Fatalf("unable to restart channel: %v", err) - } + require.NoError(t, err, "unable to restart channel") assertBobSendsRevokeAndCommit() // We'll now finish the state transition by having Alice process both // messages, and send her final revocation. _, _, _, _, err = aliceChannel.ReceiveRevocation(bobRevocation) - if err != nil { - t.Fatalf("alice unable to recv revocation: %v", err) - } + require.NoError(t, err, "alice unable to recv revocation") err = aliceChannel.ReceiveNewCommitment(bobSig, bobHtlcSigs) - if err != nil { - t.Fatalf("alice unable to rev bob's commitment: %v", err) - } + require.NoError(t, err, "alice unable to rev bob's commitment") aliceRevocation, _, err := aliceChannel.RevokeCurrentCommitment() - if err != nil { - t.Fatalf("alice unable to revoke commitment: %v", err) - } + require.NoError(t, err, "alice unable to revoke commitment") _, _, _, _, err = bobChannel.ReceiveRevocation(aliceRevocation) - if err != nil { - t.Fatalf("bob unable to recv revocation: %v", err) - } + require.NoError(t, err, "bob unable to recv revocation") } // TestChanSyncOweRevocationAndCommitForceTransition tests that if Alice @@ -3996,9 +3612,7 @@ func TestChanSyncOweRevocationAndCommitForceTransition(t *testing.T) { aliceChannel, bobChannel, cleanUp, err := CreateTestChannels( channeldb.SingleFunderTweaklessBit, ) - if err != nil { - t.Fatalf("unable to create test channels: %v", err) - } + require.NoError(t, err, "unable to create test channels") defer cleanUp() htlcAmt := lnwire.NewMSatFromSatoshis(20000) @@ -4015,13 +3629,9 @@ func TestChanSyncOweRevocationAndCommitForceTransition(t *testing.T) { Expiry: uint32(10), } bobHtlcIndex, err := bobChannel.AddHTLC(bobHtlc[0], nil) - if err != nil { - t.Fatalf("unable to add bob's htlc: %v", err) - } + require.NoError(t, err, "unable to add bob's htlc") aliceHtlcIndex, err := aliceChannel.ReceiveHTLC(bobHtlc[0]) - if err != nil { - t.Fatalf("unable to recv bob's htlc: %v", err) - } + require.NoError(t, err, "unable to recv bob's htlc") if err := ForceStateTransition(bobChannel, aliceChannel); err != nil { t.Fatalf("unable to complete bob's state transition: %v", err) } @@ -4038,84 +3648,56 @@ func TestChanSyncOweRevocationAndCommitForceTransition(t *testing.T) { ID: 1, } _, err = bobChannel.AddHTLC(bobHtlc[1], nil) - if err != nil { - t.Fatalf("unable to add bob's htlc: %v", err) - } + require.NoError(t, err, "unable to add bob's htlc") _, err = aliceChannel.ReceiveHTLC(bobHtlc[1]) - if err != nil { - t.Fatalf("unable to recv bob's htlc: 
%v", err) - } + require.NoError(t, err, "unable to recv bob's htlc") // Bob signs the new state update, and sends the signature to Alice. bobSig, bobHtlcSigs, _, err := bobChannel.SignNextCommitment() - if err != nil { - t.Fatalf("bob unable to sign commitment: %v", err) - } + require.NoError(t, err, "bob unable to sign commitment") err = aliceChannel.ReceiveNewCommitment(bobSig, bobHtlcSigs) - if err != nil { - t.Fatalf("alice unable to rev bob's commitment: %v", err) - } + require.NoError(t, err, "alice unable to rev bob's commitment") // Alice revokes her current state, but doesn't immediately send a // signature for Bob's updated state. Instead she will issue a new // update before sending a new CommitSig. This will lead to Alice's // local commit chain getting height > remote commit chain. aliceRevocation, _, err := aliceChannel.RevokeCurrentCommitment() - if err != nil { - t.Fatalf("alice unable to revoke commitment: %v", err) - } + require.NoError(t, err, "alice unable to revoke commitment") _, _, _, _, err = bobChannel.ReceiveRevocation(aliceRevocation) - if err != nil { - t.Fatalf("bob unable to recv revocation: %v", err) - } + require.NoError(t, err, "bob unable to recv revocation") // Next, Alice will settle that incoming HTLC, then we'll start the // core of the test itself. err = aliceChannel.SettleHTLC(bobPreimage, aliceHtlcIndex, nil, nil, nil) - if err != nil { - t.Fatalf("unable to settle htlc: %v", err) - } + require.NoError(t, err, "unable to settle htlc") err = bobChannel.ReceiveHTLCSettle(bobPreimage, bobHtlcIndex) - if err != nil { - t.Fatalf("unable to settle htlc: %v", err) - } + require.NoError(t, err, "unable to settle htlc") // Progressing the exchange: Alice will send her signature, with Bob // processing the new state locally. aliceSig, aliceHtlcSigs, _, err := aliceChannel.SignNextCommitment() - if err != nil { - t.Fatalf("unable to sign commitment: %v", err) - } + require.NoError(t, err, "unable to sign commitment") err = bobChannel.ReceiveNewCommitment(aliceSig, aliceHtlcSigs) - if err != nil { - t.Fatalf("bob unable to process alice's commitment: %v", err) - } + require.NoError(t, err, "bob unable to process alice's commitment") // Bob then sends his revocation message, but before Alice can process // it (and before he scan send his CommitSig message), then connection // dies. bobRevocation, _, err := bobChannel.RevokeCurrentCommitment() - if err != nil { - t.Fatalf("unable to revoke bob commitment: %v", err) - } + require.NoError(t, err, "unable to revoke bob commitment") // Now if we attempt to synchronize states at this point, Alice should // detect that she owes nothing, while Bob should re-send both his // RevokeAndAck as well as his commitment message. 
aliceSyncMsg, err := aliceChannel.channelState.ChanSyncMsg() - if err != nil { - t.Fatalf("unable to produce chan sync msg: %v", err) - } + require.NoError(t, err, "unable to produce chan sync msg") bobSyncMsg, err := bobChannel.channelState.ChanSyncMsg() - if err != nil { - t.Fatalf("unable to produce chan sync msg: %v", err) - } + require.NoError(t, err, "unable to produce chan sync msg") aliceMsgsToSend, _, _, err := aliceChannel.ProcessChanSyncMsg(bobSyncMsg) - if err != nil { - t.Fatalf("unable to process chan sync msg: %v", err) - } + require.NoError(t, err, "unable to process chan sync msg") if len(aliceMsgsToSend) != 0 { t.Fatalf("expected alice to not retransmit, instead she's "+ "sending: %v", spew.Sdump(aliceMsgsToSend)) @@ -4126,9 +3708,7 @@ func TestChanSyncOweRevocationAndCommitForceTransition(t *testing.T) { // message that he sends should be sufficient to finalize the state // transition. bobMsgsToSend, _, _, err := bobChannel.ProcessChanSyncMsg(aliceSyncMsg) - if err != nil { - t.Fatalf("unable to process chan sync msg: %v", err) - } + require.NoError(t, err, "unable to process chan sync msg") if len(bobMsgsToSend) != 2 { t.Fatalf("expected bob to send %v messages, instead "+ "sends: %v", 2, spew.Sdump(bobMsgsToSend)) @@ -4154,9 +3734,7 @@ func TestChanSyncOweRevocationAndCommitForceTransition(t *testing.T) { // At this point we simulate the connection failing with a restart from // Bob. He should still re-send the exact same set of messages. bobChannel, err = restartChannel(bobChannel) - if err != nil { - t.Fatalf("unable to restart channel: %v", err) - } + require.NoError(t, err, "unable to restart channel") if len(bobMsgsToSend) != 2 { t.Fatalf("expected bob to send %v messages, instead "+ "sends: %v", 2, spew.Sdump(bobMsgsToSend)) @@ -4196,23 +3774,15 @@ func TestChanSyncOweRevocationAndCommitForceTransition(t *testing.T) { // signature message to Alice, ending with Alice sending her revocation // message to Bob. _, _, _, _, err = aliceChannel.ReceiveRevocation(bobRevocation) - if err != nil { - t.Fatalf("alice unable to recv revocation: %v", err) - } + require.NoError(t, err, "alice unable to recv revocation") err = aliceChannel.ReceiveNewCommitment( bobSigMsg.CommitSig, bobSigMsg.HtlcSigs, ) - if err != nil { - t.Fatalf("alice unable to rev bob's commitment: %v", err) - } + require.NoError(t, err, "alice unable to rev bob's commitment") aliceRevocation, _, err = aliceChannel.RevokeCurrentCommitment() - if err != nil { - t.Fatalf("alice unable to revoke commitment: %v", err) - } + require.NoError(t, err, "alice unable to revoke commitment") _, _, _, _, err = bobChannel.ReceiveRevocation(aliceRevocation) - if err != nil { - t.Fatalf("bob unable to recv revocation: %v", err) - } + require.NoError(t, err, "bob unable to recv revocation") } // TestChanSyncFailure tests the various scenarios during channel sync where we @@ -4227,9 +3797,7 @@ func TestChanSyncFailure(t *testing.T) { aliceChannel, bobChannel, cleanUp, err := CreateTestChannels( channeldb.SingleFunderBit, ) - if err != nil { - t.Fatalf("unable to create test channels: %v", err) - } + require.NoError(t, err, "unable to create test channels") defer cleanUp() htlcAmt := lnwire.NewMSatFromSatoshis(20000) @@ -4363,9 +3931,7 @@ func TestChanSyncFailure(t *testing.T) { // Make a copy of Alice's state from the database at this point. 
aliceOld, err := restartChannel(aliceChannel) - if err != nil { - t.Fatalf("unable to restart channel: %v", err) - } + require.NoError(t, err, "unable to restart channel") // Advance the states. advanceState() @@ -4389,9 +3955,7 @@ func TestChanSyncFailure(t *testing.T) { // tell if she lost state, since Bob might be lying. She still should // be able to detect that chains cannot be synced. bobSyncMsg, err := bobChannel.channelState.ChanSyncMsg() - if err != nil { - t.Fatalf("unable to produce chan sync msg: %v", err) - } + require.NoError(t, err, "unable to produce chan sync msg") bobSyncMsg.LocalUnrevokedCommitPoint = nil _, _, _, err = aliceOld.ProcessChanSyncMsg(bobSyncMsg) if err != ErrCannotSyncCommitChains { @@ -4403,9 +3967,7 @@ func TestChanSyncFailure(t *testing.T) { // what Alice expect, she cannot tell for sure whether she lost state, // but should detect the desync. bobSyncMsg, err = bobChannel.channelState.ChanSyncMsg() - if err != nil { - t.Fatalf("unable to produce chan sync msg: %v", err) - } + require.NoError(t, err, "unable to produce chan sync msg") bobSyncMsg.NextLocalCommitHeight++ _, _, _, err = aliceChannel.ProcessChanSyncMsg(bobSyncMsg) if err != ErrCannotSyncCommitChains { @@ -4416,9 +3978,7 @@ func TestChanSyncFailure(t *testing.T) { // If Bob's NextLocalCommitHeight is lower than what Alice expects, Bob // probably lost state. bobSyncMsg, err = bobChannel.channelState.ChanSyncMsg() - if err != nil { - t.Fatalf("unable to produce chan sync msg: %v", err) - } + require.NoError(t, err, "unable to produce chan sync msg") bobSyncMsg.NextLocalCommitHeight-- _, _, _, err = aliceChannel.ProcessChanSyncMsg(bobSyncMsg) if err != ErrCommitSyncRemoteDataLoss { @@ -4429,15 +3989,11 @@ func TestChanSyncFailure(t *testing.T) { // If Alice and Bob's states are in sync, but Bob is sending the wrong // LocalUnrevokedCommitPoint, Alice should detect this. 
bobSyncMsg, err = bobChannel.channelState.ChanSyncMsg() - if err != nil { - t.Fatalf("unable to produce chan sync msg: %v", err) - } + require.NoError(t, err, "unable to produce chan sync msg") p := bobSyncMsg.LocalUnrevokedCommitPoint.SerializeCompressed() p[4] ^= 0x01 modCommitPoint, err := btcec.ParsePubKey(p) - if err != nil { - t.Fatalf("unable to parse pubkey: %v", err) - } + require.NoError(t, err, "unable to parse pubkey") bobSyncMsg.LocalUnrevokedCommitPoint = modCommitPoint _, _, _, err = aliceChannel.ProcessChanSyncMsg(bobSyncMsg) @@ -4458,9 +4014,7 @@ func TestChanSyncFailure(t *testing.T) { halfAdvance() bobSyncMsg, err = bobChannel.channelState.ChanSyncMsg() - if err != nil { - t.Fatalf("unable to produce chan sync msg: %v", err) - } + require.NoError(t, err, "unable to produce chan sync msg") bobSyncMsg.LocalUnrevokedCommitPoint = modCommitPoint _, _, _, err = aliceChannel.ProcessChanSyncMsg(bobSyncMsg) if err != ErrInvalidLocalUnrevokedCommitPoint { @@ -4482,9 +4036,7 @@ func TestFeeUpdateRejectInsaneFee(t *testing.T) { aliceChannel, _, cleanUp, err := CreateTestChannels( channeldb.SingleFunderTweaklessBit, ) - if err != nil { - t.Fatalf("unable to create test channels: %v", err) - } + require.NoError(t, err, "unable to create test channels") defer cleanUp() // Next, we'll try to add a fee rate to Alice which is 1,000,000x her @@ -4512,9 +4064,7 @@ func TestChannelRetransmissionFeeUpdate(t *testing.T) { aliceChannel, bobChannel, cleanUp, err := CreateTestChannels( channeldb.SingleFunderTweaklessBit, ) - if err != nil { - t.Fatalf("unable to create test channels: %v", err) - } + require.NoError(t, err, "unable to create test channels") defer cleanUp() // First, we'll fetch the current fee rate present within the @@ -4536,37 +4086,25 @@ func TestChannelRetransmissionFeeUpdate(t *testing.T) { // Now, Alice will send a new commitment to Bob, but we'll simulate a // connection failure, so Bob doesn't get her signature. aliceSig, aliceHtlcSigs, _, err := aliceChannel.SignNextCommitment() - if err != nil { - t.Fatalf("unable to sign commitment: %v", err) - } + require.NoError(t, err, "unable to sign commitment") // Restart both channels to simulate a connection restart. aliceChannel, err = restartChannel(aliceChannel) - if err != nil { - t.Fatalf("unable to restart alice: %v", err) - } + require.NoError(t, err, "unable to restart alice") bobChannel, err = restartChannel(bobChannel) - if err != nil { - t.Fatalf("unable to restart channel: %v", err) - } + require.NoError(t, err, "unable to restart channel") // Bob doesn't get this message so upon reconnection, they need to // synchronize. Alice should conclude that she owes Bob a commitment, // while Bob should think he's properly synchronized. aliceSyncMsg, err := aliceChannel.channelState.ChanSyncMsg() - if err != nil { - t.Fatalf("unable to produce chan sync msg: %v", err) - } + require.NoError(t, err, "unable to produce chan sync msg") bobSyncMsg, err := bobChannel.channelState.ChanSyncMsg() - if err != nil { - t.Fatalf("unable to produce chan sync msg: %v", err) - } + require.NoError(t, err, "unable to produce chan sync msg") // Bob should detect that he doesn't need to send anything to Alice. 
bobMsgsToSend, _, _, err := bobChannel.ProcessChanSyncMsg(aliceSyncMsg) - if err != nil { - t.Fatalf("unable to process chan sync msg: %v", err) - } + require.NoError(t, err, "unable to process chan sync msg") if len(bobMsgsToSend) != 0 { t.Fatalf("expected bob to send %v messages instead "+ "will send %v: %v", 0, len(bobMsgsToSend), @@ -4579,9 +4117,7 @@ func TestChannelRetransmissionFeeUpdate(t *testing.T) { aliceMsgsToSend, _, _, err := aliceChannel.ProcessChanSyncMsg( bobSyncMsg, ) - if err != nil { - t.Fatalf("unable to process chan sync msg: %v", err) - } + require.NoError(t, err, "unable to process chan sync msg") if len(aliceMsgsToSend) != 2 { t.Fatalf("expected alice to send %v messages instead "+ "will send %v: %v", 2, len(aliceMsgsToSend), @@ -4631,33 +4167,19 @@ func TestChannelRetransmissionFeeUpdate(t *testing.T) { } err = bobChannel.ReceiveNewCommitment(aliceSig, aliceHtlcSigs) - if err != nil { - t.Fatalf("bob unable to process alice's commitment: %v", err) - } + require.NoError(t, err, "bob unable to process alice's commitment") bobRevocation, _, err := bobChannel.RevokeCurrentCommitment() - if err != nil { - t.Fatalf("unable to revoke bob commitment: %v", err) - } + require.NoError(t, err, "unable to revoke bob commitment") bobSig, bobHtlcSigs, _, err := bobChannel.SignNextCommitment() - if err != nil { - t.Fatalf("bob unable to sign commitment: %v", err) - } + require.NoError(t, err, "bob unable to sign commitment") _, _, _, _, err = aliceChannel.ReceiveRevocation(bobRevocation) - if err != nil { - t.Fatalf("alice unable to recv revocation: %v", err) - } + require.NoError(t, err, "alice unable to recv revocation") err = aliceChannel.ReceiveNewCommitment(bobSig, bobHtlcSigs) - if err != nil { - t.Fatalf("alice unable to rev bob's commitment: %v", err) - } + require.NoError(t, err, "alice unable to recv bob's commitment") aliceRevocation, _, err := aliceChannel.RevokeCurrentCommitment() - if err != nil { - t.Fatalf("alice unable to revoke commitment: %v", err) - } + require.NoError(t, err, "alice unable to revoke commitment") _, _, _, _, err = bobChannel.ReceiveRevocation(aliceRevocation) - if err != nil { - t.Fatalf("bob unable to recv revocation: %v", err) - } + require.NoError(t, err, "bob unable to recv revocation") // Both parties should now have the latest fee rate locked-in. if chainfee.SatPerKWeight( @@ -4705,9 +4227,7 @@ func TestFeeUpdateOldDiskFormat(t *testing.T) { aliceChannel, bobChannel, cleanUp, err := CreateTestChannels( channeldb.SingleFunderTweaklessBit, ) - if err != nil { - t.Fatalf("unable to create test channels: %v", err) - } + require.NoError(t, err, "unable to create test channels") defer cleanUp() // helper that counts the number of updates, and number of fee updates @@ -4793,9 +4313,7 @@ func TestFeeUpdateOldDiskFormat(t *testing.T) { // Now, Alice will send a new commitment to Bob, but we'll simulate a // connection failure, so Bob doesn't get the signature. aliceSig, aliceHtlcSigs, _, err := aliceChannel.SignNextCommitment() - if err != nil { - t.Fatalf("unable to sign commitment: %v", err) - } + require.NoError(t, err, "unable to sign commitment") // Before restarting Alice, to mimic the old format, we fetch the // pending remote commit from disk, set the UpdateFee message's @@ -4820,13 +4338,9 @@ func TestFeeUpdateOldDiskFormat(t *testing.T) { // Restart both channels to simulate a connection restart. This will // trigger an update log restoration.
aliceChannel, err = restartChannel(aliceChannel) - if err != nil { - t.Fatalf("unable to restart alice: %v", err) - } + require.NoError(t, err, "unable to restart alice") bobChannel, err = restartChannel(bobChannel) - if err != nil { - t.Fatalf("unable to restart channel: %v", err) - } + require.NoError(t, err, "unable to restart channel") // After a reconnection, Alice will resend the pending updates that // were not ACKed by Bob, so we re-send the HTLCs and fee updates. @@ -4852,33 +4366,19 @@ func TestFeeUpdateOldDiskFormat(t *testing.T) { // We send Alice's commitment signatures, and finish the state // transition. err = bobChannel.ReceiveNewCommitment(aliceSig, aliceHtlcSigs) - if err != nil { - t.Fatalf("bob unable to process alice's commitment: %v", err) - } + require.NoError(t, err, "bob unable to process alice's commitment") bobRevocation, _, err := bobChannel.RevokeCurrentCommitment() - if err != nil { - t.Fatalf("unable to revoke bob commitment: %v", err) - } + require.NoError(t, err, "unable to revoke bob commitment") bobSig, bobHtlcSigs, _, err := bobChannel.SignNextCommitment() - if err != nil { - t.Fatalf("bob unable to sign commitment: %v", err) - } + require.NoError(t, err, "bob unable to sign commitment") _, _, _, _, err = aliceChannel.ReceiveRevocation(bobRevocation) - if err != nil { - t.Fatalf("alice unable to recv revocation: %v", err) - } + require.NoError(t, err, "alice unable to recv revocation") err = aliceChannel.ReceiveNewCommitment(bobSig, bobHtlcSigs) - if err != nil { - t.Fatalf("alice unable to rev bob's commitment: %v", err) - } + require.NoError(t, err, "alice unable to recv bob's commitment") aliceRevocation, _, err := aliceChannel.RevokeCurrentCommitment() - if err != nil { - t.Fatalf("alice unable to revoke commitment: %v", err) - } + require.NoError(t, err, "alice unable to revoke commitment") _, _, _, _, err = bobChannel.ReceiveRevocation(aliceRevocation) - if err != nil { - t.Fatalf("bob unable to recv revocation: %v", err) - } + require.NoError(t, err, "bob unable to recv revocation") // Both parties should now have the latest fee rate locked-in. if chainfee.SatPerKWeight( @@ -4938,9 +4438,7 @@ func TestChanSyncUnableToSync(t *testing.T) { aliceChannel, bobChannel, cleanUp, err := CreateTestChannels( channeldb.SingleFunderBit, ) - if err != nil { - t.Fatalf("unable to create test channels: %v", err) - } + require.NoError(t, err, "unable to create test channels") defer cleanUp() // If we immediately send both sides a "bogus" ChanSync message, then @@ -4977,9 +4475,7 @@ func TestChanSyncInvalidLastSecret(t *testing.T) { aliceChannel, bobChannel, cleanUp, err := CreateTestChannels( channeldb.SingleFunderBit, ) - if err != nil { - t.Fatalf("unable to create test channels: %v", err) - } + require.NoError(t, err, "unable to create test channels") defer cleanUp() // We'll create a new instance of Alice before doing any state updates @@ -5017,23 +4513,15 @@ func TestChanSyncInvalidLastSecret(t *testing.T) { // Next, we'll restart both parties in order to simulate a connection // re-establishment. aliceChannel, err = restartChannel(aliceChannel) - if err != nil { - t.Fatalf("unable to restart alice: %v", err) - } + require.NoError(t, err, "unable to restart alice") bobChannel, err = restartChannel(bobChannel) - if err != nil { - t.Fatalf("unable to restart bob: %v", err) - } + require.NoError(t, err, "unable to restart bob") // Next, we'll produce the ChanSync messages for both parties.
aliceChanSync, err := aliceChannel.channelState.ChanSyncMsg() - if err != nil { - t.Fatalf("unable to generate chan sync msg: %v", err) - } + require.NoError(t, err, "unable to generate chan sync msg") bobChanSync, err := bobChannel.channelState.ChanSyncMsg() - if err != nil { - t.Fatalf("unable to generate chan sync msg: %v", err) - } + require.NoError(t, err, "unable to generate chan sync msg") // We'll modify Alice's sync message to have an invalid commitment // secret. @@ -5069,9 +4557,7 @@ func TestChanAvailableBandwidth(t *testing.T) { aliceChannel, bobChannel, cleanUp, err := CreateTestChannels( channeldb.SingleFunderTweaklessBit, ) - if err != nil { - t.Fatalf("unable to create test channels: %v", err) - } + require.NoError(t, err, "unable to create test channels") defer cleanUp() aliceReserve := lnwire.NewMSatFromSatoshis( @@ -5174,13 +4660,9 @@ func TestChanAvailableBandwidth(t *testing.T) { htlcIndex := uint64((numHtlcs * 2) - 1) err = bobChannel.FailHTLC(htlcIndex, []byte("f"), nil, nil, nil) - if err != nil { - t.Fatalf("unable to cancel HTLC: %v", err) - } + require.NoError(t, err, "unable to cancel HTLC") err = aliceChannel.ReceiveFailHTLC(htlcIndex, []byte("bad")) - if err != nil { - t.Fatalf("unable to recv htlc cancel: %v", err) - } + require.NoError(t, err, "unable to recv htlc cancel") // We must do a state transition before the balance is available // for Alice. @@ -5207,9 +4689,7 @@ func TestChanAvailableBalanceNearHtlcFee(t *testing.T) { aliceChannel, bobChannel, cleanUp, err := CreateTestChannels( channeldb.SingleFunderTweaklessBit, ) - if err != nil { - t.Fatalf("unable to create test channels: %v", err) - } + require.NoError(t, err, "unable to create test channels") defer cleanUp() // Alice and Bob start with half the channel capacity. @@ -5388,9 +4868,7 @@ func TestChanCommitWeightDustHtlcs(t *testing.T) { aliceChannel, bobChannel, cleanUp, err := CreateTestChannels( channeldb.SingleFunderTweaklessBit, ) - if err != nil { - t.Fatalf("unable to create test channels: %v", err) - } + require.NoError(t, err, "unable to create test channels") defer cleanUp() aliceDustlimit := lnwire.NewMSatFromSatoshis( @@ -5523,9 +5001,7 @@ func TestSignCommitmentFailNotLockedIn(t *testing.T) { aliceChannel, _, cleanUp, err := CreateTestChannels( channeldb.SingleFunderTweaklessBit, ) - if err != nil { - t.Fatalf("unable to create test channels: %v", err) - } + require.NoError(t, err, "unable to create test channels") defer cleanUp() // Next, we'll modify Alice's internal state to omit knowledge of Bob's @@ -5550,9 +5026,7 @@ func TestLockedInHtlcForwardingSkipAfterRestart(t *testing.T) { aliceChannel, bobChannel, cleanUp, err := CreateTestChannels( channeldb.SingleFunderTweaklessBit, ) - if err != nil { - t.Fatalf("unable to create test channels: %v", err) - } + require.NoError(t, err, "unable to create test channels") defer cleanUp() // We'll now add two HTLC's from Alice to Bob, then Alice will initiate @@ -5636,24 +5110,16 @@ func TestLockedInHtlcForwardingSkipAfterRestart(t *testing.T) { // We'll now restart both Alice and Bob. This emulates a reconnection // between the two peers. 
aliceChannel, err = restartChannel(aliceChannel) - if err != nil { - t.Fatalf("unable to restart alice: %v", err) - } + require.NoError(t, err, "unable to restart alice") bobChannel, err = restartChannel(bobChannel) - if err != nil { - t.Fatalf("unable to restart bob: %v", err) - } + require.NoError(t, err, "unable to restart bob") // With both nodes restarted, Bob will now attempt to cancel one of // Alice's HTLC's. err = bobChannel.FailHTLC(htlc.ID, []byte("failreason"), nil, nil, nil) - if err != nil { - t.Fatalf("unable to cancel HTLC: %v", err) - } + require.NoError(t, err, "unable to cancel HTLC") err = aliceChannel.ReceiveFailHTLC(htlc.ID, []byte("bad")) - if err != nil { - t.Fatalf("unable to recv htlc cancel: %v", err) - } + require.NoError(t, err, "unable to recv htlc cancel") // We'll now initiate another state transition, but this time Bob will // lead. @@ -5704,13 +5170,9 @@ func TestLockedInHtlcForwardingSkipAfterRestart(t *testing.T) { // Failing the HTLC here will cause the update to be included in Alice's // remote log, but it should not be committed by this transition. err = bobChannel.FailHTLC(htlc2.ID, []byte("failreason"), nil, nil, nil) - if err != nil { - t.Fatalf("unable to cancel HTLC: %v", err) - } + require.NoError(t, err, "unable to cancel HTLC") err = aliceChannel.ReceiveFailHTLC(htlc2.ID, []byte("bad")) - if err != nil { - t.Fatalf("unable to recv htlc cancel: %v", err) - } + require.NoError(t, err, "unable to recv htlc cancel") bobRevocation, _, err = bobChannel.RevokeCurrentCommitment() if err != nil { @@ -5740,24 +5202,16 @@ func TestLockedInHtlcForwardingSkipAfterRestart(t *testing.T) { // We'll now restart both Alice and Bob. This emulates a reconnection // between the two peers. aliceChannel, err = restartChannel(aliceChannel) - if err != nil { - t.Fatalf("unable to restart alice: %v", err) - } + require.NoError(t, err, "unable to restart alice") bobChannel, err = restartChannel(bobChannel) - if err != nil { - t.Fatalf("unable to restart bob: %v", err) - } + require.NoError(t, err, "unable to restart bob") // Re-add the Fail to both Alice and Bob's channels, as the non-committed // update will not have survived the restart. err = bobChannel.FailHTLC(htlc2.ID, []byte("failreason"), nil, nil, nil) - if err != nil { - t.Fatalf("unable to cancel HTLC: %v", err) - } + require.NoError(t, err, "unable to cancel HTLC") err = aliceChannel.ReceiveFailHTLC(htlc2.ID, []byte("bad")) - if err != nil { - t.Fatalf("unable to recv htlc cancel: %v", err) - } + require.NoError(t, err, "unable to recv htlc cancel") // Have Alice initiate a state transition, which does not include the // HTLCs just re-added to the channel state. @@ -5866,9 +5320,7 @@ func TestInvalidCommitSigError(t *testing.T) { aliceChannel, bobChannel, cleanUp, err := CreateTestChannels( channeldb.SingleFunderTweaklessBit, ) - if err != nil { - t.Fatalf("unable to create test channels: %v", err) - } + require.NoError(t, err, "unable to create test channels") defer cleanUp() // With the channel established, we'll now send a single HTLC from @@ -5884,9 +5336,7 @@ func TestInvalidCommitSigError(t *testing.T) { // Alice will now attempt to initiate a state transition. aliceSig, aliceHtlcSigs, _, err := aliceChannel.SignNextCommitment() - if err != nil { - t.Fatalf("unable to sign new commit: %v", err) - } + require.NoError(t, err, "unable to sign new commit") // Before the signature gets to Bob, we'll mutate it, such that the // signature is now actually invalid. 
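All of the conversions above follow a single pattern, and it is worth spelling out why it is safe: require.NoError, unlike assert.NoError, calls t.FailNow after logging, so it keeps the fail-fast semantics of t.Fatalf, and it prints the offending error value itself, which is why the trailing ": %v" disappears from the messages. A minimal self-contained sketch of the before and after, where doThing is a hypothetical stand-in for any fallible call:

package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// doThing stands in for any call that returns an error.
func doThing() error { return nil }

func TestFailFastPattern(t *testing.T) {
	// Before: four lines, with the error formatted by hand.
	if err := doThing(); err != nil {
		t.Fatalf("unable to do thing: %v", err)
	}

	// After: one line with the same fail-fast behavior. On failure,
	// require logs the message, appends the error, and calls t.FailNow.
	require.NoError(t, doThing(), "unable to do thing")
}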
@@ -5915,9 +5365,7 @@ func TestChannelUnilateralCloseHtlcResolution(t *testing.T) { aliceChannel, bobChannel, cleanUp, err := CreateTestChannels( channeldb.SingleFunderTweaklessBit, ) - if err != nil { - t.Fatalf("unable to create test channels: %v", err) - } + require.NoError(t, err, "unable to create test channels") defer cleanUp() // We'll start off the test by adding an HTLC in both directions, then // @@ -5947,9 +5395,7 @@ func TestChannelUnilateralCloseHtlcResolution(t *testing.T) { // With both HTLC's locked in, we'll now simulate Bob force closing the // transaction on Alice. bobForceClose, err := bobChannel.ForceClose() - if err != nil { - t.Fatalf("unable to close: %v", err) - } + require.NoError(t, err, "unable to close") // We'll then use Bob's transaction to trigger a spend notification for // Alice. @@ -5965,9 +5411,7 @@ func TestChannelUnilateralCloseHtlcResolution(t *testing.T) { aliceChannel.channelState.RemoteCommitment, aliceChannel.channelState.RemoteCurrentRevocation, ) - if err != nil { - t.Fatalf("unable to create alice close summary: %v", err) - } + require.NoError(t, err, "unable to create alice close summary") // She should detect that she can sweep both the outgoing HTLC as well // as the incoming one from Bob. @@ -6008,9 +5452,7 @@ func TestChannelUnilateralCloseHtlcResolution(t *testing.T) { aliceChannel.Signer, &outHtlcResolution.SweepSignDesc, sweepTx, int32(outHtlcResolution.Expiry), ) - if err != nil { - t.Fatalf("unable to witness: %v", err) - } + require.NoError(t, err, "unable to witness") vm, err := txscript.NewEngine( outHtlcResolution.SweepSignDesc.Output.PkScript, sweepTx, 0, txscript.StandardVerifyFlags, nil, @@ -6020,9 +5462,7 @@ func TestChannelUnilateralCloseHtlcResolution(t *testing.T) { outHtlcResolution.SweepSignDesc.Output.Value, ), ) - if err != nil { - t.Fatalf("unable to create engine: %v", err) - } + require.NoError(t, err, "unable to create engine") if err := vm.Execute(); err != nil { t.Fatalf("htlc timeout spend is invalid: %v", err) } @@ -6062,9 +5502,7 @@ func TestChannelUnilateralCloseHtlcResolution(t *testing.T) { inHtlcResolution.SweepSignDesc.Output.Value, ), ) - if err != nil { - t.Fatalf("unable to create engine: %v", err) - } + require.NoError(t, err, "unable to create engine") if err := vm.Execute(); err != nil { t.Fatalf("htlc timeout spend is invalid: %v", err) } @@ -6083,9 +5521,7 @@ func TestChannelUnilateralClosePendingCommit(t *testing.T) { aliceChannel, bobChannel, cleanUp, err := CreateTestChannels( channeldb.SingleFunderBit, ) - if err != nil { - t.Fatalf("unable to create test channels: %v", err) - } + require.NoError(t, err, "unable to create test channels") defer cleanUp() // First, we'll add an HTLC from Alice to Bob, just to be able to @@ -6125,9 +5561,7 @@ func TestChannelUnilateralClosePendingCommit(t *testing.T) { aliceChannel.channelState.RemoteCommitment, aliceChannel.channelState.RemoteCurrentRevocation, ) - if err != nil { - t.Fatalf("unable to create alice close summary: %v", err) - } + require.NoError(t, err, "unable to create alice close summary") if aliceWrongCloseSummary.CommitResolution != nil { t.Fatalf("alice shouldn't have found self output") } @@ -6137,18 +5571,14 @@ func TestChannelUnilateralClosePendingCommit(t *testing.T) { // pending commit to Bob, then the unilateral close summary should be // properly populated.
aliceRemoteChainTip, err := aliceChannel.channelState.RemoteCommitChainTip() - if err != nil { - t.Fatalf("unable to fetch remote chain tip: %v", err) - } + require.NoError(t, err, "unable to fetch remote chain tip") aliceCloseSummary, err := NewUnilateralCloseSummary( aliceChannel.channelState, aliceChannel.Signer, spendDetail, aliceRemoteChainTip.Commitment, aliceChannel.channelState.RemoteNextRevocation, ) - if err != nil { - t.Fatalf("unable to create alice close summary: %v", err) - } + require.NoError(t, err, "unable to create alice close summary") // With this proper version, Alice's commit resolution should have been // properly located. @@ -6182,9 +5612,7 @@ func TestChannelUnilateralClosePendingCommit(t *testing.T) { sweepTx.TxIn[0].Witness, err = input.CommitSpendNoDelay( aliceChannel.Signer, &aliceSignDesc, sweepTx, false, ) - if err != nil { - t.Fatalf("unable to generate sweep witness: %v", err) - } + require.NoError(t, err, "unable to generate sweep witness") // If we validate the signature on the new sweep transaction, it should // be fully valid. @@ -6197,9 +5625,7 @@ func TestChannelUnilateralClosePendingCommit(t *testing.T) { aliceSignDesc.Output.Value, ), ) - if err != nil { - t.Fatalf("unable to create engine: %v", err) - } + require.NoError(t, err, "unable to create engine") if err := vm.Execute(); err != nil { t.Fatalf("htlc timeout spend is invalid: %v", err) } @@ -6215,22 +5641,16 @@ func TestDesyncHTLCs(t *testing.T) { aliceChannel, bobChannel, cleanUp, err := CreateTestChannels( channeldb.SingleFunderTweaklessBit, ) - if err != nil { - t.Fatalf("unable to create test channels: %v", err) - } + require.NoError(t, err, "unable to create test channels") defer cleanUp() // First add one HTLC of value 4.1 BTC. htlcAmt := lnwire.NewMSatFromSatoshis(4.1 * btcutil.SatoshiPerBitcoin) htlc, _ := createHTLC(0, htlcAmt) aliceIndex, err := aliceChannel.AddHTLC(htlc, nil) - if err != nil { - t.Fatalf("unable to add htlc: %v", err) - } + require.NoError(t, err, "unable to add htlc") bobIndex, err := bobChannel.ReceiveHTLC(htlc) - if err != nil { - t.Fatalf("unable to recv htlc: %v", err) - } + require.NoError(t, err, "unable to recv htlc") // Lock this HTLC in. if err := ForceStateTransition(aliceChannel, bobChannel); err != nil { @@ -6239,9 +5659,7 @@ func TestDesyncHTLCs(t *testing.T) { // Now let Bob fail this HTLC. err = bobChannel.FailHTLC(bobIndex, []byte("failreason"), nil, nil, nil) - if err != nil { - t.Fatalf("unable to cancel HTLC: %v", err) - } + require.NoError(t, err, "unable to cancel HTLC") if err := aliceChannel.ReceiveFailHTLC(aliceIndex, []byte("bad")); err != nil { t.Fatalf("unable to recv htlc cancel: %v", err) } @@ -6284,9 +5702,7 @@ func TestMaxAcceptedHTLCs(t *testing.T) { aliceChannel, bobChannel, cleanUp, err := CreateTestChannels( channeldb.SingleFunderTweaklessBit, ) - if err != nil { - t.Fatalf("unable to create test channels: %v", err) - } + require.NoError(t, err, "unable to create test channels") defer cleanUp() // One over the maximum number of HTLCs that either can accept. @@ -6343,9 +5759,7 @@ func TestMaxAcceptedHTLCs(t *testing.T) { // Bob will fail the htlc specified by htlcID and then force a state // transition. 
err = bobChannel.FailHTLC(htlcID, []byte{}, nil, nil, nil) - if err != nil { - t.Fatalf("unable to fail htlc: %v", err) - } + require.NoError(t, err, "unable to fail htlc") if err := aliceChannel.ReceiveFailHTLC(htlcID, []byte{}); err != nil { t.Fatalf("unable to receive fail htlc: %v", err) @@ -6368,13 +5782,9 @@ func TestMaxAcceptedHTLCs(t *testing.T) { // Add a commitment to Bob's commitment chain. aliceSig, aliceHtlcSigs, _, err := aliceChannel.SignNextCommitment() - if err != nil { - t.Fatalf("unable to sign next commitment: %v", err) - } + require.NoError(t, err, "unable to sign next commitment") err = bobChannel.ReceiveNewCommitment(aliceSig, aliceHtlcSigs) - if err != nil { - t.Fatalf("unable to recv new commitment: %v", err) - } + require.NoError(t, err, "unable to recv new commitment") // The next HTLC should fail with ErrMaxHTLCNumber. The index is incremented // by one. @@ -6418,9 +5828,7 @@ func TestMaxAsynchronousHtlcs(t *testing.T) { aliceChannel, bobChannel, cleanUp, err := CreateTestChannels( channeldb.SingleFunderTweaklessBit, ) - if err != nil { - t.Fatalf("unable to create test channels: %v", err) - } + require.NoError(t, err, "unable to create test channels") defer cleanUp() // One over the maximum number of HTLCs that either can accept. @@ -6471,56 +5879,38 @@ func TestMaxAsynchronousHtlcs(t *testing.T) { // Fail back an HTLC and sign a commitment as in steps 1 & 2. err = bobChannel.FailHTLC(htlcID, []byte{}, nil, nil, nil) - if err != nil { - t.Fatalf("unable to fail htlc: %v", err) - } + require.NoError(t, err, "unable to fail htlc") if err := aliceChannel.ReceiveFailHTLC(htlcID, []byte{}); err != nil { t.Fatalf("unable to receive fail htlc: %v", err) } bobSig, bobHtlcSigs, _, err := bobChannel.SignNextCommitment() - if err != nil { - t.Fatalf("unable to sign next commitment: %v", err) - } + require.NoError(t, err, "unable to sign next commitment") err = aliceChannel.ReceiveNewCommitment(bobSig, bobHtlcSigs) - if err != nil { - t.Fatalf("unable to receive new commitment: %v", err) - } + require.NoError(t, err, "unable to receive new commitment") // Cover the HTLC referenced with id equal to numHTLCs-1 with a new // signature (step 3). aliceSig, aliceHtlcSigs, _, err := aliceChannel.SignNextCommitment() - if err != nil { - t.Fatalf("unable to sign next commitment: %v", err) - } + require.NoError(t, err, "unable to sign next commitment") err = bobChannel.ReceiveNewCommitment(aliceSig, aliceHtlcSigs) - if err != nil { - t.Fatalf("unable to receive new commitment: %v", err) - } + require.NoError(t, err, "unable to receive new commitment") // Both sides exchange revocations as in step 4 & 5. bobRevocation, _, err := bobChannel.RevokeCurrentCommitment() - if err != nil { - t.Fatalf("unable to revoke revocation: %v", err) - } + require.NoError(t, err, "unable to revoke revocation") _, _, _, _, err = aliceChannel.ReceiveRevocation(bobRevocation) - if err != nil { - t.Fatalf("unable to receive revocation: %v", err) - } + require.NoError(t, err, "unable to receive revocation") aliceRevocation, _, err := aliceChannel.RevokeCurrentCommitment() - if err != nil { - t.Fatalf("unable to revoke revocation: %v", err) - } + require.NoError(t, err, "unable to revoke revocation") _, _, _, _, err = bobChannel.ReceiveRevocation(aliceRevocation) - if err != nil { - t.Fatalf("unable to receive revocation: %v", err) - } + require.NoError(t, err, "unable to receive revocation") // Send the final Add which should succeed as in step 6. 
htlc, _ = createHTLC(numHTLCs, htlcAmt) @@ -6534,14 +5924,10 @@ func TestMaxAsynchronousHtlcs(t *testing.T) { // Receiving the commitment should succeed as in step 7 since space was // made. aliceSig, aliceHtlcSigs, _, err = aliceChannel.SignNextCommitment() - if err != nil { - t.Fatalf("unable to sign next commitment: %v", err) - } + require.NoError(t, err, "unable to sign next commitment") err = bobChannel.ReceiveNewCommitment(aliceSig, aliceHtlcSigs) - if err != nil { - t.Fatalf("unable to receive new commitment: %v", err) - } + require.NoError(t, err, "unable to receive new commitment") } // TestMaxPendingAmount tests that the maximum overall pending HTLC value is met @@ -6555,9 +5941,7 @@ func TestMaxPendingAmount(t *testing.T) { aliceChannel, bobChannel, cleanUp, err := CreateTestChannels( channeldb.SingleFunderTweaklessBit, ) - if err != nil { - t.Fatalf("unable to create test channels: %v", err) - } + require.NoError(t, err, "unable to create test channels") defer cleanUp() // We set the remote required MaxPendingAmount to 3 BTC. We will @@ -6781,13 +6165,9 @@ func TestChanReserve(t *testing.T) { htlc, preimage := createHTLC(aliceIndex, htlcAmt) aliceIndex++ aliceHtlcIndex, err := aliceChannel.AddHTLC(htlc, nil) - if err != nil { - t.Fatalf("unable to add htlc: %v", err) - } + require.NoError(t, err, "unable to add htlc") bobHtlcIndex, err := bobChannel.ReceiveHTLC(htlc) - if err != nil { - t.Fatalf("unable to recv htlc: %v", err) - } + require.NoError(t, err, "unable to recv htlc") if err := ForceStateTransition(aliceChannel, bobChannel); err != nil { t.Fatalf("unable to complete state update: %v", err) } @@ -6949,9 +6329,7 @@ func TestMinHTLC(t *testing.T) { aliceChannel, bobChannel, cleanUp, err := CreateTestChannels( channeldb.SingleFunderTweaklessBit, ) - if err != nil { - t.Fatalf("unable to create test channels: %v", err) - } + require.NoError(t, err, "unable to create test channels") defer cleanUp() // We set Alice's MinHTLC to 0.1 BTC. We will attempt to send an @@ -7001,9 +6379,7 @@ func TestInvalidHTLCAmt(t *testing.T) { aliceChannel, bobChannel, cleanUp, err := CreateTestChannels( channeldb.SingleFunderTweaklessBit, ) - if err != nil { - t.Fatalf("unable to create test channels: %v", err) - } + require.NoError(t, err, "unable to create test channels") defer cleanUp() // We'll set the min HTLC values for each party to zero, which @@ -7039,9 +6415,7 @@ func TestNewBreachRetributionSkipsDustHtlcs(t *testing.T) { aliceChannel, bobChannel, cleanUp, err := CreateTestChannels( channeldb.SingleFunderTweaklessBit, ) - if err != nil { - t.Fatalf("unable to create test channels: %v", err) - } + require.NoError(t, err, "unable to create test channels") defer cleanUp() var fakeOnionBlob [lnwire.OnionPacketSize]byte @@ -7111,9 +6485,7 @@ func TestNewBreachRetributionSkipsDustHtlcs(t *testing.T) { breachRet, err := NewBreachRetribution( aliceChannel.channelState, revokedStateNum, 100, breachTx, ) - if err != nil { - t.Fatalf("unable to create breach retribution: %v", err) - } + require.NoError(t, err, "unable to create breach retribution") // The retribution shouldn't have any HTLCs set as they were all below // dust for both parties. 
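A side observation while reviewing: the four-message commitment dance (sign, receive, revoke, receive the revocation) is spelled out in full in most of the tests above. The converted calls compose naturally into a require-based helper; the following is a sketch only, not part of this patch, mirroring the ForceStateTransition helper these tests already use, with return arities taken from the calls visible in this diff:

// forceStateTransitionT drives one full commitment dance between alice
// and bob, failing the test immediately on the first error.
func forceStateTransitionT(t *testing.T, alice, bob *LightningChannel) {
	t.Helper()

	// Alice signs a commitment covering her pending updates; Bob
	// receives it and revokes his previous state.
	aliceSig, aliceHtlcSigs, _, err := alice.SignNextCommitment()
	require.NoError(t, err, "alice unable to sign commitment")
	require.NoError(t, bob.ReceiveNewCommitment(aliceSig, aliceHtlcSigs))

	bobRevocation, _, err := bob.RevokeCurrentCommitment()
	require.NoError(t, err, "bob unable to revoke commitment")
	bobSig, bobHtlcSigs, _, err := bob.SignNextCommitment()
	require.NoError(t, err, "bob unable to sign commitment")

	// Alice processes the revocation and Bob's signature, then revokes
	// her own previous state to complete the transition.
	_, _, _, _, err = alice.ReceiveRevocation(bobRevocation)
	require.NoError(t, err, "alice unable to receive revocation")
	require.NoError(t, alice.ReceiveNewCommitment(bobSig, bobHtlcSigs))

	aliceRevocation, _, err := alice.RevokeCurrentCommitment()
	require.NoError(t, err, "alice unable to revoke commitment")
	_, _, _, _, err = bob.ReceiveRevocation(aliceRevocation)
	require.NoError(t, err, "bob unable to receive revocation")
}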
@@ -7214,9 +6586,7 @@ func TestChannelRestoreUpdateLogs(t *testing.T) { aliceChannel, bobChannel, cleanUp, err := CreateTestChannels( channeldb.SingleFunderTweaklessBit, ) - if err != nil { - t.Fatalf("unable to create test channels: %v", err) - } + require.NoError(t, err, "unable to create test channels") defer cleanUp() // First, we'll add an HTLC from Alice to Bob, which we will lock in on @@ -7232,19 +6602,13 @@ func TestChannelRestoreUpdateLogs(t *testing.T) { // Let Alice sign a new state, which will include the HTLC just sent. aliceSig, aliceHtlcSigs, _, err := aliceChannel.SignNextCommitment() - if err != nil { - t.Fatalf("unable to sign commitment: %v", err) - } + require.NoError(t, err, "unable to sign commitment") // Bob receives this commitment signature, and revokes his old state. err = bobChannel.ReceiveNewCommitment(aliceSig, aliceHtlcSigs) - if err != nil { - t.Fatalf("unable to receive commitment: %v", err) - } + require.NoError(t, err, "unable to receive commitment") bobRevocation, _, err := bobChannel.RevokeCurrentCommitment() - if err != nil { - t.Fatalf("unable to revoke commitment: %v", err) - } + require.NoError(t, err, "unable to revoke commitment") // When Alice now receives this revocation, she will advance her remote // commitment chain to the commitment which includes the HTLC just @@ -7252,9 +6616,7 @@ func TestChannelRestoreUpdateLogs(t *testing.T) { // state with the HTLC, since she hasn't received a new commitment // signature from Bob yet. _, _, _, _, err = aliceChannel.ReceiveRevocation(bobRevocation) - if err != nil { - t.Fatalf("unable to receive revocation: %v", err) - } + require.NoError(t, err, "unable to receive revocation") // Now make Alice send and sign an additional HTLC. We don't let Bob // receive it. We do this since we want to check that update logs are @@ -7270,9 +6632,7 @@ func TestChannelRestoreUpdateLogs(t *testing.T) { // remote chain was updated with the latest state (since Bob sent the // revocation earlier) we can keep advancing the remote commit chain. aliceSig, aliceHtlcSigs, _, err = aliceChannel.SignNextCommitment() - if err != nil { - t.Fatalf("unable to sign commitment: %v", err) - } + require.NoError(t, err, "unable to sign commitment") // After Alice has signed this commitment, her local commitment will // contain no HTLCs, her remote commitment will contain an HTLC with @@ -7286,43 +6646,31 @@ func TestChannelRestoreUpdateLogs(t *testing.T) { aliceChannel.Signer, aliceChannel.channelState, aliceChannel.sigPool, ) - if err != nil { - t.Fatalf("unable to create new channel: %v", err) - } + require.NoError(t, err, "unable to create new channel") newBobChannel, err := NewLightningChannel( bobChannel.Signer, bobChannel.channelState, bobChannel.sigPool, ) - if err != nil { - t.Fatalf("unable to create new channel: %v", err) - } + require.NoError(t, err, "unable to create new channel") // compare all the logs between the old and new channels, to make sure // they all got restored properly. 
err = compareLogs(aliceChannel.localUpdateLog, newAliceChannel.localUpdateLog) - if err != nil { - t.Fatalf("alice local log not restored: %v", err) - } + require.NoError(t, err, "alice local log not restored") err = compareLogs(aliceChannel.remoteUpdateLog, newAliceChannel.remoteUpdateLog) - if err != nil { - t.Fatalf("alice remote log not restored: %v", err) - } + require.NoError(t, err, "alice remote log not restored") err = compareLogs(bobChannel.localUpdateLog, newBobChannel.localUpdateLog) - if err != nil { - t.Fatalf("bob local log not restored: %v", err) - } + require.NoError(t, err, "bob local log not restored") err = compareLogs(bobChannel.remoteUpdateLog, newBobChannel.remoteUpdateLog) - if err != nil { - t.Fatalf("bob remote log not restored: %v", err) - } + require.NoError(t, err, "bob remote log not restored") } // fetchNumUpdates counts the number of updateType in the log. @@ -7368,9 +6716,7 @@ func restoreAndAssert(t *testing.T, channel *LightningChannel, numAddsLocal, channel.Signer, channel.channelState, channel.sigPool, ) - if err != nil { - t.Fatalf("unable to create new channel: %v", err) - } + require.NoError(t, err, "unable to create new channel") assertInLog(t, newChannel.localUpdateLog, numAddsLocal, numFailsLocal) assertInLog(t, newChannel.remoteUpdateLog, numAddsRemote, numFailsRemote) @@ -7385,9 +6731,7 @@ func TestChannelRestoreUpdateLogsFailedHTLC(t *testing.T) { aliceChannel, bobChannel, cleanUp, err := CreateTestChannels( channeldb.SingleFunderTweaklessBit, ) - if err != nil { - t.Fatalf("unable to create test channels: %v", err) - } + require.NoError(t, err, "unable to create test channels") defer cleanUp() // First, we'll add an HTLC from Alice to Bob, and lock it in for both. @@ -7419,14 +6763,10 @@ func TestChannelRestoreUpdateLogsFailedHTLC(t *testing.T) { // Now we make Bob fail this HTLC. err = bobChannel.FailHTLC(0, []byte("failreason"), nil, nil, nil) - if err != nil { - t.Fatalf("unable to cancel HTLC: %v", err) - } + require.NoError(t, err, "unable to cancel HTLC") err = aliceChannel.ReceiveFailHTLC(0, []byte("failreason")) - if err != nil { - t.Fatalf("unable to recv htlc cancel: %v", err) - } + require.NoError(t, err, "unable to recv htlc cancel") // This Fail update should have been added to Alice's remote update log. assertInLogs(t, aliceChannel, 1, 0, 0, 1) @@ -7437,13 +6777,9 @@ func TestChannelRestoreUpdateLogsFailedHTLC(t *testing.T) { // Bob sends a signature. bobSig, bobHtlcSigs, _, err := bobChannel.SignNextCommitment() - if err != nil { - t.Fatalf("unable to sign commitment: %v", err) - } + require.NoError(t, err, "unable to sign commitment") err = aliceChannel.ReceiveNewCommitment(bobSig, bobHtlcSigs) - if err != nil { - t.Fatalf("unable to receive commitment: %v", err) - } + require.NoError(t, err, "unable to receive commitment") // When Alice receives Bob's new commitment, the logs will stay the // same until she revokes her old state. 
The Fail will still not be @@ -7452,13 +6788,9 @@ func TestChannelRestoreUpdateLogsFailedHTLC(t *testing.T) { restoreAndAssert(t, aliceChannel, 1, 0, 0, 0) aliceRevocation, _, err := aliceChannel.RevokeCurrentCommitment() - if err != nil { - t.Fatalf("unable to revoke commitment: %v", err) - } + require.NoError(t, err, "unable to revoke commitment") _, _, _, _, err = bobChannel.ReceiveRevocation(aliceRevocation) - if err != nil { - t.Fatalf("bob unable to process alice's revocation: %v", err) - } + require.NoError(t, err, "bob unable to process alice's revocation") // At this point Alice has advanced her local commitment chain to a // commitment with no HTLCs left. The current state on her remote @@ -7472,13 +6804,9 @@ func TestChannelRestoreUpdateLogsFailedHTLC(t *testing.T) { // Now send a signature from Alice. This will give Bob a new commitment // where the HTLC is removed. aliceSig, aliceHtlcSigs, _, err := aliceChannel.SignNextCommitment() - if err != nil { - t.Fatalf("unable to sign commitment: %v", err) - } + require.NoError(t, err, "unable to sign commitment") err = bobChannel.ReceiveNewCommitment(aliceSig, aliceHtlcSigs) - if err != nil { - t.Fatalf("unable to receive commitment: %v", err) - } + require.NoError(t, err, "unable to receive commitment") // When sending a new commitment, Alice will add a pending commit to // her remote chain. Since the unsigned acked updates aren't deleted @@ -7490,13 +6818,9 @@ func TestChannelRestoreUpdateLogsFailedHTLC(t *testing.T) { // in on both sides. She should compact the logs, removing the HTLC and // the corresponding Fail from the local update log. bobRevocation, _, err := bobChannel.RevokeCurrentCommitment() - if err != nil { - t.Fatalf("unable to revoke commitment: %v", err) - } + require.NoError(t, err, "unable to revoke commitment") _, _, _, _, err = aliceChannel.ReceiveRevocation(bobRevocation) - if err != nil { - t.Fatalf("unable to receive revocation: %v", err) - } + require.NoError(t, err, "unable to receive revocation") assertInLogs(t, aliceChannel, 0, 0, 0, 0) restoreAndAssert(t, aliceChannel, 0, 0, 0, 0) @@ -7510,9 +6834,7 @@ func TestDuplicateFailRejection(t *testing.T) { aliceChannel, bobChannel, cleanUp, err := CreateTestChannels( channeldb.SingleFunderTweaklessBit, ) - if err != nil { - t.Fatalf("unable to create test channels: %v", err) - } + require.NoError(t, err, "unable to create test channels") defer cleanUp() // First, we'll add an HTLC from Alice to Bob, and lock it in for both @@ -7523,9 +6845,7 @@ func TestDuplicateFailRejection(t *testing.T) { t.Fatalf("alice unable to add htlc: %v", err) } _, err = bobChannel.ReceiveHTLC(htlcAlice) - if err != nil { - t.Fatalf("unable to recv htlc: %v", err) - } + require.NoError(t, err, "unable to recv htlc") if err := ForceStateTransition(aliceChannel, bobChannel); err != nil { t.Fatalf("unable to complete state update: %v", err) @@ -7534,9 +6854,7 @@ func TestDuplicateFailRejection(t *testing.T) { // With the HTLC locked in, we'll now have Bob fail the HTLC back to // Alice. err = bobChannel.FailHTLC(0, []byte("failreason"), nil, nil, nil) - if err != nil { - t.Fatalf("unable to cancel HTLC: %v", err) - } + require.NoError(t, err, "unable to cancel HTLC") if err := aliceChannel.ReceiveFailHTLC(0, []byte("bad")); err != nil { t.Fatalf("unable to recv htlc cancel: %v", err) } @@ -7554,20 +6872,14 @@ func TestDuplicateFailRejection(t *testing.T) { // We'll now have Bob sign a new commitment to lock in the HTLC fail // for Alice. 
_, _, _, err = bobChannel.SignNextCommitment() - if err != nil { - t.Fatalf("unable to sign commit: %v", err) - } + require.NoError(t, err, "unable to sign commit") // We'll now force a restart for Bob and Alice, so we can test the // persistence related portion of this assertion. bobChannel, err = restartChannel(bobChannel) - if err != nil { - t.Fatalf("unable to restart channel: %v", err) - } + require.NoError(t, err, "unable to restart channel") aliceChannel, err = restartChannel(aliceChannel) - if err != nil { - t.Fatalf("unable to restart channel: %v", err) - } + require.NoError(t, err, "unable to restart channel") // If we try to fail the same HTLC again, then we should get an error. err = bobChannel.FailHTLC(0, []byte("failreason"), nil, nil, nil) @@ -7590,9 +6902,7 @@ func TestDuplicateSettleRejection(t *testing.T) { aliceChannel, bobChannel, cleanUp, err := CreateTestChannels( channeldb.SingleFunderTweaklessBit, ) - if err != nil { - t.Fatalf("unable to create test channels: %v", err) - } + require.NoError(t, err, "unable to create test channels") defer cleanUp() // First, we'll add an HTLC from Alice to Bob, and lock it in for both @@ -7603,9 +6913,7 @@ func TestDuplicateSettleRejection(t *testing.T) { t.Fatalf("alice unable to add htlc: %v", err) } _, err = bobChannel.ReceiveHTLC(htlcAlice) - if err != nil { - t.Fatalf("unable to recv htlc: %v", err) - } + require.NoError(t, err, "unable to recv htlc") if err := ForceStateTransition(aliceChannel, bobChannel); err != nil { t.Fatalf("unable to complete state update: %v", err) @@ -7614,13 +6922,9 @@ func TestDuplicateSettleRejection(t *testing.T) { // With the HTLC locked in, we'll now have Bob settle the HTLC back to // Alice. err = bobChannel.SettleHTLC(alicePreimage, uint64(0), nil, nil, nil) - if err != nil { - t.Fatalf("unable to cancel HTLC: %v", err) - } + require.NoError(t, err, "unable to cancel HTLC") err = aliceChannel.ReceiveHTLCSettle(alicePreimage, uint64(0)) - if err != nil { - t.Fatalf("unable to recv htlc cancel: %v", err) - } + require.NoError(t, err, "unable to recv htlc cancel") // If we attempt to fail it AGAIN, then both sides should reject this // second failure attempt. @@ -7636,20 +6940,14 @@ func TestDuplicateSettleRejection(t *testing.T) { // We'll now have Bob sign a new commitment to lock in the HTLC fail // for Alice. _, _, _, err = bobChannel.SignNextCommitment() - if err != nil { - t.Fatalf("unable to sign commit: %v", err) - } + require.NoError(t, err, "unable to sign commit") // We'll now force a restart for Bob and Alice, so we can test the // persistence related portion of this assertion. bobChannel, err = restartChannel(bobChannel) - if err != nil { - t.Fatalf("unable to restart channel: %v", err) - } + require.NoError(t, err, "unable to restart channel") aliceChannel, err = restartChannel(aliceChannel) - if err != nil { - t.Fatalf("unable to restart channel: %v", err) - } + require.NoError(t, err, "unable to restart channel") // If we try to fail the same HTLC again, then we should get an error. err = bobChannel.SettleHTLC(alicePreimage, uint64(0), nil, nil, nil) @@ -7660,9 +6958,7 @@ func TestDuplicateSettleRejection(t *testing.T) { // Alice on the other hand should accept the failure again, as she // dropped all items in the logs which weren't committed. 
err = aliceChannel.ReceiveHTLCSettle(alicePreimage, uint64(0)) - if err != nil { - t.Fatalf("unable to recv htlc cancel: %v", err) - } + require.NoError(t, err, "unable to recv htlc cancel") } // TestChannelRestoreCommitHeight tests that the local and remote commit @@ -7673,9 +6969,7 @@ func TestChannelRestoreCommitHeight(t *testing.T) { aliceChannel, bobChannel, cleanUp, err := CreateTestChannels( channeldb.SingleFunderTweaklessBit, ) - if err != nil { - t.Fatalf("unable to create test channels: %v", err) - } + require.NoError(t, err, "unable to create test channels") defer cleanUp() // helper method to check add heights of the htlcs found in the given @@ -7731,9 +7025,7 @@ func TestChannelRestoreCommitHeight(t *testing.T) { // Let Alice sign a new state, which will include the HTLC just sent. aliceSig, aliceHtlcSigs, _, err := aliceChannel.SignNextCommitment() - if err != nil { - t.Fatalf("unable to sign commitment: %v", err) - } + require.NoError(t, err, "unable to sign commitment") // The HTLC should only be on the pending remote commitment, so the // only the remote add height should be set during a restore. @@ -7743,13 +7035,9 @@ func TestChannelRestoreCommitHeight(t *testing.T) { // Bob receives this commitment signature, and revokes his old state. err = bobChannel.ReceiveNewCommitment(aliceSig, aliceHtlcSigs) - if err != nil { - t.Fatalf("unable to receive commitment: %v", err) - } + require.NoError(t, err, "unable to receive commitment") bobRevocation, _, err := bobChannel.RevokeCurrentCommitment() - if err != nil { - t.Fatalf("unable to revoke commitment: %v", err) - } + require.NoError(t, err, "unable to revoke commitment") // Now the HTLC is locked into Bob's commitment, a restoration should // set only the local commit height, as it is not locked into Alice's @@ -7758,9 +7046,7 @@ func TestChannelRestoreCommitHeight(t *testing.T) { // Alice receives the revocation, ACKing her pending commitment. _, _, _, _, err = aliceChannel.ReceiveRevocation(bobRevocation) - if err != nil { - t.Fatalf("unable to receive revocation: %v", err) - } + require.NoError(t, err, "unable to receive revocation") // However, the HTLC is still not locked into her local commitment, so // the local add height should still be 0 after a restoration. @@ -7771,9 +7057,7 @@ func TestChannelRestoreCommitHeight(t *testing.T) { // Now let Bob send the commitment signature making the HTLC lock in on // Alice's commitment. bobSig, bobHtlcSigs, _, err := bobChannel.SignNextCommitment() - if err != nil { - t.Fatalf("unable to sign commitment: %v", err) - } + require.NoError(t, err, "unable to sign commitment") // At this stage Bob has a pending remote commitment. Make sure // restoring at this stage correctly restores the HTLC add commit @@ -7781,13 +7065,9 @@ func TestChannelRestoreCommitHeight(t *testing.T) { bobChannel = restoreAndAssertCommitHeights(t, bobChannel, true, 0, 1, 1) err = aliceChannel.ReceiveNewCommitment(bobSig, bobHtlcSigs) - if err != nil { - t.Fatalf("unable to receive commitment: %v", err) - } + require.NoError(t, err, "unable to receive commitment") aliceRevocation, _, err := aliceChannel.RevokeCurrentCommitment() - if err != nil { - t.Fatalf("unable to revoke commitment: %v", err) - } + require.NoError(t, err, "unable to revoke commitment") // Now both the local and remote add heights should be properly set. 
aliceChannel = restoreAndAssertCommitHeights( @@ -7795,9 +7075,7 @@ func TestChannelRestoreCommitHeight(t *testing.T) { ) _, _, _, _, err = bobChannel.ReceiveRevocation(aliceRevocation) - if err != nil { - t.Fatalf("unable to receive revocation: %v", err) - } + require.NoError(t, err, "unable to receive revocation") // Alice ACKing Bob's pending commitment shouldn't change the heights // restored. @@ -7817,9 +7095,7 @@ func TestChannelRestoreCommitHeight(t *testing.T) { // Send a new signature from Alice to Bob, making Alice have a pending // remote commitment. aliceSig, aliceHtlcSigs, _, err = aliceChannel.SignNextCommitment() - if err != nil { - t.Fatalf("unable to sign commitment: %v", err) - } + require.NoError(t, err, "unable to sign commitment") // A restoration should keep the add heights of the first HTLC, and // the new HTLC should have a remote add height 2. @@ -7831,13 +7107,9 @@ func TestChannelRestoreCommitHeight(t *testing.T) { ) err = bobChannel.ReceiveNewCommitment(aliceSig, aliceHtlcSigs) - if err != nil { - t.Fatalf("unable to receive commitment: %v", err) - } + require.NoError(t, err, "unable to receive commitment") bobRevocation, _, err = bobChannel.RevokeCurrentCommitment() - if err != nil { - t.Fatalf("unable to revoke commitment: %v", err) - } + require.NoError(t, err, "unable to revoke commitment") // Since Bob just revoked another commitment, a restoration should // increase the add height of the first HTLC to 2, as we only keep the @@ -7848,9 +7120,7 @@ func TestChannelRestoreCommitHeight(t *testing.T) { // Alice receives the revocation, ACKing her pending commitment for Bob. _, _, _, _, err = aliceChannel.ReceiveRevocation(bobRevocation) - if err != nil { - t.Fatalf("unable to receive revocation: %v", err) - } + require.NoError(t, err, "unable to receive revocation") // Alice receiving Bob's revocation should bump both addCommitHeightRemote // heights to 2. @@ -7864,9 +7134,7 @@ func TestChannelRestoreCommitHeight(t *testing.T) { // Sign a new state for Alice, making Bob have a pending remote // commitment. bobSig, bobHtlcSigs, _, err = bobChannel.SignNextCommitment() - if err != nil { - t.Fatalf("unable to sign commitment: %v", err) - } + require.NoError(t, err, "unable to sign commitment") // The signing of a new commitment for Alice should have given the new // HTLC an add height. @@ -7875,13 +7143,9 @@ func TestChannelRestoreCommitHeight(t *testing.T) { // Alice should receive the commitment and send over a revocation. err = aliceChannel.ReceiveNewCommitment(bobSig, bobHtlcSigs) - if err != nil { - t.Fatalf("unable to receive commitment: %v", err) - } + require.NoError(t, err, "unable to receive commitment") aliceRevocation, _, err = aliceChannel.RevokeCurrentCommitment() - if err != nil { - t.Fatalf("unable to revoke commitment: %v", err) - } + require.NoError(t, err, "unable to revoke commitment") // Both heights should be 2 and they are on both commitments. aliceChannel = restoreAndAssertCommitHeights( @@ -7894,28 +7158,20 @@ func TestChannelRestoreCommitHeight(t *testing.T) { // Bob receives the revocation, which should set both addCommitHeightRemote // fields to 2.
_, _, _, _, err = bobChannel.ReceiveRevocation(aliceRevocation) - if err != nil { - t.Fatalf("unable to receive revocation: %v", err) - } + require.NoError(t, err, "unable to receive revocation") bobChannel = restoreAndAssertCommitHeights(t, bobChannel, true, 0, 2, 2) bobChannel = restoreAndAssertCommitHeights(t, bobChannel, true, 1, 2, 2) // Bob now fails back the htlc that was just locked in. err = bobChannel.FailHTLC(0, []byte("failreason"), nil, nil, nil) - if err != nil { - t.Fatalf("unable to cancel HTLC: %v", err) - } + require.NoError(t, err, "unable to cancel HTLC") err = aliceChannel.ReceiveFailHTLC(0, []byte("bad")) - if err != nil { - t.Fatalf("unable to recv htlc cancel: %v", err) - } + require.NoError(t, err, "unable to recv htlc cancel") // Now Bob signs for the fail update. bobSig, bobHtlcSigs, _, err = bobChannel.SignNextCommitment() - if err != nil { - t.Fatalf("unable to sign commitment: %v", err) - } + require.NoError(t, err, "unable to sign commitment") // Bob has a pending commitment for Alice, it shouldn't affect the add // commit heights though. @@ -7924,13 +7180,9 @@ func TestChannelRestoreCommitHeight(t *testing.T) { // Alice receives commitment, sends revocation. err = aliceChannel.ReceiveNewCommitment(bobSig, bobHtlcSigs) - if err != nil { - t.Fatalf("unable to receive commitment: %v", err) - } + require.NoError(t, err, "unable to receive commitment") _, _, err = aliceChannel.RevokeCurrentCommitment() - if err != nil { - t.Fatalf("unable to revoke commitment: %v", err) - } + require.NoError(t, err, "unable to revoke commitment") aliceChannel = restoreAndAssertCommitHeights( t, aliceChannel, false, 0, 3, 2, @@ -7946,9 +7198,7 @@ func TestForceCloseFailLocalDataLoss(t *testing.T) { aliceChannel, _, cleanUp, err := CreateTestChannels( channeldb.SingleFunderBit, ) - if err != nil { - t.Fatalf("unable to create test channels: %v", err) - } + require.NoError(t, err, "unable to create test channels") defer cleanUp() // Now that we have our set of channels, we'll modify the channel state @@ -7956,9 +7206,7 @@ func TestForceCloseFailLocalDataLoss(t *testing.T) { err = aliceChannel.channelState.ApplyChanStatus( channeldb.ChanStatusLocalDataLoss, ) - if err != nil { - t.Fatalf("unable to apply channel state: %v", err) - } + require.NoError(t, err, "unable to apply channel state") // Due to the change above, if we attempt to force close this // channel, we should fail as it isn't safe to force close a @@ -7979,34 +7227,22 @@ func TestForceCloseBorkedState(t *testing.T) { aliceChannel, bobChannel, cleanUp, err := CreateTestChannels( channeldb.SingleFunderBit, ) - if err != nil { - t.Fatalf("unable to create test channels: %v", err) - } + require.NoError(t, err, "unable to create test channels") defer cleanUp() // Do the commitment dance until Bob sends a revocation so Alice is // able to receive the revocation, and then also make a new state // herself. 
aliceSigs, aliceHtlcSigs, _, err := aliceChannel.SignNextCommitment() - if err != nil { - t.Fatalf("unable to sign commit: %v", err) - } + require.NoError(t, err, "unable to sign commit") err = bobChannel.ReceiveNewCommitment(aliceSigs, aliceHtlcSigs) - if err != nil { - t.Fatalf("unable to receive commitment: %v", err) - } + require.NoError(t, err, "unable to receive commitment") revokeMsg, _, err := bobChannel.RevokeCurrentCommitment() - if err != nil { - t.Fatalf("unable to revoke bob commitment: %v", err) - } + require.NoError(t, err, "unable to revoke bob commitment") bobSigs, bobHtlcSigs, _, err := bobChannel.SignNextCommitment() - if err != nil { - t.Fatalf("unable to sign commit: %v", err) - } + require.NoError(t, err, "unable to sign commit") err = aliceChannel.ReceiveNewCommitment(bobSigs, bobHtlcSigs) - if err != nil { - t.Fatalf("unable to receive commitment: %v", err) - } + require.NoError(t, err, "unable to receive commitment") // Now that we have a new Alice channel, we'll force close once to // trigger the update on disk to mark the channel as borked. @@ -8018,9 +7254,7 @@ func TestForceCloseBorkedState(t *testing.T) { err = aliceChannel.channelState.ApplyChanStatus( channeldb.ChanStatusBorked, ) - if err != nil { - t.Fatalf("unable to apply chan status: %v", err) - } + require.NoError(t, err, "unable to apply chan status") // The on-disk state should indicate that the channel is now borked. if !aliceChannel.channelState.HasChanStatus( @@ -8083,9 +7317,7 @@ func TestChannelMaxFeeRate(t *testing.T) { aliceChannel, _, cleanUp, err := CreateTestChannels( channeldb.SingleFunderTweaklessBit, ) - if err != nil { - t.Fatalf("unable to create test channels: %v", err) - } + require.NoError(t, err, "unable to create test channels") defer cleanUp() if err := quick.Check(propertyTest(aliceChannel), nil); err != nil { @@ -8102,9 +7334,7 @@ func TestChannelMaxFeeRate(t *testing.T) { channeldb.SingleFunderTweaklessBit | channeldb.AnchorOutputsBit | channeldb.ZeroHtlcTxFeeBit, ) - if err != nil { - t.Fatalf("unable to create test channels: %v", err) - } + require.NoError(t, err, "unable to create test channels") defer cleanUp() if err = quick.Check(propertyTest(anchorChannel), nil); err != nil { @@ -8302,9 +7532,7 @@ func TestChannelFeeRateFloor(t *testing.T) { alice, bob, cleanUp, err := CreateTestChannels( channeldb.SingleFunderTweaklessBit, ) - if err != nil { - t.Fatalf("unable to create test channels: %v", err) - } + require.NoError(t, err, "unable to create test channels") defer cleanUp() // Set the fee rate to the proposing fee rate floor. @@ -8320,9 +7548,7 @@ func TestChannelFeeRateFloor(t *testing.T) { // Check that alice can still sign commitments. sig, htlcSigs, _, err := alice.SignNextCommitment() - if err != nil { - t.Fatalf("alice unable to sign commitment: %v", err) - } + require.NoError(t, err, "alice unable to sign commitment") // Check that bob can still receive commitments. 
err = bob.ReceiveNewCommitment(sig, htlcSigs) diff --git a/lnwallet/chanvalidate/validate_test.go b/lnwallet/chanvalidate/validate_test.go index 85d0e92a1..a1be34a92 100644 --- a/lnwallet/chanvalidate/validate_test.go +++ b/lnwallet/chanvalidate/validate_test.go @@ -12,6 +12,7 @@ import ( "github.com/btcsuite/btcd/wire" "github.com/lightningnetwork/lnd/input" "github.com/lightningnetwork/lnd/lnwire" + "github.com/stretchr/testify/require" ) var ( @@ -155,9 +156,7 @@ func TestValidate(t *testing.T) { chanSize := int64(1000000) channelCtx, err := newChannelTestCtx(chanSize) - if err != nil { - t.Fatalf("unable to make channel context: %v", err) - } + require.NoError(t, err, "unable to make channel context") testCases := []struct { // expectedErr is the error we expect, this should be nil if diff --git a/lnwallet/test/test_interface.go b/lnwallet/test/test_interface.go index 2a99c7287..6225227cf 100644 --- a/lnwallet/test/test_interface.go +++ b/lnwallet/test/test_interface.go @@ -132,9 +132,7 @@ func assertProperBalance(t *testing.T, lw *lnwallet.LightningWallet, numConfirms int32, amount float64) { balance, err := lw.ConfirmedBalance(numConfirms, lnwallet.DefaultAccountName) - if err != nil { - t.Fatalf("unable to query for balance: %v", err) - } + require.NoError(t, err, "unable to query for balance") if balance.ToBTC() != amount { t.Fatalf("wallet credits not properly loaded, should have 40BTC, "+ "instead have %v", balance) } @@ -161,9 +159,7 @@ func mineAndAssertTxInBlock(t *testing.T, miner *rpctest.Harness, // We'll mine a block to confirm it. blockHashes, err := miner.Client.Generate(1) - if err != nil { - t.Fatalf("unable to generate new block: %v", err) - } + require.NoError(t, err, "unable to generate new block") // Finally, we'll check it was actually mined in this block. block, err := miner.Client.GetBlock(blockHashes[0]) @@ -188,13 +184,9 @@ func newPkScript(t *testing.T, w *lnwallet.LightningWallet, t.Helper() addr, err := w.NewAddress(addrType, false, lnwallet.DefaultAccountName) - if err != nil { - t.Fatalf("unable to create new address: %v", err) - } + require.NoError(t, err, "unable to create new address") pkScript, err := txscript.PayToAddrScript(addr) - if err != nil { - t.Fatalf("unable to create output script: %v", err) - } + require.NoError(t, err, "unable to create output script") return pkScript } @@ -210,9 +202,7 @@ func sendCoins(t *testing.T, miner *rpctest.Harness, tx, err := sender.SendOutputs( []*wire.TxOut{output}, feeRate, minConf, labels.External, ) - if err != nil { - t.Fatalf("unable to send transaction: %v", err) - } + require.NoError(t, err, "unable to send transaction") if mineBlock { mineAndAssertTxInBlock(t, miner, tx.TxHash()) @@ -239,9 +229,7 @@ func assertTxInWallet(t *testing.T, w *lnwallet.LightningWallet, // finding the expected transaction with its expected confirmation // status. txs, err := w.ListTransactionDetails(0, btcwallet.UnconfirmedHeight, "") - if err != nil { - t.Fatalf("unable to retrieve transactions: %v", err) - } + require.NoError(t, err, "unable to retrieve transactions") for _, tx := range txs { if tx.Hash != txHash { continue } @@ -445,9 +433,7 @@ func testDualFundingReservationWorkflow(miner *rpctest.Harness, alice, bob *lnwallet.LightningWallet, t *testing.T) { fundingAmount, err := btcutil.NewAmount(5) - if err != nil { - t.Fatalf("unable to create amt: %v", err) - } + require.NoError(t, err, "unable to create amt") // In this scenario, we'll test a dual funder reservation, with each // side putting in 5 BTC.
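Note the patch's deliberate scope: only the error-check boilerplate is converted, while hand-rolled value assertions (the len() and balance checks left untouched above) and sentinel comparisons such as if err != ErrCannotSyncCommitChains keep their t.Fatalf form. Those have require equivalents as well; here is a self-contained sketch with hypothetical values, should a follow-up want to migrate them:

package example

import (
	"errors"
	"fmt"
	"testing"

	"github.com/stretchr/testify/require"
)

// errSentinel is a hypothetical sentinel error for this sketch.
var errSentinel = errors.New("cannot sync commit chains")

func TestRequireValueForms(t *testing.T) {
	// Replaces: if len(msgs) != 2 { t.Fatalf(...) }
	msgs := []string{"CommitSig", "RevokeAndAck"}
	require.Len(t, msgs, 2, "unexpected retransmission set")

	// Replaces: if !bytes.Equal(got, want) { t.Fatalf(...) }
	require.Equal(t, []byte{0x01}, []byte{0x01})

	// Replaces: if err != errSentinel { t.Fatalf(...) }, and unlike
	// the plain comparison it also matches errors wrapped with %w.
	err := fmt.Errorf("sync failed: %w", errSentinel)
	require.ErrorIs(t, err, errSentinel)
}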
@@ -455,9 +441,7 @@ func testDualFundingReservationWorkflow(miner *rpctest.Harness, // Alice initiates a channel funded with 5 BTC for each side, so 10 BTC // total. She also generates 2 BTC in change. feePerKw, err := alice.Cfg.FeeEstimator.EstimateFeePerKW(1) - if err != nil { - t.Fatalf("unable to query fee estimator: %v", err) - } + require.NoError(t, err, "unable to query fee estimator") aliceReq := &lnwallet.InitFundingReserveMsg{ ChainHash: chainHash, NodeID: bobPub, @@ -470,9 +454,7 @@ func testDualFundingReservationWorkflow(miner *rpctest.Harness, Flags: lnwire.FFAnnounceChannel, } aliceChanReservation, err := alice.InitChannelReservation(aliceReq) - if err != nil { - t.Fatalf("unable to initialize funding reservation: %v", err) - } + require.NoError(t, err, "unable to initialize funding reservation") aliceChanReservation.SetNumConfsRequired(numReqConfs) channelConstraints := &channeldb.ChannelConstraints{ DustLimit: alice.Cfg.DefaultConstraints.DustLimit, @@ -485,9 +467,7 @@ func testDualFundingReservationWorkflow(miner *rpctest.Harness, err = aliceChanReservation.CommitConstraints( channelConstraints, defaultMaxLocalCsvDelay, false, ) - if err != nil { - t.Fatalf("unable to verify constraints: %v", err) - } + require.NoError(t, err, "unable to verify constraints") // The channel reservation should now be populated with a multi-sig key // from our HD chain, a change output with 3 BTC, and 2 outputs @@ -515,23 +495,17 @@ func testDualFundingReservationWorkflow(miner *rpctest.Harness, Flags: lnwire.FFAnnounceChannel, } bobChanReservation, err := bob.InitChannelReservation(bobReq) - if err != nil { - t.Fatalf("bob unable to init channel reservation: %v", err) - } + require.NoError(t, err, "bob unable to init channel reservation") err = bobChanReservation.CommitConstraints( channelConstraints, defaultMaxLocalCsvDelay, true, ) - if err != nil { - t.Fatalf("unable to verify constraints: %v", err) - } + require.NoError(t, err, "unable to verify constraints") bobChanReservation.SetNumConfsRequired(numReqConfs) assertContributionInitPopulated(t, bobChanReservation.OurContribution()) err = bobChanReservation.ProcessContribution(aliceContribution) - if err != nil { - t.Fatalf("bob unable to process alice's contribution: %v", err) - } + require.NoError(t, err, "bob unable to process alice's contribution") assertContributionInitPopulated(t, bobChanReservation.TheirContribution()) bobContribution := bobChanReservation.OurContribution() @@ -541,9 +515,7 @@ func testDualFundingReservationWorkflow(miner *rpctest.Harness, // material required to craft the funding transaction and commitment // transactions. err = aliceChanReservation.ProcessContribution(bobContribution) - if err != nil { - t.Fatalf("alice unable to process bob's contribution: %v", err) - } + require.NoError(t, err, "alice unable to process bob's contribution") assertContributionInitPopulated(t, aliceChanReservation.TheirContribution()) // At this point, all Alice's signatures should be fully populated. @@ -578,9 +550,7 @@ func testDualFundingReservationWorkflow(miner *rpctest.Harness, _, err = bobChanReservation.CompleteReservation( aliceFundingSigs, aliceCommitSig, ) - if err != nil { - t.Fatalf("unable to consume bob's sigs: %v", err) - } + require.NoError(t, err, "unable to consume bob's sigs") // At this point, the funding tx should have been populated. fundingTx := aliceChanReservation.FinalFundingTx() @@ -592,9 +562,7 @@ func testDualFundingReservationWorkflow(miner *rpctest.Harness, // DB. 
fundingSha := fundingTx.TxHash() aliceChannels, err := alice.Cfg.Database.FetchOpenChannels(bobPub) - if err != nil { - t.Fatalf("unable to retrieve channel from DB: %v", err) - } + require.NoError(t, err, "unable to retrieve channel from DB") if !bytes.Equal(aliceChannels[0].FundingOutpoint.Hash[:], fundingSha[:]) { t.Fatalf("channel state not properly saved") } @@ -602,9 +570,7 @@ func testDualFundingReservationWorkflow(miner *rpctest.Harness, t.Fatalf("channel not detected as dual funder") } bobChannels, err := bob.Cfg.Database.FetchOpenChannels(alicePub) - if err != nil { - t.Fatalf("unable to retrieve channel from DB: %v", err) - } + require.NoError(t, err, "unable to retrieve channel from DB") if !bytes.Equal(bobChannels[0].FundingOutpoint.Hash[:], fundingSha[:]) { t.Fatalf("channel state not properly saved") } @@ -614,24 +580,16 @@ func testDualFundingReservationWorkflow(miner *rpctest.Harness, // Let Alice publish the funding transaction. err = alice.PublishTransaction(fundingTx, "") - if err != nil { - t.Fatalf("unable to publish funding tx: %v", err) - } + require.NoError(t, err, "unable to publish funding tx") // Mine a single block, the funding transaction should be included // within this block. err = waitForMempoolTx(miner, &fundingSha) - if err != nil { - t.Fatalf("tx not relayed to miner: %v", err) - } + require.NoError(t, err, "tx not relayed to miner") blockHashes, err := miner.Client.Generate(1) - if err != nil { - t.Fatalf("unable to generate block: %v", err) - } + require.NoError(t, err, "unable to generate block") block, err := miner.Client.GetBlock(blockHashes[0]) - if err != nil { - t.Fatalf("unable to find block: %v", err) - } + require.NoError(t, err, "unable to find block") if len(block.Transactions) != 2 { t.Fatalf("funding transaction wasn't mined: %v", err) } @@ -645,13 +603,9 @@ func testDualFundingReservationWorkflow(miner *rpctest.Harness, // Wait for wallets to catch up to prevent issues in subsequent tests. err = waitForWalletSync(miner, alice) - if err != nil { - t.Fatalf("unable to sync alice: %v", err) - } + require.NoError(t, err, "unable to sync alice") err = waitForWalletSync(miner, bob) - if err != nil { - t.Fatalf("unable to sync bob: %v", err) - } + require.NoError(t, err, "unable to sync bob") } func testFundingTransactionLockedOutputs(miner *rpctest.Harness, @@ -659,13 +613,9 @@ func testFundingTransactionLockedOutputs(miner *rpctest.Harness, // Create a single channel asking for 16 BTC total. fundingAmount, err := btcutil.NewAmount(8) - if err != nil { - t.Fatalf("unable to create amt: %v", err) - } + require.NoError(t, err, "unable to create amt") feePerKw, err := alice.Cfg.FeeEstimator.EstimateFeePerKW(1) - if err != nil { - t.Fatalf("unable to query fee estimator: %v", err) - } + require.NoError(t, err, "unable to query fee estimator") req := &lnwallet.InitFundingReserveMsg{ ChainHash: chainHash, NodeID: bobPub, @@ -686,9 +636,7 @@ func testFundingTransactionLockedOutputs(miner *rpctest.Harness, // requesting 900 BTC. We only have around 64BTC worth of outpoints // that aren't locked, so this should fail. 
amt, err := btcutil.NewAmount(900) - if err != nil { - t.Fatalf("unable to create amt: %v", err) - } + require.NoError(t, err, "unable to create amt") failedReq := &lnwallet.InitFundingReserveMsg{ ChainHash: chainHash, NodeID: bobPub, @@ -717,15 +665,11 @@ func testFundingCancellationNotEnoughFunds(miner *rpctest.Harness, alice, _ *lnwallet.LightningWallet, t *testing.T) { feePerKw, err := alice.Cfg.FeeEstimator.EstimateFeePerKW(1) - if err != nil { - t.Fatalf("unable to query fee estimator: %v", err) - } + require.NoError(t, err, "unable to query fee estimator") // Create a reservation for 44 BTC. fundingAmount, err := btcutil.NewAmount(44) - if err != nil { - t.Fatalf("unable to create amt: %v", err) - } + require.NoError(t, err, "unable to create amt") req := &lnwallet.InitFundingReserveMsg{ ChainHash: chainHash, NodeID: bobPub, @@ -739,9 +683,7 @@ func testFundingCancellationNotEnoughFunds(miner *rpctest.Harness, PendingChanID: [32]byte{2, 3, 4, 5}, } chanReservation, err := alice.InitChannelReservation(req) - if err != nil { - t.Fatalf("unable to initialize funding reservation: %v", err) - } + require.NoError(t, err, "unable to initialize funding reservation") // Attempt to create another channel with 44 BTC, this should fail. req.PendingChanID = [32]byte{3, 4, 5, 6} @@ -784,9 +726,7 @@ func testCancelNonExistentReservation(miner *rpctest.Harness, alice, _ *lnwallet.LightningWallet, t *testing.T) { feePerKw, err := alice.Cfg.FeeEstimator.EstimateFeePerKW(1) - if err != nil { - t.Fatalf("unable to query fee estimator: %v", err) - } + require.NoError(t, err, "unable to query fee estimator") // Create our own reservation, give it some ID. res, err := lnwallet.NewChannelReservation( @@ -794,9 +734,7 @@ func testCancelNonExistentReservation(miner *rpctest.Harness, lnwire.FFAnnounceChannel, lnwallet.CommitmentTypeTweakless, nil, [32]byte{}, 0, ) - if err != nil { - t.Fatalf("unable to create res: %v", err) - } + require.NoError(t, err, "unable to create res") // Attempt to cancel this reservation. This should fail, we know // nothing of it. @@ -813,9 +751,7 @@ func testReservationInitiatorBalanceBelowDustCancel(miner *rpctest.Harness, // and result in a failure to create the reservation. const numBTC = 4 fundingAmount, err := btcutil.NewAmount(numBTC) - if err != nil { - t.Fatalf("unable to create amt: %v", err) - } + require.NoError(t, err, "unable to create amt") feePerKw := chainfee.SatPerKWeight( numBTC * numBTC * btcutil.SatoshiPerBitcoin, @@ -897,14 +833,10 @@ func testSingleFunderReservationWorkflow(miner *rpctest.Harness, // funded solely by us. We'll also initially push 1 BTC of the channel // towards Bob's side. 
fundingAmt, err := btcutil.NewAmount(4) - if err != nil { - t.Fatalf("unable to create amt: %v", err) - } + require.NoError(t, err, "unable to create amt") pushAmt := lnwire.NewMSatFromSatoshis(btcutil.SatoshiPerBitcoin) feePerKw, err := alice.Cfg.FeeEstimator.EstimateFeePerKW(1) - if err != nil { - t.Fatalf("unable to query fee estimator: %v", err) - } + require.NoError(t, err, "unable to query fee estimator") aliceReq := &lnwallet.InitFundingReserveMsg{ ChainHash: chainHash, PendingChanID: pendingChanID, @@ -920,9 +852,7 @@ func testSingleFunderReservationWorkflow(miner *rpctest.Harness, ChanFunder: aliceChanFunder, } aliceChanReservation, err := alice.InitChannelReservation(aliceReq) - if err != nil { - t.Fatalf("unable to init channel reservation: %v", err) - } + require.NoError(t, err, "unable to init channel reservation") aliceChanReservation.SetNumConfsRequired(numReqConfs) channelConstraints := &channeldb.ChannelConstraints{ DustLimit: alice.Cfg.DefaultConstraints.DustLimit, @@ -935,9 +865,7 @@ func testSingleFunderReservationWorkflow(miner *rpctest.Harness, err = aliceChanReservation.CommitConstraints( channelConstraints, defaultMaxLocalCsvDelay, false, ) - if err != nil { - t.Fatalf("unable to verify constraints: %v", err) - } + require.NoError(t, err, "unable to verify constraints") // Verify all contribution fields have been set properly, but only if // Alice is the funder herself. @@ -972,15 +900,11 @@ func testSingleFunderReservationWorkflow(miner *rpctest.Harness, CommitType: commitType, } bobChanReservation, err := bob.InitChannelReservation(bobReq) - if err != nil { - t.Fatalf("unable to create bob reservation: %v", err) - } + require.NoError(t, err, "unable to create bob reservation") err = bobChanReservation.CommitConstraints( channelConstraints, defaultMaxLocalCsvDelay, true, ) - if err != nil { - t.Fatalf("unable to verify constraints: %v", err) - } + require.NoError(t, err, "unable to verify constraints") bobChanReservation.SetNumConfsRequired(numReqConfs) // We'll ensure that Bob's contribution also gets generated properly. @@ -990,9 +914,7 @@ func testSingleFunderReservationWorkflow(miner *rpctest.Harness, // With his contribution generated, he can now process Alice's // contribution. err = bobChanReservation.ProcessSingleContribution(aliceContribution) - if err != nil { - t.Fatalf("bob unable to process alice's contribution: %v", err) - } + require.NoError(t, err, "bob unable to process alice's contribution") assertContributionInitPopulated(t, bobChanReservation.TheirContribution()) // Bob will next send over his contribution to Alice, we simulate this @@ -1042,9 +964,7 @@ func testSingleFunderReservationWorkflow(miner *rpctest.Harness, _, err = bobChanReservation.CompleteReservationSingle( fundingPoint, aliceCommitSig, ) - if err != nil { - t.Fatalf("bob unable to consume single reservation: %v", err) - } + require.NoError(t, err, "bob unable to consume single reservation") // Finally, we'll conclude the reservation process by sending over // Bob's commitment signature, which is the final thing Alice needs to @@ -1056,9 +976,7 @@ func testSingleFunderReservationWorkflow(miner *rpctest.Harness, _, err = aliceChanReservation.CompleteReservation( nil, bobCommitSig, ) - if err != nil { - t.Fatalf("alice unable to complete reservation: %v", err) - } + require.NoError(t, err, "alice unable to complete reservation") // If the caller provided an alternative way to obtain the funding tx, // then we'll use that. Otherwise, we'll obtain it directly from Alice. 
@@ -1073,9 +991,7 @@ func testSingleFunderReservationWorkflow(miner *rpctest.Harness, // DB for both Alice and Bob. fundingSha := fundingTx.TxHash() aliceChannels, err := alice.Cfg.Database.FetchOpenChannels(bobPub) - if err != nil { - t.Fatalf("unable to retrieve channel from DB: %v", err) - } + require.NoError(t, err, "unable to retrieve channel from DB") if len(aliceChannels) != 1 { t.Fatalf("alice didn't save channel state: %v", err) } @@ -1093,9 +1009,7 @@ func testSingleFunderReservationWorkflow(miner *rpctest.Harness, } bobChannels, err := bob.Cfg.Database.FetchOpenChannels(alicePub) - if err != nil { - t.Fatalf("unable to retrieve channel from DB: %v", err) - } + require.NoError(t, err, "unable to retrieve channel from DB") if len(bobChannels) != 1 { t.Fatalf("bob didn't save channel state: %v", err) } @@ -1114,24 +1028,16 @@ func testSingleFunderReservationWorkflow(miner *rpctest.Harness, // Let Alice publish the funding transaction. err = alice.PublishTransaction(fundingTx, "") - if err != nil { - t.Fatalf("unable to publish funding tx: %v", err) - } + require.NoError(t, err, "unable to publish funding tx") // Mine a single block, the funding transaction should be included // within this block. err = waitForMempoolTx(miner, &fundingSha) - if err != nil { - t.Fatalf("tx not relayed to miner: %v", err) - } + require.NoError(t, err, "tx not relayed to miner") blockHashes, err := miner.Client.Generate(1) - if err != nil { - t.Fatalf("unable to generate block: %v", err) - } + require.NoError(t, err, "unable to generate block") block, err := miner.Client.GetBlock(blockHashes[0]) - if err != nil { - t.Fatalf("unable to find block: %v", err) - } + require.NoError(t, err, "unable to find block") if len(block.Transactions) != 2 { t.Fatalf("funding transaction wasn't mined: %d", len(block.Transactions)) @@ -1198,16 +1104,12 @@ func testListTransactionDetails(miner *rpctest.Harness, // Get the miner's current best block height before we mine blocks. _, startHeight, err := miner.Client.GetBestBlock() - if err != nil { - t.Fatalf("cannot get best block: %v", err) - } + require.NoError(t, err, "cannot get best block") // Generate 10 blocks to mine all the transactions created above. const numBlocksMined = 10 blocks, err := miner.Client.Generate(numBlocksMined) - if err != nil { - t.Fatalf("unable to mine blocks: %v", err) - } + require.NoError(t, err, "unable to mine blocks") // Our new best block height should be our start height + the number of // blocks we just mined. @@ -1219,15 +1121,11 @@ func testListTransactionDetails(miner *rpctest.Harness, // not include unconfirmed transactions, since all of our transactions // should be confirmed. err = waitForWalletSync(miner, alice) - if err != nil { - t.Fatalf("Couldn't sync Alice's wallet: %v", err) - } + require.NoError(t, err, "Couldn't sync Alice's wallet") txDetails, err := alice.ListTransactionDetails( startHeight, chainTip, "", ) - if err != nil { - t.Fatalf("unable to fetch tx details: %v", err) - } + require.NoError(t, err, "unable to fetch tx details") // This is a mapping from: // blockHash -> transactionHash -> transactionOutputs @@ -1311,33 +1209,23 @@ func testListTransactionDetails(miner *rpctest.Harness, // Next create a transaction paying to an output which isn't under the // wallet's control. 
minerAddr, err := miner.NewAddress() - if err != nil { - t.Fatalf("unable to generate address: %v", err) - } + require.NoError(t, err, "unable to generate address") outputScript, err := txscript.PayToAddrScript(minerAddr) - if err != nil { - t.Fatalf("unable to make output script: %v", err) - } + require.NoError(t, err, "unable to make output script") burnOutput := wire.NewTxOut(outputAmt, outputScript) burnTX, err := alice.SendOutputs( []*wire.TxOut{burnOutput}, 2500, 1, labels.External, ) - if err != nil { - t.Fatalf("unable to create burn tx: %v", err) - } + require.NoError(t, err, "unable to create burn tx") burnTXID := burnTX.TxHash() err = waitForMempoolTx(miner, &burnTXID) - if err != nil { - t.Fatalf("tx not relayed to miner: %v", err) - } + require.NoError(t, err, "tx not relayed to miner") // Before we mine the next block, we'll ensure that the above // transaction shows up in the set of unconfirmed transactions returned // by ListTransactionDetails. err = waitForWalletSync(miner, alice) - if err != nil { - t.Fatalf("Couldn't sync Alice's wallet: %v", err) - } + require.NoError(t, err, "Couldn't sync Alice's wallet") // Query our wallet for transactions from the chain tip, including // unconfirmed transactions. The transaction above should be included @@ -1346,9 +1234,7 @@ func testListTransactionDetails(miner *rpctest.Harness, txDetails, err = alice.ListTransactionDetails( chainTip, btcwallet.UnconfirmedHeight, "", ) - if err != nil { - t.Fatalf("unable to fetch tx details: %v", err) - } + require.NoError(t, err, "unable to fetch tx details") var mempoolTxFound bool for _, txDetail := range txDetails { if !bytes.Equal(txDetail.Hash[:], burnTXID[:]) { @@ -1387,9 +1273,7 @@ func testListTransactionDetails(miner *rpctest.Harness, // Generate one block for our transaction to confirm in. var numBlocks int32 = 1 burnBlock, err := miner.Client.Generate(uint32(numBlocks)) - if err != nil { - t.Fatalf("unable to mine block: %v", err) - } + require.NoError(t, err, "unable to mine block") // Progress our chain tip by the number of blocks we have just mined. chainTip += numBlocks @@ -1399,13 +1283,9 @@ func testListTransactionDetails(miner *rpctest.Harness, // are inclusive, so we use chainTip for both parameters to get only // transactions from the last block. err = waitForWalletSync(miner, alice) - if err != nil { - t.Fatalf("Couldn't sync Alice's wallet: %v", err) - } + require.NoError(t, err, "Couldn't sync Alice's wallet") txDetails, err = alice.ListTransactionDetails(chainTip, chainTip, "") - if err != nil { - t.Fatalf("unable to fetch tx details: %v", err) - } + require.NoError(t, err, "unable to fetch tx details") var burnTxFound bool for _, txDetail := range txDetails { if !bytes.Equal(txDetail.Hash[:], burnTXID[:]) { @@ -1438,21 +1318,15 @@ func testListTransactionDetails(miner *rpctest.Harness, // Generate a block which has no wallet transactions in it. chainTip += numBlocks _, err = miner.Client.Generate(uint32(numBlocks)) - if err != nil { - t.Fatalf("unable to mine block: %v", err) - } + require.NoError(t, err, "unable to mine block") err = waitForWalletSync(miner, alice) - if err != nil { - t.Fatalf("Couldn't sync Alice's wallet: %v", err) - } + require.NoError(t, err, "Couldn't sync Alice's wallet") // Query for transactions only in the latest block. We do not expect // any transactions to be returned. 
txDetails, err = alice.ListTransactionDetails(chainTip, chainTip, "") - if err != nil { - t.Fatalf("unexpected error: %v", err) - } + require.NoError(t, err, "unexpected error") if len(txDetails) != 0 { t.Fatalf("expected 0 transactions, got: %v", len(txDetails)) } @@ -1591,29 +1465,21 @@ func testTransactionSubscriptions(miner *rpctest.Harness, b := txscript.NewScriptBuilder() b.AddOp(txscript.OP_RETURN) outputScript, err := b.Script() - if err != nil { - t.Fatalf("unable to make output script: %v", err) - } + require.NoError(t, err, "unable to make output script") burnOutput := wire.NewTxOut(outputAmt, outputScript) tx, err := alice.SendOutputs( []*wire.TxOut{burnOutput}, 2500, 1, labels.External, ) - if err != nil { - t.Fatalf("unable to create burn tx: %v", err) - } + require.NoError(t, err, "unable to create burn tx") txid := tx.TxHash() err = waitForMempoolTx(miner, &txid) - if err != nil { - t.Fatalf("tx not relayed to miner: %v", err) - } + require.NoError(t, err, "tx not relayed to miner") // Before we mine the next block, we'll ensure that the above // transaction shows up in the set of unconfirmed transactions returned // by ListTransactionDetails. err = waitForWalletSync(miner, alice) - if err != nil { - t.Fatalf("Couldn't sync Alice's wallet: %v", err) - } + require.NoError(t, err, "Couldn't sync Alice's wallet") // As we just sent the transaction and it has landed in the mempool, we // should get a notification for a new unconfirmed transaction @@ -1781,9 +1647,7 @@ func newTx(t *testing.T, r *rpctest.Harness, pubKey *btcec.PublicKey, t.Helper() keyScript, err := scriptFromKey(pubKey) - if err != nil { - t.Fatalf("unable to generate script: %v", err) - } + require.NoError(t, err, "unable to generate script") // Instruct the wallet to fund the output with a newly created // transaction. tx, err := alice.SendOutputs( []*wire.TxOut{newOutput}, 2500, 1, labels.External, ) - if err != nil { - t.Fatalf("unable to create output: %v", err) - } + require.NoError(t, err, "unable to create output") // Query for the transaction generated above so we can locate the // index of our output. @@ -1824,9 +1686,7 @@ func testPublishTransaction(r *rpctest.Harness, // Generate a pubkey, and pay-to-addr script. keyDesc, err := alice.DeriveNextKey(keychain.KeyFamilyMultiSig) - if err != nil { - t.Fatalf("unable to obtain public key: %v", err) - } + require.NoError(t, err, "unable to obtain public key") // We will first check that publishing a transaction already in the // mempool does NOT return an error. Create the tx. @@ -1834,22 +1694,16 @@ // Publish the transaction. err = alice.PublishTransaction(tx1, labels.External) - if err != nil { - t.Fatalf("unable to publish: %v", err) - } + require.NoError(t, err, "unable to publish") txid1 := tx1.TxHash() err = waitForMempoolTx(r, &txid1) - if err != nil { - t.Fatalf("tx not relayed to miner: %v", err) - } + require.NoError(t, err, "tx not relayed to miner") // Publish the exact same transaction again. This should not return an // error, even though the transaction is already in the mempool. err = alice.PublishTransaction(tx1, labels.External) - if err != nil { - t.Fatalf("unable to publish: %v", err) - } + require.NoError(t, err, "unable to publish") // Mine the transaction. if _, err := r.Client.Generate(1); err != nil { t.Fatalf("unable to generate block: %v", err) } @@ -1867,9 +1721,7 @@ // Publish this tx.
err = alice.PublishTransaction(tx2, labels.External) - if err != nil { - t.Fatalf("unable to publish: %v", err) - } + require.NoError(t, err, "unable to publish") // Mine the transaction. if err := mineAndAssert(r, tx2); err != nil { @@ -1879,9 +1731,7 @@ func testPublishTransaction(r *rpctest.Harness, // Publish the transaction again. It is already mined, and we don't // expect this to return an error. err = alice.PublishTransaction(tx2, labels.External) - if err != nil { - t.Fatalf("unable to publish: %v", err) - } + require.NoError(t, err, "unable to publish") // We'll do the next mempool check on both RBF and non-RBF enabled // transactions. @@ -2035,9 +1885,7 @@ func testSignOutputUsingTweaks(r *rpctest.Harness, pubKey, err := alice.DeriveNextKey( keychain.KeyFamilyMultiSig, ) - if err != nil { - t.Fatalf("unable to obtain public key: %v", err) - } + require.NoError(t, err, "unable to obtain public key") // As we'd like to test both single tweak, and double tweak spends, // we'll generate a commitment pre-image, then derive a revocation key @@ -2179,32 +2027,22 @@ func testReorgWalletBalance(r *rpctest.Harness, w *lnwallet.LightningWallet, // create any new non-coinbase transactions. We'll then check if it's // the same after the empty reorg. _, err := r.Client.Generate(5) - if err != nil { - t.Fatalf("unable to generate blocks on passed node: %v", err) - } + require.NoError(t, err, "unable to generate blocks on passed node") // Give wallet time to catch up. err = waitForWalletSync(r, w) - if err != nil { - t.Fatalf("unable to sync wallet: %v", err) - } + require.NoError(t, err, "unable to sync wallet") // Send some money from the miner to the wallet err = loadTestCredits(r, w, 20, 4) - if err != nil { - t.Fatalf("unable to send money to lnwallet: %v", err) - } + require.NoError(t, err, "unable to send money to lnwallet") // Send some money from the wallet back to the miner. // Grab a fresh address from the miner to house this output. minerAddr, err := r.NewAddress() - if err != nil { - t.Fatalf("unable to generate address for miner: %v", err) - } + require.NoError(t, err, "unable to generate address for miner") script, err := txscript.PayToAddrScript(minerAddr) - if err != nil { - t.Fatalf("unable to create pay to addr script: %v", err) - } + require.NoError(t, err, "unable to create pay to addr script") output := &wire.TxOut{ Value: 1e8, PkScript: script, @@ -2212,46 +2050,30 @@ func testReorgWalletBalance(r *rpctest.Harness, w *lnwallet.LightningWallet, tx, err := w.SendOutputs( []*wire.TxOut{output}, 2500, 1, labels.External, ) - if err != nil { - t.Fatalf("unable to send outputs: %v", err) - } + require.NoError(t, err, "unable to send outputs") txid := tx.TxHash() err = waitForMempoolTx(r, &txid) - if err != nil { - t.Fatalf("tx not relayed to miner: %v", err) - } + require.NoError(t, err, "tx not relayed to miner") _, err = r.Client.Generate(50) - if err != nil { - t.Fatalf("unable to generate blocks on passed node: %v", err) - } + require.NoError(t, err, "unable to generate blocks on passed node") // Give wallet time to catch up. err = waitForWalletSync(r, w) - if err != nil { - t.Fatalf("unable to sync wallet: %v", err) - } + require.NoError(t, err, "unable to sync wallet") // Get the original balance. origBalance, err := w.ConfirmedBalance(1, lnwallet.DefaultAccountName) - if err != nil { - t.Fatalf("unable to query for balance: %v", err) - } + require.NoError(t, err, "unable to query for balance") // Now we cause a reorganization as follows. 
// Step 1: create a new miner and start it. r2, err := rpctest.New(r.ActiveNet, nil, []string{"--txindex"}, "") - if err != nil { - t.Fatalf("unable to create mining node: %v", err) - } + require.NoError(t, err, "unable to create mining node") err = r2.SetUp(false, 0) - if err != nil { - t.Fatalf("unable to set up mining node: %v", err) - } + require.NoError(t, err, "unable to set up mining node") defer r2.TearDown() newBalance, err := w.ConfirmedBalance(1, lnwallet.DefaultAccountName) - if err != nil { - t.Fatalf("unable to query for balance: %v", err) - } + require.NoError(t, err, "unable to query for balance") if origBalance != newBalance { t.Fatalf("wallet balance incorrect, should have %v, "+ "instead have %v", origBalance, newBalance) @@ -2260,13 +2082,9 @@ func testReorgWalletBalance(r *rpctest.Harness, w *lnwallet.LightningWallet, // Step 2: connect the miner to the passed miner and wait for // synchronization. err = r2.Client.AddNode(r.P2PAddress(), rpcclient.ANAdd) - if err != nil { - t.Fatalf("unable to connect mining nodes together: %v", err) - } + require.NoError(t, err, "unable to connect mining nodes together") err = rpctest.JoinNodes([]*rpctest.Harness{r2, r}, rpctest.Blocks) - if err != nil { - t.Fatalf("unable to synchronize mining nodes: %v", err) - } + require.NoError(t, err, "unable to synchronize mining nodes") // Step 3: Do a set of reorgs by disconnecting the two miners, mining // one block on the passed miner and two on the created miner, @@ -2341,9 +2159,7 @@ func testReorgWalletBalance(r *rpctest.Harness, w *lnwallet.LightningWallet, // Now we check that the wallet balance stays the same. newBalance, err = w.ConfirmedBalance(1, lnwallet.DefaultAccountName) - if err != nil { - t.Fatalf("unable to query for balance: %v", err) - } + require.NoError(t, err, "unable to query for balance") if origBalance != newBalance { t.Fatalf("wallet balance incorrect, should have %v, "+ "instead have %v", origBalance, newBalance) @@ -2364,9 +2180,7 @@ func testChangeOutputSpendConfirmation(r *rpctest.Harness, // we'll craft the following transaction so that Alice doesn't have any // UTXOs left. aliceBalance, err := alice.ConfirmedBalance(0, lnwallet.DefaultAccountName) - if err != nil { - t.Fatalf("unable to retrieve alice's balance: %v", err) - } + require.NoError(t, err, "unable to retrieve alice's balance") bobPkScript := newPkScript(t, bob, lnwallet.WitnessPubKey) // We'll use a transaction fee of 14380 satoshis, which will allow us to @@ -2389,9 +2203,7 @@ func testChangeOutputSpendConfirmation(r *rpctest.Harness, // With the transaction sent and confirmed, Alice's balance should now // be 0. aliceBalance, err = alice.ConfirmedBalance(0, lnwallet.DefaultAccountName) - if err != nil { - t.Fatalf("unable to retrieve alice's balance: %v", err) - } + require.NoError(t, err, "unable to retrieve alice's balance") if aliceBalance != 0 { t.Fatalf("expected alice's balance to be 0 BTC, found %v", aliceBalance) @@ -2445,9 +2257,7 @@ func testSpendUnconfirmed(miner *rpctest.Harness, // First we will empty out bob's wallet, sending the entire balance // to alice. bobBalance, err := bob.ConfirmedBalance(0, lnwallet.DefaultAccountName) - if err != nil { - t.Fatalf("unable to retrieve bob's balance: %v", err) - } + require.NoError(t, err, "unable to retrieve bob's balance") txFee := btcutil.Amount(28760) output := &wire.TxOut{ Value: int64(bobBalance - txFee), @@ -2504,9 +2314,7 @@ func testSpendUnconfirmed(miner *rpctest.Harness, // Mine the unconfirmed transactions. 
err = waitForMempoolTx(miner, &txHash) - if err != nil { - t.Fatalf("tx not relayed to miner: %v", err) - } + require.NoError(t, err, "tx not relayed to miner") if _, err := miner.Client.Generate(1); err != nil { t.Fatalf("unable to generate block: %v", err) } @@ -2601,9 +2409,7 @@ func testCreateSimpleTx(r *rpctest.Harness, w *lnwallet.LightningWallet, // Send some money from the miner to the wallet err := loadTestCredits(r, w, 20, 4) - if err != nil { - t.Fatalf("unable to send money to lnwallet: %v", err) - } + require.NoError(t, err, "unable to send money to lnwallet") // The test cases we will run through for all backends. testCases := []struct { @@ -3083,13 +2889,9 @@ func testSingleFunderExternalFundingTx(miner *rpctest.Harness, // First, we'll obtain multi-sig keys from both Alice and Bob which // simulates them exchanging keys on a higher level. aliceFundingKey, err := alice.DeriveNextKey(keychain.KeyFamilyMultiSig) - if err != nil { - t.Fatalf("unable to obtain alice funding key: %v", err) - } + require.NoError(t, err, "unable to obtain alice funding key") bobFundingKey, err := bob.DeriveNextKey(keychain.KeyFamilyMultiSig) - if err != nil { - t.Fatalf("unable to obtain bob funding key: %v", err) - } + require.NoError(t, err, "unable to obtain bob funding key") // We'll now set up for them to open a 4 BTC channel, with 1 BTC pushed // to Bob's side. @@ -3120,9 +2922,7 @@ ) }, }) - if err != nil { - t.Fatalf("unable to perform coin selection: %v", err) - } + require.NoError(t, err, "unable to perform coin selection") // With our intent created, we'll instruct it to finalize the funding // transaction, and also hand us the outpoint so we can simulate @@ -3168,9 +2968,7 @@ ) }, }) - if err != nil { - t.Fatalf("unable to create shim intent for bob: %v", err) - } + require.NoError(t, err, "unable to create shim intent for bob") // At this point, we have everything we need to carry out our test, so // we'll begin the funding flow between Alice and Bob. @@ -3180,9 +2978,7 @@ // from Alice.
pendingChanID := testHdSeed err = bob.RegisterFundingIntent(pendingChanID, bobShimIntent) - if err != nil { - t.Fatalf("unable to register intent: %v", err) - } + require.NoError(t, err, "unable to register intent") // Now we can carry out the single funding flow as normal, we'll // specify our external funder and funding transaction, as well as the @@ -3219,9 +3015,7 @@ func TestLightningWallet(t *testing.T, targetBackEnd string) { miningNode, err := rpctest.New( netParams, nil, []string{"--txindex"}, "", ) - if err != nil { - t.Fatalf("unable to create mining node: %v", err) - } + require.NoError(t, err, "unable to create mining node") defer miningNode.TearDown() if err := miningNode.SetUp(true, 25); err != nil { t.Fatalf("unable to set up mining node: %v", err) @@ -3237,27 +3031,19 @@ func TestLightningWallet(t *testing.T, targetBackEnd string) { rpcConfig := miningNode.RPCConfig() tempDir, err := ioutil.TempDir("", "channeldb") - if err != nil { - t.Fatalf("unable to create temp dir: %v", err) - } + require.NoError(t, err, "unable to create temp dir") db, err := channeldb.Open(tempDir) - if err != nil { - t.Fatalf("unable to create db: %v", err) - } + require.NoError(t, err, "unable to create db") testCfg := chainntnfs.CacheConfig{ QueryDisable: false, } hintCache, err := chainntnfs.NewHeightHintCache(testCfg, db.Backend) - if err != nil { - t.Fatalf("unable to create height hint cache: %v", err) - } + require.NoError(t, err, "unable to create height hint cache") blockCache := blockcache.NewBlockCache(10000) chainNotifier, err := btcdnotify.New( &rpcConfig, netParams, hintCache, hintCache, blockCache, ) - if err != nil { - t.Fatalf("unable to create notifier: %v", err) - } + require.NoError(t, err, "unable to create notifier") if err := chainNotifier.Start(); err != nil { t.Fatalf("unable to start notifier: %v", err) } @@ -3300,15 +3086,11 @@ func runTests(t *testing.T, walletDriver *lnwallet.WalletDriver, ) tempTestDirAlice, err := ioutil.TempDir("", "lnwallet") - if err != nil { - t.Fatalf("unable to create temp directory: %v", err) - } + require.NoError(t, err, "unable to create temp directory") defer os.RemoveAll(tempTestDirAlice) tempTestDirBob, err := ioutil.TempDir("", "lnwallet") - if err != nil { - t.Fatalf("unable to create temp directory: %v", err) - } + require.NoError(t, err, "unable to create temp directory") defer os.RemoveAll(tempTestDirBob) blockCache := blockcache.NewBlockCache(10000) @@ -3622,18 +3404,14 @@ func runTests(t *testing.T, walletDriver *lnwallet.WalletDriver, chainNotifier, aliceWalletController, aliceKeyRing, aliceSigner, bio, ) - if err != nil { - t.Fatalf("unable to create test ln wallet: %v", err) - } + require.NoError(t, err, "unable to create test ln wallet") defer alice.Shutdown() bob, err := createTestWallet( tempTestDirBob, miningNode, netParams, chainNotifier, bobWalletController, bobKeyRing, bobSigner, bio, ) - if err != nil { - t.Fatalf("unable to create test ln wallet: %v", err) - } + require.NoError(t, err, "unable to create test ln wallet") defer bob.Shutdown() // Both wallets should now have 80BTC available for diff --git a/lnwallet/transactions_test.go b/lnwallet/transactions_test.go index e8446e0e4..6540dbbf7 100644 --- a/lnwallet/transactions_test.go +++ b/lnwallet/transactions_test.go @@ -495,9 +495,7 @@ func testSpendValidation(t *testing.T, tweakless bool) { // doesn't need to exist, as we'll only be validating spending from the // transaction that references this. 
txid, err := chainhash.NewHash(testHdSeed.CloneBytes()) - if err != nil { - t.Fatalf("unable to create txid: %v", err) - } + require.NoError(t, err, "unable to create txid") fundingOut := &wire.OutPoint{ Hash: *txid, Index: 50, @@ -585,9 +583,7 @@ func testSpendValidation(t *testing.T, tweakless bool) { // We're testing an uncooperative close, output sweep, so construct a // transaction which sweeps the funds to a random address. targetOutput, err := input.CommitScriptUnencumbered(aliceKeyPub) - if err != nil { - t.Fatalf("unable to create target output: %v", err) - } + require.NoError(t, err, "unable to create target output") sweepTx := wire.NewMsgTx(2) sweepTx.AddTxIn(wire.NewTxIn(&wire.OutPoint{ Hash: commitmentTx.TxHash(), @@ -602,9 +598,7 @@ func testSpendValidation(t *testing.T, tweakless bool) { delayScript, err := input.CommitScriptToSelf( csvTimeout, aliceDelayKey, revokePubKey, ) - if err != nil { - t.Fatalf("unable to generate alice delay script: %v", err) - } + require.NoError(t, err, "unable to generate alice delay script") sweepTx.TxIn[0].Sequence = input.LockTimeToSequence(false, csvTimeout) signDesc := &input.SignDescriptor{ WitnessScript: delayScript, @@ -622,18 +616,14 @@ func testSpendValidation(t *testing.T, tweakless bool) { aliceWitnessSpend, err := input.CommitSpendTimeout( aliceSelfOutputSigner, signDesc, sweepTx, ) - if err != nil { - t.Fatalf("unable to generate delay commit spend witness: %v", err) - } + require.NoError(t, err, "unable to generate delay commit spend witness") sweepTx.TxIn[0].Witness = aliceWitnessSpend vm, err := txscript.NewEngine( delayOutput.PkScript, sweepTx, 0, txscript.StandardVerifyFlags, nil, nil, int64(channelBalance), txscript.NewCannedPrevOutputFetcher(nil, 0), ) - if err != nil { - t.Fatalf("unable to create engine: %v", err) - } + require.NoError(t, err, "unable to create engine") if err := vm.Execute(); err != nil { t.Fatalf("spend from delay output is invalid: %v", err) } @@ -658,18 +648,14 @@ func testSpendValidation(t *testing.T, tweakless bool) { } bobWitnessSpend, err := input.CommitSpendRevoke(localSigner, signDesc, sweepTx) - if err != nil { - t.Fatalf("unable to generate revocation witness: %v", err) - } + require.NoError(t, err, "unable to generate revocation witness") sweepTx.TxIn[0].Witness = bobWitnessSpend vm, err = txscript.NewEngine( delayOutput.PkScript, sweepTx, 0, txscript.StandardVerifyFlags, nil, nil, int64(channelBalance), txscript.NewCannedPrevOutputFetcher(nil, 0), ) - if err != nil { - t.Fatalf("unable to create engine: %v", err) - } + require.NoError(t, err, "unable to create engine") if err := vm.Execute(); err != nil { t.Fatalf("revocation spend is invalid: %v", err) } @@ -687,9 +673,7 @@ func testSpendValidation(t *testing.T, tweakless bool) { // Finally, we test bob sweeping his output as normal in the case that // Alice broadcasts this commitment transaction. 
bobScriptP2WKH, err := input.CommitScriptUnencumbered(bobPayKey) - if err != nil { - t.Fatalf("unable to create bob p2wkh script: %v", err) - } + require.NoError(t, err, "unable to create bob p2wkh script") signDesc = &input.SignDescriptor{ KeyDesc: keychain.KeyDescriptor{ PubKey: bobKeyPub, @@ -709,9 +693,7 @@ func testSpendValidation(t *testing.T, tweakless bool) { bobRegularSpend, err := input.CommitSpendNoDelay( localSigner, signDesc, sweepTx, tweakless, ) - if err != nil { - t.Fatalf("unable to create bob regular spend: %v", err) - } + require.NoError(t, err, "unable to create bob regular spend") sweepTx.TxIn[0].Witness = bobRegularSpend vm, err = txscript.NewEngine( regularOutput.PkScript, @@ -719,9 +701,7 @@ func testSpendValidation(t *testing.T, tweakless bool) { nil, int64(channelBalance), txscript.NewCannedPrevOutputFetcher(bobScriptP2WKH, 0), ) - if err != nil { - t.Fatalf("unable to create engine: %v", err) - } + require.NoError(t, err, "unable to create engine") if err := vm.Execute(); err != nil { t.Fatalf("bob p2wkh spend is invalid: %v", err) } diff --git a/lnwire/extra_bytes_test.go b/lnwire/extra_bytes_test.go index 6e840a2a6..fd9f28841 100644 --- a/lnwire/extra_bytes_test.go +++ b/lnwire/extra_bytes_test.go @@ -8,6 +8,7 @@ import ( "testing/quick" "github.com/lightningnetwork/lnd/tlv" + "github.com/stretchr/testify/require" ) // TestExtraOpaqueDataEncodeDecode tests that we're able to encode/decode @@ -128,9 +129,7 @@ func TestExtraOpaqueDataPackUnpackRecords(t *testing.T) { &recordProducer{tlv.MakePrimitiveRecord(type2, &hop2)}, } typeMap, err := extraBytes.ExtractRecords(newRecords...) - if err != nil { - t.Fatalf("unable to extract record: %v", err) - } + require.NoError(t, err, "unable to extract record") // We should find that the new backing values have been populated with // the proper value. diff --git a/lnwire/netaddress_test.go b/lnwire/netaddress_test.go index dbefef32a..c7ae82b84 100644 --- a/lnwire/netaddress_test.go +++ b/lnwire/netaddress_test.go @@ -6,6 +6,7 @@ import ( "testing" "github.com/btcsuite/btcd/btcec/v2" + "github.com/stretchr/testify/require" ) func TestNetAddressDisplay(t *testing.T) { @@ -13,14 +14,10 @@ func TestNetAddressDisplay(t *testing.T) { pubKeyStr := "036a0c5ea35df8a528b98edf6f290b28676d51d0fe202b073fe677612a39c0aa09" pubHex, err := hex.DecodeString(pubKeyStr) - if err != nil { - t.Fatalf("unable to decode str: %v", err) - } + require.NoError(t, err, "unable to decode str") pubKey, err := btcec.ParsePubKey(pubHex) - if err != nil { - t.Fatalf("unable to parse pubkey: %v", err) - } + require.NoError(t, err, "unable to parse pubkey") addr, _ := net.ResolveTCPAddr("tcp", "10.0.0.2:9000") netAddr := NetAddress{ diff --git a/lnwire/onion_error_test.go b/lnwire/onion_error_test.go index 16f9d4c36..27bba342e 100644 --- a/lnwire/onion_error_test.go +++ b/lnwire/onion_error_test.go @@ -9,6 +9,7 @@ import ( "testing" "github.com/davecgh/go-spew/spew" + "github.com/stretchr/testify/require" ) var ( @@ -101,9 +102,7 @@ func TestChannelUpdateCompatabilityParsing(t *testing.T) { err := parseChannelUpdateCompatabilityMode( bufio.NewReader(&b), &newChanUpdate, 0, ) - if err != nil { - t.Fatalf("unable to parse channel update: %v", err) - } + require.NoError(t, err, "unable to parse channel update") // At this point, we'll ensure that we get the exact same failure out // on the other side. 
@@ -130,9 +129,7 @@ func TestChannelUpdateCompatabilityParsing(t *testing.T) { err = parseChannelUpdateCompatabilityMode( bufio.NewReader(&b), &newChanUpdate2, 0, ) - if err != nil { - t.Fatalf("unable to parse channel update: %v", err) - } + require.NoError(t, err, "unable to parse channel update") if !reflect.DeepEqual(newChanUpdate2, newChanUpdate) { t.Fatalf("mismatched channel updates: %v", err) @@ -158,9 +155,7 @@ func TestWriteOnionErrorChanUpdate(t *testing.T) { // onion error message. var errorBuf bytes.Buffer err := writeOnionErrorChanUpdate(&errorBuf, &update, 0) - if err != nil { - t.Fatalf("unable to encode onion error: %v", err) - } + require.NoError(t, err, "unable to encode onion error") // Finally, read the length encoded and ensure that it matches the raw // length. @@ -188,9 +183,7 @@ func TestFailIncorrectDetailsOptionalAmount(t *testing.T) { } onionError2, err := DecodeFailure(bytes.NewReader(b.Bytes()), 0) - if err != nil { - t.Fatalf("unable to decode error: %v", err) - } + require.NoError(t, err, "unable to decode error") invalidDetailsErr, ok := onionError2.(*FailIncorrectDetails) if !ok { @@ -241,9 +234,7 @@ func TestFailIncorrectDetailsOptionalHeight(t *testing.T) { } onionError2, err := DecodeFailure(bytes.NewReader(b.Bytes()), 0) - if err != nil { - t.Fatalf("unable to decode error: %v", err) - } + require.NoError(t, err, "unable to decode error") invalidDetailsErr, ok := onionError2.(*FailIncorrectDetails) if !ok { diff --git a/macaroons/constraints_test.go b/macaroons/constraints_test.go index 8bceb0b32..d690bda8c 100644 --- a/macaroons/constraints_test.go +++ b/macaroons/constraints_test.go @@ -23,9 +23,7 @@ func createDummyMacaroon(t *testing.T) *macaroon.Macaroon { dummyMacaroon, err := macaroon.New( testRootKey, testID, testLocation, testVersion, ) - if err != nil { - t.Fatalf("Error creating initial macaroon: %v", err) - } + require.NoError(t, err, "Error creating initial macaroon") return dummyMacaroon } @@ -41,9 +39,7 @@ func TestAddConstraints(t *testing.T) { newMac, err := macaroons.AddConstraints( initialMac, macaroons.TimeoutConstraint(1), ) - if err != nil { - t.Fatalf("Error adding constraint: %v", err) - } + require.NoError(t, err, "Error adding constraint") if &newMac == &initialMac { t.Fatalf("Initial macaroon has been changed, something " + "went wrong!") @@ -66,9 +62,7 @@ func TestTimeoutConstraint(t *testing.T) { // function to. testMacaroon := createDummyMacaroon(t) err := constraintFunc(testMacaroon) - if err != nil { - t.Fatalf("Error applying timeout constraint: %v", err) - } + require.NoError(t, err, "Error applying timeout constraint") // Finally, check that the created caveat has an // acceptable value. @@ -92,9 +86,7 @@ func TestIpLockConstraint(t *testing.T) { // function to. testMacaroon := createDummyMacaroon(t) err := constraintFunc(testMacaroon) - if err != nil { - t.Fatalf("Error applying timeout constraint: %v", err) - } + require.NoError(t, err, "Error applying timeout constraint") // Finally, check that the created caveat has an // acceptable value. diff --git a/macaroons/service_test.go b/macaroons/service_test.go index bf3cb0fde..aaaf8be0a 100644 --- a/macaroons/service_test.go +++ b/macaroons/service_test.go @@ -35,16 +35,12 @@ var ( // and read the store on its own. 
func setupTestRootKeyStorage(t *testing.T) (string, kvdb.Backend) { tempDir, err := ioutil.TempDir("", "macaroonstore-") - if err != nil { - t.Fatalf("Error creating temp dir: %v", err) - } + require.NoError(t, err, "Error creating temp dir") db, err := kvdb.Create( kvdb.BoltBackendName, path.Join(tempDir, "macaroons.db"), true, kvdb.DefaultDBTimeout, ) - if err != nil { - t.Fatalf("Error opening store DB: %v", err) - } + require.NoError(t, err, "Error opening store DB") store, err := macaroons.NewRootKeyStorage(db) if err != nil { db.Close() @@ -52,9 +48,7 @@ func setupTestRootKeyStorage(t *testing.T) (string, kvdb.Backend) { } defer store.Close() err = store.CreateUnlock(&defaultPw) - if err != nil { - t.Fatalf("error creating unlock: %v", err) - } + require.NoError(t, err, "error creating unlock") return tempDir, db } @@ -70,14 +64,10 @@ func TestNewService(t *testing.T) { service, err := macaroons.NewService( db, "lnd", false, macaroons.IPLockChecker, ) - if err != nil { - t.Fatalf("Error creating new service: %v", err) - } + require.NoError(t, err, "Error creating new service") defer service.Close() err = service.CreateUnlock(&defaultPw) - if err != nil { - t.Fatalf("Error unlocking root key storage: %v", err) - } + require.NoError(t, err, "Error unlocking root key storage") // Third, check if the created service can bake macaroons. _, err = service.NewMacaroon(context.TODO(), nil, testOperation) @@ -88,9 +78,7 @@ func TestNewService(t *testing.T) { macaroon, err := service.NewMacaroon( context.TODO(), macaroons.DefaultRootKeyID, testOperation, ) - if err != nil { - t.Fatalf("Error creating macaroon from service: %v", err) - } + require.NoError(t, err, "Error creating macaroon from service") if macaroon.Namespace().String() != "std:" { t.Fatalf("The created macaroon has an invalid namespace: %s", macaroon.Namespace().String()) @@ -121,28 +109,20 @@ func TestValidateMacaroon(t *testing.T) { service, err := macaroons.NewService( db, "lnd", false, macaroons.IPLockChecker, ) - if err != nil { - t.Fatalf("Error creating new service: %v", err) - } + require.NoError(t, err, "Error creating new service") defer service.Close() err = service.CreateUnlock(&defaultPw) - if err != nil { - t.Fatalf("Error unlocking root key storage: %v", err) - } + require.NoError(t, err, "Error unlocking root key storage") // Then, create a new macaroon that we can serialize. macaroon, err := service.NewMacaroon( context.TODO(), macaroons.DefaultRootKeyID, testOperation, testOperationURI, ) - if err != nil { - t.Fatalf("Error creating macaroon from service: %v", err) - } + require.NoError(t, err, "Error creating macaroon from service") macaroonBinary, err := macaroon.M().MarshalBinary() - if err != nil { - t.Fatalf("Error serializing macaroon: %v", err) - } + require.NoError(t, err, "Error serializing macaroon") // Because the macaroons are always passed in a context, we need to // mock one that has just the serialized macaroon as a value. @@ -155,18 +135,14 @@ func TestValidateMacaroon(t *testing.T) { err = service.ValidateMacaroon( mockContext, []bakery.Op{testOperation}, "FooMethod", ) - if err != nil { - t.Fatalf("Error validating the macaroon: %v", err) - } + require.NoError(t, err, "Error validating the macaroon") // If the macaroon has the method specific URI permission, the list of // required entity/action pairs is irrelevant. 
err = service.ValidateMacaroon( mockContext, []bakery.Op{{Entity: "irrelevant"}}, "SomeMethod", ) - if err != nil { - t.Fatalf("Error validating the macaroon: %v", err) - } + require.NoError(t, err, "Error validating the macaroon") } // TestListMacaroonIDs checks that ListMacaroonIDs returns the expected result. diff --git a/netann/chan_status_manager_test.go b/netann/chan_status_manager_test.go index 19ada7ba6..14096d086 100644 --- a/netann/chan_status_manager_test.go +++ b/netann/chan_status_manager_test.go @@ -17,6 +17,7 @@ import ( "github.com/lightningnetwork/lnd/keychain" "github.com/lightningnetwork/lnd/lnwire" "github.com/lightningnetwork/lnd/netann" + "github.com/stretchr/testify/require" ) var ( @@ -34,9 +35,7 @@ func randOutpoint(t *testing.T) wire.OutPoint { var buf [36]byte _, err := io.ReadFull(rand.Reader, buf[:]) - if err != nil { - t.Fatalf("unable to generate random outpoint: %v", err) - } + require.NoError(t, err, "unable to generate random outpoint") op := wire.OutPoint{} copy(op.Hash[:], buf[:32]) @@ -86,9 +85,7 @@ func createEdgePolicies(t *testing.T, channel *channeldb.OpenChannel, // Generate and set pubkey2 for THEIR pubkey. privKey2, err := btcec.NewPrivateKey() - if err != nil { - t.Fatalf("unable to generate key pair: %v", err) - } + require.NoError(t, err, "unable to generate key pair") copy(pubkey2[:], privKey2.PubKey().SerializeCompressed()) // Set pubkey1 to the lower of the two pubkeys. @@ -316,9 +313,7 @@ func newManagerCfg(t *testing.T, numChannels int, t.Helper() privKey, err := btcec.NewPrivateKey() - if err != nil { - t.Fatalf("unable to generate key pair: %v", err) - } + require.NoError(t, err, "unable to generate key pair") privKeySigner := keychain.NewPrivKeyMessageSigner(privKey, testKeyLoc) graph := newMockGraph( @@ -362,14 +357,10 @@ func newHarness(t *testing.T, numChannels int, cfg, graph, htlcSwitch := newManagerCfg(t, numChannels, startEnabled) mgr, err := netann.NewChanStatusManager(cfg) - if err != nil { - t.Fatalf("unable to create chan status manager: %v", err) - } + require.NoError(t, err, "unable to create chan status manager") err = mgr.Start() - if err != nil { - t.Fatalf("unable to start chan status manager: %v", err) - } + require.NoError(t, err, "unable to start chan status manager") h := testHarness{ t: t, diff --git a/netann/channel_announcement_test.go b/netann/channel_announcement_test.go index 663de5c85..01aa767f8 100644 --- a/netann/channel_announcement_test.go +++ b/netann/channel_announcement_test.go @@ -10,6 +10,7 @@ import ( "github.com/lightningnetwork/lnd/channeldb" "github.com/lightningnetwork/lnd/lnwire" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestCreateChanAnnouncement(t *testing.T) { @@ -59,9 +60,7 @@ func TestCreateChanAnnouncement(t *testing.T) { chanAnn, _, _, err := CreateChanAnnouncement( chanProof, chanInfo, nil, nil, ) - if err != nil { - t.Fatalf("unable to create channel announcement: %v", err) - } + require.NoError(t, err, "unable to create channel announcement") assert.Equal(t, chanAnn, expChanAnn) } diff --git a/peer/brontide_test.go b/peer/brontide_test.go index 43fb2b782..23f646a05 100644 --- a/peer/brontide_test.go +++ b/peer/brontide_test.go @@ -48,9 +48,7 @@ func TestPeerChannelClosureAcceptFeeResponder(t *testing.T) { alicePeer, bobChan, cleanUp, err := createTestPeer( notifier, broadcastTxChan, noUpdate, mockSwitch, ) - if err != nil { - t.Fatalf("unable to create test channels: %v", err) - } + require.NoError(t, err, "unable to create test channels") 
defer cleanUp() chanID := lnwire.NewChanIDFromOutPoint(bobChan.ChannelPoint()) @@ -102,14 +100,10 @@ func TestPeerChannelClosureAcceptFeeResponder(t *testing.T) { bobSig, _, _, err := bobChan.CreateCloseProposal( aliceFee, dummyDeliveryScript, respDeliveryScript, ) - if err != nil { - t.Fatalf("error creating close proposal: %v", err) - } + require.NoError(t, err, "error creating close proposal") parsedSig, err := lnwire.NewSigFromSignature(bobSig) - if err != nil { - t.Fatalf("error parsing signature: %v", err) - } + require.NoError(t, err, "error parsing signature") closingSigned := lnwire.NewClosingSigned(chanID, aliceFee, parsedSig) alicePeer.chanCloseMsgs <- &closeMsg{ cid: chanID, @@ -156,9 +150,7 @@ func TestPeerChannelClosureAcceptFeeInitiator(t *testing.T) { alicePeer, bobChan, cleanUp, err := createTestPeer( notifier, broadcastTxChan, noUpdate, mockSwitch, ) - if err != nil { - t.Fatalf("unable to create test channels: %v", err) - } + require.NoError(t, err, "unable to create test channels") defer cleanUp() chanID := lnwire.NewChanIDFromOutPoint(bobChan.ChannelPoint()) @@ -218,13 +210,9 @@ func TestPeerChannelClosureAcceptFeeInitiator(t *testing.T) { bobSig, _, _, err := bobChan.CreateCloseProposal( bobFee, dummyDeliveryScript, aliceDeliveryScript, ) - if err != nil { - t.Fatalf("unable to create close proposal: %v", err) - } + require.NoError(t, err, "unable to create close proposal") parsedSig, err := lnwire.NewSigFromSignature(bobSig) - if err != nil { - t.Fatalf("unable to parse signature: %v", err) - } + require.NoError(t, err, "unable to parse signature") closingSigned := lnwire.NewClosingSigned(shutdownMsg.ChannelID, bobFee, parsedSig) @@ -283,9 +271,7 @@ func TestPeerChannelClosureFeeNegotiationsResponder(t *testing.T) { alicePeer, bobChan, cleanUp, err := createTestPeer( notifier, broadcastTxChan, noUpdate, mockSwitch, ) - if err != nil { - t.Fatalf("unable to create test channels: %v", err) - } + require.NoError(t, err, "unable to create test channels") defer cleanUp() chanID := lnwire.NewChanIDFromOutPoint(bobChan.ChannelPoint()) @@ -337,14 +323,10 @@ func TestPeerChannelClosureFeeNegotiationsResponder(t *testing.T) { bobSig, _, _, err := bobChan.CreateCloseProposal( increasedFee, dummyDeliveryScript, aliceDeliveryScript, ) - if err != nil { - t.Fatalf("error creating close proposal: %v", err) - } + require.NoError(t, err, "error creating close proposal") parsedSig, err := lnwire.NewSigFromSignature(bobSig) - if err != nil { - t.Fatalf("error parsing signature: %v", err) - } + require.NoError(t, err, "error parsing signature") closingSigned := lnwire.NewClosingSigned(chanID, increasedFee, parsedSig) alicePeer.chanCloseMsgs <- &closeMsg{ cid: chanID, @@ -381,14 +363,10 @@ func TestPeerChannelClosureFeeNegotiationsResponder(t *testing.T) { bobSig, _, _, err = bobChan.CreateCloseProposal( increasedFee, dummyDeliveryScript, aliceDeliveryScript, ) - if err != nil { - t.Fatalf("error creating close proposal: %v", err) - } + require.NoError(t, err, "error creating close proposal") parsedSig, err = lnwire.NewSigFromSignature(bobSig) - if err != nil { - t.Fatalf("error parsing signature: %v", err) - } + require.NoError(t, err, "error parsing signature") closingSigned = lnwire.NewClosingSigned(chanID, increasedFee, parsedSig) alicePeer.chanCloseMsgs <- &closeMsg{ cid: chanID, @@ -427,14 +405,10 @@ func TestPeerChannelClosureFeeNegotiationsResponder(t *testing.T) { bobSig, _, _, err = bobChan.CreateCloseProposal( aliceFee, dummyDeliveryScript, aliceDeliveryScript, ) - if err != nil { - t.Fatalf("error creating close proposal: %v", err) - } + require.NoError(t, err, "error creating close proposal") parsedSig, err = lnwire.NewSigFromSignature(bobSig) - if err != nil { - t.Fatalf("error parsing signature: %v", err) - } + require.NoError(t, err, "error parsing signature") closingSigned = lnwire.NewClosingSigned(chanID, aliceFee, parsedSig) alicePeer.chanCloseMsgs <- &closeMsg{ cid: chanID, @@ -482,9 +456,7 @@ func TestPeerChannelClosureFeeNegotiationsInitiator(t *testing.T) { alicePeer, bobChan, cleanUp, err := createTestPeer( notifier, broadcastTxChan, noUpdate, mockSwitch, ) - if err != nil { - t.Fatalf("unable to create test channels: %v", err) - } + require.NoError(t, err, "unable to create test channels") defer cleanUp() chanID := lnwire.NewChanIDFromOutPoint(bobChan.ChannelPoint()) @@ -549,14 +521,10 @@ func TestPeerChannelClosureFeeNegotiationsInitiator(t *testing.T) { bobSig, _, _, err := bobChan.CreateCloseProposal( increasedFee, dummyDeliveryScript, aliceDeliveryScript, ) - if err != nil { - t.Fatalf("error creating close proposal: %v", err) - } + require.NoError(t, err, "error creating close proposal") parsedSig, err := lnwire.NewSigFromSignature(bobSig) - if err != nil { - t.Fatalf("unable to parse signature: %v", err) - } + require.NoError(t, err, "unable to parse signature") closingSigned := lnwire.NewClosingSigned(chanID, increasedFee, parsedSig) alicePeer.chanCloseMsgs <- &closeMsg{ @@ -596,14 +564,10 @@ func TestPeerChannelClosureFeeNegotiationsInitiator(t *testing.T) { bobSig, _, _, err = bobChan.CreateCloseProposal( increasedFee, dummyDeliveryScript, aliceDeliveryScript, ) - if err != nil { - t.Fatalf("error creating close proposal: %v", err) - } + require.NoError(t, err, "error creating close proposal") parsedSig, err = lnwire.NewSigFromSignature(bobSig) - if err != nil { - t.Fatalf("error parsing signature: %v", err) - } + require.NoError(t, err, "error parsing signature") closingSigned = lnwire.NewClosingSigned(chanID, increasedFee, parsedSig) alicePeer.chanCloseMsgs <- &closeMsg{ @@ -640,14 +604,10 @@ func TestPeerChannelClosureFeeNegotiationsInitiator(t *testing.T) { bobSig, _, _, err = bobChan.CreateCloseProposal( aliceFee, dummyDeliveryScript, aliceDeliveryScript, ) - if err != nil { - t.Fatalf("error creating close proposal: %v", err) - } + require.NoError(t, err, "error creating close proposal") parsedSig, err = lnwire.NewSigFromSignature(bobSig) - if err != nil { - t.Fatalf("error parsing signature: %v", err) - } + require.NoError(t, err, "error parsing signature") closingSigned = lnwire.NewClosingSigned(chanID, aliceFee, parsedSig) alicePeer.chanCloseMsgs <- &closeMsg{ cid: chanID, @@ -1024,14 +984,10 @@ func genScript(t *testing.T, address string) lnwire.DeliveryAddress { address, &chaincfg.TestNet3Params, ) - if err != nil { - t.Fatalf("invalid delivery address: %v", err) - } + require.NoError(t, err, "invalid delivery address") script, err := txscript.PayToAddrScript(deliveryAddr) - if err != nil { - t.Fatalf("cannot create script: %v", err) - } + require.NoError(t, err, "cannot create script") return script } diff --git a/pool/worker_test.go b/pool/worker_test.go index ee23e7a57..d581c84f9 100644 --- a/pool/worker_test.go +++ b/pool/worker_test.go @@ -11,6 +11,7 @@ import ( "github.com/lightningnetwork/lnd/buffer" "github.com/lightningnetwork/lnd/pool" + "github.com/stretchr/testify/require" ) type workerPoolTest struct { @@ -256,9 +257,7 @@ func startGeneric(t *testing.T, p interface{}) { t.Fatalf("unknown worker pool type: %T", p)
} - if err != nil { - t.Fatalf("unable to start worker pool: %v", err) - } + require.NoError(t, err, "unable to start worker pool") } func stopGeneric(t *testing.T, p interface{}) { @@ -276,9 +275,7 @@ func stopGeneric(t *testing.T, p interface{}) { t.Fatalf("unknown worker pool type: %T", p) } - if err != nil { - t.Fatalf("unable to stop worker pool: %v", err) - } + require.NoError(t, err, "unable to stop worker pool") } func submitGeneric(p interface{}, sem <-chan struct{}) error { diff --git a/routing/chainview/interface_test.go b/routing/chainview/interface_test.go index bb1700729..2096298cb 100644 --- a/routing/chainview/interface_test.go +++ b/routing/chainview/interface_test.go @@ -30,6 +30,7 @@ import ( "github.com/lightningnetwork/lnd/channeldb" "github.com/lightningnetwork/lnd/kvdb" "github.com/lightningnetwork/lnd/lntest/wait" + "github.com/stretchr/testify/require" ) var ( @@ -200,34 +201,22 @@ func testFilterBlockNotifications(node *rpctest.Harness, // To start the test, we'll create to fresh outputs paying to the // private key that we generated above. txid1, err := getTestTXID(node) - if err != nil { - t.Fatalf("unable to get test txid: %v", err) - } + require.NoError(t, err, "unable to get test txid") err = waitForMempoolTx(node, txid1) - if err != nil { - t.Fatalf("unable to get test txid in mempool: %v", err) - } + require.NoError(t, err, "unable to get test txid in mempool") txid2, err := getTestTXID(node) - if err != nil { - t.Fatalf("unable to get test txid: %v", err) - } + require.NoError(t, err, "unable to get test txid") err = waitForMempoolTx(node, txid2) - if err != nil { - t.Fatalf("unable to get test txid in mempool: %v", err) - } + require.NoError(t, err, "unable to get test txid in mempool") blockChan := chainView.FilteredBlocks() // Next we'll mine a block confirming the output generated above. newBlockHashes, err := node.Client.Generate(1) - if err != nil { - t.Fatalf("unable to generate block: %v", err) - } + require.NoError(t, err, "unable to generate block") _, currentHeight, err := node.Client.GetBestBlock() - if err != nil { - t.Fatalf("unable to get current height: %v", err) - } + require.NoError(t, err, "unable to get current height") // We should get an update, however it shouldn't yet contain any // filtered transaction as the filter hasn't been update. @@ -243,34 +232,22 @@ func testFilterBlockNotifications(node *rpctest.Harness, // so we can add them to the filter, and also craft transaction // spending the outputs we created. tx1, err := node.Client.GetRawTransaction(txid1) - if err != nil { - t.Fatalf("unable to fetch transaction: %v", err) - } + require.NoError(t, err, "unable to fetch transaction") tx2, err := node.Client.GetRawTransaction(txid2) - if err != nil { - t.Fatalf("unable to fetch transaction: %v", err) - } + require.NoError(t, err, "unable to fetch transaction") targetScript, err := txscript.PayToAddrScript(testAddr) - if err != nil { - t.Fatalf("unable to create target output: %v", err) - } + require.NoError(t, err, "unable to create target output") // Next, we'll locate the two outputs generated above that pay to use // so we can properly add them to the filter. 
outPoint1, _, err := locateOutput(tx1.MsgTx(), targetScript) - if err != nil { - t.Fatalf("unable to find output: %v", err) - } + require.NoError(t, err, "unable to find output") outPoint2, _, err := locateOutput(tx2.MsgTx(), targetScript) - if err != nil { - t.Fatalf("unable to find output: %v", err) - } + require.NoError(t, err, "unable to find output") _, currentHeight, err = node.Client.GetBestBlock() - if err != nil { - t.Fatalf("unable to get current height: %v", err) - } + require.NoError(t, err, "unable to get current height") // Now we'll add both outpoints to the current filter. filter := []channeldb.EdgePoint{ @@ -278,35 +255,23 @@ func testFilterBlockNotifications(node *rpctest.Harness, {FundingPkScript: targetScript, OutPoint: *outPoint2}, } err = chainView.UpdateFilter(filter, uint32(currentHeight)) - if err != nil { - t.Fatalf("unable to update filter: %v", err) - } + require.NoError(t, err, "unable to update filter") // With the filter updated, we'll now create two transaction spending // the outputs we created. spendingTx1, err := craftSpendTransaction(*outPoint1, targetScript) - if err != nil { - t.Fatalf("unable to create spending tx: %v", err) - } + require.NoError(t, err, "unable to create spending tx") spendingTx2, err := craftSpendTransaction(*outPoint2, targetScript) - if err != nil { - t.Fatalf("unable to create spending tx: %v", err) - } + require.NoError(t, err, "unable to create spending tx") // Now we'll broadcast the first spending transaction and also mine a // block which should include it. spendTxid1, err := node.Client.SendRawTransaction(spendingTx1, true) - if err != nil { - t.Fatalf("unable to broadcast transaction: %v", err) - } + require.NoError(t, err, "unable to broadcast transaction") err = waitForMempoolTx(node, spendTxid1) - if err != nil { - t.Fatalf("unable to get spending txid in mempool: %v", err) - } + require.NoError(t, err, "unable to get spending txid in mempool") newBlockHashes, err = node.Client.Generate(1) - if err != nil { - t.Fatalf("unable to generate block: %v", err) - } + require.NoError(t, err, "unable to generate block") // We should receive a notification over the channel. The notification // should correspond to the current block height and have that single @@ -322,17 +287,11 @@ func testFilterBlockNotifications(node *rpctest.Harness, // Next, mine the second transaction which spends the second output. // This should also generate a notification. spendTxid2, err := node.Client.SendRawTransaction(spendingTx2, true) - if err != nil { - t.Fatalf("unable to broadcast transaction: %v", err) - } + require.NoError(t, err, "unable to broadcast transaction") err = waitForMempoolTx(node, spendTxid2) - if err != nil { - t.Fatalf("unable to get spending txid in mempool: %v", err) - } + require.NoError(t, err, "unable to get spending txid in mempool") newBlockHashes, err = node.Client.Generate(1) - if err != nil { - t.Fatalf("unable to generate block: %v", err) - } + require.NoError(t, err, "unable to generate block") select { case filteredBlock := <-blockChan: @@ -354,22 +313,16 @@ func testUpdateFilterBackTrack(node *rpctest.Harness, t.Fatalf("unable to get test txid") } err = waitForMempoolTx(node, txid) - if err != nil { - t.Fatalf("unable to get test txid in mempool: %v", err) - } + require.NoError(t, err, "unable to get test txid in mempool") // Next we'll mine a block confirming the output generated above. 
initBlockHashes, err := node.Client.Generate(1) - if err != nil { - t.Fatalf("unable to generate block: %v", err) - } + require.NoError(t, err, "unable to generate block") blockChan := chainView.FilteredBlocks() _, currentHeight, err := node.Client.GetBestBlock() - if err != nil { - t.Fatalf("unable to get current height: %v", err) - } + require.NoError(t, err, "unable to get current height") // Consume the notification sent which contains an empty filtered // block. @@ -384,29 +337,17 @@ func testUpdateFilterBackTrack(node *rpctest.Harness, // Next, create a transaction which spends the output created above, // mining the spend into a block. tx, err := node.Client.GetRawTransaction(txid) - if err != nil { - t.Fatalf("unable to fetch transaction: %v", err) - } + require.NoError(t, err, "unable to fetch transaction") outPoint, _, err := locateOutput(tx.MsgTx(), testScript) - if err != nil { - t.Fatalf("unable to find output: %v", err) - } + require.NoError(t, err, "unable to find output") spendingTx, err := craftSpendTransaction(*outPoint, testScript) - if err != nil { - t.Fatalf("unable to create spending tx: %v", err) - } + require.NoError(t, err, "unable to create spending tx") spendTxid, err := node.Client.SendRawTransaction(spendingTx, true) - if err != nil { - t.Fatalf("unable to broadcast transaction: %v", err) - } + require.NoError(t, err, "unable to broadcast transaction") err = waitForMempoolTx(node, spendTxid) - if err != nil { - t.Fatalf("unable to get spending txid in mempool: %v", err) - } + require.NoError(t, err, "unable to get spending txid in mempool") newBlockHashes, err := node.Client.Generate(1) - if err != nil { - t.Fatalf("unable to generate block: %v", err) - } + require.NoError(t, err, "unable to generate block") // We should have received another empty filtered block notification. select { @@ -423,9 +364,7 @@ func testUpdateFilterBackTrack(node *rpctest.Harness, {FundingPkScript: testScript, OutPoint: *outPoint}, } err = chainView.UpdateFilter(filter, uint32(currentHeight)) - if err != nil { - t.Fatalf("unable to update filter: %v", err) - } + require.NoError(t, err, "unable to update filter") // We should now receive a fresh filtered block notification that // includes the transaction spend we included above. @@ -451,30 +390,22 @@ func testFilterSingleBlock(node *rpctest.Harness, chainView FilteredChainView, t.Fatalf("unable to get test txid") } err = waitForMempoolTx(node, txid1) - if err != nil { - t.Fatalf("unable to get test txid in mempool: %v", err) - } + require.NoError(t, err, "unable to get test txid in mempool") txid2, err := getTestTXID(node) if err != nil { t.Fatalf("unable to get test txid") } err = waitForMempoolTx(node, txid2) - if err != nil { - t.Fatalf("unable to get test txid in mempool: %v", err) - } + require.NoError(t, err, "unable to get test txid in mempool") blockChan := chainView.FilteredBlocks() // Next we'll mine a block confirming the output generated above. newBlockHashes, err := node.Client.Generate(1) - if err != nil { - t.Fatalf("unable to generate block: %v", err) - } + require.NoError(t, err, "unable to generate block") _, currentHeight, err := node.Client.GetBestBlock() - if err != nil { - t.Fatalf("unable to get current height: %v", err) - } + require.NoError(t, err, "unable to get current height") // We should get an update, however it shouldn't yet contain any // filtered transaction as the filter hasn't been updated. 
@@ -487,37 +418,23 @@ func testFilterSingleBlock(node *rpctest.Harness, chainView FilteredChainView, } tx1, err := node.Client.GetRawTransaction(txid1) - if err != nil { - t.Fatalf("unable to fetch transaction: %v", err) - } + require.NoError(t, err, "unable to fetch transaction") tx2, err := node.Client.GetRawTransaction(txid2) - if err != nil { - t.Fatalf("unable to fetch transaction: %v", err) - } + require.NoError(t, err, "unable to fetch transaction") // Next, we'll create a block that includes two transactions, each // which spend one of the outputs created. outPoint1, _, err := locateOutput(tx1.MsgTx(), testScript) - if err != nil { - t.Fatalf("unable to find output: %v", err) - } + require.NoError(t, err, "unable to find output") outPoint2, _, err := locateOutput(tx2.MsgTx(), testScript) - if err != nil { - t.Fatalf("unable to find output: %v", err) - } + require.NoError(t, err, "unable to find output") spendingTx1, err := craftSpendTransaction(*outPoint1, testScript) - if err != nil { - t.Fatalf("unable to create spending tx: %v", err) - } + require.NoError(t, err, "unable to create spending tx") spendingTx2, err := craftSpendTransaction(*outPoint2, testScript) - if err != nil { - t.Fatalf("unable to create spending tx: %v", err) - } + require.NoError(t, err, "unable to create spending tx") txns := []*btcutil.Tx{btcutil.NewTx(spendingTx1), btcutil.NewTx(spendingTx2)} block, err := node.GenerateAndSubmitBlock(txns, 11, time.Time{}) - if err != nil { - t.Fatalf("unable to generate block: %v", err) - } + require.NoError(t, err, "unable to generate block") select { case filteredBlock := <-blockChan: @@ -528,9 +445,7 @@ func testFilterSingleBlock(node *rpctest.Harness, chainView FilteredChainView, } _, currentHeight, err = node.Client.GetBestBlock() - if err != nil { - t.Fatalf("unable to get current height: %v", err) - } + require.NoError(t, err, "unable to get current height") // Now we'll manually trigger filtering the block generated above. // First, we'll add the two outpoints to our filter. @@ -539,9 +454,7 @@ func testFilterSingleBlock(node *rpctest.Harness, chainView FilteredChainView, {FundingPkScript: testScript, OutPoint: *outPoint2}, } err = chainView.UpdateFilter(filter, uint32(currentHeight)) - if err != nil { - t.Fatalf("unable to update filter: %v", err) - } + require.NoError(t, err, "unable to update filter") // We set the filter with the current height, so we shouldn't get any // notifications. @@ -554,9 +467,7 @@ func testFilterSingleBlock(node *rpctest.Harness, chainView FilteredChainView, // Now we'll manually rescan that past block. This should include two // filtered transactions, the spending transactions we created above. filteredBlock, err := chainView.FilterBlock(block.Hash()) - if err != nil { - t.Fatalf("unable to filter block: %v", err) - } + require.NoError(t, err, "unable to filter block") txn1, txn2 := spendingTx1.TxHash(), spendingTx2.TxHash() expectedTxns := []*chainhash.Hash{&txn1, &txn2} assertFilteredBlock(t, filteredBlock, currentHeight, block.Hash(), @@ -573,9 +484,7 @@ func testFilterBlockDisconnected(node *rpctest.Harness, // Create a node that has a shorter chain than the main chain, so we // can trigger a reorg. 
reorgNode, err := rpctest.New(netParams, nil, []string{"--txindex"}, "") - if err != nil { - t.Fatalf("unable to create mining node: %v", err) - } + require.NoError(t, err, "unable to create mining node") defer reorgNode.TearDown() // We want to overwrite some of the connection settings to make the @@ -592,17 +501,13 @@ func testFilterBlockDisconnected(node *rpctest.Harness, } _, bestHeight, err := reorgNode.Client.GetBestBlock() - if err != nil { - t.Fatalf("error getting best block: %v", err) - } + require.NoError(t, err, "error getting best block") // Init a chain view that has this node as its block source. cleanUpFunc, reorgView, err := chainViewInit( reorgNode.RPCConfig(), reorgNode.P2PAddress(), bestHeight, ) - if err != nil { - t.Fatalf("unable to create chain view: %v", err) - } + require.NoError(t, err, "unable to create chain view") defer func() { if cleanUpFunc != nil { cleanUpFunc() @@ -625,9 +530,7 @@ func testFilterBlockDisconnected(node *rpctest.Harness, } _, oldHeight, err := reorgNode.Client.GetBestBlock() - if err != nil { - t.Fatalf("unable to get current height: %v", err) - } + require.NoError(t, err, "unable to get current height") // Now connect the node with the short chain to the main node, and wait // for their chains to synchronize. The short chain will be reorged all @@ -641,9 +544,7 @@ func testFilterBlockDisconnected(node *rpctest.Harness, } _, newHeight, err := reorgNode.Client.GetBestBlock() - if err != nil { - t.Fatalf("unable to get current height: %v", err) - } + require.NoError(t, err, "unable to get current height") // We should be getting oldHeight number of blocks marked as // stale/disconnected. We expect to first get all stale blocks, @@ -681,16 +582,12 @@ func testFilterBlockDisconnected(node *rpctest.Harness, // Now we trigger a small reorg, by disconnecting the nodes, mining // a few blocks on each, then connecting them again. peers, err := reorgNode.Client.GetPeerInfo() - if err != nil { - t.Fatalf("unable to get peer info: %v", err) - } + require.NoError(t, err, "unable to get peer info") numPeers := len(peers) // Disconnect the nodes. err = reorgNode.Client.AddNode(node.P2PAddress(), rpcclient.ANRemove) - if err != nil { - t.Fatalf("unable to disconnect mining nodes: %v", err) - } + require.NoError(t, err, "unable to disconnect mining nodes") // Wait for disconnection for { @@ -732,9 +629,7 @@ func testFilterBlockDisconnected(node *rpctest.Harness, } _, oldHeight, err = reorgNode.Client.GetBestBlock() - if err != nil { - t.Fatalf("unable to get current height: %v", err) - } + require.NoError(t, err, "unable to get current height") // Now connect the two nodes, and wait for their chains to sync up. if err := rpctest.ConnectNode(reorgNode, node); err != nil { @@ -1135,9 +1030,7 @@ func TestFilteredChainView(t *testing.T) { // this node with a chain length of 125, so we have plenty of BTC to // play around with. 
miner, err := rpctest.New(netParams, nil, []string{"--txindex"}, "")
- if err != nil {
- t.Fatalf("unable to create mining node: %v", err)
- }
+ require.NoError(t, err, "unable to create mining node")
defer miner.TearDown()
if err := miner.SetUp(true, 25); err != nil {
t.Fatalf("unable to set up mining node: %v", err)
diff --git a/routing/control_tower_test.go b/routing/control_tower_test.go
index 591ea3ced..e24870c87 100644
--- a/routing/control_tower_test.go
+++ b/routing/control_tower_test.go
@@ -15,6 +15,7 @@ import (
"github.com/lightningnetwork/lnd/channeldb"
"github.com/lightningnetwork/lnd/lntypes"
"github.com/lightningnetwork/lnd/routing/route"
+ "github.com/stretchr/testify/require"
)
var (
@@ -48,9 +49,7 @@ func TestControlTowerSubscribeUnknown(t *testing.T) {
t.Parallel()
db, err := initDB()
- if err != nil {
- t.Fatalf("unable to init db: %v", err)
- }
+ require.NoError(t, err, "unable to init db")
pControl := NewControlTower(channeldb.NewPaymentControl(db))
@@ -67,9 +66,7 @@ func TestControlTowerSubscribeSuccess(t *testing.T) {
t.Parallel()
db, err := initDB()
- if err != nil {
- t.Fatalf("unable to init db: %v", err)
- }
+ require.NoError(t, err, "unable to init db")
pControl := NewControlTower(channeldb.NewPaymentControl(db))
@@ -87,9 +84,7 @@ func TestControlTowerSubscribeSuccess(t *testing.T) {
// Subscription should succeed and immediately report the InFlight
// status.
subscriber1, err := pControl.SubscribePayment(info.PaymentIdentifier)
- if err != nil {
- t.Fatalf("expected subscribe to succeed, but got: %v", err)
- }
+ require.NoError(t, err, "expected subscribe to succeed")
// Register an attempt.
err = pControl.RegisterAttempt(info.PaymentIdentifier, attempt)
@@ -99,9 +94,7 @@ func TestControlTowerSubscribeSuccess(t *testing.T) {
// Register a second subscriber after the first attempt has started.
subscriber2, err := pControl.SubscribePayment(info.PaymentIdentifier)
- if err != nil {
- t.Fatalf("expected subscribe to succeed, but got: %v", err)
- }
+ require.NoError(t, err, "expected subscribe to succeed")
// Mark the payment as successful.
settleInfo := channeldb.HTLCSettleInfo{
@@ -119,9 +112,7 @@ func TestControlTowerSubscribeSuccess(t *testing.T) {
// Register a third subscriber after the payment succeeded.
subscriber3, err := pControl.SubscribePayment(info.PaymentIdentifier)
- if err != nil {
- t.Fatalf("expected subscribe to succeed, but got: %v", err)
- }
+ require.NoError(t, err, "expected subscribe to succeed")
// We expect all subscribers to now report the final outcome followed by
// no other events.
@@ -184,9 +175,7 @@ func TestPaymentControlSubscribeFail(t *testing.T) {
func testPaymentControlSubscribeFail(t *testing.T, registerAttempt bool) {
db, err := initDB()
- if err != nil {
- t.Fatalf("unable to init db: %v", err)
- }
+ require.NoError(t, err, "unable to init db")
pControl := NewControlTower(channeldb.NewPaymentControl(db))
@@ -203,9 +192,7 @@ func testPaymentControlSubscribeFail(t *testing.T, registerAttempt bool) {
// Subscription should succeed.
subscriber1, err := pControl.SubscribePayment(info.PaymentIdentifier)
- if err != nil {
- t.Fatalf("expected subscribe to succeed, but got: %v", err)
- }
+ require.NoError(t, err, "expected subscribe to succeed")
// Conditionally register the attempt based on the test type. This
// allows us to simulate failing after attempting with an htlc or before
@@ -239,9 +226,7 @@ func testPaymentControlSubscribeFail(t *testing.T, registerAttempt bool) {
// Register a second subscriber after the payment failed.
subscriber2, err := pControl.SubscribePayment(info.PaymentIdentifier)
- if err != nil {
- t.Fatalf("expected subscribe to succeed, but got: %v", err)
- }
+ require.NoError(t, err, "expected subscribe to succeed")
// We expect all subscribers to now report the final outcome followed by
// no other events.
diff --git a/routing/integrated_routing_test.go b/routing/integrated_routing_test.go
index 5eaa859ac..0962f45f5 100644
--- a/routing/integrated_routing_test.go
+++ b/routing/integrated_routing_test.go
@@ -57,9 +57,7 @@ func TestProbabilityExtrapolation(t *testing.T) {
// modifications anywhere in the chain of components that is involved in
// this test.
attempts, err := ctx.testPayment(1)
- if err != nil {
- t.Fatalf("payment failed: %v", err)
- }
+ require.NoError(t, err, "payment failed")
if len(attempts) != 5 {
t.Fatalf("expected 5 attempts, but needed %v", len(attempts))
}
@@ -69,9 +67,7 @@ func TestProbabilityExtrapolation(t *testing.T) {
// first before switching to the paid channel.
ctx.mcCfg.AprioriWeight = 1
attempts, err = ctx.testPayment(1)
- if err != nil {
- t.Fatalf("payment failed: %v", err)
- }
+ require.NoError(t, err, "payment failed")
if len(attempts) != 11 {
t.Fatalf("expected 11 attempts, but needed %v", len(attempts))
}
diff --git a/routing/notifications_test.go b/routing/notifications_test.go
index 9a9f52cf1..7a82508fd 100644
--- a/routing/notifications_test.go
+++ b/routing/notifications_test.go
@@ -400,9 +400,7 @@ func TestEdgeUpdateNotification(t *testing.T) {
fundingTx, chanPoint, chanID, err := createChannelEdge(ctx,
bitcoinKey1.SerializeCompressed(), bitcoinKey2.SerializeCompressed(),
chanValue, 0)
- if err != nil {
- t.Fatalf("unable create channel edge: %v", err)
- }
+ require.NoError(t, err, "unable to create channel edge")
// We'll also add a record for the block that included our funding
// transaction.
@@ -414,13 +412,9 @@ func TestEdgeUpdateNotification(t *testing.T) {
// Next we'll create two test nodes that the fake channel will be open
// between.
node1, err := createTestNode()
- if err != nil {
- t.Fatalf("unable to create test node: %v", err)
- }
+ require.NoError(t, err, "unable to create test node")
node2, err := createTestNode()
- if err != nil {
- t.Fatalf("unable to create test node: %v", err)
- }
+ require.NoError(t, err, "unable to create test node")
// Finally, to conclude our test set up, we'll create a channel
// update to announce the created channel between the two nodes.
@@ -445,9 +439,7 @@ func TestEdgeUpdateNotification(t *testing.T) {
// With the channel edge now in place, we'll subscribe for topology
// notifications.
ntfnClient, err := ctx.router.SubscribeTopology()
- if err != nil {
- t.Fatalf("unable to subscribe for channel notifications: %v", err)
- }
+ require.NoError(t, err, "unable to subscribe for channel notifications")
// Create random policy edges that are stemmed to the channel id
// created above.
@@ -514,13 +506,9 @@ func TestEdgeUpdateNotification(t *testing.T) {
}
node1Pub, err := node1.PubKey()
- if err != nil {
- t.Fatalf("unable to encode key: %v", err)
- }
+ require.NoError(t, err, "unable to encode key")
node2Pub, err := node2.PubKey()
- if err != nil {
- t.Fatalf("unable to encode key: %v", err)
- }
+ require.NoError(t, err, "unable to encode key")
const numEdgePolicies = 2
for i := 0; i < numEdgePolicies; i++ {
@@ -593,9 +581,7 @@ func TestNodeUpdateNotification(t *testing.T) {
bitcoinKey1.SerializeCompressed(),
bitcoinKey2.SerializeCompressed(),
chanValue, startingBlockHeight)
- if err != nil {
- t.Fatalf("unable create channel edge: %v", err)
- }
+ require.NoError(t, err, "unable to create channel edge")
// We'll also add a record for the block that included our funding
// transaction.
@@ -608,13 +594,9 @@ func TestNodeUpdateNotification(t *testing.T) {
// them to trigger notifications by sending updated node announcement
// messages.
node1, err := createTestNode()
- if err != nil {
- t.Fatalf("unable to create test node: %v", err)
- }
+ require.NoError(t, err, "unable to create test node")
node2, err := createTestNode()
- if err != nil {
- t.Fatalf("unable to create test node: %v", err)
- }
+ require.NoError(t, err, "unable to create test node")
testFeaturesBuf := new(bytes.Buffer)
require.NoError(t, testFeatures.Encode(testFeaturesBuf))
@@ -641,9 +623,7 @@ func TestNodeUpdateNotification(t *testing.T) {
// Create a new client to receive notifications.
ntfnClient, err := ctx.router.SubscribeTopology()
- if err != nil {
- t.Fatalf("unable to subscribe for channel notifications: %v", err)
- }
+ require.NoError(t, err, "unable to subscribe for channel notifications")
// Change network topology by adding the updated info for the two nodes
// to the channel router.
@@ -778,9 +758,7 @@ func TestNotificationCancellation(t *testing.T) {
// Create a new client to receive notifications.
ntfnClient, err := ctx.router.SubscribeTopology()
- if err != nil {
- t.Fatalf("unable to subscribe for channel notifications: %v", err)
- }
+ require.NoError(t, err, "unable to subscribe for channel notifications")
// We'll create the utxo for a new channel.
const chanValue = 10000
@@ -788,9 +766,7 @@ func TestNotificationCancellation(t *testing.T) {
bitcoinKey1.SerializeCompressed(),
bitcoinKey2.SerializeCompressed(),
chanValue, startingBlockHeight)
- if err != nil {
- t.Fatalf("unable create channel edge: %v", err)
- }
+ require.NoError(t, err, "unable to create channel edge")
// We'll also add a record for the block that included our funding
// transaction.
@@ -802,13 +778,9 @@ func TestNotificationCancellation(t *testing.T) {
// We'll create a fresh new node topology update to feed to the channel
// router.
node1, err := createTestNode()
- if err != nil {
- t.Fatalf("unable to create test node: %v", err)
- }
+ require.NoError(t, err, "unable to create test node")
node2, err := createTestNode()
- if err != nil {
- t.Fatalf("unable to create test node: %v", err)
- }
+ require.NoError(t, err, "unable to create test node")
// Before we send the message to the channel router, we'll cancel the
// notifications for this client. As a result, the notification
@@ -870,9 +842,7 @@ func TestChannelCloseNotification(t *testing.T) {
fundingTx, chanUtxo, chanID, err := createChannelEdge(ctx,
bitcoinKey1.SerializeCompressed(), bitcoinKey2.SerializeCompressed(),
chanValue, startingBlockHeight)
- if err != nil {
- t.Fatalf("unable create channel edge: %v", err)
- }
+ require.NoError(t, err, "unable to create channel edge")
// We'll also add a record for the block that included our funding
// transaction.
@@ -884,13 +854,9 @@ func TestChannelCloseNotification(t *testing.T) {
// Next we'll create two test nodes that the fake channel will be open
// between.
node1, err := createTestNode()
- if err != nil {
- t.Fatalf("unable to create test node: %v", err)
- }
+ require.NoError(t, err, "unable to create test node")
node2, err := createTestNode()
- if err != nil {
- t.Fatalf("unable to create test node: %v", err)
- }
+ require.NoError(t, err, "unable to create test node")
// Finally, to conclude our test set up, we'll create a channel
// announcement to announce the created channel between the two nodes.
@@ -914,9 +880,7 @@ func TestChannelCloseNotification(t *testing.T) {
// With the channel edge now in place, we'll subscribe for topology
// notifications.
ntfnClient, err := ctx.router.SubscribeTopology()
- if err != nil {
- t.Fatalf("unable to subscribe for channel notifications: %v", err)
- }
+ require.NoError(t, err, "unable to subscribe for channel notifications")
// Next, we'll simulate the closure of our channel by generating a new
// block at height 102 which spends the original multi-sig output of
diff --git a/routing/pathfind_test.go b/routing/pathfind_test.go
index ca53a23dc..5e1bb2f62 100644
--- a/routing/pathfind_test.go
+++ b/routing/pathfind_test.go
@@ -962,9 +962,7 @@ func runFindLowestFeePath(t *testing.T, useCache bool) {
paymentAmt := lnwire.NewMSatFromSatoshis(100)
target := ctx.keyFromAlias("target")
path, err := ctx.findPath(target, paymentAmt)
- if err != nil {
- t.Fatalf("unable to find path: %v", err)
- }
+ require.NoError(t, err, "unable to find path")
route, err := newRoute(
ctx.source, path, startingHeight,
finalHopParams{
amt: paymentAmt,
totalAmt: paymentAmt,
cltvDelta: finalHopCLTV,
records: nil,
},
)
- if err != nil {
- t.Fatalf("unable to create path: %v", err)
- }
+ require.NoError(t, err, "unable to create path")
// Assert that the lowest fee route is returned.
if route.Hops[1].PubKeyBytes != ctx.keyFromAlias("b") { @@ -1058,9 +1054,7 @@ var basicGraphPathFindingTests = []basicGraphPathFindingTestCase{ func runBasicGraphPathFinding(t *testing.T, useCache bool) { testGraphInstance, err := parseTestGraph(useCache, basicGraphFilePath) - if err != nil { - t.Fatalf("unable to create graph: %v", err) - } + require.NoError(t, err, "unable to create graph") defer testGraphInstance.cleanUp() // With the test graph loaded, we'll test some basic path finding using @@ -1083,9 +1077,7 @@ func testBasicGraphPathFindingCase(t *testing.T, graphInstance *testGraphInstanc expectedHopCount := len(expectedHops) sourceNode, err := graphInstance.graph.SourceNode() - if err != nil { - t.Fatalf("unable to fetch source node: %v", err) - } + require.NoError(t, err, "unable to fetch source node") sourceVertex := route.Vertex(sourceNode.PubKeyBytes) const ( @@ -1112,9 +1104,7 @@ func testBasicGraphPathFindingCase(t *testing.T, graphInstance *testGraphInstanc } return } - if err != nil { - t.Fatalf("unable to find path: %v", err) - } + require.NoError(t, err, "unable to find path") route, err := newRoute( sourceVertex, path, startingHeight, @@ -1124,9 +1114,7 @@ func testBasicGraphPathFindingCase(t *testing.T, graphInstance *testGraphInstanc records: nil, }, ) - if err != nil { - t.Fatalf("unable to create path: %v", err) - } + require.NoError(t, err, "unable to create path") if len(route.Hops) != len(expectedHops) { t.Fatalf("route is of incorrect length, expected %v got %v", @@ -1148,9 +1136,7 @@ func testBasicGraphPathFindingCase(t *testing.T, graphInstance *testGraphInstanc // properly points to the channel ID that the HTLC should be forwarded // along. sphinxPath, err := route.ToSphinxPath() - if err != nil { - t.Fatalf("unable to make sphinx path: %v", err) - } + require.NoError(t, err, "unable to make sphinx path") if sphinxPath.TrueRouteLength() != expectedHopCount { t.Fatalf("incorrect number of hop payloads: expected %v, got %v", expectedHopCount, sphinxPath.TrueRouteLength()) @@ -1178,9 +1164,7 @@ func testBasicGraphPathFindingCase(t *testing.T, graphInstance *testGraphInstanc lastHopIndex := len(expectedHops) - 1 hopData, err := sphinxPath[lastHopIndex].HopPayload.HopData() - if err != nil { - t.Fatalf("unable to create hop data: %v", err) - } + require.NoError(t, err, "unable to create hop data") if !bytes.Equal(hopData.NextAddress[:], exitHop[:]) { t.Fatalf("first hop has incorrect next hop: expected %x, got %x", @@ -1235,15 +1219,11 @@ func testBasicGraphPathFindingCase(t *testing.T, graphInstance *testGraphInstanc // appropriate circumstances. func runPathFindingWithAdditionalEdges(t *testing.T, useCache bool) { graph, err := parseTestGraph(useCache, basicGraphFilePath) - if err != nil { - t.Fatalf("unable to create graph: %v", err) - } + require.NoError(t, err, "unable to create graph") defer graph.cleanUp() sourceNode, err := graph.graph.SourceNode() - if err != nil { - t.Fatalf("unable to fetch source node: %v", err) - } + require.NoError(t, err, "unable to fetch source node") paymentAmt := lnwire.NewMSatFromSatoshis(100) @@ -1254,13 +1234,9 @@ func runPathFindingWithAdditionalEdges(t *testing.T, useCache bool) { // to find a path from our source node, roasbeef, to doge. 
dogePubKeyHex := "03dd46ff29a6941b4a2607525b043ec9b020b3f318a1bf281536fd7011ec59c882" dogePubKeyBytes, err := hex.DecodeString(dogePubKeyHex) - if err != nil { - t.Fatalf("unable to decode public key: %v", err) - } + require.NoError(t, err, "unable to decode public key") dogePubKey, err := btcec.ParsePubKey(dogePubKeyBytes) - if err != nil { - t.Fatalf("unable to parse public key from bytes: %v", err) - } + require.NoError(t, err, "unable to parse public key from bytes") doge := &channeldb.LightningNode{} doge.AddPubKey(dogePubKey) @@ -1298,9 +1274,7 @@ func runPathFindingWithAdditionalEdges(t *testing.T, useCache bool) { // We should now be able to find a path from roasbeef to doge. path, err := find(noRestrictions) - if err != nil { - t.Fatalf("unable to find private path to doge: %v", err) - } + require.NoError(t, err, "unable to find private path to doge") // The path should represent the following hops: // roasbeef -> songoku -> doge @@ -1330,9 +1304,7 @@ func runPathFindingWithAdditionalEdges(t *testing.T, useCache bool) { restrictions.DestFeatures = tlvFeatures path, err = find(&restrictions) - if err != nil { - t.Fatalf("path should have been found: %v", err) - } + require.NoError(t, err, "path should have been found") assertExpectedPath(t, graph.aliasMap, path, "songoku", "doge") } @@ -1675,9 +1647,7 @@ func runNewRoutePathTooLong(t *testing.T, useCache bool) { node20 := ctx.keyFromAlias("node-20") payAmt := lnwire.MilliSatoshi(100001) _, err := ctx.findPath(node20, payAmt) - if err != nil { - t.Fatalf("unexpected pathfinding failure: %v", err) - } + require.NoError(t, err, "unexpected pathfinding failure") // Assert that finding a 21 hop route fails. node21 := ctx.keyFromAlias("node-21") @@ -1700,24 +1670,18 @@ func runNewRoutePathTooLong(t *testing.T, useCache bool) { func runPathNotAvailable(t *testing.T, useCache bool) { graph, err := parseTestGraph(useCache, basicGraphFilePath) - if err != nil { - t.Fatalf("unable to create graph: %v", err) - } + require.NoError(t, err, "unable to create graph") defer graph.cleanUp() sourceNode, err := graph.graph.SourceNode() - if err != nil { - t.Fatalf("unable to fetch source node: %v", err) - } + require.NoError(t, err, "unable to fetch source node") // With the test graph loaded, we'll test that queries for target that // are either unreachable within the graph, or unknown result in an // error. unknownNodeStr := "03dd46ff29a6941b4a2607525b043ec9b020b3f318a1bf281536fd7011ec59c882" unknownNodeBytes, err := hex.DecodeString(unknownNodeStr) - if err != nil { - t.Fatalf("unable to parse bytes: %v", err) - } + require.NoError(t, err, "unable to parse bytes") var unknownNode route.Vertex copy(unknownNode[:], unknownNodeBytes) @@ -1767,9 +1731,7 @@ func runDestTLVGraphFallback(t *testing.T, useCache bool) { defer ctx.cleanup() sourceNode, err := ctx.graph.SourceNode() - if err != nil { - t.Fatalf("unable to fetch source node: %v", err) - } + require.NoError(t, err, "unable to fetch source node") find := func(r *RestrictParams, target route.Vertex) ([]*channeldb.CachedEdgePolicy, error) { @@ -1801,9 +1763,7 @@ func runDestTLVGraphFallback(t *testing.T, useCache bool) { // However, path to satoshi should succeed via the fallback because his // node ann features have the TLV bit. 
path, err := find(&restrictions, satoshi) - if err != nil { - t.Fatalf("path should have been found: %v", err) - } + require.NoError(t, err, "path should have been found") assertExpectedPath(t, ctx.testGraphInstance.aliasMap, path, "satoshi") // Add empty destination features. This should cause both paths to fail, @@ -1824,9 +1784,7 @@ func runDestTLVGraphFallback(t *testing.T, useCache bool) { restrictions.DestFeatures = tlvFeatures path, err = find(&restrictions, luoji) - if err != nil { - t.Fatalf("path should have been found: %v", err) - } + require.NoError(t, err, "path should have been found") assertExpectedPath(t, ctx.testGraphInstance.aliasMap, path, "luoji") } @@ -1888,9 +1846,7 @@ func runMissingFeatureDep(t *testing.T, useCache bool) { ctx.restrictParams.DestFeatures = tlvPayAddrFeatures path, err := ctx.findPath(conner, 100) - if err != nil { - t.Fatalf("path should have been found: %v", err) - } + require.NoError(t, err, "path should have been found") assertExpectedPath(t, ctx.testGraphInstance.aliasMap, path, "conner") // Finally, try to find a route to joost through conner. The @@ -2006,23 +1962,17 @@ func runDestPaymentAddr(t *testing.T, useCache bool) { ctx.restrictParams.DestFeatures = tlvPayAddrFeatures path, err := ctx.findPath(luoji, 100) - if err != nil { - t.Fatalf("path should have been found: %v", err) - } + require.NoError(t, err, "path should have been found") assertExpectedPath(t, ctx.testGraphInstance.aliasMap, path, "luoji") } func runPathInsufficientCapacity(t *testing.T, useCache bool) { graph, err := parseTestGraph(useCache, basicGraphFilePath) - if err != nil { - t.Fatalf("unable to create graph: %v", err) - } + require.NoError(t, err, "unable to create graph") defer graph.cleanUp() sourceNode, err := graph.graph.SourceNode() - if err != nil { - t.Fatalf("unable to fetch source node: %v", err) - } + require.NoError(t, err, "unable to fetch source node") // Next, test that attempting to find a path in which the current // channel graph cannot support due to insufficient capacity triggers @@ -2049,15 +1999,11 @@ func runPathInsufficientCapacity(t *testing.T, useCache bool) { // smaller than the advertised minHTLC of an edge, then path finding fails. func runRouteFailMinHTLC(t *testing.T, useCache bool) { graph, err := parseTestGraph(useCache, basicGraphFilePath) - if err != nil { - t.Fatalf("unable to create graph: %v", err) - } + require.NoError(t, err, "unable to create graph") defer graph.cleanUp() sourceNode, err := graph.graph.SourceNode() - if err != nil { - t.Fatalf("unable to fetch source node: %v", err) - } + require.NoError(t, err, "unable to fetch source node") // We'll not attempt to route an HTLC of 10 SAT from roasbeef to Son // Goku. However, the min HTLC of Son Goku is 1k SAT, as a result, this @@ -2111,17 +2057,13 @@ func runRouteFailMaxHTLC(t *testing.T, useCache bool) { target := ctx.keyFromAlias("target") payAmt := lnwire.MilliSatoshi(100001) _, err := ctx.findPath(target, payAmt) - if err != nil { - t.Fatalf("graph should've been able to support payment: %v", err) - } + require.NoError(t, err, "graph should've been able to support payment") // Next, update the middle edge policy to only allow payments up to 100k // msat. 
graph := ctx.testGraphInstance.graph _, midEdge, _, err := graph.FetchChannelEdgesByID(firstToSecondID) - if err != nil { - t.Fatalf("unable to fetch channel edges by ID: %v", err) - } + require.NoError(t, err, "unable to fetch channel edges by ID") midEdge.MessageFlags = 1 midEdge.MaxHTLC = payAmt - 1 if err := graph.UpdateEdgePolicy(midEdge); err != nil { @@ -2143,15 +2085,11 @@ func runRouteFailMaxHTLC(t *testing.T, useCache bool) { // found among the bandwidth hints. func runRouteFailDisabledEdge(t *testing.T, useCache bool) { graph, err := parseTestGraph(useCache, basicGraphFilePath) - if err != nil { - t.Fatalf("unable to create graph: %v", err) - } + require.NoError(t, err, "unable to create graph") defer graph.cleanUp() sourceNode, err := graph.graph.SourceNode() - if err != nil { - t.Fatalf("unable to fetch source node: %v", err) - } + require.NoError(t, err, "unable to fetch source node") // First, we'll try to route from roasbeef -> sophon. This should // succeed without issue, and return a single path via phamnuwen @@ -2162,18 +2100,14 @@ func runRouteFailDisabledEdge(t *testing.T, useCache bool) { noRestrictions, testPathFindingConfig, sourceNode.PubKeyBytes, target, payAmt, 0, 0, ) - if err != nil { - t.Fatalf("unable to find path: %v", err) - } + require.NoError(t, err, "unable to find path") // Disable the edge roasbeef->phamnuwen. This should not impact the // path finding, as we don't consider the disable flag for local // channels (and roasbeef is the source). roasToPham := uint64(999991) _, e1, e2, err := graph.graph.FetchChannelEdgesByID(roasToPham) - if err != nil { - t.Fatalf("unable to fetch edge: %v", err) - } + require.NoError(t, err, "unable to fetch edge") e1.ChannelFlags |= lnwire.ChanUpdateDisabled if err := graph.graph.UpdateEdgePolicy(e1); err != nil { t.Fatalf("unable to update edge: %v", err) @@ -2188,17 +2122,13 @@ func runRouteFailDisabledEdge(t *testing.T, useCache bool) { noRestrictions, testPathFindingConfig, sourceNode.PubKeyBytes, target, payAmt, 0, 0, ) - if err != nil { - t.Fatalf("unable to find path: %v", err) - } + require.NoError(t, err, "unable to find path") // Now, we'll modify the edge from phamnuwen -> sophon, to read that // it's disabled. phamToSophon := uint64(99999) _, e, _, err := graph.graph.FetchChannelEdgesByID(phamToSophon) - if err != nil { - t.Fatalf("unable to fetch edge: %v", err) - } + require.NoError(t, err, "unable to fetch edge") e.ChannelFlags |= lnwire.ChanUpdateDisabled if err := graph.graph.UpdateEdgePolicy(e); err != nil { t.Fatalf("unable to update edge: %v", err) @@ -2221,15 +2151,11 @@ func runRouteFailDisabledEdge(t *testing.T, useCache bool) { // use a local channel. func runPathSourceEdgesBandwidth(t *testing.T, useCache bool) { graph, err := parseTestGraph(useCache, basicGraphFilePath) - if err != nil { - t.Fatalf("unable to create graph: %v", err) - } + require.NoError(t, err, "unable to create graph") defer graph.cleanUp() sourceNode, err := graph.graph.SourceNode() - if err != nil { - t.Fatalf("unable to fetch source node: %v", err) - } + require.NoError(t, err, "unable to fetch source node") // First, we'll try to route from roasbeef -> sophon. 
This should // succeed without issue, and return a path via songoku, as that's the @@ -2241,9 +2167,7 @@ func runPathSourceEdgesBandwidth(t *testing.T, useCache bool) { noRestrictions, testPathFindingConfig, sourceNode.PubKeyBytes, target, payAmt, 0, 0, ) - if err != nil { - t.Fatalf("unable to find path: %v", err) - } + require.NoError(t, err, "unable to find path") assertExpectedPath(t, graph.aliasMap, path, "songoku", "sophon") // Now we'll set the bandwidth of the edge roasbeef->songoku and @@ -2279,18 +2203,14 @@ func runPathSourceEdgesBandwidth(t *testing.T, useCache bool) { noRestrictions, testPathFindingConfig, sourceNode.PubKeyBytes, target, payAmt, 0, 0, ) - if err != nil { - t.Fatalf("unable to find path: %v", err) - } + require.NoError(t, err, "unable to find path") assertExpectedPath(t, graph.aliasMap, path, "phamnuwen", "sophon") // Finally, set the roasbeef->songoku bandwidth, but also set its // disable flag. bandwidths.hints[roasToSongoku] = 2 * payAmt _, e1, e2, err := graph.graph.FetchChannelEdgesByID(roasToSongoku) - if err != nil { - t.Fatalf("unable to fetch edge: %v", err) - } + require.NoError(t, err, "unable to fetch edge") e1.ChannelFlags |= lnwire.ChanUpdateDisabled if err := graph.graph.UpdateEdgePolicy(e1); err != nil { t.Fatalf("unable to update edge: %v", err) @@ -2307,9 +2227,7 @@ func runPathSourceEdgesBandwidth(t *testing.T, useCache bool) { noRestrictions, testPathFindingConfig, sourceNode.PubKeyBytes, target, payAmt, 0, 0, ) - if err != nil { - t.Fatalf("unable to find path: %v", err) - } + require.NoError(t, err, "unable to find path") assertExpectedPath(t, graph.aliasMap, path, "songoku", "sophon") } @@ -2340,9 +2258,7 @@ func TestPathFindSpecExample(t *testing.T) { // Bob. bob := ctx.aliases["B"] bobNode, err := ctx.graph.FetchLightningNode(bob) - if err != nil { - t.Fatalf("unable to find bob: %v", err) - } + require.NoError(t, err, "unable to find bob") if err := ctx.graph.SetSourceNode(bobNode); err != nil { t.Fatalf("unable to set source node: %v", err) } @@ -2354,9 +2270,7 @@ func TestPathFindSpecExample(t *testing.T) { bobNode.PubKeyBytes, carol, amt, 0, noRestrictions, nil, nil, MinCLTVDelta, ) - if err != nil { - t.Fatalf("unable to find route: %v", err) - } + require.NoError(t, err, "unable to find route") // Now we'll examine the route returned for correctness. // @@ -2390,17 +2304,13 @@ func TestPathFindSpecExample(t *testing.T) { // the proper route for any queries starting with Alice. alice := ctx.aliases["A"] aliceNode, err := ctx.graph.FetchLightningNode(alice) - if err != nil { - t.Fatalf("unable to find alice: %v", err) - } + require.NoError(t, err, "unable to find alice") if err := ctx.graph.SetSourceNode(aliceNode); err != nil { t.Fatalf("unable to set source node: %v", err) } ctx.router.selfNode = aliceNode source, err := ctx.graph.SourceNode() - if err != nil { - t.Fatalf("unable to retrieve source node: %v", err) - } + require.NoError(t, err, "unable to retrieve source node") if source.PubKeyBytes != alice { t.Fatalf("source node not set") } @@ -2410,9 +2320,7 @@ func TestPathFindSpecExample(t *testing.T) { source.PubKeyBytes, carol, amt, 0, noRestrictions, nil, nil, MinCLTVDelta, ) - if err != nil { - t.Fatalf("unable to find routes: %v", err) - } + require.NoError(t, err, "unable to find routes") // The route should be two hops. if len(route.Hops) != 2 { @@ -2569,9 +2477,7 @@ func runRestrictOutgoingChannel(t *testing.T, useCache bool) { // outgoing channel. 
ctx.restrictParams.OutgoingChannelIDs = []uint64{outgoingChannelID} path, err := ctx.findPath(target, paymentAmt) - if err != nil { - t.Fatalf("unable to find path: %v", err) - } + require.NoError(t, err, "unable to find path") // Assert that the route starts with channel chanSourceB1, in line with // the specified restriction. @@ -2587,9 +2493,7 @@ func runRestrictOutgoingChannel(t *testing.T, useCache bool) { chanSourceB1, chanSourceTarget, } path, err = ctx.findPath(target, paymentAmt) - if err != nil { - t.Fatalf("unable to find path: %v", err) - } + require.NoError(t, err, "unable to find path") if path[0].ChannelID != chanSourceTarget { t.Fatalf("expected route to pass through channel %v", chanSourceTarget) @@ -2629,9 +2533,7 @@ func runRestrictLastHop(t *testing.T, useCache bool) { // This should force pathfinding to not take the lowest cost option. ctx.restrictParams.LastHop = &lastHop path, err := ctx.findPath(target, paymentAmt) - if err != nil { - t.Fatalf("unable to find path: %v", err) - } + require.NoError(t, err, "unable to find path") if path[0].ChannelID != 3 { t.Fatalf("expected route to pass through channel 3, "+ "but channel %v was selected instead", @@ -2704,9 +2606,7 @@ func testCltvLimit(t *testing.T, useCache bool, limit uint32, } t.Fatal("expected no path to be found") } - if err != nil { - t.Fatalf("unable to find path: %v", err) - } + require.NoError(t, err, "unable to find path") const ( startingHeight = 100 @@ -2720,9 +2620,7 @@ func testCltvLimit(t *testing.T, useCache bool, limit uint32, records: nil, }, ) - if err != nil { - t.Fatalf("unable to create path: %v", err) - } + require.NoError(t, err, "unable to create path") // Assert that the route starts with the expected channel. if route.Hops[0].ChannelID != expectedChannel { @@ -3037,9 +2935,7 @@ func runNoCycle(t *testing.T, useCache bool) { // Find the best path given the restriction to only use channel 2 as the // outgoing channel. path, err := ctx.findPath(target, paymentAmt) - if err != nil { - t.Fatalf("unable to find path: %v", err) - } + require.NoError(t, err, "unable to find path") route, err := newRoute( ctx.source, path, startingHeight, finalHopParams{ @@ -3048,9 +2944,7 @@ func runNoCycle(t *testing.T, useCache bool) { records: nil, }, ) - if err != nil { - t.Fatalf("unable to create path: %v", err) - } + require.NoError(t, err, "unable to create path") if len(route.Hops) != 2 { t.Fatalf("unexpected route") @@ -3089,9 +2983,7 @@ func runRouteToSelf(t *testing.T, useCache bool) { // Find the best path to self. We expect this to be source->a->source, // because a charges the lowest forwarding fee. path, err := ctx.findPath(target, paymentAmt) - if err != nil { - t.Fatalf("unable to find path: %v", err) - } + require.NoError(t, err, "unable to find path") ctx.assertPath(path, []uint64{1, 1}) outgoingChanID := uint64(1) @@ -3102,9 +2994,7 @@ func runRouteToSelf(t *testing.T, useCache bool) { // Find the best path to self given that we want to go out via channel 1 // and return through node b. 
path, err = ctx.findPath(target, paymentAmt)
- if err != nil {
- t.Fatalf("unable to find path: %v", err)
- }
+ require.NoError(t, err, "unable to find path")
ctx.assertPath(path, []uint64{1, 3, 2})
}
@@ -3125,14 +3015,10 @@ func newPathFindingTestContext(t *testing.T, useCache bool,
testGraphInstance, err := createTestGraphFromChannels(
useCache, testChannels, source,
)
- if err != nil {
- t.Fatalf("unable to create graph: %v", err)
- }
+ require.NoError(t, err, "unable to create graph")
sourceNode, err := testGraphInstance.graph.SourceNode()
- if err != nil {
- t.Fatalf("unable to fetch source node: %v", err)
- }
+ require.NoError(t, err, "unable to fetch source node")
ctx := &pathFindingTestContext{
t: t,
diff --git a/routing/payment_lifecycle_test.go b/routing/payment_lifecycle_test.go
index 0f4c08a8f..81c88ee3b 100644
--- a/routing/payment_lifecycle_test.go
+++ b/routing/payment_lifecycle_test.go
@@ -181,9 +181,7 @@ func TestRouterPaymentStateMachine(t *testing.T) {
}
testGraph, err := createTestGraphFromChannels(true, testChannels, "a")
- if err != nil {
- t.Fatalf("unable to create graph: %v", err)
- }
+ require.NoError(t, err, "unable to create graph")
defer testGraph.cleanUp()
paymentAmt := lnwire.NewMSatFromSatoshis(1000)
@@ -191,9 +189,7 @@ func TestRouterPaymentStateMachine(t *testing.T) {
// We create a simple route that we will supply every time the router
// requests one.
rt, err := createTestRoute(paymentAmt, testGraph.aliasMap)
- if err != nil {
- t.Fatalf("unable to create route: %v", err)
- }
+ require.NoError(t, err, "unable to create route")
tests := []paymentLifecycleTestCase{
{
diff --git a/routing/router_test.go b/routing/router_test.go
index ff9d14a41..343348a02 100644
--- a/routing/router_test.go
+++ b/routing/router_test.go
@@ -1168,9 +1168,7 @@ func TestAddProof(t *testing.T) {
fundingTx, _, chanID, err := createChannelEdge(ctx,
bitcoinKey1.SerializeCompressed(),
bitcoinKey2.SerializeCompressed(),
100, 0)
- if err != nil {
- t.Fatalf("unable create channel edge: %v", err)
- }
+ require.NoError(t, err, "unable to create channel edge")
fundingBlock := &wire.MsgBlock{
Transactions: []*wire.MsgTx{fundingTx},
}
@@ -1197,9 +1195,7 @@ func TestAddProof(t *testing.T) {
}
info, _, _, err := ctx.router.GetChannelByID(*chanID)
- if err != nil {
- t.Fatalf("unable to get channel: %v", err)
- }
+ require.NoError(t, err, "unable to get channel")
if info.AuthProof == nil {
t.Fatal("proof have been updated")
}
@@ -1247,9 +1243,7 @@ func TestIgnoreChannelEdgePolicyForUnknownChannel(t *testing.T) {
testGraph, err := createTestGraphFromChannels(
true, testChannels, "roasbeef",
)
- if err != nil {
- t.Fatalf("unable to create graph: %v", err)
- }
+ require.NoError(t, err, "unable to create graph")
defer testGraph.cleanUp()
ctx, cleanUp := createTestCtxFromGraphInstance(
@@ -1269,9 +1263,7 @@ func TestIgnoreChannelEdgePolicyForUnknownChannel(t *testing.T) {
ctx, bitcoinKey1.SerializeCompressed(),
bitcoinKey2.SerializeCompressed(),
10000, 500,
)
- if err != nil {
- t.Fatalf("unable to create channel edge: %v", err)
- }
+ require.NoError(t, err, "unable to create channel edge")
fundingBlock := &wire.MsgBlock{
Transactions: []*wire.MsgTx{fundingTx},
}
@@ -1335,16 +1327,12 @@ func TestAddEdgeUnknownVertexes(t *testing.T) {
// The two nodes we are about to add should not exist yet.
_, exists1, err := ctx.graph.HasLightningNode(pub1)
- if err != nil {
- t.Fatalf("unable to query graph: %v", err)
- }
+ require.NoError(t, err, "unable to query graph")
if exists1 {
t.Fatalf("node already existed")
}
_, exists2, err := ctx.graph.HasLightningNode(pub2)
- if err != nil {
- t.Fatalf("unable to query graph: %v", err)
- }
+ require.NoError(t, err, "unable to query graph")
if exists2 {
t.Fatalf("node already existed")
}
@@ -1356,9 +1344,7 @@ func TestAddEdgeUnknownVertexes(t *testing.T) {
bitcoinKey2.SerializeCompressed(),
10000, 500,
)
- if err != nil {
- t.Fatalf("unable to create channel edge: %v", err)
- }
+ require.NoError(t, err, "unable to create channel edge")
fundingBlock := &wire.MsgBlock{
Transactions: []*wire.MsgTx{fundingTx},
}
@@ -1419,16 +1405,12 @@ func TestAddEdgeUnknownVertexes(t *testing.T) {
// After adding the edge between the two previously unknown nodes, they
// should have been added to the graph.
_, exists1, err = ctx.graph.HasLightningNode(pub1)
- if err != nil {
- t.Fatalf("unable to query graph: %v", err)
- }
+ require.NoError(t, err, "unable to query graph")
if !exists1 {
t.Fatalf("node1 was not added to the graph")
}
_, exists2, err = ctx.graph.HasLightningNode(pub2)
- if err != nil {
- t.Fatalf("unable to query graph: %v", err)
- }
+ require.NoError(t, err, "unable to query graph")
if !exists2 {
t.Fatalf("node2 was not added to the graph")
}
@@ -1461,9 +1443,7 @@ func TestAddEdgeUnknownVertexes(t *testing.T) {
fundingTx, _, chanID, err = createChannelEdge(ctx,
pubKey1.SerializeCompressed(),
pubKey2.SerializeCompressed(),
10000, 510)
- if err != nil {
- t.Fatalf("unable to create channel edge: %v", err)
- }
+ require.NoError(t, err, "unable to create channel edge")
fundingBlock = &wire.MsgBlock{
Transactions: []*wire.MsgTx{fundingTx},
}
@@ -1528,9 +1508,7 @@ func TestAddEdgeUnknownVertexes(t *testing.T) {
targetPubKeyBytes, paymentAmt, 0,
noRestrictions, nil, nil,
MinCLTVDelta,
)
- if err != nil {
- t.Fatalf("unable to find any routes: %v", err)
- }
+ require.NoError(t, err, "unable to find any routes")
// Now check that we can update the node info for the partial node
// without messing up the channel graph.
@@ -1571,23 +1549,17 @@ func TestAddEdgeUnknownVertexes(t *testing.T) {
targetPubKeyBytes, paymentAmt, 0,
noRestrictions, nil, nil,
MinCLTVDelta,
)
- if err != nil {
- t.Fatalf("unable to find any routes: %v", err)
- }
+ require.NoError(t, err, "unable to find any routes")
copy1, err := ctx.graph.FetchLightningNode(pub1)
- if err != nil {
- t.Fatalf("unable to fetch node: %v", err)
- }
+ require.NoError(t, err, "unable to fetch node")
if copy1.Alias != n1.Alias {
t.Fatalf("fetched node not equal to original")
}
copy2, err := ctx.graph.FetchLightningNode(pub2)
- if err != nil {
- t.Fatalf("unable to fetch node: %v", err)
- }
+ require.NoError(t, err, "unable to fetch node")
if copy2.Alias != n2.Alias {
t.Fatalf("fetched node not equal to original")
@@ -1642,9 +1614,7 @@ func TestWakeUpOnStaleBranch(t *testing.T) {
time.Sleep(time.Millisecond * 500)
_, forkHeight, err := ctx.chain.GetBestBlock()
- if err != nil {
- t.Fatalf("unable to ge best block: %v", err)
- }
+ require.NoError(t, err, "unable to get best block")
// Create 10 blocks on the minority chain, confirming chanID2.
for i := uint32(1); i <= 10; i++ {
@@ -1675,13 +1645,9 @@ func TestWakeUpOnStaleBranch(t *testing.T) {
// Now add the two edges to the channel graph, and check that they
// correctly show up in the database.
node1, err := createTestNode() - if err != nil { - t.Fatalf("unable to create test node: %v", err) - } + require.NoError(t, err, "unable to create test node") node2, err := createTestNode() - if err != nil { - t.Fatalf("unable to create test node: %v", err) - } + require.NoError(t, err, "unable to create test node") edge1 := &channeldb.ChannelEdgeInfo{ ChannelID: chanID1, @@ -1851,9 +1817,7 @@ func TestDisconnectedBlocks(t *testing.T) { time.Sleep(time.Millisecond * 500) _, forkHeight, err := ctx.chain.GetBestBlock() - if err != nil { - t.Fatalf("unable to get best block: %v", err) - } + require.NoError(t, err, "unable to get best block") // Create 10 blocks on the minority chain, confirming chanID2. var minorityChain []*wire.MsgBlock @@ -1886,13 +1850,9 @@ func TestDisconnectedBlocks(t *testing.T) { // Now add the two edges to the channel graph, and check that they // correctly show up in the database. node1, err := createTestNode() - if err != nil { - t.Fatalf("unable to create test node: %v", err) - } + require.NoError(t, err, "unable to create test node") node2, err := createTestNode() - if err != nil { - t.Fatalf("unable to create test node: %v", err) - } + require.NoError(t, err, "unable to create test node") edge1 := &channeldb.ChannelEdgeInfo{ ChannelID: chanID1, @@ -2032,9 +1992,7 @@ func TestRouterChansClosedOfflinePruneGraph(t *testing.T) { bitcoinKey1.SerializeCompressed(), bitcoinKey2.SerializeCompressed(), chanValue, uint32(nextHeight)) - if err != nil { - t.Fatalf("unable create channel edge: %v", err) - } + require.NoError(t, err, "unable create channel edge") block102.Transactions = append(block102.Transactions, fundingTx1) ctx.chain.addBlock(block102, uint32(nextHeight), rand.Uint32()) ctx.chain.setBestBlock(int32(nextHeight)) @@ -2045,13 +2003,9 @@ func TestRouterChansClosedOfflinePruneGraph(t *testing.T) { // for the ChannelRouter to properly recognize the channel we added // above. node1, err := createTestNode() - if err != nil { - t.Fatalf("unable to create test node: %v", err) - } + require.NoError(t, err, "unable to create test node") node2, err := createTestNode() - if err != nil { - t.Fatalf("unable to create test node: %v", err) - } + require.NoError(t, err, "unable to create test node") edge1 := &channeldb.ChannelEdgeInfo{ ChannelID: chanID1.ToUint64(), NodeKey1Bytes: node1.PubKeyBytes, @@ -2097,9 +2051,7 @@ func TestRouterChansClosedOfflinePruneGraph(t *testing.T) { // At this point, our starting height should be 107. _, chainHeight, err := ctx.chain.GetBestBlock() - if err != nil { - t.Fatalf("unable to get best block: %v", err) - } + require.NoError(t, err, "unable to get best block") if chainHeight != 107 { t.Fatalf("incorrect chain height: expected %v, got %v", 107, chainHeight) @@ -2139,9 +2091,7 @@ func TestRouterChansClosedOfflinePruneGraph(t *testing.T) { // At this point, our starting height should be 112. 
 	_, chainHeight, err = ctx.chain.GetBestBlock()
-	if err != nil {
-		t.Fatalf("unable to get best block: %v", err)
-	}
+	require.NoError(t, err, "unable to get best block")
 	if chainHeight != 112 {
 		t.Fatalf("incorrect chain height: expected %v, got %v",
 			112, chainHeight)
@@ -2394,9 +2344,7 @@ func testPruneChannelGraphDoubleDisabled(t *testing.T, assumeValid bool) {
 	testGraph, err := createTestGraphFromChannels(
 		true, testChannels, "self",
 	)
-	if err != nil {
-		t.Fatalf("unable to create test graph: %v", err)
-	}
+	require.NoError(t, err, "unable to create test graph")
 	defer testGraph.cleanUp()
 
 	const startingHeight = 100
@@ -2450,9 +2398,7 @@ func TestFindPathFeeWeighting(t *testing.T) {
 	copy(preImage[:], bytes.Repeat([]byte{9}, 32))
 
 	sourceNode, err := ctx.graph.SourceNode()
-	if err != nil {
-		t.Fatalf("unable to fetch source node: %v", err)
-	}
+	require.NoError(t, err, "unable to fetch source node")
 
 	amt := lnwire.MilliSatoshi(100)
@@ -2467,9 +2413,7 @@ func TestFindPathFeeWeighting(t *testing.T) {
 		testPathFindingConfig,
 		sourceNode.PubKeyBytes, target, amt, 0, 0,
 	)
-	if err != nil {
-		t.Fatalf("unable to find path: %v", err)
-	}
+	require.NoError(t, err, "unable to find path")
 
 	// The route that was chosen should be exactly one hop, and should be
 	// directly to luoji.
@@ -2503,9 +2447,7 @@ func TestIsStaleNode(t *testing.T) {
 		bitcoinKey1.SerializeCompressed(),
 		bitcoinKey2.SerializeCompressed(),
 		10000, 500)
-	if err != nil {
-		t.Fatalf("unable to create channel edge: %v", err)
-	}
+	require.NoError(t, err, "unable to create channel edge")
 	fundingBlock := &wire.MsgBlock{
 		Transactions: []*wire.MsgTx{fundingTx},
 	}
@@ -2582,9 +2524,7 @@ func TestIsKnownEdge(t *testing.T) {
 		bitcoinKey1.SerializeCompressed(),
 		bitcoinKey2.SerializeCompressed(),
 		10000, 500)
-	if err != nil {
-		t.Fatalf("unable to create channel edge: %v", err)
-	}
+	require.NoError(t, err, "unable to create channel edge")
 	fundingBlock := &wire.MsgBlock{
 		Transactions: []*wire.MsgTx{fundingTx},
 	}
@@ -2633,9 +2573,7 @@ func TestIsStaleEdgePolicy(t *testing.T) {
 		bitcoinKey1.SerializeCompressed(),
 		bitcoinKey2.SerializeCompressed(),
 		10000, 500)
-	if err != nil {
-		t.Fatalf("unable to create channel edge: %v", err)
-	}
+	require.NoError(t, err, "unable to create channel edge")
 	fundingBlock := &wire.MsgBlock{
 		Transactions: []*wire.MsgTx{fundingTx},
 	}
@@ -2765,9 +2703,7 @@ func TestUnknownErrorSource(t *testing.T) {
 	testGraph, err := createTestGraphFromChannels(true, testChannels, "a")
 	defer testGraph.cleanUp()
 
-	if err != nil {
-		t.Fatalf("unable to create graph: %v", err)
-	}
+	require.NoError(t, err, "unable to create graph")
 
 	const startingBlockHeight = 101
 	ctx, cleanUp := createTestCtxFromGraphInstance(
@@ -2807,9 +2743,7 @@ func TestUnknownErrorSource(t *testing.T) {
 	// which should pruning the channel a->b. We expect the payment to
 	// succeed via a->d.
 	_, _, err = ctx.router.SendPayment(&payment)
-	if err != nil {
-		t.Fatalf("expected payment to succeed, but got: %v", err)
-	}
+	require.NoError(t, err, "expected payment to succeed")
 
 	// Next we modify payment result to return an unknown failure.
 	ctx.router.cfg.Payer.(*mockPaymentAttemptDispatcherOld).setPaymentResult(
@@ -2900,9 +2834,7 @@ func TestSendToRouteStructuredError(t *testing.T) {
 	}
 
 	testGraph, err := createTestGraphFromChannels(true, testChannels, "a")
-	if err != nil {
-		t.Fatalf("unable to create graph: %v", err)
-	}
+	require.NoError(t, err, "unable to create graph")
 	defer testGraph.cleanUp()
 
 	const startingBlockHeight = 101
@@ -2938,9 +2870,7 @@ func TestSendToRouteStructuredError(t *testing.T) {
 	}
 
 	rt, err := route.NewRouteFromHops(payAmt, 100, ctx.aliases["a"], hops)
-	if err != nil {
-		t.Fatalf("unable to create route: %v", err)
-	}
+	require.NoError(t, err, "unable to create route")
 
 	finalHopIndex := len(hops)
 	testCases := map[int]lnwire.FailureMessage{
@@ -3036,9 +2966,7 @@ func TestSendToRouteMultiShardSend(t *testing.T) {
 	rt, err := route.NewRouteFromHops(
 		payAmt, 100, sourceNode.PubKeyBytes, hops,
 	)
-	if err != nil {
-		t.Fatalf("unable to create route: %v", err)
-	}
+	require.NoError(t, err, "unable to create route")
 
 	// The first shard we send we'll fail immediately, to check that we are
 	// still allowed to retry with other shards after a failed one.
@@ -3149,9 +3077,7 @@ func TestSendToRouteMaxHops(t *testing.T) {
 	}
 
 	testGraph, err := createTestGraphFromChannels(true, testChannels, "a")
-	if err != nil {
-		t.Fatalf("unable to create graph: %v", err)
-	}
+	require.NoError(t, err, "unable to create graph")
 	defer testGraph.cleanUp()
 
 	const startingBlockHeight = 101
@@ -3184,9 +3110,7 @@ func TestSendToRouteMaxHops(t *testing.T) {
 	}
 
 	rt, err := route.NewRouteFromHops(payAmt, 100, ctx.aliases["a"], hops)
-	if err != nil {
-		t.Fatalf("unable to create route: %v", err)
-	}
+	require.NoError(t, err, "unable to create route")
 
 	// Send off the payment request to the router. We expect an error back
 	// indicating that the route is too long.
@@ -3260,9 +3184,7 @@ func TestBuildRoute(t *testing.T) {
 	}
 
 	testGraph, err := createTestGraphFromChannels(true, testChannels, "a")
-	if err != nil {
-		t.Fatalf("unable to create graph: %v", err)
-	}
+	require.NoError(t, err, "unable to create graph")
 	defer testGraph.cleanUp()
 
 	const startingBlockHeight = 101
@@ -4358,7 +4280,5 @@ func TestBlockDifferenceFix(t *testing.T) {
 
 			return nil
 		}, testTimeout)
-	if err != nil {
-		t.Fatalf("block height wasn't updated: %v", err)
-	}
+	require.NoError(t, err, "block height wasn't updated")
 }
diff --git a/server_test.go b/server_test.go
index 50ab6f0fb..eb36026b4 100644
--- a/server_test.go
+++ b/server_test.go
@@ -20,6 +20,7 @@ import (
 	"time"
 
 	"github.com/lightningnetwork/lnd/lncfg"
+	"github.com/stretchr/testify/require"
 )
 
 // TestTLSAutoRegeneration creates an expired TLS certificate, to test that a
@@ -37,9 +38,7 @@ func TestTLSAutoRegeneration(t *testing.T) {
 
 	certDerBytes, keyBytes := genExpiredCertPair(t, tempDirPath)
 	expiredCert, err := x509.ParseCertificate(certDerBytes)
-	if err != nil {
-		t.Fatalf("failed to parse certificate: %v", err)
-	}
+	require.NoError(t, err, "failed to parse certificate")
 
 	certBuf := bytes.Buffer{}
 	err = pem.Encode(
@@ -48,9 +47,7 @@ func TestTLSAutoRegeneration(t *testing.T) {
 			Bytes: certDerBytes,
 		},
 	)
-	if err != nil {
-		t.Fatalf("failed to encode certificate: %v", err)
-	}
+	require.NoError(t, err, "failed to encode certificate")
 
 	keyBuf := bytes.Buffer{}
 	err = pem.Encode(
@@ -59,19 +56,13 @@ func TestTLSAutoRegeneration(t *testing.T) {
 			Bytes: keyBytes,
 		},
 	)
-	if err != nil {
-		t.Fatalf("failed to encode private key: %v", err)
-	}
+	require.NoError(t, err, "failed to encode private key")
 
 	// Write cert and key files.
 	err = ioutil.WriteFile(tempDirPath+"/tls.cert", certBuf.Bytes(), 0644)
-	if err != nil {
-		t.Fatalf("failed to write cert file: %v", err)
-	}
+	require.NoError(t, err, "failed to write cert file")
 	err = ioutil.WriteFile(tempDirPath+"/tls.key", keyBuf.Bytes(), 0600)
-	if err != nil {
-		t.Fatalf("failed to write key file: %v", err)
-	}
+	require.NoError(t, err, "failed to write key file")
 
 	rpcListener := net.IPAddr{IP: net.ParseIP("127.0.0.1"), Zone: ""}
 	rpcListeners := make([]net.Addr, 0)
@@ -118,9 +109,7 @@ func genExpiredCertPair(t *testing.T, certDirPath string) ([]byte, []byte) {
 	// Generate a serial number that's below the serialNumberLimit.
 	serialNumber, err := rand.Int(rand.Reader, serialNumberLimit)
-	if err != nil {
-		t.Fatalf("failed to generate serial number: %s", err)
-	}
+	require.NoError(t, err, "failed to generate serial number")
 
 	host := "lightning"
@@ -157,14 +146,10 @@ func genExpiredCertPair(t *testing.T, certDirPath string) ([]byte, []byte) {
 	certDerBytes, err := x509.CreateCertificate(
 		rand.Reader, &template, &template, &priv.PublicKey, priv,
 	)
-	if err != nil {
-		t.Fatalf("failed to create certificate: %v", err)
-	}
+	require.NoError(t, err, "failed to create certificate")
 
 	keyBytes, err := x509.MarshalECPrivateKey(priv)
-	if err != nil {
-		t.Fatalf("unable to encode privkey: %v", err)
-	}
+	require.NoError(t, err, "unable to encode privkey")
 
 	return certDerBytes, keyBytes
 }
diff --git a/shachain/element_test.go b/shachain/element_test.go
index d1a55e649..d2e87640c 100644
--- a/shachain/element_test.go
+++ b/shachain/element_test.go
@@ -5,6 +5,7 @@ import (
 	"testing"
 
 	"github.com/go-errors/errors"
+	"github.com/stretchr/testify/require"
 )
 
 // bitsToIndex is a helper function which takes 'n' last bits as input and
@@ -48,13 +49,9 @@ func generateTests(t *testing.T) []deriveTest {
 	)
 
 	from, err = bitsToIndex(0)
-	if err != nil {
-		t.Fatalf("can't generate from index: %v", err)
-	}
+	require.NoError(t, err, "can't generate from index")
 	to, err = bitsToIndex(0)
-	if err != nil {
-		t.Fatalf("can't generate from index: %v", err)
-	}
+	require.NoError(t, err, "can't generate from index")
 	tests = append(tests, deriveTest{
 		name: "zero 'from' 'to'",
 		from: from,
@@ -64,13 +61,9 @@ func generateTests(t *testing.T) []deriveTest {
 	})
 
 	from, err = bitsToIndex(0, 1, 0, 0)
-	if err != nil {
-		t.Fatalf("can't generate from index: %v", err)
-	}
+	require.NoError(t, err, "can't generate from index")
 	to, err = bitsToIndex(0, 1, 0, 0)
-	if err != nil {
-		t.Fatalf("can't generate from index: %v", err)
-	}
+	require.NoError(t, err, "can't generate from index")
 	tests = append(tests, deriveTest{
 		name: "same indexes #1",
 		from: from,
@@ -80,13 +73,9 @@ func generateTests(t *testing.T) []deriveTest {
 	})
 
 	from, err = bitsToIndex(1)
-	if err != nil {
-		t.Fatalf("can't generate from index: %v", err)
-	}
+	require.NoError(t, err, "can't generate from index")
 	to, err = bitsToIndex(0)
-	if err != nil {
-		t.Fatalf("can't generate from index: %v", err)
-	}
+	require.NoError(t, err, "can't generate from index")
 	tests = append(tests, deriveTest{
 		name: "same indexes #2",
 		from: from,
@@ -95,13 +84,9 @@ func generateTests(t *testing.T) []deriveTest {
 	})
 
 	from, err = bitsToIndex(0, 0, 0, 0)
-	if err != nil {
-		t.Fatalf("can't generate from index: %v", err)
-	}
+	require.NoError(t, err, "can't generate from index")
 	to, err = bitsToIndex(0, 0, 1, 0)
-	if err != nil {
-		t.Fatalf("can't generate from index: %v", err)
-	}
+	require.NoError(t, err, "can't generate from index")
 	tests = append(tests, deriveTest{
 		name: "test seed 'from'",
 		from: from,
@@ -111,13 +96,9 @@ func generateTests(t *testing.T) []deriveTest {
 	})
 
 	from, err = bitsToIndex(1, 1, 0, 0)
-	if err != nil {
-		t.Fatalf("can't generate from index: %v", err)
-	}
+	require.NoError(t, err, "can't generate from index")
 	to, err = bitsToIndex(0, 1, 0, 0)
-	if err != nil {
-		t.Fatalf("can't generate from index: %v", err)
-	}
+	require.NoError(t, err, "can't generate from index")
 	tests = append(tests, deriveTest{
 		name: "not the same indexes",
 		from: from,
@@ -126,13 +107,9 @@ func generateTests(t *testing.T) []deriveTest {
 	})
 
 	from, err = bitsToIndex(1, 0, 1, 0)
-	if err != nil {
-		t.Fatalf("can't generate from index: %v", err)
-	}
+	require.NoError(t, err, "can't generate from index")
 	to, err = bitsToIndex(1, 0, 0, 0)
-	if err != nil {
-		t.Fatalf("can't generate from index: %v", err)
-	}
+	require.NoError(t, err, "can't generate from index")
 	tests = append(tests, deriveTest{
 		name: "'from' index greater then 'to' index",
 		from: from,
@@ -141,13 +118,9 @@ func generateTests(t *testing.T) []deriveTest {
 	})
 
 	from, err = bitsToIndex(1)
-	if err != nil {
-		t.Fatalf("can't generate from index: %v", err)
-	}
+	require.NoError(t, err, "can't generate from index")
 	to, err = bitsToIndex(1)
-	if err != nil {
-		t.Fatalf("can't generate from index: %v", err)
-	}
+	require.NoError(t, err, "can't generate from index")
 	tests = append(tests, deriveTest{
 		name: "zero number trailing zeros",
 		from: from,
diff --git a/sweep/store_test.go b/sweep/store_test.go
index b27efb31e..0096b5563 100644
--- a/sweep/store_test.go
+++ b/sweep/store_test.go
@@ -6,6 +6,7 @@ import (
 	"github.com/btcsuite/btcd/chaincfg/chainhash"
 	"github.com/btcsuite/btcd/wire"
 	"github.com/lightningnetwork/lnd/channeldb"
+	"github.com/stretchr/testify/require"
 )
 
 // TestStore asserts that the store persists the presented data to disk and is
@@ -125,9 +126,7 @@ func testStore(t *testing.T, createStore func() (SweeperStore, error)) {
 	}
 
 	txns, err := store.ListSweeps()
-	if err != nil {
-		t.Fatalf("unexpected error: %v", err)
-	}
+	require.NoError(t, err, "unexpected error")
 
 	// Create a map containing the sweeps we expect to be returned by list
 	// sweeps.
diff --git a/sweep/sweeper_test.go b/sweep/sweeper_test.go
index 5f4830711..0eebb3799 100644
--- a/sweep/sweeper_test.go
+++ b/sweep/sweeper_test.go
@@ -1237,9 +1237,7 @@ func TestBumpFeeRBF(t *testing.T) {
 	bumpResult, err := ctx.sweeper.UpdateParams(
 		*input.OutPoint(), ParamsUpdate{Fee: highFeePref},
 	)
-	if err != nil {
-		t.Fatalf("unable to bump input's fee: %v", err)
-	}
+	require.NoError(t, err, "unable to bump input's fee")
 
 	// A higher fee rate transaction should be immediately broadcast.
 	ctx.tick()
diff --git a/sweep/walletsweep_test.go b/sweep/walletsweep_test.go
index 2f5679c69..29800bb57 100644
--- a/sweep/walletsweep_test.go
+++ b/sweep/walletsweep_test.go
@@ -12,6 +12,7 @@ import (
 	"github.com/lightningnetwork/lnd/lntest/mock"
 	"github.com/lightningnetwork/lnd/lnwallet"
 	"github.com/lightningnetwork/lnd/lnwallet/chainfee"
+	"github.com/stretchr/testify/require"
 )
 
 // TestDetermineFeePerKw tests that given a fee preference, the
@@ -352,9 +353,7 @@ func TestCraftSweepAllTx(t *testing.T) {
 		0, 10, nil, deliveryAddr, coinSelectLocker, utxoSource,
 		utxoLocker, feeEstimator, signer, 0,
 	)
-	if err != nil {
-		t.Fatalf("unable to make sweep tx: %v", err)
-	}
+	require.NoError(t, err, "unable to make sweep tx")
 
 	// At this point, all of the UTXOs that we made above should be locked
 	// and none of them unlocked.
diff --git a/tlv/stream_test.go b/tlv/stream_test.go
index eb732d4db..60a3db73b 100644
--- a/tlv/stream_test.go
+++ b/tlv/stream_test.go
@@ -82,9 +82,7 @@ func testParsedTypes(t *testing.T, test parsedTypeTest) {
 	parsedTypes, err := decStream.DecodeWithParsedTypes(
 		bytes.NewReader(b.Bytes()),
 	)
-	if err != nil {
-		t.Fatalf("error decoding: %v", err)
-	}
+	require.NoError(t, err, "error decoding")
 	if !reflect.DeepEqual(parsedTypes, test.expParsedTypes) {
 		t.Fatalf("error mismatch on parsed types")
 	}
diff --git a/tor/cmd_onion_test.go b/tor/cmd_onion_test.go
index e7502e941..f6b2c4b17 100644
--- a/tor/cmd_onion_test.go
+++ b/tor/cmd_onion_test.go
@@ -17,9 +17,7 @@ func TestOnionFile(t *testing.T) {
 	t.Parallel()
 
 	tempDir, err := ioutil.TempDir("", "onion_store")
-	if err != nil {
-		t.Fatalf("unable to create temp dir: %v", err)
-	}
+	require.NoError(t, err, "unable to create temp dir")
 
 	privateKey := []byte("hide_me_plz")
 	privateKeyPath := filepath.Join(tempDir, "secret")
@@ -36,9 +34,7 @@ func TestOnionFile(t *testing.T) {
 		t.Fatalf("unable to store private key: %v", err)
 	}
 	storePrivateKey, err := onionFile.PrivateKey(V2)
-	if err != nil {
-		t.Fatalf("unable to retrieve private key: %v", err)
-	}
+	require.NoError(t, err, "unable to retrieve private key")
 	if !bytes.Equal(storePrivateKey, privateKey) {
 		t.Fatalf("expected private key \"%v\", got \"%v\"",
 			string(privateKey), string(storePrivateKey))
diff --git a/watchtower/blob/justice_kit_test.go b/watchtower/blob/justice_kit_test.go
index 592a0df7e..b6b7b59ad 100644
--- a/watchtower/blob/justice_kit_test.go
+++ b/watchtower/blob/justice_kit_test.go
@@ -166,9 +166,7 @@ func testBlobJusticeKitEncryptDecrypt(t *testing.T, test descriptorTest) {
 	// party's commitment txid as the key.
 	var key blob.BreachKey
 	_, err := rand.Read(key[:])
-	if err != nil {
-		t.Fatalf("unable to generate blob encryption key: %v", err)
-	}
+	require.NoError(t, err, "unable to generate blob encryption key")
 
 	// Encrypt the blob plaintext using the generated key and
 	// target version for this test.
diff --git a/watchtower/lookout/lookout_test.go b/watchtower/lookout/lookout_test.go
index da2159c19..34e5590e7 100644
--- a/watchtower/lookout/lookout_test.go
+++ b/watchtower/lookout/lookout_test.go
@@ -15,6 +15,7 @@ import (
 	"github.com/lightningnetwork/lnd/watchtower/wtdb"
 	"github.com/lightningnetwork/lnd/watchtower/wtmock"
 	"github.com/lightningnetwork/lnd/watchtower/wtpolicy"
+	"github.com/stretchr/testify/require"
 )
 
 type mockPunisher struct {
@@ -116,13 +117,9 @@ func TestLookoutBreachMatching(t *testing.T) {
 
 	// Insert both sessions into the watchtower's database.
 	err := db.InsertSessionInfo(sessionInfo1)
-	if err != nil {
-		t.Fatalf("unable to insert session info: %v", err)
-	}
+	require.NoError(t, err, "unable to insert session info")
 	err = db.InsertSessionInfo(sessionInfo2)
-	if err != nil {
-		t.Fatalf("unable to insert session info: %v", err)
-	}
+	require.NoError(t, err, "unable to insert session info")
 
 	// Construct two distinct transactions, that will be used to test the
 	// breach hint matching.
@@ -160,15 +157,11 @@ func TestLookoutBreachMatching(t *testing.T) {
 
 	// Encrypt the first justice kit under breach key one.
 	encBlob1, err := blob1.Encrypt(key1)
-	if err != nil {
-		t.Fatalf("unable to encrypt sweep detail 1: %v", err)
-	}
+	require.NoError(t, err, "unable to encrypt sweep detail 1")
 
 	// Encrypt the second justice kit under breach key two.
 	encBlob2, err := blob2.Encrypt(key2)
-	if err != nil {
-		t.Fatalf("unable to encrypt sweep detail 2: %v", err)
-	}
+	require.NoError(t, err, "unable to encrypt sweep detail 2")
 
 	// Add both state updates to the tower's database.
 	txBlob1 := &wtdb.SessionStateUpdate{
diff --git a/watchtower/wtclient/backup_task_internal_test.go b/watchtower/wtclient/backup_task_internal_test.go
index 661f5e144..9c0a3e20e 100644
--- a/watchtower/wtclient/backup_task_internal_test.go
+++ b/watchtower/wtclient/backup_task_internal_test.go
@@ -23,6 +23,7 @@ import (
 	"github.com/lightningnetwork/lnd/watchtower/wtdb"
 	"github.com/lightningnetwork/lnd/watchtower/wtmock"
 	"github.com/lightningnetwork/lnd/watchtower/wtpolicy"
+	"github.com/stretchr/testify/require"
 )
 
 const csvDelay uint32 = 144
@@ -602,9 +603,7 @@ func testBackupTask(t *testing.T, test backupTaskTest) {
 	// Now, we'll construct, sign, and encrypt the blob containing the parts
 	// needed to reconstruct the justice transaction.
 	hint, encBlob, err := task.craftSessionPayload(test.signer)
-	if err != nil {
-		t.Fatalf("unable to craft session payload: %v", err)
-	}
+	require.NoError(t, err, "unable to craft session payload")
 
 	// Verify that the breach hint matches the breach txid's prefix.
 	breachTxID := test.breachInfo.BreachTxHash
@@ -618,9 +617,7 @@ func testBackupTask(t *testing.T, test backupTaskTest) {
 	// contents.
 	key := blob.NewBreachKeyFromHash(&breachTxID)
 	jKit, err := blob.Decrypt(key, encBlob, policy.BlobType)
-	if err != nil {
-		t.Fatalf("unable to decrypt blob: %v", err)
-	}
+	require.NoError(t, err, "unable to decrypt blob")
 
 	keyRing := test.breachInfo.KeyRing
 	expToLocalPK := keyRing.ToLocalKey.SerializeCompressed()
diff --git a/watchtower/wtclient/candidate_iterator_test.go b/watchtower/wtclient/candidate_iterator_test.go
index 1435754f2..99547d794 100644
--- a/watchtower/wtclient/candidate_iterator_test.go
+++ b/watchtower/wtclient/candidate_iterator_test.go
@@ -11,6 +11,7 @@ import (
 	"github.com/btcsuite/btcd/btcec/v2"
 	"github.com/davecgh/go-spew/spew"
 	"github.com/lightningnetwork/lnd/watchtower/wtdb"
+	"github.com/stretchr/testify/require"
 )
 
 func init() {
@@ -35,9 +36,7 @@ func randAddr(t *testing.T) net.Addr {
 
 func randTower(t *testing.T) *wtdb.Tower {
 	priv, err := btcec.NewPrivateKey()
-	if err != nil {
-		t.Fatalf("unable to create private key: %v", err)
-	}
+	require.NoError(t, err, "unable to create private key")
 	pubKey := priv.PubKey()
 	return &wtdb.Tower{
 		ID: wtdb.TowerID(rand.Uint64()),
diff --git a/watchtower/wtclient/client_test.go b/watchtower/wtclient/client_test.go
index fcd256cd1..83bb27ff7 100644
--- a/watchtower/wtclient/client_test.go
+++ b/watchtower/wtclient/client_test.go
@@ -69,9 +69,7 @@ func randPrivKey(t *testing.T) *btcec.PrivateKey {
 	t.Helper()
 
 	sk, err := btcec.NewPrivateKey()
-	if err != nil {
-		t.Fatalf("unable to generate pubkey: %v", err)
-	}
+	require.NoError(t, err, "unable to generate private key")
 
 	return sk
 }
@@ -200,21 +198,15 @@ func (c *mockChannel) createRemoteCommitTx(t *testing.T) {
 	toLocalScript, err := input.CommitScriptToSelf(
 		c.csvDelay, c.toLocalPK, c.revPK,
 	)
-	if err != nil {
-		t.Fatalf("unable to create to-local script: %v", err)
-	}
+	require.NoError(t, err, "unable to create to-local script")
 
 	// Compute the to-local witness script hash.
 	toLocalScriptHash, err := input.WitnessScriptHash(toLocalScript)
-	if err != nil {
-		t.Fatalf("unable to create to-local witness script hash: %v", err)
-	}
+	require.NoError(t, err, "unable to create to-local witness script hash")
 
 	// Compute the to-remote witness script hash.
 	toRemoteScriptHash, err := input.CommitScriptUnencumbered(c.toRemotePK)
-	if err != nil {
-		t.Fatalf("unable to create to-remote script: %v", err)
-	}
+	require.NoError(t, err, "unable to create to-remote script")
 
 	// Construct the remote commitment txn, containing the to-local and
 	// to-remote outputs. The balances are flipped since the transaction is
@@ -400,14 +392,10 @@ type harnessCfg struct {
 
 func newHarness(t *testing.T, cfg harnessCfg) *testHarness {
 	towerTCPAddr, err := net.ResolveTCPAddr("tcp", towerAddrStr)
-	if err != nil {
-		t.Fatalf("Unable to resolve tower TCP addr: %v", err)
-	}
+	require.NoError(t, err, "Unable to resolve tower TCP addr")
 
 	privKey, err := btcec.NewPrivateKey()
-	if err != nil {
-		t.Fatalf("Unable to generate tower private key: %v", err)
-	}
+	require.NoError(t, err, "Unable to generate tower private key")
 	privKeyECDH := &keychain.PrivKeyECDH{PrivKey: privKey}
 
 	towerPubKey := privKey.PubKey()
@@ -432,9 +420,7 @@ func newHarness(t *testing.T, cfg harnessCfg) *testHarness {
 	}
 
 	server, err := wtserver.New(serverCfg)
-	if err != nil {
-		t.Fatalf("unable to create wtserver: %v", err)
-	}
+	require.NoError(t, err, "unable to create wtserver")
 
 	signer := wtmock.NewMockSigner()
 	mockNet := newMockNet(server.InboundPeerConnected)
@@ -457,9 +443,7 @@ func newHarness(t *testing.T, cfg harnessCfg) *testHarness {
 		ForceQuitDelay: 10 * time.Second,
 	}
 	client, err := wtclient.New(clientCfg)
-	if err != nil {
-		t.Fatalf("Unable to create wtclient: %v", err)
-	}
+	require.NoError(t, err, "Unable to create wtclient")
 
 	if err := server.Start(); err != nil {
 		t.Fatalf("Unable to start wtserver: %v", err)
diff --git a/watchtower/wtserver/server_test.go b/watchtower/wtserver/server_test.go
index fdf40af33..be94e9bee 100644
--- a/watchtower/wtserver/server_test.go
+++ b/watchtower/wtserver/server_test.go
@@ -16,6 +16,7 @@ import (
 	"github.com/lightningnetwork/lnd/watchtower/wtmock"
 	"github.com/lightningnetwork/lnd/watchtower/wtserver"
 	"github.com/lightningnetwork/lnd/watchtower/wtwire"
+	"github.com/stretchr/testify/require"
 )
 
 var (
@@ -36,9 +37,7 @@ func randPubKey(t *testing.T) *btcec.PublicKey {
 	t.Helper()
 
 	sk, err := btcec.NewPrivateKey()
-	if err != nil {
-		t.Fatalf("unable to generate pubkey: %v", err)
-	}
+	require.NoError(t, err, "unable to generate pubkey")
 
 	return sk.PubKey()
 }
@@ -63,9 +62,7 @@ func initServer(t *testing.T, db wtserver.DB,
 		},
 		ChainHash: testnetChainHash,
 	})
-	if err != nil {
-		t.Fatalf("unable to create server: %v", err)
-	}
+	require.NoError(t, err, "unable to create server")
 
 	if err = s.Start(); err != nil {
 		t.Fatalf("unable to start server: %v", err)
@@ -101,9 +98,7 @@ func TestServerOnlyAcceptOnePeer(t *testing.T) {
 
 	var b bytes.Buffer
 	_, err := wtwire.WriteMessage(&b, init, 0)
-	if err != nil {
-		t.Fatalf("unable to write message: %v", err)
-	}
+	require.NoError(t, err, "unable to write message")
 
 	msg := b.Bytes()
diff --git a/zpay32/invoice_internal_test.go b/zpay32/invoice_internal_test.go
index c93c80693..35091d6a6 100644
--- a/zpay32/invoice_internal_test.go
+++ b/zpay32/invoice_internal_test.go
@@ -12,6 +12,7 @@ import (
 	"github.com/btcsuite/btcd/btcutil/bech32"
 	"github.com/btcsuite/btcd/chaincfg"
 	"github.com/lightningnetwork/lnd/lnwire"
+	"github.com/stretchr/testify/require"
 )
 
 // TestDecodeAmount ensures that the amount string in the hrp of the Invoice
@@ -567,9 +568,7 @@ func TestParseMaxUint64Expiry(t *testing.T) {
 	expiryBytes := uint64ToBase32(expiry)
 
 	expiryReParse, err := base32ToUint64(expiryBytes)
-	if err != nil {
-		t.Fatalf("unable to parse uint64: %v", err)
-	}
+	require.NoError(t, err, "unable to parse uint64")
 
 	if expiryReParse != expiry {
 		t.Fatalf("wrong expiry: expected %v got %v", expiry,
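One caveat worth noting for future conversions of this kind: require.NoError reports failure through t.FailNow, which the testing package only permits from the goroutine running the test. The originals here all used t.Fatalf, which carries the same restriction, so the rewrite is semantics-preserving; but error checks inside a spawned goroutine need the non-halting assert variant instead. A small sketch under that assumption, with doBackgroundWork as a hypothetical helper (not part of this patch):

package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

// doBackgroundWork is a hypothetical stand-in for work done off the
// main test goroutine.
func doBackgroundWork() error { return nil }

func TestFromGoroutine(t *testing.T) {
	done := make(chan struct{})
	go func() {
		defer close(done)
		// require.NoError would call t.FailNow, which must not run
		// off the test goroutine; assert.NoError only records the
		// failure via t.Errorf, which is safe here.
		assert.NoError(t, doBackgroundWork(), "background work failed")
	}()
	<-done
}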