wtclient: cleanup the test file

This commit just does some linting of the client_test.go file so that
future commits are easier to parse.
This commit is contained in:
Elle Mouton 2023-03-22 08:39:53 +02:00
parent 588a7eb9ca
commit cab0560d5e
No known key found for this signature in database
GPG Key ID: D7D916376026F177

View File

@ -36,8 +36,6 @@ import (
) )
const ( const (
csvDelay uint32 = 144
towerAddrStr = "18.28.243.2:9911" towerAddrStr = "18.28.243.2:9911"
towerAddr2Str = "19.29.244.3:9912" towerAddr2Str = "19.29.244.3:9912"
) )
@ -73,6 +71,16 @@ var (
addrScript, _ = txscript.PayToAddrScript(addr) addrScript, _ = txscript.PayToAddrScript(addr)
waitTime = 5 * time.Second waitTime = 5 * time.Second
defaultTxPolicy = wtpolicy.TxPolicy{
BlobType: blob.TypeAltruistCommit,
SweepFeeRate: wtpolicy.DefaultSweepFeeRate,
}
highSweepRateTxPolicy = wtpolicy.TxPolicy{
BlobType: blob.TypeAltruistCommit,
SweepFeeRate: 1000000, // The high sweep fee creates dust.
}
) )
// randPrivKey generates a new secp keypair, and returns the public key. // randPrivKey generates a new secp keypair, and returns the public key.
@ -823,7 +831,7 @@ func (h *testHarness) assertUpdatesForPolicy(hints []blob.BreachHint,
require.Lenf(h.t, matches, len(hints), "expected: %d matches, got: %d", require.Lenf(h.t, matches, len(hints), "expected: %d matches, got: %d",
len(hints), len(matches)) len(hints), len(matches))
// Assert that all of the matches correspond to a session with the // Assert that all the matches correspond to a session with the
// expected policy. // expected policy.
for _, match := range matches { for _, match := range matches {
matchPolicy := match.SessionInfo.Policy matchPolicy := match.SessionInfo.Policy
@ -969,11 +977,6 @@ const (
remoteBalance = lnwire.MilliSatoshi(200000000) remoteBalance = lnwire.MilliSatoshi(200000000)
) )
var defaultTxPolicy = wtpolicy.TxPolicy{
BlobType: blob.TypeAltruistCommit,
SweepFeeRate: wtpolicy.DefaultSweepFeeRate,
}
type clientTest struct { type clientTest struct {
name string name string
cfg harnessCfg cfg harnessCfg
@ -1072,7 +1075,7 @@ var clientTests = []clientTest{
// pipeline is always flushed before it exits. // pipeline is always flushed before it exits.
go h.client.Stop() go h.client.Stop()
// Wait for all of the updates to be populated in the // Wait for all the updates to be populated in the
// server's database. // server's database.
h.waitServerUpdates(hints, time.Second) h.waitServerUpdates(hints, time.Second)
}, },
@ -1086,10 +1089,7 @@ var clientTests = []clientTest{
localBalance: localBalance, localBalance: localBalance,
remoteBalance: remoteBalance, remoteBalance: remoteBalance,
policy: wtpolicy.Policy{ policy: wtpolicy.Policy{
TxPolicy: wtpolicy.TxPolicy{ TxPolicy: highSweepRateTxPolicy,
BlobType: blob.TypeAltruistCommit,
SweepFeeRate: 1000000, // high sweep fee creates dust
},
MaxUpdates: 20000, MaxUpdates: 20000,
}, },
}, },
@ -1177,7 +1177,7 @@ var clientTests = []clientTest{
// the tower to receive the remaining states. // the tower to receive the remaining states.
h.backupStates(chanID, numSent, numUpdates, nil) h.backupStates(chanID, numSent, numUpdates, nil)
// Wait for all of the updates to be populated in the // Wait for all the updates to be populated in the
// server's database. // server's database.
h.waitServerUpdates(hints, time.Second) h.waitServerUpdates(hints, time.Second)
@ -1230,7 +1230,7 @@ var clientTests = []clientTest{
h.serverCfg.NoAckUpdates = false h.serverCfg.NoAckUpdates = false
h.startServer() h.startServer()
// Wait for all of the updates to be populated in the // Wait for all the updates to be populated in the
// server's database. // server's database.
h.waitServerUpdates(hints, waitTime) h.waitServerUpdates(hints, waitTime)
}, },
@ -1252,9 +1252,11 @@ var clientTests = []clientTest{
}, },
fn: func(h *testHarness) { fn: func(h *testHarness) {
var ( var (
capacity = h.cfg.localBalance + h.cfg.remoteBalance capacity = h.cfg.localBalance +
h.cfg.remoteBalance
paymentAmt = lnwire.MilliSatoshi(2000000) paymentAmt = lnwire.MilliSatoshi(2000000)
numSends = uint64(h.cfg.localBalance / paymentAmt) numSends = uint64(h.cfg.localBalance) /
uint64(paymentAmt)
numRecvs = uint64(capacity / paymentAmt) numRecvs = uint64(capacity / paymentAmt)
numUpdates = numSends + numRecvs // 200 updates numUpdates = numSends + numRecvs // 200 updates
chanID = uint64(0) chanID = uint64(0)
@ -1262,11 +1264,15 @@ var clientTests = []clientTest{
// Send money to the remote party until all funds are // Send money to the remote party until all funds are
// depleted. // depleted.
sendHints := h.sendPayments(chanID, 0, numSends, paymentAmt) sendHints := h.sendPayments(
chanID, 0, numSends, paymentAmt,
)
// Now, sequentially receive the entire channel balance // Now, sequentially receive the entire channel balance
// from the remote party. // from the remote party.
recvHints := h.recvPayments(chanID, numSends, numUpdates, paymentAmt) recvHints := h.recvPayments(
chanID, numSends, numUpdates, paymentAmt,
)
// Collect the hints generated by both sending and // Collect the hints generated by both sending and
// receiving. // receiving.
@ -1275,7 +1281,7 @@ var clientTests = []clientTest{
// Backup the channel's states to the client. // Backup the channel's states to the client.
h.backupStates(chanID, 0, numUpdates, nil) h.backupStates(chanID, 0, numUpdates, nil)
// Wait for all of the updates to be populated in the // Wait for all the updates to be populated in the
// server's database. // server's database.
h.waitServerUpdates(hints, 3*time.Second) h.waitServerUpdates(hints, 3*time.Second)
}, },
@ -1292,10 +1298,7 @@ var clientTests = []clientTest{
}, },
}, },
fn: func(h *testHarness) { fn: func(h *testHarness) {
const ( const numUpdates = 5
numUpdates = 5
numChans = 10
)
// Initialize and register an additional 9 channels. // Initialize and register an additional 9 channels.
for id := uint64(1); id < 10; id++ { for id := uint64(1); id < 10; id++ {
@ -1323,7 +1326,7 @@ var clientTests = []clientTest{
// Test reliable flush under multi-client scenario. // Test reliable flush under multi-client scenario.
go h.client.Stop() go h.client.Stop()
// Wait for all of the updates to be populated in the // Wait for all the updates to be populated in the
// server's database. // server's database.
h.waitServerUpdates(hints, 10*time.Second) h.waitServerUpdates(hints, 10*time.Second)
}, },
@ -1372,7 +1375,7 @@ var clientTests = []clientTest{
// Now, queue the retributions for backup. // Now, queue the retributions for backup.
h.backupStates(chanID, 0, numUpdates, nil) h.backupStates(chanID, 0, numUpdates, nil)
// Wait for all of the updates to be populated in the // Wait for all the updates to be populated in the
// server's database. // server's database.
h.waitServerUpdates(hints, waitTime) h.waitServerUpdates(hints, waitTime)
@ -1426,7 +1429,7 @@ var clientTests = []clientTest{
// Now, queue the retributions for backup. // Now, queue the retributions for backup.
h.backupStates(chanID, 0, numUpdates, nil) h.backupStates(chanID, 0, numUpdates, nil)
// Wait for all of the updates to be populated in the // Wait for all the updates to be populated in the
// server's database. // server's database.
h.waitServerUpdates(hints, waitTime) h.waitServerUpdates(hints, waitTime)
@ -1469,11 +1472,11 @@ var clientTests = []clientTest{
require.NoError(h.t, h.client.Stop()) require.NoError(h.t, h.client.Stop())
// Record the policy that the first half was stored // Record the policy that the first half was stored
// under. We'll expect the second half to also be stored // under. We'll expect the second half to also be
// under the original policy, since we are only adjusting // stored under the original policy, since we are only
// the MaxUpdates. The client should detect that the // adjusting the MaxUpdates. The client should detect
// two policies have equivalent TxPolicies and continue // that the two policies have equivalent TxPolicies and
// using the first. // continue using the first.
expPolicy := h.clientCfg.Policy expPolicy := h.clientCfg.Policy
// Restart the client with a new policy. // Restart the client with a new policy.
@ -1483,7 +1486,7 @@ var clientTests = []clientTest{
// Now, queue the second half of the retributions. // Now, queue the second half of the retributions.
h.backupStates(chanID, numUpdates/2, numUpdates, nil) h.backupStates(chanID, numUpdates/2, numUpdates, nil)
// Wait for all of the updates to be populated in the // Wait for all the updates to be populated in the
// server's database. // server's database.
h.waitServerUpdates(hints, waitTime) h.waitServerUpdates(hints, waitTime)
@ -1534,7 +1537,7 @@ var clientTests = []clientTest{
// the second half should actually be sent. // the second half should actually be sent.
h.backupStates(chanID, 0, numUpdates, nil) h.backupStates(chanID, 0, numUpdates, nil)
// Wait for all of the updates to be populated in the // Wait for all the updates to be populated in the
// server's database. // server's database.
h.waitServerUpdates(hints, waitTime) h.waitServerUpdates(hints, waitTime)
}, },
@ -1599,10 +1602,7 @@ var clientTests = []clientTest{
localBalance: localBalance, localBalance: localBalance,
remoteBalance: remoteBalance, remoteBalance: remoteBalance,
policy: wtpolicy.Policy{ policy: wtpolicy.Policy{
TxPolicy: wtpolicy.TxPolicy{ TxPolicy: defaultTxPolicy,
BlobType: blob.TypeAltruistCommit,
SweepFeeRate: wtpolicy.DefaultSweepFeeRate,
},
MaxUpdates: 5, MaxUpdates: 5,
}, },
}, },
@ -1629,7 +1629,7 @@ var clientTests = []clientTest{
// Back up the remaining two states. Once the first is // Back up the remaining two states. Once the first is
// processed, the session will be exhausted but the // processed, the session will be exhausted but the
// client won't be able to regnegotiate a session for // client won't be able to renegotiate a session for
// the final state. We'll only wait for the first five // the final state. We'll only wait for the first five
// states to arrive at the tower. // states to arrive at the tower.
h.backupStates(chanID, maxUpdates-1, numUpdates, nil) h.backupStates(chanID, maxUpdates-1, numUpdates, nil)