wtclient: cleanup the test file

This commit just does some linting of the client_test.go file so that
future commits are easier to review.
This commit is contained in:
Elle Mouton 2023-03-22 08:39:53 +02:00
parent 588a7eb9ca
commit cab0560d5e
No known key found for this signature in database
GPG Key ID: D7D916376026F177

View File

@ -36,8 +36,6 @@ import (
)
const (
csvDelay uint32 = 144
towerAddrStr = "18.28.243.2:9911"
towerAddr2Str = "19.29.244.3:9912"
)
@ -73,6 +71,16 @@ var (
addrScript, _ = txscript.PayToAddrScript(addr)
waitTime = 5 * time.Second
defaultTxPolicy = wtpolicy.TxPolicy{
BlobType: blob.TypeAltruistCommit,
SweepFeeRate: wtpolicy.DefaultSweepFeeRate,
}
highSweepRateTxPolicy = wtpolicy.TxPolicy{
BlobType: blob.TypeAltruistCommit,
SweepFeeRate: 1000000, // The high sweep fee creates dust.
}
)
// randPrivKey generates a new secp keypair, and returns the public key.
@ -823,7 +831,7 @@ func (h *testHarness) assertUpdatesForPolicy(hints []blob.BreachHint,
require.Lenf(h.t, matches, len(hints), "expected: %d matches, got: %d",
len(hints), len(matches))
// Assert that all of the matches correspond to a session with the
// Assert that all the matches correspond to a session with the
// expected policy.
for _, match := range matches {
matchPolicy := match.SessionInfo.Policy
@ -969,11 +977,6 @@ const (
remoteBalance = lnwire.MilliSatoshi(200000000)
)
var defaultTxPolicy = wtpolicy.TxPolicy{
BlobType: blob.TypeAltruistCommit,
SweepFeeRate: wtpolicy.DefaultSweepFeeRate,
}
type clientTest struct {
name string
cfg harnessCfg
@ -1072,7 +1075,7 @@ var clientTests = []clientTest{
// pipeline is always flushed before it exits.
go h.client.Stop()
// Wait for all of the updates to be populated in the
// Wait for all the updates to be populated in the
// server's database.
h.waitServerUpdates(hints, time.Second)
},
@ -1086,10 +1089,7 @@ var clientTests = []clientTest{
localBalance: localBalance,
remoteBalance: remoteBalance,
policy: wtpolicy.Policy{
TxPolicy: wtpolicy.TxPolicy{
BlobType: blob.TypeAltruistCommit,
SweepFeeRate: 1000000, // high sweep fee creates dust
},
TxPolicy: highSweepRateTxPolicy,
MaxUpdates: 20000,
},
},
@ -1177,7 +1177,7 @@ var clientTests = []clientTest{
// the tower to receive the remaining states.
h.backupStates(chanID, numSent, numUpdates, nil)
// Wait for all of the updates to be populated in the
// Wait for all the updates to be populated in the
// server's database.
h.waitServerUpdates(hints, time.Second)
@ -1230,7 +1230,7 @@ var clientTests = []clientTest{
h.serverCfg.NoAckUpdates = false
h.startServer()
// Wait for all of the updates to be populated in the
// Wait for all the updates to be populated in the
// server's database.
h.waitServerUpdates(hints, waitTime)
},
@ -1252,9 +1252,11 @@ var clientTests = []clientTest{
},
fn: func(h *testHarness) {
var (
capacity = h.cfg.localBalance + h.cfg.remoteBalance
capacity = h.cfg.localBalance +
h.cfg.remoteBalance
paymentAmt = lnwire.MilliSatoshi(2000000)
numSends = uint64(h.cfg.localBalance / paymentAmt)
numSends = uint64(h.cfg.localBalance) /
uint64(paymentAmt)
numRecvs = uint64(capacity / paymentAmt)
numUpdates = numSends + numRecvs // 200 updates
chanID = uint64(0)
@ -1262,11 +1264,15 @@ var clientTests = []clientTest{
// Send money to the remote party until all funds are
// depleted.
sendHints := h.sendPayments(chanID, 0, numSends, paymentAmt)
sendHints := h.sendPayments(
chanID, 0, numSends, paymentAmt,
)
// Now, sequentially receive the entire channel balance
// from the remote party.
recvHints := h.recvPayments(chanID, numSends, numUpdates, paymentAmt)
recvHints := h.recvPayments(
chanID, numSends, numUpdates, paymentAmt,
)
// Collect the hints generated by both sending and
// receiving.
@ -1275,7 +1281,7 @@ var clientTests = []clientTest{
// Backup the channel's states the client.
h.backupStates(chanID, 0, numUpdates, nil)
// Wait for all of the updates to be populated in the
// Wait for all the updates to be populated in the
// server's database.
h.waitServerUpdates(hints, 3*time.Second)
},
@ -1292,10 +1298,7 @@ var clientTests = []clientTest{
},
},
fn: func(h *testHarness) {
const (
numUpdates = 5
numChans = 10
)
const numUpdates = 5
// Initialize and register an additional 9 channels.
for id := uint64(1); id < 10; id++ {
@ -1323,7 +1326,7 @@ var clientTests = []clientTest{
// Test reliable flush under multi-client scenario.
go h.client.Stop()
// Wait for all of the updates to be populated in the
// Wait for all the updates to be populated in the
// server's database.
h.waitServerUpdates(hints, 10*time.Second)
},
@ -1372,7 +1375,7 @@ var clientTests = []clientTest{
// Now, queue the retributions for backup.
h.backupStates(chanID, 0, numUpdates, nil)
// Wait for all of the updates to be populated in the
// Wait for all the updates to be populated in the
// server's database.
h.waitServerUpdates(hints, waitTime)
@ -1426,7 +1429,7 @@ var clientTests = []clientTest{
// Now, queue the retributions for backup.
h.backupStates(chanID, 0, numUpdates, nil)
// Wait for all of the updates to be populated in the
// Wait for all the updates to be populated in the
// server's database.
h.waitServerUpdates(hints, waitTime)
@ -1469,11 +1472,11 @@ var clientTests = []clientTest{
require.NoError(h.t, h.client.Stop())
// Record the policy that the first half was stored
// under. We'll expect the second half to also be stored
// under the original policy, since we are only adjusting
// the MaxUpdates. The client should detect that the
// two policies have equivalent TxPolicies and continue
// using the first.
// under. We'll expect the second half to also be
// stored under the original policy, since we are only
// adjusting the MaxUpdates. The client should detect
// that the two policies have equivalent TxPolicies and
// continue using the first.
expPolicy := h.clientCfg.Policy
// Restart the client with a new policy.
@ -1483,7 +1486,7 @@ var clientTests = []clientTest{
// Now, queue the second half of the retributions.
h.backupStates(chanID, numUpdates/2, numUpdates, nil)
// Wait for all of the updates to be populated in the
// Wait for all the updates to be populated in the
// server's database.
h.waitServerUpdates(hints, waitTime)
@ -1534,7 +1537,7 @@ var clientTests = []clientTest{
// the second half should actually be sent.
h.backupStates(chanID, 0, numUpdates, nil)
// Wait for all of the updates to be populated in the
// Wait for all the updates to be populated in the
// server's database.
h.waitServerUpdates(hints, waitTime)
},
@ -1599,10 +1602,7 @@ var clientTests = []clientTest{
localBalance: localBalance,
remoteBalance: remoteBalance,
policy: wtpolicy.Policy{
TxPolicy: wtpolicy.TxPolicy{
BlobType: blob.TypeAltruistCommit,
SweepFeeRate: wtpolicy.DefaultSweepFeeRate,
},
TxPolicy: defaultTxPolicy,
MaxUpdates: 5,
},
},
@ -1629,7 +1629,7 @@ var clientTests = []clientTest{
// Back up the remaining two states. Once the first is
// processed, the session will be exhausted but the
// client won't be able to regnegotiate a session for
// client won't be able to renegotiate a session for
// the final state. We'll only wait for the first five
// states to arrive at the tower.
h.backupStates(chanID, maxUpdates-1, numUpdates, nil)