wtclient: separate server from testHarness

In this commit, a serverHarness is added to the test framework. This
will allow us to create more mock servers apart from the main server.
This commit is contained in:
Elle Mouton
2023-03-23 09:33:31 +02:00
parent 2bb28f7aaf
commit 2ed5788de3

View File

@@ -38,6 +38,8 @@ import (
const ( const (
towerAddrStr = "18.28.243.2:9911" towerAddrStr = "18.28.243.2:9911"
towerAddr2Str = "19.29.244.3:9912" towerAddr2Str = "19.29.244.3:9912"
timeout = 200 * time.Millisecond
) )
var ( var (
@@ -392,18 +394,15 @@ func (c *mockChannel) getState(
} }
type testHarness struct { type testHarness struct {
t *testing.T t *testing.T
cfg harnessCfg cfg harnessCfg
signer *wtmock.MockSigner signer *wtmock.MockSigner
capacity lnwire.MilliSatoshi capacity lnwire.MilliSatoshi
clientDB *wtmock.ClientDB clientDB *wtmock.ClientDB
clientCfg *wtclient.Config clientCfg *wtclient.Config
client wtclient.Client client wtclient.Client
serverAddr *lnwire.NetAddress server *serverHarness
serverDB *wtmock.TowerDB net *mockNet
serverCfg *wtserver.Config
server *wtserver.Server
net *mockNet
blockEvents *mockBlockSub blockEvents *mockBlockSub
height int32 height int32
@@ -428,47 +427,23 @@ type harnessCfg struct {
} }
func newHarness(t *testing.T, cfg harnessCfg) *testHarness { func newHarness(t *testing.T, cfg harnessCfg) *testHarness {
towerTCPAddr, err := net.ResolveTCPAddr("tcp", towerAddrStr)
require.NoError(t, err, "Unable to resolve tower TCP addr")
privKey, err := btcec.NewPrivateKey()
require.NoError(t, err, "Unable to generate tower private key")
privKeyECDH := &keychain.PrivKeyECDH{PrivKey: privKey}
towerPubKey := privKey.PubKey()
towerAddr := &lnwire.NetAddress{
IdentityKey: towerPubKey,
Address: towerTCPAddr,
}
const timeout = 200 * time.Millisecond
serverDB := wtmock.NewTowerDB()
serverCfg := &wtserver.Config{
DB: serverDB,
ReadTimeout: timeout,
WriteTimeout: timeout,
NodeKeyECDH: privKeyECDH,
NewAddress: func() (btcutil.Address, error) {
return addr, nil
},
NoAckCreateSession: cfg.noAckCreateSession,
}
signer := wtmock.NewMockSigner() signer := wtmock.NewMockSigner()
mockNet := newMockNet() mockNet := newMockNet()
clientDB := wtmock.NewClientDB() clientDB := wtmock.NewClientDB()
server := newServerHarness(
t, mockNet, towerAddrStr, func(serverCfg *wtserver.Config) {
serverCfg.NoAckCreateSession = cfg.noAckCreateSession
},
)
h := &testHarness{ h := &testHarness{
t: t, t: t,
cfg: cfg, cfg: cfg,
signer: signer, signer: signer,
capacity: cfg.localBalance + cfg.remoteBalance, capacity: cfg.localBalance + cfg.remoteBalance,
clientDB: clientDB, clientDB: clientDB,
serverAddr: towerAddr, server: server,
serverDB: serverDB,
serverCfg: serverCfg,
net: mockNet, net: mockNet,
blockEvents: newMockBlockSub(t), blockEvents: newMockBlockSub(t),
channelEvents: newMockSubscription(t), channelEvents: newMockSubscription(t),
@@ -528,8 +503,8 @@ func newHarness(t *testing.T, cfg harnessCfg) *testHarness {
} }
if !cfg.noServerStart { if !cfg.noServerStart {
h.startServer() h.server.start()
t.Cleanup(h.stopServer) t.Cleanup(h.server.stop)
} }
h.startClient() h.startClient()
@@ -553,29 +528,6 @@ func (h *testHarness) mine(numBlocks int) {
} }
} }
// startServer creates a new server using the harness's current serverCfg and
// starts it after pointing the mockNet's callback to the new server.
func (h *testHarness) startServer() {
h.t.Helper()
var err error
h.server, err = wtserver.New(h.serverCfg)
require.NoError(h.t, err)
h.net.registerConnCallback(h.serverAddr, h.server.InboundPeerConnected)
require.NoError(h.t, h.server.Start())
}
// stopServer stops the main harness server.
func (h *testHarness) stopServer() {
h.t.Helper()
h.net.removeConnCallback(h.serverAddr)
require.NoError(h.t, h.server.Stop())
}
// startClient creates a new client using the harness's current clientCfg and // startClient creates a new client using the harness's current clientCfg and
// starts it. // starts it.
func (h *testHarness) startClient() { func (h *testHarness) startClient() {
@@ -584,7 +536,7 @@ func (h *testHarness) startClient() {
towerTCPAddr, err := net.ResolveTCPAddr("tcp", towerAddrStr) towerTCPAddr, err := net.ResolveTCPAddr("tcp", towerAddrStr)
require.NoError(h.t, err) require.NoError(h.t, err)
towerAddr := &lnwire.NetAddress{ towerAddr := &lnwire.NetAddress{
IdentityKey: h.serverCfg.NodeKeyECDH.PubKey(), IdentityKey: h.server.cfg.NodeKeyECDH.PubKey(),
Address: towerTCPAddr, Address: towerTCPAddr,
} }
@@ -817,7 +769,7 @@ func (h *testHarness) waitServerUpdates(hints []blob.BreachHint,
for { for {
select { select {
case <-time.After(time.Second): case <-time.After(time.Second):
matches, err := h.serverDB.QueryMatches(hints) matches, err := h.server.db.QueryMatches(hints)
require.NoError(h.t, err, "unable to query for hints") require.NoError(h.t, err, "unable to query for hints")
if wantUpdates && serverHasHints(matches) { if wantUpdates && serverHasHints(matches) {
@@ -830,7 +782,7 @@ func (h *testHarness) waitServerUpdates(hints []blob.BreachHint,
} }
case <-failTimeout: case <-failTimeout:
matches, err := h.serverDB.QueryMatches(hints) matches, err := h.server.db.QueryMatches(hints)
require.NoError(h.t, err, "unable to query for hints") require.NoError(h.t, err, "unable to query for hints")
require.Truef(h.t, serverHasHints(matches), "breach "+ require.Truef(h.t, serverHasHints(matches), "breach "+
"hints not received, only got %d/%d", "hints not received, only got %d/%d",
@@ -847,7 +799,7 @@ func (h *testHarness) assertUpdatesForPolicy(hints []blob.BreachHint,
expPolicy wtpolicy.Policy) { expPolicy wtpolicy.Policy) {
// Query for matches on the provided hints. // Query for matches on the provided hints.
matches, err := h.serverDB.QueryMatches(hints) matches, err := h.server.db.QueryMatches(hints)
require.NoError(h.t, err) require.NoError(h.t, err)
// Assert that the number of matches is exactly the number of provided // Assert that the number of matches is exactly the number of provided
@@ -996,6 +948,96 @@ func (m *mockBlockSub) sendNewBlock(height int32) {
} }
} }
// serverHarness represents a mock watchtower server.
type serverHarness struct {
// t is the test under which this harness runs; used for assertions.
t *testing.T
// net is the mock network that routes client dials to this server.
net *mockNet
// cfg is the server config; mutated between restarts to tweak behavior
// (e.g. NoAckUpdates, NoAckCreateSession).
cfg *wtserver.Config
// addr is the tower's advertised network address and identity key.
addr *lnwire.NetAddress
// db is the mock tower database backing the server.
db *wtmock.TowerDB
// server is the running wtserver instance; replaced on every start(),
// since a stopped instance is not reused.
server *wtserver.Server
}
// newServerHarness constructs a new mock watchtower server listening on the
// given network address via the provided mockNet. The optional opt callback
// may tweak the server config before the wtserver instance is built.
func newServerHarness(t *testing.T, mockNet *mockNet, netAddr string,
	opt func(cfg *wtserver.Config)) *serverHarness {

	tcpAddr, err := net.ResolveTCPAddr("tcp", netAddr)
	require.NoError(t, err, "Unable to resolve tower TCP addr")

	// Generate a fresh identity key for this tower.
	towerKey, err := btcec.NewPrivateKey()
	require.NoError(t, err, "Unable to generate tower private key")

	towerAddr := &lnwire.NetAddress{
		IdentityKey: towerKey.PubKey(),
		Address:     tcpAddr,
	}

	towerDB := wtmock.NewTowerDB()
	cfg := &wtserver.Config{
		DB:           towerDB,
		ReadTimeout:  timeout,
		WriteTimeout: timeout,
		NodeKeyECDH:  &keychain.PrivKeyECDH{PrivKey: towerKey},
		NewAddress: func() (btcutil.Address, error) {
			return addr, nil
		},
	}

	// Let the caller adjust the config before the server is constructed.
	if opt != nil {
		opt(cfg)
	}

	server, err := wtserver.New(cfg)
	require.NoError(t, err, "unable to create wtserver")

	return &serverHarness{
		t:      t,
		net:    mockNet,
		cfg:    cfg,
		db:     towerDB,
		addr:   towerAddr,
		server: server,
	}
}
// start builds a fresh wtserver.Server from the harness's current config,
// registers its inbound-connection callback with the mock network, and then
// starts it.
func (s *serverHarness) start() {
	s.t.Helper()

	// A stopped wtserver cannot be restarted, so construct a new instance
	// from the current config on every start.
	server, err := wtserver.New(s.cfg)
	require.NoError(s.t, err)
	s.server = server

	s.net.registerConnCallback(s.addr, s.server.InboundPeerConnected)
	require.NoError(s.t, s.server.Start())
}
// stop halts the server and removes its Dial callback from the mockNet.
func (s *serverHarness) stop() {
s.t.Helper()
// Stop the server first, then deregister its address from the mock
// network so subsequent dials to it fail.
require.NoError(s.t, s.server.Stop())
s.net.removeConnCallback(s.addr)
}
// restart cycles the server: it is stopped, the optional config mutation op
// is applied, and the server is brought back up with the updated config.
func (s *serverHarness) restart(op func(cfg *wtserver.Config)) {
	s.stop()

	// Deferred so the server comes back up whether or not op is supplied.
	defer s.start()

	if op != nil {
		op(s.cfg)
	}
}
const ( const (
localBalance = lnwire.MilliSatoshi(100000000) localBalance = lnwire.MilliSatoshi(100000000)
remoteBalance = lnwire.MilliSatoshi(200000000) remoteBalance = lnwire.MilliSatoshi(200000000)
@@ -1161,9 +1203,9 @@ var clientTests = []clientTest{
// Now, restart the server and prevent it from acking // Now, restart the server and prevent it from acking
// state updates. // state updates.
h.stopServer() h.server.restart(func(cfg *wtserver.Config) {
h.serverCfg.NoAckUpdates = true cfg.NoAckUpdates = true
h.startServer() })
// Send the next state update to the tower. Since the // Send the next state update to the tower. Since the
// tower isn't acking state updates, we expect this // tower isn't acking state updates, we expect this
@@ -1181,9 +1223,9 @@ var clientTests = []clientTest{
// Restart the server and allow it to ack the updates // Restart the server and allow it to ack the updates
// after the client retransmits the unacked update. // after the client retransmits the unacked update.
h.stopServer() h.server.restart(func(cfg *wtserver.Config) {
h.serverCfg.NoAckUpdates = false cfg.NoAckUpdates = false
h.startServer() })
// Restart the client and allow it to process the // Restart the client and allow it to process the
// committed update. // committed update.
@@ -1228,9 +1270,9 @@ var clientTests = []clientTest{
// Restart the server and prevent it from acking state // Restart the server and prevent it from acking state
// updates. // updates.
h.stopServer() h.server.restart(func(cfg *wtserver.Config) {
h.serverCfg.NoAckUpdates = true cfg.NoAckUpdates = true
h.startServer() })
// Now, queue the retributions for backup. // Now, queue the retributions for backup.
h.backupStates(chanID, 0, numUpdates, nil) h.backupStates(chanID, 0, numUpdates, nil)
@@ -1242,9 +1284,9 @@ var clientTests = []clientTest{
// Restart the server and allow it to ack the updates // Restart the server and allow it to ack the updates
// after the client retransmits the unacked updates. // after the client retransmits the unacked updates.
h.stopServer() h.server.restart(func(cfg *wtserver.Config) {
h.serverCfg.NoAckUpdates = false cfg.NoAckUpdates = false
h.startServer() })
// Wait for all the updates to be populated in the // Wait for all the updates to be populated in the
// server's database. // server's database.
@@ -1376,9 +1418,9 @@ var clientTests = []clientTest{
// Restart the server and allow it to ack session // Restart the server and allow it to ack session
// creation. // creation.
h.stopServer() h.server.restart(func(cfg *wtserver.Config) {
h.serverCfg.NoAckCreateSession = false cfg.NoAckCreateSession = false
h.startServer() })
// Restart the client with the same policy, which will // Restart the client with the same policy, which will
// immediately try to overwrite the old session with an // immediately try to overwrite the old session with an
@@ -1426,9 +1468,9 @@ var clientTests = []clientTest{
// Restart the server and allow it to ack session // Restart the server and allow it to ack session
// creation. // creation.
h.stopServer() h.server.restart(func(cfg *wtserver.Config) {
h.serverCfg.NoAckCreateSession = false cfg.NoAckCreateSession = false
h.startServer() })
// Restart the client with a new policy, which will // Restart the client with a new policy, which will
// immediately try to overwrite the prior session with // immediately try to overwrite the prior session with
@@ -1575,7 +1617,7 @@ var clientTests = []clientTest{
// Fully remove the tower, causing its existing sessions // Fully remove the tower, causing its existing sessions
// to be marked inactive. // to be marked inactive.
h.removeTower(h.serverAddr.IdentityKey, nil) h.removeTower(h.server.addr.IdentityKey, nil)
// Back up the remaining states. Since the tower has // Back up the remaining states. Since the tower has
// been removed, it shouldn't receive any updates. // been removed, it shouldn't receive any updates.
@@ -1585,18 +1627,20 @@ var clientTests = []clientTest{
// Re-add the tower. We prevent the tower from acking // Re-add the tower. We prevent the tower from acking
// session creation to ensure the inactive sessions are // session creation to ensure the inactive sessions are
// not used. // not used.
h.stopServer() h.server.restart(func(cfg *wtserver.Config) {
h.serverCfg.NoAckCreateSession = true cfg.NoAckCreateSession = true
h.startServer() })
h.addTower(h.serverAddr)
h.addTower(h.server.addr)
h.waitServerUpdates(nil, time.Second) h.waitServerUpdates(nil, time.Second)
// Finally, allow the tower to ack session creation, // Finally, allow the tower to ack session creation,
// allowing the state updates to be sent through the new // allowing the state updates to be sent through the new
// session. // session.
h.stopServer() h.server.restart(func(cfg *wtserver.Config) {
h.serverCfg.NoAckCreateSession = false cfg.NoAckCreateSession = false
h.startServer() })
h.waitServerUpdates(hints[numUpdates/2:], waitTime) h.waitServerUpdates(hints[numUpdates/2:], waitTime)
}, },
}, },
@@ -1630,9 +1674,9 @@ var clientTests = []clientTest{
// Now, restart the tower and prevent it from acking any // Now, restart the tower and prevent it from acking any
// new sessions. We do this here as once the last slot // new sessions. We do this here as once the last slot
// is exhausted the client will attempt to renegotiate. // is exhausted the client will attempt to renegotiate.
h.stopServer() h.server.restart(func(cfg *wtserver.Config) {
h.serverCfg.NoAckCreateSession = true cfg.NoAckCreateSession = true
h.startServer() })
// Back up the remaining two states. Once the first is // Back up the remaining two states. Once the first is
// processed, the session will be exhausted but the // processed, the session will be exhausted but the
@@ -1647,7 +1691,7 @@ var clientTests = []clientTest{
// state to process. After the force quit delay // state to process. After the force quit delay
// expires, the client should force quit itself and // expires, the client should force quit itself and
// allow the test to complete. // allow the test to complete.
h.stopServer() h.server.stop()
}, },
}, },
{ {
@@ -1680,7 +1724,7 @@ var clientTests = []clientTest{
h.waitServerUpdates(hints[:len(hints)/2], waitTime) h.waitServerUpdates(hints[:len(hints)/2], waitTime)
// Stop the server. // Stop the server.
h.stopServer() h.server.stop()
// Change the address of the server. // Change the address of the server.
towerTCPAddr, err := net.ResolveTCPAddr( towerTCPAddr, err := net.ResolveTCPAddr(
@@ -1688,12 +1732,12 @@ var clientTests = []clientTest{
) )
require.NoError(h.t, err) require.NoError(h.t, err)
oldAddr := h.serverAddr.Address oldAddr := h.server.addr.Address
towerAddr := &lnwire.NetAddress{ towerAddr := &lnwire.NetAddress{
IdentityKey: h.serverAddr.IdentityKey, IdentityKey: h.server.addr.IdentityKey,
Address: towerTCPAddr, Address: towerTCPAddr,
} }
h.serverAddr = towerAddr h.server.addr = towerAddr
// Add the new tower address to the client. // Add the new tower address to the client.
err = h.client.AddTower(towerAddr) err = h.client.AddTower(towerAddr)
@@ -1706,7 +1750,7 @@ var clientTests = []clientTest{
require.NoError(h.t, err) require.NoError(h.t, err)
// Restart the server. // Restart the server.
h.startServer() h.server.start()
// Now attempt to back up the rest of the updates. // Now attempt to back up the rest of the updates.
h.backupStates(chanID, numUpdates/2, maxUpdates, nil) h.backupStates(chanID, numUpdates/2, maxUpdates, nil)
@@ -1735,7 +1779,7 @@ var clientTests = []clientTest{
// the client should be able to remove the server. // the client should be able to remove the server.
err := wait.NoError(func() error { err := wait.NoError(func() error {
return h.client.RemoveTower( return h.client.RemoveTower(
h.serverAddr.IdentityKey, nil, h.server.addr.IdentityKey, nil,
) )
}, waitTime) }, waitTime)
require.NoError(h.t, err) require.NoError(h.t, err)
@@ -1744,12 +1788,12 @@ var clientTests = []clientTest{
// when the client calls it. This will force the client // when the client calls it. This will force the client
// to remain in the state where it has locked the // to remain in the state where it has locked the
// address of the server. // address of the server.
h.server, err = wtserver.New(h.serverCfg) h.server.server, err = wtserver.New(h.server.cfg)
require.NoError(h.t, err) require.NoError(h.t, err)
cancel := make(chan struct{}) cancel := make(chan struct{})
h.net.registerConnCallback( h.net.registerConnCallback(
h.serverAddr, func(peer wtserver.Peer) { h.server.addr, func(peer wtserver.Peer) {
select { select {
case <-h.quit: case <-h.quit:
case <-cancel: case <-cancel:
@@ -1764,20 +1808,20 @@ var clientTests = []clientTest{
require.NoError(h.t, err) require.NoError(h.t, err)
towerAddr := &lnwire.NetAddress{ towerAddr := &lnwire.NetAddress{
IdentityKey: h.serverAddr.IdentityKey, IdentityKey: h.server.addr.IdentityKey,
Address: towerTCPAddr, Address: towerTCPAddr,
} }
// Register the new address in the mock-net. // Register the new address in the mock-net.
h.net.registerConnCallback( h.net.registerConnCallback(
towerAddr, h.server.InboundPeerConnected, towerAddr, h.server.server.InboundPeerConnected,
) )
// Now start the server. // Now start the server.
require.NoError(h.t, h.server.Start()) require.NoError(h.t, h.server.server.Start())
// Re-add the server to the client // Re-add the server to the client
err = h.client.AddTower(h.serverAddr) err = h.client.AddTower(h.server.addr)
require.NoError(h.t, err) require.NoError(h.t, err)
// Also add the new tower address. // Also add the new tower address.
@@ -1790,8 +1834,8 @@ var clientTests = []clientTest{
// negotiation. // negotiation.
err = wait.Predicate(func() bool { err = wait.Predicate(func() bool {
err = h.client.RemoveTower( err = h.client.RemoveTower(
h.serverAddr.IdentityKey, h.server.addr.IdentityKey,
h.serverAddr.Address, h.server.addr.Address,
) )
return errors.Is(err, wtclient.ErrAddrInUse) return errors.Is(err, wtclient.ErrAddrInUse)
}, waitTime) }, waitTime)
@@ -1801,7 +1845,7 @@ var clientTests = []clientTest{
// it is not being used for session negotiation. // it is not being used for session negotiation.
err = wait.NoError(func() error { err = wait.NoError(func() error {
return h.client.RemoveTower( return h.client.RemoveTower(
h.serverAddr.IdentityKey, towerTCPAddr, h.server.addr.IdentityKey, towerTCPAddr,
) )
}, waitTime) }, waitTime)
require.NoError(h.t, err) require.NoError(h.t, err)
@@ -1813,7 +1857,7 @@ var clientTests = []clientTest{
// address. // address.
err = wait.NoError(func() error { err = wait.NoError(func() error {
return h.client.RemoveTower( return h.client.RemoveTower(
h.serverAddr.IdentityKey, nil, h.server.addr.IdentityKey, nil,
) )
}, waitTime) }, waitTime)
require.NoError(h.t, err) require.NoError(h.t, err)
@@ -1970,7 +2014,7 @@ var clientTests = []clientTest{
// Assert that the server is also aware of all of these // Assert that the server is also aware of all of these
// sessions. // sessions.
for sid := range closableSess { for sid := range closableSess {
_, err := h.serverDB.GetSessionInfo(&sid) _, err := h.server.db.GetSessionInfo(&sid)
require.NoError(h.t, err) require.NoError(h.t, err)
} }
@@ -2004,7 +2048,7 @@ var clientTests = []clientTest{
return false return false
} }
_, err := h.serverDB.GetSessionInfo( _, err := h.server.db.GetSessionInfo(
&sid, &sid,
) )
if !errors.Is( if !errors.Is(
@@ -2107,7 +2151,7 @@ var clientTests = []clientTest{
// Restart the Client (force quit). And also now start // Restart the Client (force quit). And also now start
// the server. // the server.
h.client.ForceQuit() h.client.ForceQuit()
h.startServer() h.server.start()
h.startClient() h.startClient()
// Back up a few more states. // Back up a few more states.