wtclient: show that bound tasks are not replayed

This commit demonstrates a bug. It shows that if backup tasks have been
bound to a session with a tower (i.e., the tasks are in the session's
pendingQueue) and then the tower is removed and a new one is added, then
the tasks from the pendingQueue are _not_ replayed to the session with
the new tower. Instead, they are silently lost. This will be fixed in an
upcoming commit.
This commit is contained in:
Elle Mouton
2023-02-03 12:24:40 +02:00
parent feb35e65d8
commit c4fec3ebc9

View File

@@ -952,6 +952,27 @@ func (s *serverHarness) restart(op func(cfg *wtserver.Config)) {
op(s.cfg)
}
// assertUpdatesNotFound asserts that none of the given breach hints are
// present in the server's DB.
func (s *serverHarness) assertUpdatesNotFound(hints []blob.BreachHint) {
	s.t.Helper()

	// Collect the hints in a set for constant-time membership checks.
	unexpected := make(map[blob.BreachHint]struct{}, len(hints))
	for _, h := range hints {
		unexpected[h] = struct{}{}
	}

	// Give any in-flight updates a moment to land in the DB before we
	// assert their absence.
	time.Sleep(time.Second)

	matches, err := s.db.QueryMatches(hints)
	require.NoError(s.t, err, "unable to query for hints")

	// None of the returned matches may correspond to a hint we expect to
	// be absent.
	for _, m := range matches {
		_, found := unexpected[m.Hint]
		require.False(s.t, found, "breach hint was found in server DB")
	}
}
// waitForUpdates blocks until the breach hints provided all appear in the
// watchtower's database or the timeout expires. This is used to test that the
// client in fact sends the updates to the server, even if it is offline.
@@ -2213,6 +2234,77 @@ var clientTests = []clientTest{
server2.waitForUpdates(hints[numUpdates/2:], waitTime)
},
},
{
// Show that if a client switches to a new tower _after_ backup
// tasks have been bound to the session with the first (old)
// tower, then these updates are _not_ replayed onto the new
// tower — they are silently dropped instead. This demonstrates
// a bug that will be fixed in a future commit.
name: "switch to new tower after tasks are bound",
cfg: harnessCfg{
localBalance: localBalance,
remoteBalance: remoteBalance,
policy: wtpolicy.Policy{
TxPolicy: defaultTxPolicy,
MaxUpdates: 5,
},
},
fn: func(h *testHarness) {
const (
numUpdates = 5
chanID = 0
)
// Generate numUpdates retributions and back the first
// half of them up to the main (original) tower.
hints := h.advanceChannelN(chanID, numUpdates)
h.backupStates(chanID, 0, numUpdates/2, nil)
// Wait for all these updates to be populated in the
// server's database.
h.server.waitForUpdates(hints[:numUpdates/2], waitTime)
// Now stop the server so that it can no longer ACK
// updates.
h.server.stop()
// Back up a few more tasks while the tower is offline.
// This binds the backup tasks to the session with the
// old server without them ever being ACKed.
h.backupStates(chanID, numUpdates/2, numUpdates-1, nil)
// Now we add a new tower.
server2 := newServerHarness(
h.t, h.net, towerAddr2Str, nil,
)
server2.start()
h.addTower(server2.addr)
// Now we can remove the old one. Removal may only
// succeed once the client has released its references
// to the old tower, so retry until it does.
err := wait.Predicate(func() bool {
err := h.client.RemoveTower(
h.server.addr.IdentityKey, nil,
)
return err == nil
}, waitTime)
require.NoError(h.t, err)
// Back up the final task; this one is bound to a
// session with the new tower.
h.backupStates(chanID, numUpdates-1, numUpdates, nil)
// Show that only the latest backup made it to the new
// server, and that the updates bound while no tower was
// online were _not_ backed up to either server. This is
// the bug that will be fixed in a future commit: those
// bound-but-unacked tasks should have been replayed to
// the new tower.
server2.waitForUpdates(
hints[numUpdates-1:], time.Second,
)
server2.assertUpdatesNotFound(
hints[numUpdates/2 : numUpdates-1],
)
h.server.assertUpdatesNotFound(
hints[numUpdates/2 : numUpdates-1],
)
},
},
}
// TestClient executes the client test suite, asserting the ability to backup