mirror of
https://github.com/lightningnetwork/lnd.git
synced 2025-08-31 08:02:25 +02:00
watchtower: add forceNext to NextSessionKeyIndex
This commit adds a forceNext boolean parameter to NextSessionKeyIndex. Setting this param to true will force the index to cycle over 1000 key indices before returning the new key.
This commit is contained in:
@@ -643,9 +643,10 @@ func (c *ClientDB) ListTowers() ([]*Tower, error) {
|
||||
// particular tower id. The index is reserved for that tower until
|
||||
// CreateClientSession is invoked for that tower and index, at which point a new
|
||||
// index for that tower can be reserved. Multiple calls to this method before
|
||||
// CreateClientSession is invoked should return the same index.
|
||||
// CreateClientSession is invoked should return the same index unless forceNext
|
||||
// is true.
|
||||
func (c *ClientDB) NextSessionKeyIndex(towerID TowerID,
|
||||
blobType blob.Type) (uint32, error) {
|
||||
blobType blob.Type, forceNext bool) (uint32, error) {
|
||||
|
||||
var index uint32
|
||||
err := kvdb.Update(c.db, func(tx kvdb.RwTx) error {
|
||||
@@ -654,33 +655,48 @@ func (c *ClientDB) NextSessionKeyIndex(towerID TowerID,
|
||||
return ErrUninitializedDB
|
||||
}
|
||||
|
||||
// Check the session key index to see if a key has already been
|
||||
// reserved for this tower. If so, we'll deserialize and return
|
||||
// the index directly.
|
||||
var err error
|
||||
index, err = getSessionKeyIndex(keyIndex, towerID, blobType)
|
||||
if err == nil {
|
||||
return nil
|
||||
if !forceNext {
|
||||
// Check the session key index to see if a key has
|
||||
// already been reserved for this tower. If so, we'll
|
||||
// deserialize and return the index directly.
|
||||
index, err = getSessionKeyIndex(
|
||||
keyIndex, towerID, blobType,
|
||||
)
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// Otherwise, generate a new session key index since the node
|
||||
// doesn't already have reserved index. The error is ignored
|
||||
// since NextSequence can't fail inside Update.
|
||||
index64, _ := keyIndex.NextSequence()
|
||||
// By default, we use the next available bucket sequence as the
|
||||
// key index. But if forceNext is true, then it is assumed that
|
||||
// some data loss occurred and so the sequence is incremented
|
||||
// by a jump of 1000 so that we can arrive at a brand new key
|
||||
// index quicker.
|
||||
currentSequence := keyIndex.Sequence()
|
||||
nextIndex := currentSequence + 1
|
||||
if forceNext {
|
||||
nextIndex = currentSequence + 1000
|
||||
}
|
||||
|
||||
if err = keyIndex.SetSequence(nextIndex); err != nil {
|
||||
return fmt.Errorf("could not set next bucket "+
|
||||
"sequence: %w", err)
|
||||
}
|
||||
|
||||
// As a sanity check, assert that the index is still in the
|
||||
// valid range of unhardened pubkeys. In the future, we should
|
||||
// move to only using hardened keys, and this will prevent any
|
||||
// overlap from occurring until then. This also prevents us from
|
||||
// overflowing uint32s.
|
||||
if index64 > math.MaxInt32 {
|
||||
if nextIndex > math.MaxInt32 {
|
||||
return fmt.Errorf("exhausted session key indexes")
|
||||
}
|
||||
|
||||
// Create the key that will be used to store the reserved index.
|
||||
keyBytes := createSessionKeyIndexKey(towerID, blobType)
|
||||
|
||||
index = uint32(index64)
|
||||
index = uint32(nextIndex)
|
||||
|
||||
var indexBuf [4]byte
|
||||
byteOrder.PutUint32(indexBuf[:], index)
|
||||
|
@@ -62,12 +62,12 @@ func (h *clientDBHarness) listSessions(id *wtdb.TowerID,
|
||||
return sessions
|
||||
}
|
||||
|
||||
func (h *clientDBHarness) nextKeyIndex(id wtdb.TowerID,
|
||||
blobType blob.Type) uint32 {
|
||||
func (h *clientDBHarness) nextKeyIndex(id wtdb.TowerID, blobType blob.Type,
|
||||
forceNext bool) uint32 {
|
||||
|
||||
h.t.Helper()
|
||||
|
||||
index, err := h.db.NextSessionKeyIndex(id, blobType)
|
||||
index, err := h.db.NextSessionKeyIndex(id, blobType, forceNext)
|
||||
require.NoError(h.t, err, "unable to create next session key index")
|
||||
require.NotZero(h.t, index, "next key index should never be 0")
|
||||
|
||||
@@ -309,7 +309,7 @@ func testCreateClientSession(h *clientDBHarness) {
|
||||
h.insertSession(session, wtdb.ErrNoReservedKeyIndex)
|
||||
|
||||
// Now, reserve a session key for this tower.
|
||||
keyIndex := h.nextKeyIndex(session.TowerID, blobType)
|
||||
keyIndex := h.nextKeyIndex(session.TowerID, blobType, false)
|
||||
|
||||
// The client session hasn't been updated with the reserved key index
|
||||
// (since it's still zero). Inserting should fail due to the mismatch.
|
||||
@@ -318,7 +318,7 @@ func testCreateClientSession(h *clientDBHarness) {
|
||||
// Reserve another key for the same index. Since no session has been
|
||||
// successfully created, it should return the same index to maintain
|
||||
// idempotency across restarts.
|
||||
keyIndex2 := h.nextKeyIndex(session.TowerID, blobType)
|
||||
keyIndex2 := h.nextKeyIndex(session.TowerID, blobType, false)
|
||||
require.Equalf(h.t, keyIndex, keyIndex2, "next key index should "+
|
||||
"be idempotent: want: %v, got %v", keyIndex, keyIndex2)
|
||||
|
||||
@@ -335,11 +335,23 @@ func testCreateClientSession(h *clientDBHarness) {
|
||||
// session already existing.
|
||||
h.insertSession(session, wtdb.ErrClientSessionAlreadyExists)
|
||||
|
||||
// Finally, assert that reserving another key index succeeds with a
|
||||
// different key index, now that the first one has been finalized.
|
||||
keyIndex3 := h.nextKeyIndex(session.TowerID, blobType)
|
||||
// Assert that reserving another key index succeeds with a different key
|
||||
// index, now that the first one has been finalized.
|
||||
keyIndex3 := h.nextKeyIndex(session.TowerID, blobType, false)
|
||||
require.NotEqualf(h.t, keyIndex, keyIndex3, "key index still "+
|
||||
"reserved after creating session")
|
||||
|
||||
// Show that calling NextSessionKeyIndex again now will result in the
|
||||
// same key being returned as long as forceNext remains false.
|
||||
keyIndex4 := h.nextKeyIndex(session.TowerID, blobType, false)
|
||||
require.Equal(h.t, keyIndex3, keyIndex4)
|
||||
|
||||
// Finally, assert that if the forceNext param of the
|
||||
// NextSessionKeyIndex method is true, then the key index returned will
|
||||
// be different.
|
||||
keyIndex5 := h.nextKeyIndex(session.TowerID, blobType, true)
|
||||
require.NotEqual(h.t, keyIndex5, keyIndex4)
|
||||
require.Equal(h.t, keyIndex3+1000, keyIndex5)
|
||||
}
|
||||
|
||||
// testFilterClientSessions asserts that we can correctly filter client sessions
|
||||
@@ -352,7 +364,7 @@ func testFilterClientSessions(h *clientDBHarness) {
|
||||
towerSessions := make(map[wtdb.TowerID][]wtdb.SessionID)
|
||||
for i := 0; i < numSessions; i++ {
|
||||
tower := h.newTower()
|
||||
keyIndex := h.nextKeyIndex(tower.ID, blobType)
|
||||
keyIndex := h.nextKeyIndex(tower.ID, blobType, false)
|
||||
sessionID := wtdb.SessionID([33]byte{byte(i)})
|
||||
h.insertSession(&wtdb.ClientSession{
|
||||
ClientSessionBody: wtdb.ClientSessionBody{
|
||||
@@ -499,7 +511,9 @@ func testRemoveTower(h *clientDBHarness) {
|
||||
MaxUpdates: 100,
|
||||
},
|
||||
RewardPkScript: []byte{0x01, 0x02, 0x03},
|
||||
KeyIndex: h.nextKeyIndex(tower.ID, blobType),
|
||||
KeyIndex: h.nextKeyIndex(
|
||||
tower.ID, blobType, false,
|
||||
),
|
||||
},
|
||||
ID: wtdb.SessionID([33]byte{0x01}),
|
||||
}
|
||||
@@ -581,7 +595,7 @@ func testCommitUpdate(h *clientDBHarness) {
|
||||
)
|
||||
|
||||
// Reserve a session key index and insert the session.
|
||||
session.KeyIndex = h.nextKeyIndex(session.TowerID, blobType)
|
||||
session.KeyIndex = h.nextKeyIndex(session.TowerID, blobType, false)
|
||||
h.insertSession(session, nil)
|
||||
|
||||
// Now, try to commit the update that failed initially which should
|
||||
@@ -774,7 +788,7 @@ func testAckUpdate(h *clientDBHarness) {
|
||||
h.ackUpdate(&session.ID, 1, 0, wtdb.ErrClientSessionNotFound)
|
||||
|
||||
// Reserve a session key and insert the client session.
|
||||
session.KeyIndex = h.nextKeyIndex(session.TowerID, blobType)
|
||||
session.KeyIndex = h.nextKeyIndex(session.TowerID, blobType, false)
|
||||
h.insertSession(session, nil)
|
||||
|
||||
// Now, try to ack update 1. This should fail since update 1 was never
|
||||
@@ -1051,7 +1065,9 @@ func (h *clientDBHarness) randSession(t *testing.T,
|
||||
MaxUpdates: maxUpdates,
|
||||
},
|
||||
RewardPkScript: []byte{0x01, 0x02, 0x03},
|
||||
KeyIndex: h.nextKeyIndex(towerID, blobType),
|
||||
KeyIndex: h.nextKeyIndex(
|
||||
towerID, blobType, false,
|
||||
),
|
||||
},
|
||||
ID: id,
|
||||
}
|
||||
|
Reference in New Issue
Block a user