htlcswitch: remove batchReplayBkt

This commit removes the `batchReplayBkt`, as its only effect is to
allow re-forwarding HTLCs during startup.

Normally, for every incoming HTLC, its hashed shared secret is used as
the key under which it is saved in the `sharedHashBucket`, which is
later used to check for replays. In addition, the fwdPkg's ID, which is
SCID+height, is also saved to the `batchReplayBkt`. Since replays of
HTLCs cannot happen at the same commitment height, when a replay
happens the `batchReplayBkt` simply doesn't have this entry, and we
again rely on the `sharedHashBucket` to detect it. This means that most
of the time the `batchReplayBkt` is just a list of SCID+height keys
with empty values.
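
As a concrete illustration of that layout, here is a minimal Go sketch
using in-memory maps as stand-ins for the two buckets; all names and
types here are hypothetical, not lnd's actual kvdb code:

// A minimal sketch (not lnd's actual code): in-memory stand-ins
// illustrating why the shared-secret key alone suffices to catch
// replays.
package main

import (
	"errors"
	"fmt"
)

var ErrReplayedHTLC = errors.New("hashed shared secret seen before: replay")

// decayedLog is a toy stand-in for the on-disk decayed log.
type decayedLog struct {
	// sharedHashes mirrors sharedHashBucket: hashed shared secret -> CLTV.
	sharedHashes map[[20]byte]uint32

	// batchReplays mirrors batchReplayBkt: fwdPkg ID (SCID+height) ->
	// serialized replay set. The replay check below never reads it,
	// which is the redundancy this commit removes.
	batchReplays map[string][]byte
}

// put records one incoming HTLC. A replayed HTLC reuses the same onion
// and therefore the same shared secret, so this single lookup detects
// it no matter which commitment height (batch) it arrives in.
func (d *decayedLog) put(hashedSecret [20]byte, cltv uint32) error {
	if _, ok := d.sharedHashes[hashedSecret]; ok {
		return ErrReplayedHTLC
	}
	d.sharedHashes[hashedSecret] = cltv

	return nil
}

func main() {
	d := &decayedLog{
		sharedHashes: make(map[[20]byte]uint32),
		batchReplays: make(map[string][]byte),
	}

	secret := [20]byte{0x01}
	fmt.Println(d.put(secret, 500_000)) // <nil>: first time seen
	fmt.Println(d.put(secret, 500_000)) // replay detected
}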

The `batchReplayBkt` was previously used as a mechanism to detect
re-forwardings during startup: when re-forwarding HTLCs, we query this
bucket, find the previously stored (empty) replay set, and thus know
this is a re-forwarding, so the check in the `sharedHashBucket` is
skipped. Given that we now use a bool flag to explicitly skip the
replay check, this bucket is no longer useful.
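
For comparison, a matching sketch of the replacement mechanism,
continuing the stand-ins above; the flag name and signature are
illustrative, not lnd's API:

// putMaybeReforward skips the replay check when the caller knows this
// is a startup re-forwarding pass: the HTLC was already accepted once,
// so seeing its shared secret again is expected rather than a replay.
// The explicit flag replaces the old batchReplayBkt lookup.
func (d *decayedLog) putMaybeReforward(hashedSecret [20]byte, cltv uint32,
	isReforward bool) error {

	if isReforward {
		// Refresh the stored CLTV without treating the hit as a
		// replay.
		d.sharedHashes[hashedSecret] = cltv
		return nil
	}

	return d.put(hashedSecret, cltv)
}
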
yyforyongyu
2025-06-11 12:45:53 +08:00
committed by Olaoluwa Osuntokun
parent c8ff379a9c
commit 13308644d0
2 changed files with 5 additions and 46 deletions

htlcswitch/decayedlog.go

@@ -1,7 +1,6 @@
 package htlcswitch
 
 import (
-	"bytes"
 	"encoding/binary"
 	"errors"
 	"fmt"
@@ -24,11 +23,6 @@ var (
 	// bytes of a received HTLC's hashed shared secret as the key and the HTLC's
 	// CLTV expiry as the value.
 	sharedHashBucket = []byte("shared-hash")
-
-	// batchReplayBucket is a bucket that maps batch identifiers to
-	// serialized ReplaySets. This is used to give idempotency in the event
-	// that a batch is processed more than once.
-	batchReplayBucket = []byte("batch-replay")
 )
 
 var (
@@ -138,11 +132,6 @@ func (d *DecayedLog) initBuckets() error {
 			return ErrDecayedLogInit
 		}
 
-		_, err = tx.CreateTopLevelBucket(batchReplayBucket)
-		if err != nil {
-			return ErrDecayedLogInit
-		}
-
 		return nil
 	}, func() {})
 }
@@ -329,11 +318,8 @@ func (d *DecayedLog) Put(hash *sphinx.HashPrefix, cltv uint32) error {
 // PutBatch accepts a pending batch of hashed secret entries to write to disk.
 // Each hashed secret is inserted with a corresponding time value, dictating
 // when the entry will be evicted from the log.
-// NOTE: This method enforces idempotency by writing the replay set obtained
-// from the first attempt for a particular batch ID, and decoding the return
-// value to subsequent calls. For the indices of the replay set to be aligned
-// properly, the batch MUST be constructed identically to the first attempt,
-// pruning will cause the indices to become invalid.
+//
+// TODO(yy): remove this method and use `Put` instead.
 func (d *DecayedLog) PutBatch(b *sphinx.Batch) (*sphinx.ReplaySet, error) {
 	// Since batched boltdb txns may be executed multiple times before
 	// succeeding, we will create a new replay set for each invocation to
@@ -348,25 +334,6 @@ func (d *DecayedLog) PutBatch(b *sphinx.Batch) (*sphinx.ReplaySet, error) {
 			return ErrDecayedLogCorrupted
 		}
 
-		// Load the batch replay bucket, which will be used to either
-		// retrieve the result of previously processing this batch, or
-		// to write the result of this operation.
-		batchReplayBkt := tx.ReadWriteBucket(batchReplayBucket)
-		if batchReplayBkt == nil {
-			return ErrDecayedLogCorrupted
-		}
-
-		// Check for the existence of this batch's id in the replay
-		// bucket. If a non-nil value is found, this indicates that we
-		// have already processed this batch before. We deserialize the
-		// resulting and return it to ensure calls to put batch are
-		// idempotent.
-		replayBytes := batchReplayBkt.Get(b.ID)
-		if replayBytes != nil {
-			replays = sphinx.NewReplaySet()
-			return replays.Decode(bytes.NewReader(replayBytes))
-		}
-
 		// The CLTV will be stored into scratch and then stored into the
 		// sharedHashBucket.
 		var scratch [4]byte
@@ -394,17 +361,7 @@ func (d *DecayedLog) PutBatch(b *sphinx.Batch) (*sphinx.ReplaySet, error) {
 		// batch's construction.
 		replays.Merge(b.ReplaySet)
 
-		// Write the replay set under the batch identifier to the batch
-		// replays bucket. This can be used during recovery to test (1)
-		// that a particular batch was successfully processed and (2)
-		// recover the indexes of the adds that were rejected as
-		// replays.
-		var replayBuf bytes.Buffer
-		if err := replays.Encode(&replayBuf); err != nil {
-			return err
-		}
-
-		return batchReplayBkt.Put(b.ID, replayBuf.Bytes())
+		return nil
 	}); err != nil {
 		return nil, err
 	}

htlcswitch/hop/iterator.go

@@ -784,6 +784,8 @@ func (p *OnionProcessor) DecodeHopIterators(id []byte,
 				b.Val,
 			))
 		})
+
+		// TODO(yy): use `p.router.ProcessOnionPacket` instead.
 		err = tx.ProcessOnionPacket(
 			seqNum, onionPkt, req.RHash, req.IncomingCltv, opts...,
 		)