Mirror of https://github.com/lightningnetwork/lnd.git
Merge pull request #6517 from bottlepay/lookup-htlc
htlcswitch: final settle signal
Commit 81b17ef4fd
@ -16,6 +16,7 @@ import (
|
||||
"github.com/btcsuite/btcd/btcutil"
|
||||
"github.com/btcsuite/btcd/chaincfg/chainhash"
|
||||
"github.com/btcsuite/btcd/wire"
|
||||
"github.com/btcsuite/btcwallet/walletdb"
|
||||
"github.com/lightningnetwork/lnd/htlcswitch/hop"
|
||||
"github.com/lightningnetwork/lnd/input"
|
||||
"github.com/lightningnetwork/lnd/keychain"
|
||||
@ -135,6 +136,26 @@ var (
|
||||
// sent was a revocation and false when it was a commitment signature.
|
||||
// This is nil in the case of new channels with no updates exchanged.
|
||||
lastWasRevokeKey = []byte("last-was-revoke")
|
||||
|
||||
// finalHtlcsBucket contains the htlcs that have been resolved
|
||||
// definitively. Within this bucket, there is a sub-bucket for each
|
||||
// channel. In each channel bucket, the htlc indices are stored along
|
||||
// with the final outcome.
|
||||
//
|
||||
// final-htlcs -> chanID -> htlcIndex -> outcome
|
||||
//
|
||||
// 'outcome' is a byte value that encodes:
|
||||
//
|
||||
//        |  true      false
//  ------+------------------
//  bit 0 |  settled   failed
//  bit 1 |  offchain  onchain
|
||||
//
|
||||
// This bucket is positioned at the root level, because its contents
|
||||
// will be kept independent of the channel lifecycle. This is to avoid
// the situation where a channel force-closes autonomously and the user
// is no longer able to query for htlc outcomes.
|
||||
finalHtlcsBucket = []byte("final-htlcs")
|
||||
)
|
||||
|
||||
var (
|
||||
@ -618,6 +639,20 @@ func (c ChannelStatus) String() string {
|
||||
return statusStr
|
||||
}
|
||||
|
||||
// FinalHtlcByte defines a byte type that encodes information about the final
|
||||
// htlc resolution.
|
||||
type FinalHtlcByte byte
|
||||
|
||||
const (
|
||||
// FinalHtlcSettledBit is the bit that encodes whether the htlc was
|
||||
// settled or failed.
|
||||
FinalHtlcSettledBit FinalHtlcByte = 1 << 0
|
||||
|
||||
// FinalHtlcOffchainBit is the bit that encodes whether the htlc was
|
||||
// resolved offchain or onchain.
|
||||
FinalHtlcOffchainBit FinalHtlcByte = 1 << 1
|
||||
)
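
Taken together with the bucket layout comment above, the outcome byte decodes back into the two booleans as in the following illustrative sketch. The helper name decodeOutcome is hypothetical and not part of this diff; the diff itself inlines the equivalent logic in putFinalHtlc and LookupFinalHtlc below.

// decodeOutcome is a hypothetical helper, shown only to illustrate how a
// stored outcome byte maps onto the settled/offchain flags.
func decodeOutcome(b byte) (settled, offchain bool) {
	settled = b&byte(FinalHtlcSettledBit) != 0
	offchain = b&byte(FinalHtlcOffchainBit) != 0
	return settled, offchain
}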
|
||||
|
||||
// OpenChannel encapsulates the persistent and dynamic state of an open channel
|
||||
// with a remote node. An open channel supports several options for on-disk
|
||||
// serialization depending on the exact context. Full (upon channel creation)
|
||||
@ -1043,6 +1078,26 @@ func fetchChanBucketRw(tx kvdb.RwTx, nodeKey *btcec.PublicKey,
|
||||
return chanBucket, nil
|
||||
}
|
||||
|
||||
func fetchFinalHtlcsBucketRw(tx kvdb.RwTx,
|
||||
chanID lnwire.ShortChannelID) (kvdb.RwBucket, error) {
|
||||
|
||||
finalHtlcsBucket, err := tx.CreateTopLevelBucket(finalHtlcsBucket)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var chanIDBytes [8]byte
|
||||
byteOrder.PutUint64(chanIDBytes[:], chanID.ToUint64())
|
||||
chanBucket, err := finalHtlcsBucket.CreateBucketIfNotExists(
|
||||
chanIDBytes[:],
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return chanBucket, nil
|
||||
}
|
||||
|
||||
// fullSync syncs the contents of an OpenChannel while re-using an existing
|
||||
// database transaction.
|
||||
func (c *OpenChannel) fullSync(tx kvdb.RwTx) error {
|
||||
@ -1739,8 +1794,12 @@ func syncNewChannel(tx kvdb.RwTx, c *OpenChannel, addrs []net.Addr) error {
|
||||
// persisted to be able to produce a valid commit signature if a restart would
|
||||
// occur. This method is to be called when we revoke our prior commitment
|
||||
// state.
|
||||
//
|
||||
// A map is returned of all the htlc resolutions that were locked in by this
|
||||
// commitment. Keys correspond to htlc indices and values indicate whether the
|
||||
// htlc was settled or failed.
|
||||
func (c *OpenChannel) UpdateCommitment(newCommitment *ChannelCommitment,
|
||||
unsignedAckedUpdates []LogUpdate) error {
|
||||
unsignedAckedUpdates []LogUpdate) (map[uint64]bool, error) {
|
||||
|
||||
c.Lock()
|
||||
defer c.Unlock()
|
||||
@ -1749,9 +1808,11 @@ func (c *OpenChannel) UpdateCommitment(newCommitment *ChannelCommitment,
|
||||
// state at all, as it's impossible to do so in a protocol compliant
|
||||
// manner.
|
||||
if c.hasChanStatus(ChanStatusRestored) {
|
||||
return ErrNoRestoredChannelMutation
|
||||
return nil, ErrNoRestoredChannelMutation
|
||||
}
|
||||
|
||||
var finalHtlcs = make(map[uint64]bool)
|
||||
|
||||
err := kvdb.Update(c.Db.backend, func(tx kvdb.RwTx) error {
|
||||
chanBucket, err := fetchChanBucketRw(
|
||||
tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
|
||||
@ -1822,17 +1883,35 @@ func (c *OpenChannel) UpdateCommitment(newCommitment *ChannelCommitment,
|
||||
return err
|
||||
}
|
||||
|
||||
var validUpdates []LogUpdate
|
||||
// Get the bucket where final htlc outcomes are recorded.
|
||||
finalHtlcsBucket, err := fetchFinalHtlcsBucketRw(
|
||||
tx, c.ShortChannelID,
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var unsignedUpdates []LogUpdate
|
||||
for _, upd := range updates {
|
||||
// Filter for updates that are not on our local
|
||||
// commitment.
|
||||
// Gather updates that are not on our local commitment.
|
||||
if upd.LogIndex >= newCommitment.LocalLogIndex {
|
||||
validUpdates = append(validUpdates, upd)
|
||||
unsignedUpdates = append(unsignedUpdates, upd)
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
// The update was locked in. If the update was a
|
||||
// resolution, then store it in the database.
|
||||
err := processFinalHtlc(
|
||||
finalHtlcsBucket, upd, finalHtlcs,
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
var b3 bytes.Buffer
|
||||
err = serializeLogUpdates(&b3, validUpdates)
|
||||
err = serializeLogUpdates(&b3, unsignedUpdates)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to serialize log updates: %v", err)
|
||||
}
|
||||
@ -1843,12 +1922,57 @@ func (c *OpenChannel) UpdateCommitment(newCommitment *ChannelCommitment,
|
||||
}
|
||||
|
||||
return nil
|
||||
}, func() {})
|
||||
}, func() {
|
||||
finalHtlcs = make(map[uint64]bool)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
c.LocalCommitment = *newCommitment
|
||||
|
||||
return finalHtlcs, nil
|
||||
}
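
As an illustration of the new return value, a caller can relay the returned map as final-htlc notifications. This is a hypothetical sketch only, mirroring the htlcswitch link change further down in this diff; notifier and shortChanID stand in for whatever the caller has at hand.

// Hypothetical caller-side sketch, not part of this diff.
finalHtlcs, err := channel.UpdateCommitment(newCommitment, unsignedAckedUpdates)
if err != nil {
	return err
}
for id, settled := range finalHtlcs {
	// These resolutions were locked in on the commitment, so they are
	// off-chain outcomes.
	notifier.NotifyFinalHtlcEvent(
		channeldb.CircuitKey{ChanID: shortChanID, HtlcID: id},
		channeldb.FinalHtlcInfo{Settled: settled, Offchain: true},
	)
}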
|
||||
|
||||
// processFinalHtlc stores a final htlc outcome in the database if signaled via
|
||||
// the supplied log update. An in-memory htlcs map is updated too.
|
||||
func processFinalHtlc(finalHtlcsBucket walletdb.ReadWriteBucket, upd LogUpdate,
|
||||
finalHtlcs map[uint64]bool) error {
|
||||
|
||||
var (
|
||||
settled bool
|
||||
id uint64
|
||||
)
|
||||
|
||||
switch msg := upd.UpdateMsg.(type) {
|
||||
case *lnwire.UpdateFulfillHTLC:
|
||||
settled = true
|
||||
id = msg.ID
|
||||
|
||||
case *lnwire.UpdateFailHTLC:
|
||||
settled = false
|
||||
id = msg.ID
|
||||
|
||||
case *lnwire.UpdateFailMalformedHTLC:
|
||||
settled = false
|
||||
id = msg.ID
|
||||
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
|
||||
err := putFinalHtlc(
|
||||
finalHtlcsBucket, id,
|
||||
FinalHtlcInfo{
|
||||
Settled: settled,
|
||||
Offchain: true,
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
c.LocalCommitment = *newCommitment
|
||||
finalHtlcs[id] = settled
|
||||
|
||||
return nil
|
||||
}
|
||||
@ -2681,6 +2805,36 @@ func (c *OpenChannel) AdvanceCommitChainTail(fwdPkg *FwdPkg,
|
||||
return nil
|
||||
}
|
||||
|
||||
// FinalHtlcInfo contains information about the final outcome of an htlc.
|
||||
type FinalHtlcInfo struct {
|
||||
// Settled is true if the htlc was settled. If false, the htlc was
|
||||
// failed.
|
||||
Settled bool
|
||||
|
||||
// Offchain indicates whether the htlc was resolved off-chain or
|
||||
// on-chain.
|
||||
Offchain bool
|
||||
}
|
||||
|
||||
// putFinalHtlc writes the final htlc outcome to the database. Additionally it
|
||||
// records whether the htlc was resolved off-chain or on-chain.
|
||||
func putFinalHtlc(finalHtlcsBucket kvdb.RwBucket, id uint64,
|
||||
info FinalHtlcInfo) error {
|
||||
|
||||
var key [8]byte
|
||||
byteOrder.PutUint64(key[:], id)
|
||||
|
||||
var finalHtlcByte FinalHtlcByte
|
||||
if info.Settled {
|
||||
finalHtlcByte |= FinalHtlcSettledBit
|
||||
}
|
||||
if info.Offchain {
|
||||
finalHtlcByte |= FinalHtlcOffchainBit
|
||||
}
|
||||
|
||||
return finalHtlcsBucket.Put(key[:], []byte{byte(finalHtlcByte)})
|
||||
}
|
||||
|
||||
// NextLocalHtlcIndex returns the next unallocated local htlc index. To ensure
|
||||
// this always returns the next index that has been not been allocated, this
|
||||
// will first try to examine any pending commitments, before falling back to the
|
||||
|
@ -641,7 +641,7 @@ func TestChannelStateTransition(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
err = channel.UpdateCommitment(&commitment, unsignedAckedUpdates)
|
||||
_, err = channel.UpdateCommitment(&commitment, unsignedAckedUpdates)
|
||||
require.NoError(t, err, "unable to update commitment")
|
||||
|
||||
// Assert that update is correctly written to the database.
|
||||
@ -1457,3 +1457,61 @@ func TestKeyLocatorEncoding(t *testing.T) {
|
||||
// version are equal.
|
||||
require.Equal(t, keyLoc, decodedKeyLoc)
|
||||
}
|
||||
|
||||
// TestFinalHtlcs tests final htlc storage and retrieval.
|
||||
func TestFinalHtlcs(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
fullDB, err := MakeTestDB(t)
|
||||
require.NoError(t, err, "unable to make test database")
|
||||
|
||||
cdb := fullDB.ChannelStateDB()
|
||||
|
||||
chanID := lnwire.ShortChannelID{
|
||||
BlockHeight: 1,
|
||||
TxIndex: 2,
|
||||
TxPosition: 3,
|
||||
}
|
||||
|
||||
// Test unknown htlc lookup.
|
||||
const unknownHtlcID = 999
|
||||
|
||||
_, err = cdb.LookupFinalHtlc(chanID, unknownHtlcID)
|
||||
require.ErrorIs(t, err, ErrHtlcUnknown)
|
||||
|
||||
// Test offchain final htlcs.
|
||||
const offchainHtlcID = 1
|
||||
|
||||
err = kvdb.Update(cdb.backend, func(tx kvdb.RwTx) error {
|
||||
bucket, err := fetchFinalHtlcsBucketRw(
|
||||
tx, chanID,
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
return putFinalHtlc(bucket, offchainHtlcID, FinalHtlcInfo{
|
||||
Settled: true,
|
||||
Offchain: true,
|
||||
})
|
||||
}, func() {})
|
||||
require.NoError(t, err)
|
||||
|
||||
info, err := cdb.LookupFinalHtlc(chanID, offchainHtlcID)
|
||||
require.NoError(t, err)
|
||||
require.True(t, info.Settled)
|
||||
require.True(t, info.Offchain)
|
||||
|
||||
// Test onchain final htlcs.
|
||||
const onchainHtlcID = 2
|
||||
|
||||
err = cdb.PutOnchainFinalHtlcOutcome(chanID, onchainHtlcID, true)
|
||||
require.NoError(t, err)
|
||||
|
||||
info, err = cdb.LookupFinalHtlc(chanID, onchainHtlcID)
|
||||
require.NoError(t, err)
|
||||
require.True(t, info.Settled)
|
||||
require.False(t, info.Offchain)
|
||||
|
||||
// Test unknown htlc lookup for existing channel.
|
||||
_, err = cdb.LookupFinalHtlc(chanID, unknownHtlcID)
|
||||
require.ErrorIs(t, err, ErrHtlcUnknown)
|
||||
}
|
||||
|
channeldb/db.go
@ -39,6 +39,16 @@ var (
|
||||
// ErrDryRunMigrationOK signals that a migration executed successfully,
|
||||
// but we intentionally did not commit the result.
|
||||
ErrDryRunMigrationOK = errors.New("dry run migration successful")
|
||||
|
||||
// ErrFinalHtlcsBucketNotFound signals that the top-level final htlcs
|
||||
// bucket does not exist.
|
||||
ErrFinalHtlcsBucketNotFound = errors.New("final htlcs bucket not " +
|
||||
"found")
|
||||
|
||||
// ErrFinalChannelBucketNotFound signals that the channel bucket for
|
||||
// final htlc outcomes does not exist.
|
||||
ErrFinalChannelBucketNotFound = errors.New("final htlcs channel " +
|
||||
"bucket not found")
|
||||
)
|
||||
|
||||
// migration is a function which takes a prior outdated version of the database
|
||||
@ -1652,6 +1662,101 @@ func (c *ChannelStateDB) FetchHistoricalChannel(outPoint *wire.OutPoint) (
|
||||
return channel, nil
|
||||
}
|
||||
|
||||
func fetchFinalHtlcsBucket(tx kvdb.RTx,
|
||||
chanID lnwire.ShortChannelID) (kvdb.RBucket, error) {
|
||||
|
||||
finalHtlcsBucket := tx.ReadBucket(finalHtlcsBucket)
|
||||
if finalHtlcsBucket == nil {
|
||||
return nil, ErrFinalHtlcsBucketNotFound
|
||||
}
|
||||
|
||||
var chanIDBytes [8]byte
|
||||
byteOrder.PutUint64(chanIDBytes[:], chanID.ToUint64())
|
||||
|
||||
chanBucket := finalHtlcsBucket.NestedReadBucket(chanIDBytes[:])
|
||||
if chanBucket == nil {
|
||||
return nil, ErrFinalChannelBucketNotFound
|
||||
}
|
||||
|
||||
return chanBucket, nil
|
||||
}
|
||||
|
||||
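// ErrHtlcUnknown is returned by LookupFinalHtlc when no final outcome has
// been recorded yet for the queried htlc.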
var ErrHtlcUnknown = errors.New("htlc unknown")
|
||||
|
||||
// LookupFinalHtlc retrieves a final htlc resolution from the database. If the
|
||||
// htlc has no final resolution yet, ErrHtlcUnknown is returned.
|
||||
func (c *ChannelStateDB) LookupFinalHtlc(chanID lnwire.ShortChannelID,
|
||||
htlcIndex uint64) (*FinalHtlcInfo, error) {
|
||||
|
||||
var idBytes [8]byte
|
||||
byteOrder.PutUint64(idBytes[:], htlcIndex)
|
||||
|
||||
var settledByte byte
|
||||
|
||||
err := kvdb.View(c.backend, func(tx kvdb.RTx) error {
|
||||
finalHtlcsBucket, err := fetchFinalHtlcsBucket(
|
||||
tx, chanID,
|
||||
)
|
||||
switch {
|
||||
case errors.Is(err, ErrFinalHtlcsBucketNotFound):
|
||||
fallthrough
|
||||
|
||||
case errors.Is(err, ErrFinalChannelBucketNotFound):
|
||||
return ErrHtlcUnknown
|
||||
|
||||
case err != nil:
|
||||
return fmt.Errorf("cannot fetch final htlcs bucket: %w",
|
||||
err)
|
||||
}
|
||||
|
||||
value := finalHtlcsBucket.Get(idBytes[:])
|
||||
if value == nil {
|
||||
return ErrHtlcUnknown
|
||||
}
|
||||
|
||||
if len(value) != 1 {
|
||||
return errors.New("unexpected final htlc value length")
|
||||
}
|
||||
|
||||
settledByte = value[0]
|
||||
|
||||
return nil
|
||||
}, func() {
|
||||
settledByte = 0
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
info := FinalHtlcInfo{
|
||||
Settled: settledByte&byte(FinalHtlcSettledBit) != 0,
|
||||
Offchain: settledByte&byte(FinalHtlcOffchainBit) != 0,
|
||||
}
|
||||
|
||||
return &info, nil
|
||||
}
|
||||
|
||||
// PutOnchainFinalHtlcOutcome stores the final on-chain outcome of an htlc in
|
||||
// the database.
|
||||
func (c *ChannelStateDB) PutOnchainFinalHtlcOutcome(
|
||||
chanID lnwire.ShortChannelID, htlcID uint64, settled bool) error {
|
||||
|
||||
return kvdb.Update(c.backend, func(tx kvdb.RwTx) error {
|
||||
finalHtlcsBucket, err := fetchFinalHtlcsBucketRw(tx, chanID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return putFinalHtlc(
|
||||
finalHtlcsBucket, htlcID,
|
||||
FinalHtlcInfo{
|
||||
Settled: settled,
|
||||
Offchain: false,
|
||||
},
|
||||
)
|
||||
}, func() {})
|
||||
}
|
||||
|
||||
// MakeTestDB creates a new instance of the ChannelDB for testing purposes.
|
||||
// A callback which cleans up the created temporary directories is also
|
||||
// returned and intended to be executed after the test completes.
|
||||
|
@ -355,7 +355,7 @@ func TestRestoreChannelShells(t *testing.T) {
|
||||
// Ensure that it isn't possible to modify the commitment state machine
|
||||
// of this restored channel.
|
||||
channel := nodeChans[0]
|
||||
err = channel.UpdateCommitment(nil, nil)
|
||||
_, err = channel.UpdateCommitment(nil, nil)
|
||||
if err != ErrNoRestoredChannelMutation {
|
||||
t.Fatalf("able to mutate restored channel")
|
||||
}
|
||||
|
@ -2451,7 +2451,7 @@ func forceStateTransition(chanA, chanB *lnwallet.LightningChannel) error {
|
||||
return err
|
||||
}
|
||||
|
||||
bobRevocation, _, err := chanB.RevokeCurrentCommitment()
|
||||
bobRevocation, _, _, err := chanB.RevokeCurrentCommitment()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -2468,7 +2468,7 @@ func forceStateTransition(chanA, chanB *lnwallet.LightningChannel) error {
|
||||
return err
|
||||
}
|
||||
|
||||
aliceRevocation, _, err := chanA.RevokeCurrentCommitment()
|
||||
aliceRevocation, _, _, err := chanA.RevokeCurrentCommitment()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -187,6 +187,14 @@ type ChainArbitratorConfig struct {
|
||||
// complete.
|
||||
SubscribeBreachComplete func(op *wire.OutPoint, c chan struct{}) (
|
||||
bool, error)
|
||||
|
||||
// PutFinalHtlcOutcome stores the final outcome of an htlc in the
|
||||
// database.
|
||||
PutFinalHtlcOutcome func(chanId lnwire.ShortChannelID,
|
||||
htlcId uint64, settled bool) error
|
||||
|
||||
// HtlcNotifier is an interface that htlc events are sent to.
|
||||
HtlcNotifier HtlcNotifier
|
||||
}
|
||||
|
||||
// ChainArbitrator is a sub-system that oversees the on-chain resolution of all
|
||||
|
@ -1460,6 +1460,10 @@ const (
|
||||
// other party time it out, or eventually learn of the pre-image, in
|
||||
// which case we'll claim on chain.
|
||||
HtlcIncomingWatchAction = 5
|
||||
|
||||
// HtlcIncomingDustFinalAction indicates that we should mark an incoming
|
||||
// dust htlc as final because it can't be claimed on-chain.
|
||||
HtlcIncomingDustFinalAction = 6
|
||||
)
|
||||
|
||||
// String returns a human readable string describing a chain action.
|
||||
@ -1483,6 +1487,9 @@ func (c ChainAction) String() string {
|
||||
case HtlcIncomingWatchAction:
|
||||
return "HtlcIncomingWatchAction"
|
||||
|
||||
case HtlcIncomingDustFinalAction:
|
||||
return "HtlcIncomingDustFinalAction"
|
||||
|
||||
default:
|
||||
return "<unknown action>"
|
||||
}
|
||||
@ -1698,6 +1705,10 @@ func (c *ChannelArbitrator) checkCommitChainActions(height uint32,
|
||||
"needed for incoming dust htlc=%x",
|
||||
c.cfg.ChanPoint, htlc.RHash[:])
|
||||
|
||||
actionMap[HtlcIncomingDustFinalAction] = append(
|
||||
actionMap[HtlcIncomingDustFinalAction], htlc,
|
||||
)
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
@ -2213,6 +2224,36 @@ func (c *ChannelArbitrator) prepContractResolutions(
|
||||
htlcResolvers = append(htlcResolvers, resolver)
|
||||
}
|
||||
|
||||
// We've lost an htlc because it isn't manifested on the
|
||||
// commitment transaction that closed the channel.
|
||||
case HtlcIncomingDustFinalAction:
|
||||
for _, htlc := range htlcs {
|
||||
htlc := htlc
|
||||
|
||||
key := channeldb.CircuitKey{
|
||||
ChanID: c.cfg.ShortChanID,
|
||||
HtlcID: htlc.HtlcIndex,
|
||||
}
|
||||
|
||||
// Mark this dust htlc as final failed.
|
||||
chainArbCfg := c.cfg.ChainArbitratorConfig
|
||||
err := chainArbCfg.PutFinalHtlcOutcome(
|
||||
key.ChanID, key.HtlcID, false,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// Send notification.
|
||||
chainArbCfg.HtlcNotifier.NotifyFinalHtlcEvent(
|
||||
key,
|
||||
channeldb.FinalHtlcInfo{
|
||||
Settled: false,
|
||||
Offchain: false,
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
// Finally, if this is an outgoing HTLC we've sent, then we'll
|
||||
// launch a resolver to watch for the pre-image (and settle
|
||||
// backwards), or just timeout.
|
||||
|
@ -206,6 +206,8 @@ type chanArbTestCtx struct {
|
||||
|
||||
breachSubscribed chan struct{}
|
||||
breachResolutionChan chan struct{}
|
||||
|
||||
finalHtlcs map[uint64]bool
|
||||
}
|
||||
|
||||
func (c *chanArbTestCtx) CleanUp() {
|
||||
@ -306,6 +308,7 @@ func createTestChannelArbitrator(t *testing.T, log ArbitratorLog,
|
||||
|
||||
chanArbCtx := &chanArbTestCtx{
|
||||
breachSubscribed: make(chan struct{}),
|
||||
finalHtlcs: make(map[uint64]bool),
|
||||
}
|
||||
|
||||
chanPoint := wire.OutPoint{}
|
||||
@ -358,8 +361,16 @@ func createTestChannelArbitrator(t *testing.T, log ArbitratorLog,
|
||||
chanArbCtx.breachSubscribed <- struct{}{}
|
||||
return false, nil
|
||||
},
|
||||
Clock: clock.NewDefaultClock(),
|
||||
Sweeper: mockSweeper,
|
||||
Clock: clock.NewDefaultClock(),
|
||||
Sweeper: mockSweeper,
|
||||
HtlcNotifier: &mockHTLCNotifier{},
|
||||
PutFinalHtlcOutcome: func(chanId lnwire.ShortChannelID,
|
||||
htlcId uint64, settled bool) error {
|
||||
|
||||
chanArbCtx.finalHtlcs[htlcId] = settled
|
||||
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
// We'll use the resolvedChan to synchronize on call to
|
||||
@ -966,6 +977,12 @@ func TestChannelArbitratorLocalForceClosePendingHtlc(t *testing.T) {
|
||||
t.Fatalf("unable to stop chan arb: %v", err)
|
||||
}
|
||||
|
||||
// Assert that a final resolution was stored for the incoming dust htlc.
|
||||
expectedFinalHtlcs := map[uint64]bool{
|
||||
incomingDustHtlc.HtlcIndex: false,
|
||||
}
|
||||
require.Equal(t, expectedFinalHtlcs, chanArbCtx.finalHtlcs)
|
||||
|
||||
// We'll now re-create the resolver, notice that we use the existing
|
||||
// arbLog so it carries over the same on-disk state.
|
||||
chanArbCtxNew, err := chanArbCtx.Restart(nil)
|
||||
|
@ -50,6 +50,30 @@ func newIncomingContestResolver(
|
||||
}
|
||||
}
|
||||
|
||||
func (h *htlcIncomingContestResolver) processFinalHtlcFail() error {
|
||||
// Mark the htlc as final failed.
|
||||
err := h.ChainArbitratorConfig.PutFinalHtlcOutcome(
|
||||
h.ChannelArbitratorConfig.ShortChanID, h.htlc.HtlcIndex, false,
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Send notification.
|
||||
h.ChainArbitratorConfig.HtlcNotifier.NotifyFinalHtlcEvent(
|
||||
channeldb.CircuitKey{
|
||||
ChanID: h.ShortChanID,
|
||||
HtlcID: h.htlc.HtlcIndex,
|
||||
},
|
||||
channeldb.FinalHtlcInfo{
|
||||
Settled: false,
|
||||
Offchain: false,
|
||||
},
|
||||
)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Resolve attempts to resolve this contract. As we don't yet know of the
|
||||
// preimage for the contract, we'll wait for one of two things to happen:
|
||||
//
|
||||
@ -83,6 +107,10 @@ func (h *htlcIncomingContestResolver) Resolve() (ContractResolver, error) {
|
||||
// link has run.
|
||||
h.resolved = true
|
||||
|
||||
if err := h.processFinalHtlcFail(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// We write a report to disk that indicates we could not decode
|
||||
// the htlc.
|
||||
resReport := h.report().resolverReport(
|
||||
@ -129,6 +157,10 @@ func (h *htlcIncomingContestResolver) Resolve() (ContractResolver, error) {
|
||||
h.htlcExpiry, currentHeight)
|
||||
h.resolved = true
|
||||
|
||||
if err := h.processFinalHtlcFail(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Finally, get our report and checkpoint our resolver with a
|
||||
// timeout outcome report.
|
||||
report := h.report().resolverReport(
|
||||
@ -201,6 +233,10 @@ func (h *htlcIncomingContestResolver) Resolve() (ContractResolver, error) {
|
||||
|
||||
h.resolved = true
|
||||
|
||||
if err := h.processFinalHtlcFail(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Checkpoint our resolver with an abandoned outcome
|
||||
// because we take no further action on this htlc.
|
||||
report := h.report().resolverReport(
|
||||
@ -343,6 +379,10 @@ func (h *htlcIncomingContestResolver) Resolve() (ContractResolver, error) {
|
||||
h.htlcExpiry, currentHeight)
|
||||
h.resolved = true
|
||||
|
||||
if err := h.processFinalHtlcFail(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
report := h.report().resolverReport(
|
||||
nil,
|
||||
channeldb.ResolverTypeIncomingHtlc,
|
||||
|
@ -16,6 +16,7 @@ import (
|
||||
"github.com/lightningnetwork/lnd/lntypes"
|
||||
"github.com/lightningnetwork/lnd/lnwallet"
|
||||
"github.com/lightningnetwork/lnd/lnwire"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
const (
|
||||
@ -298,14 +299,15 @@ func (o *mockOnionProcessor) ReconstructHopIterator(r io.Reader, rHash []byte) (
|
||||
}
|
||||
|
||||
type incomingResolverTestContext struct {
|
||||
registry *mockRegistry
|
||||
witnessBeacon *mockWitnessBeacon
|
||||
resolver *htlcIncomingContestResolver
|
||||
notifier *mock.ChainNotifier
|
||||
onionProcessor *mockOnionProcessor
|
||||
resolveErr chan error
|
||||
nextResolver ContractResolver
|
||||
t *testing.T
|
||||
registry *mockRegistry
|
||||
witnessBeacon *mockWitnessBeacon
|
||||
resolver *htlcIncomingContestResolver
|
||||
notifier *mock.ChainNotifier
|
||||
onionProcessor *mockOnionProcessor
|
||||
resolveErr chan error
|
||||
nextResolver ContractResolver
|
||||
finalHtlcOutcomeStored bool
|
||||
t *testing.T
|
||||
}
|
||||
|
||||
func newIncomingResolverTestContext(t *testing.T, isExit bool) *incomingResolverTestContext {
|
||||
@ -323,12 +325,30 @@ func newIncomingResolverTestContext(t *testing.T, isExit bool) *incomingResolver
|
||||
|
||||
checkPointChan := make(chan struct{}, 1)
|
||||
|
||||
c := &incomingResolverTestContext{
|
||||
registry: registry,
|
||||
witnessBeacon: witnessBeacon,
|
||||
notifier: notifier,
|
||||
onionProcessor: onionProcessor,
|
||||
t: t,
|
||||
}
|
||||
|
||||
htlcNotifier := &mockHTLCNotifier{}
|
||||
|
||||
chainCfg := ChannelArbitratorConfig{
|
||||
ChainArbitratorConfig: ChainArbitratorConfig{
|
||||
Notifier: notifier,
|
||||
PreimageDB: witnessBeacon,
|
||||
Registry: registry,
|
||||
OnionProcessor: onionProcessor,
|
||||
PutFinalHtlcOutcome: func(chanId lnwire.ShortChannelID,
|
||||
htlcId uint64, settled bool) error {
|
||||
|
||||
c.finalHtlcOutcomeStored = true
|
||||
|
||||
return nil
|
||||
},
|
||||
HtlcNotifier: htlcNotifier,
|
||||
},
|
||||
PutResolverReport: func(_ kvdb.RwTx,
|
||||
_ *channeldb.ResolverReport) error {
|
||||
@ -346,7 +366,8 @@ func newIncomingResolverTestContext(t *testing.T, isExit bool) *incomingResolver
|
||||
return nil
|
||||
},
|
||||
}
|
||||
resolver := &htlcIncomingContestResolver{
|
||||
|
||||
c.resolver = &htlcIncomingContestResolver{
|
||||
htlcSuccessResolver: &htlcSuccessResolver{
|
||||
contractResolverKit: *newContractResolverKit(cfg),
|
||||
htlcResolution: lnwallet.IncomingHtlcResolution{},
|
||||
@ -359,14 +380,7 @@ func newIncomingResolverTestContext(t *testing.T, isExit bool) *incomingResolver
|
||||
htlcExpiry: testHtlcExpiry,
|
||||
}
|
||||
|
||||
return &incomingResolverTestContext{
|
||||
registry: registry,
|
||||
witnessBeacon: witnessBeacon,
|
||||
resolver: resolver,
|
||||
notifier: notifier,
|
||||
onionProcessor: onionProcessor,
|
||||
t: t,
|
||||
}
|
||||
return c
|
||||
}
|
||||
|
||||
func (i *incomingResolverTestContext) resolve() {
|
||||
@ -400,6 +414,10 @@ func (i *incomingResolverTestContext) waitForResult(expectSuccessRes bool) {
|
||||
if i.nextResolver != nil {
|
||||
i.t.Fatal("expected no next resolver")
|
||||
}
|
||||
|
||||
require.True(i.t, i.finalHtlcOutcomeStored,
|
||||
"expected final htlc outcome to be stored")
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -469,6 +469,26 @@ func (h *htlcSuccessResolver) resolveRemoteCommitOutput() (
|
||||
func (h *htlcSuccessResolver) checkpointClaim(spendTx *chainhash.Hash,
|
||||
outcome channeldb.ResolverOutcome) error {
|
||||
|
||||
// Mark the htlc as final settled.
|
||||
err := h.ChainArbitratorConfig.PutFinalHtlcOutcome(
|
||||
h.ChannelArbitratorConfig.ShortChanID, h.htlc.HtlcIndex, true,
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Send notification.
|
||||
h.ChainArbitratorConfig.HtlcNotifier.NotifyFinalHtlcEvent(
|
||||
channeldb.CircuitKey{
|
||||
ChanID: h.ShortChanID,
|
||||
HtlcID: h.htlc.HtlcIndex,
|
||||
},
|
||||
channeldb.FinalHtlcInfo{
|
||||
Settled: true,
|
||||
Offchain: false,
|
||||
},
|
||||
)
|
||||
|
||||
// Create a resolver report for claiming of the htlc itself.
|
||||
amt := btcutil.Amount(h.htlcResolution.SweepSignDesc.Output.Value)
|
||||
reports := []*channeldb.ResolverReport{
|
||||
|
@ -31,6 +31,8 @@ type htlcResolverTestContext struct {
|
||||
resolverResultChan chan resolveResult
|
||||
resolutionChan chan ResolutionMsg
|
||||
|
||||
finalHtlcOutcomeStored bool
|
||||
|
||||
t *testing.T
|
||||
}
|
||||
|
||||
@ -51,6 +53,8 @@ func newHtlcResolverTestContext(t *testing.T,
|
||||
t: t,
|
||||
}
|
||||
|
||||
htlcNotifier := &mockHTLCNotifier{}
|
||||
|
||||
witnessBeacon := newMockWitnessBeacon()
|
||||
chainCfg := ChannelArbitratorConfig{
|
||||
ChainArbitratorConfig: ChainArbitratorConfig{
|
||||
@ -75,6 +79,14 @@ func newHtlcResolverTestContext(t *testing.T,
|
||||
testCtx.resolutionChan <- msgs[0]
|
||||
return nil
|
||||
},
|
||||
PutFinalHtlcOutcome: func(chanId lnwire.ShortChannelID,
|
||||
htlcId uint64, settled bool) error {
|
||||
|
||||
testCtx.finalHtlcOutcomeStored = true
|
||||
|
||||
return nil
|
||||
},
|
||||
HtlcNotifier: htlcNotifier,
|
||||
},
|
||||
PutResolverReport: func(_ kvdb.RwTx,
|
||||
report *channeldb.ResolverReport) error {
|
||||
@ -186,6 +198,7 @@ func TestHtlcSuccessSingleStage(t *testing.T) {
|
||||
reports: []*channeldb.ResolverReport{
|
||||
claim,
|
||||
},
|
||||
finalHtlcStored: true,
|
||||
},
|
||||
}
|
||||
|
||||
@ -269,6 +282,7 @@ func TestHtlcSuccessSecondStageResolution(t *testing.T) {
|
||||
secondStage,
|
||||
firstStage,
|
||||
},
|
||||
finalHtlcStored: true,
|
||||
},
|
||||
}
|
||||
|
||||
@ -450,6 +464,7 @@ func TestHtlcSuccessSecondStageResolutionSweeper(t *testing.T) {
|
||||
secondStage,
|
||||
firstStage,
|
||||
},
|
||||
finalHtlcStored: true,
|
||||
},
|
||||
}
|
||||
|
||||
@ -465,9 +480,10 @@ type checkpoint struct {
|
||||
preCheckpoint func(*htlcResolverTestContext, bool) error
|
||||
|
||||
// data we expect the resolver to be checkpointed with next.
|
||||
incubating bool
|
||||
resolved bool
|
||||
reports []*channeldb.ResolverReport
|
||||
incubating bool
|
||||
resolved bool
|
||||
reports []*channeldb.ResolverReport
|
||||
finalHtlcStored bool
|
||||
}
|
||||
|
||||
// testHtlcSuccess tests resolution of a success resolver. It takes a list of
|
||||
@ -573,6 +589,11 @@ func runFromCheckpoint(t *testing.T, ctx *htlcResolverTestContext,
|
||||
}
|
||||
}
|
||||
|
||||
// Check that the final htlc outcome is stored.
|
||||
if cp.finalHtlcStored != ctx.finalHtlcOutcomeStored {
|
||||
t.Fatal("final htlc store expectation failed")
|
||||
}
|
||||
|
||||
// Finally encode the resolver, and store it for later use.
|
||||
b := bytes.Buffer{}
|
||||
if err := resolver.Encode(&b); err != nil {
|
||||
|
@ -65,3 +65,11 @@ type UtxoSweeper interface {
|
||||
UpdateParams(input wire.OutPoint, params sweep.ParamsUpdate) (
|
||||
chan sweep.Result, error)
|
||||
}
|
||||
|
||||
// HtlcNotifier defines the notification functions that contract court requires.
|
||||
type HtlcNotifier interface {
|
||||
// NotifyFinalHtlcEvent notifies the HtlcNotifier that the final outcome
|
||||
// for an htlc has been determined.
|
||||
NotifyFinalHtlcEvent(key channeldb.CircuitKey,
|
||||
info channeldb.FinalHtlcInfo)
|
||||
}
|
||||
|
contractcourt/mock_htlcnotifier_test.go (new file)
@ -0,0 +1,11 @@
|
||||
package contractcourt
|
||||
|
||||
import "github.com/lightningnetwork/lnd/channeldb"
|
||||
|
||||
type mockHTLCNotifier struct {
|
||||
HtlcNotifier
|
||||
}
|
||||
|
||||
func (m *mockHTLCNotifier) NotifyFinalHtlcEvent(key channeldb.CircuitKey,
|
||||
info channeldb.FinalHtlcInfo) { // nolint:whitespace
|
||||
}
|
@ -51,6 +51,16 @@
|
||||
[expose tlv data](https://github.com/lightningnetwork/lnd/pull/7085) that is
|
||||
broadcast over the gossip network.
|
||||
|
||||
* [Add new HTLC notifier event and lookup
|
||||
RPC](https://github.com/lightningnetwork/lnd/pull/6517) for the final
|
||||
settlement of incoming HTLCs. This allows applications to wait for the HTLC to
|
||||
actually disappear from all valid commitment transactions, rather than assume
|
||||
that it will. With the new extensions, situations can be avoided where the
|
||||
application considers an HTLC settled, but in reality the HTLC has timed out.
|
||||
|
||||
Final resolution data will only be available for htlcs that are resolved
|
||||
after upgrading lnd.
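
As a rough client-side sketch of the new lookup RPC (not part of this diff; conn, chanID and htlcIndex are placeholders, and the rpcserver wiring lives in the suppressed lnrpc diff further below):

// Poll the final outcome of an htlc via the new LookupHtlc RPC.
client := lnrpc.NewLightningClient(conn)
resp, err := client.LookupHtlc(ctx, &lnrpc.LookupHtlcRequest{
	ChanId:    chanID,    // short channel id of the incoming channel
	HtlcIndex: htlcIndex, // htlc index on that channel
})
if err != nil {
	// Until the htlc has a final resolution the lookup fails; channeldb
	// signals this with ErrHtlcUnknown (the exact RPC error mapping is
	// not shown in this diff).
	return err
}
log.Printf("final outcome: settled=%v offchain=%v", resp.Settled, resp.Offchain)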
|
||||
|
||||
## Wallet
|
||||
|
||||
* [Allows Taproot public keys and tap scripts to be imported as watch-only
|
||||
|
@ -294,6 +294,18 @@ type SettleEvent struct {
|
||||
Timestamp time.Time
|
||||
}
|
||||
|
||||
type FinalHtlcEvent struct {
|
||||
CircuitKey
|
||||
|
||||
Settled bool
|
||||
|
||||
// Offchain indicates whether the htlc was resolved off-chain.
|
||||
Offchain bool
|
||||
|
||||
// Timestamp is the time when this htlc was settled.
|
||||
Timestamp time.Time
|
||||
}
|
||||
|
||||
// NotifyForwardingEvent notifies the HtlcNotifier that a htlc has been
|
||||
// forwarded.
|
||||
//
|
||||
@ -382,6 +394,27 @@ func (h *HtlcNotifier) NotifySettleEvent(key HtlcKey,
|
||||
}
|
||||
}
|
||||
|
||||
// NotifyFinalHtlcEvent notifies the HtlcNotifier that the final outcome for an
|
||||
// htlc has been determined.
|
||||
//
|
||||
// Note this is part of the htlcNotifier interface.
|
||||
func (h *HtlcNotifier) NotifyFinalHtlcEvent(key channeldb.CircuitKey,
|
||||
info channeldb.FinalHtlcInfo) {
|
||||
|
||||
event := &FinalHtlcEvent{
|
||||
CircuitKey: key,
|
||||
Settled: info.Settled,
|
||||
Offchain: info.Offchain,
|
||||
Timestamp: h.now(),
|
||||
}
|
||||
|
||||
log.Tracef("Notifying final settle event: %v", key)
|
||||
|
||||
if err := h.ntfnServer.SendUpdate(event); err != nil {
|
||||
log.Warnf("Unable to send settle event: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// newHtlcKey returns a htlc key for the packet provided. If the packet
|
||||
// has a zero incoming channel ID, the packet is for one of our own sends,
|
||||
// which has the payment id stashed in the incoming htlc id. If this is the
|
||||
|
@ -370,4 +370,9 @@ type htlcNotifier interface {
|
||||
// settled.
|
||||
NotifySettleEvent(key HtlcKey, preimage lntypes.Preimage,
|
||||
eventType HtlcEventType)
|
||||
|
||||
// NotifyFinalHtlcEvent notifies the HtlcNotifier that the final outcome
|
||||
// for an htlc has been determined.
|
||||
NotifyFinalHtlcEvent(key channeldb.CircuitKey,
|
||||
info channeldb.FinalHtlcInfo)
|
||||
}
|
||||
|
@ -1920,13 +1920,29 @@ func (l *channelLink) handleUpstreamMsg(msg lnwire.Message) {
|
||||
// As we've just accepted a new state, we'll now
|
||||
// immediately send the remote peer a revocation for our prior
|
||||
// state.
|
||||
nextRevocation, currentHtlcs, err := l.channel.RevokeCurrentCommitment()
|
||||
nextRevocation, currentHtlcs, finalHTLCs, err :=
|
||||
l.channel.RevokeCurrentCommitment()
|
||||
if err != nil {
|
||||
l.log.Errorf("unable to revoke commitment: %v", err)
|
||||
return
|
||||
}
|
||||
l.cfg.Peer.SendMessage(false, nextRevocation)
|
||||
|
||||
// Notify about the incoming htlcs whose resolutions were just
// locked in.
|
||||
for id, settled := range finalHTLCs {
|
||||
l.cfg.HtlcNotifier.NotifyFinalHtlcEvent(
|
||||
channeldb.CircuitKey{
|
||||
ChanID: l.shortChanID,
|
||||
HtlcID: id,
|
||||
},
|
||||
channeldb.FinalHtlcInfo{
|
||||
Settled: settled,
|
||||
Offchain: true,
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
// Since we just revoked our commitment, we may have a new set
|
||||
// of HTLCs on our commitment, so we'll send them using our
|
||||
// function closure NotifyContractUpdate.
|
||||
|
@ -179,7 +179,7 @@ func (l *linkTestContext) receiveCommitSigAlice(expHtlcs int) *lnwire.CommitSig
|
||||
func (l *linkTestContext) sendRevAndAckBobToAlice() {
|
||||
l.t.Helper()
|
||||
|
||||
rev, _, err := l.bobChannel.RevokeCurrentCommitment()
|
||||
rev, _, _, err := l.bobChannel.RevokeCurrentCommitment()
|
||||
if err != nil {
|
||||
l.t.Fatalf("unable to revoke commitment: %v", err)
|
||||
}
|
||||
|
@ -1958,7 +1958,7 @@ func handleStateUpdate(link *channelLink,
|
||||
return err
|
||||
}
|
||||
|
||||
remoteRev, _, err := remoteChannel.RevokeCurrentCommitment()
|
||||
remoteRev, _, _, err := remoteChannel.RevokeCurrentCommitment()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -2062,7 +2062,7 @@ func updateState(batchTick chan time.Time, link *channelLink,
|
||||
}
|
||||
|
||||
// Lastly, send a revocation back to the link.
|
||||
remoteRev, _, err := remoteChannel.RevokeCurrentCommitment()
|
||||
remoteRev, _, _, err := remoteChannel.RevokeCurrentCommitment()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -3134,7 +3134,7 @@ func TestChannelLinkTrimCircuitsRemoteCommit(t *testing.T) {
|
||||
|
||||
// Next, revoke Bob's current commitment and send it to Alice so that we
|
||||
// can test that Alice's circuits aren't trimmed.
|
||||
rev, _, err := bobChan.RevokeCurrentCommitment()
|
||||
rev, _, _, err := bobChan.RevokeCurrentCommitment()
|
||||
require.NoError(t, err, "unable to revoke current commitment")
|
||||
|
||||
_, _, _, _, err = alice.channel.ReceiveRevocation(rev)
|
||||
|
@ -1077,7 +1077,9 @@ func (m *mockOnionErrorDecryptor) DecryptError(encryptedData []byte) (
|
||||
|
||||
var _ htlcNotifier = (*mockHTLCNotifier)(nil)
|
||||
|
||||
type mockHTLCNotifier struct{}
|
||||
type mockHTLCNotifier struct {
|
||||
htlcNotifier
|
||||
}
|
||||
|
||||
func (h *mockHTLCNotifier) NotifyForwardingEvent(key HtlcKey, info HtlcInfo,
|
||||
eventType HtlcEventType) { // nolint:whitespace
|
||||
@ -1095,3 +1097,7 @@ func (h *mockHTLCNotifier) NotifyForwardingFailEvent(key HtlcKey,
|
||||
func (h *mockHTLCNotifier) NotifySettleEvent(key HtlcKey,
|
||||
preimage lntypes.Preimage, eventType HtlcEventType) { // nolint:whitespace
|
||||
}
|
||||
|
||||
func (h *mockHTLCNotifier) NotifyFinalHtlcEvent(key channeldb.CircuitKey,
|
||||
info channeldb.FinalHtlcInfo) { // nolint:whitespace
|
||||
}
|
||||
|
@ -3535,6 +3535,13 @@ func checkHtlcEvents(t *testing.T, events <-chan interface{},
|
||||
t.Fatalf("expected event: %v", expected)
|
||||
}
|
||||
}
|
||||
|
||||
// Check that there are no unexpected events following.
|
||||
select {
|
||||
case event := <-events:
|
||||
t.Fatalf("unexpected event: %v", event)
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
||||
// sendThreeHopPayment is a helper function which sends a payment over
|
||||
@ -3638,6 +3645,12 @@ func getThreeHopEvents(channels *clusterChannels, htlcID uint64,
|
||||
Incoming: false,
|
||||
Timestamp: ts,
|
||||
},
|
||||
&FinalHtlcEvent{
|
||||
CircuitKey: bobKey.IncomingCircuit,
|
||||
Settled: false,
|
||||
Offchain: true,
|
||||
Timestamp: ts,
|
||||
},
|
||||
}
|
||||
|
||||
return aliceEvents, bobEvents, nil
|
||||
@ -3669,6 +3682,12 @@ func getThreeHopEvents(channels *clusterChannels, htlcID uint64,
|
||||
HtlcEventType: HtlcEventTypeForward,
|
||||
Timestamp: ts,
|
||||
},
|
||||
&FinalHtlcEvent{
|
||||
CircuitKey: bobKey.IncomingCircuit,
|
||||
Settled: true,
|
||||
Offchain: true,
|
||||
Timestamp: ts,
|
||||
},
|
||||
}
|
||||
|
||||
carolEvents := []interface{}{
|
||||
@ -3683,6 +3702,14 @@ func getThreeHopEvents(channels *clusterChannels, htlcID uint64,
|
||||
Preimage: *preimage,
|
||||
HtlcEventType: HtlcEventTypeReceive,
|
||||
Timestamp: ts,
|
||||
}, &FinalHtlcEvent{
|
||||
CircuitKey: channeldb.CircuitKey{
|
||||
ChanID: channels.carolToBob.ShortChanID(),
|
||||
HtlcID: htlcID,
|
||||
},
|
||||
Settled: true,
|
||||
Offchain: true,
|
||||
Timestamp: ts,
|
||||
},
|
||||
}
|
||||
|
||||
|
File diff suppressed because it is too large
@ -2460,6 +2460,78 @@ func local_request_Lightning_ListAliases_0(ctx context.Context, marshaler runtim
|
||||
|
||||
}
|
||||
|
||||
func request_Lightning_LookupHtlc_0(ctx context.Context, marshaler runtime.Marshaler, client LightningClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var protoReq LookupHtlcRequest
|
||||
var metadata runtime.ServerMetadata
|
||||
|
||||
var (
|
||||
val string
|
||||
ok bool
|
||||
err error
|
||||
_ = err
|
||||
)
|
||||
|
||||
val, ok = pathParams["chan_id"]
|
||||
if !ok {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "chan_id")
|
||||
}
|
||||
|
||||
protoReq.ChanId, err = runtime.Uint64(val)
|
||||
if err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "chan_id", err)
|
||||
}
|
||||
|
||||
val, ok = pathParams["htlc_index"]
|
||||
if !ok {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "htlc_index")
|
||||
}
|
||||
|
||||
protoReq.HtlcIndex, err = runtime.Uint64(val)
|
||||
if err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "htlc_index", err)
|
||||
}
|
||||
|
||||
msg, err := client.LookupHtlc(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
|
||||
return msg, metadata, err
|
||||
|
||||
}
|
||||
|
||||
func local_request_Lightning_LookupHtlc_0(ctx context.Context, marshaler runtime.Marshaler, server LightningServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var protoReq LookupHtlcRequest
|
||||
var metadata runtime.ServerMetadata
|
||||
|
||||
var (
|
||||
val string
|
||||
ok bool
|
||||
err error
|
||||
_ = err
|
||||
)
|
||||
|
||||
val, ok = pathParams["chan_id"]
|
||||
if !ok {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "chan_id")
|
||||
}
|
||||
|
||||
protoReq.ChanId, err = runtime.Uint64(val)
|
||||
if err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "chan_id", err)
|
||||
}
|
||||
|
||||
val, ok = pathParams["htlc_index"]
|
||||
if !ok {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "htlc_index")
|
||||
}
|
||||
|
||||
protoReq.HtlcIndex, err = runtime.Uint64(val)
|
||||
if err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "htlc_index", err)
|
||||
}
|
||||
|
||||
msg, err := server.LookupHtlc(ctx, &protoReq)
|
||||
return msg, metadata, err
|
||||
|
||||
}
|
||||
|
||||
// RegisterLightningHandlerServer registers the http handlers for service Lightning to "mux".
|
||||
// UnaryRPC :call LightningServer directly.
|
||||
// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906.
|
||||
@ -3792,6 +3864,29 @@ func RegisterLightningHandlerServer(ctx context.Context, mux *runtime.ServeMux,
|
||||
|
||||
})
|
||||
|
||||
mux.Handle("GET", pattern_Lightning_LookupHtlc_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
var stream runtime.ServerTransportStream
|
||||
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/lnrpc.Lightning/LookupHtlc", runtime.WithHTTPPathPattern("/v1/htlc/{chan_id}/{htlc_index}"))
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := local_request_Lightning_LookupHtlc_0(rctx, inboundMarshaler, server, req, pathParams)
|
||||
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
|
||||
ctx = runtime.NewServerMetadataContext(ctx, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
|
||||
forward_Lightning_LookupHtlc_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
|
||||
})
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -5153,6 +5248,26 @@ func RegisterLightningHandlerClient(ctx context.Context, mux *runtime.ServeMux,
|
||||
|
||||
})
|
||||
|
||||
mux.Handle("GET", pattern_Lightning_LookupHtlc_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
rctx, err := runtime.AnnotateContext(ctx, mux, req, "/lnrpc.Lightning/LookupHtlc", runtime.WithHTTPPathPattern("/v1/htlc/{chan_id}/{htlc_index}"))
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := request_Lightning_LookupHtlc_0(rctx, inboundMarshaler, client, req, pathParams)
|
||||
ctx = runtime.NewServerMetadataContext(ctx, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
|
||||
forward_Lightning_LookupHtlc_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
|
||||
})
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -5288,6 +5403,8 @@ var (
|
||||
pattern_Lightning_SubscribeCustomMessages_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "custommessage", "subscribe"}, ""))
|
||||
|
||||
pattern_Lightning_ListAliases_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "aliases", "list"}, ""))
|
||||
|
||||
pattern_Lightning_LookupHtlc_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 1, 0, 4, 1, 5, 2, 1, 0, 4, 1, 5, 3}, []string{"v1", "htlc", "chan_id", "htlc_index"}, ""))
|
||||
)
|
||||
|
||||
var (
|
||||
@ -5422,4 +5539,6 @@ var (
|
||||
forward_Lightning_SubscribeCustomMessages_0 = runtime.ForwardResponseStream
|
||||
|
||||
forward_Lightning_ListAliases_0 = runtime.ForwardResponseMessage
|
||||
|
||||
forward_Lightning_LookupHtlc_0 = runtime.ForwardResponseMessage
|
||||
)
|
||||
|
@ -1723,4 +1723,29 @@ func RegisterLightningJSONCallbacks(registry map[string]func(ctx context.Context
|
||||
}
|
||||
callback(string(respBytes), nil)
|
||||
}
|
||||
|
||||
registry["lnrpc.Lightning.LookupHtlc"] = func(ctx context.Context,
|
||||
conn *grpc.ClientConn, reqJSON string, callback func(string, error)) {
|
||||
|
||||
req := &LookupHtlcRequest{}
|
||||
err := marshaler.Unmarshal([]byte(reqJSON), req)
|
||||
if err != nil {
|
||||
callback("", err)
|
||||
return
|
||||
}
|
||||
|
||||
client := NewLightningClient(conn)
|
||||
resp, err := client.LookupHtlc(ctx, req)
|
||||
if err != nil {
|
||||
callback("", err)
|
||||
return
|
||||
}
|
||||
|
||||
respBytes, err := marshaler.Marshal(resp)
|
||||
if err != nil {
|
||||
callback("", err)
|
||||
return
|
||||
}
|
||||
callback(string(respBytes), nil)
|
||||
}
|
||||
}
|
||||
|
@ -577,6 +577,20 @@ service Lightning {
|
||||
zero conf).
|
||||
*/
|
||||
rpc ListAliases (ListAliasesRequest) returns (ListAliasesResponse);
|
||||
|
||||
rpc LookupHtlc (LookupHtlcRequest) returns (LookupHtlcResponse);
|
||||
}
|
||||
|
||||
message LookupHtlcRequest {
|
||||
uint64 chan_id = 1;
|
||||
|
||||
uint64 htlc_index = 2;
|
||||
}
|
||||
|
||||
message LookupHtlcResponse {
|
||||
bool settled = 1;
|
||||
|
||||
bool offchain = 2;
|
||||
}
|
||||
|
||||
message SubscribeCustomMessagesRequest {
|
||||
|
@ -1593,6 +1593,44 @@
|
||||
]
|
||||
}
|
||||
},
|
||||
"/v1/htlc/{chan_id}/{htlc_index}": {
|
||||
"get": {
|
||||
"operationId": "Lightning_LookupHtlc",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "A successful response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/lnrpcLookupHtlcResponse"
|
||||
}
|
||||
},
|
||||
"default": {
|
||||
"description": "An unexpected error response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/rpcStatus"
|
||||
}
|
||||
}
|
||||
},
|
||||
"parameters": [
|
||||
{
|
||||
"name": "chan_id",
|
||||
"in": "path",
|
||||
"required": true,
|
||||
"type": "string",
|
||||
"format": "uint64"
|
||||
},
|
||||
{
|
||||
"name": "htlc_index",
|
||||
"in": "path",
|
||||
"required": true,
|
||||
"type": "string",
|
||||
"format": "uint64"
|
||||
}
|
||||
],
|
||||
"tags": [
|
||||
"Lightning"
|
||||
]
|
||||
}
|
||||
},
|
||||
"/v1/invoice/{r_hash_str}": {
|
||||
"get": {
|
||||
"summary": "lncli: `lookupinvoice`\nLookupInvoice attempts to look up an invoice according to its payment hash.\nThe passed payment hash *must* be exactly 32 bytes, if not, an error is\nreturned.",
|
||||
@ -5351,6 +5389,17 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"lnrpcLookupHtlcResponse": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"settled": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"offchain": {
|
||||
"type": "boolean"
|
||||
}
|
||||
}
|
||||
},
|
||||
"lnrpcMPPRecord": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
|
@ -165,3 +165,5 @@ http:
|
||||
get: "/v1/custommessage/subscribe"
|
||||
- selector: lnrpc.Lightning.ListAliases
|
||||
get: "/v1/aliases/list"
|
||||
- selector: lnrpc.Lightning.LookupHtlc
|
||||
get: "/v1/htlc/{chan_id}/{htlc_index}"
|
||||
|
@ -398,6 +398,7 @@ type LightningClient interface {
|
||||
// their confirmed SCID (if it exists) and/or the base SCID (in the case of
|
||||
// zero conf).
|
||||
ListAliases(ctx context.Context, in *ListAliasesRequest, opts ...grpc.CallOption) (*ListAliasesResponse, error)
|
||||
LookupHtlc(ctx context.Context, in *LookupHtlcRequest, opts ...grpc.CallOption) (*LookupHtlcResponse, error)
|
||||
}
|
||||
|
||||
type lightningClient struct {
|
||||
@ -1299,6 +1300,15 @@ func (c *lightningClient) ListAliases(ctx context.Context, in *ListAliasesReques
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *lightningClient) LookupHtlc(ctx context.Context, in *LookupHtlcRequest, opts ...grpc.CallOption) (*LookupHtlcResponse, error) {
|
||||
out := new(LookupHtlcResponse)
|
||||
err := c.cc.Invoke(ctx, "/lnrpc.Lightning/LookupHtlc", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// LightningServer is the server API for Lightning service.
|
||||
// All implementations must embed UnimplementedLightningServer
|
||||
// for forward compatibility
|
||||
@ -1683,6 +1693,7 @@ type LightningServer interface {
|
||||
// their confirmed SCID (if it exists) and/or the base SCID (in the case of
|
||||
// zero conf).
|
||||
ListAliases(context.Context, *ListAliasesRequest) (*ListAliasesResponse, error)
|
||||
LookupHtlc(context.Context, *LookupHtlcRequest) (*LookupHtlcResponse, error)
|
||||
mustEmbedUnimplementedLightningServer()
|
||||
}
|
||||
|
||||
@ -1888,6 +1899,9 @@ func (UnimplementedLightningServer) SubscribeCustomMessages(*SubscribeCustomMess
|
||||
func (UnimplementedLightningServer) ListAliases(context.Context, *ListAliasesRequest) (*ListAliasesResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method ListAliases not implemented")
|
||||
}
|
||||
func (UnimplementedLightningServer) LookupHtlc(context.Context, *LookupHtlcRequest) (*LookupHtlcResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method LookupHtlc not implemented")
|
||||
}
|
||||
func (UnimplementedLightningServer) mustEmbedUnimplementedLightningServer() {}
|
||||
|
||||
// UnsafeLightningServer may be embedded to opt out of forward compatibility for this service.
|
||||
@ -3148,6 +3162,24 @@ func _Lightning_ListAliases_Handler(srv interface{}, ctx context.Context, dec fu
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _Lightning_LookupHtlc_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(LookupHtlcRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(LightningServer).LookupHtlc(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/lnrpc.Lightning/LookupHtlc",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(LightningServer).LookupHtlc(ctx, req.(*LookupHtlcRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
// Lightning_ServiceDesc is the grpc.ServiceDesc for Lightning service.
|
||||
// It's only intended for direct use with grpc.RegisterService,
|
||||
// and not to be introspected or modified (even as a copy)
|
||||
@ -3367,6 +3399,10 @@ var Lightning_ServiceDesc = grpc.ServiceDesc{
|
||||
MethodName: "ListAliases",
|
||||
Handler: _Lightning_ListAliases_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "LookupHtlc",
|
||||
Handler: _Lightning_LookupHtlc_Handler,
|
||||
},
|
||||
},
|
||||
Streams: []grpc.StreamDesc{
|
||||
{
|
||||
|
File diff suppressed because it is too large
@ -618,6 +618,8 @@ message HtlcEvent {
|
||||
ForwardFailEvent forward_fail_event = 8;
|
||||
SettleEvent settle_event = 9;
|
||||
LinkFailEvent link_fail_event = 10;
|
||||
SubscribedEvent subscribed_event = 11;
|
||||
FinalHtlcEvent final_htlc_event = 12;
|
||||
}
|
||||
}
|
||||
|
||||
@ -648,6 +650,14 @@ message SettleEvent {
|
||||
bytes preimage = 1;
|
||||
}
|
||||
|
||||
message FinalHtlcEvent {
|
||||
bool settled = 1;
|
||||
bool offchain = 2;
|
||||
}
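
For context, a rough consumer-side sketch of how this event arrives through the existing SubscribeHtlcEvents stream (router, ctx and the logging are placeholders; the itest changes further below exercise the same path):

// Subscribe to htlc events and pick out the new final-htlc events.
stream, err := router.SubscribeHtlcEvents(
	ctx, &routerrpc.SubscribeHtlcEventsRequest{},
)
if err != nil {
	return err
}
for {
	event, err := stream.Recv()
	if err != nil {
		return err
	}

	// Skip everything but the new final-htlc event type.
	final, ok := event.Event.(*routerrpc.HtlcEvent_FinalHtlcEvent)
	if !ok {
		continue
	}

	log.Printf("final htlc event: settled=%v offchain=%v",
		final.FinalHtlcEvent.Settled, final.FinalHtlcEvent.Offchain)
}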
|
||||
|
||||
message SubscribedEvent {
|
||||
}
|
||||
|
||||
message LinkFailEvent {
|
||||
// Info contains details about the htlc that we failed.
|
||||
HtlcInfo info = 1;
|
||||
|
@ -1168,6 +1168,17 @@
|
||||
],
|
||||
"default": "UNKNOWN"
|
||||
},
|
||||
"routerrpcFinalHtlcEvent": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"settled": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"offchain": {
|
||||
"type": "boolean"
|
||||
}
|
||||
}
|
||||
},
|
||||
"routerrpcForwardEvent": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
@ -1317,6 +1328,12 @@
|
||||
},
|
||||
"link_fail_event": {
|
||||
"$ref": "#/definitions/routerrpcLinkFailEvent"
|
||||
},
|
||||
"subscribed_event": {
|
||||
"$ref": "#/definitions/routerrpcSubscribedEvent"
|
||||
},
|
||||
"final_htlc_event": {
|
||||
"$ref": "#/definitions/routerrpcFinalHtlcEvent"
|
||||
}
|
||||
},
|
||||
"title": "HtlcEvent contains the htlc event that was processed. These are served on a\nbest-effort basis; events are not persisted, delivery is not guaranteed\n(in the event of a crash in the switch, forward events may be lost) and\nsome events may be replayed upon restart. Events consumed from this package\nshould be de-duplicated by the htlc's unique combination of incoming and\noutgoing channel id and htlc id. [EXPERIMENTAL]"
|
||||
@ -1748,6 +1765,9 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"routerrpcSubscribedEvent": {
|
||||
"type": "object"
|
||||
},
|
||||
"routerrpcUpdateChanStatusRequest": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
|
@ -900,6 +900,16 @@ func (s *Server) SubscribeHtlcEvents(req *SubscribeHtlcEventsRequest,
|
||||
}
|
||||
defer htlcClient.Cancel()
|
||||
|
||||
// Send out an initial subscribed event so that the caller knows the
|
||||
// point from which new events will be transmitted.
|
||||
if err := stream.Send(&HtlcEvent{
|
||||
Event: &HtlcEvent_SubscribedEvent{
|
||||
SubscribedEvent: &SubscribedEvent{},
|
||||
},
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for {
|
||||
select {
|
||||
case event := <-htlcClient.Updates():
|
||||
|
@ -14,7 +14,7 @@ func rpcHtlcEvent(htlcEvent interface{}) (*HtlcEvent, error) {
	var (
		key       htlcswitch.HtlcKey
		timestamp time.Time
		eventType htlcswitch.HtlcEventType
		eventType *htlcswitch.HtlcEventType
		event     isHtlcEvent_Event
	)

@ -27,7 +27,7 @@ func rpcHtlcEvent(htlcEvent interface{}) (*HtlcEvent, error) {
		}

		key = e.HtlcKey
		eventType = e.HtlcEventType
		eventType = &e.HtlcEventType
		timestamp = e.Timestamp

	case *htlcswitch.ForwardingFailEvent:
@ -36,7 +36,7 @@ func rpcHtlcEvent(htlcEvent interface{}) (*HtlcEvent, error) {
		}

		key = e.HtlcKey
		eventType = e.HtlcEventType
		eventType = &e.HtlcEventType
		timestamp = e.Timestamp

	case *htlcswitch.LinkFailEvent:
@ -57,7 +57,7 @@ func rpcHtlcEvent(htlcEvent interface{}) (*HtlcEvent, error) {
		}

		key = e.HtlcKey
		eventType = e.HtlcEventType
		eventType = &e.HtlcEventType
		timestamp = e.Timestamp

	case *htlcswitch.SettleEvent:
@ -68,7 +68,20 @@ func rpcHtlcEvent(htlcEvent interface{}) (*HtlcEvent, error) {
		}

		key = e.HtlcKey
		eventType = e.HtlcEventType
		eventType = &e.HtlcEventType
		timestamp = e.Timestamp

	case *htlcswitch.FinalHtlcEvent:
		event = &HtlcEvent_FinalHtlcEvent{
			FinalHtlcEvent: &FinalHtlcEvent{
				Settled:  e.Settled,
				Offchain: e.Offchain,
			},
		}

		key = htlcswitch.HtlcKey{
			IncomingCircuit: e.CircuitKey,
		}
		timestamp = e.Timestamp

	default:
@ -85,18 +98,21 @@ func rpcHtlcEvent(htlcEvent interface{}) (*HtlcEvent, error) {
	}

	// Convert the htlc event type to a rpc event.
	switch eventType {
	case htlcswitch.HtlcEventTypeSend:
		rpcEvent.EventType = HtlcEvent_SEND
	if eventType != nil {
		switch *eventType {
		case htlcswitch.HtlcEventTypeSend:
			rpcEvent.EventType = HtlcEvent_SEND

	case htlcswitch.HtlcEventTypeReceive:
		rpcEvent.EventType = HtlcEvent_RECEIVE
		case htlcswitch.HtlcEventTypeReceive:
			rpcEvent.EventType = HtlcEvent_RECEIVE

	case htlcswitch.HtlcEventTypeForward:
		rpcEvent.EventType = HtlcEvent_FORWARD
		case htlcswitch.HtlcEventTypeForward:
			rpcEvent.EventType = HtlcEvent_FORWARD

	default:
		return nil, fmt.Errorf("unknown event type: %v", eventType)
		default:
			return nil, fmt.Errorf("unknown event type: %v",
				eventType)
		}
	}

	return rpcEvent, nil
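The switch from htlcswitch.HtlcEventType to *htlcswitch.HtlcEventType is what lets the new FinalHtlcEvent case leave the event type unset: a final resolution is neither a send, receive, nor forward, so the rpc event keeps its UNKNOWN default. A standalone sketch (not lnd code) of the same nil-pointer-as-unset pattern:

package main

import "fmt"

type eventType int

const (
	eventTypeUnknown eventType = iota
	eventTypeSend
	eventTypeReceive
	eventTypeForward
)

// describe maps an optional event type to a label. A nil pointer means "no
// event type applies", which is distinct from the enum's zero value.
func describe(et *eventType) string {
	if et == nil {
		// Final htlc events carry no send/receive/forward semantics,
		// so the caller leaves the type unset.
		return "UNKNOWN"
	}

	switch *et {
	case eventTypeSend:
		return "SEND"
	case eventTypeReceive:
		return "RECEIVE"
	case eventTypeForward:
		return "FORWARD"
	default:
		return "invalid"
	}
}

func main() {
	fwd := eventTypeForward
	fmt.Println(describe(&fwd)) // FORWARD
	fmt.Println(describe(nil))  // UNKNOWN
}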
@ -168,6 +168,7 @@ out:
|
||||
if err != nil {
|
||||
t.Fatalf("could not subscribe events: %v", err)
|
||||
}
|
||||
assertSubscribed(t, aliceEvents)
|
||||
|
||||
bobEvents, err := net.Bob.RouterClient.SubscribeHtlcEvents(
|
||||
ctxt, &routerrpc.SubscribeHtlcEventsRequest{},
|
||||
@ -175,6 +176,7 @@ out:
|
||||
if err != nil {
|
||||
t.Fatalf("could not subscribe events: %v", err)
|
||||
}
|
||||
assertSubscribed(t, bobEvents)
|
||||
|
||||
carolEvents, err := carol.RouterClient.SubscribeHtlcEvents(
|
||||
ctxt, &routerrpc.SubscribeHtlcEventsRequest{},
|
||||
@ -182,6 +184,7 @@ out:
|
||||
if err != nil {
|
||||
t.Fatalf("could not subscribe events: %v", err)
|
||||
}
|
||||
assertSubscribed(t, carolEvents)
|
||||
|
||||
// For the first scenario, we'll test the cancellation of an HTLC with
|
||||
// an unknown payment hash.
|
||||
@ -421,4 +424,14 @@ func assertLinkFailure(t *harnessTest,
		t.Fatalf("expected: %v, got: %v", failureDetail,
			linkFail.LinkFailEvent.FailureDetail)
	}

	event = assertEventAndType(t, routerrpc.HtlcEvent_UNKNOWN, client)
	finalHtlc, ok := event.Event.(*routerrpc.HtlcEvent_FinalHtlcEvent)
	if !ok {
		t.Fatalf("expected final htlc, got: %T", event.Event)
	}

	if finalHtlc.FinalHtlcEvent.Settled {
		t.Fatalf("expected final fail")
	}
}

@ -172,6 +172,7 @@ func testMultiHopPayments(net *lntest.NetworkHarness, t *harnessTest) {
|
||||
if err != nil {
|
||||
t.Fatalf("could not subscribe events: %v", err)
|
||||
}
|
||||
assertSubscribed(t, aliceEvents)
|
||||
|
||||
bobEvents, err := net.Bob.RouterClient.SubscribeHtlcEvents(
|
||||
ctxt, &routerrpc.SubscribeHtlcEventsRequest{},
|
||||
@ -179,6 +180,7 @@ func testMultiHopPayments(net *lntest.NetworkHarness, t *harnessTest) {
|
||||
if err != nil {
|
||||
t.Fatalf("could not subscribe events: %v", err)
|
||||
}
|
||||
assertSubscribed(t, bobEvents)
|
||||
|
||||
carolEvents, err := carol.RouterClient.SubscribeHtlcEvents(
|
||||
ctxt, &routerrpc.SubscribeHtlcEventsRequest{},
|
||||
@ -186,6 +188,7 @@ func testMultiHopPayments(net *lntest.NetworkHarness, t *harnessTest) {
|
||||
if err != nil {
|
||||
t.Fatalf("could not subscribe events: %v", err)
|
||||
}
|
||||
assertSubscribed(t, carolEvents)
|
||||
|
||||
daveEvents, err := dave.RouterClient.SubscribeHtlcEvents(
|
||||
ctxt, &routerrpc.SubscribeHtlcEventsRequest{},
|
||||
@ -193,6 +196,7 @@ func testMultiHopPayments(net *lntest.NetworkHarness, t *harnessTest) {
|
||||
if err != nil {
|
||||
t.Fatalf("could not subscribe events: %v", err)
|
||||
}
|
||||
assertSubscribed(t, daveEvents)
|
||||
|
||||
// Using Carol as the source, pay to the 5 invoices from Bob created
|
||||
// above.
|
||||
@ -344,13 +348,26 @@ func assertHtlcEvents(t *harnessTest, fwdCount, fwdFailCount, settleCount int,
	userType routerrpc.HtlcEvent_EventType,
	client routerrpc.Router_SubscribeHtlcEventsClient) {

	var forwards, forwardFails, settles int
	var forwards, forwardFails, settles, finalSettles, finalFails int

	var finalFailCount, finalSettleCount int
	if userType != routerrpc.HtlcEvent_SEND {
		finalFailCount = fwdFailCount
		finalSettleCount = settleCount
	}

	numEvents := fwdCount + fwdFailCount + settleCount +
		finalFailCount + finalSettleCount

	numEvents := fwdCount + fwdFailCount + settleCount
	for i := 0; i < numEvents; i++ {
		event := assertEventAndType(t, userType, client)
		event, err := client.Recv()
		if err != nil {
			t.Fatalf("could not get event")
		}

		switch event.Event.(type) {
		expectedEventType := userType

		switch e := event.Event.(type) {
		case *routerrpc.HtlcEvent_ForwardEvent:
			forwards++

@ -360,9 +377,23 @@ func assertHtlcEvents(t *harnessTest, fwdCount, fwdFailCount, settleCount int,
		case *routerrpc.HtlcEvent_SettleEvent:
			settles++

		case *routerrpc.HtlcEvent_FinalHtlcEvent:
			if e.FinalHtlcEvent.Settled {
				finalSettles++
			} else {
				finalFails++
			}

			expectedEventType = routerrpc.HtlcEvent_UNKNOWN

		default:
			t.Fatalf("unexpected event: %T", event.Event)
		}

		if event.EventType != expectedEventType {
			t.Fatalf("expected: %v, got: %v", expectedEventType,
				event.EventType)
		}
	}

	if forwards != fwdCount {
@ -374,9 +405,19 @@ func assertHtlcEvents(t *harnessTest, fwdCount, fwdFailCount, settleCount int,
			forwardFails)
	}

	if finalFails != finalFailCount {
		t.Fatalf("expected: %v final fails, got: %v", finalFailCount,
			finalFails)
	}

	if settles != settleCount {
		t.Fatalf("expected: %v settles, got: %v", settleCount, settles)
	}

	if finalSettles != finalSettleCount {
		t.Fatalf("expected: %v settles, got: %v", finalSettleCount,
			finalSettles)
	}
}

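Restated outside the harness: the expected number of events per node is the sum of forwards, forward failures and settles, plus one final event for each failure and settle, except from the sending node's perspective, which in these tests is not expected to receive final resolution events. A small illustrative helper (not part of the test code):

// expectedEventCount mirrors the counting logic in assertHtlcEvents above.
func expectedEventCount(fwd, fwdFail, settle int, sender bool) int {
	finalFail, finalSettle := fwdFail, settle
	if sender {
		// The sending node is not expected to see final events here.
		finalFail, finalSettle = 0, 0
	}
	return fwd + fwdFail + settle + finalFail + finalSettle
}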
// assertEventAndType reads an event from the stream provided and ensures that
@ -399,6 +440,14 @@ func assertEventAndType(t *harnessTest, eventType routerrpc.HtlcEvent_EventType,
	return event
}

func assertSubscribed(t *harnessTest,
	client routerrpc.Router_SubscribeHtlcEventsClient) {

	event, err := client.Recv()
	require.NoError(t.t, err)
	require.NotNil(t.t, event.GetSubscribedEvent())
}

// updateChannelPolicy updates the channel policy of node to the
// given fees and timelock delta. This function blocks until
// listenerNode has received the policy update.

@ -4779,15 +4779,17 @@ func (lc *LightningChannel) PendingLocalUpdateCount() uint64 {
// chain is advanced by a single commitment. This now lowest unrevoked
// commitment becomes our currently accepted state within the channel. This
// method also returns the set of HTLC's currently active within the commitment
// transaction. This return value allows callers to act once an HTLC has been
// locked into our commitment transaction.
func (lc *LightningChannel) RevokeCurrentCommitment() (*lnwire.RevokeAndAck, []channeldb.HTLC, error) {
// transaction and the htlcs that were resolved. This return value allows callers
// to act once an HTLC has been locked into our commitment transaction.
func (lc *LightningChannel) RevokeCurrentCommitment() (*lnwire.RevokeAndAck,
	[]channeldb.HTLC, map[uint64]bool, error) {

	lc.Lock()
	defer lc.Unlock()

	revocationMsg, err := lc.generateRevocation(lc.currentHeight)
	if err != nil {
		return nil, nil, err
		return nil, nil, nil, err
	}

	lc.log.Tracef("revoking height=%v, now at height=%v",
@ -4808,11 +4810,11 @@ func (lc *LightningChannel) RevokeCurrentCommitment() (*lnwire.RevokeAndAck, []c
	// is committed locally.
	unsignedAckedUpdates := lc.getUnsignedAckedUpdates()

	err = lc.channelState.UpdateCommitment(
	finalHtlcs, err := lc.channelState.UpdateCommitment(
		newCommitment, unsignedAckedUpdates,
	)
	if err != nil {
		return nil, nil, err
		return nil, nil, nil, err
	}

	lc.log.Tracef("state transition accepted: "+
@ -4825,7 +4827,7 @@ func (lc *LightningChannel) RevokeCurrentCommitment() (*lnwire.RevokeAndAck, []c
		&lc.channelState.FundingOutpoint,
	)

	return revocationMsg, newCommitment.Htlcs, nil
	return revocationMsg, newCommitment.Htlcs, finalHtlcs, nil
}

// ReceiveRevocation processes a revocation sent by the remote party for the

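The new third return value maps the index of each resolved htlc to whether it was settled (true) or failed (false), as the test assertions further down rely on. A hedged sketch of how a caller might consume it; lockInAndNotify and notifyFinal are hypothetical names and only stand in for whatever the caller actually does with a final resolution:

// Sketch only: assumes the surrounding package imports
// github.com/lightningnetwork/lnd/lnwallet and lnwire.
func lockInAndNotify(lc *lnwallet.LightningChannel,
	notifyFinal func(htlcIndex uint64, settled bool)) (*lnwire.RevokeAndAck, error) {

	revocationMsg, _, finalHtlcs, err := lc.RevokeCurrentCommitment()
	if err != nil {
		return nil, err
	}

	// finalHtlcs maps each resolved htlc index to its outcome: true for
	// settled, false for failed.
	for htlcIndex, settled := range finalHtlcs {
		notifyFinal(htlcIndex, settled)
	}

	// The revocation message is sent to the remote peer as before.
	return revocationMsg, nil
}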
@ -104,7 +104,7 @@ func testAddSettleWorkflow(t *testing.T, tweakless bool) {
|
||||
|
||||
// Bob revokes his prior commitment given to him by Alice, since he now
|
||||
// has a valid signature for a newer commitment.
|
||||
bobRevocation, _, err := bobChannel.RevokeCurrentCommitment()
|
||||
bobRevocation, _, _, err := bobChannel.RevokeCurrentCommitment()
|
||||
require.NoError(t, err, "unable to generate bob revocation")
|
||||
|
||||
// Bob finally send a signature for Alice's commitment transaction.
|
||||
@ -135,7 +135,7 @@ func testAddSettleWorkflow(t *testing.T, tweakless bool) {
|
||||
require.NoError(t, err, "alice unable to process bob's new commitment")
|
||||
|
||||
// Alice then generates a revocation for bob.
|
||||
aliceRevocation, _, err := aliceChannel.RevokeCurrentCommitment()
|
||||
aliceRevocation, _, _, err := aliceChannel.RevokeCurrentCommitment()
|
||||
require.NoError(t, err, "unable to revoke alice channel")
|
||||
|
||||
// Finally Bob processes Alice's revocation, at this point the new HTLC
|
||||
@ -220,7 +220,7 @@ func testAddSettleWorkflow(t *testing.T, tweakless bool) {
|
||||
err = aliceChannel.ReceiveNewCommitment(bobSig2, bobHtlcSigs2)
|
||||
require.NoError(t, err, "alice unable to process bob's new commitment")
|
||||
|
||||
aliceRevocation2, _, err := aliceChannel.RevokeCurrentCommitment()
|
||||
aliceRevocation2, _, _, err := aliceChannel.RevokeCurrentCommitment()
|
||||
require.NoError(t, err, "alice unable to generate revocation")
|
||||
aliceSig2, aliceHtlcSigs2, _, err := aliceChannel.SignNextCommitment()
|
||||
require.NoError(t, err, "alice unable to sign new commitment")
|
||||
@ -239,8 +239,17 @@ func testAddSettleWorkflow(t *testing.T, tweakless bool) {
|
||||
err = bobChannel.ReceiveNewCommitment(aliceSig2, aliceHtlcSigs2)
|
||||
require.NoError(t, err, "bob unable to process alice's new commitment")
|
||||
|
||||
bobRevocation2, _, err := bobChannel.RevokeCurrentCommitment()
|
||||
bobRevocation2, _, finalHtlcs, err := bobChannel.
|
||||
RevokeCurrentCommitment()
|
||||
|
||||
require.NoError(t, err, "bob unable to revoke commitment")
|
||||
|
||||
// Check finalHtlcs for the expected final resolution.
|
||||
require.Len(t, finalHtlcs, 1, "final htlc expected")
|
||||
for _, settled := range finalHtlcs {
|
||||
require.True(t, settled, "final settle expected")
|
||||
}
|
||||
|
||||
fwdPkg, _, _, _, err = aliceChannel.ReceiveRevocation(bobRevocation2)
|
||||
require.NoError(t, err, "alice unable to process bob's revocation")
|
||||
if len(fwdPkg.Adds) != 0 {
|
||||
@ -393,7 +402,7 @@ func TestChannelZeroAddLocalHeight(t *testing.T) {
|
||||
|
||||
// Alice should reply with a revocation.
|
||||
// -----rev----->
|
||||
aliceRevocation, _, err := aliceChannel.RevokeCurrentCommitment()
|
||||
aliceRevocation, _, _, err := aliceChannel.RevokeCurrentCommitment()
|
||||
require.NoError(t, err)
|
||||
|
||||
_, _, _, _, err = bobChannel.ReceiveRevocation(aliceRevocation)
|
||||
@ -570,7 +579,7 @@ func testCommitHTLCSigTieBreak(t *testing.T, restart bool) {
|
||||
err = bobChannel.ReceiveNewCommitment(aliceSig, aliceHtlcSigs)
|
||||
require.NoError(t, err, "unable to receive alice's commitment")
|
||||
|
||||
bobRevocation, _, err := bobChannel.RevokeCurrentCommitment()
|
||||
bobRevocation, _, _, err := bobChannel.RevokeCurrentCommitment()
|
||||
require.NoError(t, err, "unable to revoke bob's commitment")
|
||||
_, _, _, _, err = aliceChannel.ReceiveRevocation(bobRevocation)
|
||||
require.NoError(t, err, "unable to receive bob's revocation")
|
||||
@ -2312,7 +2321,7 @@ func TestUpdateFeeConcurrentSig(t *testing.T) {
|
||||
|
||||
// Bob can revoke the prior commitment he had. This should lock in the
|
||||
// fee update for him.
|
||||
_, _, err = bobChannel.RevokeCurrentCommitment()
|
||||
_, _, _, err = bobChannel.RevokeCurrentCommitment()
|
||||
require.NoError(t, err, "unable to generate bob revocation")
|
||||
|
||||
if chainfee.SatPerKWeight(bobChannel.channelState.LocalCommitment.FeePerKw) != fee {
|
||||
@ -2378,7 +2387,7 @@ func TestUpdateFeeSenderCommits(t *testing.T) {
|
||||
|
||||
// Bob can revoke the prior commitment he had. This should lock in the
|
||||
// fee update for him.
|
||||
bobRevocation, _, err := bobChannel.RevokeCurrentCommitment()
|
||||
bobRevocation, _, _, err := bobChannel.RevokeCurrentCommitment()
|
||||
require.NoError(t, err, "unable to generate bob revocation")
|
||||
|
||||
if chainfee.SatPerKWeight(
|
||||
@ -2413,7 +2422,7 @@ func TestUpdateFeeSenderCommits(t *testing.T) {
|
||||
|
||||
// Alice can revoke the old commitment, which will lock in the fee
|
||||
// update.
|
||||
aliceRevocation, _, err := aliceChannel.RevokeCurrentCommitment()
|
||||
aliceRevocation, _, _, err := aliceChannel.RevokeCurrentCommitment()
|
||||
require.NoError(t, err, "unable to revoke alice channel")
|
||||
|
||||
if chainfee.SatPerKWeight(
|
||||
@ -2480,7 +2489,7 @@ func TestUpdateFeeReceiverCommits(t *testing.T) {
|
||||
// Alice can revoke the prior commitment she had, this will ack
|
||||
// everything received before last commitment signature, but in this
|
||||
// case that is nothing.
|
||||
aliceRevocation, _, err := aliceChannel.RevokeCurrentCommitment()
|
||||
aliceRevocation, _, _, err := aliceChannel.RevokeCurrentCommitment()
|
||||
require.NoError(t, err, "unable to generate bob revocation")
|
||||
|
||||
// Bob receives the revocation of the old commitment
|
||||
@ -2508,7 +2517,7 @@ func TestUpdateFeeReceiverCommits(t *testing.T) {
|
||||
// Bob can revoke the old commitment. This will ack what he has
|
||||
// received, including the HTLC and fee update. This will lock in the
|
||||
// fee update for bob.
|
||||
bobRevocation, _, err := bobChannel.RevokeCurrentCommitment()
|
||||
bobRevocation, _, _, err := bobChannel.RevokeCurrentCommitment()
|
||||
require.NoError(t, err, "unable to revoke alice channel")
|
||||
|
||||
if chainfee.SatPerKWeight(
|
||||
@ -2542,7 +2551,7 @@ func TestUpdateFeeReceiverCommits(t *testing.T) {
|
||||
|
||||
// After Alice now revokes her old commitment, the fee update should
|
||||
// lock in.
|
||||
aliceRevocation, _, err = aliceChannel.RevokeCurrentCommitment()
|
||||
aliceRevocation, _, _, err = aliceChannel.RevokeCurrentCommitment()
|
||||
require.NoError(t, err, "unable to generate bob revocation")
|
||||
|
||||
if chainfee.SatPerKWeight(
|
||||
@ -2644,7 +2653,7 @@ func TestUpdateFeeMultipleUpdates(t *testing.T) {
|
||||
|
||||
// Bob can revoke the prior commitment he had. This should lock in the
|
||||
// fee update for him.
|
||||
bobRevocation, _, err := bobChannel.RevokeCurrentCommitment()
|
||||
bobRevocation, _, _, err := bobChannel.RevokeCurrentCommitment()
|
||||
require.NoError(t, err, "unable to generate bob revocation")
|
||||
|
||||
if chainfee.SatPerKWeight(
|
||||
@ -2680,7 +2689,7 @@ func TestUpdateFeeMultipleUpdates(t *testing.T) {
|
||||
|
||||
// Alice can revoke the old commitment, which will lock in the fee
|
||||
// update.
|
||||
aliceRevocation, _, err := aliceChannel.RevokeCurrentCommitment()
|
||||
aliceRevocation, _, _, err := aliceChannel.RevokeCurrentCommitment()
|
||||
require.NoError(t, err, "unable to revoke alice channel")
|
||||
|
||||
if chainfee.SatPerKWeight(
|
||||
@ -3074,7 +3083,7 @@ func TestChanSyncOweCommitment(t *testing.T) {
|
||||
// adding one of her own.
|
||||
err = bobChannel.ReceiveNewCommitment(aliceSig, aliceHtlcSigs)
|
||||
require.NoError(t, err, "bob unable to process alice's commitment")
|
||||
bobRevocation, _, err := bobChannel.RevokeCurrentCommitment()
|
||||
bobRevocation, _, _, err := bobChannel.RevokeCurrentCommitment()
|
||||
require.NoError(t, err, "unable to revoke bob commitment")
|
||||
bobSig, bobHtlcSigs, _, err := bobChannel.SignNextCommitment()
|
||||
require.NoError(t, err, "bob unable to sign commitment")
|
||||
@ -3082,7 +3091,7 @@ func TestChanSyncOweCommitment(t *testing.T) {
|
||||
require.NoError(t, err, "alice unable to recv revocation")
|
||||
err = aliceChannel.ReceiveNewCommitment(bobSig, bobHtlcSigs)
|
||||
require.NoError(t, err, "alice unable to rev bob's commitment")
|
||||
aliceRevocation, _, err := aliceChannel.RevokeCurrentCommitment()
|
||||
aliceRevocation, _, _, err := aliceChannel.RevokeCurrentCommitment()
|
||||
require.NoError(t, err, "alice unable to revoke commitment")
|
||||
_, _, _, _, err = bobChannel.ReceiveRevocation(aliceRevocation)
|
||||
require.NoError(t, err, "bob unable to recv revocation")
|
||||
@ -3234,7 +3243,7 @@ func TestChanSyncOweCommitmentPendingRemote(t *testing.T) {
|
||||
// completes, the htlc is settled on the local commitment
|
||||
// transaction. Bob still owes Alice a signature to also settle
|
||||
// the htlc on her local commitment transaction.
|
||||
bobRevoke, _, err := bobChannel.RevokeCurrentCommitment()
|
||||
bobRevoke, _, _, err := bobChannel.RevokeCurrentCommitment()
|
||||
if err != nil {
|
||||
t.Fatalf("unable to revoke commitment: %v", err)
|
||||
}
|
||||
@ -3264,7 +3273,7 @@ func TestChanSyncOweCommitmentPendingRemote(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
aliceRevoke, _, err := aliceChannel.RevokeCurrentCommitment()
|
||||
aliceRevoke, _, _, err := aliceChannel.RevokeCurrentCommitment()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@ -3330,7 +3339,7 @@ func TestChanSyncOweRevocation(t *testing.T) {
|
||||
err = bobChannel.ReceiveNewCommitment(aliceSig, aliceHtlcSigs)
|
||||
require.NoError(t, err, "bob unable to process alice's commitment")
|
||||
|
||||
bobRevocation, _, err := bobChannel.RevokeCurrentCommitment()
|
||||
bobRevocation, _, _, err := bobChannel.RevokeCurrentCommitment()
|
||||
require.NoError(t, err, "unable to revoke bob commitment")
|
||||
bobSig, bobHtlcSigs, _, err := bobChannel.SignNextCommitment()
|
||||
require.NoError(t, err, "bob unable to sign commitment")
|
||||
@ -3342,7 +3351,7 @@ func TestChanSyncOweRevocation(t *testing.T) {
|
||||
|
||||
// At this point, we'll simulate the connection breaking down by Bob's
|
||||
// lack of knowledge of the revocation message that Alice just sent.
|
||||
aliceRevocation, _, err := aliceChannel.RevokeCurrentCommitment()
|
||||
aliceRevocation, _, _, err := aliceChannel.RevokeCurrentCommitment()
|
||||
require.NoError(t, err, "alice unable to revoke commitment")
|
||||
|
||||
// If we fetch the channel sync messages at this state, then Alice
|
||||
@ -3482,7 +3491,7 @@ func TestChanSyncOweRevocationAndCommit(t *testing.T) {
|
||||
|
||||
// Bob generates the revoke and sig message, but the messages don't
|
||||
// reach Alice before the connection dies.
|
||||
bobRevocation, _, err := bobChannel.RevokeCurrentCommitment()
|
||||
bobRevocation, _, _, err := bobChannel.RevokeCurrentCommitment()
|
||||
require.NoError(t, err, "unable to revoke bob commitment")
|
||||
bobSig, bobHtlcSigs, _, err := bobChannel.SignNextCommitment()
|
||||
require.NoError(t, err, "bob unable to sign commitment")
|
||||
@ -3559,7 +3568,7 @@ func TestChanSyncOweRevocationAndCommit(t *testing.T) {
|
||||
require.NoError(t, err, "alice unable to recv revocation")
|
||||
err = aliceChannel.ReceiveNewCommitment(bobSig, bobHtlcSigs)
|
||||
require.NoError(t, err, "alice unable to rev bob's commitment")
|
||||
aliceRevocation, _, err := aliceChannel.RevokeCurrentCommitment()
|
||||
aliceRevocation, _, _, err := aliceChannel.RevokeCurrentCommitment()
|
||||
require.NoError(t, err, "alice unable to revoke commitment")
|
||||
_, _, _, _, err = bobChannel.ReceiveRevocation(aliceRevocation)
|
||||
require.NoError(t, err, "bob unable to recv revocation")
|
||||
@ -3630,7 +3639,7 @@ func TestChanSyncOweRevocationAndCommitForceTransition(t *testing.T) {
|
||||
// signature for Bob's updated state. Instead she will issue a new
|
||||
// update before sending a new CommitSig. This will lead to Alice's
|
||||
// local commit chain getting height > remote commit chain.
|
||||
aliceRevocation, _, err := aliceChannel.RevokeCurrentCommitment()
|
||||
aliceRevocation, _, _, err := aliceChannel.RevokeCurrentCommitment()
|
||||
require.NoError(t, err, "alice unable to revoke commitment")
|
||||
_, _, _, _, err = bobChannel.ReceiveRevocation(aliceRevocation)
|
||||
require.NoError(t, err, "bob unable to recv revocation")
|
||||
@ -3652,7 +3661,7 @@ func TestChanSyncOweRevocationAndCommitForceTransition(t *testing.T) {
|
||||
// Bob then sends his revocation message, but before Alice can process
|
||||
// it (and before he can send his CommitSig message), the connection
|
||||
// dies.
|
||||
bobRevocation, _, err := bobChannel.RevokeCurrentCommitment()
|
||||
bobRevocation, _, _, err := bobChannel.RevokeCurrentCommitment()
|
||||
require.NoError(t, err, "unable to revoke bob commitment")
|
||||
|
||||
// Now if we attempt to synchronize states at this point, Alice should
|
||||
@ -3746,7 +3755,7 @@ func TestChanSyncOweRevocationAndCommitForceTransition(t *testing.T) {
|
||||
bobSigMsg.CommitSig, bobSigMsg.HtlcSigs,
|
||||
)
|
||||
require.NoError(t, err, "alice unable to rev bob's commitment")
|
||||
aliceRevocation, _, err = aliceChannel.RevokeCurrentCommitment()
|
||||
aliceRevocation, _, _, err = aliceChannel.RevokeCurrentCommitment()
|
||||
require.NoError(t, err, "alice unable to revoke commitment")
|
||||
_, _, _, _, err = bobChannel.ReceiveRevocation(aliceRevocation)
|
||||
require.NoError(t, err, "bob unable to recv revocation")
|
||||
@ -4132,7 +4141,7 @@ func TestChannelRetransmissionFeeUpdate(t *testing.T) {
|
||||
|
||||
err = bobChannel.ReceiveNewCommitment(aliceSig, aliceHtlcSigs)
|
||||
require.NoError(t, err, "bob unable to process alice's commitment")
|
||||
bobRevocation, _, err := bobChannel.RevokeCurrentCommitment()
|
||||
bobRevocation, _, _, err := bobChannel.RevokeCurrentCommitment()
|
||||
require.NoError(t, err, "unable to revoke bob commitment")
|
||||
bobSig, bobHtlcSigs, _, err := bobChannel.SignNextCommitment()
|
||||
require.NoError(t, err, "bob unable to sign commitment")
|
||||
@ -4140,7 +4149,7 @@ func TestChannelRetransmissionFeeUpdate(t *testing.T) {
|
||||
require.NoError(t, err, "alice unable to recv revocation")
|
||||
err = aliceChannel.ReceiveNewCommitment(bobSig, bobHtlcSigs)
|
||||
require.NoError(t, err, "alice unable to rev bob's commitment")
|
||||
aliceRevocation, _, err := aliceChannel.RevokeCurrentCommitment()
|
||||
aliceRevocation, _, _, err := aliceChannel.RevokeCurrentCommitment()
|
||||
require.NoError(t, err, "alice unable to revoke commitment")
|
||||
_, _, _, _, err = bobChannel.ReceiveRevocation(aliceRevocation)
|
||||
require.NoError(t, err, "bob unable to recv revocation")
|
||||
@ -4330,7 +4339,7 @@ func TestFeeUpdateOldDiskFormat(t *testing.T) {
|
||||
// transition.
|
||||
err = bobChannel.ReceiveNewCommitment(aliceSig, aliceHtlcSigs)
|
||||
require.NoError(t, err, "bob unable to process alice's commitment")
|
||||
bobRevocation, _, err := bobChannel.RevokeCurrentCommitment()
|
||||
bobRevocation, _, _, err := bobChannel.RevokeCurrentCommitment()
|
||||
require.NoError(t, err, "unable to revoke bob commitment")
|
||||
bobSig, bobHtlcSigs, _, err := bobChannel.SignNextCommitment()
|
||||
require.NoError(t, err, "bob unable to sign commitment")
|
||||
@ -4338,7 +4347,7 @@ func TestFeeUpdateOldDiskFormat(t *testing.T) {
|
||||
require.NoError(t, err, "alice unable to recv revocation")
|
||||
err = aliceChannel.ReceiveNewCommitment(bobSig, bobHtlcSigs)
|
||||
require.NoError(t, err, "alice unable to rev bob's commitment")
|
||||
aliceRevocation, _, err := aliceChannel.RevokeCurrentCommitment()
|
||||
aliceRevocation, _, _, err := aliceChannel.RevokeCurrentCommitment()
|
||||
require.NoError(t, err, "alice unable to revoke commitment")
|
||||
_, _, _, _, err = bobChannel.ReceiveRevocation(aliceRevocation)
|
||||
require.NoError(t, err, "bob unable to recv revocation")
|
||||
@ -5013,7 +5022,7 @@ func TestLockedInHtlcForwardingSkipAfterRestart(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
bobRevocation, _, err := bobChannel.RevokeCurrentCommitment()
|
||||
bobRevocation, _, _, err := bobChannel.RevokeCurrentCommitment()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@ -5043,7 +5052,7 @@ func TestLockedInHtlcForwardingSkipAfterRestart(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
aliceRevocation, _, err := aliceChannel.RevokeCurrentCommitment()
|
||||
aliceRevocation, _, _, err := aliceChannel.RevokeCurrentCommitment()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@ -5087,7 +5096,7 @@ func TestLockedInHtlcForwardingSkipAfterRestart(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
aliceRevocation, _, err = aliceChannel.RevokeCurrentCommitment()
|
||||
aliceRevocation, _, _, err = aliceChannel.RevokeCurrentCommitment()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@ -5130,11 +5139,18 @@ func TestLockedInHtlcForwardingSkipAfterRestart(t *testing.T) {
|
||||
err = aliceChannel.ReceiveFailHTLC(htlc2.ID, []byte("bad"))
|
||||
require.NoError(t, err, "unable to recv htlc cancel")
|
||||
|
||||
bobRevocation, _, err = bobChannel.RevokeCurrentCommitment()
|
||||
bobRevocation, _, finalHtlcs, err := bobChannel.
|
||||
RevokeCurrentCommitment()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Check finalHtlcs for the expected final resolution.
|
||||
require.Len(t, finalHtlcs, 1, "final htlc expected")
|
||||
for _, settled := range finalHtlcs {
|
||||
require.False(t, settled, "final fail expected")
|
||||
}
|
||||
|
||||
// Alice should detect that she doesn't need to forward any Adds, but
|
||||
// that the Fail has been locked in and can be forwarded.
|
||||
_, adds, settleFails, _, err := aliceChannel.ReceiveRevocation(bobRevocation)
|
||||
@ -5179,7 +5195,7 @@ func TestLockedInHtlcForwardingSkipAfterRestart(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
bobRevocation, _, err = bobChannel.RevokeCurrentCommitment()
|
||||
bobRevocation, _, _, err = bobChannel.RevokeCurrentCommitment()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@ -5210,7 +5226,7 @@ func TestLockedInHtlcForwardingSkipAfterRestart(t *testing.T) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
aliceRevocation, _, err = aliceChannel.RevokeCurrentCommitment()
|
||||
aliceRevocation, _, _, err = aliceChannel.RevokeCurrentCommitment()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@ -5240,7 +5256,7 @@ func TestLockedInHtlcForwardingSkipAfterRestart(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
bobRevocation, _, err = bobChannel.RevokeCurrentCommitment()
|
||||
bobRevocation, _, _, err = bobChannel.RevokeCurrentCommitment()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@ -5852,13 +5868,13 @@ func TestMaxAsynchronousHtlcs(t *testing.T) {
|
||||
require.NoError(t, err, "unable to receive new commitment")
|
||||
|
||||
// Both sides exchange revocations as in step 4 & 5.
|
||||
bobRevocation, _, err := bobChannel.RevokeCurrentCommitment()
|
||||
bobRevocation, _, _, err := bobChannel.RevokeCurrentCommitment()
|
||||
require.NoError(t, err, "unable to revoke revocation")
|
||||
|
||||
_, _, _, _, err = aliceChannel.ReceiveRevocation(bobRevocation)
|
||||
require.NoError(t, err, "unable to receive revocation")
|
||||
|
||||
aliceRevocation, _, err := aliceChannel.RevokeCurrentCommitment()
|
||||
aliceRevocation, _, _, err := aliceChannel.RevokeCurrentCommitment()
|
||||
require.NoError(t, err, "unable to revoke revocation")
|
||||
|
||||
_, _, _, _, err = bobChannel.ReceiveRevocation(aliceRevocation)
|
||||
@ -6549,7 +6565,7 @@ func TestChannelRestoreUpdateLogs(t *testing.T) {
|
||||
// Bob receives this commitment signature, and revokes his old state.
|
||||
err = bobChannel.ReceiveNewCommitment(aliceSig, aliceHtlcSigs)
|
||||
require.NoError(t, err, "unable to receive commitment")
|
||||
bobRevocation, _, err := bobChannel.RevokeCurrentCommitment()
|
||||
bobRevocation, _, _, err := bobChannel.RevokeCurrentCommitment()
|
||||
require.NoError(t, err, "unable to revoke commitment")
|
||||
|
||||
// When Alice now receives this revocation, she will advance her remote
|
||||
@ -6728,7 +6744,7 @@ func TestChannelRestoreUpdateLogsFailedHTLC(t *testing.T) {
|
||||
assertInLogs(t, aliceChannel, 1, 0, 0, 1)
|
||||
restoreAndAssert(t, aliceChannel, 1, 0, 0, 0)
|
||||
|
||||
aliceRevocation, _, err := aliceChannel.RevokeCurrentCommitment()
|
||||
aliceRevocation, _, _, err := aliceChannel.RevokeCurrentCommitment()
|
||||
require.NoError(t, err, "unable to revoke commitment")
|
||||
_, _, _, _, err = bobChannel.ReceiveRevocation(aliceRevocation)
|
||||
require.NoError(t, err, "bob unable to process alice's revocation")
|
||||
@ -6758,7 +6774,7 @@ func TestChannelRestoreUpdateLogsFailedHTLC(t *testing.T) {
|
||||
// When Alice receives Bob's revocation, the Fail is irrevocably locked
|
||||
// in on both sides. She should compact the logs, removing the HTLC and
|
||||
// the corresponding Fail from the local update log.
|
||||
bobRevocation, _, err := bobChannel.RevokeCurrentCommitment()
|
||||
bobRevocation, _, _, err := bobChannel.RevokeCurrentCommitment()
|
||||
require.NoError(t, err, "unable to revoke commitment")
|
||||
_, _, _, _, err = aliceChannel.ReceiveRevocation(bobRevocation)
|
||||
require.NoError(t, err, "unable to receive revocation")
|
||||
@ -6974,7 +6990,7 @@ func TestChannelRestoreCommitHeight(t *testing.T) {
|
||||
// Bob receives this commitment signature, and revokes his old state.
|
||||
err = bobChannel.ReceiveNewCommitment(aliceSig, aliceHtlcSigs)
|
||||
require.NoError(t, err, "unable to receive commitment")
|
||||
bobRevocation, _, err := bobChannel.RevokeCurrentCommitment()
|
||||
bobRevocation, _, _, err := bobChannel.RevokeCurrentCommitment()
|
||||
require.NoError(t, err, "unable to revoke commitment")
|
||||
|
||||
// Now the HTLC is locked into Bob's commitment, a restoration should
|
||||
@ -7004,7 +7020,7 @@ func TestChannelRestoreCommitHeight(t *testing.T) {
|
||||
|
||||
err = aliceChannel.ReceiveNewCommitment(bobSig, bobHtlcSigs)
|
||||
require.NoError(t, err, "unable to receive commitment")
|
||||
aliceRevocation, _, err := aliceChannel.RevokeCurrentCommitment()
|
||||
aliceRevocation, _, _, err := aliceChannel.RevokeCurrentCommitment()
|
||||
require.NoError(t, err, "unable to revoke commitment")
|
||||
|
||||
// Now both the local and remote add heights should be properly set.
|
||||
@ -7046,7 +7062,7 @@ func TestChannelRestoreCommitHeight(t *testing.T) {
|
||||
|
||||
err = bobChannel.ReceiveNewCommitment(aliceSig, aliceHtlcSigs)
|
||||
require.NoError(t, err, "unable to receive commitment")
|
||||
bobRevocation, _, err = bobChannel.RevokeCurrentCommitment()
|
||||
bobRevocation, _, _, err = bobChannel.RevokeCurrentCommitment()
|
||||
require.NoError(t, err, "unable to revoke commitment")
|
||||
|
||||
// Since Bob just revoked another commitment, a restoration should
|
||||
@ -7082,7 +7098,7 @@ func TestChannelRestoreCommitHeight(t *testing.T) {
|
||||
// Alice should receive the commitment and send over a revocation.
|
||||
err = aliceChannel.ReceiveNewCommitment(bobSig, bobHtlcSigs)
|
||||
require.NoError(t, err, "unable to receive commitment")
|
||||
aliceRevocation, _, err = aliceChannel.RevokeCurrentCommitment()
|
||||
aliceRevocation, _, _, err = aliceChannel.RevokeCurrentCommitment()
|
||||
require.NoError(t, err, "unable to revoke commitment")
|
||||
|
||||
// Both heights should be 2 and they are on both commitments.
|
||||
@ -7119,7 +7135,7 @@ func TestChannelRestoreCommitHeight(t *testing.T) {
|
||||
// Alice receives commitment, sends revocation.
|
||||
err = aliceChannel.ReceiveNewCommitment(bobSig, bobHtlcSigs)
|
||||
require.NoError(t, err, "unable to receive commitment")
|
||||
_, _, err = aliceChannel.RevokeCurrentCommitment()
|
||||
_, _, _, err = aliceChannel.RevokeCurrentCommitment()
|
||||
require.NoError(t, err, "unable to revoke commitment")
|
||||
|
||||
aliceChannel = restoreAndAssertCommitHeights(
|
||||
@ -7173,7 +7189,7 @@ func TestForceCloseBorkedState(t *testing.T) {
|
||||
require.NoError(t, err, "unable to sign commit")
|
||||
err = bobChannel.ReceiveNewCommitment(aliceSigs, aliceHtlcSigs)
|
||||
require.NoError(t, err, "unable to receive commitment")
|
||||
revokeMsg, _, err := bobChannel.RevokeCurrentCommitment()
|
||||
revokeMsg, _, _, err := bobChannel.RevokeCurrentCommitment()
|
||||
require.NoError(t, err, "unable to revoke bob commitment")
|
||||
bobSigs, bobHtlcSigs, _, err := bobChannel.SignNextCommitment()
|
||||
require.NoError(t, err, "unable to sign commit")
|
||||
@ -7213,7 +7229,7 @@ func TestForceCloseBorkedState(t *testing.T) {
|
||||
if err != channeldb.ErrChanBorked {
|
||||
t.Fatalf("sign commitment should have failed: %v", err)
|
||||
}
|
||||
_, _, err = aliceChannel.RevokeCurrentCommitment()
|
||||
_, _, _, err = aliceChannel.RevokeCurrentCommitment()
|
||||
if err != channeldb.ErrChanBorked {
|
||||
t.Fatalf("append remove chain tail should have failed")
|
||||
}
|
||||
@ -8792,7 +8808,7 @@ func TestChannelUnsignedAckedFailure(t *testing.T) {
|
||||
|
||||
// Alice should reply with a revocation.
|
||||
// -----rev----->
|
||||
aliceRevocation, _, err := aliceChannel.RevokeCurrentCommitment()
|
||||
aliceRevocation, _, _, err := aliceChannel.RevokeCurrentCommitment()
|
||||
require.NoError(t, err)
|
||||
_, _, _, _, err = bobChannel.ReceiveRevocation(aliceRevocation)
|
||||
require.NoError(t, err)
|
||||
@ -8817,7 +8833,7 @@ func TestChannelUnsignedAckedFailure(t *testing.T) {
|
||||
// Bob revokes his current commitment and sends a revocation
|
||||
// to Alice.
|
||||
// <----rev------
|
||||
bobRevocation, _, err := bobChannel.RevokeCurrentCommitment()
|
||||
bobRevocation, _, _, err := bobChannel.RevokeCurrentCommitment()
|
||||
require.NoError(t, err)
|
||||
_, _, _, _, err = newAliceChannel.ReceiveRevocation(bobRevocation)
|
||||
require.NoError(t, err)
|
||||
@ -8903,7 +8919,7 @@ func TestChannelLocalUnsignedUpdatesFailure(t *testing.T) {
|
||||
// Bob should reply with a revocation and Alice should save the fail as
|
||||
// an unsigned local update.
|
||||
// <----rev-----
|
||||
bobRevocation, _, err := bobChannel.RevokeCurrentCommitment()
|
||||
bobRevocation, _, _, err := bobChannel.RevokeCurrentCommitment()
|
||||
require.NoError(t, err)
|
||||
_, _, _, _, err = aliceChannel.ReceiveRevocation(bobRevocation)
|
||||
require.NoError(t, err)
|
||||
@ -8987,7 +9003,7 @@ func TestChannelSignedAckRegression(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
// <----rev-----
|
||||
bobRevocation, _, err := bobChannel.RevokeCurrentCommitment()
|
||||
bobRevocation, _, _, err := bobChannel.RevokeCurrentCommitment()
|
||||
require.NoError(t, err)
|
||||
_, _, _, _, err = aliceChannel.ReceiveRevocation(bobRevocation)
|
||||
require.NoError(t, err)
|
||||
@ -9014,7 +9030,7 @@ func TestChannelSignedAckRegression(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
// <----rev-----
|
||||
bobRevocation, _, err = bobChannel.RevokeCurrentCommitment()
|
||||
bobRevocation, _, _, err = bobChannel.RevokeCurrentCommitment()
|
||||
require.NoError(t, err)
|
||||
_, _, _, _, err = aliceChannel.ReceiveRevocation(bobRevocation)
|
||||
require.NoError(t, err)
|
||||
@ -9027,7 +9043,7 @@ func TestChannelSignedAckRegression(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
// -----rev---->
|
||||
aliceRevocation, _, err := aliceChannel.RevokeCurrentCommitment()
|
||||
aliceRevocation, _, _, err := aliceChannel.RevokeCurrentCommitment()
|
||||
require.NoError(t, err)
|
||||
fwdPkg, _, _, _, err := newBobChannel.ReceiveRevocation(aliceRevocation)
|
||||
require.NoError(t, err)
|
||||
@ -9119,7 +9135,7 @@ func TestIsChannelClean(t *testing.T) {
|
||||
assertCleanOrDirty(false, aliceChannel, bobChannel, t)
|
||||
|
||||
// <---rev---
|
||||
bobRevocation, _, err := bobChannel.RevokeCurrentCommitment()
|
||||
bobRevocation, _, _, err := bobChannel.RevokeCurrentCommitment()
|
||||
require.NoError(t, err)
|
||||
_, _, _, _, err = aliceChannel.ReceiveRevocation(bobRevocation)
|
||||
require.NoError(t, err)
|
||||
@ -9133,7 +9149,7 @@ func TestIsChannelClean(t *testing.T) {
|
||||
assertCleanOrDirty(false, aliceChannel, bobChannel, t)
|
||||
|
||||
// ---rev--->
|
||||
aliceRevocation, _, err := aliceChannel.RevokeCurrentCommitment()
|
||||
aliceRevocation, _, _, err := aliceChannel.RevokeCurrentCommitment()
|
||||
require.NoError(t, err)
|
||||
_, _, _, _, err = bobChannel.ReceiveRevocation(aliceRevocation)
|
||||
require.NoError(t, err)
|
||||
@ -9154,7 +9170,7 @@ func TestIsChannelClean(t *testing.T) {
|
||||
assertCleanOrDirty(false, aliceChannel, bobChannel, t)
|
||||
|
||||
// ---rev--->
|
||||
aliceRevocation, _, err = aliceChannel.RevokeCurrentCommitment()
|
||||
aliceRevocation, _, _, err = aliceChannel.RevokeCurrentCommitment()
|
||||
require.NoError(t, err)
|
||||
_, _, _, _, err = bobChannel.ReceiveRevocation(aliceRevocation)
|
||||
require.NoError(t, err)
|
||||
@ -9168,7 +9184,7 @@ func TestIsChannelClean(t *testing.T) {
|
||||
assertCleanOrDirty(false, aliceChannel, bobChannel, t)
|
||||
|
||||
// <---rev---
|
||||
bobRevocation, _, err = bobChannel.RevokeCurrentCommitment()
|
||||
bobRevocation, _, _, err = bobChannel.RevokeCurrentCommitment()
|
||||
require.NoError(t, err)
|
||||
_, _, _, _, err = aliceChannel.ReceiveRevocation(bobRevocation)
|
||||
require.NoError(t, err)
|
||||
@ -9192,7 +9208,7 @@ func TestIsChannelClean(t *testing.T) {
|
||||
assertCleanOrDirty(false, aliceChannel, bobChannel, t)
|
||||
|
||||
// <---rev---
|
||||
bobRevocation, _, err = bobChannel.RevokeCurrentCommitment()
|
||||
bobRevocation, _, _, err = bobChannel.RevokeCurrentCommitment()
|
||||
require.NoError(t, err)
|
||||
_, _, _, _, err = aliceChannel.ReceiveRevocation(bobRevocation)
|
||||
require.NoError(t, err)
|
||||
@ -9207,7 +9223,7 @@ func TestIsChannelClean(t *testing.T) {
|
||||
|
||||
// The state should finally be clean after alice sends her revocation.
|
||||
// ---rev--->
|
||||
aliceRevocation, _, err = aliceChannel.RevokeCurrentCommitment()
|
||||
aliceRevocation, _, _, err = aliceChannel.RevokeCurrentCommitment()
|
||||
require.NoError(t, err)
|
||||
_, _, _, _, err = bobChannel.ReceiveRevocation(aliceRevocation)
|
||||
require.NoError(t, err)
|
||||
@ -9346,7 +9362,7 @@ func testGetDustSum(t *testing.T, chantype channeldb.ChannelType) {
|
||||
// Bob now sends a revocation for his prior commitment, and this should
|
||||
// change Alice's perspective to no longer include the first HTLC as
|
||||
// dust.
|
||||
bobRevocation, _, err := bobChannel.RevokeCurrentCommitment()
|
||||
bobRevocation, _, _, err := bobChannel.RevokeCurrentCommitment()
|
||||
require.NoError(t, err)
|
||||
_, _, _, _, err = aliceChannel.ReceiveRevocation(bobRevocation)
|
||||
require.NoError(t, err)
|
||||
@ -9359,7 +9375,7 @@ func testGetDustSum(t *testing.T, chantype channeldb.ChannelType) {
|
||||
require.NoError(t, err)
|
||||
err = aliceChannel.ReceiveNewCommitment(bobSig, bobHtlcSigs)
|
||||
require.NoError(t, err)
|
||||
aliceRevocation, _, err := aliceChannel.RevokeCurrentCommitment()
|
||||
aliceRevocation, _, _, err := aliceChannel.RevokeCurrentCommitment()
|
||||
require.NoError(t, err)
|
||||
_, _, _, _, err = bobChannel.ReceiveRevocation(aliceRevocation)
|
||||
require.NoError(t, err)
|
||||
|
@ -502,7 +502,7 @@ func ForceStateTransition(chanA, chanB *LightningChannel) error {
|
||||
return err
|
||||
}
|
||||
|
||||
bobRevocation, _, err := chanB.RevokeCurrentCommitment()
|
||||
bobRevocation, _, _, err := chanB.RevokeCurrentCommitment()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -518,7 +518,7 @@ func ForceStateTransition(chanA, chanB *LightningChannel) error {
|
||||
return err
|
||||
}
|
||||
|
||||
aliceRevocation, _, err := chanA.RevokeCurrentCommitment()
|
||||
aliceRevocation, _, _, err := chanA.RevokeCurrentCommitment()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -291,7 +291,7 @@ func testVectors(t *testing.T, chanType channeldb.ChannelType, test testCase) {
|
||||
err = remoteChannel.ReceiveNewCommitment(localSig, localHtlcSigs)
|
||||
require.NoError(t, err)
|
||||
|
||||
revMsg, _, err := remoteChannel.RevokeCurrentCommitment()
|
||||
revMsg, _, _, err := remoteChannel.RevokeCurrentCommitment()
|
||||
require.NoError(t, err)
|
||||
|
||||
_, _, _, _, err = localChannel.ReceiveRevocation(revMsg)
|
||||
@ -309,7 +309,7 @@ func testVectors(t *testing.T, chanType channeldb.ChannelType, test testCase) {
|
||||
err = localChannel.ReceiveNewCommitment(remoteSig, remoteHtlcSigs)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, _, err = localChannel.RevokeCurrentCommitment()
|
||||
_, _, _, err = localChannel.RevokeCurrentCommitment()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Now the local node force closes the channel so that we can inspect
|
||||
|
rpcserver.go

@ -577,6 +577,10 @@ func MainRPCServerPermissions() map[string][]bakery.Op {
		Entity: "offchain",
		Action: "read",
	}},
	"/lnrpc.Lightning/LookupHtlc": {{
		Entity: "offchain",
		Action: "read",
	}},
	"/lnrpc.Lightning/ListAliases": {{
		Entity: "offchain",
		Action: "read",
@ -3904,6 +3908,28 @@ func (r *rpcServer) ClosedChannels(ctx context.Context,
	return resp, nil
}

// LookupHtlc retrieves a final htlc resolution from the database. If the htlc
// has no final resolution yet, a NotFound grpc status code is returned.
func (r *rpcServer) LookupHtlc(ctx context.Context,
	in *lnrpc.LookupHtlcRequest) (*lnrpc.LookupHtlcResponse, error) {

	chanID := lnwire.NewShortChanIDFromInt(in.ChanId)

	info, err := r.server.chanStateDB.LookupFinalHtlc(chanID, in.HtlcIndex)
	switch {
	case errors.Is(err, channeldb.ErrHtlcUnknown):
		return nil, status.Error(codes.NotFound, err.Error())

	case err != nil:
		return nil, err
	}

	return &lnrpc.LookupHtlcResponse{
		Settled:  info.Settled,
		Offchain: info.Offchain,
	}, nil
}

// ListChannels returns a description of all the open channels that this node
// is a participant in.
func (r *rpcServer) ListChannels(ctx context.Context,

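A rough sketch of calling the new RPC from a Go client; the channel id and htlc index values are placeholders, the gRPC connection is assumed to be authenticated already, and the imports assumed are context, fmt, github.com/lightningnetwork/lnd/lnrpc, google.golang.org/grpc, google.golang.org/grpc/codes and google.golang.org/grpc/status:

// lookupFinalHtlc queries lnd for the final outcome of a single htlc.
func lookupFinalHtlc(ctx context.Context, conn *grpc.ClientConn,
	chanID, htlcIndex uint64) error {

	client := lnrpc.NewLightningClient(conn)

	resp, err := client.LookupHtlc(ctx, &lnrpc.LookupHtlcRequest{
		ChanId:    chanID,
		HtlcIndex: htlcIndex,
	})
	switch {
	case status.Code(err) == codes.NotFound:
		// The htlc has no final resolution recorded yet.
		fmt.Println("htlc not resolved yet")
		return nil

	case err != nil:
		return err
	}

	fmt.Printf("settled=%v offchain=%v\n", resp.Settled, resp.Offchain)
	return nil
}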
@ -1165,6 +1165,8 @@ func newServer(cfg *Config, listenAddrs []net.Addr,
		IsForwardedHTLC:         s.htlcSwitch.IsForwardedHTLC,
		Clock:                   clock.NewDefaultClock(),
		SubscribeBreachComplete: s.breachArbiter.SubscribeBreachComplete,
		PutFinalHtlcOutcome:     s.chanStateDB.PutOnchainFinalHtlcOutcome, // nolint: lll
		HtlcNotifier:            s.htlcNotifier,
	}, dbs.ChanStateDB)

	// Select the configuration and funding parameters for Bitcoin or