Mirror of https://github.com/lightningnetwork/lnd.git (synced 2025-03-29 03:01:52 +01:00)

commit 8a4531fc96
parent a7fa041a0b

    multi: fix up custom records
@@ -2550,6 +2550,8 @@ type HTLC struct {
 	// HTLC. It is stored in the ExtraData field, which is used to store
 	// a TLV stream of additional information associated with the HTLC.
 	BlindingPoint lnwire.BlindingPointRecord
+
+	CustomRecords lnwire.CustomRecords
 }

 // serializeExtraData encodes a TLV stream of extra data to be stored with a
@@ -2568,6 +2570,11 @@ func (h *HTLC) serializeExtraData() error {
 		records = append(records, &b)
 	})

+	records, err := h.CustomRecords.ExtendRecordProducers(records)
+	if err != nil {
+		return err
+	}
+
 	return h.ExtraData.PackRecords(records...)
 }

@@ -2589,8 +2596,19 @@ func (h *HTLC) deserializeExtraData() error {

 	if val, ok := tlvMap[h.BlindingPoint.TlvType()]; ok && val == nil {
 		h.BlindingPoint = tlv.SomeRecordT(blindingPoint)
+
+		// Remove the entry from the TLV map. Anything left in the map
+		// will be included in the custom records field.
+		delete(tlvMap, h.BlindingPoint.TlvType())
 	}

+	// Set the custom records field to the remaining TLV records.
+	customRecords, err := lnwire.NewCustomRecordsFromTlvTypeMap(tlvMap)
+	if err != nil {
+		return err
+	}
+	h.CustomRecords = customRecords
+
 	return nil
 }

@@ -2728,6 +2746,7 @@ func (h *HTLC) Copy() HTLC {
 	copy(clone.Signature[:], h.Signature)
 	copy(clone.RHash[:], h.RHash[:])
 	copy(clone.ExtraData, h.ExtraData)
+	clone.CustomRecords = h.CustomRecords.Copy()

 	return clone
 }
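For context, the new serializeExtraData flow above first merges the HTLC's custom records into the known TLV record producers (ExtendRecordProducers) and then packs everything into the opaque ExtraData blob (PackRecords). Below is a small, self-contained sketch of that call sequence; it is not part of the commit and only uses the lnwire APIs visible in this diff.

package main

import (
	"fmt"

	"github.com/lightningnetwork/lnd/lnwire"
)

func main() {
	// Custom records must live in the custom TLV range (>= 65536).
	customRecords := lnwire.CustomRecords{
		65536: {0x01},
		65537: {0xaa, 0xbb},
	}

	// Merge the custom records with any known record producers the HTLC
	// already has (none in this sketch).
	producers, err := customRecords.ExtendRecordProducers(nil)
	if err != nil {
		panic(err)
	}

	// Pack the combined records into the HTLC's opaque extra data.
	var extra lnwire.ExtraOpaqueData
	if err := extra.PackRecords(producers...); err != nil {
		panic(err)
	}

	fmt.Printf("packed %d bytes of extra data\n", len(extra))
}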
@@ -5,6 +5,7 @@ import (
 	"fmt"
 	"sync"

+	"github.com/davecgh/go-spew/spew"
 	"github.com/go-errors/errors"
 	"github.com/lightningnetwork/lnd/chainntnfs"
 	"github.com/lightningnetwork/lnd/channeldb/models"
@@ -489,8 +490,6 @@ func (s *InterceptableSwitch) interceptForward(packet *htlcPacket,
 		return false, nil
 	}

-	packet.wireRecords = record.CustomSet(htlc.CustomRecords)
-
 	intercepted := &interceptedForward{
 		htlc:   htlc,
 		packet: packet,
@@ -633,7 +632,7 @@ func (f *interceptedForward) Packet() InterceptedPacket {
 		CustomRecords:  f.packet.customRecords,
 		OnionBlob:      f.htlc.OnionBlob,
 		AutoFailHeight: f.autoFailHeight,
-		CustomPeerRecords: f.packet.wireRecords,
+		WireCustomRecords: record.CustomSet(f.htlc.CustomRecords),
 	}
 }

@@ -685,6 +684,8 @@ func (f *interceptedForward) ResumeModified(
 		}
 	}

+	log.Tracef("Forwarding packet %v", spew.Sdump(f.packet))
+
 	// Forward to the switch. A link quit channel isn't needed, because we
 	// are on a different thread now.
 	return f.htlcSwitch.ForwardPackets(nil, f.packet)
@@ -368,9 +368,9 @@ type InterceptedPacket struct {
 	// OnionBlob is the onion packet for the next hop
 	OnionBlob [lnwire.OnionPacketSize]byte

-	// CustomPeerRecords are user-defined records that were defined by the
+	// WireCustomRecords are user-defined records that were defined by the
 	// peer that forwarded this htlc to us.
-	CustomPeerRecords record.CustomSet
+	WireCustomRecords record.CustomSet

 	// AutoFailHeight is the block height at which this intercept will be
 	// failed back automatically.
@@ -3376,7 +3376,29 @@ func (l *channelLink) processRemoteAdds(fwdPkg *channeldb.FwdPkg,
 			Amount:        fwdInfo.AmountToForward,
 			PaymentHash:   pd.RHash,
 			BlindingPoint: fwdInfo.NextBlinding,
-			CustomRecords: pd.WireRecords,
+		}
+
+		err = fn.MapOptionZ(
+			pd.CustomRecords,
+			func(b tlv.Blob) error {
+				r, err := lnwire.ParseCustomRecords(
+					b,
+				)
+				if err != nil {
+					return err
+				}
+
+				addMsg.CustomRecords = r
+
+				return nil
+			},
+		)
+		if err != nil {
+			l.fail(LinkFailureError{
+				code: ErrInternalError,
+			}, err.Error())
+
+			return
 		}

 		// Finally, we'll encode the onion packet for
@@ -3423,7 +3445,26 @@ func (l *channelLink) processRemoteAdds(fwdPkg *channeldb.FwdPkg,
 				Amount:        fwdInfo.AmountToForward,
 				PaymentHash:   pd.RHash,
 				BlindingPoint: fwdInfo.NextBlinding,
-				CustomRecords: pd.WireRecords,
+			}
+
+			err = fn.MapOptionZ(
+				pd.CustomRecords, func(b tlv.Blob) error {
+					r, err := lnwire.ParseCustomRecords(b)
+					if err != nil {
+						return err
+					}
+
+					addMsg.CustomRecords = r
+
+					return nil
+				},
+			)
+			if err != nil {
+				l.fail(LinkFailureError{
+					code: ErrInternalError,
+				}, err.Error())
+
+				return
+			}

 			// Finally, we'll encode the onion packet for the
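The two processRemoteAdds hunks above use a pattern that recurs throughout this commit: the payment descriptor keeps its custom records as an fn.Option[tlv.Blob], and fn.MapOptionZ runs the parsing callback only when that option is populated. The following self-contained sketch is not from the commit; it assumes lnd's fn and tlv packages and the ParseCustomRecords/Serialize helpers added later in this diff.

package main

import (
	"fmt"

	"github.com/lightningnetwork/lnd/fn"
	"github.com/lightningnetwork/lnd/lnwire"
	"github.com/lightningnetwork/lnd/tlv"
)

func main() {
	// Pretend this blob came from a stored payment descriptor.
	blob, err := lnwire.CustomRecords{65536: {0x01}}.Serialize()
	if err != nil {
		panic(err)
	}
	optBlob := fn.Some[tlv.Blob](blob)

	// MapOptionZ only invokes the closure when the option is populated;
	// for an empty option it returns the zero value of the result type
	// (a nil error here).
	var parsed lnwire.CustomRecords
	err = fn.MapOptionZ(optBlob, func(b tlv.Blob) error {
		r, err := lnwire.ParseCustomRecords(b)
		if err != nil {
			return err
		}
		parsed = r

		return nil
	})
	if err != nil {
		panic(err)
	}

	fmt.Println(len(parsed)) // 1
}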
@@ -98,10 +98,6 @@ type htlcPacket struct {
 	// were included in the payload.
 	customRecords record.CustomSet

-	// wireRecords are user-defined records in the custom type range that
-	// were included in the peer's wire message.
-	wireRecords record.CustomSet
-
 	// originalOutgoingChanID is used when sending back failure messages.
 	// It is only used for forwarded Adds on option_scid_alias channels.
 	// This is to avoid possible confusion if a payer uses the public SCID
@@ -2484,11 +2484,13 @@ func (s *Switch) getLinkByMapping(pkt *htlcPacket) (ChannelLink, error) {
 	chanID := pkt.outgoingChanID
 	aliasID := s.cfg.IsAlias(chanID)

+	_, haveAliasMapping := s.aliasToReal[chanID]
+
 	// Set the originalOutgoingChanID so the proper channel_update can be
 	// sent back if the option-scid-alias feature bit was negotiated.
 	pkt.originalOutgoingChanID = chanID

-	if aliasID {
+	if aliasID || haveAliasMapping {
 		// Since outgoingChanID is an alias, we'll fetch the link via
 		// baseIndex.
 		baseScid, ok := s.baseIndex[chanID]
@@ -93,7 +93,7 @@ func (r *forwardInterceptor) onIntercept(
 		CustomRecords:     htlc.CustomRecords,
 		OnionBlob:         htlc.OnionBlob[:],
 		AutoFailHeight:    htlc.AutoFailHeight,
-		WireCustomRecords: htlc.CustomPeerRecords,
+		WireCustomRecords: htlc.WireCustomRecords,
 	}

 	return r.stream.Send(interceptionRequest)
@@ -335,7 +335,7 @@ type PaymentDescriptor struct {
 	// NOTE: Populated only on add payment descriptor entry types.
 	OnionBlob []byte

-	// CustomRecrods also stores the set of optional custom records that
+	// CustomRecords also stores the set of optional custom records that
 	// may have been attached to a sent HTLC.
 	CustomRecords fn.Option[tlv.Blob]

@@ -383,10 +383,6 @@ type PaymentDescriptor struct {
 	// blinded route (ie, not the introduction node) from update_add_htlc's
 	// TLVs.
 	BlindingPoint lnwire.BlindingPointRecord
-
-	// WireRecords contains the TLV records blob that was included in
-	// the original wire message that added this HTLC.
-	WireRecords lnwire.CustomRecords
 }

 // AddHeight returns a pointer to the height at which the HTLC was added to the
@@ -454,6 +450,17 @@ func PayDescsFromRemoteLogUpdates(chanID lnwire.ShortChannelID, height uint64,
 			pd.OnionBlob = make([]byte, len(wireMsg.OnionBlob))
 			copy(pd.OnionBlob[:], wireMsg.OnionBlob[:])

+			if len(wireMsg.CustomRecords) > 0 {
+				b, err := wireMsg.CustomRecords.Serialize()
+				if err != nil {
+					return nil, fmt.Errorf("error "+
+						"serializing custom records: "+
+						"%w", err)
+				}
+
+				pd.CustomRecords = fn.Some[tlv.Blob](b)
+			}
+
 		case *lnwire.UpdateFulfillHTLC:
 			pd = PaymentDescriptor{
 				RPreimage: wireMsg.PaymentPreimage,
@@ -745,7 +752,9 @@ func (c *commitment) populateHtlcIndexes(chanType channeldb.ChannelType,

 // toDiskCommit converts the target commitment into a format suitable to be
 // written to disk after an accepted state transition.
-func (c *commitment) toDiskCommit(ourCommit bool) *channeldb.ChannelCommitment {
+func (c *commitment) toDiskCommit(ourCommit bool) (*channeldb.ChannelCommitment,
+	error) {
+
 	numHtlcs := len(c.outgoingHTLCs) + len(c.incomingHTLCs)

 	commit := &channeldb.ChannelCommitment{
@@ -782,11 +791,23 @@ func (c *commitment) toDiskCommit(ourCommit bool) *channeldb.ChannelCommitment {
 		}
 		copy(h.OnionBlob[:], htlc.OnionBlob)

-		// If the HTLC had custom records, then we'll copy that over so
+		// If the HTLC had custom records, then we'll copy that over, so
 		// we restore with the same information.
-		htlc.CustomRecords.WhenSome(func(b tlv.Blob) {
-			copy(h.ExtraData[:], b[:])
-		})
+		err := fn.MapOptionZ(
+			htlc.CustomRecords, func(b tlv.Blob) error {
+				r, err := lnwire.ParseCustomRecords(b)
+				if err != nil {
+					return err
+				}
+
+				h.CustomRecords = r
+
+				return nil
+			},
+		)
+		if err != nil {
+			return nil, err
+		}

 		if ourCommit && htlc.sig != nil {
 			h.Signature = htlc.sig.Serialize()
@@ -813,11 +834,23 @@ func (c *commitment) toDiskCommit(ourCommit bool) *channeldb.ChannelCommitment {
 		}
 		copy(h.OnionBlob[:], htlc.OnionBlob)

-		// If the HTLC had custom records, then we'll copy that over so
+		// If the HTLC had custom records, then we'll copy that over, so
 		// we restore with the same information.
-		htlc.CustomRecords.WhenSome(func(b tlv.Blob) {
-			copy(h.ExtraData[:], b[:])
-		})
+		err := fn.MapOptionZ(
+			htlc.CustomRecords, func(b tlv.Blob) error {
+				r, err := lnwire.ParseCustomRecords(b)
+				if err != nil {
+					return err
+				}
+
+				h.CustomRecords = r
+
+				return nil
+			},
+		)
+		if err != nil {
+			return nil, err
+		}

 		if ourCommit && htlc.sig != nil {
 			h.Signature = htlc.sig.Serialize()
@@ -826,7 +859,7 @@ func (c *commitment) toDiskCommit(ourCommit bool) *channeldb.ChannelCommitment {
 		commit.Htlcs = append(commit.Htlcs, h)
 	}

-	return commit
+	return commit, nil
 }

 // diskHtlcToPayDesc converts an HTLC previously written to disk within a
@@ -920,8 +953,12 @@ func (lc *LightningChannel) diskHtlcToPayDesc(feeRate chainfee.SatPerKWeight,

 	// Ensure that we'll restore any custom records which were stored as
 	// extra data on disk.
-	if len(htlc.ExtraData) != 0 {
-		pd.CustomRecords = fn.Some[tlv.Blob](htlc.ExtraData)
+	if len(htlc.CustomRecords) != 0 {
+		b, err := htlc.CustomRecords.Serialize()
+		if err != nil {
+			return pd, err
+		}
+		pd.CustomRecords = fn.Some[tlv.Blob](b)
 	}

 	return pd, nil
@@ -1678,6 +1715,16 @@ func (lc *LightningChannel) logUpdateToPayDesc(logUpdate *channeldb.LogUpdate,
 		pd.OnionBlob = make([]byte, len(wireMsg.OnionBlob))
 		copy(pd.OnionBlob[:], wireMsg.OnionBlob[:])

+		if len(wireMsg.CustomRecords) > 0 {
+			b, err := wireMsg.CustomRecords.Serialize()
+			if err != nil {
+				return nil, fmt.Errorf("error serializing "+
+					"custom records: %w", err)
+			}
+
+			pd.CustomRecords = fn.Some[tlv.Blob](b)
+		}
+
 		isDustRemote := HtlcIsDust(
 			lc.channelState.ChanType, false, false, feeRate,
 			wireMsg.Amount.ToSatoshis(), remoteDustLimit,
@@ -1883,6 +1930,16 @@ func (lc *LightningChannel) remoteLogUpdateToPayDesc(logUpdate *channeldb.LogUpd
 		pd.OnionBlob = make([]byte, len(wireMsg.OnionBlob))
 		copy(pd.OnionBlob, wireMsg.OnionBlob[:])

+		if len(wireMsg.CustomRecords) > 0 {
+			b, err := wireMsg.CustomRecords.Serialize()
+			if err != nil {
+				return nil, fmt.Errorf("error serializing "+
+					"custom records: %w", err)
+			}
+
+			pd.CustomRecords = fn.Some[tlv.Blob](b)
+		}
+
 		// We don't need to generate an htlc script yet. This will be
 		// done once we sign our remote commitment.

@@ -3620,9 +3677,14 @@ func genRemoteHtlcSigJobs(keyRing *CommitmentKeyRing,
 	var err error
 	cancelChan := make(chan struct{})

+	diskCommit, err := remoteCommitView.toDiskCommit(true)
+	if err != nil {
+		return nil, nil, nil, fmt.Errorf("unable to convert "+
+			"commitment: %w", err)
+	}
+
 	auxLeaves, err := AuxLeavesFromCommit(
-		chanState, *remoteCommitView.toDiskCommit(false), leafStore,
-		*keyRing,
+		chanState, *diskCommit, leafStore, *keyRing,
 	)
 	if err != nil {
 		return nil, nil, nil, fmt.Errorf("unable to fetch aux leaves: "+
@@ -3866,6 +3928,26 @@ func (lc *LightningChannel) createCommitDiff(newCommit *commitment,
 				BlindingPoint: pd.BlindingPoint,
 			}
 			copy(htlc.OnionBlob[:], pd.OnionBlob)
+
+			// Copy over any custom records as extra data that we
+			// may not explicitly know about.
+			err := fn.MapOptionZ(
+				pd.CustomRecords, func(b tlv.Blob) error {
+					r, err := lnwire.ParseCustomRecords(b)
+					if err != nil {
+						return err
+					}
+
+					htlc.CustomRecords = r
+
+					return nil
+				},
+			)
+			if err != nil {
+				return nil, fmt.Errorf("error mapping custom "+
+					"records: %w", err)
+			}
+
 			logUpdate.UpdateMsg = htlc

 			// Gather any references for circuits opened by this Add
@@ -3875,14 +3957,6 @@ func (lc *LightningChannel) createCommitDiff(newCommit *commitment,
 					*pd.OpenCircuitKey)
 			}

-			// Copy over any custom records as extra data that we
-			// may not explicitly know about.
-			pd.CustomRecords.WhenSome(func(b tlv.Blob) {
-				// TODO(roasbeef): needs to merge w/ existing
-				// TLVs
-				copy(htlc.ExtraData[:], b[:])
-			})
-
 			logUpdates = append(logUpdates, logUpdate)

 			// Short circuit here since an add should not have any
@@ -3941,7 +4015,11 @@ func (lc *LightningChannel) createCommitDiff(newCommit *commitment,
 	// With the set of log updates mapped into wire messages, we'll now
 	// convert the in-memory commit into a format suitable for writing to
 	// disk.
-	diskCommit := newCommit.toDiskCommit(false)
+	diskCommit, err := newCommit.toDiskCommit(false)
+	if err != nil {
+		return nil, fmt.Errorf("error converting commitment to disk "+
+			"commit: %w", err)
+	}

 	auxSigBlob, err := packSigs(auxSigs, lc.auxSigner)
 	if err != nil {
@@ -3968,7 +4046,9 @@ func (lc *LightningChannel) createCommitDiff(newCommit *commitment,

 // getUnsignedAckedUpdates returns all remote log updates that we haven't
 // signed for yet ourselves.
-func (lc *LightningChannel) getUnsignedAckedUpdates() []channeldb.LogUpdate {
+func (lc *LightningChannel) getUnsignedAckedUpdates() ([]channeldb.LogUpdate,
+	error) {
+
 	// First, we need to convert the funding outpoint into the ID that's
 	// used on the wire to identify this channel.
 	chanID := lnwire.NewChanIDFromOutPoint(lc.channelState.FundingOutpoint)
@@ -4018,6 +4098,26 @@ func (lc *LightningChannel) getUnsignedAckedUpdates() []channeldb.LogUpdate {
 				BlindingPoint: pd.BlindingPoint,
 			}
 			copy(htlc.OnionBlob[:], pd.OnionBlob)
+
+			// Copy over any custom records as extra data that we
+			// may not explicitly know about.
+			err := fn.MapOptionZ(
+				pd.CustomRecords, func(b tlv.Blob) error {
+					r, err := lnwire.ParseCustomRecords(b)
+					if err != nil {
+						return err
+					}
+
+					htlc.CustomRecords = r
+
+					return nil
+				},
+			)
+			if err != nil {
+				return nil, fmt.Errorf("error mapping custom "+
+					"records: %w", err)
+			}
+
 			logUpdate.UpdateMsg = htlc

 		case Settle:
@@ -4054,7 +4154,7 @@ func (lc *LightningChannel) getUnsignedAckedUpdates() []channeldb.LogUpdate {

 		logUpdates = append(logUpdates, logUpdate)
 	}
-	return logUpdates
+	return logUpdates, nil
 }

 // CalcFeeBuffer calculates a FeeBuffer in accordance with the recommended
@@ -5157,8 +5257,14 @@ func genHtlcSigValidationJobs(chanState *channeldb.OpenChannel,
 	verifyJobs := make([]VerifyJob, 0, numHtlcs)
 	auxVerifyJobs := make([]AuxVerifyJob, 0, numHtlcs)

+	diskCommit, err := localCommitmentView.toDiskCommit(true)
+	if err != nil {
+		return nil, nil, fmt.Errorf("unable to convert commitment: %w",
+			err)
+	}
+
 	auxLeaves, err := AuxLeavesFromCommit(
-		chanState, *localCommitmentView.toDiskCommit(true), leafStore,
+		chanState, *diskCommit, leafStore,
 		*keyRing,
 	)
 	if err != nil {
@@ -5938,12 +6044,18 @@ func (lc *LightningChannel) RevokeCurrentCommitment() (*lnwire.RevokeAndAck,
 	// Additionally, generate a channel delta for this state transition for
 	// persistent storage.
 	chainTail := lc.localCommitChain.tail()
-	newCommitment := chainTail.toDiskCommit(true)
+	newCommitment, err := chainTail.toDiskCommit(true)
+	if err != nil {
+		return nil, nil, nil, err
+	}

 	// Get the unsigned acked remotes updates that are currently in memory.
 	// We need them after a restart to sync our remote commitment with what
 	// is committed locally.
-	unsignedAckedUpdates := lc.getUnsignedAckedUpdates()
+	unsignedAckedUpdates, err := lc.getUnsignedAckedUpdates()
+	if err != nil {
+		return nil, nil, nil, err
+	}

 	finalHtlcs, err := lc.channelState.UpdateCommitment(
 		newCommitment, unsignedAckedUpdates,
@@ -6132,6 +6244,26 @@ func (lc *LightningChannel) ReceiveRevocation(revMsg *lnwire.RevokeAndAck) (
 				BlindingPoint: pd.BlindingPoint,
 			}
 			copy(htlc.OnionBlob[:], pd.OnionBlob)
+
+			// Copy over any custom records as extra data that we
+			// may not explicitly know about.
+			err := fn.MapOptionZ(
+				pd.CustomRecords, func(b tlv.Blob) error {
+					r, err := lnwire.ParseCustomRecords(b)
+					if err != nil {
+						return err
+					}
+
+					htlc.CustomRecords = r
+
+					return nil
+				},
+			)
+			if err != nil {
+				return nil, nil, nil, nil, fmt.Errorf("error "+
+					"mapping custom records: %w", err)
+			}
+
 			logUpdate.UpdateMsg = htlc
 			addUpdates = append(addUpdates, logUpdate)

@@ -6333,7 +6465,11 @@ func (lc *LightningChannel) addHTLC(htlc *lnwire.UpdateAddHTLC,
 	lc.Lock()
 	defer lc.Unlock()

-	pd := lc.htlcAddDescriptor(htlc, openKey)
+	pd, err := lc.htlcAddDescriptor(htlc, openKey)
+	if err != nil {
+		return 0, err
+	}
+
 	if err := lc.validateAddHtlc(pd, buffer); err != nil {
 		return 0, err
 	}
@@ -6437,12 +6573,16 @@ func (lc *LightningChannel) MayAddOutgoingHtlc(amt lnwire.MilliSatoshi) error {
 	// to the commitment so that we validate commitment slots rather than
 	// available balance, since our actual htlc amount is unknown at this
 	// stage.
-	pd := lc.htlcAddDescriptor(
+	pd, err := lc.htlcAddDescriptor(
 		&lnwire.UpdateAddHTLC{
 			Amount: mockHtlcAmt,
 		},
 		&models.CircuitKey{},
 	)
+	if err != nil {
+		lc.log.Errorf("Error adding htlc descriptor: %v", err)
+		return err
+	}

 	// Enforce the FeeBuffer because we are evaluating whether we can add
 	// another htlc to the channel state.
@@ -6457,7 +6597,7 @@ func (lc *LightningChannel) MayAddOutgoingHtlc(amt lnwire.MilliSatoshi) error {
 // htlcAddDescriptor returns a payment descriptor for the htlc and open key
 // provided to add to our local update log.
 func (lc *LightningChannel) htlcAddDescriptor(htlc *lnwire.UpdateAddHTLC,
-	openKey *models.CircuitKey) *PaymentDescriptor {
+	openKey *models.CircuitKey) (*PaymentDescriptor, error) {

 	pd := &PaymentDescriptor{
 		EntryType: Add,
@@ -6473,11 +6613,17 @@ func (lc *LightningChannel) htlcAddDescriptor(htlc *lnwire.UpdateAddHTLC,

 	// Copy over any extra data included to ensure we can forward and
 	// process this HTLC properly.
-	if len(htlc.ExtraData) != 0 {
-		pd.CustomRecords = fn.Some[tlv.Blob](htlc.ExtraData[:])
+	if len(htlc.CustomRecords) > 0 {
+		b, err := htlc.CustomRecords.Serialize()
+		if err != nil {
+			return nil, fmt.Errorf("error serializing custom "+
+				"records: %w", err)
+		}
+
+		pd.CustomRecords = fn.Some[tlv.Blob](b)
 	}

-	return pd
+	return pd, nil
 }

 // validateAddHtlc validates the addition of an outgoing htlc to our local and
@@ -6535,13 +6681,16 @@ func (lc *LightningChannel) ReceiveHTLC(htlc *lnwire.UpdateAddHTLC) (uint64, err
 		HtlcIndex:     lc.remoteUpdateLog.htlcCounter,
 		OnionBlob:     htlc.OnionBlob[:],
 		BlindingPoint: htlc.BlindingPoint,
-		WireRecords:   htlc.CustomRecords,
 	}

-	// Copy over any extra data included to ensure we can forward and
-	// process this HTLC properly.
-	if htlc.ExtraData != nil {
-		pd.CustomRecords = fn.Some(tlv.Blob(htlc.ExtraData[:]))
+	if len(htlc.CustomRecords) > 0 {
+		b, err := htlc.CustomRecords.Serialize()
+		if err != nil {
+			return 0, fmt.Errorf("error serializing custom "+
+				"records: %w", err)
+		}
+
+		pd.CustomRecords = fn.Some[tlv.Blob](b)
 	}

 	localACKedIndex := lc.remoteCommitChain.tail().ourMessageIndex
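The ReceiveHTLC and htlcAddDescriptor changes above both take the lnwire.CustomRecords carried on an incoming update_add_htlc, serialize them, and store the blob as an fn.Option[tlv.Blob] on the payment descriptor. Below is a rough, illustrative sketch of just that step; it is not the commit's code, and the IsSome call is an assumption about lnd's fn.Option API.

package main

import (
	"fmt"

	"github.com/lightningnetwork/lnd/fn"
	"github.com/lightningnetwork/lnd/lnwire"
	"github.com/lightningnetwork/lnd/tlv"
)

func main() {
	htlc := &lnwire.UpdateAddHTLC{
		CustomRecords: lnwire.CustomRecords{65536: {0x2a}},
	}

	// Mirror of the descriptor field: stays empty unless the wire message
	// actually carried custom records.
	var customRecords fn.Option[tlv.Blob]

	if len(htlc.CustomRecords) > 0 {
		b, err := htlc.CustomRecords.Serialize()
		if err != nil {
			panic(err)
		}

		customRecords = fn.Some[tlv.Blob](b)
	}

	fmt.Println(customRecords.IsSome()) // true
}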
@@ -1,6 +1,7 @@
 package lnwire

 import (
+	"bytes"
 	"fmt"
 	"sort"

@@ -13,13 +14,33 @@ const (
 	MinCustomRecordsTlvType = 65536
 )

+// ParseCustomRecords decodes a set of custom records from a byte slice.
+func ParseCustomRecords(b tlv.Blob) (CustomRecords, error) {
+	stream, err := tlv.NewStream()
+	if err != nil {
+		return nil, fmt.Errorf("error creating stream: %w", err)
+	}
+
+	typeMap, err := stream.DecodeWithParsedTypes(bytes.NewReader(b))
+	if err != nil {
+		return nil, fmt.Errorf("error decoding HTLC record: %w", err)
+	}
+
+	customRecords := make(CustomRecords, len(typeMap))
+	for k, v := range typeMap {
+		customRecords[uint64(k)] = v
+	}
+
+	return customRecords, nil
+}
+
 // CustomRecords stores a set of custom key/value pairs. Map keys are TLV types
 // which must be greater than or equal to MinCustomRecordsTlvType.
 type CustomRecords map[uint64][]byte

 // NewCustomRecordsFromTlvTypeMap creates a new CustomRecords instance from a
 // tlv.TypeMap.
-func NewCustomRecordsFromTlvTypeMap(tlvMap tlv.TypeMap) (*CustomRecords,
+func NewCustomRecordsFromTlvTypeMap(tlvMap tlv.TypeMap) (CustomRecords,
 	error) {

 	customRecords := make(CustomRecords, len(tlvMap))
@@ -34,16 +55,16 @@ func NewCustomRecordsFromTlvTypeMap(tlvMap tlv.TypeMap) (*CustomRecords,
 			"validation error: %v", err)
 	}

-	return &customRecords, nil
+	return customRecords, nil
 }

 // Validate checks that all custom records are in the custom type range.
-func (c *CustomRecords) Validate() error {
+func (c CustomRecords) Validate() error {
 	if c == nil {
 		return nil
 	}

-	for key := range *c {
+	for key := range c {
 		if key < MinCustomRecordsTlvType {
 			return fmt.Errorf("custom records entry with TLV "+
 				"type below min: %d", MinCustomRecordsTlvType)
@@ -53,15 +74,23 @@ func (c *CustomRecords) Validate() error {
 	return nil
 }

+func (c CustomRecords) Copy() CustomRecords {
+	customRecords := make(CustomRecords, len(c))
+	for k, v := range c {
+		customRecords[k] = v
+	}
+	return customRecords
+}
+
 // ExtendRecordProducers extends the given records slice with the custom
 // records. The resultant records slice will be sorted if the given records
 // slice contains TLV types greater than or equal to MinCustomRecordsTlvType.
-func (c *CustomRecords) ExtendRecordProducers(
-	records []tlv.RecordProducer) ([]tlv.RecordProducer, error) {
+func (c CustomRecords) ExtendRecordProducers(
+	producers []tlv.RecordProducer) ([]tlv.RecordProducer, error) {

 	// If the custom records are nil or empty, there is nothing to do.
-	if c == nil || len(*c) == 0 {
-		return records, nil
+	if c == nil || len(c) == 0 {
+		return producers, nil
 	}

 	// Validate the custom records.
@@ -78,11 +107,11 @@ func (c *CustomRecords) ExtendRecordProducers(
 	// Ensure that the existing records slice TLV types are not also present
 	// in the custom records. If they are, the resultant extended records
 	// slice would erroneously contain duplicate TLV types.
-	for _, recordProducer := range records {
-		record := recordProducer.Record()
+	for _, rp := range producers {
+		record := rp.Record()
 		recordTlvType := uint64(record.Type())

-		_, foundDuplicateTlvType := (*c)[recordTlvType]
+		_, foundDuplicateTlvType := c[recordTlvType]
 		if foundDuplicateTlvType {
 			return nil, fmt.Errorf("custom records contains a TLV "+
 				"type that is already present in the "+
@@ -97,23 +126,36 @@ func (c *CustomRecords) ExtendRecordProducers(

 	// Convert the custom records map to a TLV record producer slice and
 	// append them to the exiting records slice.
-	crRecords := tlv.MapToRecords(*c)
+	crRecords := tlv.MapToRecords(c)
 	for _, record := range crRecords {
 		r := recordProducer{record}
-		records = append(records, &r)
+		producers = append(producers, &r)
 	}

 	// If the records slice which was given as an argument included TLV
 	// values greater than or equal to the minimum custom records TLV type
 	// we will sort the extended records slice to ensure that it is ordered
 	// correctly.
-	if maxRecordTlvType >= MinCustomRecordsTlvType {
-		sort.Slice(records, func(i, j int) bool {
-			recordI := records[i].Record()
-			recordJ := records[j].Record()
-			return recordI.Type() < recordJ.Type()
-		})
+	sort.Slice(producers, func(i, j int) bool {
+		recordI := producers[i].Record()
+		recordJ := producers[j].Record()
+		return recordI.Type() < recordJ.Type()
+	})
+
+	return producers, nil
+}
+
+func (c CustomRecords) Serialize() ([]byte, error) {
+	records := tlv.MapToRecords(c)
+	stream, err := tlv.NewStream(records...)
+	if err != nil {
+		return nil, fmt.Errorf("error creating stream: %w", err)
 	}

-	return records, nil
+	var b bytes.Buffer
+	if err := stream.Encode(&b); err != nil {
+		return nil, fmt.Errorf("error encoding custom records: %w", err)
+	}
+
+	return b.Bytes(), nil
 }
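The custom_records.go hunks above add the Serialize, ParseCustomRecords and Copy helpers that the rest of this commit relies on. Here is a minimal round-trip sketch of those helpers (not part of the commit itself):

package main

import (
	"fmt"

	"github.com/lightningnetwork/lnd/lnwire"
)

func main() {
	records := lnwire.CustomRecords{
		65536: {0x01, 0x02},
		70000: {0xff},
	}

	// Validate rejects any key below MinCustomRecordsTlvType (65536).
	if err := records.Validate(); err != nil {
		panic(err)
	}

	// Serialize encodes the map as a plain TLV stream.
	blob, err := records.Serialize()
	if err != nil {
		panic(err)
	}

	// ParseCustomRecords decodes the stream back into a CustomRecords map.
	parsed, err := lnwire.ParseCustomRecords(blob)
	if err != nil {
		panic(err)
	}

	// Copy produces an independent shallow copy of the map.
	clone := parsed.Copy()

	fmt.Println(len(parsed) == len(records), len(clone) == len(records))
}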
@@ -71,7 +71,8 @@ func (e *ExtraOpaqueData) PackRecords(recordProducers ...tlv.RecordProducer) err
 		return err
 	}

-	*e = append(extraBytesWriter.Bytes(), *e...)
+	// *e = append(extraBytesWriter.Bytes(), *e...)
+	*e = ExtraOpaqueData(extraBytesWriter.Bytes())

 	return nil
 }
@@ -130,7 +130,7 @@ func (c *UpdateAddHTLC) Decode(r io.Reader, pver uint32) error {
 	if err != nil {
 		return err
 	}
-	c.CustomRecords = *customRecords
+	c.CustomRecords = customRecords

 	// Set extra data to nil if we didn't parse anything out of it so that
 	// we can use assert.Equal in tests.
@@ -610,22 +610,15 @@ func findPath(g *graphParams, r *RestrictParams, cfg *PathFindingConfig,

 	if source == self {

-		firstHopTLVs := tlv.MapToRecords(r.FirstHopCustomRecords)
-		wireRecords := fn.Map(func(r tlv.Record) tlv.RecordProducer {
-			return &r
-		}, firstHopTLVs)
-
-		firstHopData := lnwire.ExtraOpaqueData{}
-
-		err := firstHopData.PackRecords(wireRecords...)
+		customRecords := lnwire.CustomRecords(r.FirstHopCustomRecords)
+		firstHopData, err := customRecords.Serialize()
 		if err != nil {
 			return nil, 0, err
 		}

-		tlvOption := fn.Some[tlv.Blob](firstHopData)
 		max, total, err := getOutgoingBalance(
 			self, outgoingChanMap, g.bandwidthHints, g.graph,
-			tlvOption,
+			fn.Some[tlv.Blob](firstHopData),
 		)
 		if err != nil {
 			return nil, 0, err
@@ -17,7 +17,6 @@ import (
 	"github.com/lightningnetwork/lnd/record"
 	"github.com/lightningnetwork/lnd/routing/route"
 	"github.com/lightningnetwork/lnd/routing/shards"
-	"github.com/lightningnetwork/lnd/tlv"
 )

 // ErrPaymentLifecycleExiting is used when waiting for htlc attempt result, but
@@ -680,28 +679,32 @@ func (p *paymentLifecycle) sendAttempt(
 	}

 	// If we had any first hop TLVs, then we'll encode that here now.
-	firstHopTLVs := tlv.MapToRecords(p.firstHopTLVs)
-	wireRecords := fn.Map(func(r tlv.Record) tlv.RecordProducer {
-		return &r
-	}, firstHopTLVs)
-	if err := htlcAdd.ExtraData.PackRecords(wireRecords...); err != nil {
-		return nil, err
+	htlcAdd.CustomRecords = lnwire.CustomRecords(p.firstHopTLVs)
+	encodedRecords, err := htlcAdd.CustomRecords.Serialize()
+	if err != nil {
+		return nil, fmt.Errorf("unable to encode first hop TLVs: %w",
+			err)
 	}

 	// If a hook exists that may affect our outgoing message, we call it now
 	// and apply its side effects to the UpdateAddHTLC message.
-	err := fn.MapOptionZ(
+	err = fn.MapOptionZ(
 		p.router.cfg.TrafficShaper,
 		func(ts TlvTrafficShaper) error {
 			newAmt, newData, err := ts.ProduceHtlcExtraData(
-				rt.TotalAmount, htlcAdd.ExtraData,
+				rt.TotalAmount, encodedRecords,
 			)
 			if err != nil {
 				return err
 			}

-			htlcAdd.ExtraData = newData
-			htlcAdd.Amount = lnwire.MilliSatoshi(newAmt * 1000)
+			customRecords, err := lnwire.ParseCustomRecords(newData)
+			if err != nil {
+				return err
+			}
+
+			htlcAdd.CustomRecords = customRecords
+			htlcAdd.Amount = lnwire.NewMSatFromSatoshis(newAmt)

 			return nil
 		},
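In the pathfinding and payment-lifecycle hunks above, the first-hop custom records (a record.CustomSet) are now converted directly to lnwire.CustomRecords and serialized into the TLV blob handed to getOutgoingBalance and the traffic shaper, replacing the earlier tlv.MapToRecords/PackRecords sequence. A short, illustrative sketch of that conversion (not the commit's code):

package main

import (
	"fmt"

	"github.com/lightningnetwork/lnd/lnwire"
	"github.com/lightningnetwork/lnd/record"
)

func main() {
	// First-hop records as they arrive from the RPC layer.
	firstHop := record.CustomSet{65540: {0xde, 0xad}}

	// Both types are map[uint64][]byte under the hood, so a plain type
	// conversion is enough before serializing.
	customRecords := lnwire.CustomRecords(firstHop)
	firstHopData, err := customRecords.Serialize()
	if err != nil {
		panic(err)
	}

	fmt.Printf("first-hop TLV blob is %d bytes\n", len(firstHopData))
}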
@@ -11,6 +11,7 @@ import (
 	"github.com/lightningnetwork/lnd/htlcswitch/hop"
 	"github.com/lightningnetwork/lnd/lntypes"
 	"github.com/lightningnetwork/lnd/lnwire"
+	"github.com/lightningnetwork/lnd/record"
 )

 // preimageSubscriber reprints an active subscription to be notified once the
@@ -101,10 +102,11 @@ func (p *preimageBeacon) SubscribeUpdates(
 			ChanID: chanID,
 			HtlcID: htlc.HtlcIndex,
 		},
 		OutgoingChanID: payload.FwdInfo.NextHop,
 		OutgoingExpiry: payload.FwdInfo.OutgoingCTLV,
 		OutgoingAmount: payload.FwdInfo.AmountToForward,
 		CustomRecords:  payload.CustomRecords(),
+		WireCustomRecords: record.CustomSet(htlc.CustomRecords),
 	}
 	copy(packet.OnionBlob[:], nextHopOnionBlob)
