lnrpc/invoicesrpc: add function for padding encrypted data

This commit adds a helper function called `padHopInfo` along with a test
for it. The function will be used later on when building a blinded path to
ensure that all the encrypted data blobs of the path we construct are padded
to the same size.
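
As an illustration only, a hypothetical caller that is assembling a blinded
path might use the new helper roughly as sketched below; relayData1/relayData2
and nodeKey1/nodeKey2 are placeholders for real record.BlindedRouteData values
and node public keys and are not part of this commit:

	hops := []*hopData{
		{data: relayData1, nodeID: nodeKey1},
		{data: relayData2, nodeID: nodeKey2},
	}

	// Pre-pad the payloads so that most paths converge in fewer iterations.
	hopInfo, stats, err := padHopInfo(hops, true)
	if err != nil {
		return err
	}

	// Every hopInfo[i].PlainText now has the same length and is ready to be
	// encrypted by the sphinx package; stats is only used for logging and
	// test assertions.
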
Elle Mouton 2024-05-04 10:37:41 +02:00
parent 4457ca2e66
commit f87cc6274f
2 changed files with 471 additions and 10 deletions

@@ -15,6 +15,7 @@ import (
"github.com/btcsuite/btcd/btcutil"
"github.com/btcsuite/btcd/chaincfg"
"github.com/btcsuite/btcd/wire"
sphinx "github.com/lightningnetwork/lightning-onion"
"github.com/lightningnetwork/lnd/channeldb"
"github.com/lightningnetwork/lnd/channeldb/models"
"github.com/lightningnetwork/lnd/invoices"
@@ -22,7 +23,9 @@ import (
"github.com/lightningnetwork/lnd/lnutils"
"github.com/lightningnetwork/lnd/lnwire"
"github.com/lightningnetwork/lnd/netann"
"github.com/lightningnetwork/lnd/record"
"github.com/lightningnetwork/lnd/routing"
"github.com/lightningnetwork/lnd/tlv"
"github.com/lightningnetwork/lnd/zpay32"
)
@@ -837,3 +840,142 @@ func PopulateHopHints(cfg *SelectHopHintsCfg, amtMSat lnwire.MilliSatoshi,
hopHints = append(hopHints, selectedHints...)
return hopHints, nil
}
// hopData packages the record.BlindedRouteData for a hop on a blinded path with
// the real node ID of that hop.
type hopData struct {
data *record.BlindedRouteData
nodeID *btcec.PublicKey
}
// padStats can be used to keep track of various pieces of data that we collect
// during a call to padHopInfo. This is useful for logging and for test
// assertions.
type padStats struct {
minPayloadSize int
maxPayloadSize int
finalPaddedSize int
numIterations int
}
// padHopInfo iterates over a set of record.BlindedRouteData and adds padding
// where needed until the resulting encrypted data blobs are all the same size.
// This may take a few iterations because a TLV field is used to add the
// padding. For example, if we want to add 1 byte of padding to a
// record.BlindedRouteData that does not yet have any padding, then that
// single byte of padding actually adds 3 bytes to the encoding because of
// the extra type and length bytes of the new padding record. On the next
// iteration, however, adding 1 more byte really does add only a single byte.
// A similar extra iteration is needed when a padding value crosses a BigSize
// encoding bucket edge. The number of iterations that this function takes is
// also returned for
// testing purposes. If prePad is true, then zero byte padding is added to each
// payload that does not yet have padding. This will save some iterations for
// the majority of cases.
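//
// As a concrete example of the BigSize bucket edge mentioned above: growing a
// padding value from 252 to 253 bytes enlarges the encoded padding record by
// 3 bytes rather than 1, because the BigSize length prefix jumps from 1 byte
// to 3 bytes (0xfd followed by a uint16), so one extra catch-up iteration is
// needed.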
func padHopInfo(hopInfo []*hopData, prePad bool) ([]*sphinx.HopInfo, *padStats,
error) {
var (
paymentPath = make([]*sphinx.HopInfo, len(hopInfo))
stats padStats
)
// Pre-pad each payload with zero byte padding (if it does not yet have
// padding) to save a couple of iterations in the majority of cases.
if prePad {
for _, info := range hopInfo {
if info.data.Padding.IsSome() {
continue
}
info.data.PadBy(0)
}
}
for {
stats.numIterations++
// On each iteration of the loop, we first determine the
// current largest encoded data blob size. This will be the
// size we aim to get the others to match.
var (
maxLen int
minLen = math.MaxInt
)
for i, hop := range hopInfo {
plainText, err := record.EncodeBlindedRouteData(
hop.data,
)
if err != nil {
return nil, nil, err
}
if len(plainText) > maxLen {
maxLen = len(plainText)
// Update the stats to take note of this new
// max since this may be the final max that all
// payloads will be padded to.
stats.finalPaddedSize = maxLen
}
if len(plainText) < minLen {
minLen = len(plainText)
}
paymentPath[i] = &sphinx.HopInfo{
NodePub: hop.nodeID,
PlainText: plainText,
}
}
// If this is our first iteration, then we take note of the
// pre-padding min and max payload lengths for logging later.
if stats.numIterations == 1 {
stats.minPayloadSize = minLen
stats.maxPayloadSize = maxLen
}
// Now we iterate over them again and determine which ones we
// need to add padding to.
var numEqual int
for i, hop := range hopInfo {
plainText := paymentPath[i].PlainText
// If the plaintext length is equal to the desired
// length, then we can continue. We use numEqual to
// keep track of how many have the same length.
if len(plainText) == maxLen {
numEqual++
continue
}
// If padding was already added to this hop in a previous
// iteration, we need to take the length of that existing
// padding into account when computing the new padding size.
var existingPadding int
hop.data.Padding.WhenSome(
func(p tlv.RecordT[tlv.TlvType1, []byte]) {
existingPadding = len(p.Val)
},
)
// Add some padding bytes to the hop.
hop.data.PadBy(
existingPadding + maxLen - len(plainText),
)
}
// If all the payloads have the same length, we can exit the
// loop.
if numEqual == len(hopInfo) {
break
}
}
log.Debugf("Finished padding %d blinded path payloads to %d bytes "+
"each where the pre-padded min and max sizes were %d and %d "+
"bytes respectively", len(hopInfo), stats.finalPaddedSize,
stats.minPayloadSize, stats.maxPayloadSize)
return paymentPath, &stats, nil
}

@@ -3,18 +3,33 @@ package invoicesrpc
import (
"encoding/hex"
"fmt"
"math/rand"
"reflect"
"testing"
"testing/quick"
"github.com/btcsuite/btcd/btcec/v2"
"github.com/btcsuite/btcd/wire"
"github.com/lightningnetwork/lnd/channeldb"
"github.com/lightningnetwork/lnd/channeldb/models"
"github.com/lightningnetwork/lnd/lnwire"
"github.com/lightningnetwork/lnd/record"
"github.com/lightningnetwork/lnd/tlv"
"github.com/lightningnetwork/lnd/zpay32"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
)
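// pubkey is a valid secp256k1 public key that is reused throughout the tests
// below; it is now constructed once at the package level instead of on every
// call to getTestPubKey.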
var (
pubkeyBytes, _ = hex.DecodeString(
"598ec453728e0ffe0ae2f5e174243cf58f2" +
"a3f2c83d2457b43036db568b11093",
)
pubKeyY = new(btcec.FieldVal)
_ = pubKeyY.SetByteSlice(pubkeyBytes)
pubkey = btcec.NewPublicKey(new(btcec.FieldVal).SetInt(4), pubKeyY)
)
type hopHintsConfigMock struct {
t *testing.T
mock.Mock
@@ -84,16 +99,6 @@ func (h *hopHintsConfigMock) FetchChannelEdgesByID(chanID uint64) (
// getTestPubKey returns a valid parsed pub key to be used in our tests.
func getTestPubKey() *btcec.PublicKey {
pubkeyBytes, _ := hex.DecodeString(
"598ec453728e0ffe0ae2f5e174243cf58f2" +
"a3f2c83d2457b43036db568b11093",
)
pubKeyY := new(btcec.FieldVal)
_ = pubKeyY.SetByteSlice(pubkeyBytes)
pubkey := btcec.NewPublicKey(
new(btcec.FieldVal).SetInt(4),
pubKeyY,
)
return pubkey
}
@@ -896,3 +901,317 @@ func TestPopulateHopHints(t *testing.T) {
})
}
}
// TestPadBlindedHopInfo asserts that the padding of blinded hop data is done
// correctly and that it takes the expected number of iterations.
func TestPadBlindedHopInfo(t *testing.T) {
tests := []struct {
name string
expectedIterations int
expectedFinalSize int
// We will use the pathID field of BlindedRouteData to set an
// initial payload size. The ints in this list represent the
// size of each pathID.
pathIDs []int
// existingPadding is a map from entry index (based on the
// pathIDs set) to the number of pre-existing padding bytes to
// add.
existingPadding map[int]int
// prePad is true if all the hop payloads should be pre-padded
// with a zero length TLV Padding field.
prePad bool
}{
{
// If there is only one entry, then no padding is
// expected.
name: "single entry",
expectedIterations: 1,
pathIDs: []int{10},
// The final size will be 12 since the path ID is 10
// bytes, and it will be prefixed by type and length
// bytes.
expectedFinalSize: 12,
},
{
// All the payloads are the same size from the get go
// meaning that no padding is expected.
name: "all start equal",
expectedIterations: 1,
pathIDs: []int{10, 10, 10},
// The final size will be 12 since the path ID is 10
// bytes, and it will be prefixed by type and length
// bytes.
expectedFinalSize: 12,
},
{
// If the blobs differ by 1 byte it will take 4
// iterations:
// 1) padding of 1 is added to entry 2 which will
// increase its size by 3 bytes since padding does
// not yet exist for it.
// 2) Now entry 1 will be short 2 bytes. It will be
// padded by 2 bytes but again since it is a new
// padding field, 4 bytes are added.
// 3) Finally, entry 2 is still 2 bytes short. Since it
// already has a padding field, growing that padding
// value by 2 bytes adds exactly 2 bytes this time.
// 4) The fourth iteration determines that all are now
// the same size.
name: "differ by 1 - no pre-padding",
expectedIterations: 4,
pathIDs: []int{4, 3},
expectedFinalSize: 10,
},
{
// By pre-padding the payloads with a zero byte padding,
// we can reduce the number of iterations quite a bit.
name: "differ by 1 - with pre-padding",
expectedIterations: 2,
pathIDs: []int{4, 3},
expectedFinalSize: 8,
prePad: true,
},
{
name: "existing padding and diff of 1",
expectedIterations: 2,
pathIDs: []int{10, 11},
// By adding some existing padding, the type and length
// fields for the padding are already accounted for in
// the first iteration, and so we only expect two
// iterations to get the payloads to match size here:
// one for adding a single extra byte to the smaller
// payload and another for confirming the sizes match.
existingPadding: map[int]int{0: 1, 1: 1},
expectedFinalSize: 16,
},
{
// In this test, we test a BigSize bucket shift. We do
// this by setting the initial path IDs of both entries
// to a 0 size which means the total encoding of those
// will be 2 bytes (to encode the type and length). Then
// for the initial padding, we let the first entry be
// 253 bytes long which is just long enough to be in
// the second BigSize bucket which uses 3 bytes to
// encode the value length. We make the second entry
// 252 bytes which still puts it in the first bucket
// which uses 1 byte for the length. The difference in
// overall packet size will be 3 bytes (the first entry
// has 2 more length bytes and 1 more value byte). So
// the function will try to pad the second entry by 3
// bytes (iteration 1). This will however result in the
// second entry shifting to the second BigSize bucket
// meaning it will gain an additional 2 bytes for the
// new length encoding meaning that overall it gains 5
// bytes in size. This will result in another iteration
// which will result in padding the first entry with an
// extra 2 bytes to meet the second entry's new size
// (iteration 2). One more iteration (3) is then done
// to confirm that all entries are now the same size.
name: "big size bucket shift",
expectedIterations: 3,
// The path IDs are left empty so that the existing
// padding values below alone determine the payload
// sizes and trigger the BigSize bucket shift.
pathIDs: []int{0, 0},
existingPadding: map[int]int{0: 253, 1: 252},
expectedFinalSize: 261,
},
}
for _, test := range tests {
test := test
t.Run(test.name, func(t *testing.T) {
t.Parallel()
// If the test includes existing padding, then make sure
// that the number of existing padding entries is equal
// to the number of pathID entries.
if test.existingPadding != nil {
require.Len(t, test.existingPadding,
len(test.pathIDs))
}
hopDataSet := make([]*hopData, len(test.pathIDs))
for i, l := range test.pathIDs {
pathID := tlv.SomeRecordT(
tlv.NewPrimitiveRecord[tlv.TlvType6](
make([]byte, l),
),
)
data := &record.BlindedRouteData{
PathID: pathID,
}
if test.existingPadding != nil {
//nolint:lll
padding := tlv.SomeRecordT(
tlv.NewPrimitiveRecord[tlv.TlvType1](
make([]byte, test.existingPadding[i]),
),
)
data.Padding = padding
}
hopDataSet[i] = &hopData{data: data}
}
hopInfo, stats, err := padHopInfo(
hopDataSet, test.prePad,
)
require.NoError(t, err)
require.Equal(t, test.expectedIterations,
stats.numIterations)
require.Equal(t, test.expectedFinalSize,
stats.finalPaddedSize)
// We expect all resulting blobs to be the same size.
for _, info := range hopInfo {
require.Len(
t, info.PlainText,
test.expectedFinalSize,
)
}
})
}
}
// TestPadBlindedHopInfoBlackBox tests the padHopInfo function via the
// quick.Check testing function. It generates a random set of hopData and
// asserts that the resulting padded set always has the same encoded length.
func TestPadBlindedHopInfoBlackBox(t *testing.T) {
fn := func(data hopDataList) bool {
resultList, _, err := padHopInfo(data, true)
require.NoError(t, err)
// There should be a resulting sphinx.HopInfo struct for each
// hopData passed to the padHopInfo function.
if len(resultList) != len(data) {
return false
}
// There is nothing left to check if the input set was empty to
// start with.
if len(data) == 0 {
return true
}
// Now, assert that the encoded size of each item is the same.
// Get the size of the first item as a base point.
payloadSize := len(resultList[0].PlainText)
// All the other entries should have the same encoded size.
for i := 1; i < len(resultList); i++ {
if len(resultList[i].PlainText) != payloadSize {
return false
}
}
return true
}
require.NoError(t, quick.Check(fn, nil))
}
type hopDataList []*hopData
// Generate returns a random instance of the hopDataList type.
//
// NOTE: this is part of the quick.Generator interface.
func (h hopDataList) Generate(rand *rand.Rand, size int) reflect.Value {
data := make(hopDataList, rand.Intn(size))
for i := 0; i < len(data); i++ {
data[i] = &hopData{
data: genBlindedRouteData(rand),
nodeID: pubkey,
}
}
return reflect.ValueOf(data)
}
// A compile-time check to ensure that hopDataList implements the
// quick.Generator interface.
var _ quick.Generator = (*hopDataList)(nil)
// sometimesDo calls the given function with a 50% probability.
func sometimesDo(fn func(), rand *rand.Rand) {
if rand.Intn(2) == 0 {
return
}
fn()
}
// genBlindedRouteData generates a random record.BlindedRouteData object.
func genBlindedRouteData(rand *rand.Rand) *record.BlindedRouteData {
var data record.BlindedRouteData
sometimesDo(func() {
data.Padding = tlv.SomeRecordT(
tlv.NewPrimitiveRecord[tlv.TlvType1](
make([]byte, rand.Intn(1000000)),
),
)
}, rand)
sometimesDo(func() {
data.ShortChannelID = tlv.SomeRecordT(
tlv.NewRecordT[tlv.TlvType2](lnwire.ShortChannelID{
BlockHeight: rand.Uint32(),
TxIndex: rand.Uint32(),
TxPosition: uint16(rand.Uint32()),
}),
)
}, rand)
sometimesDo(func() {
data.NextNodeID = tlv.SomeRecordT(
tlv.NewPrimitiveRecord[tlv.TlvType4](pubkey),
)
}, rand)
sometimesDo(func() {
data.PathID = tlv.SomeRecordT(
tlv.NewPrimitiveRecord[tlv.TlvType6](
make([]byte, rand.Intn(100)),
),
)
}, rand)
sometimesDo(func() {
data.NextBlindingOverride = tlv.SomeRecordT(
tlv.NewPrimitiveRecord[tlv.TlvType8](pubkey),
)
}, rand)
sometimesDo(func() {
data.RelayInfo = tlv.SomeRecordT(
tlv.NewRecordT[tlv.TlvType10](record.PaymentRelayInfo{
CltvExpiryDelta: uint16(rand.Uint32()),
FeeRate: rand.Uint32(),
BaseFee: lnwire.MilliSatoshi(
rand.Uint32(),
),
}),
)
}, rand)
sometimesDo(func() {
data.Constraints = tlv.SomeRecordT(
tlv.NewRecordT[tlv.TlvType12](record.PaymentConstraints{
MaxCltvExpiry: rand.Uint32(),
HtlcMinimumMsat: lnwire.MilliSatoshi(
rand.Uint32(),
),
}),
)
}, rand)
return &data
}