Merge pull request #9460 from lightningnetwork/0-18-5-branch-rc1

release: create branch for v0.18.5-beta.rc1
Olaoluwa Osuntokun 2025-01-31 13:20:08 -06:00 committed by GitHub
commit 288f0e1b3d
53 changed files with 1448 additions and 533 deletions

View File

@ -25,13 +25,9 @@ env:
TRANCHES: 8
# If you change this value, please change it in the following files as well:
# /.travis.yml
# /Dockerfile
# /dev.Dockerfile
# /make/builder.Dockerfile
# /.github/workflows/release.yml
GO_VERSION: 1.22.6
# If you change this please also update GO_VERSION in Makefile (then run
# `make lint` to see where else it needs to be updated as well).
GO_VERSION: 1.22.11
jobs:
########################
@ -202,15 +198,6 @@ jobs:
if: github.event_name == 'pull_request'
uses: ./.github/actions/rebase
- name: git checkout fuzzing seeds
uses: actions/checkout@v3
with:
repository: lightninglabs/lnd-fuzz
path: lnd-fuzz
- name: rsync fuzzing seeds
run: rsync -a --ignore-existing lnd-fuzz/ ./
- name: setup go ${{ env.GO_VERSION }}
uses: ./.github/actions/setup-go
with:
@ -297,10 +284,10 @@ jobs:
- name: Zip log files on failure
if: ${{ failure() }}
timeout-minutes: 5 # timeout after 5 minutes
run: 7z a logs-itest-${{ matrix.name }}.zip itest/**/*.log
run: 7z a logs-itest-${{ matrix.name }}.zip itest/**/*.log itest/postgres.log
- name: Upload log files on failure
uses: actions/upload-artifact@v3
uses: actions/upload-artifact@v4
if: ${{ failure() }}
with:
name: logs-itest-${{ matrix.name }}
@ -345,7 +332,7 @@ jobs:
run: 7z a logs-itest-windows.zip itest/**/*.log
- name: Upload log files on failure
uses: actions/upload-artifact@v3
uses: actions/upload-artifact@v4
if: ${{ failure() }}
with:
name: logs-itest-windows
@ -390,7 +377,7 @@ jobs:
run: 7z a logs-itest-macos.zip itest/**/*.log
- name: Upload log files on failure
uses: actions/upload-artifact@v3
uses: actions/upload-artifact@v4
if: ${{ failure() }}
with:
name: logs-itest-macos

View File

@ -10,12 +10,9 @@ defaults:
shell: bash
env:
# If you change this value, please change it in the following files as well:
# /Dockerfile
# /dev.Dockerfile
# /make/builder.Dockerfile
# /.github/workflows/main.yml
GO_VERSION: 1.22.6
# If you change this please also update GO_VERSION in Makefile (then run
# `make lint` to see where else it needs to be updated as well).
GO_VERSION: 1.22.11
jobs:
main:

View File

@ -1,5 +1,7 @@
run:
go: "1.22.6"
# If you change this please also update GO_VERSION in Makefile (then run
# `make lint` to see where else it needs to be updated as well).
go: "1.22.11"
# Abort after 10 minutes.
timeout: 10m

View File

@ -1,9 +1,6 @@
# If you change this value, please change it in the following files as well:
# /dev.Dockerfile
# /make/builder.Dockerfile
# /.github/workflows/main.yml
# /.github/workflows/release.yml
FROM golang:1.22.6-alpine as builder
# If you change this please also update GO_VERSION in Makefile (then run
# `make lint` to see where else it needs to be updated as well).
FROM golang:1.22.11-alpine as builder
# Force Go to use the cgo based DNS resolver. This is required to ensure DNS
# queries required to connect to linked containers succeed.

View File

@ -35,7 +35,7 @@ endif
# GO_VERSION is the Go version used for the release build, docker files, and
# GitHub Actions. This is the reference version for the project. All other Go
# versions are checked against this version.
GO_VERSION = 1.22.6
GO_VERSION = 1.22.11
GOBUILD := $(LOOPVARFIX) go build -v
GOINSTALL := $(LOOPVARFIX) go install -v
@ -195,19 +195,31 @@ ifeq ($(dbbackend),postgres)
docker rm lnd-postgres --force || echo "Starting new postgres container"
# Start a fresh postgres instance. Allow a maximum of 500 connections so
# that multiple lnd instances with a maximum number of connections of 50
# each can run concurrently.
docker run --name lnd-postgres -e POSTGRES_PASSWORD=postgres -p 6432:5432 -d postgres:13-alpine -N 500
docker logs -f lnd-postgres &
# that multiple lnd instances with a maximum number of connections of 20
# each can run concurrently. Note that many of the settings here are
# specifically for integration testing and are not fit for running
# production nodes. The increase in max connections ensures that there
# are enough entries allocated for the RWConflictPool to allow multiple
# conflicting transactions to track serialization conflicts. The
# increase in predicate locks and locks per transaction is to allow the
# queries to lock individual rows instead of entire tables, helping
# reduce serialization conflicts. Disabling sequential scan for small
# tables also helps prevent serialization conflicts by ensuring lookups
# lock only relevant rows in the index rather than the entire table.
docker run --name lnd-postgres -e POSTGRES_PASSWORD=postgres -p 6432:5432 -d postgres:13-alpine -N 1500 -c max_pred_locks_per_transaction=1024 -c max_locks_per_transaction=128 -c enable_seqscan=off
docker logs -f lnd-postgres >itest/postgres.log 2>&1 &
# Wait for the instance to be started.
sleep $(POSTGRES_START_DELAY)
endif
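For reference, here is a minimal Go sketch of how a test process might connect to the throwaway Postgres instance started by this target while staying inside the per-node connection budget described in the comments above. The port (6432), credentials, and the 1500/20 connection figures come from the Makefile; the use of database/sql with the lib/pq driver (an indirect dependency elsewhere in this diff) is an assumption for illustration, not lnd's actual wiring.

package main

import (
	"database/sql"
	"fmt"
	"log"

	// Postgres driver; lib/pq appears as an indirect dependency in this diff.
	_ "github.com/lib/pq"
)

func main() {
	// DSN matching the container above: user/password "postgres" on
	// localhost:6432.
	dsn := "postgres://postgres:postgres@localhost:6432/postgres?sslmode=disable"

	db, err := sql.Open("postgres", dsn)
	if err != nil {
		log.Fatalf("unable to open db: %v", err)
	}
	defer db.Close()

	// Cap this process at 20 open connections so that many concurrent lnd
	// nodes fit under the instance's 1500-connection limit (-N 1500).
	db.SetMaxOpenConns(20)

	if err := db.Ping(); err != nil {
		log.Fatalf("postgres not reachable: %v", err)
	}
	fmt.Println("connected to itest postgres on :6432")
}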
clean-itest-logs:
rm -rf itest/*.log itest/.logs-*
#? itest-only: Only run integration tests without re-building binaries
itest-only: db-instance
itest-only: clean-itest-logs db-instance
@$(call print, "Running integration tests with ${backend} backend.")
rm -rf itest/*.log itest/.logs-*; date
date
EXEC_SUFFIX=$(EXEC_SUFFIX) scripts/itest_part.sh 0 1 $(TEST_FLAGS) $(ITEST_FLAGS)
$(COLLECT_ITEST_COVERAGE)
@ -218,9 +230,9 @@ itest: build-itest itest-only
itest-race: build-itest-race itest-only
#? itest-parallel: Build and run integration tests in parallel mode, running up to ITEST_PARALLELISM test tranches in parallel (default 4)
itest-parallel: build-itest db-instance
itest-parallel: clean-itest-logs build-itest db-instance
@$(call print, "Running tests")
rm -rf itest/*.log itest/.logs-*; date
date
EXEC_SUFFIX=$(EXEC_SUFFIX) scripts/itest_parallel.sh $(ITEST_PARALLELISM) $(NUM_ITEST_TRANCHES) $(TEST_FLAGS) $(ITEST_FLAGS)
$(COLLECT_ITEST_COVERAGE)

View File

@ -5,6 +5,7 @@ import (
"sync"
"github.com/lightningnetwork/lnd/kvdb"
"github.com/lightningnetwork/lnd/sqldb"
)
// errSolo is a sentinel error indicating that the requester should re-run the
@ -55,8 +56,20 @@ func (b *batch) run() {
for i, req := range b.reqs {
err := req.Update(tx)
if err != nil {
failIdx = i
return err
// If we get a serialization error, we
// want the underlying SQL retry
// mechanism to retry the entire batch.
// Otherwise, we can succeed in an
// sqldb retry and still re-execute the
// failing request individually.
dbErr := sqldb.MapSQLError(err)
if !sqldb.IsSerializationError(dbErr) {
failIdx = i
return err
}
return dbErr
}
}
return nil
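To spell out the control flow added above, here is a hypothetical helper (a sketch, assuming `import "github.com/lightningnetwork/lnd/sqldb"`; the helper name is made up) mirroring the classification the batch executor now performs: serialization failures are surfaced so the SQL layer retries the whole batch, while anything else is pinned to the single failing request.

// classifyBatchError mirrors the logic in run() above. It returns the
// index of the request to blame, or -1 when the whole batch should be
// retried by the underlying SQL retry mechanism.
func classifyBatchError(reqIdx int, err error) (int, error) {
	dbErr := sqldb.MapSQLError(err)
	if sqldb.IsSerializationError(dbErr) {
		// Returning the mapped serialization error (and no failing
		// index) lets sqldb re-run the entire batch transaction.
		return -1, dbErr
	}

	// Any other error is attributed to this individual request, which can
	// then be re-executed (or rejected) on its own.
	return reqIdx, err
}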

batch/batch_test.go (new file, 74 lines)
View File

@ -0,0 +1,74 @@
package batch
import (
"errors"
"path/filepath"
"sync"
"testing"
"time"
"github.com/btcsuite/btcwallet/walletdb"
"github.com/lightningnetwork/lnd/kvdb"
"github.com/stretchr/testify/require"
)
func TestRetry(t *testing.T) {
dbDir := t.TempDir()
dbName := filepath.Join(dbDir, "weks.db")
db, err := walletdb.Create(
"bdb", dbName, true, kvdb.DefaultDBTimeout,
)
if err != nil {
t.Fatalf("unable to create walletdb: %v", err)
}
t.Cleanup(func() {
db.Close()
})
var (
mu sync.Mutex
called int
)
sched := NewTimeScheduler(db, &mu, time.Second)
// First, we construct a request that should retry individually and
// execute it non-lazily. It should still return the error the second
// time.
req := &Request{
Update: func(tx kvdb.RwTx) error {
called++
return errors.New("test")
},
}
err = sched.Execute(req)
// Check and reset the called counter.
mu.Lock()
require.Equal(t, 2, called)
called = 0
mu.Unlock()
require.ErrorContains(t, err, "test")
// Now, we construct a request that should NOT retry because it returns
// a serialization error, which should cause the underlying postgres
// transaction to retry. Since we aren't using postgres, this will
// cause the transaction to not be retried at all.
req = &Request{
Update: func(tx kvdb.RwTx) error {
called++
return errors.New("could not serialize access")
},
}
err = sched.Execute(req)
// Check the called counter.
mu.Lock()
require.Equal(t, 1, called)
mu.Unlock()
require.ErrorContains(t, err, "could not serialize access")
}

View File

@ -43,11 +43,11 @@ const (
AppMinor uint = 18
// AppPatch defines the application patch for this binary.
AppPatch uint = 4
AppPatch uint = 5
// AppPreRelease MUST only contain characters from semanticAlphabet per
// the semantic versioning spec.
AppPreRelease = "beta"
AppPreRelease = "beta.rc1"
)
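As a rough illustration, these constants compose into the release string "0.18.5-beta.rc1". The helper below is a hypothetical sketch, not lnd's actual formatting code; it assumes the AppMajor constant defined earlier in this file.

// versionString is a hypothetical helper showing how the constants above
// combine: "0.18.5" plus the pre-release suffix, i.e. "0.18.5-beta.rc1"
// for this release candidate.
func versionString() string {
	version := fmt.Sprintf("%d.%d.%d", AppMajor, AppMinor, AppPatch)
	if AppPreRelease != "" {
		version += "-" + AppPreRelease
	}

	return version
}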
func init() {

View File

@ -1023,6 +1023,10 @@ func (c *ChannelGraph) AddChannelEdge(edge *models.ChannelEdgeInfo,
}
for _, f := range op {
if f == nil {
return fmt.Errorf("nil scheduler option was used")
}
f(r)
}
@ -1079,7 +1083,7 @@ func (c *ChannelGraph) addChannelEdge(tx kvdb.RwTx,
err := addLightningNode(tx, &node1Shell)
if err != nil {
return fmt.Errorf("unable to create shell node "+
"for: %x", edge.NodeKey1Bytes)
"for: %x: %w", edge.NodeKey1Bytes, err)
}
case node1Err != nil:
return err
@ -1095,7 +1099,7 @@ func (c *ChannelGraph) addChannelEdge(tx kvdb.RwTx,
err := addLightningNode(tx, &node2Shell)
if err != nil {
return fmt.Errorf("unable to create shell node "+
"for: %x", edge.NodeKey2Bytes)
"for: %x: %w", edge.NodeKey2Bytes, err)
}
case node2Err != nil:
return err
@ -2622,8 +2626,14 @@ func (c *ChannelGraph) delChannelEdgeUnsafe(edges, edgeIndex, chanIndex,
// As part of deleting the edge we also remove all disabled entries
// from the edgePolicyDisabledIndex bucket. We do that for both directions.
updateEdgePolicyDisabledIndex(edges, cid, false, false)
updateEdgePolicyDisabledIndex(edges, cid, true, false)
err = updateEdgePolicyDisabledIndex(edges, cid, false, false)
if err != nil {
return err
}
err = updateEdgePolicyDisabledIndex(edges, cid, true, false)
if err != nil {
return err
}
// With the edge data deleted, we can purge the information from the two
// edge indexes.
@ -4452,11 +4462,14 @@ func putChanEdgePolicy(edges kvdb.RwBucket, edge *models.ChannelEdgePolicy,
return err
}
updateEdgePolicyDisabledIndex(
err = updateEdgePolicyDisabledIndex(
edges, edge.ChannelID,
edge.ChannelFlags&lnwire.ChanUpdateDirection > 0,
edge.IsDisabled(),
)
if err != nil {
return err
}
return edges.Put(edgeKey[:], b.Bytes()[:])
}

View File

@ -461,7 +461,7 @@ func sendCoins(ctx *cli.Context) error {
// In case that the user has specified the sweepall flag, we'll
// calculate the amount to send based on the current wallet balance.
displayAmt := amt
if ctx.Bool("sweepall") {
if ctx.Bool("sweepall") && !ctx.IsSet("utxo") {
balanceResponse, err := client.WalletBalance(
ctxc, &lnrpc.WalletBalanceRequest{
MinConfs: minConfs,
@ -481,6 +481,32 @@ func sendCoins(ctx *cli.Context) error {
if err != nil {
return fmt.Errorf("unable to decode utxos: %w", err)
}
if ctx.Bool("sweepall") {
displayAmt = 0
// If we're sweeping all funds of the utxos, we'll need
// to set the display amount to the total amount of the
// utxos.
unspents, err := client.ListUnspent(
ctxc, &lnrpc.ListUnspentRequest{
MinConfs: 0,
MaxConfs: math.MaxInt32,
},
)
if err != nil {
return err
}
for _, utxo := range outpoints {
for _, unspent := range unspents.Utxos {
unspentUtxo := unspent.Outpoint
if isSameOutpoint(utxo, unspentUtxo) {
displayAmt += unspent.AmountSat
break
}
}
}
}
}
// Ask for confirmation if we're on an actual terminal and the output is
@ -517,6 +543,10 @@ func sendCoins(ctx *cli.Context) error {
return nil
}
func isSameOutpoint(a, b *lnrpc.OutPoint) bool {
return a.TxidStr == b.TxidStr && a.OutputIndex == b.OutputIndex
}
var listUnspentCommand = cli.Command{
Name: "listunspent",
Category: "On-chain",
@ -2327,6 +2357,21 @@ var updateChannelPolicyCommand = cli.Command{
"channels will be updated. Takes the form of " +
"txid:output_index",
},
cli.BoolFlag{
Name: "create_missing_edge",
Usage: "Under unknown circumstances a channel can " +
"exist with a missing edge in the graph " +
"database. This can cause an 'edge not " +
"found' error when calling `getchaninfo` " +
"and/or cause the default channel policy to " +
"be used during forwards. Setting this flag " +
"will recreate the edge if not found, " +
"allowing updating this channel policy and " +
"fixing the missing edge problem for this " +
"channel permanently. For fields not set in " +
"this command, the default policy will be " +
"created.",
},
},
Action: actionDecorator(updateChannelPolicy),
}
@ -2486,11 +2531,14 @@ func updateChannelPolicy(ctx *cli.Context) error {
}
}
createMissingEdge := ctx.Bool("create_missing_edge")
req := &lnrpc.PolicyUpdateRequest{
BaseFeeMsat: baseFee,
TimeLockDelta: uint32(timeLockDelta),
MaxHtlcMsat: ctx.Uint64("max_htlc_msat"),
InboundFee: inboundFee,
BaseFeeMsat: baseFee,
TimeLockDelta: uint32(timeLockDelta),
MaxHtlcMsat: ctx.Uint64("max_htlc_msat"),
InboundFee: inboundFee,
CreateMissingEdge: createMissingEdge,
}
if ctx.IsSet("min_htlc_msat") {

View File

@ -532,6 +532,7 @@ func bumpForceCloseFee(ctx *cli.Context) error {
for _, sweep := range sweeps.PendingSweeps {
// Only bump anchor sweeps.
//nolint:lll
if sweep.WitnessType != walletrpc.WitnessType_COMMITMENT_ANCHOR {
continue
}

View File

@ -1,9 +1,6 @@
# If you change this value, please change it in the following files as well:
# /Dockerfile
# /make/builder.Dockerfile
# /.github/workflows/main.yml
# /.github/workflows/release.yml
FROM golang:1.22.6-alpine as builder
# If you change this please also update GO_VERSION in Makefile (then run
# `make lint` to see where else it needs to be updated as well).
FROM golang:1.22.11-alpine as builder
LABEL maintainer="Olaoluwa Osuntokun <laolu@lightning.engineering>"

View File

@ -1,4 +1,6 @@
FROM golang:1.22.6-alpine as builder
# If you change this please also update GO_VERSION in Makefile (then run
# `make lint` to see where else it needs to be updated as well).
FROM golang:1.22.11-alpine as builder
LABEL maintainer="Olaoluwa Osuntokun <laolu@lightning.engineering>"

View File

@ -0,0 +1,76 @@
# Release Notes
- [Bug Fixes](#bug-fixes)
- [New Features](#new-features)
- [Functional Enhancements](#functional-enhancements)
- [RPC Additions](#rpc-additions)
- [lncli Additions](#lncli-additions)
- [Improvements](#improvements)
- [Functional Updates](#functional-updates)
- [RPC Updates](#rpc-updates)
- [lncli Updates](#lncli-updates)
- [Breaking Changes](#breaking-changes)
- [Performance Improvements](#performance-improvements)
- [Technical and Architectural Updates](#technical-and-architectural-updates)
- [BOLT Spec Updates](#bolt-spec-updates)
- [Testing](#testing)
- [Database](#database)
- [Code Health](#code-health)
- [Tooling and Documentation](#tooling-and-documentation)
# Bug Fixes
* [Fixed a bug](https://github.com/lightningnetwork/lnd/pull/9459) where we
would not cancel accepted HTLCs on AMP invoices if the whole invoice was
canceled.
* [Improved user experience](https://github.com/lightningnetwork/lnd/pull/9454)
by returning a custom error code when an HTLC carries incorrect custom records.
# New Features
## Functional Enhancements
## RPC Additions
## lncli Additions
* [`updatechanpolicy`](https://github.com/lightningnetwork/lnd/pull/8805) will
now update the channel policy even if the edge was not found in the graph
database, provided the `create_missing_edge` flag is set.
# Improvements
## Functional Updates
## RPC Updates
## lncli Updates
## Code Health
## Breaking Changes
## Performance Improvements
# Technical and Architectural Updates
## BOLT Spec Updates
## Testing
* [Remove global application level lock for
Postgres](https://github.com/lightningnetwork/lnd/pull/9242) so multiple DB
transactions can run at once, increasing efficiency. Includes several bugfixes
to allow this to work properly.
## Database
## Code Health
* [Golang was updated to
`v1.22.11`](https://github.com/lightningnetwork/lnd/pull/9462).
## Tooling and Documentation
# Contributors (Alphabetical Order)
* Alex Akselrod
* Jesse de Wit
* Ziggie

go.mod (11 changes)
View File

@ -10,7 +10,7 @@ require (
github.com/btcsuite/btcd/btcutil/psbt v1.1.8
github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f
github.com/btcsuite/btcwallet v0.16.10-0.20240912233857-ffb143c77cc5
github.com/btcsuite/btcwallet v0.16.10-0.20241127094224-93c858b2ad63
github.com/btcsuite/btcwallet/wallet/txauthor v1.3.5
github.com/btcsuite/btcwallet/wallet/txrules v1.2.2
github.com/btcsuite/btcwallet/walletdb v1.4.4
@ -201,13 +201,16 @@ replace github.com/ulikunitz/xz => github.com/ulikunitz/xz v0.5.11
// https://deps.dev/advisory/OSV/GO-2021-0053?from=%2Fgo%2Fgithub.com%252Fgogo%252Fprotobuf%2Fv1.3.1
replace github.com/gogo/protobuf => github.com/gogo/protobuf v1.3.2
// Use local kvdb package until new version is tagged.
replace github.com/lightningnetwork/lnd/kvdb => ./kvdb
// Use local sqldb package until new version is tagged.
replace github.com/lightningnetwork/lnd/sqldb => ./sqldb
// We want to format raw bytes as hex instead of base64. The forked version
// allows us to specify that as an option.
replace google.golang.org/protobuf => github.com/lightninglabs/protobuf-go-hex-display v1.30.0-hex-display
// Temporary replace until the next version of sqldb is tagged.
replace github.com/lightningnetwork/lnd/sqldb => ./sqldb
// If you change this please also update .github/pull_request_template.md,
// docs/INSTALL.md and GO_IMAGE in lnrpc/gen_protos_docker.sh.
go 1.22.6

go.sum (6 changes)
View File

@ -92,8 +92,8 @@ github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0/go.mod h1:7SFka0XMvUgj3hfZtyd
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f h1:bAs4lUbRJpnnkd9VhRV3jjAVU7DJVjMaK+IsvSeZvFo=
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA=
github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg=
github.com/btcsuite/btcwallet v0.16.10-0.20240912233857-ffb143c77cc5 h1:zYy233eUBvkF3lq2MUkybEhxhDsrRDSgiToIKN57mtk=
github.com/btcsuite/btcwallet v0.16.10-0.20240912233857-ffb143c77cc5/go.mod h1:1HJXYbjJzgumlnxOC2+ViR1U+gnHWoOn7WeK5OfY1eU=
github.com/btcsuite/btcwallet v0.16.10-0.20241127094224-93c858b2ad63 h1:YN+PekOLlLoGxE3P5RJaGgodZD5DDJSU8eXQZVwwCxM=
github.com/btcsuite/btcwallet v0.16.10-0.20241127094224-93c858b2ad63/go.mod h1:1HJXYbjJzgumlnxOC2+ViR1U+gnHWoOn7WeK5OfY1eU=
github.com/btcsuite/btcwallet/wallet/txauthor v1.3.5 h1:Rr0njWI3r341nhSPesKQ2JF+ugDSzdPoeckS75SeDZk=
github.com/btcsuite/btcwallet/wallet/txauthor v1.3.5/go.mod h1:+tXJ3Ym0nlQc/iHSwW1qzjmPs3ev+UVWMbGgfV1OZqU=
github.com/btcsuite/btcwallet/wallet/txrules v1.2.2 h1:YEO+Lx1ZJJAtdRrjuhXjWrYsmAk26wLTlNzxt2q0lhk=
@ -455,8 +455,6 @@ github.com/lightningnetwork/lnd/fn v1.2.3 h1:Q1OrgNSgQynVheBNa16CsKVov1JI5N2AR6G
github.com/lightningnetwork/lnd/fn v1.2.3/go.mod h1:SyFohpVrARPKH3XVAJZlXdVe+IwMYc4OMAvrDY32kw0=
github.com/lightningnetwork/lnd/healthcheck v1.2.5 h1:aTJy5xeBpcWgRtW/PGBDe+LMQEmNm/HQewlQx2jt7OA=
github.com/lightningnetwork/lnd/healthcheck v1.2.5/go.mod h1:G7Tst2tVvWo7cx6mSBEToQC5L1XOGxzZTPB29g9Rv2I=
github.com/lightningnetwork/lnd/kvdb v1.4.10 h1:vK89IVv1oVH9ubQWU+EmoCQFeVRaC8kfmOrqHbY5zoY=
github.com/lightningnetwork/lnd/kvdb v1.4.10/go.mod h1:J2diNABOoII9UrMnxXS5w7vZwP7CA1CStrl8MnIrb3A=
github.com/lightningnetwork/lnd/queue v1.1.1 h1:99ovBlpM9B0FRCGYJo6RSFDlt8/vOkQQZznVb18iNMI=
github.com/lightningnetwork/lnd/queue v1.1.1/go.mod h1:7A6nC1Qrm32FHuhx/mi1cieAiBZo5O6l8IBIoQxvkz4=
github.com/lightningnetwork/lnd/ticker v1.1.1 h1:J/b6N2hibFtC7JLV77ULQp++QLtCwT6ijJlbdiZFbSM=

View File

@ -496,4 +496,9 @@ type AuxTrafficShaper interface {
PaymentBandwidth(htlcBlob, commitmentBlob fn.Option[tlv.Blob],
linkBandwidth,
htlcAmt lnwire.MilliSatoshi) (lnwire.MilliSatoshi, error)
// IsCustomHTLC returns true if the HTLC carries the set of relevant
// custom records to put it under the purview of the traffic shaper,
// meaning that it's from a custom channel.
IsCustomHTLC(htlcRecords lnwire.CustomRecords) bool
}
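For illustration only, a hypothetical implementation of IsCustomHTLC could scan the records for any type in the custom TLV range. This sketch assumes lnwire.CustomRecords is keyed by TLV record type and that lnwire.MinCustomRecordsTlvType marks the start of the custom range (both identifiers appear elsewhere in this diff); a real traffic shaper may apply stricter checks.

// IsCustomHTLC reports whether the HTLC carries any record in the custom
// TLV range, which in this sketch is the signal that the HTLC belongs to
// a custom channel and is under the traffic shaper's purview.
func (s *sketchTrafficShaper) IsCustomHTLC(records lnwire.CustomRecords) bool {
	for recordType := range records {
		if recordType >= lnwire.MinCustomRecordsTlvType {
			return true
		}
	}

	return false
}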

View File

@ -3875,21 +3875,20 @@ func (l *channelLink) processExitHop(add lnwire.UpdateAddHTLC,
return nil
}
// In case the traffic shaper is active, we'll check if the HTLC has
// custom records and skip the amount check in the onion payload below.
isCustomHTLC := fn.MapOptionZ(
l.cfg.AuxTrafficShaper,
func(ts AuxTrafficShaper) bool {
return ts.IsCustomHTLC(add.CustomRecords)
},
)
// As we're the exit hop, we'll double check the hop-payload included in
// the HTLC to ensure that it was crafted correctly by the sender and
// is compatible with the HTLC we were extended.
//
// For a special case, if the fwdInfo doesn't have any blinded path
// information, and the incoming HTLC had special extra data, then
// we'll skip this amount check. The invoice acceptor will make sure we
// reject the HTLC if it's not containing the correct amount after
// examining the custom data.
hasBlindedPath := fwdInfo.NextBlinding.IsSome()
customHTLC := len(add.CustomRecords) > 0 && !hasBlindedPath
log.Tracef("Exit hop has_blinded_path=%v custom_htlc_bypass=%v",
hasBlindedPath, customHTLC)
if !customHTLC && add.Amount < fwdInfo.AmountToForward {
// is compatible with the HTLC we were extended. If an external
// validator is active we might bypass the amount check.
if !isCustomHTLC && add.Amount < fwdInfo.AmountToForward {
l.log.Errorf("onion payload of incoming htlc(%x) has "+
"incompatible value: expected <=%v, got %v",
add.PaymentHash, add.Amount, fwdInfo.AmountToForward)
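The fn.MapOptionZ call above is what keeps this nil-safe: when no AuxTrafficShaper is configured, the mapped function is never run and the zero value (false) is returned. A minimal sketch of that pattern, with hypothetical local variables for illustration:

// With an empty option, MapOptionZ short-circuits to the zero value of
// the result type, so isCustom is false without an explicit nil check.
shaper := fn.None[AuxTrafficShaper]()
isCustom := fn.MapOptionZ(shaper, func(ts AuxTrafficShaper) bool {
	return ts.IsCustomHTLC(htlcRecords)
})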

View File

@ -9,6 +9,7 @@ import (
"time"
"github.com/lightningnetwork/lnd/clock"
"github.com/lightningnetwork/lnd/fn"
"github.com/lightningnetwork/lnd/lntypes"
"github.com/lightningnetwork/lnd/lnwire"
"github.com/lightningnetwork/lnd/queue"
@ -653,56 +654,38 @@ func (i *InvoiceRegistry) startHtlcTimer(invoiceRef InvoiceRef,
func (i *InvoiceRegistry) cancelSingleHtlc(invoiceRef InvoiceRef,
key CircuitKey, result FailResolutionResult) error {
updateInvoice := func(invoice *Invoice) (*InvoiceUpdateDesc, error) {
updateInvoice := func(invoice *Invoice, setID *SetID) (
*InvoiceUpdateDesc, error) {
// Only allow individual htlc cancellation on open invoices.
if invoice.State != ContractOpen {
log.Debugf("cancelSingleHtlc: invoice %v no longer "+
"open", invoiceRef)
log.Debugf("CancelSingleHtlc: cannot cancel htlc %v "+
"on invoice %v, invoice is no longer open", key,
invoiceRef)
return nil, nil
}
// Lookup the current status of the htlc in the database.
var (
htlcState HtlcState
setID *SetID
)
// Also for AMP invoices we fetch the relevant HTLCs, so
// the HTLC should be found, otherwise we return an error.
htlc, ok := invoice.Htlcs[key]
if !ok {
// If this is an AMP invoice, then all the HTLCs won't
// be read out, so we'll consult the other mapping to
// try to find the HTLC state in question here.
var found bool
for ampSetID, htlcSet := range invoice.AMPState {
ampSetID := ampSetID
for htlcKey := range htlcSet.InvoiceKeys {
if htlcKey == key {
htlcState = htlcSet.State
setID = &ampSetID
found = true
break
}
}
}
if !found {
return nil, fmt.Errorf("htlc %v not found", key)
}
} else {
htlcState = htlc.State
return nil, fmt.Errorf("htlc %v not found on "+
"invoice %v", key, invoiceRef)
}
htlcState := htlc.State
// Cancellation is only possible if the htlc wasn't already
// resolved.
if htlcState != HtlcStateAccepted {
log.Debugf("cancelSingleHtlc: htlc %v on invoice %v "+
log.Debugf("CancelSingleHtlc: htlc %v on invoice %v "+
"is already resolved", key, invoiceRef)
return nil, nil
}
log.Debugf("cancelSingleHtlc: cancelling htlc %v on invoice %v",
log.Debugf("CancelSingleHtlc: cancelling htlc %v on invoice %v",
key, invoiceRef)
// Return an update descriptor that cancels htlc and keeps
@ -728,7 +711,7 @@ func (i *InvoiceRegistry) cancelSingleHtlc(invoiceRef InvoiceRef,
func(invoice *Invoice) (
*InvoiceUpdateDesc, error) {
updateDesc, err := updateInvoice(invoice)
updateDesc, err := updateInvoice(invoice, setID)
if err != nil {
return nil, err
}
@ -755,8 +738,13 @@ func (i *InvoiceRegistry) cancelSingleHtlc(invoiceRef InvoiceRef,
key, int32(htlc.AcceptHeight), result,
)
log.Debugf("Signaling htlc(%v) cancellation of invoice(%v) "+
"with resolution(%v) to the link subsystem", key,
invoiceRef, result)
i.notifyHodlSubscribers(resolution)
}
return nil
}
@ -1086,29 +1074,83 @@ func (i *InvoiceRegistry) notifyExitHopHtlcLocked(
updateSubscribers bool
)
callback := func(inv *Invoice) (*InvoiceUpdateDesc, error) {
// First check if this is a replayed htlc and resolve it
// according to its current state. We cannot decide differently
// once the HTLC has already been processed before.
isReplayed, res, err := resolveReplayedHtlc(ctx, inv)
if err != nil {
return nil, err
}
if isReplayed {
resolution = res
return nil, nil
}
// In case the HTLC interceptor cancels the HTLC set, we do NOT
// cancel the invoice; however, we do cancel the complete HTLC set.
if cancelSet {
// If the invoice is not open, something is wrong, we
// fail just the HTLC with the specific error.
if inv.State != ContractOpen {
log.Errorf("Invoice state (%v) is not OPEN, "+
"cancelling HTLC set not allowed by "+
"external source", inv.State)
resolution = NewFailResolution(
ctx.circuitKey, ctx.currentHeight,
ResultInvoiceNotOpen,
)
return nil, nil
}
// The `ExternalValidationFailed` error
// information will be packed into the
// `FailIncorrectDetails` msg when sending the msg to
// the peer. Error codes are defined by the BOLT 04
// specification. The error text can be arbitrary,
// therefore we return a custom error msg.
resolution = NewFailResolution(
ctx.circuitKey, ctx.currentHeight,
ExternalValidationFailed,
)
// We cancel all HTLCs which are in the accepted state.
//
// NOTE: The current HTLC is not included because it
// was never accepted in the first place.
htlcs := inv.HTLCSet(ctx.setID(), HtlcStateAccepted)
htlcKeys := fn.KeySet[CircuitKey](htlcs)
// The external source did cancel the htlc set, so we
// cancel all HTLCs in the set. We however keep the
// invoice in the open state.
//
// NOTE: The invoice event loop will still call the
// `cancelSingleHTLC` method for MPP payments; however,
// because the HTLCs are already cancelled back it will
// be a NOOP.
update := &InvoiceUpdateDesc{
UpdateType: CancelHTLCsUpdate,
CancelHtlcs: htlcKeys,
SetID: setID,
}
return update, nil
}
updateDesc, res, err := updateInvoice(ctx, inv)
if err != nil {
return nil, err
}
// Set resolution in outer scope only after successful update.
resolution = res
// Only send an update if the invoice state was changed.
updateSubscribers = updateDesc != nil &&
updateDesc.State != nil
// Assign resolution to outer scope variable.
if cancelSet {
// If a cancel signal was set for the htlc set, we set
// the resolution as a failure with an underpayment
// indication. Something was wrong with this htlc, so
// we probably can't settle the invoice at all.
resolution = NewFailResolution(
ctx.circuitKey, ctx.currentHeight,
ResultAmountTooLow,
)
} else {
resolution = res
}
return updateDesc, nil
}
@ -1413,6 +1455,8 @@ func (i *InvoiceRegistry) cancelInvoiceImpl(ctx context.Context,
}
invoiceRef := InvoiceRefByHash(payHash)
// We pass a nil setID which means no HTLCs will be read out.
invoice, err := i.idb.UpdateInvoice(ctx, invoiceRef, nil, updateInvoice)
// Implement idempotency by returning success if the invoice was already
@ -1439,6 +1483,8 @@ func (i *InvoiceRegistry) cancelInvoiceImpl(ctx context.Context,
// that are waiting for resolution. Any htlcs that were already canceled
// before, will be notified again. This isn't necessary but doesn't hurt
// either.
//
// TODO(ziggie): Also consider AMP HTLCs here.
for key, htlc := range invoice.Htlcs {
if htlc.State != HtlcStateCanceled {
continue

View File

@ -20,6 +20,7 @@ import (
"github.com/lightningnetwork/lnd/lnwire"
"github.com/lightningnetwork/lnd/record"
"github.com/lightningnetwork/lnd/sqldb"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
)
@ -108,12 +109,20 @@ func TestInvoiceRegistry(t *testing.T) {
name: "SpontaneousAmpPayment",
test: testSpontaneousAmpPayment,
},
{
name: "FailPartialMPPPaymentExternal",
test: testFailPartialMPPPaymentExternal,
},
{
name: "FailPartialAMPPayment",
test: testFailPartialAMPPayment,
},
}
makeKeyValueDB := func(t *testing.T) (invpkg.InvoiceDB,
*clock.TestClock) {
testClock := clock.NewTestClock(testNow)
testClock := clock.NewTestClock(testTime)
db, err := channeldb.MakeTestInvoiceDB(
t, channeldb.OptionClock(testClock),
)
@ -147,7 +156,7 @@ func TestInvoiceRegistry(t *testing.T) {
},
)
testClock := clock.NewTestClock(testNow)
testClock := clock.NewTestClock(testTime)
return invpkg.NewSQLStore(executor, testClock), testClock
}
@ -204,7 +213,7 @@ func testSettleInvoice(t *testing.T,
require.Equal(t, subscription.PayHash(), &testInvoicePaymentHash)
// Add the invoice.
testInvoice := newInvoice(t, false)
testInvoice := newInvoice(t, false, false)
addIdx, err := ctx.registry.AddInvoice(
ctxb, testInvoice, testInvoicePaymentHash,
)
@ -395,7 +404,7 @@ func testCancelInvoiceImpl(t *testing.T, gc bool,
require.Equal(t, subscription.PayHash(), &testInvoicePaymentHash)
// Add the invoice.
testInvoice := newInvoice(t, false)
testInvoice := newInvoice(t, false, false)
_, err = ctx.registry.AddInvoice(
ctxb, testInvoice, testInvoicePaymentHash,
)
@ -555,7 +564,7 @@ func testSettleHoldInvoice(t *testing.T,
require.Equal(t, subscription.PayHash(), &testInvoicePaymentHash)
// Add the invoice.
invoice := newInvoice(t, true)
invoice := newInvoice(t, true, false)
_, err = registry.AddInvoice(ctxb, invoice, testInvoicePaymentHash)
require.NoError(t, err)
@ -716,7 +725,7 @@ func testCancelHoldInvoice(t *testing.T,
ctxb := context.Background()
// Add the invoice.
invoice := newInvoice(t, true)
invoice := newInvoice(t, true, false)
_, err = registry.AddInvoice(ctxb, invoice, testInvoicePaymentHash)
require.NoError(t, err)
@ -1043,7 +1052,7 @@ func testMppPayment(t *testing.T,
ctxb := context.Background()
// Add the invoice.
testInvoice := newInvoice(t, false)
testInvoice := newInvoice(t, false, false)
_, err := ctx.registry.AddInvoice(
ctxb, testInvoice, testInvoicePaymentHash,
)
@ -1141,7 +1150,7 @@ func testMppPaymentWithOverpayment(t *testing.T,
ctx := newTestContext(t, nil, makeDB)
// Add the invoice.
testInvoice := newInvoice(t, false)
testInvoice := newInvoice(t, false, false)
_, err := ctx.registry.AddInvoice(
ctxb, testInvoice, testInvoicePaymentHash,
)
@ -1432,7 +1441,7 @@ func testHeightExpiryWithRegistryImpl(t *testing.T, numParts int, settle bool,
// Add a hold invoice, we set a non-nil payment request so that this
// invoice is not considered a keysend by the expiry watcher.
testInvoice := newInvoice(t, false)
testInvoice := newInvoice(t, false, false)
testInvoice.HodlInvoice = true
testInvoice.PaymentRequest = []byte{1, 2, 3}
@ -1545,7 +1554,7 @@ func testMultipleSetHeightExpiry(t *testing.T,
ctx := newTestContext(t, nil, makeDB)
// Add a hold invoice.
testInvoice := newInvoice(t, true)
testInvoice := newInvoice(t, true, false)
ctxb := context.Background()
_, err := ctx.registry.AddInvoice(
@ -2109,3 +2118,326 @@ func testSpontaneousAmpPaymentImpl(
}
}
}
// testFailPartialMPPPaymentExternal tests that the HTLC set is cancelled back
// as soon as the HTLC interceptor denies one of the HTLCs.
func testFailPartialMPPPaymentExternal(t *testing.T,
makeDB func(t *testing.T) (invpkg.InvoiceDB, *clock.TestClock)) {
t.Parallel()
mockHtlcInterceptor := &invpkg.MockHtlcModifier{}
cfg := defaultRegistryConfig()
cfg.HtlcInterceptor = mockHtlcInterceptor
ctx := newTestContext(t, &cfg, makeDB)
// Add an invoice which we are going to pay via a MPP set.
testInvoice := newInvoice(t, false, false)
ctxb := context.Background()
_, err := ctx.registry.AddInvoice(
ctxb, testInvoice, testInvoicePaymentHash,
)
require.NoError(t, err)
mppPayload := &mockPayload{
mpp: record.NewMPP(testInvoiceAmount, [32]byte{}),
}
// Send first HTLC which pays part of the invoice but keeps the invoice
// in an open state because the amount is less than the invoice amount.
hodlChan1 := make(chan interface{}, 1)
resolution, err := ctx.registry.NotifyExitHopHtlc(
testInvoicePaymentHash, testInvoice.Terms.Value/3,
testHtlcExpiry, testCurrentHeight, getCircuitKey(1),
hodlChan1, nil, mppPayload,
)
require.NoError(t, err)
require.Nil(t, resolution, "did not expect direct resolution")
// Register the expected response from the interceptor so that the
// whole HTLC set is cancelled.
expectedResponse := invpkg.HtlcModifyResponse{
CancelSet: true,
}
mockHtlcInterceptor.On("Intercept", mock.Anything, mock.Anything).
Return(nil, expectedResponse)
// Send htlc 2. We expect the HTLC to be cancelled because the
// interceptor will deny it.
resolution, err = ctx.registry.NotifyExitHopHtlc(
testInvoicePaymentHash, testInvoice.Terms.Value/2,
testHtlcExpiry, testCurrentHeight, getCircuitKey(2), nil,
nil, mppPayload,
)
require.NoError(t, err)
failResolution, ok := resolution.(*invpkg.HtlcFailResolution)
require.True(t, ok, "expected fail resolution, got: %T", resolution)
// Make sure the resolution includes the custom error msg.
require.Equal(t, invpkg.ExternalValidationFailed,
failResolution.Outcome, "expected ExternalValidationFailed, "+
"got: %v", failResolution.Outcome)
// Expect HTLC 1 also to be cancelled because it is part of the cancel
// set and the interceptor cancelled the whole set after receiving the
// second HTLC.
select {
case resolution := <-hodlChan1:
htlcResolution, _ := resolution.(invpkg.HtlcResolution)
failResolution, ok = htlcResolution.(*invpkg.HtlcFailResolution)
require.True(
t, ok, "expected fail resolution, got: %T",
htlcResolution,
)
require.Equal(
t, invpkg.ExternalValidationFailed,
failResolution.Outcome, "expected "+
"ExternalValidationFailed, got: %v",
failResolution.Outcome,
)
case <-time.After(testTimeout):
t.Fatal("timeout waiting for HTLC resolution")
}
// Assert that the invoice is still open.
inv, err := ctx.registry.LookupInvoice(ctxb, testInvoicePaymentHash)
require.NoError(t, err)
require.Equal(t, invpkg.ContractOpen, inv.State, "expected "+
"OPEN invoice")
// Now let the invoice expire.
currentTime := ctx.clock.Now()
ctx.clock.SetTime(currentTime.Add(61 * time.Minute))
// Make sure the invoice changes to the canceled state.
require.Eventuallyf(t, func() bool {
inv, err := ctx.registry.LookupInvoice(
ctxb, testInvoicePaymentHash,
)
require.NoError(t, err)
return inv.State == invpkg.ContractCanceled
}, testTimeout, time.Millisecond*100, "invoice not canceled")
// Fetch the invoice again and compare the number of cancelled HTLCs.
inv, err = ctx.registry.LookupInvoice(
ctxb, testInvoicePaymentHash,
)
require.NoError(t, err)
// Make sure all HTLCs are in the canceled state which in our case is
// only the first one because the second HTLC was never added to the
// invoice registry in the first place.
require.Len(t, inv.Htlcs, 1)
require.Equal(
t, invpkg.HtlcStateCanceled, inv.Htlcs[getCircuitKey(1)].State,
)
}
// testFailPartialAMPPayment tests the MPP timeout logic for AMP invoices. It
// makes sure that all HTLCs are cancelled if the full invoice amount is not
// received. Moreover, it points out some TODOs to make AMP invoices more robust.
func testFailPartialAMPPayment(t *testing.T,
makeDB func(t *testing.T) (invpkg.InvoiceDB, *clock.TestClock)) {
t.Parallel()
ctx := newTestContext(t, nil, makeDB)
ctxb := context.Background()
const (
expiry = uint32(testCurrentHeight + 20)
numShards = 4
)
var (
shardAmt = testInvoiceAmount / lnwire.MilliSatoshi(numShards)
setID [32]byte
payAddr [32]byte
)
_, err := rand.Read(payAddr[:])
require.NoError(t, err)
// Create an AMP invoice we are going to pay via a multi-part payment.
ampInvoice := newInvoice(t, false, true)
// An AMP invoice is referenced by the payment address.
ampInvoice.Terms.PaymentAddr = payAddr
_, err = ctx.registry.AddInvoice(
ctxb, ampInvoice, testInvoicePaymentHash,
)
require.NoError(t, err)
// Generate a random setID for the HTLCs.
_, err = rand.Read(setID[:])
require.NoError(t, err)
htlcPayload1 := &mockPayload{
mpp: record.NewMPP(testInvoiceAmount, payAddr),
// We are not interested in settling the AMP HTLC so we don't
// use valid shares.
amp: record.NewAMP([32]byte{1}, setID, 1),
}
// Send first HTLC which pays part of the invoice.
hodlChan1 := make(chan interface{}, 1)
resolution, err := ctx.registry.NotifyExitHopHtlc(
lntypes.Hash{1}, shardAmt, expiry, testCurrentHeight,
getCircuitKey(1), hodlChan1, nil, htlcPayload1,
)
require.NoError(t, err)
require.Nil(t, resolution, "did not expect direct resolution")
htlcPayload2 := &mockPayload{
mpp: record.NewMPP(testInvoiceAmount, payAddr),
// We are not interested in settling the AMP HTLC so we don't
// use valid shares.
amp: record.NewAMP([32]byte{2}, setID, 2),
}
// Send htlc 2 which should be added to the invoice as expected.
hodlChan2 := make(chan interface{}, 1)
resolution, err = ctx.registry.NotifyExitHopHtlc(
lntypes.Hash{2}, shardAmt, expiry, testCurrentHeight,
getCircuitKey(2), hodlChan2, nil, htlcPayload2,
)
require.NoError(t, err)
require.Nil(t, resolution, "did not expect direct resolution")
// Now time-out the HTLCs. The HoldDuration is 30 seconds, after which
// the HTLC will be cancelled.
currentTime := ctx.clock.Now()
ctx.clock.SetTime(currentTime.Add(35 * time.Second))
// Expect HTLC 1 to be canceled via the MPPTimeout fail resolution.
select {
case resolution := <-hodlChan1:
htlcResolution, _ := resolution.(invpkg.HtlcResolution)
failRes, ok := htlcResolution.(*invpkg.HtlcFailResolution)
require.True(
t, ok, "expected fail resolution, got: %T", resolution,
)
require.Equal(
t, invpkg.ResultMppTimeout, failRes.Outcome,
"expected MPPTimeout, got: %v", failRes.Outcome,
)
case <-time.After(testTimeout):
t.Fatal("timeout waiting for HTLC resolution")
}
// Expect HTLC 2 to be canceled via the MPPTimeout fail resolution.
select {
case resolution := <-hodlChan2:
htlcResolution, _ := resolution.(invpkg.HtlcResolution)
failRes, ok := htlcResolution.(*invpkg.HtlcFailResolution)
require.True(
t, ok, "expected fail resolution, got: %T", resolution,
)
require.Equal(
t, invpkg.ResultMppTimeout, failRes.Outcome,
"expected MPPTimeout, got: %v", failRes.Outcome,
)
case <-time.After(testTimeout):
t.Fatal("timeout waiting for HTLC resolution")
}
// The AMP invoice should still be open.
inv, err := ctx.registry.LookupInvoice(ctxb, testInvoicePaymentHash)
require.NoError(t, err)
require.Equal(t, invpkg.ContractOpen, inv.State, "expected "+
"OPEN invoice")
// Because one HTLC of the set was cancelled we expect the AMPState to
// be set to canceled.
ampState, ok := inv.AMPState[setID]
require.True(t, ok, "expected AMPState to be set")
require.Equal(t, invpkg.HtlcStateCanceled, ampState.State, "expected "+
"AMPState CANCELED")
// The following is a bug and should not be allowed because the sub
// AMP invoice is already marked as canceled. However, LND will accept
// other HTLCs to the AMP sub-invoice.
//
// TODO(ziggie): Fix this bug.
htlcPayload3 := &mockPayload{
mpp: record.NewMPP(testInvoiceAmount, payAddr),
// We are not interested in settling the AMP HTLC so we don't
// use valid shares.
amp: record.NewAMP([32]byte{3}, setID, 3),
}
// Send htlc 3 which should be added to the invoice as expected.
hodlChan3 := make(chan interface{}, 1)
resolution, err = ctx.registry.NotifyExitHopHtlc(
lntypes.Hash{3}, shardAmt, expiry, testCurrentHeight,
getCircuitKey(3), hodlChan3, nil, htlcPayload3,
)
require.NoError(t, err)
require.Nil(t, resolution, "did not expect direct resolution")
// TODO(ziggie): This is a race condition between the invoice being
// cancelled and the htlc being added to the invoice. If we do not wait
// here until the HTLC is added to the invoice, the test might fail
// because the HTLC will not be resolved.
require.Eventuallyf(t, func() bool {
inv, err := ctx.registry.LookupInvoice(
ctxb, testInvoicePaymentHash,
)
require.NoError(t, err)
return len(inv.Htlcs) == 3
}, testTimeout, time.Millisecond*100, "HTLC 3 not added to invoice")
// Now also let the invoice expire; the invoice expiry is 1 hour.
currentTime = ctx.clock.Now()
ctx.clock.SetTime(currentTime.Add(1 * time.Minute))
// Expect HTLC 3 to be canceled either via the cancellation of the
// invoice or because the MPP timeout kicks in.
select {
case resolution := <-hodlChan3:
htlcResolution, _ := resolution.(invpkg.HtlcResolution)
failRes, ok := htlcResolution.(*invpkg.HtlcFailResolution)
require.True(
t, ok, "expected fail resolution, got: %T", resolution,
)
require.Equal(
t, invpkg.ResultMppTimeout, failRes.Outcome,
"expected MPPTimeout, got: %v", failRes.Outcome,
)
case <-time.After(testTimeout):
t.Fatal("timeout waiting for HTLC resolution")
}
// Expire the invoice here.
currentTime = ctx.clock.Now()
ctx.clock.SetTime(currentTime.Add(61 * time.Minute))
require.Eventuallyf(t, func() bool {
inv, err := ctx.registry.LookupInvoice(
ctxb, testInvoicePaymentHash,
)
require.NoError(t, err)
return inv.State == invpkg.ContractCanceled
}, testTimeout, time.Millisecond*100, "invoice not canceled")
// Fetch the invoice again and compare the number of cancelled HTLCs.
inv, err = ctx.registry.LookupInvoice(
ctxb, testInvoicePaymentHash,
)
require.NoError(t, err)
// Make sure all HTLCs are in the cancelled state.
require.Len(t, inv.Htlcs, 3)
for _, htlc := range inv.Htlcs {
require.Equal(t, invpkg.HtlcStateCanceled, htlc.State,
"expected HTLC to be canceled")
}
}

View File

@ -760,6 +760,9 @@ type InvoiceStateUpdateDesc struct {
// InvoiceUpdateCallback is a callback used in the db transaction to update the
// invoice.
// TODO(ziggie): Add the option of additional return values to the callback
// for example the resolution which is currently assigned via an outer scope
// variable.
type InvoiceUpdateCallback = func(invoice *Invoice) (*InvoiceUpdateDesc, error)
// ValidateInvoice assures the invoice passes the checks for all the relevant

View File

@ -86,6 +86,7 @@ func (m *MockInvoiceDB) DeleteCanceledInvoices(ctx context.Context) error {
// MockHtlcModifier is a mock implementation of the HtlcModifier interface.
type MockHtlcModifier struct {
mock.Mock
}
// Intercept generates a new intercept session for the given invoice.
@ -94,9 +95,23 @@ type MockHtlcModifier struct {
// created in the first place, which is only the case if a client is
// registered.
func (m *MockHtlcModifier) Intercept(
_ HtlcModifyRequest, _ func(HtlcModifyResponse)) error {
req HtlcModifyRequest, callback func(HtlcModifyResponse)) error {
return nil
// If no expectations are set, return nil by default.
if len(m.ExpectedCalls) == 0 {
return nil
}
args := m.Called(req, callback)
// If a response was provided to the mock, execute the callback with it.
if response, ok := args.Get(1).(HtlcModifyResponse); ok &&
callback != nil {
callback(response)
}
return args.Error(0)
}
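As a usage example, the new registry test earlier in this diff (testFailPartialMPPPaymentExternal) programs this mock as shown below; the sketch just restates that wiring from the external test package's point of view.

// Any Intercept call will return this response, and the mock above will
// invoke the callback with it, causing the registry to cancel back the
// whole accepted HTLC set while leaving the invoice open.
mockHtlcInterceptor := &invpkg.MockHtlcModifier{}
mockHtlcInterceptor.On("Intercept", mock.Anything, mock.Anything).
	Return(nil, invpkg.HtlcModifyResponse{CancelSet: true})

cfg := defaultRegistryConfig()
cfg.HtlcInterceptor = mockHtlcInterceptor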
// RegisterInterceptor sets the client callback function that will be

View File

@ -136,9 +136,6 @@ func (s *HtlcModificationInterceptor) Intercept(clientRequest HtlcModifyRequest,
// Wait for the client to respond or an error to occur.
select {
case response := <-responseChan:
log.Debugf("Received invoice HTLC interceptor response: %v",
response)
responseCallback(*response)
return nil

View File

@ -120,6 +120,10 @@ const (
// ResultAmpReconstruction is returned when the derived child
// hash/preimage pairs were invalid for at least one HTLC in the set.
ResultAmpReconstruction
// ExternalValidationFailed is returned when the external validation
// failed.
ExternalValidationFailed
)
// String returns a string representation of the result.
@ -189,6 +193,9 @@ func (f FailResolutionResult) FailureString() string {
case ResultAmpReconstruction:
return "amp reconstruction failed"
case ExternalValidationFailed:
return "external validation failed"
default:
return "unknown failure resolution result"
}
@ -202,7 +209,8 @@ func (f FailResolutionResult) IsSetFailure() bool {
ResultAmpReconstruction,
ResultHtlcSetTotalTooLow,
ResultHtlcSetTotalMismatch,
ResultHtlcSetOverpayment:
ResultHtlcSetOverpayment,
ExternalValidationFailed:
return true

View File

@ -207,7 +207,7 @@ func getCircuitKey(htlcID uint64) invpkg.CircuitKey {
// Note that this invoice *does not* have a payment address set. It will
// create a regular invoice with a preimage if hodl is false, and a hodl
// invoice with no preimage otherwise.
func newInvoice(t *testing.T, hodl bool) *invpkg.Invoice {
func newInvoice(t *testing.T, hodl bool, ampInvoice bool) *invpkg.Invoice {
invoice := &invpkg.Invoice{
Terms: invpkg.ContractTerm{
Value: testInvoiceAmount,
@ -217,6 +217,23 @@ func newInvoice(t *testing.T, hodl bool) *invpkg.Invoice {
CreationDate: testInvoiceCreationDate,
}
// This makes the invoice an AMP invoice. We do not support AMP hodl
// invoices.
if ampInvoice {
ampFeature := lnwire.NewRawFeatureVector(
lnwire.TLVOnionPayloadOptional,
lnwire.PaymentAddrOptional,
lnwire.AMPRequired,
)
ampFeatures := lnwire.NewFeatureVector(
ampFeature, lnwire.Features,
)
invoice.Terms.Features = ampFeatures
return invoice
}
// If creating a hodl invoice, we don't include a preimage.
if hodl {
invoice.HodlInvoice = true

View File

@ -109,41 +109,52 @@ func (i invoiceUpdateCtx) acceptRes(
return newAcceptResolution(i.circuitKey, outcome)
}
// resolveReplayedHtlc returns the HTLC resolution for a replayed HTLC. The
// returned boolean indicates whether the HTLC was replayed or not.
func resolveReplayedHtlc(ctx *invoiceUpdateCtx, inv *Invoice) (bool,
HtlcResolution, error) {
// Don't update the invoice when this is a replayed htlc.
htlc, replayedHTLC := inv.Htlcs[ctx.circuitKey]
if !replayedHTLC {
return false, nil, nil
}
switch htlc.State {
case HtlcStateCanceled:
return true, ctx.failRes(ResultReplayToCanceled), nil
case HtlcStateAccepted:
return true, ctx.acceptRes(resultReplayToAccepted), nil
case HtlcStateSettled:
pre := inv.Terms.PaymentPreimage
// Terms.PaymentPreimage will be nil for AMP invoices.
// Set it to the HTLCs AMP Preimage instead.
if pre == nil {
pre = htlc.AMP.Preimage
}
return true, ctx.settleRes(
*pre,
ResultReplayToSettled,
), nil
default:
return true, nil, errors.New("unknown htlc state")
}
}
// updateInvoice is a callback for DB.UpdateInvoice that contains the invoice
// settlement logic. It returns a HTLC resolution that indicates what the
// outcome of the update was.
//
// NOTE: Make sure replayed HTLCs are always considered before calling this
// function.
func updateInvoice(ctx *invoiceUpdateCtx, inv *Invoice) (
*InvoiceUpdateDesc, HtlcResolution, error) {
// Don't update the invoice when this is a replayed htlc.
htlc, ok := inv.Htlcs[ctx.circuitKey]
if ok {
switch htlc.State {
case HtlcStateCanceled:
return nil, ctx.failRes(ResultReplayToCanceled), nil
case HtlcStateAccepted:
return nil, ctx.acceptRes(resultReplayToAccepted), nil
case HtlcStateSettled:
pre := inv.Terms.PaymentPreimage
// Terms.PaymentPreimage will be nil for AMP invoices.
// Set it to the HTLCs AMP Preimage instead.
if pre == nil {
pre = htlc.AMP.Preimage
}
return nil, ctx.settleRes(
*pre,
ResultReplayToSettled,
), nil
default:
return nil, nil, errors.New("unknown htlc state")
}
}
// If no MPP payload was provided, then we expect this to be a keysend,
// or a payment to an invoice created before we started to require the
// MPP payload.

View File

@ -454,14 +454,6 @@ var allTestCases = []*lntest.TestCase{
Name: "forward interceptor",
TestFunc: testForwardInterceptorBasic,
},
{
Name: "forward interceptor modified htlc",
TestFunc: testForwardInterceptorModifiedHtlc,
},
{
Name: "forward interceptor wire records",
TestFunc: testForwardInterceptorWireRecords,
},
{
Name: "forward interceptor restart",
TestFunc: testForwardInterceptorRestart,

View File

@ -10,7 +10,6 @@ import (
"github.com/btcsuite/btcd/btcutil"
"github.com/lightningnetwork/lnd/chainreg"
"github.com/lightningnetwork/lnd/lnrpc"
"github.com/lightningnetwork/lnd/lnrpc/invoicesrpc"
"github.com/lightningnetwork/lnd/lnrpc/routerrpc"
"github.com/lightningnetwork/lnd/lntest"
"github.com/lightningnetwork/lnd/lntest/node"
@ -351,242 +350,6 @@ func testForwardInterceptorBasic(ht *lntest.HarnessTest) {
ht.CloseChannel(bob, cpBC)
}
// testForwardInterceptorModifiedHtlc tests that the interceptor can modify the
// amount and custom records of an intercepted HTLC and resume it.
func testForwardInterceptorModifiedHtlc(ht *lntest.HarnessTest) {
// Initialize the test context with 3 connected nodes.
ts := newInterceptorTestScenario(ht)
alice, bob, carol := ts.alice, ts.bob, ts.carol
// Open and wait for channels.
const chanAmt = btcutil.Amount(300000)
p := lntest.OpenChannelParams{Amt: chanAmt}
reqs := []*lntest.OpenChannelRequest{
{Local: alice, Remote: bob, Param: p},
{Local: bob, Remote: carol, Param: p},
}
resp := ht.OpenMultiChannelsAsync(reqs)
cpAB, cpBC := resp[0], resp[1]
// Make sure Alice is aware of channel Bob=>Carol.
ht.AssertTopologyChannelOpen(alice, cpBC)
// Connect an interceptor to Bob's node.
bobInterceptor, cancelBobInterceptor := bob.RPC.HtlcInterceptor()
// We're going to modify the payment amount and want Carol to accept the
// payment, so we set up an invoice acceptor on Dave.
carolAcceptor, carolCancel := carol.RPC.InvoiceHtlcModifier()
defer carolCancel()
// Prepare the test cases.
invoiceValueAmtMsat := int64(20_000_000)
req := &lnrpc.Invoice{ValueMsat: invoiceValueAmtMsat}
addResponse := carol.RPC.AddInvoice(req)
invoice := carol.RPC.LookupInvoice(addResponse.RHash)
tc := &interceptorTestCase{
amountMsat: invoiceValueAmtMsat,
invoice: invoice,
payAddr: invoice.PaymentAddr,
}
// We initiate a payment from Alice.
done := make(chan struct{})
go func() {
// Signal that all the payments have been sent.
defer close(done)
ts.sendPaymentAndAssertAction(tc)
}()
// We start the htlc interceptor with a simple implementation that saves
// all intercepted packets. These packets are held to simulate a
// pending payment.
packet := ht.ReceiveHtlcInterceptor(bobInterceptor)
// Resume the intercepted HTLC with a modified amount and custom
// records.
customRecords := make(map[uint64][]byte)
// Add custom records entry.
crKey := uint64(65537)
crValue := []byte("custom-records-test-value")
customRecords[crKey] = crValue
// Modify the amount of the HTLC, so we send out less than the original
// amount.
const modifyAmount = 5_000_000
newOutAmountMsat := packet.OutgoingAmountMsat - modifyAmount
err := bobInterceptor.Send(&routerrpc.ForwardHtlcInterceptResponse{
IncomingCircuitKey: packet.IncomingCircuitKey,
OutAmountMsat: newOutAmountMsat,
OutWireCustomRecords: customRecords,
Action: actionResumeModify,
})
require.NoError(ht, err, "failed to send request")
invoicePacket := ht.ReceiveInvoiceHtlcModification(carolAcceptor)
require.EqualValues(
ht, newOutAmountMsat, invoicePacket.ExitHtlcAmt,
)
amtPaid := newOutAmountMsat + modifyAmount
err = carolAcceptor.Send(&invoicesrpc.HtlcModifyResponse{
CircuitKey: invoicePacket.ExitHtlcCircuitKey,
AmtPaid: &amtPaid,
})
require.NoError(ht, err, "carol acceptor response")
// Cancel the context, which will disconnect Bob's interceptor.
cancelBobInterceptor()
// Make sure all goroutines are finished.
select {
case <-done:
case <-time.After(defaultTimeout):
require.Fail(ht, "timeout waiting for sending payment")
}
// Assert that the payment was successful.
var preimage lntypes.Preimage
copy(preimage[:], invoice.RPreimage)
ht.AssertPaymentStatus(alice, preimage, lnrpc.Payment_SUCCEEDED)
// Finally, close channels.
ht.CloseChannel(alice, cpAB)
ht.CloseChannel(bob, cpBC)
}
// testForwardInterceptorWireRecords tests that the interceptor can read any
// wire custom records provided by the sender of a payment as part of the
// update_add_htlc message.
func testForwardInterceptorWireRecords(ht *lntest.HarnessTest) {
// Initialize the test context with 3 connected nodes.
ts := newInterceptorTestScenario(ht)
alice, bob, carol, dave := ts.alice, ts.bob, ts.carol, ts.dave
// Open and wait for channels.
const chanAmt = btcutil.Amount(300000)
p := lntest.OpenChannelParams{Amt: chanAmt}
reqs := []*lntest.OpenChannelRequest{
{Local: alice, Remote: bob, Param: p},
{Local: bob, Remote: carol, Param: p},
{Local: carol, Remote: dave, Param: p},
}
resp := ht.OpenMultiChannelsAsync(reqs)
cpAB, cpBC, cpCD := resp[0], resp[1], resp[2]
// Make sure Alice is aware of channel Bob=>Carol.
ht.AssertTopologyChannelOpen(alice, cpBC)
// Connect an interceptor to Bob's node.
bobInterceptor, cancelBobInterceptor := bob.RPC.HtlcInterceptor()
defer cancelBobInterceptor()
// Also connect an interceptor on Carol's node to check whether we're
// relaying the TLVs send in update_add_htlc over Alice -> Bob on the
// Bob -> Carol link.
carolInterceptor, cancelCarolInterceptor := carol.RPC.HtlcInterceptor()
defer cancelCarolInterceptor()
// We're going to modify the payment amount and want Dave to accept the
// payment, so we set up an invoice acceptor on Dave.
daveAcceptor, daveCancel := dave.RPC.InvoiceHtlcModifier()
defer daveCancel()
req := &lnrpc.Invoice{ValueMsat: 20_000_000}
addResponse := dave.RPC.AddInvoice(req)
invoice := dave.RPC.LookupInvoice(addResponse.RHash)
customRecords := map[uint64][]byte{
65537: []byte("test"),
}
sendReq := &routerrpc.SendPaymentRequest{
PaymentRequest: invoice.PaymentRequest,
TimeoutSeconds: int32(wait.PaymentTimeout.Seconds()),
FeeLimitMsat: noFeeLimitMsat,
FirstHopCustomRecords: customRecords,
}
_ = alice.RPC.SendPayment(sendReq)
// We start the htlc interceptor with a simple implementation that saves
// all intercepted packets. These packets are held to simulate a
// pending payment.
packet := ht.ReceiveHtlcInterceptor(bobInterceptor)
require.Len(ht, packet.InWireCustomRecords, 1)
val, ok := packet.InWireCustomRecords[65537]
require.True(ht, ok, "expected custom record")
require.Equal(ht, []byte("test"), val)
// Just resume the payment on Bob.
err := bobInterceptor.Send(&routerrpc.ForwardHtlcInterceptResponse{
IncomingCircuitKey: packet.IncomingCircuitKey,
Action: actionResume,
})
require.NoError(ht, err, "failed to send request")
// Assert that the Alice -> Bob custom records in update_add_htlc are
// not propagated on the Bob -> Carol link.
packet = ht.ReceiveHtlcInterceptor(carolInterceptor)
require.Len(ht, packet.InWireCustomRecords, 0)
// We're going to tell Carol to forward 5k sats less to Dave. We need to
// set custom records on the HTLC as well, to make sure the HTLC isn't
// rejected outright and actually gets to the invoice acceptor.
const modifyAmount = 5_000_000
newOutAmountMsat := packet.OutgoingAmountMsat - modifyAmount
err = carolInterceptor.Send(&routerrpc.ForwardHtlcInterceptResponse{
IncomingCircuitKey: packet.IncomingCircuitKey,
OutAmountMsat: newOutAmountMsat,
OutWireCustomRecords: customRecords,
Action: actionResumeModify,
})
require.NoError(ht, err, "carol interceptor response")
// The payment should get to Dave, and we should be able to intercept
// and modify it, telling Dave to accept it.
invoicePacket := ht.ReceiveInvoiceHtlcModification(daveAcceptor)
require.EqualValues(
ht, newOutAmountMsat, invoicePacket.ExitHtlcAmt,
)
amtPaid := newOutAmountMsat + modifyAmount
err = daveAcceptor.Send(&invoicesrpc.HtlcModifyResponse{
CircuitKey: invoicePacket.ExitHtlcCircuitKey,
AmtPaid: &amtPaid,
})
require.NoError(ht, err, "dave acceptor response")
// Assert that the payment was successful.
var preimage lntypes.Preimage
copy(preimage[:], invoice.RPreimage)
ht.AssertPaymentStatus(
alice, preimage, lnrpc.Payment_SUCCEEDED,
func(p *lnrpc.Payment) error {
recordsEqual := reflect.DeepEqual(
p.FirstHopCustomRecords,
sendReq.FirstHopCustomRecords,
)
if !recordsEqual {
return fmt.Errorf("expected custom records to "+
"be equal, got %v expected %v",
p.FirstHopCustomRecords,
sendReq.FirstHopCustomRecords)
}
return nil
},
)
// Finally, close channels.
ht.CloseChannel(alice, cpAB)
ht.CloseChannel(bob, cpBC)
ht.CloseChannel(carol, cpCD)
}
// testForwardInterceptorRestart tests that the interceptor can read any wire
// custom records provided by the sender of a payment as part of the
// update_add_htlc message and that those records are persisted correctly and

View File

@ -34,6 +34,11 @@ func testInvoiceHtlcModifierBasic(ht *lntest.HarnessTest) {
resp := ht.OpenMultiChannelsAsync(reqs)
cpAB, cpBC := resp[0], resp[1]
ht.Cleanup(func() {
ht.CloseChannel(alice, cpAB)
ht.CloseChannel(bob, cpBC)
})
// Make sure Alice is aware of channel Bob=>Carol.
ht.AssertTopologyChannelOpen(alice, cpBC)
@ -113,6 +118,7 @@ func testInvoiceHtlcModifierBasic(ht *lntest.HarnessTest) {
&invoicesrpc.HtlcModifyResponse{
CircuitKey: modifierRequest.ExitHtlcCircuitKey,
AmtPaid: &amtPaid,
CancelSet: tc.cancelSet,
},
)
require.NoError(ht, err, "failed to send request")
@ -125,7 +131,9 @@ func testInvoiceHtlcModifierBasic(ht *lntest.HarnessTest) {
require.Fail(ht, "timeout waiting for payment send")
}
ht.Log("Ensure invoice status is settled")
ht.Logf("Ensure invoice status is expected state %v",
tc.finalInvoiceState)
require.Eventually(ht, func() bool {
updatedInvoice := carol.RPC.LookupInvoice(
tc.invoice.RHash,
@ -138,6 +146,13 @@ func testInvoiceHtlcModifierBasic(ht *lntest.HarnessTest) {
tc.invoice.RHash,
)
// If the HTLC modifier canceled the incoming HTLC set, we don't
// expect any HTLCs in the invoice.
if tc.cancelSet {
require.Len(ht, updatedInvoice.Htlcs, 0)
return
}
require.Len(ht, updatedInvoice.Htlcs, 1)
require.Equal(
ht, tc.lastHopCustomRecords,
@ -204,10 +219,6 @@ func testInvoiceHtlcModifierBasic(ht *lntest.HarnessTest) {
}
cancelModifier()
// Finally, close channels.
ht.CloseChannel(alice, cpAB)
ht.CloseChannel(bob, cpBC)
}
// acceptorTestCase is a helper struct to hold test case data.
@ -231,6 +242,10 @@ type acceptorTestCase struct {
// invoice is the invoice that will be paid.
invoice *lnrpc.Invoice
// cancelSet is a boolean which indicates whether the HTLC modifier
// canceled the incoming HTLC set.
cancelSet bool
}
// acceptorTestScenario is a helper struct to hold the test context and provides
@ -281,6 +296,12 @@ func (c *acceptorTestScenario) prepareTestCases() []*acceptorTestCase {
lnwire.MinCustomRecordsTlvType: {1, 2, 3},
},
},
{
invoiceAmountMsat: 9000,
sendAmountMsat: 1000,
finalInvoiceState: lnrpc.Invoice_OPEN,
cancelSet: true,
},
}
for _, t := range cases {

View File

@ -221,11 +221,11 @@ func testMultiHopPayments(ht *lntest.HarnessTest) {
// Dave and Alice should both have forwards and settles for
// their role as forwarding nodes.
ht.AssertHtlcEvents(
daveEvents, numPayments, 0, numPayments, 0,
daveEvents, numPayments, 0, numPayments*2, 0,
routerrpc.HtlcEvent_FORWARD,
)
ht.AssertHtlcEvents(
aliceEvents, numPayments, 0, numPayments, 0,
aliceEvents, numPayments, 0, numPayments*2, 0,
routerrpc.HtlcEvent_FORWARD,
)

View File

@ -16,7 +16,7 @@ require (
go.etcd.io/etcd/client/v3 v3.5.7
go.etcd.io/etcd/server/v3 v3.5.7
golang.org/x/net v0.22.0
modernc.org/sqlite v1.29.8
modernc.org/sqlite v1.29.10
)
require (
@ -58,6 +58,7 @@ require (
github.com/jackc/pgproto3/v2 v2.3.3 // indirect
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect
github.com/jackc/pgtype v1.14.0 // indirect
github.com/jackc/pgx/v5 v5.3.1 // indirect
github.com/jonboulle/clockwork v0.2.2 // indirect
github.com/json-iterator/go v1.1.11 // indirect
github.com/lib/pq v1.10.9 // indirect
@ -134,6 +135,9 @@ replace github.com/dgrijalva/jwt-go => github.com/golang-jwt/jwt v3.2.1+incompat
// This replace is for https://github.com/advisories/GHSA-25xm-hr59-7c27
replace github.com/ulikunitz/xz => github.com/ulikunitz/xz v0.5.11
// Use local sqldb package until new version is tagged.
replace github.com/lightningnetwork/lnd/sqldb => ../sqldb
// This replace is for
// https://deps.dev/advisory/OSV/GO-2021-0053?from=%2Fgo%2Fgithub.com%252Fgogo%252Fprotobuf%2Fv1.3.1
replace github.com/gogo/protobuf => github.com/gogo/protobuf v1.3.2

View File

@ -254,6 +254,8 @@ github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQ
github.com/jackc/pgx/v4 v4.12.1-0.20210724153913-640aa07df17c/go.mod h1:1QD0+tgSXP7iUjYm9C1NxKhny7lq6ee99u/z+IHFcgs=
github.com/jackc/pgx/v4 v4.18.1 h1:YP7G1KABtKpB5IHrO9vYwSrCOhs7p3uqhvhhQBptya0=
github.com/jackc/pgx/v4 v4.18.1/go.mod h1:FydWkUyadDmdNH/mHnGob881GawxeEm7TcMCzkb+qQE=
github.com/jackc/pgx/v5 v5.3.1 h1:Fcr8QJ1ZeLi5zsPZqQeUZhNhxfkkKBOgJuYkJHoBOtU=
github.com/jackc/pgx/v5 v5.3.1/go.mod h1:t3JDKnCBlYIc0ewLF0Q7B8MXmoIaBOZj/ic7iHozM/8=
github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
@ -294,8 +296,6 @@ github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/lightningnetwork/lnd/healthcheck v1.2.4 h1:lLPLac+p/TllByxGSlkCwkJlkddqMP5UCoawCj3mgFQ=
github.com/lightningnetwork/lnd/healthcheck v1.2.4/go.mod h1:G7Tst2tVvWo7cx6mSBEToQC5L1XOGxzZTPB29g9Rv2I=
github.com/lightningnetwork/lnd/sqldb v1.0.2 h1:PfuYzScYMD9/QonKo/QvgsbXfTnH5DfldIimkfdW4Bk=
github.com/lightningnetwork/lnd/sqldb v1.0.2/go.mod h1:V2Xl6JNWLTKE97WJnwfs0d0TYJdIQTqK8/3aAwkd3qI=
github.com/lightningnetwork/lnd/ticker v1.1.0 h1:ShoBiRP3pIxZHaETndfQ5kEe+S4NdAY1hiX7YbZ4QE4=
github.com/lightningnetwork/lnd/ticker v1.1.0/go.mod h1:ubqbSVCn6RlE0LazXuBr7/Zi6QT0uQo++OgIRBxQUrk=
github.com/lightningnetwork/lnd/tor v1.0.0 h1:wvEc7I+Y7IOtPglVP3cVBbYhiVhc7uTd7cMF9gQRzwA=
@ -307,8 +307,6 @@ github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hd
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU=
github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/miekg/dns v1.1.43 h1:JKfpVSCB84vrAmHzyrsxB5NAr5kLoMXZArPSw7Qlgyg=
@ -380,8 +378,8 @@ github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU=
github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc=
@ -737,8 +735,8 @@ modernc.org/opt v0.1.3 h1:3XOZf2yznlhC+ibLltsDGzABUGVx8J6pnFMS3E4dcq4=
modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0=
modernc.org/sortutil v1.2.0 h1:jQiD3PfS2REGJNzNCMMaLSp/wdMNieTbKX920Cqdgqc=
modernc.org/sortutil v1.2.0/go.mod h1:TKU2s7kJMf1AE84OoiGppNHJwvB753OYfNl2WRb++Ss=
modernc.org/sqlite v1.29.8 h1:nGKglNx9K5v0As+zF0/Gcl1kMkmaU1XynYyq92PbsC8=
modernc.org/sqlite v1.29.8/go.mod h1:lQPm27iqa4UNZpmr4Aor0MH0HkCLbt1huYDfWylLZFk=
modernc.org/sqlite v1.29.10 h1:3u93dz83myFnMilBGCOLbr+HjklS6+5rJLx4q86RDAg=
modernc.org/sqlite v1.29.10/go.mod h1:ItX2a1OVGgNsFh6Dv60JQvGfJfTPHPVpV6DF59akYOA=
modernc.org/strutil v1.2.0 h1:agBi9dp1I+eOnxXeiZawM8F4LawKv4NzGWSaLfyeNZA=
modernc.org/strutil v1.2.0/go.mod h1:/mdcBmfOibveCTBxUl5B5l6W+TTH1FXPLHZE6bTosX0=
modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y=

View File

@ -28,7 +28,6 @@ func newPostgresBackend(ctx context.Context, config *Config, prefix string) (
Schema: "public",
TableNamePrefix: prefix,
SQLiteCmdReplacements: sqliteCmdReplacements,
WithTxLevelLock: true,
}
return sqlbase.NewSqlBackend(ctx, cfg)

View File

@ -55,10 +55,6 @@ type Config struct {
// commands. Note that the sqlite keywords to be replaced are
// case-sensitive.
SQLiteCmdReplacements SQLiteCmdReplacements
// WithTxLevelLock when set will ensure that there is a transaction
// level lock.
WithTxLevelLock bool
}
// db holds a reference to the sql db connection.
@ -79,10 +75,6 @@ type db struct {
// db is the underlying database connection instance.
db *sql.DB
// lock is the global write lock that ensures single writer. This is
// only used if cfg.WithTxLevelLock is set.
lock sync.RWMutex
// table is the name of the table that contains the data for all
// top-level buckets that have keys that cannot be mapped to a distinct
// sql table.

View File

@ -5,7 +5,6 @@ package sqlbase
import (
"context"
"database/sql"
"sync"
"github.com/btcsuite/btcwallet/walletdb"
)
@ -20,28 +19,11 @@ type readWriteTx struct {
// active is true if the transaction hasn't been committed yet.
active bool
// locker is a pointer to the global db lock.
locker sync.Locker
}
// newReadWriteTx creates an rw transaction using a connection from the
// specified pool.
func newReadWriteTx(db *db, readOnly bool) (*readWriteTx, error) {
locker := newNoopLocker()
if db.cfg.WithTxLevelLock {
// Obtain the global lock instance. An alternative here is to
// obtain a database lock from Postgres. Unfortunately there is
// no database-level lock in Postgres, meaning that each table
// would need to be locked individually. Perhaps an advisory
// lock could perform this function too.
locker = &db.lock
if readOnly {
locker = db.lock.RLocker()
}
}
locker.Lock()
// Start the transaction. Don't use the timeout context because it would
// be applied to the transaction as a whole. If possible, mark the
// transaction as read-only to make sure that potential programming
@ -54,7 +36,6 @@ func newReadWriteTx(db *db, readOnly bool) (*readWriteTx, error) {
},
)
if err != nil {
locker.Unlock()
return nil, err
}
@ -62,7 +43,6 @@ func newReadWriteTx(db *db, readOnly bool) (*readWriteTx, error) {
db: db,
tx: tx,
active: true,
locker: locker,
}, nil
}
@ -94,7 +74,6 @@ func (tx *readWriteTx) Rollback() error {
// Unlock the transaction regardless of the error result.
tx.active = false
tx.locker.Unlock()
return err
}
@ -162,7 +141,6 @@ func (tx *readWriteTx) Commit() error {
// Unlock the transaction regardless of the error result.
tx.active = false
tx.locker.Unlock()
return err
}
@ -204,25 +182,3 @@ func (tx *readWriteTx) Exec(query string, args ...interface{}) (sql.Result,
return tx.tx.ExecContext(ctx, query, args...)
}
// noopLocker is an implementation of a no-op sync.Locker.
type noopLocker struct{}
// newNoopLocker creates a new noopLocker.
func newNoopLocker() sync.Locker {
return &noopLocker{}
}
// Lock is a noop.
//
// NOTE: this is part of the sync.Locker interface.
func (n *noopLocker) Lock() {
}
// Unlock is a noop.
//
// NOTE: this is part of the sync.Locker interface.
func (n *noopLocker) Unlock() {
}
var _ sync.Locker = (*noopLocker)(nil)

View File

@ -1,4 +1,6 @@
FROM golang:1.22.6-bookworm
# If you change this please also update GO_VERSION in Makefile (then run
# `make lint` to see where else it needs to be updated as well).
FROM golang:1.22.11-bookworm
RUN apt-get update && apt-get install -y \
git \

View File

@ -6,7 +6,7 @@ set -e
DIR="$(cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd)"
# golang docker image version used in this script.
GO_IMAGE=docker.io/library/golang:1.22.6-alpine
GO_IMAGE=docker.io/library/golang:1.22.11-alpine
PROTOBUF_VERSION=$(docker run --rm -v $DIR/../:/lnd -w /lnd $GO_IMAGE \
go list -f '{{.Version}}' -m google.golang.org/protobuf)

View File

@ -180,13 +180,15 @@ func CreateRPCInvoice(invoice *invoices.Invoice,
AmtPaidSat: int64(satAmtPaid),
AmtPaidMsat: int64(invoice.AmtPaid),
AmtPaid: int64(invoice.AmtPaid),
State: state,
Htlcs: rpcHtlcs,
Features: CreateRPCFeatures(invoice.Terms.Features),
IsKeysend: invoice.IsKeysend(),
PaymentAddr: invoice.Terms.PaymentAddr[:],
IsAmp: invoice.IsAMP(),
IsBlinded: invoice.IsBlinded(),
// This will be set to SETTLED if at least one of the AMP Sets
// is settled (see below).
State: state,
Htlcs: rpcHtlcs,
Features: CreateRPCFeatures(invoice.Terms.Features),
IsKeysend: invoice.IsKeysend(),
PaymentAddr: invoice.Terms.PaymentAddr[:],
IsAmp: invoice.IsAMP(),
IsBlinded: invoice.IsBlinded(),
}
rpcInvoice.AmpInvoiceState = make(map[string]*lnrpc.AMPInvoiceState)

View File

@ -15113,6 +15113,14 @@ type PolicyUpdateRequest struct {
// Optional inbound fee. If unset, the previously set value will be
// retained [EXPERIMENTAL].
InboundFee *InboundFee `protobuf:"bytes,10,opt,name=inbound_fee,json=inboundFee,proto3" json:"inbound_fee,omitempty"`
// Under unknown circumstances a channel can exist with a missing edge in
// the graph database. This can cause an 'edge not found' error when calling
// `getchaninfo` and/or cause the default channel policy to be used during
// forwards. Setting this flag will recreate the edge if not found, allowing
// updating this channel policy and fixing the missing edge problem for this
// channel permanently. For fields not set in this command, the default
// policy will be created.
CreateMissingEdge bool `protobuf:"varint,11,opt,name=create_missing_edge,json=createMissingEdge,proto3" json:"create_missing_edge,omitempty"`
}
func (x *PolicyUpdateRequest) Reset() {
@ -15224,6 +15232,13 @@ func (x *PolicyUpdateRequest) GetInboundFee() *InboundFee {
return nil
}
func (x *PolicyUpdateRequest) GetCreateMissingEdge() bool {
if x != nil {
return x.CreateMissingEdge
}
return false
}
type isPolicyUpdateRequest_Scope interface {
isPolicyUpdateRequest_Scope()
}
@ -20664,7 +20679,7 @@ var file_lightning_proto_rawDesc = []byte{
0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0b, 0x62, 0x61, 0x73, 0x65, 0x46, 0x65, 0x65, 0x4d, 0x73,
0x61, 0x74, 0x12, 0x20, 0x0a, 0x0c, 0x66, 0x65, 0x65, 0x5f, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x70,
0x70, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x66, 0x65, 0x65, 0x52, 0x61, 0x74,
0x65, 0x50, 0x70, 0x6d, 0x22, 0xaa, 0x03, 0x0a, 0x13, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x55,
0x65, 0x50, 0x70, 0x6d, 0x22, 0xda, 0x03, 0x0a, 0x13, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x55,
0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x06,
0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x06,
0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x12, 0x34, 0x0a, 0x0a, 0x63, 0x68, 0x61, 0x6e, 0x5f, 0x70,
@ -20690,7 +20705,10 @@ var file_lightning_proto_rawDesc = []byte{
0x66, 0x69, 0x65, 0x64, 0x12, 0x32, 0x0a, 0x0b, 0x69, 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x5f,
0x66, 0x65, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x6c, 0x6e, 0x72, 0x70,
0x63, 0x2e, 0x49, 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x46, 0x65, 0x65, 0x52, 0x0a, 0x69, 0x6e,
0x62, 0x6f, 0x75, 0x6e, 0x64, 0x46, 0x65, 0x65, 0x42, 0x07, 0x0a, 0x05, 0x73, 0x63, 0x6f, 0x70,
0x62, 0x6f, 0x75, 0x6e, 0x64, 0x46, 0x65, 0x65, 0x12, 0x2e, 0x0a, 0x13, 0x63, 0x72, 0x65, 0x61,
0x74, 0x65, 0x5f, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x5f, 0x65, 0x64, 0x67, 0x65, 0x18,
0x0b, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4d, 0x69, 0x73,
0x73, 0x69, 0x6e, 0x67, 0x45, 0x64, 0x67, 0x65, 0x42, 0x07, 0x0a, 0x05, 0x73, 0x63, 0x6f, 0x70,
0x65, 0x22, 0x8c, 0x01, 0x0a, 0x0c, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x55, 0x70, 0x64, 0x61,
0x74, 0x65, 0x12, 0x2b, 0x0a, 0x08, 0x6f, 0x75, 0x74, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x01,
0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6c, 0x6e, 0x72, 0x70, 0x63, 0x2e, 0x4f, 0x75, 0x74,

View File

@ -4544,6 +4544,15 @@ message PolicyUpdateRequest {
// Optional inbound fee. If unset, the previously set value will be
// retained [EXPERIMENTAL].
InboundFee inbound_fee = 10;
// Under unknown circumstances a channel can exist with a missing edge in
// the graph database. This can cause an 'edge not found' error when calling
// `getchaninfo` and/or cause the default channel policy to be used during
// forwards. Setting this flag will recreate the edge if not found, allowing
// updating this channel policy and fixing the missing edge problem for this
// channel permanently. For fields not set in this command, the default
// policy will be created.
bool create_missing_edge = 11;
}
enum UpdateFailure {

View File

@ -6721,6 +6721,10 @@
"inbound_fee": {
"$ref": "#/definitions/lnrpcInboundFee",
"description": "Optional inbound fee. If unset, the previously set value will be\nretained [EXPERIMENTAL]."
},
"create_missing_edge": {
"type": "boolean",
"description": "Under unknown circumstances a channel can exist with a missing edge in\nthe graph database. This can cause an 'edge not found' error when calling\n`getchaninfo` and/or cause the default channel policy to be used during\nforwards. Setting this flag will recreate the edge if not found, allowing\nupdating this channel policy and fixing the missing edge problem for this\nchannel permanently. For fields not set in this command, the default\npolicy will be created."
}
}
},
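The `create_missing_edge` flag documented above is plumbed from the RPC request through rpcserver.UpdateChannelPolicy and into the localchans manager further down in this commit. As a rough illustration of how a client might opt in, here is a hedged Go sketch against the generated lnrpc client; the connection setup is omitted, and the helper name, context, channel point, and policy values are assumptions rather than part of this diff.

package policyexample

import (
	"context"
	"fmt"

	"github.com/lightningnetwork/lnd/lnrpc"
)

// recreateEdgePolicy is a hypothetical helper: it assumes an already
// connected lnrpc.LightningClient (TLS/macaroon setup omitted) and the
// channel point of the channel whose edge went missing.
func recreateEdgePolicy(ctx context.Context, client lnrpc.LightningClient,
	chanPoint *lnrpc.ChannelPoint) error {

	resp, err := client.UpdateChannelPolicy(ctx, &lnrpc.PolicyUpdateRequest{
		Scope: &lnrpc.PolicyUpdateRequest_ChanPoint{
			ChanPoint: chanPoint,
		},

		// Illustrative policy values; fields left unset fall back to
		// the default policy, as described in the new field comment.
		BaseFeeMsat:   1000,
		FeeRatePpm:    500,
		TimeLockDelta: 80,

		// New in this change: recreate the edge with a default
		// policy if it cannot be found in the graph database.
		CreateMissingEdge: true,
	})
	if err != nil {
		return err
	}

	// Surface any per-channel failures reported by the server.
	for _, failed := range resp.FailedUpdates {
		fmt.Printf("policy update failed: %v\n", failed)
	}

	return nil
}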

4
log.go
View File

@ -22,6 +22,7 @@ import (
"github.com/lightningnetwork/lnd/healthcheck"
"github.com/lightningnetwork/lnd/htlcswitch"
"github.com/lightningnetwork/lnd/invoices"
"github.com/lightningnetwork/lnd/kvdb/sqlbase"
"github.com/lightningnetwork/lnd/lncfg"
"github.com/lightningnetwork/lnd/lnrpc/autopilotrpc"
"github.com/lightningnetwork/lnd/lnrpc/chainrpc"
@ -44,6 +45,7 @@ import (
"github.com/lightningnetwork/lnd/peernotifier"
"github.com/lightningnetwork/lnd/routing"
"github.com/lightningnetwork/lnd/routing/blindedpath"
"github.com/lightningnetwork/lnd/routing/localchans"
"github.com/lightningnetwork/lnd/rpcperms"
"github.com/lightningnetwork/lnd/signal"
"github.com/lightningnetwork/lnd/sweep"
@ -143,6 +145,7 @@ func SetupLoggers(root *build.RotatingLogWriter, interceptor signal.Interceptor)
AddSubLogger(root, "DISC", interceptor, discovery.UseLogger)
AddSubLogger(root, "NTFN", interceptor, chainntnfs.UseLogger)
AddSubLogger(root, "CHDB", interceptor, channeldb.UseLogger)
AddSubLogger(root, "SQLB", interceptor, sqlbase.UseLogger)
AddSubLogger(root, "HSWC", interceptor, htlcswitch.UseLogger)
AddSubLogger(root, "CNCT", interceptor, contractcourt.UseLogger)
AddSubLogger(root, "UTXN", interceptor, contractcourt.UseNurseryLogger)
@ -167,6 +170,7 @@ func SetupLoggers(root *build.RotatingLogWriter, interceptor signal.Interceptor)
AddSubLogger(root, "CHFD", interceptor, chanfunding.UseLogger)
AddSubLogger(root, "PEER", interceptor, peer.UseLogger)
AddSubLogger(root, "CHCL", interceptor, chancloser.UseLogger)
AddSubLogger(root, "LCHN", interceptor, localchans.UseLogger)
AddSubLogger(root, routing.Subsystem, interceptor, routing.UseLogger)
AddSubLogger(root, routerrpc.Subsystem, interceptor, routerrpc.UseLogger)

View File

@ -1,9 +1,6 @@
# If you change this value, please change it in the following files as well:
# /Dockerfile
# /dev.Dockerfile
# /.github/workflows/main.yml
# /.github/workflows/release.yml
FROM golang:1.22.6-bookworm
# If you change this please also update GO_VERSION in Makefile (then run
# `make lint` to see where else it needs to be updated as well).
FROM golang:1.22.11-bookworm
MAINTAINER Olaoluwa Osuntokun <laolu@lightning.engineering>

View File

@ -164,3 +164,7 @@ func (*mockTrafficShaper) ProduceHtlcExtraData(totalAmount lnwire.MilliSatoshi,
return totalAmount, nil, nil
}
func (*mockTrafficShaper) IsCustomHTLC(_ lnwire.CustomRecords) bool {
return false
}

31
routing/localchans/log.go Normal file
View File

@ -0,0 +1,31 @@
package localchans
import (
"github.com/btcsuite/btclog"
"github.com/lightningnetwork/lnd/build"
)
// log is a logger that is initialized with no output filters. This means the
// package will not perform any logging by default until the caller requests
// it.
var log btclog.Logger
const Subsystem = "LCHN"
// The default amount of logging is none.
func init() {
UseLogger(build.NewSubLogger(Subsystem, nil))
}
// DisableLog disables all library log output. Logging output is disabled by
// default until UseLogger is called.
func DisableLog() {
UseLogger(btclog.Disabled)
}
// UseLogger uses a specified Logger to output package logging info. This
// should be used in preference to SetLogWriter if the caller is also using
// btclog.
func UseLogger(logger btclog.Logger) {
log = logger
}

View File

@ -1,10 +1,13 @@
package localchans
import (
"bytes"
"errors"
"fmt"
"sync"
"time"
"github.com/btcsuite/btcd/btcec/v2"
"github.com/btcsuite/btcd/wire"
"github.com/lightningnetwork/lnd/channeldb"
"github.com/lightningnetwork/lnd/channeldb/models"
@ -19,6 +22,12 @@ import (
// Manager manages the node's local channels. The only operation that is
// currently implemented is updating forwarding policies.
type Manager struct {
// SelfPub contains the public key of the local node.
SelfPub *btcec.PublicKey
// DefaultRoutingPolicy is the default routing policy.
DefaultRoutingPolicy models.ForwardingPolicy
// UpdateForwardingPolicies is used by the manager to update active
// links with a new policy.
UpdateForwardingPolicies func(
@ -30,7 +39,7 @@ type Manager struct {
edgesToUpdate []discovery.EdgeWithInfo) error
// ForAllOutgoingChannels is required to iterate over all our local
// channels.
// channels. The ChannelEdgePolicy parameter may be nil.
ForAllOutgoingChannels func(cb func(kvdb.RTx,
*models.ChannelEdgeInfo,
*models.ChannelEdgePolicy) error) error
@ -40,6 +49,9 @@ type Manager struct {
FetchChannel func(tx kvdb.RTx, chanPoint wire.OutPoint) (
*channeldb.OpenChannel, error)
// AddEdge is used to add edge/channel to the topology of the router.
AddEdge func(edge *models.ChannelEdgeInfo) error
// policyUpdateLock ensures that the database and the link do not fall
// out of sync if there are concurrent fee update calls. Without it,
// there is a chance that policy A updates the database, then policy B
@ -51,7 +63,8 @@ type Manager struct {
// UpdatePolicy updates the policy for the specified channels on disk and in
// the active links.
func (r *Manager) UpdatePolicy(newSchema routing.ChannelPolicy,
chanPoints ...wire.OutPoint) ([]*lnrpc.FailedUpdate, error) {
createMissingEdge bool, chanPoints ...wire.OutPoint) (
[]*lnrpc.FailedUpdate, error) {
r.policyUpdateLock.Lock()
defer r.policyUpdateLock.Unlock()
@ -69,10 +82,8 @@ func (r *Manager) UpdatePolicy(newSchema routing.ChannelPolicy,
var edgesToUpdate []discovery.EdgeWithInfo
policiesToUpdate := make(map[wire.OutPoint]models.ForwardingPolicy)
// Next, we'll loop over all the outgoing channels the router knows of.
// If we have a filter then we'll only collected those channels,
// otherwise we'll collect them all.
err := r.ForAllOutgoingChannels(func(
// NOTE: edge may be nil when this function is called.
processChan := func(
tx kvdb.RTx,
info *models.ChannelEdgeInfo,
edge *models.ChannelEdgePolicy) error {
@ -88,8 +99,24 @@ func (r *Manager) UpdatePolicy(newSchema routing.ChannelPolicy,
// will be used to report invalid channels later on.
delete(unprocessedChans, info.ChannelPoint)
if edge == nil {
log.Errorf("Got nil channel edge policy when updating "+
"a channel. Channel point: %v",
info.ChannelPoint.String())
failedUpdates = append(failedUpdates, makeFailureItem(
info.ChannelPoint,
lnrpc.UpdateFailure_UPDATE_FAILURE_NOT_FOUND,
"edge policy not found",
))
return nil
}
// Apply the new policy to the edge.
err := r.updateEdge(tx, info.ChannelPoint, edge, newSchema)
err := r.updateEdge(
tx, info.ChannelPoint, edge, newSchema,
)
if err != nil {
failedUpdates = append(failedUpdates,
makeFailureItem(info.ChannelPoint,
@ -125,7 +152,12 @@ func (r *Manager) UpdatePolicy(newSchema routing.ChannelPolicy,
}
return nil
})
}
// Next, we'll loop over all the outgoing channels the router knows of.
// If we have a filter then we'll only collect those channels, otherwise
// we'll collect them all.
err := r.ForAllOutgoingChannels(processChan)
if err != nil {
return nil, err
}
@ -155,7 +187,37 @@ func (r *Manager) UpdatePolicy(newSchema routing.ChannelPolicy,
"not yet confirmed",
))
// If the edge was not found, but the channel is found, that
// means the edge is missing in the graph database and should be
// recreated. The edge and policy are created in-memory. The
// edge is inserted via AddEdge in createMissingEdge below and
// the policy will be added to the graph in the
// PropagateChanPolicyUpdate call below.
case createMissingEdge:
log.Warnf("Missing edge for active channel (%s) "+
"during policy update. Recreating edge with "+
"default policy.",
channel.FundingOutpoint.String())
info, edge, failedUpdate := r.createMissingEdge(
channel, newSchema,
)
if failedUpdate == nil {
err = processChan(nil, info, edge)
if err != nil {
return nil, err
}
} else {
failedUpdates = append(
failedUpdates, failedUpdate,
)
}
default:
log.Warnf("Missing edge for active channel (%s) "+
"during policy update. Could not update "+
"policy.", channel.FundingOutpoint.String())
failedUpdates = append(failedUpdates,
makeFailureItem(chanPoint,
lnrpc.UpdateFailure_UPDATE_FAILURE_UNKNOWN,
@ -180,11 +242,124 @@ func (r *Manager) UpdatePolicy(newSchema routing.ChannelPolicy,
return failedUpdates, nil
}
func (r *Manager) createMissingEdge(channel *channeldb.OpenChannel,
newSchema routing.ChannelPolicy) (*models.ChannelEdgeInfo,
*models.ChannelEdgePolicy, *lnrpc.FailedUpdate) {
info, edge, err := r.createEdge(channel, time.Now())
if err != nil {
log.Errorf("Failed to recreate missing edge "+
"for channel (%s): %v",
channel.FundingOutpoint.String(), err)
return nil, nil, makeFailureItem(
channel.FundingOutpoint,
lnrpc.UpdateFailure_UPDATE_FAILURE_UNKNOWN,
"could not update policies",
)
}
// Validate the newly created edge policy with the user defined new
// schema before adding the edge to the database.
err = r.updateEdge(nil, channel.FundingOutpoint, edge, newSchema)
if err != nil {
return nil, nil, makeFailureItem(
info.ChannelPoint,
lnrpc.UpdateFailure_UPDATE_FAILURE_INVALID_PARAMETER,
err.Error(),
)
}
// Insert the edge into the database to avoid `edge not
// found` errors during policy update propagation.
err = r.AddEdge(info)
if err != nil {
log.Errorf("Attempt to add missing edge for "+
"channel (%s) errored with: %v",
channel.FundingOutpoint.String(), err)
return nil, nil, makeFailureItem(
channel.FundingOutpoint,
lnrpc.UpdateFailure_UPDATE_FAILURE_UNKNOWN,
"could not add edge",
)
}
return info, edge, nil
}
// createEdge recreates an edge and policy from an open channel in-memory.
func (r *Manager) createEdge(channel *channeldb.OpenChannel,
timestamp time.Time) (*models.ChannelEdgeInfo,
*models.ChannelEdgePolicy, error) {
nodeKey1Bytes := r.SelfPub.SerializeCompressed()
nodeKey2Bytes := channel.IdentityPub.SerializeCompressed()
bitcoinKey1Bytes := channel.LocalChanCfg.MultiSigKey.PubKey.
SerializeCompressed()
bitcoinKey2Bytes := channel.RemoteChanCfg.MultiSigKey.PubKey.
SerializeCompressed()
channelFlags := lnwire.ChanUpdateChanFlags(0)
// Make it such that node_id_1 is the lexicographically-lesser of the
// two compressed keys sorted in ascending lexicographic order.
if bytes.Compare(nodeKey2Bytes, nodeKey1Bytes) < 0 {
nodeKey1Bytes, nodeKey2Bytes = nodeKey2Bytes, nodeKey1Bytes
bitcoinKey1Bytes, bitcoinKey2Bytes = bitcoinKey2Bytes,
bitcoinKey1Bytes
channelFlags = 1
}
var featureBuf bytes.Buffer
err := lnwire.NewRawFeatureVector().Encode(&featureBuf)
if err != nil {
return nil, nil, fmt.Errorf("unable to encode features: %w",
err)
}
info := &models.ChannelEdgeInfo{
ChannelID: channel.ShortChanID().ToUint64(),
ChainHash: channel.ChainHash,
Features: featureBuf.Bytes(),
Capacity: channel.Capacity,
ChannelPoint: channel.FundingOutpoint,
}
copy(info.NodeKey1Bytes[:], nodeKey1Bytes)
copy(info.NodeKey2Bytes[:], nodeKey2Bytes)
copy(info.BitcoinKey1Bytes[:], bitcoinKey1Bytes)
copy(info.BitcoinKey2Bytes[:], bitcoinKey2Bytes)
// Construct a dummy channel edge policy with default values that will
// be updated with the new values in the call to processChan below.
timeLockDelta := uint16(r.DefaultRoutingPolicy.TimeLockDelta)
edge := &models.ChannelEdgePolicy{
ChannelID: channel.ShortChanID().ToUint64(),
LastUpdate: timestamp,
TimeLockDelta: timeLockDelta,
ChannelFlags: channelFlags,
MessageFlags: lnwire.ChanUpdateRequiredMaxHtlc,
FeeBaseMSat: r.DefaultRoutingPolicy.BaseFee,
FeeProportionalMillionths: r.DefaultRoutingPolicy.FeeRate,
MinHTLC: r.DefaultRoutingPolicy.MinHTLCOut,
MaxHTLC: r.DefaultRoutingPolicy.MaxHTLC,
}
copy(edge.ToNode[:], channel.IdentityPub.SerializeCompressed())
return info, edge, nil
}
// updateEdge updates the given edge with the new schema.
func (r *Manager) updateEdge(tx kvdb.RTx, chanPoint wire.OutPoint,
edge *models.ChannelEdgePolicy,
newSchema routing.ChannelPolicy) error {
channel, err := r.FetchChannel(tx, chanPoint)
if err != nil {
return err
}
// Update forwarding fee scheme and required time lock delta.
edge.FeeBaseMSat = newSchema.BaseFee
edge.FeeProportionalMillionths = lnwire.MilliSatoshi(
@ -192,7 +367,7 @@ func (r *Manager) updateEdge(tx kvdb.RTx, chanPoint wire.OutPoint,
)
// If inbound fees are set, we update the edge with them.
err := fn.MapOptionZ(newSchema.InboundFee,
err = fn.MapOptionZ(newSchema.InboundFee,
func(f models.InboundFee) error {
inboundWireFee := f.ToWire()
return edge.ExtraOpaqueData.PackRecords(
@ -206,7 +381,7 @@ func (r *Manager) updateEdge(tx kvdb.RTx, chanPoint wire.OutPoint,
edge.TimeLockDelta = uint16(newSchema.TimeLockDelta)
// Retrieve negotiated channel htlc amt limits.
amtMin, amtMax, err := r.getHtlcAmtLimits(tx, chanPoint)
amtMin, amtMax, err := r.getHtlcAmtLimits(channel)
if err != nil {
return err
}
@ -267,14 +442,9 @@ func (r *Manager) updateEdge(tx kvdb.RTx, chanPoint wire.OutPoint,
// getHtlcAmtLimits retrieves the negotiated channel min and max htlc amount
// constraints.
func (r *Manager) getHtlcAmtLimits(tx kvdb.RTx, chanPoint wire.OutPoint) (
func (r *Manager) getHtlcAmtLimits(ch *channeldb.OpenChannel) (
lnwire.MilliSatoshi, lnwire.MilliSatoshi, error) {
ch, err := r.FetchChannel(tx, chanPoint)
if err != nil {
return 0, 0, err
}
// The max htlc policy field must be less than or equal to the channel
// capacity AND less than or equal to the max in-flight HTLC value.
// Since the latter is always less than or equal to the former, just

View File

@ -1,14 +1,19 @@
package localchans
import (
"encoding/hex"
"testing"
"time"
"github.com/btcsuite/btcd/btcec/v2"
"github.com/btcsuite/btcd/btcutil"
"github.com/btcsuite/btcd/chaincfg"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/wire"
"github.com/lightningnetwork/lnd/channeldb"
"github.com/lightningnetwork/lnd/channeldb/models"
"github.com/lightningnetwork/lnd/discovery"
"github.com/lightningnetwork/lnd/keychain"
"github.com/lightningnetwork/lnd/kvdb"
"github.com/lightningnetwork/lnd/lnrpc"
"github.com/lightningnetwork/lnd/lnwire"
@ -35,6 +40,18 @@ func TestManager(t *testing.T) {
channelSet []channel
)
sp := [33]byte{}
_, _ = hex.Decode(sp[:], []byte("028d7500dd4c12685d1f568b4c2b5048e85"+
"34b873319f3a8daa612b469132ec7f7"))
rp := [33]byte{}
_, _ = hex.Decode(rp[:], []byte("034f355bdcb7cc0af728ef3cceb9615d906"+
"84bb5b2ca5f859ab0f0b704075871aa"))
selfpub, _ := btcec.ParsePubKey(sp[:])
remotepub, _ := btcec.ParsePubKey(rp[:])
localMultisigPrivKey, _ := btcec.NewPrivateKey()
localMultisigKey := localMultisigPrivKey.PubKey()
remoteMultisigPrivKey, _ := btcec.NewPrivateKey()
remoteMultisigKey := remoteMultisigPrivKey.PubKey()
newPolicy := routing.ChannelPolicy{
FeeSchema: routing.FeeSchema{
BaseFee: 100,
@ -131,17 +148,42 @@ func TestManager(t *testing.T) {
}
return &channeldb.OpenChannel{
FundingOutpoint: chanPointValid,
IdentityPub: remotepub,
LocalChanCfg: channeldb.ChannelConfig{
ChannelStateBounds: bounds,
MultiSigKey: keychain.KeyDescriptor{
PubKey: localMultisigKey,
},
},
RemoteChanCfg: channeldb.ChannelConfig{
ChannelStateBounds: bounds,
MultiSigKey: keychain.KeyDescriptor{
PubKey: remoteMultisigKey,
},
},
}, nil
}
addEdge := func(edge *models.ChannelEdgeInfo) error {
return nil
}
manager := Manager{
UpdateForwardingPolicies: updateForwardingPolicies,
PropagateChanPolicyUpdate: propagateChanPolicyUpdate,
ForAllOutgoingChannels: forAllOutgoingChannels,
FetchChannel: fetchChannel,
SelfPub: selfpub,
DefaultRoutingPolicy: models.ForwardingPolicy{
MinHTLCOut: minHTLC,
MaxHTLC: maxPendingAmount,
BaseFee: lnwire.MilliSatoshi(1000),
FeeRate: lnwire.MilliSatoshi(0),
InboundFee: models.InboundFee{},
TimeLockDelta: 80,
},
AddEdge: addEdge,
}
// Policy with no max htlc value.
@ -156,6 +198,7 @@ func TestManager(t *testing.T) {
newPolicy routing.ChannelPolicy
channelSet []channel
specifiedChanPoints []wire.OutPoint
createMissingEdge bool
expectedNumUpdates int
expectedUpdateFailures []lnrpc.UpdateFailure
expectErr error
@ -173,6 +216,7 @@ func TestManager(t *testing.T) {
},
},
specifiedChanPoints: []wire.OutPoint{chanPointValid},
createMissingEdge: false,
expectedNumUpdates: 1,
expectedUpdateFailures: []lnrpc.UpdateFailure{},
expectErr: nil,
@ -190,6 +234,7 @@ func TestManager(t *testing.T) {
},
},
specifiedChanPoints: []wire.OutPoint{},
createMissingEdge: false,
expectedNumUpdates: 1,
expectedUpdateFailures: []lnrpc.UpdateFailure{},
expectErr: nil,
@ -207,6 +252,7 @@ func TestManager(t *testing.T) {
},
},
specifiedChanPoints: []wire.OutPoint{chanPointMissing},
createMissingEdge: false,
expectedNumUpdates: 0,
expectedUpdateFailures: []lnrpc.UpdateFailure{
lnrpc.UpdateFailure_UPDATE_FAILURE_NOT_FOUND,
@ -228,10 +274,39 @@ func TestManager(t *testing.T) {
},
},
specifiedChanPoints: []wire.OutPoint{chanPointValid},
createMissingEdge: false,
expectedNumUpdates: 1,
expectedUpdateFailures: []lnrpc.UpdateFailure{},
expectErr: nil,
},
{
// Here, the edge is missing, causing the edge to be
// recreated.
name: "missing edge recreated",
currentPolicy: models.ChannelEdgePolicy{},
newPolicy: newPolicy,
channelSet: []channel{},
specifiedChanPoints: []wire.OutPoint{chanPointValid},
createMissingEdge: true,
expectedNumUpdates: 1,
expectedUpdateFailures: []lnrpc.UpdateFailure{},
expectErr: nil,
},
{
// Here, the edge is missing, but the edge will not be
// recreated, because createMissingEdge is false.
name: "missing edge ignored",
currentPolicy: models.ChannelEdgePolicy{},
newPolicy: newPolicy,
channelSet: []channel{},
specifiedChanPoints: []wire.OutPoint{chanPointValid},
createMissingEdge: false,
expectedNumUpdates: 0,
expectedUpdateFailures: []lnrpc.UpdateFailure{
lnrpc.UpdateFailure_UPDATE_FAILURE_UNKNOWN,
},
expectErr: nil,
},
}
for _, test := range tests {
@ -242,6 +317,7 @@ func TestManager(t *testing.T) {
expectedNumUpdates = test.expectedNumUpdates
failedUpdates, err := manager.UpdatePolicy(test.newPolicy,
test.createMissingEdge,
test.specifiedChanPoints...)
if len(failedUpdates) != len(test.expectedUpdateFailures) {
@ -258,3 +334,179 @@ func TestManager(t *testing.T) {
})
}
}
// Tests creating a new edge in the manager where the local pubkey is the
// lexicographically lesser of the two.
func TestCreateEdgeLower(t *testing.T) {
sp := [33]byte{}
_, _ = hex.Decode(sp[:], []byte("028d7500dd4c12685d1f568b4c2b5048e85"+
"34b873319f3a8daa612b469132ec7f7"))
rp := [33]byte{}
_, _ = hex.Decode(rp[:], []byte("034f355bdcb7cc0af728ef3cceb9615d906"+
"84bb5b2ca5f859ab0f0b704075871aa"))
selfpub, _ := btcec.ParsePubKey(sp[:])
remotepub, _ := btcec.ParsePubKey(rp[:])
localMultisigPrivKey, _ := btcec.NewPrivateKey()
localMultisigKey := localMultisigPrivKey.PubKey()
remoteMultisigPrivKey, _ := btcec.NewPrivateKey()
remoteMultisigKey := remoteMultisigPrivKey.PubKey()
timestamp := time.Now()
defaultPolicy := models.ForwardingPolicy{
MinHTLCOut: 1,
MaxHTLC: 2,
BaseFee: 3,
FeeRate: 4,
InboundFee: models.InboundFee{
Base: 5,
Rate: 6,
},
TimeLockDelta: 7,
}
channel := &channeldb.OpenChannel{
IdentityPub: remotepub,
LocalChanCfg: channeldb.ChannelConfig{
MultiSigKey: keychain.KeyDescriptor{
PubKey: localMultisigKey,
},
},
RemoteChanCfg: channeldb.ChannelConfig{
MultiSigKey: keychain.KeyDescriptor{
PubKey: remoteMultisigKey,
},
},
ShortChannelID: lnwire.NewShortChanIDFromInt(8),
ChainHash: *chaincfg.RegressionNetParams.GenesisHash,
Capacity: 9,
FundingOutpoint: wire.OutPoint{
Hash: chainhash.Hash([32]byte{}),
Index: 0,
},
}
expectedInfo := &models.ChannelEdgeInfo{
ChannelID: 8,
ChainHash: channel.ChainHash,
Features: []byte{0, 0},
Capacity: 9,
ChannelPoint: channel.FundingOutpoint,
NodeKey1Bytes: sp,
NodeKey2Bytes: rp,
BitcoinKey1Bytes: [33]byte(
localMultisigKey.SerializeCompressed()),
BitcoinKey2Bytes: [33]byte(
remoteMultisigKey.SerializeCompressed()),
AuthProof: nil,
ExtraOpaqueData: nil,
}
expectedEdge := &models.ChannelEdgePolicy{
ChannelID: 8,
LastUpdate: timestamp,
TimeLockDelta: 7,
ChannelFlags: 0,
MessageFlags: lnwire.ChanUpdateRequiredMaxHtlc,
FeeBaseMSat: 3,
FeeProportionalMillionths: 4,
MinHTLC: 1,
MaxHTLC: 2,
SigBytes: nil,
ToNode: rp,
ExtraOpaqueData: nil,
}
manager := Manager{
SelfPub: selfpub,
DefaultRoutingPolicy: defaultPolicy,
}
info, edge, err := manager.createEdge(channel, timestamp)
require.NoError(t, err)
require.Equal(t, expectedInfo, info)
require.Equal(t, expectedEdge, edge)
}
// Tests creating a new edge in the manager where the local pubkey is the
// lexicographically higher of the two.
func TestCreateEdgeHigher(t *testing.T) {
sp := [33]byte{}
_, _ = hex.Decode(sp[:], []byte("034f355bdcb7cc0af728ef3cceb9615d906"+
"84bb5b2ca5f859ab0f0b704075871aa"))
rp := [33]byte{}
_, _ = hex.Decode(rp[:], []byte("028d7500dd4c12685d1f568b4c2b5048e85"+
"34b873319f3a8daa612b469132ec7f7"))
selfpub, _ := btcec.ParsePubKey(sp[:])
remotepub, _ := btcec.ParsePubKey(rp[:])
localMultisigPrivKey, _ := btcec.NewPrivateKey()
localMultisigKey := localMultisigPrivKey.PubKey()
remoteMultisigPrivKey, _ := btcec.NewPrivateKey()
remoteMultisigKey := remoteMultisigPrivKey.PubKey()
timestamp := time.Now()
defaultPolicy := models.ForwardingPolicy{
MinHTLCOut: 1,
MaxHTLC: 2,
BaseFee: 3,
FeeRate: 4,
InboundFee: models.InboundFee{
Base: 5,
Rate: 6,
},
TimeLockDelta: 7,
}
channel := &channeldb.OpenChannel{
IdentityPub: remotepub,
LocalChanCfg: channeldb.ChannelConfig{
MultiSigKey: keychain.KeyDescriptor{
PubKey: localMultisigKey,
},
},
RemoteChanCfg: channeldb.ChannelConfig{
MultiSigKey: keychain.KeyDescriptor{
PubKey: remoteMultisigKey,
},
},
ShortChannelID: lnwire.NewShortChanIDFromInt(8),
ChainHash: *chaincfg.RegressionNetParams.GenesisHash,
Capacity: 9,
FundingOutpoint: wire.OutPoint{
Hash: chainhash.Hash([32]byte{}),
Index: 0,
},
}
expectedInfo := &models.ChannelEdgeInfo{
ChannelID: 8,
ChainHash: channel.ChainHash,
Features: []byte{0, 0},
Capacity: 9,
ChannelPoint: channel.FundingOutpoint,
NodeKey1Bytes: rp,
NodeKey2Bytes: sp,
BitcoinKey1Bytes: [33]byte(
remoteMultisigKey.SerializeCompressed()),
BitcoinKey2Bytes: [33]byte(
localMultisigKey.SerializeCompressed()),
AuthProof: nil,
ExtraOpaqueData: nil,
}
expectedEdge := &models.ChannelEdgePolicy{
ChannelID: 8,
LastUpdate: timestamp,
TimeLockDelta: 7,
ChannelFlags: 1,
MessageFlags: lnwire.ChanUpdateRequiredMaxHtlc,
FeeBaseMSat: 3,
FeeProportionalMillionths: 4,
MinHTLC: 1,
MaxHTLC: 2,
SigBytes: nil,
ToNode: rp,
ExtraOpaqueData: nil,
}
manager := Manager{
SelfPub: selfpub,
DefaultRoutingPolicy: defaultPolicy,
}
info, edge, err := manager.createEdge(channel, timestamp)
require.NoError(t, err)
require.Equal(t, expectedInfo, info)
require.Equal(t, expectedEdge, edge)
}

View File

@ -761,7 +761,7 @@ func (p *paymentLifecycle) amendFirstHopData(rt *route.Route) error {
// and apply its side effects to the UpdateAddHTLC message.
result, err := fn.MapOptionZ(
p.router.cfg.TrafficShaper,
//nolint:ll
//nolint:lll
func(ts htlcswitch.AuxTrafficShaper) fn.Result[extraDataRequest] {
newAmt, newRecords, err := ts.ProduceHtlcExtraData(
rt.TotalAmount, p.firstHopCustomRecords,

View File

@ -7742,7 +7742,7 @@ func (r *rpcServer) UpdateChannelPolicy(ctx context.Context,
// With the scope resolved, we'll now send this to the local channel
// manager so it can propagate the new policy for our target channel(s).
failedUpdates, err := r.server.localChanMgr.UpdatePolicy(chanPolicy,
targetChans...)
req.CreateMissingEdge, targetChans...)
if err != nil {
return nil, err
}

View File

@ -1098,12 +1098,31 @@ func newServer(cfg *Config, listenAddrs []net.Addr,
ScidCloser: scidCloserMan,
}, nodeKeyDesc)
selfVertex := route.Vertex(nodeKeyDesc.PubKey.SerializeCompressed())
//nolint:lll
s.localChanMgr = &localchans.Manager{
ForAllOutgoingChannels: s.graphBuilder.ForAllOutgoingChannels,
SelfPub: nodeKeyDesc.PubKey,
DefaultRoutingPolicy: cc.RoutingPolicy,
ForAllOutgoingChannels: func(cb func(kvdb.RTx,
*models.ChannelEdgeInfo, *models.ChannelEdgePolicy) error) error {
return s.graphDB.ForEachNodeChannel(selfVertex,
func(tx kvdb.RTx, c *models.ChannelEdgeInfo,
e *models.ChannelEdgePolicy,
_ *models.ChannelEdgePolicy) error {
// NOTE: The invoked callback here may
// receive a nil channel policy.
return cb(tx, c, e)
},
)
},
PropagateChanPolicyUpdate: s.authGossiper.PropagateChanPolicyUpdate,
UpdateForwardingPolicies: s.htlcSwitch.UpdateForwardingPolicies,
FetchChannel: s.chanStateDB.FetchChannel,
AddEdge: func(edge *models.ChannelEdgeInfo) error {
return s.graphBuilder.AddEdge(edge)
},
}
utxnStore, err := contractcourt.NewNurseryStore(

View File

@ -22,7 +22,7 @@ const (
// DefaultNumTxRetries is the default number of times we'll retry a
// transaction if it fails with an error that permits transaction
// repetition.
DefaultNumTxRetries = 10
DefaultNumTxRetries = 20
// DefaultRetryDelay is the default delay between retries. This will be
// used to generate a random delay between 0 and this value.

View File

@ -17,6 +17,16 @@ var (
// ErrRetriesExceeded is returned when a transaction is retried more
// than the max allowed value without a success.
ErrRetriesExceeded = errors.New("db tx retries exceeded")
// postgresErrMsgs are strings that signify retriable errors resulting
// from serialization failures.
postgresErrMsgs = []string{
"could not serialize access",
"current transaction is aborted",
"not enough elements in RWConflictPool",
"deadlock detected",
"commit unexpectedly resulted in rollback",
}
)
// MapSQLError attempts to interpret a given error as a database agnostic SQL
@ -41,10 +51,11 @@ func MapSQLError(err error) error {
// Sometimes the error won't be properly wrapped, so we'll need to
// inspect raw error itself to detect something we can wrap properly.
// This handles a postgres variant of the error.
const postgresErrMsg = "could not serialize access"
if strings.Contains(err.Error(), postgresErrMsg) {
return &ErrSerializationError{
DBError: err,
for _, postgresErrMsg := range postgresErrMsgs {
if strings.Contains(err.Error(), postgresErrMsg) {
return &ErrSerializationError{
DBError: err,
}
}
}
@ -105,6 +116,20 @@ func parsePostgresError(pqErr *pgconn.PgError) error {
DBError: pqErr,
}
// We're in a failed SQL transaction because we didn't catch a previous
// serialization error, so return this one as a serialization error too.
case pgerrcode.InFailedSQLTransaction:
return &ErrSerializationError{
DBError: pqErr,
}
// Deadlock detected because of a serialization error, so return this
// one as a serialization error.
case pgerrcode.DeadlockDetected:
return &ErrSerializationError{
DBError: pqErr,
}
default:
return fmt.Errorf("unknown postgres error: %w", pqErr)
}
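Taken together with the removal of the kvdb tx-level lock and the bump of DefaultNumTxRetries from 10 to 20 elsewhere in this commit, the wider set of recognized Postgres failure messages means callers rely on classifying errors and retrying rather than serializing writers behind a global lock. The following is a hypothetical in-package sketch of that consumption pattern, not code from this diff; it only relies on MapSQLError, ErrSerializationError and ErrRetriesExceeded shown above, and the retry count and delay are illustrative.

package sqldb

import (
	"errors"
	"math/rand"
	"time"
)

// withRetry is an illustrative helper (not part of this commit) showing how
// the serialization-error mapping above is typically consumed: retry the
// transaction closure with a small random delay while MapSQLError classifies
// the failure as an ErrSerializationError.
func withRetry(doTx func() error) error {
	// Mirrors the DefaultNumTxRetries constant bumped in this commit.
	const maxRetries = 20

	for i := 0; i < maxRetries; i++ {
		txErr := doTx()
		if txErr == nil {
			return nil
		}

		mapped := MapSQLError(txErr)

		var serErr *ErrSerializationError
		if !errors.As(mapped, &serErr) {
			// Not a retriable serialization failure, give up.
			return mapped
		}

		// Sleep for a random delay before retrying, so concurrent
		// writers don't immediately collide again.
		time.Sleep(time.Duration(rand.Intn(50)) * time.Millisecond)
	}

	return ErrRetriesExceeded
}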

View File

@ -1,4 +1,4 @@
FROM golang:1.22.6
FROM golang:1.22.11
RUN apt-get update && apt-get install -y git
ENV GOCACHE=/tmp/build/.cache