Merge pull request #7762 from guggero/empty-resp

lnrpc: return meaningful response instead of empty one
This commit is contained in:
Oliver Gugger 2024-11-07 19:14:44 +01:00 committed by GitHub
commit 0899077fb5
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
18 changed files with 3647 additions and 3294 deletions

View File

@ -40,9 +40,11 @@ type PeerConnector interface {
// the channel. In addition a LinkNode will be created for each new peer as // the channel. In addition a LinkNode will be created for each new peer as
// well, in order to expose the addressing information required to locate to // well, in order to expose the addressing information required to locate to
// and connect to each peer in order to initiate the recovery protocol. // and connect to each peer in order to initiate the recovery protocol.
// The number of channels that were successfully restored is returned.
func Recover(backups []Single, restorer ChannelRestorer, func Recover(backups []Single, restorer ChannelRestorer,
peerConnector PeerConnector) error { peerConnector PeerConnector) (int, error) {
var numRestored int
for i, backup := range backups { for i, backup := range backups {
log.Infof("Restoring ChannelPoint(%v) to disk: ", log.Infof("Restoring ChannelPoint(%v) to disk: ",
backup.FundingOutpoint) backup.FundingOutpoint)
@ -57,9 +59,10 @@ func Recover(backups []Single, restorer ChannelRestorer,
continue continue
} }
if err != nil { if err != nil {
return err return numRestored, err
} }
numRestored++
log.Infof("Attempting to connect to node=%x (addrs=%v) to "+ log.Infof("Attempting to connect to node=%x (addrs=%v) to "+
"restore ChannelPoint(%v)", "restore ChannelPoint(%v)",
backup.RemoteNodePub.SerializeCompressed(), backup.RemoteNodePub.SerializeCompressed(),
@ -70,7 +73,7 @@ func Recover(backups []Single, restorer ChannelRestorer,
backup.RemoteNodePub, backup.Addresses, backup.RemoteNodePub, backup.Addresses,
) )
if err != nil { if err != nil {
return err return numRestored, err
} }
// TODO(roasbeef): to handle case where node has changed addrs, // TODO(roasbeef): to handle case where node has changed addrs,
@ -80,7 +83,7 @@ func Recover(backups []Single, restorer ChannelRestorer,
// * just to to fresh w/ call to node addrs and de-dup? // * just to to fresh w/ call to node addrs and de-dup?
} }
return nil return numRestored, nil
} }
// TODO(roasbeef): more specific keychain interface? // TODO(roasbeef): more specific keychain interface?
@ -88,16 +91,17 @@ func Recover(backups []Single, restorer ChannelRestorer,
// UnpackAndRecoverSingles is a one-shot method, that given a set of packed // UnpackAndRecoverSingles is a one-shot method, that given a set of packed
// single channel backups, will restore the channel state to a channel shell, // single channel backups, will restore the channel state to a channel shell,
// and also reach out to connect to any of the known node addresses for that // and also reach out to connect to any of the known node addresses for that
// channel. It is assumes that after this method exists, if a connection we // channel. It is assumed that after this method exits, if a connection was
// able to be established, then then PeerConnector will continue to attempt to // established, then the PeerConnector will continue to attempt to re-establish
// re-establish a persistent connection in the background. // a persistent connection in the background. The number of channels that were
// successfully restored is returned.
func UnpackAndRecoverSingles(singles PackedSingles, func UnpackAndRecoverSingles(singles PackedSingles,
keyChain keychain.KeyRing, restorer ChannelRestorer, keyChain keychain.KeyRing, restorer ChannelRestorer,
peerConnector PeerConnector) error { peerConnector PeerConnector) (int, error) {
chanBackups, err := singles.Unpack(keyChain) chanBackups, err := singles.Unpack(keyChain)
if err != nil { if err != nil {
return err return 0, err
} }
return Recover(chanBackups, restorer, peerConnector) return Recover(chanBackups, restorer, peerConnector)
@ -106,16 +110,17 @@ func UnpackAndRecoverSingles(singles PackedSingles,
// UnpackAndRecoverMulti is a one-shot method, that given a set of packed // UnpackAndRecoverMulti is a one-shot method, that given a set of packed
// multi-channel backups, will restore the channel states to channel shells, // multi-channel backups, will restore the channel states to channel shells,
// and also reach out to connect to any of the known node addresses for that // and also reach out to connect to any of the known node addresses for that
// channel. It is assumes that after this method exists, if a connection we // channel. It is assumed that after this method exits, if a connection was
// able to be established, then then PeerConnector will continue to attempt to // established, then the PeerConnector will continue to attempt to re-establish
// re-establish a persistent connection in the background. // a persistent connection in the background. The number of channels that were
// successfully restored is returned.
func UnpackAndRecoverMulti(packedMulti PackedMulti, func UnpackAndRecoverMulti(packedMulti PackedMulti,
keyChain keychain.KeyRing, restorer ChannelRestorer, keyChain keychain.KeyRing, restorer ChannelRestorer,
peerConnector PeerConnector) error { peerConnector PeerConnector) (int, error) {
chanBackups, err := packedMulti.Unpack(keyChain) chanBackups, err := packedMulti.Unpack(keyChain)
if err != nil { if err != nil {
return err return 0, err
} }
return Recover(chanBackups.StaticBackups, restorer, peerConnector) return Recover(chanBackups.StaticBackups, restorer, peerConnector)

View File

@ -2,7 +2,7 @@ package chanbackup
import ( import (
"bytes" "bytes"
"fmt" "errors"
"net" "net"
"testing" "testing"
@ -11,6 +11,12 @@ import (
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
var (
errRestoreFail = errors.New("restore fail")
errConnectFail = errors.New("connect fail")
)
type mockChannelRestorer struct { type mockChannelRestorer struct {
fail bool fail bool
@ -19,7 +25,7 @@ type mockChannelRestorer struct {
func (m *mockChannelRestorer) RestoreChansFromSingles(...Single) error { func (m *mockChannelRestorer) RestoreChansFromSingles(...Single) error {
if m.fail { if m.fail {
return fmt.Errorf("fail") return errRestoreFail
} }
m.callCount++ m.callCount++
@ -33,11 +39,11 @@ type mockPeerConnector struct {
callCount int callCount int
} }
func (m *mockPeerConnector) ConnectPeer(node *btcec.PublicKey, func (m *mockPeerConnector) ConnectPeer(_ *btcec.PublicKey,
addrs []net.Addr) error { _ []net.Addr) error {
if m.fail { if m.fail {
return fmt.Errorf("fail") return errConnectFail
} }
m.callCount++ m.callCount++
@ -59,16 +65,13 @@ func TestUnpackAndRecoverSingles(t *testing.T) {
var packedBackups PackedSingles var packedBackups PackedSingles
for i := 0; i < numSingles; i++ { for i := 0; i < numSingles; i++ {
channel, err := genRandomOpenChannelShell() channel, err := genRandomOpenChannelShell()
if err != nil { require.NoError(t, err)
t.Fatalf("unable make channel: %v", err)
}
single := NewSingle(channel, nil) single := NewSingle(channel, nil)
var b bytes.Buffer var b bytes.Buffer
if err := single.PackToWriter(&b, keyRing); err != nil { err = single.PackToWriter(&b, keyRing)
t.Fatalf("unable to pack single: %v", err) require.NoError(t, err)
}
backups = append(backups, single) backups = append(backups, single)
packedBackups = append(packedBackups, b.Bytes()) packedBackups = append(packedBackups, b.Bytes())
@ -83,54 +86,47 @@ func TestUnpackAndRecoverSingles(t *testing.T) {
// If we make the channel restore fail, then the entire method should // If we make the channel restore fail, then the entire method should
// as well // as well
chanRestorer.fail = true chanRestorer.fail = true
err := UnpackAndRecoverSingles( _, err := UnpackAndRecoverSingles(
packedBackups, keyRing, &chanRestorer, &peerConnector, packedBackups, keyRing, &chanRestorer, &peerConnector,
) )
if err == nil { require.ErrorIs(t, err, errRestoreFail)
t.Fatalf("restoration should have failed")
}
chanRestorer.fail = false chanRestorer.fail = false
// If we make the peer connector fail, then the entire method should as // If we make the peer connector fail, then the entire method should as
// well // well
peerConnector.fail = true peerConnector.fail = true
err = UnpackAndRecoverSingles( _, err = UnpackAndRecoverSingles(
packedBackups, keyRing, &chanRestorer, &peerConnector, packedBackups, keyRing, &chanRestorer, &peerConnector,
) )
if err == nil { require.ErrorIs(t, err, errConnectFail)
t.Fatalf("restoration should have failed")
}
chanRestorer.callCount-- chanRestorer.callCount--
peerConnector.fail = false peerConnector.fail = false
// Next, we'll ensure that if all the interfaces function as expected, // Next, we'll ensure that if all the interfaces function as expected,
// then the channels will properly be unpacked and restored. // then the channels will properly be unpacked and restored.
err = UnpackAndRecoverSingles( numRestored, err := UnpackAndRecoverSingles(
packedBackups, keyRing, &chanRestorer, &peerConnector, packedBackups, keyRing, &chanRestorer, &peerConnector,
) )
require.NoError(t, err, "unable to recover chans") require.NoError(t, err)
require.EqualValues(t, numSingles, numRestored)
// Both the restorer, and connector should have been called 10 times, // Both the restorer, and connector should have been called 10 times,
// once for each backup. // once for each backup.
if chanRestorer.callCount != numSingles { require.EqualValues(
t.Fatalf("expected %v calls, instead got %v", t, numSingles, chanRestorer.callCount, "restorer call count",
numSingles, chanRestorer.callCount) )
} require.EqualValues(
if peerConnector.callCount != numSingles { t, numSingles, peerConnector.callCount, "peer call count",
t.Fatalf("expected %v calls, instead got %v", )
numSingles, peerConnector.callCount)
}
// If we modify the keyRing, then unpacking should fail. // If we modify the keyRing, then unpacking should fail.
keyRing.Fail = true keyRing.Fail = true
err = UnpackAndRecoverSingles( _, err = UnpackAndRecoverSingles(
packedBackups, keyRing, &chanRestorer, &peerConnector, packedBackups, keyRing, &chanRestorer, &peerConnector,
) )
if err == nil { require.ErrorContains(t, err, "fail")
t.Fatalf("unpacking should have failed")
}
// TODO(roasbeef): verify proper call args // TODO(roasbeef): verify proper call args
} }
@ -148,9 +144,7 @@ func TestUnpackAndRecoverMulti(t *testing.T) {
backups := make([]Single, 0, numSingles) backups := make([]Single, 0, numSingles)
for i := 0; i < numSingles; i++ { for i := 0; i < numSingles; i++ {
channel, err := genRandomOpenChannelShell() channel, err := genRandomOpenChannelShell()
if err != nil { require.NoError(t, err)
t.Fatalf("unable make channel: %v", err)
}
single := NewSingle(channel, nil) single := NewSingle(channel, nil)
@ -162,9 +156,8 @@ func TestUnpackAndRecoverMulti(t *testing.T) {
} }
var b bytes.Buffer var b bytes.Buffer
if err := multi.PackToWriter(&b, keyRing); err != nil { err := multi.PackToWriter(&b, keyRing)
t.Fatalf("unable to pack multi: %v", err) require.NoError(t, err)
}
// Next, we'll pack the set of singles into a packed multi, and also // Next, we'll pack the set of singles into a packed multi, and also
// create the set of interfaces we need to carry out the remainder of // create the set of interfaces we need to carry out the remainder of
@ -177,54 +170,47 @@ func TestUnpackAndRecoverMulti(t *testing.T) {
// If we make the channel restore fail, then the entire method should // If we make the channel restore fail, then the entire method should
// as well // as well
chanRestorer.fail = true chanRestorer.fail = true
err := UnpackAndRecoverMulti( _, err = UnpackAndRecoverMulti(
packedMulti, keyRing, &chanRestorer, &peerConnector, packedMulti, keyRing, &chanRestorer, &peerConnector,
) )
if err == nil { require.ErrorIs(t, err, errRestoreFail)
t.Fatalf("restoration should have failed")
}
chanRestorer.fail = false chanRestorer.fail = false
// If we make the peer connector fail, then the entire method should as // If we make the peer connector fail, then the entire method should as
// well // well
peerConnector.fail = true peerConnector.fail = true
err = UnpackAndRecoverMulti( _, err = UnpackAndRecoverMulti(
packedMulti, keyRing, &chanRestorer, &peerConnector, packedMulti, keyRing, &chanRestorer, &peerConnector,
) )
if err == nil { require.ErrorIs(t, err, errConnectFail)
t.Fatalf("restoration should have failed")
}
chanRestorer.callCount-- chanRestorer.callCount--
peerConnector.fail = false peerConnector.fail = false
// Next, we'll ensure that if all the interfaces function as expected, // Next, we'll ensure that if all the interfaces function as expected,
// then the channels will properly be unpacked and restored. // then the channels will properly be unpacked and restored.
err = UnpackAndRecoverMulti( numRestored, err := UnpackAndRecoverMulti(
packedMulti, keyRing, &chanRestorer, &peerConnector, packedMulti, keyRing, &chanRestorer, &peerConnector,
) )
require.NoError(t, err, "unable to recover chans") require.NoError(t, err)
require.EqualValues(t, numSingles, numRestored)
// Both the restorer, and connector should have been called 10 times, // Both the restorer, and connector should have been called 10 times,
// once for each backup. // once for each backup.
if chanRestorer.callCount != numSingles { require.EqualValues(
t.Fatalf("expected %v calls, instead got %v", t, numSingles, chanRestorer.callCount, "restorer call count",
numSingles, chanRestorer.callCount) )
} require.EqualValues(
if peerConnector.callCount != numSingles { t, numSingles, peerConnector.callCount, "peer call count",
t.Fatalf("expected %v calls, instead got %v", )
numSingles, peerConnector.callCount)
}
// If we modify the keyRing, then unpacking should fail. // If we modify the keyRing, then unpacking should fail.
keyRing.Fail = true keyRing.Fail = true
err = UnpackAndRecoverMulti( _, err = UnpackAndRecoverMulti(
packedMulti, keyRing, &chanRestorer, &peerConnector, packedMulti, keyRing, &chanRestorer, &peerConnector,
) )
if err == nil { require.ErrorContains(t, err, "fail")
t.Fatalf("unpacking should have failed")
}
// TODO(roasbeef): verify proper call args // TODO(roasbeef): verify proper call args
} }

View File

@ -435,9 +435,9 @@ func TestPaymentControlDeleteNonInFlight(t *testing.T) {
} }
// Delete all failed payments. // Delete all failed payments.
if err := db.DeletePayments(true, false); err != nil { numPayments, err := db.DeletePayments(true, false)
t.Fatal(err) require.NoError(t, err)
} require.EqualValues(t, 1, numPayments)
// This should leave the succeeded and in-flight payments. // This should leave the succeeded and in-flight payments.
dbPayments, err := db.FetchPayments() dbPayments, err := db.FetchPayments()
@ -471,9 +471,9 @@ func TestPaymentControlDeleteNonInFlight(t *testing.T) {
} }
// Now delete all payments except in-flight. // Now delete all payments except in-flight.
if err := db.DeletePayments(false, false); err != nil { numPayments, err = db.DeletePayments(false, false)
t.Fatal(err) require.NoError(t, err)
} require.EqualValues(t, 2, numPayments)
// This should leave the in-flight payment. // This should leave the in-flight payment.
dbPayments, err = db.FetchPayments() dbPayments, err = db.FetchPayments()
@ -536,14 +536,18 @@ func TestPaymentControlDeletePayments(t *testing.T) {
assertPayments(t, db, payments) assertPayments(t, db, payments)
// Delete HTLC attempts for failed payments only. // Delete HTLC attempts for failed payments only.
require.NoError(t, db.DeletePayments(true, true)) numPayments, err := db.DeletePayments(true, true)
require.NoError(t, err)
require.EqualValues(t, 0, numPayments)
// The failed payment is the only altered one. // The failed payment is the only altered one.
payments[0].htlcs = 0 payments[0].htlcs = 0
assertPayments(t, db, payments) assertPayments(t, db, payments)
// Delete failed attempts for all payments. // Delete failed attempts for all payments.
require.NoError(t, db.DeletePayments(false, true)) numPayments, err = db.DeletePayments(false, true)
require.NoError(t, err)
require.EqualValues(t, 0, numPayments)
// The failed attempts should be deleted, except for the in-flight // The failed attempts should be deleted, except for the in-flight
// payment, that shouldn't be altered until it has completed. // payment, that shouldn't be altered until it has completed.
@ -551,12 +555,16 @@ func TestPaymentControlDeletePayments(t *testing.T) {
assertPayments(t, db, payments) assertPayments(t, db, payments)
// Now delete all failed payments. // Now delete all failed payments.
require.NoError(t, db.DeletePayments(true, false)) numPayments, err = db.DeletePayments(true, false)
require.NoError(t, err)
require.EqualValues(t, 1, numPayments)
assertPayments(t, db, payments[1:]) assertPayments(t, db, payments[1:])
// Finally delete all completed payments. // Finally delete all completed payments.
require.NoError(t, db.DeletePayments(false, false)) numPayments, err = db.DeletePayments(false, false)
require.NoError(t, err)
require.EqualValues(t, 1, numPayments)
assertPayments(t, db, payments[2:]) assertPayments(t, db, payments[2:])
} }

View File

@ -826,10 +826,12 @@ func (d *DB) DeletePayment(paymentHash lntypes.Hash,
// DeletePayments deletes all completed and failed payments from the DB. If // DeletePayments deletes all completed and failed payments from the DB. If
// failedOnly is set, only failed payments will be considered for deletion. If // failedOnly is set, only failed payments will be considered for deletion. If
// failedHtlsOnly is set, the payment itself won't be deleted, only failed HTLC // failedHtlcsOnly is set, the payment itself won't be deleted, only failed HTLC
// attempts. // attempts. The method returns the number of deleted payments, which is always
func (d *DB) DeletePayments(failedOnly, failedHtlcsOnly bool) error { // 0 if failedHtlcsOnly is set.
return kvdb.Update(d, func(tx kvdb.RwTx) error { func (d *DB) DeletePayments(failedOnly, failedHtlcsOnly bool) (int, error) {
var numPayments int
err := kvdb.Update(d, func(tx kvdb.RwTx) error {
payments := tx.ReadWriteBucket(paymentsRootBucket) payments := tx.ReadWriteBucket(paymentsRootBucket)
if payments == nil { if payments == nil {
return nil return nil
@ -906,6 +908,7 @@ func (d *DB) DeletePayments(failedOnly, failedHtlcsOnly bool) error {
} }
deleteIndexes = append(deleteIndexes, seqNrs...) deleteIndexes = append(deleteIndexes, seqNrs...)
numPayments++
return nil return nil
}) })
if err != nil { if err != nil {
@ -956,7 +959,14 @@ func (d *DB) DeletePayments(failedOnly, failedHtlcsOnly bool) error {
} }
return nil return nil
}, func() {}) }, func() {
numPayments = 0
})
if err != nil {
return 0, err
}
return numPayments, nil
} }
// fetchSequenceNumbers fetches all the sequence numbers associated with a // fetchSequenceNumbers fetches all the sequence numbers associated with a

View File

@ -10,6 +10,8 @@ import (
var sendCustomCommand = cli.Command{ var sendCustomCommand = cli.Command{
Name: "sendcustom", Name: "sendcustom",
Category: "Peers",
Usage: "Send a custom p2p wire message to a peer",
Flags: []cli.Flag{ Flags: []cli.Flag{
cli.StringFlag{ cli.StringFlag{
Name: "peer", Name: "peer",
@ -41,20 +43,24 @@ func sendCustom(ctx *cli.Context) error {
return err return err
} }
_, err = client.SendCustomMessage( resp, err := client.SendCustomMessage(
ctxc, ctxc, &lnrpc.SendCustomMessageRequest{
&lnrpc.SendCustomMessageRequest{
Peer: peer, Peer: peer,
Type: uint32(msgType), Type: uint32(msgType),
Data: data, Data: data,
}, },
) )
printRespJSON(resp)
return err return err
} }
var subscribeCustomCommand = cli.Command{ var subscribeCustomCommand = cli.Command{
Name: "subscribecustom", Name: "subscribecustom",
Category: "Peers",
Usage: "Subscribe to incoming custom p2p wire messages from all " +
"peers",
Action: actionDecorator(subscribeCustom), Action: actionDecorator(subscribeCustom),
} }
@ -64,8 +70,7 @@ func subscribeCustom(ctx *cli.Context) error {
defer cleanUp() defer cleanUp()
stream, err := client.SubscribeCustomMessages( stream, err := client.SubscribeCustomMessages(
ctxc, ctxc, &lnrpc.SubscribeCustomMessagesRequest{},
&lnrpc.SubscribeCustomMessagesRequest{},
) )
if err != nil { if err != nil {
return err return err

View File

@ -26,6 +26,7 @@ import (
"github.com/lightningnetwork/lnd/routing/route" "github.com/lightningnetwork/lnd/routing/route"
"github.com/urfave/cli" "github.com/urfave/cli"
"google.golang.org/grpc" "google.golang.org/grpc"
"google.golang.org/protobuf/proto"
) )
const ( const (
@ -1781,11 +1782,7 @@ func deletePayments(ctx *cli.Context) error {
failedHTLCsOnly = ctx.Bool("failed_htlcs_only") failedHTLCsOnly = ctx.Bool("failed_htlcs_only")
includeNonFailed = ctx.Bool("include_non_failed") includeNonFailed = ctx.Bool("include_non_failed")
err error err error
okMsg = struct { resp proto.Message
OK bool `json:"ok"`
}{
OK: true,
}
) )
// We pack two RPCs into the same CLI so there are a few non-valid // We pack two RPCs into the same CLI so there are a few non-valid
@ -1812,10 +1809,12 @@ func deletePayments(ctx *cli.Context) error {
err) err)
} }
_, err = client.DeletePayment(ctxc, &lnrpc.DeletePaymentRequest{ resp, err = client.DeletePayment(
ctxc, &lnrpc.DeletePaymentRequest{
PaymentHash: paymentHash, PaymentHash: paymentHash,
FailedHtlcsOnly: failedHTLCsOnly, FailedHtlcsOnly: failedHTLCsOnly,
}) },
)
if err != nil { if err != nil {
return fmt.Errorf("error deleting single payment: %w", return fmt.Errorf("error deleting single payment: %w",
err) err)
@ -1832,7 +1831,7 @@ func deletePayments(ctx *cli.Context) error {
fmt.Printf("Removing %s payments, this might take a while...\n", fmt.Printf("Removing %s payments, this might take a while...\n",
what) what)
_, err = client.DeleteAllPayments( resp, err = client.DeleteAllPayments(
ctxc, &lnrpc.DeleteAllPaymentsRequest{ ctxc, &lnrpc.DeleteAllPaymentsRequest{
AllPayments: includeNonFailed, AllPayments: includeNonFailed,
FailedPaymentsOnly: !includeNonFailed, FailedPaymentsOnly: !includeNonFailed,
@ -1844,9 +1843,7 @@ func deletePayments(ctx *cli.Context) error {
} }
} }
// Users are confused by empty JSON outputs so let's return a simple OK printJSON(resp)
// instead of just printing the empty response RPC message.
printJSON(okMsg)
return nil return nil
} }

View File

@ -2128,11 +2128,13 @@ func stopDaemon(ctx *cli.Context) error {
client, cleanUp := getClient(ctx) client, cleanUp := getClient(ctx)
defer cleanUp() defer cleanUp()
_, err := client.StopDaemon(ctxc, &lnrpc.StopRequest{}) resp, err := client.StopDaemon(ctxc, &lnrpc.StopRequest{})
if err != nil { if err != nil {
return err return err
} }
printRespJSON(resp)
return nil return nil
} }
@ -2997,10 +2999,12 @@ func restoreChanBackup(ctx *cli.Context) error {
req.Backup = backups.Backup req.Backup = backups.Backup
_, err = client.RestoreChannelBackups(ctxc, &req) resp, err := client.RestoreChannelBackups(ctxc, &req)
if err != nil { if err != nil {
return fmt.Errorf("unable to restore chan backups: %w", err) return fmt.Errorf("unable to restore chan backups: %w", err)
} }
printRespJSON(resp)
return nil return nil
} }

View File

@ -83,6 +83,24 @@
## RPC Updates ## RPC Updates
* Some RPCs that previously just returned an empty response message now at least
return [a short status
message](https://github.com/lightningnetwork/lnd/pull/7762) to help command
line users to better understand that the command was executed successfully and
something was executed or initiated to run in the background. The following
CLI commands now don't just return an empty response (`{}`) anymore:
* `lncli wallet releaseoutput` (`WalletKit.ReleaseOutput` RPC)
* `lncli wallet accounts import-pubkey` (`WalletKit.ImportPublicKey` RPC)
* `lncli wallet labeltx` (`WalletKit.LabelTransaction` RPC)
* `lncli sendcustom` (`Lightning.SendCustomMessage` RPC)
* `lncli connect` (`Lightning.ConnectPeer` RPC)
* `lncli disconnect` (`Lightning.DisconnectPeer` RPC)
* `lncli stop` (`Lightning.Stop` RPC)
  * `lncli deletepayments` (`Lightning.DeleteAllPayments` RPC)
* `lncli abandonchannel` (`Lightning.AbandonChannel` RPC)
* `lncli restorechanbackup` (`Lightning.RestoreChannelBackups` RPC)
* `lncli verifychanbackup` (`Lightning.VerifyChanBackup` RPC)
## lncli Updates ## lncli Updates
## Code Health ## Code Health
@ -158,6 +176,7 @@
* CharlieZKSmith * CharlieZKSmith
* Elle Mouton * Elle Mouton
* George Tsagkarelis * George Tsagkarelis
* Oliver Gugger
* Pins * Pins
* Viktor Tigerström * Viktor Tigerström
* Ziggie * Ziggie

View File

@ -396,12 +396,22 @@ func testChannelBackupRestoreBasic(ht *lntest.HarnessTest) {
req := &lnrpc.RestoreChanBackupRequest{ req := &lnrpc.RestoreChanBackupRequest{
Backup: backup, Backup: backup,
} }
newNode.RPC.RestoreChanBackups(req) res := newNode.RPC.RestoreChanBackups(
req,
)
require.EqualValues(
st, 1, res.NumRestored,
)
req = &lnrpc.RestoreChanBackupRequest{ req = &lnrpc.RestoreChanBackupRequest{
Backup: backup, Backup: backup,
} }
newNode.RPC.RestoreChanBackups(req) res = newNode.RPC.RestoreChanBackups(
req,
)
require.EqualValues(
st, 0, res.NumRestored,
)
return newNode return newNode
} }
@ -916,9 +926,27 @@ func testChannelBackupUpdates(ht *lntest.HarnessTest) {
} }
} }
containsChan := func(b *lnrpc.VerifyChanBackupResponse,
chanPoint *lnrpc.ChannelPoint) bool {
hash, err := lnrpc.GetChanPointFundingTxid(chanPoint)
require.NoError(ht, err)
chanPointStr := fmt.Sprintf("%s:%d", hash.String(),
chanPoint.OutputIndex)
for idx := range b.ChanPoints {
if b.ChanPoints[idx] == chanPointStr {
return true
}
}
return false
}
// assertBackupFileState is a helper function that we'll use to compare // assertBackupFileState is a helper function that we'll use to compare
// the on disk back up file to our currentBackup pointer above. // the on disk back up file to our currentBackup pointer above.
assertBackupFileState := func() { assertBackupFileState := func(expectAllChannels bool) {
err := wait.NoError(func() error { err := wait.NoError(func() error {
packedBackup, err := os.ReadFile(backupFilePath) packedBackup, err := os.ReadFile(backupFilePath)
if err != nil { if err != nil {
@ -946,7 +974,20 @@ func testChannelBackupUpdates(ht *lntest.HarnessTest) {
}, },
} }
carol.RPC.VerifyChanBackup(snapshot) res := carol.RPC.VerifyChanBackup(snapshot)
if !expectAllChannels {
continue
}
for idx := range chanPoints {
if containsChan(res, chanPoints[idx]) {
continue
}
return fmt.Errorf("backup %v doesn't "+
"contain chan_point: %v",
res.ChanPoints, chanPoints[idx])
}
} }
return nil return nil
@ -961,7 +1002,7 @@ func testChannelBackupUpdates(ht *lntest.HarnessTest) {
// The on disk file should also exactly match the latest backup that we // The on disk file should also exactly match the latest backup that we
// have. // have.
assertBackupFileState() assertBackupFileState(true)
// Next, we'll close the channels one by one. After each channel // Next, we'll close the channels one by one. After each channel
// closure, we should get a notification, and the on-disk state should // closure, we should get a notification, and the on-disk state should
@ -988,14 +1029,14 @@ func testChannelBackupUpdates(ht *lntest.HarnessTest) {
// Now that the channel's been fully resolved, we // Now that the channel's been fully resolved, we
// expect another notification. // expect another notification.
assertBackupNtfns(1) assertBackupNtfns(1)
assertBackupFileState() assertBackupFileState(false)
} else { } else {
ht.CloseChannel(alice, chanPoint) ht.CloseChannel(alice, chanPoint)
// We should get a single notification after closing, // We should get a single notification after closing,
// and the on-disk state should match this latest // and the on-disk state should match this latest
// notifications. // notifications.
assertBackupNtfns(1) assertBackupNtfns(1)
assertBackupFileState() assertBackupFileState(false)
} }
} }
} }
@ -1414,7 +1455,8 @@ func chanRestoreViaRPC(ht *lntest.HarnessTest, password []byte,
nil, nil,
) )
req := &lnrpc.RestoreChanBackupRequest{Backup: backup} req := &lnrpc.RestoreChanBackupRequest{Backup: backup}
newNode.RPC.RestoreChanBackups(req) res := newNode.RPC.RestoreChanBackups(req)
require.Greater(ht, res.NumRestored, uint32(0))
return newNode return newNode
} }

File diff suppressed because it is too large Load Diff

View File

@ -643,6 +643,8 @@ message SendCustomMessageRequest {
} }
message SendCustomMessageResponse { message SendCustomMessageResponse {
// The status of the send operation.
string status = 1;
} }
message Utxo { message Utxo {
@ -1317,6 +1319,8 @@ message ConnectPeerRequest {
uint64 timeout = 3; uint64 timeout = 3;
} }
message ConnectPeerResponse { message ConnectPeerResponse {
// The status of the connect operation.
string status = 1;
} }
message DisconnectPeerRequest { message DisconnectPeerRequest {
@ -1324,6 +1328,8 @@ message DisconnectPeerRequest {
string pub_key = 1; string pub_key = 1;
} }
message DisconnectPeerResponse { message DisconnectPeerResponse {
// The status of the disconnect operation.
string status = 1;
} }
message HTLC { message HTLC {
@ -3542,6 +3548,8 @@ message NetworkInfo {
message StopRequest { message StopRequest {
} }
message StopResponse { message StopResponse {
// The status of the stop operation.
string status = 1;
} }
message GraphTopologySubscription { message GraphTopologySubscription {
@ -4366,9 +4374,13 @@ message DeleteAllPaymentsRequest {
} }
message DeletePaymentResponse { message DeletePaymentResponse {
// The status of the delete operation.
string status = 1;
} }
message DeleteAllPaymentsResponse { message DeleteAllPaymentsResponse {
// The status of the delete operation.
string status = 1;
} }
message AbandonChannelRequest { message AbandonChannelRequest {
@ -4385,6 +4397,8 @@ message AbandonChannelRequest {
} }
message AbandonChannelResponse { message AbandonChannelResponse {
// The status of the abandon operation.
string status = 1;
} }
message DebugLevelRequest { message DebugLevelRequest {
@ -4724,12 +4738,15 @@ message RestoreChanBackupRequest {
} }
} }
message RestoreBackupResponse { message RestoreBackupResponse {
// The number of channels successfully restored.
uint32 num_restored = 1;
} }
message ChannelBackupSubscription { message ChannelBackupSubscription {
} }
message VerifyChanBackupResponse { message VerifyChanBackupResponse {
repeated string chan_points = 1;
} }
message MacaroonPermission { message MacaroonPermission {

View File

@ -3263,7 +3263,13 @@
} }
}, },
"lnrpcAbandonChannelResponse": { "lnrpcAbandonChannelResponse": {
"type": "object" "type": "object",
"properties": {
"status": {
"type": "string",
"description": "The status of the abandon operation."
}
}
}, },
"lnrpcAddInvoiceResponse": { "lnrpcAddInvoiceResponse": {
"type": "object", "type": "object",
@ -4593,7 +4599,13 @@
} }
}, },
"lnrpcConnectPeerResponse": { "lnrpcConnectPeerResponse": {
"type": "object" "type": "object",
"properties": {
"status": {
"type": "string",
"description": "The status of the connect operation."
}
}
}, },
"lnrpcCustomMessage": { "lnrpcCustomMessage": {
"type": "object", "type": "object",
@ -4635,7 +4647,13 @@
} }
}, },
"lnrpcDeleteAllPaymentsResponse": { "lnrpcDeleteAllPaymentsResponse": {
"type": "object" "type": "object",
"properties": {
"status": {
"type": "string",
"description": "The status of the delete operation."
}
}
}, },
"lnrpcDeleteMacaroonIDResponse": { "lnrpcDeleteMacaroonIDResponse": {
"type": "object", "type": "object",
@ -4647,10 +4665,22 @@
} }
}, },
"lnrpcDeletePaymentResponse": { "lnrpcDeletePaymentResponse": {
"type": "object" "type": "object",
"properties": {
"status": {
"type": "string",
"description": "The status of the delete operation."
}
}
}, },
"lnrpcDisconnectPeerResponse": { "lnrpcDisconnectPeerResponse": {
"type": "object" "type": "object",
"properties": {
"status": {
"type": "string",
"description": "The status of the disconnect operation."
}
}
}, },
"lnrpcEdgeLocator": { "lnrpcEdgeLocator": {
"type": "object", "type": "object",
@ -6922,7 +6952,14 @@
"description": " - ANCHOR: We resolved an anchor output.\n - INCOMING_HTLC: We are resolving an incoming htlc on chain. This if this htlc is\nclaimed, we swept the incoming htlc with the preimage. If it is timed\nout, our peer swept the timeout path.\n - OUTGOING_HTLC: We are resolving an outgoing htlc on chain. If this htlc is claimed,\nthe remote party swept the htlc with the preimage. If it is timed out,\nwe swept it with the timeout path.\n - COMMIT: We force closed and need to sweep our time locked commitment output." "description": " - ANCHOR: We resolved an anchor output.\n - INCOMING_HTLC: We are resolving an incoming htlc on chain. This if this htlc is\nclaimed, we swept the incoming htlc with the preimage. If it is timed\nout, our peer swept the timeout path.\n - OUTGOING_HTLC: We are resolving an outgoing htlc on chain. If this htlc is claimed,\nthe remote party swept the htlc with the preimage. If it is timed out,\nwe swept it with the timeout path.\n - COMMIT: We force closed and need to sweep our time locked commitment output."
}, },
"lnrpcRestoreBackupResponse": { "lnrpcRestoreBackupResponse": {
"type": "object" "type": "object",
"properties": {
"num_restored": {
"type": "integer",
"format": "int64",
"description": "The number of channels successfully restored."
}
}
}, },
"lnrpcRestoreChanBackupRequest": { "lnrpcRestoreChanBackupRequest": {
"type": "object", "type": "object",
@ -7133,7 +7170,13 @@
} }
}, },
"lnrpcSendCustomMessageResponse": { "lnrpcSendCustomMessageResponse": {
"type": "object" "type": "object",
"properties": {
"status": {
"type": "string",
"description": "The status of the send operation."
}
}
}, },
"lnrpcSendManyRequest": { "lnrpcSendManyRequest": {
"type": "object", "type": "object",
@ -7338,7 +7381,13 @@
"type": "object" "type": "object"
}, },
"lnrpcStopResponse": { "lnrpcStopResponse": {
"type": "object" "type": "object",
"properties": {
"status": {
"type": "string",
"description": "The status of the stop operation."
}
}
}, },
"lnrpcStreamAuth": { "lnrpcStreamAuth": {
"type": "object", "type": "object",
@ -7485,7 +7534,15 @@
} }
}, },
"lnrpcVerifyChanBackupResponse": { "lnrpcVerifyChanBackupResponse": {
"type": "object" "type": "object",
"properties": {
"chan_points": {
"type": "array",
"items": {
"type": "string"
}
}
}
}, },
"lnrpcVerifyMessageRequest": { "lnrpcVerifyMessageRequest": {
"type": "object", "type": "object",

File diff suppressed because it is too large Load Diff

View File

@ -414,6 +414,8 @@ message ReleaseOutputRequest {
} }
message ReleaseOutputResponse { message ReleaseOutputResponse {
// The status of the release operation.
string status = 1;
} }
message KeyReq { message KeyReq {
@ -699,6 +701,8 @@ message ImportPublicKeyRequest {
AddressType address_type = 2; AddressType address_type = 2;
} }
message ImportPublicKeyResponse { message ImportPublicKeyResponse {
// The status of the import operation.
string status = 1;
} }
message ImportTapscriptRequest { message ImportTapscriptRequest {
@ -1318,6 +1322,8 @@ message LabelTransactionRequest {
} }
message LabelTransactionResponse { message LabelTransactionResponse {
// The status of the label operation.
string status = 1;
} }
// The possible change address types for default accounts and single imported // The possible change address types for default accounts and single imported

View File

@ -1692,7 +1692,13 @@
} }
}, },
"walletrpcImportPublicKeyResponse": { "walletrpcImportPublicKeyResponse": {
"type": "object" "type": "object",
"properties": {
"status": {
"type": "string",
"description": "The status of the import operation."
}
}
}, },
"walletrpcImportTapscriptRequest": { "walletrpcImportTapscriptRequest": {
"type": "object", "type": "object",
@ -1764,7 +1770,13 @@
} }
}, },
"walletrpcLabelTransactionResponse": { "walletrpcLabelTransactionResponse": {
"type": "object" "type": "object",
"properties": {
"status": {
"type": "string",
"description": "The status of the label operation."
}
}
}, },
"walletrpcLeaseOutputRequest": { "walletrpcLeaseOutputRequest": {
"type": "object", "type": "object",
@ -2002,7 +2014,13 @@
} }
}, },
"walletrpcReleaseOutputResponse": { "walletrpcReleaseOutputResponse": {
"type": "object" "type": "object",
"properties": {
"status": {
"type": "string",
"description": "The status of the release operation."
}
}
}, },
"walletrpcRemoveTransactionResponse": { "walletrpcRemoveTransactionResponse": {
"type": "object", "type": "object",

View File

@ -536,7 +536,9 @@ func (w *WalletKit) ReleaseOutput(ctx context.Context,
return nil, err return nil, err
} }
return &ReleaseOutputResponse{}, nil return &ReleaseOutputResponse{
Status: fmt.Sprintf("output %v released", op.String()),
}, nil
} }
// ListLeases returns a list of all currently locked utxos. // ListLeases returns a list of all currently locked utxos.
@ -1443,7 +1445,10 @@ func (w *WalletKit) LabelTransaction(ctx context.Context,
} }
err = w.cfg.Wallet.LabelTransaction(*hash, req.Label, req.Overwrite) err = w.cfg.Wallet.LabelTransaction(*hash, req.Label, req.Overwrite)
return &LabelTransactionResponse{}, err
return &LabelTransactionResponse{
Status: fmt.Sprintf("transaction label '%s' added", req.Label),
}, err
} }
// FundPsbt creates a fully populated PSBT that contains enough inputs to fund // FundPsbt creates a fully populated PSBT that contains enough inputs to fund
@ -2896,7 +2901,10 @@ func (w *WalletKit) ImportPublicKey(_ context.Context,
return nil, err return nil, err
} }
return &ImportPublicKeyResponse{}, nil return &ImportPublicKeyResponse{
Status: fmt.Sprintf("public key %x imported",
pubKey.SerializeCompressed()),
}, nil
} }
// ImportTapscript imports a Taproot script and internal key and adds the // ImportTapscript imports a Taproot script and internal key and adds the

View File

@ -1806,23 +1806,24 @@ func (r *rpcServer) ConnectPeer(ctx context.Context,
// request. // request.
if in.Timeout != 0 { if in.Timeout != 0 {
timeout = time.Duration(in.Timeout) * time.Second timeout = time.Duration(in.Timeout) * time.Second
rpcsLog.Debugf( rpcsLog.Debugf("[connectpeer] connection timeout is set to %v",
"[connectpeer] connection timeout is set to %v", timeout)
timeout,
)
} }
if err := r.server.ConnectToPeer( if err := r.server.ConnectToPeer(
peerAddr, in.Perm, timeout, peerAddr, in.Perm, timeout,
); err != nil { ); err != nil {
rpcsLog.Errorf( rpcsLog.Errorf("[connectpeer]: error connecting to peer: %v",
"[connectpeer]: error connecting to peer: %v", err, err)
)
return nil, err return nil, err
} }
rpcsLog.Debugf("Connected to peer: %v", peerAddr.String()) rpcsLog.Debugf("Connected to peer: %v", peerAddr.String())
return &lnrpc.ConnectPeerResponse{}, nil
return &lnrpc.ConnectPeerResponse{
Status: fmt.Sprintf("connection to %v initiated",
peerAddr.String()),
}, nil
} }
// DisconnectPeer attempts to disconnect one peer from another identified by a // DisconnectPeer attempts to disconnect one peer from another identified by a
@ -1884,7 +1885,9 @@ func (r *rpcServer) DisconnectPeer(ctx context.Context,
return nil, fmt.Errorf("unable to disconnect peer: %w", err) return nil, fmt.Errorf("unable to disconnect peer: %w", err)
} }
return &lnrpc.DisconnectPeerResponse{}, nil return &lnrpc.DisconnectPeerResponse{
Status: "disconnect initiated",
}, nil
} }
// newFundingShimAssembler returns a new fully populated // newFundingShimAssembler returns a new fully populated
@ -3165,7 +3168,9 @@ func (r *rpcServer) AbandonChannel(_ context.Context,
return nil, err return nil, err
} }
return &lnrpc.AbandonChannelResponse{}, nil return &lnrpc.AbandonChannelResponse{
Status: fmt.Sprintf("channel %v abandoned", chanPoint.String()),
}, nil
} }
// GetInfo returns general information concerning the lightning node including // GetInfo returns general information concerning the lightning node including
@ -7035,7 +7040,10 @@ func (r *rpcServer) StopDaemon(_ context.Context,
} }
r.interceptor.RequestShutdown() r.interceptor.RequestShutdown()
return &lnrpc.StopResponse{}, nil
return &lnrpc.StopResponse{
Status: "shutdown initiated, check logs for progress",
}, nil
} }
// SubscribeChannelGraph launches a streaming RPC that allows the caller to // SubscribeChannelGraph launches a streaming RPC that allows the caller to
@ -7286,7 +7294,9 @@ func (r *rpcServer) DeletePayment(ctx context.Context,
return nil, err return nil, err
} }
return &lnrpc.DeletePaymentResponse{}, nil return &lnrpc.DeletePaymentResponse{
Status: "payment deleted",
}, nil
} }
// DeleteAllPayments deletes all outgoing payments from DB. // DeleteAllPayments deletes all outgoing payments from DB.
@ -7319,14 +7329,17 @@ func (r *rpcServer) DeleteAllPayments(ctx context.Context,
"failed_htlcs_only=%v", req.FailedPaymentsOnly, "failed_htlcs_only=%v", req.FailedPaymentsOnly,
req.FailedHtlcsOnly) req.FailedHtlcsOnly)
err := r.server.miscDB.DeletePayments( numDeletedPayments, err := r.server.miscDB.DeletePayments(
req.FailedPaymentsOnly, req.FailedHtlcsOnly, req.FailedPaymentsOnly, req.FailedHtlcsOnly,
) )
if err != nil { if err != nil {
return nil, err return nil, err
} }
return &lnrpc.DeleteAllPaymentsResponse{}, nil return &lnrpc.DeleteAllPaymentsResponse{
Status: fmt.Sprintf("%v payments deleted, failed_htlcs_only=%v",
numDeletedPayments, req.FailedHtlcsOnly),
}, nil
} }
// DebugLevel allows a caller to programmatically set the logging verbosity of // DebugLevel allows a caller to programmatically set the logging verbosity of
@ -7974,6 +7987,10 @@ func (r *rpcServer) ExportChannelBackup(ctx context.Context,
func (r *rpcServer) VerifyChanBackup(ctx context.Context, func (r *rpcServer) VerifyChanBackup(ctx context.Context,
in *lnrpc.ChanBackupSnapshot) (*lnrpc.VerifyChanBackupResponse, error) { in *lnrpc.ChanBackupSnapshot) (*lnrpc.VerifyChanBackupResponse, error) {
var (
channels []chanbackup.Single
err error
)
switch { switch {
// If neither a Single or Multi has been specified, then we have nothing // If neither a Single or Multi has been specified, then we have nothing
// to verify. // to verify.
@ -8004,7 +8021,7 @@ func (r *rpcServer) VerifyChanBackup(ctx context.Context,
// With our PackedSingles created, we'll attempt to unpack the // With our PackedSingles created, we'll attempt to unpack the
// backup. If this fails, then we know the backup is invalid for // backup. If this fails, then we know the backup is invalid for
// some reason. // some reason.
_, err := chanBackup.Unpack(r.server.cc.KeyRing) channels, err = chanBackup.Unpack(r.server.cc.KeyRing)
if err != nil { if err != nil {
return nil, fmt.Errorf("invalid single channel "+ return nil, fmt.Errorf("invalid single channel "+
"backup: %v", err) "backup: %v", err)
@ -8018,14 +8035,20 @@ func (r *rpcServer) VerifyChanBackup(ctx context.Context,
// We'll now attempt to unpack the Multi. If this fails, then we // We'll now attempt to unpack the Multi. If this fails, then we
// know it's invalid. // know it's invalid.
_, err := packedMulti.Unpack(r.server.cc.KeyRing) multi, err := packedMulti.Unpack(r.server.cc.KeyRing)
if err != nil { if err != nil {
return nil, fmt.Errorf("invalid multi channel backup: "+ return nil, fmt.Errorf("invalid multi channel backup: "+
"%v", err) "%v", err)
} }
channels = multi.StaticBackups
} }
return &lnrpc.VerifyChanBackupResponse{}, nil return &lnrpc.VerifyChanBackupResponse{
ChanPoints: fn.Map(func(c chanbackup.Single) string {
return c.FundingOutpoint.String()
}, channels),
}, nil
} }
// createBackupSnapshot converts the passed Single backup into a snapshot which // createBackupSnapshot converts the passed Single backup into a snapshot which
@ -8142,6 +8165,10 @@ func (r *rpcServer) RestoreChannelBackups(ctx context.Context,
// We'll accept either a list of Single backups, or a single Multi // We'll accept either a list of Single backups, or a single Multi
// backup which contains several single backups. // backup which contains several single backups.
var (
numRestored int
err error
)
switch { switch {
case in.GetChanBackups() != nil: case in.GetChanBackups() != nil:
chanBackupsProtos := in.GetChanBackups() chanBackupsProtos := in.GetChanBackups()
@ -8159,7 +8186,7 @@ func (r *rpcServer) RestoreChannelBackups(ctx context.Context,
// write the new backups to disk, and then attempt to connect // write the new backups to disk, and then attempt to connect
// out to any peers that we know of which were our prior // out to any peers that we know of which were our prior
// channel peers. // channel peers.
err := chanbackup.UnpackAndRecoverSingles( numRestored, err = chanbackup.UnpackAndRecoverSingles(
chanbackup.PackedSingles(packedBackups), chanbackup.PackedSingles(packedBackups),
r.server.cc.KeyRing, chanRestorer, r.server, r.server.cc.KeyRing, chanRestorer, r.server,
) )
@ -8176,7 +8203,7 @@ func (r *rpcServer) RestoreChannelBackups(ctx context.Context,
// out to any peers that we know of which were our prior // out to any peers that we know of which were our prior
// channel peers. // channel peers.
packedMulti := chanbackup.PackedMulti(packedMultiBackup) packedMulti := chanbackup.PackedMulti(packedMultiBackup)
err := chanbackup.UnpackAndRecoverMulti( numRestored, err = chanbackup.UnpackAndRecoverMulti(
packedMulti, r.server.cc.KeyRing, chanRestorer, packedMulti, r.server.cc.KeyRing, chanRestorer,
r.server, r.server,
) )
@ -8186,7 +8213,9 @@ func (r *rpcServer) RestoreChannelBackups(ctx context.Context,
} }
} }
return &lnrpc.RestoreBackupResponse{}, nil return &lnrpc.RestoreBackupResponse{
NumRestored: uint32(numRestored),
}, nil
} }
// SubscribeChannelBackups allows a client to sub-subscribe to the most up to // SubscribeChannelBackups allows a client to sub-subscribe to the most up to
@ -8789,8 +8818,9 @@ func (r *rpcServer) RegisterRPCMiddleware(
} }
// SendCustomMessage sends a custom peer message. // SendCustomMessage sends a custom peer message.
func (r *rpcServer) SendCustomMessage(ctx context.Context, req *lnrpc.SendCustomMessageRequest) ( func (r *rpcServer) SendCustomMessage(_ context.Context,
*lnrpc.SendCustomMessageResponse, error) { req *lnrpc.SendCustomMessageRequest) (*lnrpc.SendCustomMessageResponse,
error) {
peer, err := route.NewVertexFromBytes(req.Peer) peer, err := route.NewVertexFromBytes(req.Peer)
if err != nil { if err != nil {
@ -8801,18 +8831,21 @@ func (r *rpcServer) SendCustomMessage(ctx context.Context, req *lnrpc.SendCustom
peer, lnwire.MessageType(req.Type), req.Data, peer, lnwire.MessageType(req.Type), req.Data,
) )
switch { switch {
case err == ErrPeerNotConnected: case errors.Is(err, ErrPeerNotConnected):
return nil, status.Error(codes.NotFound, err.Error()) return nil, status.Error(codes.NotFound, err.Error())
case err != nil: case err != nil:
return nil, err return nil, err
} }
return &lnrpc.SendCustomMessageResponse{}, nil return &lnrpc.SendCustomMessageResponse{
Status: "message sent successfully",
}, nil
} }
// SubscribeCustomMessages subscribes to a stream of incoming custom peer // SubscribeCustomMessages subscribes to a stream of incoming custom peer
// messages. // messages.
func (r *rpcServer) SubscribeCustomMessages(req *lnrpc.SubscribeCustomMessagesRequest, func (r *rpcServer) SubscribeCustomMessages(
_ *lnrpc.SubscribeCustomMessagesRequest,
server lnrpc.Lightning_SubscribeCustomMessagesServer) error { server lnrpc.Lightning_SubscribeCustomMessagesServer) error {
client, err := r.server.SubscribeCustomMessages() client, err := r.server.SubscribeCustomMessages()
@ -8845,17 +8878,17 @@ func (r *rpcServer) SubscribeCustomMessages(req *lnrpc.SubscribeCustomMessagesRe
} }
// ListAliases returns the set of all aliases we have ever allocated along with // ListAliases returns the set of all aliases we have ever allocated along with
// their base SCID's and possibly a separate confirmed SCID in the case of // their base SCIDs and possibly a separate confirmed SCID in the case of
// zero-conf. // zero-conf.
func (r *rpcServer) ListAliases(ctx context.Context, func (r *rpcServer) ListAliases(_ context.Context,
in *lnrpc.ListAliasesRequest) (*lnrpc.ListAliasesResponse, error) { _ *lnrpc.ListAliasesRequest) (*lnrpc.ListAliasesResponse, error) {
// Fetch the map of all aliases. // Fetch the map of all aliases.
mapAliases := r.server.aliasMgr.ListAliases() mapAliases := r.server.aliasMgr.ListAliases()
// Fill out the response. This does not include the zero-conf confirmed // Fill out the response. This does not include the zero-conf confirmed
// SCID. Doing so would require more database lookups and it can be // SCID. Doing so would require more database lookups, and it can be
// cross-referenced with the output of listchannels/closedchannels. // cross-referenced with the output of ListChannels/ClosedChannels.
resp := &lnrpc.ListAliasesResponse{ resp := &lnrpc.ListAliasesResponse{
AliasMaps: make([]*lnrpc.AliasMap, 0), AliasMaps: make([]*lnrpc.AliasMap, 0),
} }

View File

@ -2248,7 +2248,7 @@ func (s *server) Start() error {
chainArb: s.chainArb, chainArb: s.chainArb,
} }
if len(s.chansToRestore.PackedSingleChanBackups) != 0 { if len(s.chansToRestore.PackedSingleChanBackups) != 0 {
err := chanbackup.UnpackAndRecoverSingles( _, err := chanbackup.UnpackAndRecoverSingles(
s.chansToRestore.PackedSingleChanBackups, s.chansToRestore.PackedSingleChanBackups,
s.cc.KeyRing, chanRestorer, s, s.cc.KeyRing, chanRestorer, s,
) )
@ -2259,7 +2259,7 @@ func (s *server) Start() error {
} }
} }
if len(s.chansToRestore.PackedMultiChanBackup) != 0 { if len(s.chansToRestore.PackedMultiChanBackup) != 0 {
err := chanbackup.UnpackAndRecoverMulti( _, err := chanbackup.UnpackAndRecoverMulti(
s.chansToRestore.PackedMultiChanBackup, s.chansToRestore.PackedMultiChanBackup,
s.cc.KeyRing, chanRestorer, s, s.cc.KeyRing, chanRestorer, s,
) )