Mirror of https://github.com/lightningnetwork/lnd.git (synced 2025-06-25 16:23:49 +02:00)

Merge pull request #7762 from guggero/empty-resp

lnrpc: return meaningful response instead of empty one

Commit 0899077fb5
@@ -40,9 +40,11 @@ type PeerConnector interface {
 // the channel. In addition a LinkNode will be created for each new peer as
 // well, in order to expose the addressing information required to locate to
 // and connect to each peer in order to initiate the recovery protocol.
+// The number of channels that were successfully restored is returned.
 func Recover(backups []Single, restorer ChannelRestorer,
-	peerConnector PeerConnector) error {
+	peerConnector PeerConnector) (int, error) {
 
+	var numRestored int
 	for i, backup := range backups {
 		log.Infof("Restoring ChannelPoint(%v) to disk: ",
 			backup.FundingOutpoint)
@@ -57,9 +59,10 @@ func Recover(backups []Single, restorer ChannelRestorer,
 			continue
 		}
 		if err != nil {
-			return err
+			return numRestored, err
 		}
 
+		numRestored++
 		log.Infof("Attempting to connect to node=%x (addrs=%v) to "+
 			"restore ChannelPoint(%v)",
 			backup.RemoteNodePub.SerializeCompressed(),
@@ -70,7 +73,7 @@ func Recover(backups []Single, restorer ChannelRestorer,
 			backup.RemoteNodePub, backup.Addresses,
 		)
 		if err != nil {
-			return err
+			return numRestored, err
 		}
 
 		// TODO(roasbeef): to handle case where node has changed addrs,
@@ -80,7 +83,7 @@ func Recover(backups []Single, restorer ChannelRestorer,
 		// * just to to fresh w/ call to node addrs and de-dup?
 	}
 
-	return nil
+	return numRestored, nil
 }
 
 // TODO(roasbeef): more specific keychain interface?
@@ -88,16 +91,17 @@ func Recover(backups []Single, restorer ChannelRestorer,
 // UnpackAndRecoverSingles is a one-shot method, that given a set of packed
 // single channel backups, will restore the channel state to a channel shell,
 // and also reach out to connect to any of the known node addresses for that
-// channel. It is assumes that after this method exists, if a connection we
-// able to be established, then then PeerConnector will continue to attempt to
-// re-establish a persistent connection in the background.
+// channel. It is assumed that after this method exits, if a connection was
+// established, then the PeerConnector will continue to attempt to re-establish
+// a persistent connection in the background. The number of channels that were
+// successfully restored is returned.
 func UnpackAndRecoverSingles(singles PackedSingles,
 	keyChain keychain.KeyRing, restorer ChannelRestorer,
-	peerConnector PeerConnector) error {
+	peerConnector PeerConnector) (int, error) {
 
 	chanBackups, err := singles.Unpack(keyChain)
 	if err != nil {
-		return err
+		return 0, err
 	}
 
 	return Recover(chanBackups, restorer, peerConnector)
@@ -106,16 +110,17 @@ func UnpackAndRecoverSingles(singles PackedSingles,
 // UnpackAndRecoverMulti is a one-shot method, that given a set of packed
 // multi-channel backups, will restore the channel states to channel shells,
 // and also reach out to connect to any of the known node addresses for that
-// channel. It is assumes that after this method exists, if a connection we
-// able to be established, then then PeerConnector will continue to attempt to
-// re-establish a persistent connection in the background.
+// channel. It is assumed that after this method exits, if a connection was
+// established, then the PeerConnector will continue to attempt to re-establish
+// a persistent connection in the background. The number of channels that were
+// successfully restored is returned.
 func UnpackAndRecoverMulti(packedMulti PackedMulti,
 	keyChain keychain.KeyRing, restorer ChannelRestorer,
-	peerConnector PeerConnector) error {
+	peerConnector PeerConnector) (int, error) {
 
 	chanBackups, err := packedMulti.Unpack(keyChain)
 	if err != nil {
-		return err
+		return 0, err
 	}
 
 	return Recover(chanBackups.StaticBackups, restorer, peerConnector)
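For context, a minimal sketch of how a caller might consume the new return value. Only UnpackAndRecoverMulti and its (int, error) signature come from this diff; the helper function and its inputs are hypothetical scaffolding:

// restoreFromMulti is a hypothetical caller; the keyRing, restorer and
// connector values would come from the caller's environment.
func restoreFromMulti(packed chanbackup.PackedMulti, keyRing keychain.KeyRing,
	restorer chanbackup.ChannelRestorer,
	connector chanbackup.PeerConnector) error {

	numRestored, err := chanbackup.UnpackAndRecoverMulti(
		packed, keyRing, restorer, connector,
	)
	if err != nil {
		// The count still tells us how many channels made it to disk
		// before the failure.
		return fmt.Errorf("restored %d channels before failure: %w",
			numRestored, err)
	}

	fmt.Printf("successfully restored %d channels\n", numRestored)

	return nil
}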
@@ -2,7 +2,7 @@ package chanbackup
 
 import (
 	"bytes"
-	"fmt"
+	"errors"
 	"net"
 	"testing"
@@ -11,6 +11,12 @@ import (
 	"github.com/stretchr/testify/require"
 )
 
+var (
+	errRestoreFail = errors.New("restore fail")
+
+	errConnectFail = errors.New("connect fail")
+)
+
 type mockChannelRestorer struct {
 	fail bool
@@ -19,7 +25,7 @@ type mockChannelRestorer struct {
 
 func (m *mockChannelRestorer) RestoreChansFromSingles(...Single) error {
 	if m.fail {
-		return fmt.Errorf("fail")
+		return errRestoreFail
 	}
 
 	m.callCount++
@@ -33,11 +39,11 @@ type mockPeerConnector struct {
 	callCount int
 }
 
-func (m *mockPeerConnector) ConnectPeer(node *btcec.PublicKey,
-	addrs []net.Addr) error {
+func (m *mockPeerConnector) ConnectPeer(_ *btcec.PublicKey,
+	_ []net.Addr) error {
 
 	if m.fail {
-		return fmt.Errorf("fail")
+		return errConnectFail
 	}
 
 	m.callCount++
@@ -59,16 +65,13 @@ func TestUnpackAndRecoverSingles(t *testing.T) {
 	var packedBackups PackedSingles
 	for i := 0; i < numSingles; i++ {
 		channel, err := genRandomOpenChannelShell()
-		if err != nil {
-			t.Fatalf("unable make channel: %v", err)
-		}
+		require.NoError(t, err)
 
 		single := NewSingle(channel, nil)
 
 		var b bytes.Buffer
-		if err := single.PackToWriter(&b, keyRing); err != nil {
-			t.Fatalf("unable to pack single: %v", err)
-		}
+		err = single.PackToWriter(&b, keyRing)
+		require.NoError(t, err)
 
 		backups = append(backups, single)
 		packedBackups = append(packedBackups, b.Bytes())
@@ -83,54 +86,47 @@ func TestUnpackAndRecoverSingles(t *testing.T) {
 	// If we make the channel restore fail, then the entire method should
 	// as well
 	chanRestorer.fail = true
-	err := UnpackAndRecoverSingles(
+	_, err := UnpackAndRecoverSingles(
 		packedBackups, keyRing, &chanRestorer, &peerConnector,
 	)
-	if err == nil {
-		t.Fatalf("restoration should have failed")
-	}
+	require.ErrorIs(t, err, errRestoreFail)
 
 	chanRestorer.fail = false
 
 	// If we make the peer connector fail, then the entire method should as
 	// well
	peerConnector.fail = true
-	err = UnpackAndRecoverSingles(
+	_, err = UnpackAndRecoverSingles(
 		packedBackups, keyRing, &chanRestorer, &peerConnector,
 	)
-	if err == nil {
-		t.Fatalf("restoration should have failed")
-	}
+	require.ErrorIs(t, err, errConnectFail)
 
 	chanRestorer.callCount--
 	peerConnector.fail = false
 
 	// Next, we'll ensure that if all the interfaces function as expected,
 	// then the channels will properly be unpacked and restored.
-	err = UnpackAndRecoverSingles(
+	numRestored, err := UnpackAndRecoverSingles(
 		packedBackups, keyRing, &chanRestorer, &peerConnector,
 	)
-	require.NoError(t, err, "unable to recover chans")
+	require.NoError(t, err)
+	require.EqualValues(t, numSingles, numRestored)
 
 	// Both the restorer, and connector should have been called 10 times,
 	// once for each backup.
-	if chanRestorer.callCount != numSingles {
-		t.Fatalf("expected %v calls, instead got %v",
-			numSingles, chanRestorer.callCount)
-	}
-	if peerConnector.callCount != numSingles {
-		t.Fatalf("expected %v calls, instead got %v",
-			numSingles, peerConnector.callCount)
-	}
+	require.EqualValues(
+		t, numSingles, chanRestorer.callCount, "restorer call count",
+	)
+	require.EqualValues(
+		t, numSingles, peerConnector.callCount, "peer call count",
+	)
 
 	// If we modify the keyRing, then unpacking should fail.
 	keyRing.Fail = true
-	err = UnpackAndRecoverSingles(
+	_, err = UnpackAndRecoverSingles(
 		packedBackups, keyRing, &chanRestorer, &peerConnector,
 	)
-	if err == nil {
-		t.Fatalf("unpacking should have failed")
-	}
+	require.ErrorContains(t, err, "fail")
 
 	// TODO(roasbeef): verify proper call args
 }
@@ -148,9 +144,7 @@ func TestUnpackAndRecoverMulti(t *testing.T) {
 	backups := make([]Single, 0, numSingles)
 	for i := 0; i < numSingles; i++ {
 		channel, err := genRandomOpenChannelShell()
-		if err != nil {
-			t.Fatalf("unable make channel: %v", err)
-		}
+		require.NoError(t, err)
 
 		single := NewSingle(channel, nil)
 
@@ -162,9 +156,8 @@ func TestUnpackAndRecoverMulti(t *testing.T) {
 	}
 
 	var b bytes.Buffer
-	if err := multi.PackToWriter(&b, keyRing); err != nil {
-		t.Fatalf("unable to pack multi: %v", err)
-	}
+	err := multi.PackToWriter(&b, keyRing)
+	require.NoError(t, err)
 
 	// Next, we'll pack the set of singles into a packed multi, and also
 	// create the set of interfaces we need to carry out the remainder of
@@ -177,54 +170,47 @@ func TestUnpackAndRecoverMulti(t *testing.T) {
 	// If we make the channel restore fail, then the entire method should
 	// as well
 	chanRestorer.fail = true
-	err := UnpackAndRecoverMulti(
+	_, err = UnpackAndRecoverMulti(
 		packedMulti, keyRing, &chanRestorer, &peerConnector,
 	)
-	if err == nil {
-		t.Fatalf("restoration should have failed")
-	}
+	require.ErrorIs(t, err, errRestoreFail)
 
 	chanRestorer.fail = false
 
 	// If we make the peer connector fail, then the entire method should as
 	// well
 	peerConnector.fail = true
-	err = UnpackAndRecoverMulti(
+	_, err = UnpackAndRecoverMulti(
 		packedMulti, keyRing, &chanRestorer, &peerConnector,
 	)
-	if err == nil {
-		t.Fatalf("restoration should have failed")
-	}
+	require.ErrorIs(t, err, errConnectFail)
 
 	chanRestorer.callCount--
 	peerConnector.fail = false
 
 	// Next, we'll ensure that if all the interfaces function as expected,
 	// then the channels will properly be unpacked and restored.
-	err = UnpackAndRecoverMulti(
+	numRestored, err := UnpackAndRecoverMulti(
 		packedMulti, keyRing, &chanRestorer, &peerConnector,
 	)
-	require.NoError(t, err, "unable to recover chans")
+	require.NoError(t, err)
+	require.EqualValues(t, numSingles, numRestored)
 
 	// Both the restorer, and connector should have been called 10 times,
 	// once for each backup.
-	if chanRestorer.callCount != numSingles {
-		t.Fatalf("expected %v calls, instead got %v",
-			numSingles, chanRestorer.callCount)
-	}
-	if peerConnector.callCount != numSingles {
-		t.Fatalf("expected %v calls, instead got %v",
-			numSingles, peerConnector.callCount)
-	}
+	require.EqualValues(
+		t, numSingles, chanRestorer.callCount, "restorer call count",
	)
+	require.EqualValues(
+		t, numSingles, peerConnector.callCount, "peer call count",
+	)
 
 	// If we modify the keyRing, then unpacking should fail.
 	keyRing.Fail = true
-	err = UnpackAndRecoverMulti(
+	_, err = UnpackAndRecoverMulti(
 		packedMulti, keyRing, &chanRestorer, &peerConnector,
 	)
-	if err == nil {
-		t.Fatalf("unpacking should have failed")
-	}
+	require.ErrorContains(t, err, "fail")
 
 	// TODO(roasbeef): verify proper call args
 }
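The tests above switch from ad-hoc fmt.Errorf values and nil checks to package-level sentinel errors asserted with require.ErrorIs. A standalone illustration of why that pattern is more robust than string matching (all names here are hypothetical, not part of the diff):

package main

import (
	"errors"
	"fmt"
)

// A sentinel error can be matched by identity with errors.Is, even through
// layers of wrapping, which string comparison cannot do reliably.
var errRestoreFail = errors.New("restore fail")

func restore() error {
	return fmt.Errorf("recovery aborted: %w", errRestoreFail)
}

func main() {
	err := restore()
	fmt.Println(errors.Is(err, errRestoreFail)) // prints: true
}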
@@ -435,9 +435,9 @@ func TestPaymentControlDeleteNonInFlight(t *testing.T) {
 	}
 
 	// Delete all failed payments.
-	if err := db.DeletePayments(true, false); err != nil {
-		t.Fatal(err)
-	}
+	numPayments, err := db.DeletePayments(true, false)
+	require.NoError(t, err)
+	require.EqualValues(t, 1, numPayments)
 
 	// This should leave the succeeded and in-flight payments.
 	dbPayments, err := db.FetchPayments()
@@ -471,9 +471,9 @@ func TestPaymentControlDeleteNonInFlight(t *testing.T) {
 	}
 
 	// Now delete all payments except in-flight.
-	if err := db.DeletePayments(false, false); err != nil {
-		t.Fatal(err)
-	}
+	numPayments, err = db.DeletePayments(false, false)
+	require.NoError(t, err)
+	require.EqualValues(t, 2, numPayments)
 
 	// This should leave the in-flight payment.
 	dbPayments, err = db.FetchPayments()
@@ -536,14 +536,18 @@ func TestPaymentControlDeletePayments(t *testing.T) {
 	assertPayments(t, db, payments)
 
 	// Delete HTLC attempts for failed payments only.
-	require.NoError(t, db.DeletePayments(true, true))
+	numPayments, err := db.DeletePayments(true, true)
+	require.NoError(t, err)
+	require.EqualValues(t, 0, numPayments)
 
 	// The failed payment is the only altered one.
 	payments[0].htlcs = 0
 	assertPayments(t, db, payments)
 
 	// Delete failed attempts for all payments.
-	require.NoError(t, db.DeletePayments(false, true))
+	numPayments, err = db.DeletePayments(false, true)
+	require.NoError(t, err)
+	require.EqualValues(t, 0, numPayments)
 
 	// The failed attempts should be deleted, except for the in-flight
 	// payment, that shouldn't be altered until it has completed.
@@ -551,12 +555,16 @@ func TestPaymentControlDeletePayments(t *testing.T) {
 	assertPayments(t, db, payments)
 
 	// Now delete all failed payments.
-	require.NoError(t, db.DeletePayments(true, false))
+	numPayments, err = db.DeletePayments(true, false)
+	require.NoError(t, err)
+	require.EqualValues(t, 1, numPayments)
 
 	assertPayments(t, db, payments[1:])
 
 	// Finally delete all completed payments.
-	require.NoError(t, db.DeletePayments(false, false))
+	numPayments, err = db.DeletePayments(false, false)
+	require.NoError(t, err)
+	require.EqualValues(t, 1, numPayments)
 
 	assertPayments(t, db, payments[2:])
 }
@@ -826,10 +826,12 @@ func (d *DB) DeletePayment(paymentHash lntypes.Hash,
 
 // DeletePayments deletes all completed and failed payments from the DB. If
 // failedOnly is set, only failed payments will be considered for deletion. If
-// failedHtlsOnly is set, the payment itself won't be deleted, only failed HTLC
-// attempts.
-func (d *DB) DeletePayments(failedOnly, failedHtlcsOnly bool) error {
-	return kvdb.Update(d, func(tx kvdb.RwTx) error {
+// failedHtlcsOnly is set, the payment itself won't be deleted, only failed HTLC
+// attempts. The method returns the number of deleted payments, which is always
+// 0 if failedHtlcsOnly is set.
+func (d *DB) DeletePayments(failedOnly, failedHtlcsOnly bool) (int, error) {
+	var numPayments int
+	err := kvdb.Update(d, func(tx kvdb.RwTx) error {
 		payments := tx.ReadWriteBucket(paymentsRootBucket)
 		if payments == nil {
 			return nil
@@ -906,6 +908,7 @@ func (d *DB) DeletePayments(failedOnly, failedHtlcsOnly bool) error {
 		}
 
 		deleteIndexes = append(deleteIndexes, seqNrs...)
+		numPayments++
 		return nil
 	})
 	if err != nil {
@@ -956,7 +959,14 @@ func (d *DB) DeletePayments(failedOnly, failedHtlcsOnly bool) error {
 		}
 
 		return nil
-	}, func() {})
+	}, func() {
+		numPayments = 0
+	})
+	if err != nil {
+		return 0, err
+	}
+
+	return numPayments, nil
 }
 
 // fetchSequenceNumbers fetches all the sequence numbers associated with a
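A small hypothetical usage sketch of the new counting semantics (only DeletePayments and its documented behavior come from this diff; the helper and db value are assumed):

// pruneFailedPayments is a hypothetical helper operating on an open
// *channeldb.DB.
func pruneFailedPayments(db *channeldb.DB) error {
	// Delete failed payments outright; the count reflects whole payments
	// removed.
	numDeleted, err := db.DeletePayments(true, false)
	if err != nil {
		return err
	}
	fmt.Printf("deleted %d failed payments\n", numDeleted)

	// With failedHtlcsOnly=true only failed HTLC attempts are pruned, so
	// per the new doc comment the returned payment count is always 0.
	numDeleted, err = db.DeletePayments(false, true)
	if err != nil {
		return err
	}
	fmt.Printf("deleted %d payments (always 0 in this mode)\n", numDeleted)

	return nil
}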
@@ -9,7 +9,9 @@ import (
 )
 
 var sendCustomCommand = cli.Command{
-	Name: "sendcustom",
+	Name:     "sendcustom",
+	Category: "Peers",
+	Usage:    "Send a custom p2p wire message to a peer",
 	Flags: []cli.Flag{
 		cli.StringFlag{
 			Name: "peer",
@@ -41,20 +43,24 @@ func sendCustom(ctx *cli.Context) error {
 		return err
 	}
 
-	_, err = client.SendCustomMessage(
-		ctxc,
-		&lnrpc.SendCustomMessageRequest{
+	resp, err := client.SendCustomMessage(
+		ctxc, &lnrpc.SendCustomMessageRequest{
 			Peer: peer,
 			Type: uint32(msgType),
 			Data: data,
 		},
 	)
 
+	printRespJSON(resp)
+
 	return err
 }
 
 var subscribeCustomCommand = cli.Command{
-	Name: "subscribecustom",
+	Name:     "subscribecustom",
+	Category: "Peers",
+	Usage: "Subscribe to incoming custom p2p wire messages from all " +
+		"peers",
 	Action: actionDecorator(subscribeCustom),
 }
@@ -64,8 +70,7 @@ func subscribeCustom(ctx *cli.Context) error {
 	defer cleanUp()
 
 	stream, err := client.SubscribeCustomMessages(
-		ctxc,
-		&lnrpc.SubscribeCustomMessagesRequest{},
+		ctxc, &lnrpc.SubscribeCustomMessagesRequest{},
 	)
 	if err != nil {
 		return err
@@ -26,6 +26,7 @@ import (
 	"github.com/lightningnetwork/lnd/routing/route"
 	"github.com/urfave/cli"
 	"google.golang.org/grpc"
+	"google.golang.org/protobuf/proto"
 )
 
 const (
@@ -1781,11 +1782,7 @@ func deletePayments(ctx *cli.Context) error {
 		failedHTLCsOnly  = ctx.Bool("failed_htlcs_only")
 		includeNonFailed = ctx.Bool("include_non_failed")
 		err              error
-		okMsg            = struct {
-			OK bool `json:"ok"`
-		}{
-			OK: true,
-		}
+		resp             proto.Message
 	)
 
 	// We pack two RPCs into the same CLI so there are a few non-valid
@@ -1812,10 +1809,12 @@ func deletePayments(ctx *cli.Context) error {
 				err)
 		}
 
-		_, err = client.DeletePayment(ctxc, &lnrpc.DeletePaymentRequest{
-			PaymentHash:     paymentHash,
-			FailedHtlcsOnly: failedHTLCsOnly,
-		})
+		resp, err = client.DeletePayment(
+			ctxc, &lnrpc.DeletePaymentRequest{
+				PaymentHash:     paymentHash,
+				FailedHtlcsOnly: failedHTLCsOnly,
+			},
+		)
 		if err != nil {
 			return fmt.Errorf("error deleting single payment: %w",
 				err)
@@ -1832,7 +1831,7 @@ func deletePayments(ctx *cli.Context) error {
 
 		fmt.Printf("Removing %s payments, this might take a while...\n",
 			what)
-		_, err = client.DeleteAllPayments(
+		resp, err = client.DeleteAllPayments(
 			ctxc, &lnrpc.DeleteAllPaymentsRequest{
 				AllPayments:        includeNonFailed,
 				FailedPaymentsOnly: !includeNonFailed,
@@ -1844,9 +1843,7 @@ func deletePayments(ctx *cli.Context) error {
 		}
 	}
 
-	// Users are confused by empty JSON outputs so let's return a simple OK
-	// instead of just printing the empty response RPC message.
-	printJSON(okMsg)
+	printJSON(resp)
 
 	return nil
 }
@@ -2128,11 +2128,13 @@ func stopDaemon(ctx *cli.Context) error {
 	client, cleanUp := getClient(ctx)
 	defer cleanUp()
 
-	_, err := client.StopDaemon(ctxc, &lnrpc.StopRequest{})
+	resp, err := client.StopDaemon(ctxc, &lnrpc.StopRequest{})
 	if err != nil {
 		return err
 	}
 
+	printRespJSON(resp)
+
 	return nil
 }
@@ -2997,10 +2999,12 @@ func restoreChanBackup(ctx *cli.Context) error {
 
 	req.Backup = backups.Backup
 
-	_, err = client.RestoreChannelBackups(ctxc, &req)
+	resp, err := client.RestoreChannelBackups(ctxc, &req)
 	if err != nil {
 		return fmt.Errorf("unable to restore chan backups: %w", err)
 	}
 
+	printRespJSON(resp)
+
 	return nil
 }
@@ -83,6 +83,24 @@
 
 ## RPC Updates
 
+* Some RPCs that previously returned an empty response message now return
+  [a short status
+  message](https://github.com/lightningnetwork/lnd/pull/7762) to help
+  command-line users confirm that the command executed successfully and that
+  any background work was initiated (see the example after this list). The
+  following CLI commands no longer return just an empty response (`{}`):
+  * `lncli wallet releaseoutput` (`WalletKit.ReleaseOutput` RPC)
+  * `lncli wallet accounts import-pubkey` (`WalletKit.ImportPublicKey` RPC)
+  * `lncli wallet labeltx` (`WalletKit.LabelTransaction` RPC)
+  * `lncli sendcustom` (`Lightning.SendCustomMessage` RPC)
+  * `lncli connect` (`Lightning.ConnectPeer` RPC)
+  * `lncli disconnect` (`Lightning.DisconnectPeer` RPC)
+  * `lncli stop` (`Lightning.StopDaemon` RPC)
+  * `lncli deletepayments` (`Lightning.DeleteAllPayments` RPC)
+  * `lncli abandonchannel` (`Lightning.AbandonChannel` RPC)
+  * `lncli restorechanbackup` (`Lightning.RestoreChannelBackups` RPC)
+  * `lncli verifychanbackup` (`Lightning.VerifyChanBackup` RPC)
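For instance, after this change `lncli stop` prints the new status field instead of an empty object (illustrative output; the status string itself comes from the StopDaemon change further below, the exact JSON formatting is assumed):

    $ lncli stop
    {
        "status": "shutdown initiated, check logs for progress"
    }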
 
 ## lncli Updates
 
 ## Code Health
@@ -158,6 +176,7 @@
 * CharlieZKSmith
 * Elle Mouton
 * George Tsagkarelis
+* Oliver Gugger
 * Pins
 * Viktor Tigerström
 * Ziggie
@@ -396,12 +396,22 @@ func testChannelBackupRestoreBasic(ht *lntest.HarnessTest) {
 			req := &lnrpc.RestoreChanBackupRequest{
 				Backup: backup,
 			}
-			newNode.RPC.RestoreChanBackups(req)
+			res := newNode.RPC.RestoreChanBackups(
+				req,
+			)
+			require.EqualValues(
+				st, 1, res.NumRestored,
+			)
 
 			req = &lnrpc.RestoreChanBackupRequest{
 				Backup: backup,
 			}
-			newNode.RPC.RestoreChanBackups(req)
+			res = newNode.RPC.RestoreChanBackups(
+				req,
+			)
+			require.EqualValues(
+				st, 0, res.NumRestored,
+			)
 
 			return newNode
 		}
@@ -916,9 +926,27 @@ func testChannelBackupUpdates(ht *lntest.HarnessTest) {
 		}
 	}
 
+	containsChan := func(b *lnrpc.VerifyChanBackupResponse,
+		chanPoint *lnrpc.ChannelPoint) bool {
+
+		hash, err := lnrpc.GetChanPointFundingTxid(chanPoint)
+		require.NoError(ht, err)
+
+		chanPointStr := fmt.Sprintf("%s:%d", hash.String(),
+			chanPoint.OutputIndex)
+
+		for idx := range b.ChanPoints {
+			if b.ChanPoints[idx] == chanPointStr {
+				return true
+			}
+		}
+
+		return false
+	}
+
 	// assertBackupFileState is a helper function that we'll use to compare
 	// the on disk back up file to our currentBackup pointer above.
-	assertBackupFileState := func() {
+	assertBackupFileState := func(expectAllChannels bool) {
 		err := wait.NoError(func() error {
 			packedBackup, err := os.ReadFile(backupFilePath)
 			if err != nil {
@@ -946,7 +974,20 @@ func testChannelBackupUpdates(ht *lntest.HarnessTest) {
 				},
 			}
 
-			carol.RPC.VerifyChanBackup(snapshot)
+			res := carol.RPC.VerifyChanBackup(snapshot)
+
+			if !expectAllChannels {
+				continue
+			}
+			for idx := range chanPoints {
+				if containsChan(res, chanPoints[idx]) {
+					continue
+				}
+
+				return fmt.Errorf("backup %v doesn't "+
+					"contain chan_point: %v",
+					res.ChanPoints, chanPoints[idx])
+			}
 		}
 
 		return nil
@@ -961,7 +1002,7 @@ func testChannelBackupUpdates(ht *lntest.HarnessTest) {
 
 	// The on disk file should also exactly match the latest backup that we
 	// have.
-	assertBackupFileState()
+	assertBackupFileState(true)
 
 	// Next, we'll close the channels one by one. After each channel
 	// closure, we should get a notification, and the on-disk state should
@@ -988,14 +1029,14 @@ func testChannelBackupUpdates(ht *lntest.HarnessTest) {
 			// Now that the channel's been fully resolved, we
 			// expect another notification.
 			assertBackupNtfns(1)
-			assertBackupFileState()
+			assertBackupFileState(false)
 		} else {
 			ht.CloseChannel(alice, chanPoint)
 
 			// We should get a single notification after closing,
 			// and the on-disk state should match this latest
 			// notifications.
 			assertBackupNtfns(1)
-			assertBackupFileState()
+			assertBackupFileState(false)
 		}
 	}
 }
@@ -1414,7 +1455,8 @@ func chanRestoreViaRPC(ht *lntest.HarnessTest, password []byte,
 		nil,
 	)
 	req := &lnrpc.RestoreChanBackupRequest{Backup: backup}
-	newNode.RPC.RestoreChanBackups(req)
+	res := newNode.RPC.RestoreChanBackups(req)
+	require.Greater(ht, res.NumRestored, uint32(0))
 
 	return newNode
 }
File diff suppressed because it is too large.
@@ -643,6 +643,8 @@ message SendCustomMessageRequest {
 }
 
 message SendCustomMessageResponse {
+    // The status of the send operation.
+    string status = 1;
 }
 
 message Utxo {
@@ -1317,6 +1319,8 @@ message ConnectPeerRequest {
     uint64 timeout = 3;
 }
 
 message ConnectPeerResponse {
+    // The status of the connect operation.
+    string status = 1;
 }
 
 message DisconnectPeerRequest {
@@ -1324,6 +1328,8 @@ message DisconnectPeerRequest {
     string pub_key = 1;
 }
 
 message DisconnectPeerResponse {
+    // The status of the disconnect operation.
+    string status = 1;
 }
 
 message HTLC {
@@ -3542,6 +3548,8 @@ message NetworkInfo {
 message StopRequest {
 }
 
 message StopResponse {
+    // The status of the stop operation.
+    string status = 1;
 }
 
 message GraphTopologySubscription {
@@ -4366,9 +4374,13 @@ message DeleteAllPaymentsRequest {
 }
 
 message DeletePaymentResponse {
+    // The status of the delete operation.
+    string status = 1;
 }
 
 message DeleteAllPaymentsResponse {
+    // The status of the delete operation.
+    string status = 1;
 }
 
 message AbandonChannelRequest {
@@ -4385,6 +4397,8 @@ message AbandonChannelRequest {
 }
 
 message AbandonChannelResponse {
+    // The status of the abandon operation.
+    string status = 1;
 }
 
 message DebugLevelRequest {
@@ -4724,12 +4738,15 @@ message RestoreChanBackupRequest {
     }
 }
 
 message RestoreBackupResponse {
+    // The number of channels successfully restored.
+    uint32 num_restored = 1;
 }
 
 message ChannelBackupSubscription {
 }
 
 message VerifyChanBackupResponse {
+    repeated string chan_points = 1;
 }
 
 message MacaroonPermission {
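A hedged sketch of how a gRPC client could use the newly populated VerifyChanBackupResponse. Only the message fields above come from this diff; the helper, client setup and backup bytes are assumed:

// listBackupChannels is a hypothetical helper; client is an
// lnrpc.LightningClient and multiBackup holds a packed multi-channel
// backup obtained elsewhere.
func listBackupChannels(ctx context.Context, client lnrpc.LightningClient,
	multiBackup []byte) error {

	resp, err := client.VerifyChanBackup(ctx, &lnrpc.ChanBackupSnapshot{
		MultiChanBackup: &lnrpc.MultiChanBackup{
			MultiChanBackup: multiBackup,
		},
	})
	if err != nil {
		return err
	}

	// The response now lists the channel points contained in the backup
	// instead of being empty.
	for _, cp := range resp.ChanPoints {
		fmt.Println("backup contains channel:", cp)
	}

	return nil
}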
@@ -3263,7 +3263,13 @@
       }
     },
     "lnrpcAbandonChannelResponse": {
-      "type": "object"
+      "type": "object",
+      "properties": {
+        "status": {
+          "type": "string",
+          "description": "The status of the abandon operation."
+        }
+      }
     },
     "lnrpcAddInvoiceResponse": {
       "type": "object",
@@ -4593,7 +4599,13 @@
       }
     },
     "lnrpcConnectPeerResponse": {
-      "type": "object"
+      "type": "object",
+      "properties": {
+        "status": {
+          "type": "string",
+          "description": "The status of the connect operation."
+        }
+      }
     },
     "lnrpcCustomMessage": {
       "type": "object",
@@ -4635,7 +4647,13 @@
       }
     },
     "lnrpcDeleteAllPaymentsResponse": {
-      "type": "object"
+      "type": "object",
+      "properties": {
+        "status": {
+          "type": "string",
+          "description": "The status of the delete operation."
+        }
+      }
     },
     "lnrpcDeleteMacaroonIDResponse": {
       "type": "object",
@@ -4647,10 +4665,22 @@
       }
     },
     "lnrpcDeletePaymentResponse": {
-      "type": "object"
+      "type": "object",
+      "properties": {
+        "status": {
+          "type": "string",
+          "description": "The status of the delete operation."
+        }
+      }
     },
     "lnrpcDisconnectPeerResponse": {
-      "type": "object"
+      "type": "object",
+      "properties": {
+        "status": {
+          "type": "string",
+          "description": "The status of the disconnect operation."
+        }
+      }
     },
     "lnrpcEdgeLocator": {
       "type": "object",
@@ -6922,7 +6952,14 @@
       "description": " - ANCHOR: We resolved an anchor output.\n - INCOMING_HTLC: We are resolving an incoming htlc on chain. This if this htlc is\nclaimed, we swept the incoming htlc with the preimage. If it is timed\nout, our peer swept the timeout path.\n - OUTGOING_HTLC: We are resolving an outgoing htlc on chain. If this htlc is claimed,\nthe remote party swept the htlc with the preimage. If it is timed out,\nwe swept it with the timeout path.\n - COMMIT: We force closed and need to sweep our time locked commitment output."
     },
     "lnrpcRestoreBackupResponse": {
-      "type": "object"
+      "type": "object",
+      "properties": {
+        "num_restored": {
+          "type": "integer",
+          "format": "int64",
+          "description": "The number of channels successfully restored."
+        }
+      }
     },
     "lnrpcRestoreChanBackupRequest": {
       "type": "object",
@@ -7133,7 +7170,13 @@
       }
     },
     "lnrpcSendCustomMessageResponse": {
-      "type": "object"
+      "type": "object",
+      "properties": {
+        "status": {
+          "type": "string",
+          "description": "The status of the send operation."
+        }
+      }
     },
     "lnrpcSendManyRequest": {
       "type": "object",
@@ -7338,7 +7381,13 @@
       "type": "object"
     },
     "lnrpcStopResponse": {
-      "type": "object"
+      "type": "object",
+      "properties": {
+        "status": {
+          "type": "string",
+          "description": "The status of the stop operation."
+        }
+      }
     },
     "lnrpcStreamAuth": {
       "type": "object",
@@ -7485,7 +7534,15 @@
       }
     },
     "lnrpcVerifyChanBackupResponse": {
-      "type": "object"
+      "type": "object",
+      "properties": {
+        "chan_points": {
+          "type": "array",
+          "items": {
+            "type": "string"
+          }
+        }
+      }
     },
     "lnrpcVerifyMessageRequest": {
       "type": "object",
File diff suppressed because it is too large.
@@ -414,6 +414,8 @@ message ReleaseOutputRequest {
 }
 
 message ReleaseOutputResponse {
+    // The status of the release operation.
+    string status = 1;
 }
 
 message KeyReq {
@@ -699,6 +701,8 @@ message ImportPublicKeyRequest {
     AddressType address_type = 2;
 }
 
 message ImportPublicKeyResponse {
+    // The status of the import operation.
+    string status = 1;
 }
 
 message ImportTapscriptRequest {
@@ -1318,6 +1322,8 @@ message LabelTransactionRequest {
 }
 
 message LabelTransactionResponse {
+    // The status of the label operation.
+    string status = 1;
 }
 
 // The possible change address types for default accounts and single imported
@@ -1692,7 +1692,13 @@
       }
     },
     "walletrpcImportPublicKeyResponse": {
-      "type": "object"
+      "type": "object",
+      "properties": {
+        "status": {
+          "type": "string",
+          "description": "The status of the import operation."
+        }
+      }
     },
     "walletrpcImportTapscriptRequest": {
       "type": "object",
@@ -1764,7 +1770,13 @@
       }
     },
     "walletrpcLabelTransactionResponse": {
-      "type": "object"
+      "type": "object",
+      "properties": {
+        "status": {
+          "type": "string",
+          "description": "The status of the label operation."
+        }
+      }
     },
     "walletrpcLeaseOutputRequest": {
       "type": "object",
@@ -2002,7 +2014,13 @@
       }
     },
     "walletrpcReleaseOutputResponse": {
-      "type": "object"
+      "type": "object",
+      "properties": {
+        "status": {
+          "type": "string",
+          "description": "The status of the release operation."
+        }
+      }
     },
     "walletrpcRemoveTransactionResponse": {
       "type": "object",
@@ -536,7 +536,9 @@ func (w *WalletKit) ReleaseOutput(ctx context.Context,
 		return nil, err
 	}
 
-	return &ReleaseOutputResponse{}, nil
+	return &ReleaseOutputResponse{
+		Status: fmt.Sprintf("output %v released", op.String()),
+	}, nil
 }
 
 // ListLeases returns a list of all currently locked utxos.
@@ -1443,7 +1445,10 @@ func (w *WalletKit) LabelTransaction(ctx context.Context,
 	}
 
 	err = w.cfg.Wallet.LabelTransaction(*hash, req.Label, req.Overwrite)
-	return &LabelTransactionResponse{}, err
+
+	return &LabelTransactionResponse{
+		Status: fmt.Sprintf("transaction label '%s' added", req.Label),
+	}, err
 }
 
 // FundPsbt creates a fully populated PSBT that contains enough inputs to fund
@@ -2896,7 +2901,10 @@ func (w *WalletKit) ImportPublicKey(_ context.Context,
 		return nil, err
 	}
 
-	return &ImportPublicKeyResponse{}, nil
+	return &ImportPublicKeyResponse{
+		Status: fmt.Sprintf("public key %x imported",
+			pubKey.SerializeCompressed()),
+	}, nil
 }
 
 // ImportTapscript imports a Taproot script and internal key and adds the
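A hedged client-side sketch of the new status field in action. LabelTransactionRequest and the Status field come from this change; the helper, client value and txid bytes are assumed:

// labelTx is a hypothetical helper; client is a walletrpc.WalletKitClient
// and txid the raw transaction hash bytes.
func labelTx(ctx context.Context, client walletrpc.WalletKitClient,
	txid []byte) error {

	resp, err := client.LabelTransaction(
		ctx, &walletrpc.LabelTransactionRequest{
			Txid:  txid,
			Label: "channel open with carol",
		},
	)
	if err != nil {
		return err
	}

	// Prints e.g.: transaction label 'channel open with carol' added.
	fmt.Println(resp.Status)

	return nil
}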
rpcserver.go
@@ -1806,23 +1806,24 @@ func (r *rpcServer) ConnectPeer(ctx context.Context,
 	// request.
 	if in.Timeout != 0 {
 		timeout = time.Duration(in.Timeout) * time.Second
-		rpcsLog.Debugf(
-			"[connectpeer] connection timeout is set to %v",
-			timeout,
-		)
+		rpcsLog.Debugf("[connectpeer] connection timeout is set to %v",
+			timeout)
 	}
 
 	if err := r.server.ConnectToPeer(
 		peerAddr, in.Perm, timeout,
 	); err != nil {
-		rpcsLog.Errorf(
-			"[connectpeer]: error connecting to peer: %v", err,
-		)
+		rpcsLog.Errorf("[connectpeer]: error connecting to peer: %v",
+			err)
 		return nil, err
 	}
 
 	rpcsLog.Debugf("Connected to peer: %v", peerAddr.String())
-	return &lnrpc.ConnectPeerResponse{}, nil
+
+	return &lnrpc.ConnectPeerResponse{
+		Status: fmt.Sprintf("connection to %v initiated",
+			peerAddr.String()),
+	}, nil
 }
 
 // DisconnectPeer attempts to disconnect one peer from another identified by a
@@ -1884,7 +1885,9 @@ func (r *rpcServer) DisconnectPeer(ctx context.Context,
 		return nil, fmt.Errorf("unable to disconnect peer: %w", err)
 	}
 
-	return &lnrpc.DisconnectPeerResponse{}, nil
+	return &lnrpc.DisconnectPeerResponse{
+		Status: "disconnect initiated",
+	}, nil
 }
 
 // newFundingShimAssembler returns a new fully populated
@@ -3165,7 +3168,9 @@ func (r *rpcServer) AbandonChannel(_ context.Context,
 		return nil, err
 	}
 
-	return &lnrpc.AbandonChannelResponse{}, nil
+	return &lnrpc.AbandonChannelResponse{
+		Status: fmt.Sprintf("channel %v abandoned", chanPoint.String()),
+	}, nil
 }
 
 // GetInfo returns general information concerning the lightning node including
@@ -7035,7 +7040,10 @@ func (r *rpcServer) StopDaemon(_ context.Context,
 	}
 
 	r.interceptor.RequestShutdown()
-	return &lnrpc.StopResponse{}, nil
+
+	return &lnrpc.StopResponse{
+		Status: "shutdown initiated, check logs for progress",
+	}, nil
 }
 
 // SubscribeChannelGraph launches a streaming RPC that allows the caller to
@@ -7286,7 +7294,9 @@ func (r *rpcServer) DeletePayment(ctx context.Context,
 		return nil, err
 	}
 
-	return &lnrpc.DeletePaymentResponse{}, nil
+	return &lnrpc.DeletePaymentResponse{
+		Status: "payment deleted",
+	}, nil
 }
 
 // DeleteAllPayments deletes all outgoing payments from DB.
@@ -7319,14 +7329,17 @@ func (r *rpcServer) DeleteAllPayments(ctx context.Context,
 		"failed_htlcs_only=%v", req.FailedPaymentsOnly,
 		req.FailedHtlcsOnly)
 
-	err := r.server.miscDB.DeletePayments(
+	numDeletedPayments, err := r.server.miscDB.DeletePayments(
 		req.FailedPaymentsOnly, req.FailedHtlcsOnly,
 	)
 	if err != nil {
 		return nil, err
 	}
 
-	return &lnrpc.DeleteAllPaymentsResponse{}, nil
+	return &lnrpc.DeleteAllPaymentsResponse{
+		Status: fmt.Sprintf("%v payments deleted, failed_htlcs_only=%v",
+			numDeletedPayments, req.FailedHtlcsOnly),
+	}, nil
 }
 
 // DebugLevel allows a caller to programmatically set the logging verbosity of
@@ -7974,6 +7987,10 @@ func (r *rpcServer) ExportChannelBackup(ctx context.Context,
 func (r *rpcServer) VerifyChanBackup(ctx context.Context,
 	in *lnrpc.ChanBackupSnapshot) (*lnrpc.VerifyChanBackupResponse, error) {
 
+	var (
+		channels []chanbackup.Single
+		err      error
+	)
 	switch {
 	// If neither a Single or Multi has been specified, then we have nothing
 	// to verify.
@@ -8004,7 +8021,7 @@ func (r *rpcServer) VerifyChanBackup(ctx context.Context,
 		// With our PackedSingles created, we'll attempt to unpack the
 		// backup. If this fails, then we know the backup is invalid for
 		// some reason.
-		_, err := chanBackup.Unpack(r.server.cc.KeyRing)
+		channels, err = chanBackup.Unpack(r.server.cc.KeyRing)
 		if err != nil {
 			return nil, fmt.Errorf("invalid single channel "+
 				"backup: %v", err)
@@ -8018,14 +8035,20 @@ func (r *rpcServer) VerifyChanBackup(ctx context.Context,
 
 		// We'll now attempt to unpack the Multi. If this fails, then we
 		// know it's invalid.
-		_, err := packedMulti.Unpack(r.server.cc.KeyRing)
+		multi, err := packedMulti.Unpack(r.server.cc.KeyRing)
 		if err != nil {
 			return nil, fmt.Errorf("invalid multi channel backup: "+
 				"%v", err)
 		}
+
+		channels = multi.StaticBackups
 	}
 
-	return &lnrpc.VerifyChanBackupResponse{}, nil
+	return &lnrpc.VerifyChanBackupResponse{
+		ChanPoints: fn.Map(func(c chanbackup.Single) string {
+			return c.FundingOutpoint.String()
+		}, channels),
+	}, nil
 }
 
 // createBackupSnapshot converts the passed Single backup into a snapshot which
@@ -8142,6 +8165,10 @@ func (r *rpcServer) RestoreChannelBackups(ctx context.Context,
 
 	// We'll accept either a list of Single backups, or a single Multi
 	// backup which contains several single backups.
+	var (
+		numRestored int
+		err         error
+	)
 	switch {
 	case in.GetChanBackups() != nil:
 		chanBackupsProtos := in.GetChanBackups()
@@ -8159,7 +8186,7 @@ func (r *rpcServer) RestoreChannelBackups(ctx context.Context,
 		// write the new backups to disk, and then attempt to connect
 		// out to any peers that we know of which were our prior
 		// channel peers.
-		err := chanbackup.UnpackAndRecoverSingles(
+		numRestored, err = chanbackup.UnpackAndRecoverSingles(
 			chanbackup.PackedSingles(packedBackups),
 			r.server.cc.KeyRing, chanRestorer, r.server,
 		)
@@ -8176,7 +8203,7 @@ func (r *rpcServer) RestoreChannelBackups(ctx context.Context,
 		// out to any peers that we know of which were our prior
 		// channel peers.
 		packedMulti := chanbackup.PackedMulti(packedMultiBackup)
-		err := chanbackup.UnpackAndRecoverMulti(
+		numRestored, err = chanbackup.UnpackAndRecoverMulti(
 			packedMulti, r.server.cc.KeyRing, chanRestorer,
 			r.server,
 		)
@@ -8186,7 +8213,9 @@ func (r *rpcServer) RestoreChannelBackups(ctx context.Context,
 		}
 	}
 
-	return &lnrpc.RestoreBackupResponse{}, nil
+	return &lnrpc.RestoreBackupResponse{
+		NumRestored: uint32(numRestored),
+	}, nil
 }
 
 // SubscribeChannelBackups allows a client to sub-subscribe to the most up to
@@ -8789,8 +8818,9 @@ func (r *rpcServer) RegisterRPCMiddleware(
 }
 
 // SendCustomMessage sends a custom peer message.
-func (r *rpcServer) SendCustomMessage(ctx context.Context, req *lnrpc.SendCustomMessageRequest) (
-	*lnrpc.SendCustomMessageResponse, error) {
+func (r *rpcServer) SendCustomMessage(_ context.Context,
+	req *lnrpc.SendCustomMessageRequest) (*lnrpc.SendCustomMessageResponse,
+	error) {
 
 	peer, err := route.NewVertexFromBytes(req.Peer)
 	if err != nil {
@@ -8801,18 +8831,21 @@ func (r *rpcServer) SendCustomMessage(ctx context.Context,
 		peer, lnwire.MessageType(req.Type), req.Data,
 	)
 	switch {
-	case err == ErrPeerNotConnected:
+	case errors.Is(err, ErrPeerNotConnected):
 		return nil, status.Error(codes.NotFound, err.Error())
 	case err != nil:
 		return nil, err
 	}
 
-	return &lnrpc.SendCustomMessageResponse{}, nil
+	return &lnrpc.SendCustomMessageResponse{
+		Status: "message sent successfully",
+	}, nil
 }
 
 // SubscribeCustomMessages subscribes to a stream of incoming custom peer
 // messages.
-func (r *rpcServer) SubscribeCustomMessages(req *lnrpc.SubscribeCustomMessagesRequest,
+func (r *rpcServer) SubscribeCustomMessages(
+	_ *lnrpc.SubscribeCustomMessagesRequest,
 	server lnrpc.Lightning_SubscribeCustomMessagesServer) error {
 
 	client, err := r.server.SubscribeCustomMessages()
@@ -8845,17 +8878,17 @@ func (r *rpcServer) SubscribeCustomMessages(
 }
 
 // ListAliases returns the set of all aliases we have ever allocated along with
-// their base SCID's and possibly a separate confirmed SCID in the case of
+// their base SCIDs and possibly a separate confirmed SCID in the case of
 // zero-conf.
-func (r *rpcServer) ListAliases(ctx context.Context,
-	in *lnrpc.ListAliasesRequest) (*lnrpc.ListAliasesResponse, error) {
+func (r *rpcServer) ListAliases(_ context.Context,
+	_ *lnrpc.ListAliasesRequest) (*lnrpc.ListAliasesResponse, error) {
 
 	// Fetch the map of all aliases.
 	mapAliases := r.server.aliasMgr.ListAliases()
 
 	// Fill out the response. This does not include the zero-conf confirmed
-	// SCID. Doing so would require more database lookups and it can be
-	// cross-referenced with the output of listchannels/closedchannels.
+	// SCID. Doing so would require more database lookups, and it can be
+	// cross-referenced with the output of ListChannels/ClosedChannels.
 	resp := &lnrpc.ListAliasesResponse{
 		AliasMaps: make([]*lnrpc.AliasMap, 0),
 	}
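To illustrate the caller-visible effect of the DeleteAllPayments change, a hypothetical gRPC client now receives a populated status. The request fields and Status come from this diff; the helper and client value are assumed:

// deleteFailedPayments is a hypothetical helper; client is an
// lnrpc.LightningClient.
func deleteFailedPayments(ctx context.Context,
	client lnrpc.LightningClient) error {

	resp, err := client.DeleteAllPayments(
		ctx, &lnrpc.DeleteAllPaymentsRequest{
			FailedPaymentsOnly: true,
		},
	)
	if err != nil {
		return err
	}

	// Prints e.g.: 2 payments deleted, failed_htlcs_only=false.
	fmt.Println(resp.Status)

	return nil
}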
@@ -2248,7 +2248,7 @@ func (s *server) Start() error {
 			chainArb: s.chainArb,
 		}
 		if len(s.chansToRestore.PackedSingleChanBackups) != 0 {
-			err := chanbackup.UnpackAndRecoverSingles(
+			_, err := chanbackup.UnpackAndRecoverSingles(
 				s.chansToRestore.PackedSingleChanBackups,
 				s.cc.KeyRing, chanRestorer, s,
 			)
@@ -2259,7 +2259,7 @@ func (s *server) Start() error {
 		}
 		if len(s.chansToRestore.PackedMultiChanBackup) != 0 {
-			err := chanbackup.UnpackAndRecoverMulti(
+			_, err := chanbackup.UnpackAndRecoverMulti(
 				s.chansToRestore.PackedMultiChanBackup,
 				s.cc.KeyRing, chanRestorer, s,
 			)