Mirror of https://github.com/lightningnetwork/lnd.git, synced 2025-05-29 17:19:33 +02:00.
multi: rename local and remote chan DBs
In preparation for no longer having separate local and remote versions of the database, we rename the variables to reflect their actual function. In the case of the RPC server we even use the channel graph directly instead of the DB instance. This should allow us to extract the channel graph into its own, separate database (perhaps with better access characteristics) in the future.
parent abf3942228
commit 9138c8abac
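
The rename itself is mechanical, but the RPC-server part of the message is worth spelling out: call sites that used to reach through the full *channeldb.DB handle now receive the *channeldb.ChannelGraph directly. A toy, self-contained sketch of that dependency narrowing (ChannelGraph, DB and the describeGraph helpers below are illustrative stand-ins, not lnd's real API):

	package main

	import "fmt"

	// ChannelGraph stands in for channeldb.ChannelGraph.
	type ChannelGraph struct{ numNodes int }

	func (g *ChannelGraph) NumNodes() int { return g.numNodes }

	// DB stands in for channeldb.DB, which can hand out its graph.
	type DB struct{ graph ChannelGraph }

	func (db *DB) ChannelGraph() *ChannelGraph { return &db.graph }

	// Before: the caller took the whole DB and dereferenced the graph itself.
	func describeGraphOld(db *DB) string {
		return fmt.Sprintf("%d nodes", db.ChannelGraph().NumNodes())
	}

	// After: the caller depends only on the graph, the narrowest type it
	// actually needs, which is what lets the graph move to its own database.
	func describeGraphNew(graph *ChannelGraph) string {
		return fmt.Sprintf("%d nodes", graph.NumNodes())
	}

	func main() {
		db := &DB{graph: ChannelGraph{numNodes: 42}}
		fmt.Println(describeGraphOld(db))
		fmt.Println(describeGraphNew(db.ChannelGraph()))
	}
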
@@ -68,11 +68,13 @@ type Config struct {
 	// LtcdMode defines settings for connecting to an ltcd node.
 	LtcdMode *lncfg.Btcd

-	// LocalChanDB is a pointer to the local backing channel database.
-	LocalChanDB *channeldb.DB
+	// HeightHintDB is a pointer to the database that stores the height
+	// hints.
+	HeightHintDB *channeldb.DB

-	// RemoteChanDB is a pointer to the remote backing channel database.
-	RemoteChanDB *channeldb.DB
+	// ChanStateDB is a pointer to the database that stores the channel
+	// state.
+	ChanStateDB *channeldb.DB

 	// BlockCacheSize is the size (in bytes) of blocks kept in memory.
 	BlockCacheSize uint64
@@ -304,7 +306,7 @@ func NewChainControl(cfg *Config, blockCache *blockcache.BlockCache) (

 	// Initialize the height hint cache within the chain directory.
 	hintCache, err := chainntnfs.NewHeightHintCache(
-		heightHintCacheConfig, cfg.LocalChanDB,
+		heightHintCacheConfig, cfg.HeightHintDB,
 	)
 	if err != nil {
 		return nil, nil, fmt.Errorf("unable to initialize height hint "+
@@ -684,7 +686,7 @@ func NewChainControl(cfg *Config, blockCache *blockcache.BlockCache) (
 	// Create, and start the lnwallet, which handles the core payment
 	// channel logic, and exposes control via proxy state machines.
 	walletCfg := lnwallet.Config{
-		Database: cfg.RemoteChanDB,
+		Database: cfg.ChanStateDB,
 		Notifier: cc.ChainNotifier,
 		WalletController: wc,
 		Signer: cc.Signer,

config.go (13 lines changed)

@@ -1489,12 +1489,13 @@ func ValidateConfig(cfg Config, usageMessage string,
 	return &cfg, err
 }

-// localDatabaseDir returns the default directory where the
-// local bolt db files are stored.
-func (c *Config) localDatabaseDir() string {
-	return filepath.Join(c.DataDir,
-		defaultGraphSubDirname,
-		lncfg.NormalizeNetwork(c.ActiveNetParams.Name))
+// graphDatabaseDir returns the default directory where the local bolt graph db
+// files are stored.
+func (c *Config) graphDatabaseDir() string {
+	return filepath.Join(
+		c.DataDir, defaultGraphSubDirname,
+		lncfg.NormalizeNetwork(c.ActiveNetParams.Name),
+	)
 }

 // CleanAndExpandPath expands environment variables and leading ~ in the

lncfg/db.go (27 lines changed)

@@ -79,22 +79,21 @@ func (db *DB) Init(ctx context.Context, dbPath string) error {
 }

 // DatabaseBackends is a two-tuple that holds the set of active database
-// backends for the daemon. The two backends we expose are the local database
-// backend, and the remote backend. The LocalDB attribute will always be
-// populated. However, the remote DB will only be set if a replicated database
-// is active.
+// backends for the daemon. The two backends we expose are the graph database
+// backend, and the channel state backend.
+// TODO(guggero): Actually make fully remote.
 type DatabaseBackends struct {
-	// LocalDB points to the local non-replicated backend.
-	LocalDB kvdb.Backend
+	// GraphDB points to the database backend that contains the less
+	// critical data that is accessed often, such as the channel graph and
+	// chain height hints.
+	GraphDB kvdb.Backend

-	// RemoteDB points to a possibly networked replicated backend. If no
-	// replicated backend is active, then this pointer will be nil.
-	RemoteDB kvdb.Backend
+	// ChanStateDB points to a possibly networked replicated backend that
+	// contains the critical channel state related data.
+	ChanStateDB kvdb.Backend
 }

-// GetBackends returns a set of kvdb.Backends as set in the DB config. The
-// local database will ALWAYS be non-nil, while the remote database will only
-// be populated if etcd is specified.
+// GetBackends returns a set of kvdb.Backends as set in the DB config.
 func (db *DB) GetBackends(ctx context.Context, dbPath string) (
 	*DatabaseBackends, error) {

@@ -125,8 +124,8 @@ func (db *DB) GetBackends(ctx context.Context, dbPath string) (
 	}

 	return &DatabaseBackends{
-		LocalDB: localDB,
-		RemoteDB: remoteDB,
+		GraphDB: localDB,
+		ChanStateDB: remoteDB,
 	}, nil
 }
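
Even after the rename, GetBackends keeps its old contract: the graph backend is always a local bolt database, while the channel state backend is only non-nil when a replicated store such as etcd is configured (the caller checks databaseBackends.ChanStateDB == nil, as the lnd.go hunks below show). A minimal sketch of that contract, with Backend and getBackends as illustrative stand-ins for kvdb.Backend and (*DB).GetBackends:

	package main

	import "fmt"

	// Backend stands in for kvdb.Backend.
	type Backend struct{ name string }

	// DatabaseBackends mirrors the renamed two-tuple in lncfg/db.go.
	type DatabaseBackends struct {
		GraphDB     *Backend // always populated, a local bolt backend
		ChanStateDB *Backend // nil unless a replicated backend is configured
	}

	// getBackends sketches the selection contract under that assumption.
	func getBackends(etcdConfigured bool) *DatabaseBackends {
		backends := &DatabaseBackends{GraphDB: &Backend{name: "bolt"}}
		if etcdConfigured {
			backends.ChanStateDB = &Backend{name: "etcd"}
		}
		return backends
	}

	func main() {
		fmt.Println(getBackends(false).ChanStateDB == nil) // true: caller falls back
		fmt.Println(getBackends(true).ChanStateDB.name)    // etcd
	}
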

lnd.go (48 lines changed)

@@ -258,7 +258,7 @@ func Main(cfg *Config, lisCfg ListenerCfg, interceptor signal.Interceptor) error
 	// Run configuration dependent DB pre-initialization. Note that this
 	// needs to be done early and once during the startup process, before
 	// any DB access.
-	if err := cfg.DB.Init(ctx, cfg.localDatabaseDir()); err != nil {
+	if err := cfg.DB.Init(ctx, cfg.graphDatabaseDir()); err != nil {
 		return err
 	}

@@ -467,7 +467,7 @@ func Main(cfg *Config, lisCfg ListenerCfg, interceptor signal.Interceptor) error
 		// environment. This will ensure that all members of the cluster
 		// have access to the same wallet state.
 		loaderOpt = btcwallet.LoaderWithExternalWalletDB(
-			dbs.remoteChanDB.Backend,
+			dbs.chanStateDB.Backend,
 		)
 	} else {
 		// When "running locally", LND will use the bbolt wallet.db to
@@ -706,8 +706,8 @@ func Main(cfg *Config, lisCfg ListenerCfg, interceptor signal.Interceptor) error
 		LitecoindMode: cfg.LitecoindMode,
 		BtcdMode: cfg.BtcdMode,
 		LtcdMode: cfg.LtcdMode,
-		LocalChanDB: dbs.localChanDB,
-		RemoteChanDB: dbs.remoteChanDB,
+		HeightHintDB: dbs.graphDB,
+		ChanStateDB: dbs.chanStateDB,
 		PrivateWalletPw: privateWalletPw,
 		PublicWalletPw: publicWalletPw,
 		Birthday: walletInitParams.Birthday,
@@ -781,7 +781,7 @@ func Main(cfg *Config, lisCfg ListenerCfg, interceptor signal.Interceptor) error
 	if cfg.WtClient.Active {
 		var err error
 		towerClientDB, err = wtdb.OpenClientDB(
-			cfg.localDatabaseDir(), cfg.DB.Bolt.DBTimeout,
+			cfg.graphDatabaseDir(), cfg.DB.Bolt.DBTimeout,
 		)
 		if err != nil {
 			err := fmt.Errorf("unable to open watchtower client "+
@@ -1624,16 +1624,14 @@ func waitForWalletPassword(cfg *Config,
 // databaseInstances is a struct that holds all instances to the actual
 // databases that are used in lnd.
 type databaseInstances struct {
-	localChanDB *channeldb.DB
-	remoteChanDB *channeldb.DB
+	graphDB *channeldb.DB
+	chanStateDB *channeldb.DB
 }

 // initializeDatabases extracts the current databases that we'll use for normal
-// operation in the daemon. Two databases are returned: one remote and one
-// local. However, only if the replicated database is active will the remote
-// database point to a unique database. Otherwise, the local and remote DB will
-// both point to the same local database. A function closure that closes all
-// opened databases is also returned.
+// operation in the daemon. A function closure that closes all opened databases
+// is also returned.
+// TODO(guggero): Actually make fully remote.
 func initializeDatabases(ctx context.Context,
 	cfg *Config) (*databaseInstances, func(), error) {

@@ -1648,7 +1646,7 @@ func initializeDatabases(ctx context.Context,

 	startOpenTime := time.Now()

-	databaseBackends, err := cfg.DB.GetBackends(ctx, cfg.localDatabaseDir())
+	databaseBackends, err := cfg.DB.GetBackends(ctx, cfg.graphDatabaseDir())
 	if err != nil {
 		return nil, nil, fmt.Errorf("unable to obtain database "+
 			"backends: %v", err)
@@ -1660,11 +1658,11 @@ func initializeDatabases(ctx context.Context,
 		dbs = &databaseInstances{}
 		closeFuncs []func()
 	)
-	if databaseBackends.RemoteDB == nil {
+	if databaseBackends.ChanStateDB == nil {
 		// Open the channeldb, which is dedicated to storing channel,
 		// and network related metadata.
-		dbs.localChanDB, err = channeldb.CreateWithBackend(
-			databaseBackends.LocalDB,
+		dbs.graphDB, err = channeldb.CreateWithBackend(
+			databaseBackends.GraphDB,
 			channeldb.OptionSetRejectCacheSize(cfg.Caches.RejectCacheSize),
 			channeldb.OptionSetChannelCacheSize(cfg.Caches.ChannelCacheSize),
 			channeldb.OptionSetBatchCommitInterval(cfg.DB.BatchCommitInterval),
@@ -1681,10 +1679,10 @@ func initializeDatabases(ctx context.Context,
 		}

 		closeFuncs = append(closeFuncs, func() {
-			dbs.localChanDB.Close()
+			dbs.graphDB.Close()
 		})

-		dbs.remoteChanDB = dbs.localChanDB
+		dbs.chanStateDB = dbs.graphDB
 	} else {
 		ltndLog.Infof("Database replication is available! Creating " +
 			"local and remote channeldb instances")
@@ -1692,8 +1690,8 @@ func initializeDatabases(ctx context.Context,
 		// Otherwise, we'll open two instances, one for the state we
 		// only need locally, and the other for things we want to
 		// ensure are replicated.
-		dbs.localChanDB, err = channeldb.CreateWithBackend(
-			databaseBackends.LocalDB,
+		dbs.graphDB, err = channeldb.CreateWithBackend(
+			databaseBackends.GraphDB,
 			channeldb.OptionSetRejectCacheSize(cfg.Caches.RejectCacheSize),
 			channeldb.OptionSetChannelCacheSize(cfg.Caches.ChannelCacheSize),
 			channeldb.OptionSetBatchCommitInterval(cfg.DB.BatchCommitInterval),
@@ -1713,13 +1711,13 @@ func initializeDatabases(ctx context.Context,
 		}

 		closeFuncs = append(closeFuncs, func() {
-			dbs.localChanDB.Close()
+			dbs.graphDB.Close()
 		})

 		ltndLog.Infof("Opening replicated database instance...")

-		dbs.remoteChanDB, err = channeldb.CreateWithBackend(
-			databaseBackends.RemoteDB,
+		dbs.chanStateDB, err = channeldb.CreateWithBackend(
+			databaseBackends.ChanStateDB,
 			channeldb.OptionDryRunMigration(cfg.DryRunMigration),
 			channeldb.OptionSetBatchCommitInterval(cfg.DB.BatchCommitInterval),
 		)
@@ -1728,7 +1726,7 @@ func initializeDatabases(ctx context.Context,
 			return nil, nil, err

 		case err != nil:
-			dbs.localChanDB.Close()
+			dbs.graphDB.Close()

 			err := fmt.Errorf("unable to open remote channeldb: %v", err)
 			ltndLog.Error(err)
@@ -1736,7 +1734,7 @@ func initializeDatabases(ctx context.Context,
 		}

 		closeFuncs = append(closeFuncs, func() {
-			dbs.remoteChanDB.Close()
+			dbs.chanStateDB.Close()
 		})
 	}

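
The branches above preserve the old fallback: without a replicated backend, only one channeldb instance is opened and both names alias it. A condensed, illustrative sketch of that control flow (createWithBackend and the toy types stand in for channeldb.CreateWithBackend and the real instances):

	package main

	import "fmt"

	// backend and chanDB are simplified stand-ins for kvdb.Backend and
	// *channeldb.DB, kept just small enough to show the aliasing.
	type backend struct{ name string }
	type chanDB struct{ on *backend }

	func createWithBackend(b *backend) *chanDB { return &chanDB{on: b} }

	type databaseInstances struct {
		graphDB     *chanDB
		chanStateDB *chanDB
	}

	// initialize sketches the fallback: one instance under two names unless
	// a distinct channel state backend is available.
	func initialize(graph, chanState *backend) *databaseInstances {
		dbs := &databaseInstances{graphDB: createWithBackend(graph)}
		if chanState == nil {
			// No replication: channel state lives in the graph DB.
			dbs.chanStateDB = dbs.graphDB
		} else {
			dbs.chanStateDB = createWithBackend(chanState)
		}
		return dbs
	}

	func main() {
		local := &backend{name: "bolt"}
		dbs := initialize(local, nil)
		fmt.Println(dbs.graphDB == dbs.chanStateDB) // true: both alias one DB
	}
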
@@ -44,13 +44,13 @@ type Config struct {
 	// specified.
 	DefaultCLTVExpiry uint32

-	// LocalChanDB is a global boltdb instance which is needed to access the
+	// GraphDB is a global database instance which is needed to access the
 	// channel graph.
-	LocalChanDB *channeldb.DB
+	GraphDB *channeldb.ChannelGraph

-	// RemoteChanDB is a replicatd db instance which is the same as the
-	// localdb when running without remote db.
-	RemoteChanDB *channeldb.DB
+	// ChanStateDB is a possibly replicated db instance which contains the
+	// channel state
+	ChanStateDB *channeldb.DB

 	// GenInvoiceFeatures returns a feature containing feature bits that
 	// should be advertised on freshly generated invoices.
@@ -316,8 +316,8 @@ func (s *Server) AddHoldInvoice(ctx context.Context,
 		ChainParams: s.cfg.ChainParams,
 		NodeSigner: s.cfg.NodeSigner,
 		DefaultCLTVExpiry: s.cfg.DefaultCLTVExpiry,
-		ChanDB: s.cfg.RemoteChanDB,
-		Graph: s.cfg.LocalChanDB.ChannelGraph(),
+		ChanDB: s.cfg.ChanStateDB,
+		Graph: s.cfg.GraphDB,
 		GenInvoiceFeatures: s.cfg.GenInvoiceFeatures,
 		GenAmpInvoiceFeatures: s.cfg.GenAmpInvoiceFeatures,
 	}

pilot.go (6 lines changed)

@@ -185,7 +185,7 @@ func initAutoPilot(svr *server, cfg *lncfg.AutoPilot,
 				cfg.MinConfs, lnwallet.DefaultAccountName,
 			)
 		},
-		Graph: autopilot.ChannelGraphFromDatabase(svr.localChanDB.ChannelGraph()),
+		Graph: autopilot.ChannelGraphFromDatabase(svr.graphDB),
 		Constraints: atplConstraints,
 		ConnectToPeer: func(target *btcec.PublicKey, addrs []net.Addr) (bool, error) {
 			// First, we'll check if we're already connected to the
@@ -258,7 +258,7 @@ func initAutoPilot(svr *server, cfg *lncfg.AutoPilot,
 			// We'll fetch the current state of open
 			// channels from the database to use as initial
 			// state for the auto-pilot agent.
-			activeChannels, err := svr.remoteChanDB.FetchAllChannels()
+			activeChannels, err := svr.chanStateDB.FetchAllChannels()
 			if err != nil {
 				return nil, err
 			}
@@ -282,7 +282,7 @@ func initAutoPilot(svr *server, cfg *lncfg.AutoPilot,
 		ChannelInfo: func(chanPoint wire.OutPoint) (
 			*autopilot.LocalChannel, error) {

-			channel, err := svr.remoteChanDB.FetchChannel(chanPoint)
+			channel, err := svr.chanStateDB.FetchChannel(chanPoint)
 			if err != nil {
 				return nil, err
 			}

rpcserver.go (83 lines changed)

@@ -614,12 +614,11 @@ func (r *rpcServer) addDeps(s *server, macService *macaroons.Service,
 	chanPredicate *chanacceptor.ChainedAcceptor) error {

 	// Set up router rpc backend.
-	channelGraph := s.localChanDB.ChannelGraph()
-	selfNode, err := channelGraph.SourceNode()
+	selfNode, err := s.graphDB.SourceNode()
 	if err != nil {
 		return err
 	}
-	graph := s.localChanDB.ChannelGraph()
+	graph := s.graphDB
 	routerBackend := &routerrpc.RouterBackend{
 		SelfNode: selfNode.PubKeyBytes,
 		FetchChannelCapacity: func(chanID uint64) (btcutil.Amount,
@@ -683,7 +682,7 @@ func (r *rpcServer) addDeps(s *server, macService *macaroons.Service,
 	err = subServerCgs.PopulateDependencies(
 		r.cfg, s.cc, r.cfg.networkDir, macService, atpl, invoiceRegistry,
 		s.htlcSwitch, r.cfg.ActiveNetParams.Params, s.chanRouter,
-		routerBackend, s.nodeSigner, s.localChanDB, s.remoteChanDB,
+		routerBackend, s.nodeSigner, s.graphDB, s.chanStateDB,
 		s.sweeper, tower, s.towerClient, s.anchorTowerClient,
 		r.cfg.net.ResolveTCPAddr, genInvoiceFeatures,
 		genAmpInvoiceFeatures, rpcsLog,
@@ -1509,7 +1508,7 @@ func (r *rpcServer) VerifyMessage(ctx context.Context,
 	// channels signed the message.
 	//
 	// TODO(phlip9): Require valid nodes to have capital in active channels.
-	graph := r.server.localChanDB.ChannelGraph()
+	graph := r.server.graphDB
 	_, active, err := graph.HasLightningNode(pub)
 	if err != nil {
 		return nil, fmt.Errorf("failed to query graph: %v", err)
@@ -1615,7 +1614,7 @@ func (r *rpcServer) DisconnectPeer(ctx context.Context,

 	// Next, we'll fetch the pending/active channels we have with a
 	// particular peer.
-	nodeChannels, err := r.server.remoteChanDB.FetchOpenChannels(peerPubKey)
+	nodeChannels, err := r.server.chanStateDB.FetchOpenChannels(peerPubKey)
 	if err != nil {
 		return nil, fmt.Errorf("unable to fetch channels for peer: %v", err)
 	}
@@ -2125,7 +2124,7 @@ func (r *rpcServer) CloseChannel(in *lnrpc.CloseChannelRequest,

 	// First, we'll fetch the channel as is, as we'll need to examine it
 	// regardless of if this is a force close or not.
-	channel, err := r.server.remoteChanDB.FetchChannel(*chanPoint)
+	channel, err := r.server.chanStateDB.FetchChannel(*chanPoint)
 	if err != nil {
 		return err
 	}
@@ -2403,7 +2402,7 @@ func (r *rpcServer) AbandonChannel(_ context.Context,
 		return nil, err
 	}

-	dbChan, err := r.server.remoteChanDB.FetchChannel(*chanPoint)
+	dbChan, err := r.server.chanStateDB.FetchChannel(*chanPoint)
 	switch {
 	// If the channel isn't found in the set of open channels, then we can
 	// continue on as it can't be loaded into the link/peer.
@@ -2450,13 +2449,11 @@ func (r *rpcServer) AbandonChannel(_ context.Context,
 	// court. Between any step it's possible that the users restarts the
 	// process all over again. As a result, each of the steps below are
 	// intended to be idempotent.
-	err = r.server.remoteChanDB.AbandonChannel(chanPoint, uint32(bestHeight))
+	err = r.server.chanStateDB.AbandonChannel(chanPoint, uint32(bestHeight))
 	if err != nil {
 		return nil, err
 	}
-	err = abandonChanFromGraph(
-		r.server.localChanDB.ChannelGraph(), chanPoint,
-	)
+	err = abandonChanFromGraph(r.server.graphDB, chanPoint)
 	if err != nil {
 		return nil, err
 	}
@@ -2489,7 +2486,7 @@ func (r *rpcServer) GetInfo(_ context.Context,

 	serverPeers := r.server.Peers()

-	openChannels, err := r.server.remoteChanDB.FetchAllOpenChannels()
+	openChannels, err := r.server.chanStateDB.FetchAllOpenChannels()
 	if err != nil {
 		return nil, err
 	}
@@ -2504,7 +2501,7 @@ func (r *rpcServer) GetInfo(_ context.Context,

 	inactiveChannels := uint32(len(openChannels)) - activeChannels

-	pendingChannels, err := r.server.remoteChanDB.FetchPendingChannels()
+	pendingChannels, err := r.server.chanStateDB.FetchPendingChannels()
 	if err != nil {
 		return nil, fmt.Errorf("unable to get retrieve pending "+
 			"channels: %v", err)
@@ -2905,7 +2902,7 @@ func (r *rpcServer) ChannelBalance(ctx context.Context,
 		pendingOpenRemoteBalance lnwire.MilliSatoshi
 	)

-	openChannels, err := r.server.remoteChanDB.FetchAllOpenChannels()
+	openChannels, err := r.server.chanStateDB.FetchAllOpenChannels()
 	if err != nil {
 		return nil, err
 	}
@@ -2925,7 +2922,7 @@ func (r *rpcServer) ChannelBalance(ctx context.Context,
 		}
 	}

-	pendingChannels, err := r.server.remoteChanDB.FetchPendingChannels()
+	pendingChannels, err := r.server.chanStateDB.FetchPendingChannels()
 	if err != nil {
 		return nil, err
 	}
@@ -2999,7 +2996,7 @@ func (r *rpcServer) PendingChannels(ctx context.Context,
 	// First, we'll populate the response with all the channels that are
 	// soon to be opened. We can easily fetch this data from the database
 	// and map the db struct to the proto response.
-	pendingOpenChannels, err := r.server.remoteChanDB.FetchPendingChannels()
+	pendingOpenChannels, err := r.server.chanStateDB.FetchPendingChannels()
 	if err != nil {
 		rpcsLog.Errorf("unable to fetch pending channels: %v", err)
 		return nil, err
@@ -3047,7 +3044,7 @@ func (r *rpcServer) PendingChannels(ctx context.Context,

 	// Next, we'll examine the channels that are soon to be closed so we
 	// can populate these fields within the response.
-	pendingCloseChannels, err := r.server.remoteChanDB.FetchClosedChannels(true)
+	pendingCloseChannels, err := r.server.chanStateDB.FetchClosedChannels(true)
 	if err != nil {
 		rpcsLog.Errorf("unable to fetch closed channels: %v", err)
 		return nil, err
@@ -3076,7 +3073,7 @@ func (r *rpcServer) PendingChannels(ctx context.Context,
 		// not found, or the channel itself, this channel was closed
 		// in a version before we started persisting historical
 		// channels, so we silence the error.
-		historical, err := r.server.remoteChanDB.FetchHistoricalChannel(
+		historical, err := r.server.chanStateDB.FetchHistoricalChannel(
 			&pendingClose.ChanPoint,
 		)
 		switch err {
@@ -3151,7 +3148,7 @@ func (r *rpcServer) PendingChannels(ctx context.Context,
 	// We'll also fetch all channels that are open, but have had their
 	// commitment broadcasted, meaning they are waiting for the closing
 	// transaction to confirm.
-	waitingCloseChans, err := r.server.remoteChanDB.FetchWaitingCloseChannels()
+	waitingCloseChans, err := r.server.chanStateDB.FetchWaitingCloseChannels()
 	if err != nil {
 		rpcsLog.Errorf("unable to fetch channels waiting close: %v",
 			err)
@@ -3386,7 +3383,7 @@ func (r *rpcServer) ClosedChannels(ctx context.Context,

 	resp := &lnrpc.ClosedChannelsResponse{}

-	dbChannels, err := r.server.remoteChanDB.FetchClosedChannels(false)
+	dbChannels, err := r.server.chanStateDB.FetchClosedChannels(false)
 	if err != nil {
 		return nil, err
 	}
@@ -3463,9 +3460,9 @@ func (r *rpcServer) ListChannels(ctx context.Context,

 	resp := &lnrpc.ListChannelsResponse{}

-	graph := r.server.localChanDB.ChannelGraph()
+	graph := r.server.graphDB

-	dbChannels, err := r.server.remoteChanDB.FetchAllOpenChannels()
+	dbChannels, err := r.server.chanStateDB.FetchAllOpenChannels()
 	if err != nil {
 		return nil, err
 	}
@@ -3816,7 +3813,7 @@ func (r *rpcServer) createRPCClosedChannel(
 		CloseInitiator: closeInitiator,
 	}

-	reports, err := r.server.remoteChanDB.FetchChannelReports(
+	reports, err := r.server.chanStateDB.FetchChannelReports(
 		*r.cfg.ActiveNetParams.GenesisHash, &dbChannel.ChanPoint,
 	)
 	switch err {
@@ -3921,7 +3918,7 @@ func (r *rpcServer) getInitiators(chanPoint *wire.OutPoint) (

 		// To get the close initiator for cooperative closes, we need
 		// to get the channel status from the historical channel bucket.
-		histChan, err := r.server.remoteChanDB.FetchHistoricalChannel(chanPoint)
+		histChan, err := r.server.chanStateDB.FetchHistoricalChannel(chanPoint)
 		switch {
 		// The node has upgraded from a version where we did not store
 		// historical channels, and has not closed a channel since. Do
|
||||
// the server, or client exits.
|
||||
defer channelEventSub.Cancel()
|
||||
|
||||
graph := r.server.localChanDB.ChannelGraph()
|
||||
graph := r.server.graphDB
|
||||
|
||||
for {
|
||||
select {
|
||||
@@ -4823,8 +4820,8 @@ func (r *rpcServer) AddInvoice(ctx context.Context,
 		ChainParams: r.cfg.ActiveNetParams.Params,
 		NodeSigner: r.server.nodeSigner,
 		DefaultCLTVExpiry: defaultDelta,
-		ChanDB: r.server.remoteChanDB,
-		Graph: r.server.localChanDB.ChannelGraph(),
+		ChanDB: r.server.chanStateDB,
+		Graph: r.server.graphDB,
 		GenInvoiceFeatures: func() *lnwire.FeatureVector {
 			return r.server.featureMgr.Get(feature.SetInvoice)
 		},
@@ -4949,7 +4946,7 @@ func (r *rpcServer) ListInvoices(ctx context.Context,
 		PendingOnly: req.PendingOnly,
 		Reversed: req.Reversed,
 	}
-	invoiceSlice, err := r.server.remoteChanDB.QueryInvoices(q)
+	invoiceSlice, err := r.server.chanStateDB.QueryInvoices(q)
 	if err != nil {
 		return nil, fmt.Errorf("unable to query invoices: %v", err)
 	}
@@ -5124,7 +5121,7 @@ func (r *rpcServer) DescribeGraph(ctx context.Context,
 	// Obtain the pointer to the global singleton channel graph, this will
 	// provide a consistent view of the graph due to bolt db's
 	// transactional model.
-	graph := r.server.localChanDB.ChannelGraph()
+	graph := r.server.graphDB

 	// First iterate through all the known nodes (connected or unconnected
 	// within the graph), collating their current state into the RPC
@@ -5263,7 +5260,7 @@ func (r *rpcServer) GetNodeMetrics(ctx context.Context,
 	// Obtain the pointer to the global singleton channel graph, this will
 	// provide a consistent view of the graph due to bolt db's
 	// transactional model.
-	graph := r.server.localChanDB.ChannelGraph()
+	graph := r.server.graphDB

 	// Calculate betweenness centrality if requested. Note that depending on the
 	// graph size, this may take up to a few minutes.
@@ -5302,7 +5299,7 @@ func (r *rpcServer) GetNodeMetrics(ctx context.Context,
 func (r *rpcServer) GetChanInfo(ctx context.Context,
 	in *lnrpc.ChanInfoRequest) (*lnrpc.ChannelEdge, error) {

-	graph := r.server.localChanDB.ChannelGraph()
+	graph := r.server.graphDB

 	edgeInfo, edge1, edge2, err := graph.FetchChannelEdgesByID(in.ChanId)
 	if err != nil {
@@ -5322,7 +5319,7 @@ func (r *rpcServer) GetChanInfo(ctx context.Context,
 func (r *rpcServer) GetNodeInfo(ctx context.Context,
 	in *lnrpc.NodeInfoRequest) (*lnrpc.NodeInfo, error) {

-	graph := r.server.localChanDB.ChannelGraph()
+	graph := r.server.graphDB

 	// First, parse the hex-encoded public key into a full in-memory public
 	// key object we can work with for querying.
@@ -5423,7 +5420,7 @@ func (r *rpcServer) QueryRoutes(ctx context.Context,
 func (r *rpcServer) GetNetworkInfo(ctx context.Context,
 	_ *lnrpc.NetworkInfoRequest) (*lnrpc.NetworkInfo, error) {

-	graph := r.server.localChanDB.ChannelGraph()
+	graph := r.server.graphDB

 	var (
 		numNodes uint32
@@ -5735,7 +5732,7 @@ func (r *rpcServer) ListPayments(ctx context.Context,
 		query.MaxPayments = math.MaxUint64
 	}

-	paymentsQuerySlice, err := r.server.remoteChanDB.QueryPayments(query)
+	paymentsQuerySlice, err := r.server.chanStateDB.QueryPayments(query)
 	if err != nil {
 		return nil, err
 	}
@@ -5770,7 +5767,7 @@ func (r *rpcServer) DeleteAllPayments(ctx context.Context,
 		"failed_htlcs_only=%v", req.FailedPaymentsOnly,
 		req.FailedHtlcsOnly)

-	err := r.server.remoteChanDB.DeletePayments(
+	err := r.server.chanStateDB.DeletePayments(
 		req.FailedPaymentsOnly, req.FailedHtlcsOnly,
 	)
 	if err != nil {
@@ -5893,7 +5890,7 @@ func (r *rpcServer) FeeReport(ctx context.Context,

 	rpcsLog.Debugf("[feereport]")

-	channelGraph := r.server.localChanDB.ChannelGraph()
+	channelGraph := r.server.graphDB
 	selfNode, err := channelGraph.SourceNode()
 	if err != nil {
 		return nil, err
@@ -5932,7 +5929,7 @@ func (r *rpcServer) FeeReport(ctx context.Context,
 		return nil, err
 	}

-	fwdEventLog := r.server.remoteChanDB.ForwardingLog()
+	fwdEventLog := r.server.chanStateDB.ForwardingLog()

 	// computeFeeSum is a helper function that computes the total fees for
 	// a particular time slice described by a forwarding event query.
@@ -6170,7 +6167,7 @@ func (r *rpcServer) ForwardingHistory(ctx context.Context,
 		IndexOffset: req.IndexOffset,
 		NumMaxEvents: numEvents,
 	}
-	timeSlice, err := r.server.remoteChanDB.ForwardingLog().Query(eventQuery)
+	timeSlice, err := r.server.chanStateDB.ForwardingLog().Query(eventQuery)
 	if err != nil {
 		return nil, fmt.Errorf("unable to query forwarding log: %v", err)
 	}
@@ -6232,7 +6229,7 @@ func (r *rpcServer) ExportChannelBackup(ctx context.Context,
 	// the database. If this channel has been closed, or the outpoint is
 	// unknown, then we'll return an error
 	unpackedBackup, err := chanbackup.FetchBackupForChan(
-		chanPoint, r.server.remoteChanDB,
+		chanPoint, r.server.chanStateDB,
 	)
 	if err != nil {
 		return nil, err
@@ -6402,7 +6399,7 @@ func (r *rpcServer) ExportAllChannelBackups(ctx context.Context,
 	// First, we'll attempt to read back ups for ALL currently opened
 	// channels from disk.
 	allUnpackedBackups, err := chanbackup.FetchStaticChanBackups(
-		r.server.remoteChanDB,
+		r.server.chanStateDB,
 	)
 	if err != nil {
 		return nil, fmt.Errorf("unable to fetch all static chan "+
@@ -6425,7 +6422,7 @@ func (r *rpcServer) RestoreChannelBackups(ctx context.Context,
 	// restore either a set of chanbackup.Single or chanbackup.Multi
 	// backups.
 	chanRestorer := &chanDBRestorer{
-		db: r.server.remoteChanDB,
+		db: r.server.chanStateDB,
 		secretKeys: r.server.cc.KeyRing,
 		chainArb: r.server.chainArb,
 	}
@@ -6523,7 +6520,7 @@ func (r *rpcServer) SubscribeChannelBackups(req *lnrpc.ChannelBackupSubscription
 			// we'll obtains the current set of single channel
 			// backups from disk.
 			chanBackups, err := chanbackup.FetchStaticChanBackups(
-				r.server.remoteChanDB,
+				r.server.chanStateDB,
 			)
 			if err != nil {
 				return fmt.Errorf("unable to fetch all "+

server.go (85 lines changed)

@@ -220,9 +220,9 @@ type server struct {

 	fundingMgr *funding.Manager

-	localChanDB *channeldb.DB
+	graphDB *channeldb.ChannelGraph

-	remoteChanDB *channeldb.DB
+	chanStateDB *channeldb.DB

 	htlcSwitch *htlcswitch.Switch

@@ -387,7 +387,7 @@ func newServer(cfg *Config, listenAddrs []net.Addr,
 	// the same directory as the channel graph database. We don't need to
 	// replicate this data, so we'll store it locally.
 	replayLog := htlcswitch.NewDecayedLog(
-		cfg.localDatabaseDir(), defaultSphinxDbName, cfg.DB.Bolt,
+		cfg.graphDatabaseDir(), defaultSphinxDbName, cfg.DB.Bolt,
 		cc.ChainNotifier,
 	)
 	sphinxRouter := sphinx.NewRouter(
@@ -435,15 +435,15 @@ func newServer(cfg *Config, listenAddrs []net.Addr,

 	s := &server{
 		cfg: cfg,
-		localChanDB: dbs.localChanDB,
-		remoteChanDB: dbs.remoteChanDB,
+		graphDB: dbs.graphDB.ChannelGraph(),
+		chanStateDB: dbs.chanStateDB,
 		cc: cc,
 		sigPool: lnwallet.NewSigPool(cfg.Workers.Sig, cc.Signer),
 		writePool: writePool,
 		readPool: readPool,
 		chansToRestore: chansToRestore,

-		channelNotifier: channelnotifier.New(dbs.remoteChanDB),
+		channelNotifier: channelnotifier.New(dbs.chanStateDB),

 		identityECDH: nodeKeyECDH,
 		nodeSigner: netann.NewNodeSigner(nodeKeySigner),
@@ -475,7 +475,7 @@ func newServer(cfg *Config, listenAddrs []net.Addr,
 	}

 	s.witnessBeacon = &preimageBeacon{
-		wCache: dbs.remoteChanDB.NewWitnessCache(),
+		wCache: dbs.chanStateDB.NewWitnessCache(),
 		subscribers: make(map[uint64]*preimageSubscriber),
 	}

@@ -489,13 +489,13 @@ func newServer(cfg *Config, listenAddrs []net.Addr,
 		uint32(currentHeight), currentHash, cc.ChainNotifier,
 	)
 	s.invoices = invoices.NewRegistry(
-		dbs.remoteChanDB, expiryWatcher, &registryConfig,
+		dbs.chanStateDB, expiryWatcher, &registryConfig,
 	)

 	s.htlcNotifier = htlcswitch.NewHtlcNotifier(time.Now)

 	s.htlcSwitch, err = htlcswitch.New(htlcswitch.Config{
-		DB: dbs.remoteChanDB,
+		DB: dbs.chanStateDB,
 		LocalChannelClose: func(pubKey []byte,
 			request *htlcswitch.ChanClose) {

@@ -510,7 +510,7 @@ func newServer(cfg *Config, listenAddrs []net.Addr,

 			peer.HandleLocalCloseChanReqs(request)
 		},
-		FwdingLog: dbs.remoteChanDB.ForwardingLog(),
+		FwdingLog: dbs.chanStateDB.ForwardingLog(),
 		SwitchPackager: channeldb.NewSwitchPackager(),
 		ExtractErrorEncrypter: s.sphinx.ExtractErrorEncrypter,
 		FetchLastChannelUpdate: s.fetchLastChanUpdate(),
@@ -537,8 +537,8 @@ func newServer(cfg *Config, listenAddrs []net.Addr,
 		MessageSigner: s.nodeSigner,
 		IsChannelActive: s.htlcSwitch.HasActiveLink,
 		ApplyChannelUpdate: s.applyChannelUpdate,
-		DB: dbs.remoteChanDB,
-		Graph: dbs.localChanDB.ChannelGraph(),
+		DB: dbs.chanStateDB,
+		Graph: dbs.graphDB.ChannelGraph(),
 	}

 	chanStatusMgr, err := netann.NewChanStatusManager(chanStatusMgrCfg)
@@ -630,7 +630,7 @@ func newServer(cfg *Config, listenAddrs []net.Addr,

 	// As the graph can be obtained at anytime from the network, we won't
 	// replicate it, and instead it'll only be stored locally.
-	chanGraph := dbs.localChanDB.ChannelGraph()
+	chanGraph := dbs.graphDB.ChannelGraph()

 	// We'll now reconstruct a node announcement based on our current
 	// configuration so we can send it out as a sort of heart beat within
@@ -697,7 +697,7 @@ func newServer(cfg *Config, listenAddrs []net.Addr,

 	// The router will get access to the payment ID sequencer, such that it
 	// can generate unique payment IDs.
-	sequencer, err := htlcswitch.NewPersistentSequencer(dbs.remoteChanDB)
+	sequencer, err := htlcswitch.NewPersistentSequencer(dbs.chanStateDB)
 	if err != nil {
 		return nil, err
 	}
@@ -742,7 +742,7 @@ func newServer(cfg *Config, listenAddrs []net.Addr,
 	}

 	s.missionControl, err = routing.NewMissionControl(
-		dbs.remoteChanDB, selfNode.PubKeyBytes,
+		dbs.chanStateDB, selfNode.PubKeyBytes,
 		&routing.MissionControlConfig{
 			ProbabilityEstimatorCfg: estimatorCfg,
 			MaxMcHistory: routingConfig.MaxMcHistory,
@@ -775,7 +775,7 @@ func newServer(cfg *Config, listenAddrs []net.Addr,
 		PathFindingConfig: pathFindingConfig,
 	}

-	paymentControl := channeldb.NewPaymentControl(dbs.remoteChanDB)
+	paymentControl := channeldb.NewPaymentControl(dbs.chanStateDB)

 	s.controlTower = routing.NewControlTower(paymentControl)

@@ -803,12 +803,12 @@ func newServer(cfg *Config, listenAddrs []net.Addr,
 		return nil, fmt.Errorf("can't create router: %v", err)
 	}

-	chanSeries := discovery.NewChanSeries(s.localChanDB.ChannelGraph())
-	gossipMessageStore, err := discovery.NewMessageStore(s.remoteChanDB)
+	chanSeries := discovery.NewChanSeries(s.graphDB)
+	gossipMessageStore, err := discovery.NewMessageStore(s.chanStateDB)
 	if err != nil {
 		return nil, err
 	}
-	waitingProofStore, err := channeldb.NewWaitingProofStore(s.remoteChanDB)
+	waitingProofStore, err := channeldb.NewWaitingProofStore(s.chanStateDB)
 	if err != nil {
 		return nil, err
 	}
@@ -848,11 +848,11 @@ func newServer(cfg *Config, listenAddrs []net.Addr,
 		ForAllOutgoingChannels: s.chanRouter.ForAllOutgoingChannels,
 		PropagateChanPolicyUpdate: s.authGossiper.PropagateChanPolicyUpdate,
 		UpdateForwardingPolicies: s.htlcSwitch.UpdateForwardingPolicies,
-		FetchChannel: s.remoteChanDB.FetchChannel,
+		FetchChannel: s.chanStateDB.FetchChannel,
 	}

 	utxnStore, err := newNurseryStore(
-		s.cfg.ActiveNetParams.GenesisHash, dbs.remoteChanDB,
+		s.cfg.ActiveNetParams.GenesisHash, dbs.chanStateDB,
 	)
 	if err != nil {
 		srvrLog.Errorf("unable to create nursery store: %v", err)
@@ -863,7 +863,7 @@ func newServer(cfg *Config, listenAddrs []net.Addr,
 		sweep.DefaultBatchWindowDuration)

 	sweeperStore, err := sweep.NewSweeperStore(
-		dbs.remoteChanDB, s.cfg.ActiveNetParams.GenesisHash,
+		dbs.chanStateDB, s.cfg.ActiveNetParams.GenesisHash,
 	)
 	if err != nil {
 		srvrLog.Errorf("unable to create sweeper store: %v", err)
@@ -890,8 +890,8 @@ func newServer(cfg *Config, listenAddrs []net.Addr,
 	s.utxoNursery = newUtxoNursery(&NurseryConfig{
 		ChainIO: cc.ChainIO,
 		ConfDepth: 1,
-		FetchClosedChannels: dbs.remoteChanDB.FetchClosedChannels,
-		FetchClosedChannel: dbs.remoteChanDB.FetchClosedChannel,
+		FetchClosedChannels: dbs.chanStateDB.FetchClosedChannels,
+		FetchClosedChannel: dbs.chanStateDB.FetchClosedChannel,
 		Notifier: cc.ChainNotifier,
 		PublishTransaction: cc.Wallet.PublishTransaction,
 		Store: utxnStore,
@@ -1012,18 +1012,18 @@ func newServer(cfg *Config, listenAddrs []net.Addr,
 		PaymentsExpirationGracePeriod: cfg.PaymentsExpirationGracePeriod,
 		IsForwardedHTLC: s.htlcSwitch.IsForwardedHTLC,
 		Clock: clock.NewDefaultClock(),
-	}, dbs.remoteChanDB)
+	}, dbs.chanStateDB)

 	s.breachArbiter = newBreachArbiter(&BreachConfig{
 		CloseLink: closeLink,
-		DB: dbs.remoteChanDB,
+		DB: dbs.chanStateDB,
 		Estimator: s.cc.FeeEstimator,
 		GenSweepScript: newSweepPkScriptGen(cc.Wallet),
 		Notifier: cc.ChainNotifier,
 		PublishTransaction: cc.Wallet.PublishTransaction,
 		ContractBreaches: contractBreaches,
 		Signer: cc.Wallet.Cfg.Signer,
-		Store: newRetributionStore(dbs.remoteChanDB),
+		Store: newRetributionStore(dbs.chanStateDB),
 	})

 	// Select the configuration and furnding parameters for Bitcoin or
@@ -1071,7 +1071,7 @@ func newServer(cfg *Config, listenAddrs []net.Addr,
 		FindChannel: func(chanID lnwire.ChannelID) (
 			*channeldb.OpenChannel, error) {

-			dbChannels, err := dbs.remoteChanDB.FetchAllChannels()
+			dbChannels, err := dbs.chanStateDB.FetchAllChannels()
 			if err != nil {
 				return nil, err
 			}
@@ -1243,10 +1243,10 @@ func newServer(cfg *Config, listenAddrs []net.Addr,
 	// static backup of the latest channel state.
 	chanNotifier := &channelNotifier{
 		chanNotifier: s.channelNotifier,
-		addrs: s.remoteChanDB,
+		addrs: s.chanStateDB,
 	}
 	backupFile := chanbackup.NewMultiFile(cfg.BackupFilePath)
-	startingChans, err := chanbackup.FetchStaticChanBackups(s.remoteChanDB)
+	startingChans, err := chanbackup.FetchStaticChanBackups(s.chanStateDB)
 	if err != nil {
 		return nil, err
 	}
@@ -1269,10 +1269,10 @@ func newServer(cfg *Config, listenAddrs []net.Addr,
 		SubscribePeerEvents: func() (subscribe.Subscription, error) {
 			return s.peerNotifier.SubscribePeerEvents()
 		},
-		GetOpenChannels: s.remoteChanDB.FetchAllOpenChannels,
+		GetOpenChannels: s.chanStateDB.FetchAllOpenChannels,
 		Clock: clock.NewDefaultClock(),
-		ReadFlapCount: s.remoteChanDB.ReadFlapCount,
-		WriteFlapCount: s.remoteChanDB.WriteFlapCounts,
+		ReadFlapCount: s.chanStateDB.ReadFlapCount,
+		WriteFlapCount: s.chanStateDB.WriteFlapCounts,
 		FlapCountTicker: ticker.New(chanfitness.FlapCountFlushRate),
 	})

@@ -1677,7 +1677,7 @@ func (s *server) Start() error {
 		// that have all the information we need to handle channel
 		// recovery _before_ we even accept connections from any peers.
 		chanRestorer := &chanDBRestorer{
-			db: s.remoteChanDB,
+			db: s.chanStateDB,
 			secretKeys: s.cc.KeyRing,
 			chainArb: s.chainArb,
 		}
@@ -1722,7 +1722,7 @@ func (s *server) Start() error {
 		// we'll prune our set of link nodes found within the database
 		// to ensure we don't reconnect to any nodes we no longer have
 		// open channels with.
-		if err := s.remoteChanDB.PruneLinkNodes(); err != nil {
+		if err := s.chanStateDB.PruneLinkNodes(); err != nil {
 			startErr = err
 			return
 		}
@@ -2132,7 +2132,7 @@ func initNetworkBootstrappers(s *server) ([]discovery.NetworkPeerBootstrapper, e
 	// First, we'll create an instance of the ChannelGraphBootstrapper as
 	// this can be used by default if we've already partially seeded the
 	// network.
-	chanGraph := autopilot.ChannelGraphFromDatabase(s.localChanDB.ChannelGraph())
+	chanGraph := autopilot.ChannelGraphFromDatabase(s.graphDB)
 	graphBootstrapper, err := discovery.NewGraphBootstrapper(chanGraph)
 	if err != nil {
 		return nil, err
@@ -2469,7 +2469,7 @@ func (s *server) createNewHiddenService() error {
 		AuthSigBytes: newNodeAnn.Signature.ToSignatureBytes(),
 	}
 	copy(selfNode.PubKeyBytes[:], s.identityECDH.PubKey().SerializeCompressed())
-	if err := s.localChanDB.ChannelGraph().SetSourceNode(selfNode); err != nil {
+	if err := s.graphDB.SetSourceNode(selfNode); err != nil {
 		return fmt.Errorf("can't set self node: %v", err)
 	}

@@ -2526,7 +2526,7 @@ func (s *server) establishPersistentConnections() error {
 	// Iterate through the list of LinkNodes to find addresses we should
 	// attempt to connect to based on our set of previous connections. Set
 	// the reconnection port to the default peer port.
-	linkNodes, err := s.remoteChanDB.FetchAllLinkNodes()
+	linkNodes, err := s.chanStateDB.FetchAllLinkNodes()
 	if err != nil && err != channeldb.ErrLinkNodesNotFound {
 		return err
 	}
@@ -2542,8 +2542,7 @@ func (s *server) establishPersistentConnections() error {
 	// After checking our previous connections for addresses to connect to,
 	// iterate through the nodes in our channel graph to find addresses
 	// that have been added via NodeAnnouncement messages.
-	chanGraph := s.localChanDB.ChannelGraph()
-	sourceNode, err := chanGraph.SourceNode()
+	sourceNode, err := s.graphDB.SourceNode()
 	if err != nil {
 		return err
 	}
@@ -3249,8 +3248,8 @@ func (s *server) peerConnected(conn net.Conn, connReq *connmgr.ConnReq,
 		ReadPool: s.readPool,
 		Switch: s.htlcSwitch,
 		InterceptSwitch: s.interceptableSwitch,
-		ChannelDB: s.remoteChanDB,
-		ChannelGraph: s.localChanDB.ChannelGraph(),
+		ChannelDB: s.chanStateDB,
+		ChannelGraph: s.graphDB,
 		ChainArb: s.chainArb,
 		AuthGossiper: s.authGossiper,
 		ChanStatusMgr: s.chanStatusMgr,
@@ -3908,7 +3907,7 @@ func (s *server) fetchNodeAdvertisedAddr(pub *btcec.PublicKey) (net.Addr, error)
 		return nil, err
 	}

-	node, err := s.localChanDB.ChannelGraph().FetchLightningNode(nil, vertex)
+	node, err := s.graphDB.FetchLightningNode(nil, vertex)
 	if err != nil {
 		return nil, err
 	}
@@ -92,8 +92,8 @@ func (s *subRPCServerConfigs) PopulateDependencies(cfg *Config,
 	chanRouter *routing.ChannelRouter,
 	routerBackend *routerrpc.RouterBackend,
 	nodeSigner *netann.NodeSigner,
-	localChanDB *channeldb.DB,
-	remoteChanDB *channeldb.DB,
+	graphDB *channeldb.ChannelGraph,
+	chanStateDB *channeldb.DB,
 	sweeper *sweep.UtxoSweeper,
 	tower *watchtower.Standalone,
 	towerClient wtclient.Client,
@@ -222,11 +222,11 @@ func (s *subRPCServerConfigs) PopulateDependencies(cfg *Config,
 	subCfgValue.FieldByName("DefaultCLTVExpiry").Set(
 		reflect.ValueOf(defaultDelta),
 	)
-	subCfgValue.FieldByName("LocalChanDB").Set(
-		reflect.ValueOf(localChanDB),
+	subCfgValue.FieldByName("GraphDB").Set(
+		reflect.ValueOf(graphDB),
 	)
-	subCfgValue.FieldByName("RemoteChanDB").Set(
-		reflect.ValueOf(remoteChanDB),
+	subCfgValue.FieldByName("ChanStateDB").Set(
+		reflect.ValueOf(chanStateDB),
 	)
 	subCfgValue.FieldByName("GenInvoiceFeatures").Set(
 		reflect.ValueOf(genInvoiceFeatures),
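
For context on the hunk above: PopulateDependencies injects the renamed handles into each sub-RPC-server config struct through reflection, addressing fields by name, so the string literals must track the struct field renames exactly (a mismatch only surfaces at run time). A small self-contained sketch of the same pattern, with GraphHandle and invoicesConfig as toy stand-ins:

	package main

	import (
		"fmt"
		"reflect"
	)

	// GraphHandle stands in for *channeldb.ChannelGraph.
	type GraphHandle struct{ name string }

	// invoicesConfig stands in for a sub-RPC-server config struct; the field
	// name must match the string passed to FieldByName below.
	type invoicesConfig struct {
		GraphDB *GraphHandle
	}

	func main() {
		graphDB := &GraphHandle{name: "channel graph"}
		subCfg := &invoicesConfig{}

		// Mirror of the PopulateDependencies pattern: set a struct field
		// via reflection, looked up by its (renamed) name.
		subCfgValue := reflect.ValueOf(subCfg).Elem()
		subCfgValue.FieldByName("GraphDB").Set(reflect.ValueOf(graphDB))

		fmt.Println(subCfg.GraphDB.name) // channel graph
	}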