multi: rename local and remote chan DBs

As a preparation to not have a local and remote version of the database
around anymore, we rename the variables into what their actual function
is. In case of the RPC server we even directly use the channel graph
instead of the DB instance. This should allow us to extract the channel
graph into its own, separate database (perhaps with better access
characteristics) in the future.
This commit is contained in:
Oliver Gugger 2021-08-03 09:57:26 +02:00
parent abf3942228
commit 9138c8abac
No known key found for this signature in database
GPG Key ID: 8E4256593F177720
10 changed files with 149 additions and 153 deletions

View File

@ -68,11 +68,13 @@ type Config struct {
// LtcdMode defines settings for connecting to an ltcd node. // LtcdMode defines settings for connecting to an ltcd node.
LtcdMode *lncfg.Btcd LtcdMode *lncfg.Btcd
// LocalChanDB is a pointer to the local backing channel database. // HeightHintDB is a pointer to the database that stores the height
LocalChanDB *channeldb.DB // hints.
HeightHintDB *channeldb.DB
// RemoteChanDB is a pointer to the remote backing channel database. // ChanStateDB is a pointer to the database that stores the channel
RemoteChanDB *channeldb.DB // state.
ChanStateDB *channeldb.DB
// BlockCacheSize is the size (in bytes) of blocks kept in memory. // BlockCacheSize is the size (in bytes) of blocks kept in memory.
BlockCacheSize uint64 BlockCacheSize uint64
@ -304,7 +306,7 @@ func NewChainControl(cfg *Config, blockCache *blockcache.BlockCache) (
// Initialize the height hint cache within the chain directory. // Initialize the height hint cache within the chain directory.
hintCache, err := chainntnfs.NewHeightHintCache( hintCache, err := chainntnfs.NewHeightHintCache(
heightHintCacheConfig, cfg.LocalChanDB, heightHintCacheConfig, cfg.HeightHintDB,
) )
if err != nil { if err != nil {
return nil, nil, fmt.Errorf("unable to initialize height hint "+ return nil, nil, fmt.Errorf("unable to initialize height hint "+
@ -684,7 +686,7 @@ func NewChainControl(cfg *Config, blockCache *blockcache.BlockCache) (
// Create, and start the lnwallet, which handles the core payment // Create, and start the lnwallet, which handles the core payment
// channel logic, and exposes control via proxy state machines. // channel logic, and exposes control via proxy state machines.
walletCfg := lnwallet.Config{ walletCfg := lnwallet.Config{
Database: cfg.RemoteChanDB, Database: cfg.ChanStateDB,
Notifier: cc.ChainNotifier, Notifier: cc.ChainNotifier,
WalletController: wc, WalletController: wc,
Signer: cc.Signer, Signer: cc.Signer,

View File

@ -1489,12 +1489,13 @@ func ValidateConfig(cfg Config, usageMessage string,
return &cfg, err return &cfg, err
} }
// localDatabaseDir returns the default directory where the // graphDatabaseDir returns the default directory where the local bolt graph db
// local bolt db files are stored. // files are stored.
func (c *Config) localDatabaseDir() string { func (c *Config) graphDatabaseDir() string {
return filepath.Join(c.DataDir, return filepath.Join(
defaultGraphSubDirname, c.DataDir, defaultGraphSubDirname,
lncfg.NormalizeNetwork(c.ActiveNetParams.Name)) lncfg.NormalizeNetwork(c.ActiveNetParams.Name),
)
} }
// CleanAndExpandPath expands environment variables and leading ~ in the // CleanAndExpandPath expands environment variables and leading ~ in the

View File

@ -79,22 +79,21 @@ func (db *DB) Init(ctx context.Context, dbPath string) error {
} }
// DatabaseBackends is a two-tuple that holds the set of active database // DatabaseBackends is a two-tuple that holds the set of active database
// backends for the daemon. The two backends we expose are the local database // backends for the daemon. The two backends we expose are the graph database
// backend, and the remote backend. The LocalDB attribute will always be // backend, and the channel state backend.
// populated. However, the remote DB will only be set if a replicated database // TODO(guggero): Actually make fully remote.
// is active.
type DatabaseBackends struct { type DatabaseBackends struct {
// LocalDB points to the local non-replicated backend. // GraphDB points to the database backend that contains the less
LocalDB kvdb.Backend // critical data that is accessed often, such as the channel graph and
// chain height hints.
GraphDB kvdb.Backend
// RemoteDB points to a possibly networked replicated backend. If no // ChanStateDB points to a possibly networked replicated backend that
// replicated backend is active, then this pointer will be nil. // contains the critical channel state related data.
RemoteDB kvdb.Backend ChanStateDB kvdb.Backend
} }
// GetBackends returns a set of kvdb.Backends as set in the DB config. The // GetBackends returns a set of kvdb.Backends as set in the DB config.
// local database will ALWAYS be non-nil, while the remote database will only
// be populated if etcd is specified.
func (db *DB) GetBackends(ctx context.Context, dbPath string) ( func (db *DB) GetBackends(ctx context.Context, dbPath string) (
*DatabaseBackends, error) { *DatabaseBackends, error) {
@ -125,8 +124,8 @@ func (db *DB) GetBackends(ctx context.Context, dbPath string) (
} }
return &DatabaseBackends{ return &DatabaseBackends{
LocalDB: localDB, GraphDB: localDB,
RemoteDB: remoteDB, ChanStateDB: remoteDB,
}, nil }, nil
} }

48
lnd.go
View File

@ -258,7 +258,7 @@ func Main(cfg *Config, lisCfg ListenerCfg, interceptor signal.Interceptor) error
// Run configuration dependent DB pre-initialization. Note that this // Run configuration dependent DB pre-initialization. Note that this
// needs to be done early and once during the startup process, before // needs to be done early and once during the startup process, before
// any DB access. // any DB access.
if err := cfg.DB.Init(ctx, cfg.localDatabaseDir()); err != nil { if err := cfg.DB.Init(ctx, cfg.graphDatabaseDir()); err != nil {
return err return err
} }
@ -467,7 +467,7 @@ func Main(cfg *Config, lisCfg ListenerCfg, interceptor signal.Interceptor) error
// environment. This will ensure that all members of the cluster // environment. This will ensure that all members of the cluster
// have access to the same wallet state. // have access to the same wallet state.
loaderOpt = btcwallet.LoaderWithExternalWalletDB( loaderOpt = btcwallet.LoaderWithExternalWalletDB(
dbs.remoteChanDB.Backend, dbs.chanStateDB.Backend,
) )
} else { } else {
// When "running locally", LND will use the bbolt wallet.db to // When "running locally", LND will use the bbolt wallet.db to
@ -706,8 +706,8 @@ func Main(cfg *Config, lisCfg ListenerCfg, interceptor signal.Interceptor) error
LitecoindMode: cfg.LitecoindMode, LitecoindMode: cfg.LitecoindMode,
BtcdMode: cfg.BtcdMode, BtcdMode: cfg.BtcdMode,
LtcdMode: cfg.LtcdMode, LtcdMode: cfg.LtcdMode,
LocalChanDB: dbs.localChanDB, HeightHintDB: dbs.graphDB,
RemoteChanDB: dbs.remoteChanDB, ChanStateDB: dbs.chanStateDB,
PrivateWalletPw: privateWalletPw, PrivateWalletPw: privateWalletPw,
PublicWalletPw: publicWalletPw, PublicWalletPw: publicWalletPw,
Birthday: walletInitParams.Birthday, Birthday: walletInitParams.Birthday,
@ -781,7 +781,7 @@ func Main(cfg *Config, lisCfg ListenerCfg, interceptor signal.Interceptor) error
if cfg.WtClient.Active { if cfg.WtClient.Active {
var err error var err error
towerClientDB, err = wtdb.OpenClientDB( towerClientDB, err = wtdb.OpenClientDB(
cfg.localDatabaseDir(), cfg.DB.Bolt.DBTimeout, cfg.graphDatabaseDir(), cfg.DB.Bolt.DBTimeout,
) )
if err != nil { if err != nil {
err := fmt.Errorf("unable to open watchtower client "+ err := fmt.Errorf("unable to open watchtower client "+
@ -1624,16 +1624,14 @@ func waitForWalletPassword(cfg *Config,
// databaseInstances is a struct that holds all instances to the actual // databaseInstances is a struct that holds all instances to the actual
// databases that are used in lnd. // databases that are used in lnd.
type databaseInstances struct { type databaseInstances struct {
localChanDB *channeldb.DB graphDB *channeldb.DB
remoteChanDB *channeldb.DB chanStateDB *channeldb.DB
} }
// initializeDatabases extracts the current databases that we'll use for normal // initializeDatabases extracts the current databases that we'll use for normal
// operation in the daemon. Two databases are returned: one remote and one // operation in the daemon. A function closure that closes all opened databases
// local. However, only if the replicated database is active will the remote // is also returned.
// database point to a unique database. Otherwise, the local and remote DB will // TODO(guggero): Actually make fully remote.
// both point to the same local database. A function closure that closes all
// opened databases is also returned.
func initializeDatabases(ctx context.Context, func initializeDatabases(ctx context.Context,
cfg *Config) (*databaseInstances, func(), error) { cfg *Config) (*databaseInstances, func(), error) {
@ -1648,7 +1646,7 @@ func initializeDatabases(ctx context.Context,
startOpenTime := time.Now() startOpenTime := time.Now()
databaseBackends, err := cfg.DB.GetBackends(ctx, cfg.localDatabaseDir()) databaseBackends, err := cfg.DB.GetBackends(ctx, cfg.graphDatabaseDir())
if err != nil { if err != nil {
return nil, nil, fmt.Errorf("unable to obtain database "+ return nil, nil, fmt.Errorf("unable to obtain database "+
"backends: %v", err) "backends: %v", err)
@ -1660,11 +1658,11 @@ func initializeDatabases(ctx context.Context,
dbs = &databaseInstances{} dbs = &databaseInstances{}
closeFuncs []func() closeFuncs []func()
) )
if databaseBackends.RemoteDB == nil { if databaseBackends.ChanStateDB == nil {
// Open the channeldb, which is dedicated to storing channel, // Open the channeldb, which is dedicated to storing channel,
// and network related metadata. // and network related metadata.
dbs.localChanDB, err = channeldb.CreateWithBackend( dbs.graphDB, err = channeldb.CreateWithBackend(
databaseBackends.LocalDB, databaseBackends.GraphDB,
channeldb.OptionSetRejectCacheSize(cfg.Caches.RejectCacheSize), channeldb.OptionSetRejectCacheSize(cfg.Caches.RejectCacheSize),
channeldb.OptionSetChannelCacheSize(cfg.Caches.ChannelCacheSize), channeldb.OptionSetChannelCacheSize(cfg.Caches.ChannelCacheSize),
channeldb.OptionSetBatchCommitInterval(cfg.DB.BatchCommitInterval), channeldb.OptionSetBatchCommitInterval(cfg.DB.BatchCommitInterval),
@ -1681,10 +1679,10 @@ func initializeDatabases(ctx context.Context,
} }
closeFuncs = append(closeFuncs, func() { closeFuncs = append(closeFuncs, func() {
dbs.localChanDB.Close() dbs.graphDB.Close()
}) })
dbs.remoteChanDB = dbs.localChanDB dbs.chanStateDB = dbs.graphDB
} else { } else {
ltndLog.Infof("Database replication is available! Creating " + ltndLog.Infof("Database replication is available! Creating " +
"local and remote channeldb instances") "local and remote channeldb instances")
@ -1692,8 +1690,8 @@ func initializeDatabases(ctx context.Context,
// Otherwise, we'll open two instances, one for the state we // Otherwise, we'll open two instances, one for the state we
// only need locally, and the other for things we want to // only need locally, and the other for things we want to
// ensure are replicated. // ensure are replicated.
dbs.localChanDB, err = channeldb.CreateWithBackend( dbs.graphDB, err = channeldb.CreateWithBackend(
databaseBackends.LocalDB, databaseBackends.GraphDB,
channeldb.OptionSetRejectCacheSize(cfg.Caches.RejectCacheSize), channeldb.OptionSetRejectCacheSize(cfg.Caches.RejectCacheSize),
channeldb.OptionSetChannelCacheSize(cfg.Caches.ChannelCacheSize), channeldb.OptionSetChannelCacheSize(cfg.Caches.ChannelCacheSize),
channeldb.OptionSetBatchCommitInterval(cfg.DB.BatchCommitInterval), channeldb.OptionSetBatchCommitInterval(cfg.DB.BatchCommitInterval),
@ -1713,13 +1711,13 @@ func initializeDatabases(ctx context.Context,
} }
closeFuncs = append(closeFuncs, func() { closeFuncs = append(closeFuncs, func() {
dbs.localChanDB.Close() dbs.graphDB.Close()
}) })
ltndLog.Infof("Opening replicated database instance...") ltndLog.Infof("Opening replicated database instance...")
dbs.remoteChanDB, err = channeldb.CreateWithBackend( dbs.chanStateDB, err = channeldb.CreateWithBackend(
databaseBackends.RemoteDB, databaseBackends.ChanStateDB,
channeldb.OptionDryRunMigration(cfg.DryRunMigration), channeldb.OptionDryRunMigration(cfg.DryRunMigration),
channeldb.OptionSetBatchCommitInterval(cfg.DB.BatchCommitInterval), channeldb.OptionSetBatchCommitInterval(cfg.DB.BatchCommitInterval),
) )
@ -1728,7 +1726,7 @@ func initializeDatabases(ctx context.Context,
return nil, nil, err return nil, nil, err
case err != nil: case err != nil:
dbs.localChanDB.Close() dbs.graphDB.Close()
err := fmt.Errorf("unable to open remote channeldb: %v", err) err := fmt.Errorf("unable to open remote channeldb: %v", err)
ltndLog.Error(err) ltndLog.Error(err)
@ -1736,7 +1734,7 @@ func initializeDatabases(ctx context.Context,
} }
closeFuncs = append(closeFuncs, func() { closeFuncs = append(closeFuncs, func() {
dbs.remoteChanDB.Close() dbs.chanStateDB.Close()
}) })
} }

View File

@ -44,13 +44,13 @@ type Config struct {
// specified. // specified.
DefaultCLTVExpiry uint32 DefaultCLTVExpiry uint32
// LocalChanDB is a global boltdb instance which is needed to access the // GraphDB is a global database instance which is needed to access the
// channel graph. // channel graph.
LocalChanDB *channeldb.DB GraphDB *channeldb.ChannelGraph
// RemoteChanDB is a replicated db instance which is the same as the // ChanStateDB is a possibly replicated db instance which contains the
// localdb when running without remote db. // channel state.
RemoteChanDB *channeldb.DB ChanStateDB *channeldb.DB
// GenInvoiceFeatures returns a feature containing feature bits that // GenInvoiceFeatures returns a feature containing feature bits that
// should be advertised on freshly generated invoices. // should be advertised on freshly generated invoices.

View File

@ -316,8 +316,8 @@ func (s *Server) AddHoldInvoice(ctx context.Context,
ChainParams: s.cfg.ChainParams, ChainParams: s.cfg.ChainParams,
NodeSigner: s.cfg.NodeSigner, NodeSigner: s.cfg.NodeSigner,
DefaultCLTVExpiry: s.cfg.DefaultCLTVExpiry, DefaultCLTVExpiry: s.cfg.DefaultCLTVExpiry,
ChanDB: s.cfg.RemoteChanDB, ChanDB: s.cfg.ChanStateDB,
Graph: s.cfg.LocalChanDB.ChannelGraph(), Graph: s.cfg.GraphDB,
GenInvoiceFeatures: s.cfg.GenInvoiceFeatures, GenInvoiceFeatures: s.cfg.GenInvoiceFeatures,
GenAmpInvoiceFeatures: s.cfg.GenAmpInvoiceFeatures, GenAmpInvoiceFeatures: s.cfg.GenAmpInvoiceFeatures,
} }

View File

@ -185,7 +185,7 @@ func initAutoPilot(svr *server, cfg *lncfg.AutoPilot,
cfg.MinConfs, lnwallet.DefaultAccountName, cfg.MinConfs, lnwallet.DefaultAccountName,
) )
}, },
Graph: autopilot.ChannelGraphFromDatabase(svr.localChanDB.ChannelGraph()), Graph: autopilot.ChannelGraphFromDatabase(svr.graphDB),
Constraints: atplConstraints, Constraints: atplConstraints,
ConnectToPeer: func(target *btcec.PublicKey, addrs []net.Addr) (bool, error) { ConnectToPeer: func(target *btcec.PublicKey, addrs []net.Addr) (bool, error) {
// First, we'll check if we're already connected to the // First, we'll check if we're already connected to the
@ -258,7 +258,7 @@ func initAutoPilot(svr *server, cfg *lncfg.AutoPilot,
// We'll fetch the current state of open // We'll fetch the current state of open
// channels from the database to use as initial // channels from the database to use as initial
// state for the auto-pilot agent. // state for the auto-pilot agent.
activeChannels, err := svr.remoteChanDB.FetchAllChannels() activeChannels, err := svr.chanStateDB.FetchAllChannels()
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -282,7 +282,7 @@ func initAutoPilot(svr *server, cfg *lncfg.AutoPilot,
ChannelInfo: func(chanPoint wire.OutPoint) ( ChannelInfo: func(chanPoint wire.OutPoint) (
*autopilot.LocalChannel, error) { *autopilot.LocalChannel, error) {
channel, err := svr.remoteChanDB.FetchChannel(chanPoint) channel, err := svr.chanStateDB.FetchChannel(chanPoint)
if err != nil { if err != nil {
return nil, err return nil, err
} }

View File

@ -614,12 +614,11 @@ func (r *rpcServer) addDeps(s *server, macService *macaroons.Service,
chanPredicate *chanacceptor.ChainedAcceptor) error { chanPredicate *chanacceptor.ChainedAcceptor) error {
// Set up router rpc backend. // Set up router rpc backend.
channelGraph := s.localChanDB.ChannelGraph() selfNode, err := s.graphDB.SourceNode()
selfNode, err := channelGraph.SourceNode()
if err != nil { if err != nil {
return err return err
} }
graph := s.localChanDB.ChannelGraph() graph := s.graphDB
routerBackend := &routerrpc.RouterBackend{ routerBackend := &routerrpc.RouterBackend{
SelfNode: selfNode.PubKeyBytes, SelfNode: selfNode.PubKeyBytes,
FetchChannelCapacity: func(chanID uint64) (btcutil.Amount, FetchChannelCapacity: func(chanID uint64) (btcutil.Amount,
@ -683,7 +682,7 @@ func (r *rpcServer) addDeps(s *server, macService *macaroons.Service,
err = subServerCgs.PopulateDependencies( err = subServerCgs.PopulateDependencies(
r.cfg, s.cc, r.cfg.networkDir, macService, atpl, invoiceRegistry, r.cfg, s.cc, r.cfg.networkDir, macService, atpl, invoiceRegistry,
s.htlcSwitch, r.cfg.ActiveNetParams.Params, s.chanRouter, s.htlcSwitch, r.cfg.ActiveNetParams.Params, s.chanRouter,
routerBackend, s.nodeSigner, s.localChanDB, s.remoteChanDB, routerBackend, s.nodeSigner, s.graphDB, s.chanStateDB,
s.sweeper, tower, s.towerClient, s.anchorTowerClient, s.sweeper, tower, s.towerClient, s.anchorTowerClient,
r.cfg.net.ResolveTCPAddr, genInvoiceFeatures, r.cfg.net.ResolveTCPAddr, genInvoiceFeatures,
genAmpInvoiceFeatures, rpcsLog, genAmpInvoiceFeatures, rpcsLog,
@ -1509,7 +1508,7 @@ func (r *rpcServer) VerifyMessage(ctx context.Context,
// channels signed the message. // channels signed the message.
// //
// TODO(phlip9): Require valid nodes to have capital in active channels. // TODO(phlip9): Require valid nodes to have capital in active channels.
graph := r.server.localChanDB.ChannelGraph() graph := r.server.graphDB
_, active, err := graph.HasLightningNode(pub) _, active, err := graph.HasLightningNode(pub)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to query graph: %v", err) return nil, fmt.Errorf("failed to query graph: %v", err)
@ -1615,7 +1614,7 @@ func (r *rpcServer) DisconnectPeer(ctx context.Context,
// Next, we'll fetch the pending/active channels we have with a // Next, we'll fetch the pending/active channels we have with a
// particular peer. // particular peer.
nodeChannels, err := r.server.remoteChanDB.FetchOpenChannels(peerPubKey) nodeChannels, err := r.server.chanStateDB.FetchOpenChannels(peerPubKey)
if err != nil { if err != nil {
return nil, fmt.Errorf("unable to fetch channels for peer: %v", err) return nil, fmt.Errorf("unable to fetch channels for peer: %v", err)
} }
@ -2125,7 +2124,7 @@ func (r *rpcServer) CloseChannel(in *lnrpc.CloseChannelRequest,
// First, we'll fetch the channel as is, as we'll need to examine it // First, we'll fetch the channel as is, as we'll need to examine it
// regardless of if this is a force close or not. // regardless of if this is a force close or not.
channel, err := r.server.remoteChanDB.FetchChannel(*chanPoint) channel, err := r.server.chanStateDB.FetchChannel(*chanPoint)
if err != nil { if err != nil {
return err return err
} }
@ -2403,7 +2402,7 @@ func (r *rpcServer) AbandonChannel(_ context.Context,
return nil, err return nil, err
} }
dbChan, err := r.server.remoteChanDB.FetchChannel(*chanPoint) dbChan, err := r.server.chanStateDB.FetchChannel(*chanPoint)
switch { switch {
// If the channel isn't found in the set of open channels, then we can // If the channel isn't found in the set of open channels, then we can
// continue on as it can't be loaded into the link/peer. // continue on as it can't be loaded into the link/peer.
@ -2450,13 +2449,11 @@ func (r *rpcServer) AbandonChannel(_ context.Context,
// court. Between any step it's possible that the users restarts the // court. Between any step it's possible that the users restarts the
// process all over again. As a result, each of the steps below are // process all over again. As a result, each of the steps below are
// intended to be idempotent. // intended to be idempotent.
err = r.server.remoteChanDB.AbandonChannel(chanPoint, uint32(bestHeight)) err = r.server.chanStateDB.AbandonChannel(chanPoint, uint32(bestHeight))
if err != nil { if err != nil {
return nil, err return nil, err
} }
err = abandonChanFromGraph( err = abandonChanFromGraph(r.server.graphDB, chanPoint)
r.server.localChanDB.ChannelGraph(), chanPoint,
)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -2489,7 +2486,7 @@ func (r *rpcServer) GetInfo(_ context.Context,
serverPeers := r.server.Peers() serverPeers := r.server.Peers()
openChannels, err := r.server.remoteChanDB.FetchAllOpenChannels() openChannels, err := r.server.chanStateDB.FetchAllOpenChannels()
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -2504,7 +2501,7 @@ func (r *rpcServer) GetInfo(_ context.Context,
inactiveChannels := uint32(len(openChannels)) - activeChannels inactiveChannels := uint32(len(openChannels)) - activeChannels
pendingChannels, err := r.server.remoteChanDB.FetchPendingChannels() pendingChannels, err := r.server.chanStateDB.FetchPendingChannels()
if err != nil { if err != nil {
return nil, fmt.Errorf("unable to get retrieve pending "+ return nil, fmt.Errorf("unable to get retrieve pending "+
"channels: %v", err) "channels: %v", err)
@ -2905,7 +2902,7 @@ func (r *rpcServer) ChannelBalance(ctx context.Context,
pendingOpenRemoteBalance lnwire.MilliSatoshi pendingOpenRemoteBalance lnwire.MilliSatoshi
) )
openChannels, err := r.server.remoteChanDB.FetchAllOpenChannels() openChannels, err := r.server.chanStateDB.FetchAllOpenChannels()
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -2925,7 +2922,7 @@ func (r *rpcServer) ChannelBalance(ctx context.Context,
} }
} }
pendingChannels, err := r.server.remoteChanDB.FetchPendingChannels() pendingChannels, err := r.server.chanStateDB.FetchPendingChannels()
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -2999,7 +2996,7 @@ func (r *rpcServer) PendingChannels(ctx context.Context,
// First, we'll populate the response with all the channels that are // First, we'll populate the response with all the channels that are
// soon to be opened. We can easily fetch this data from the database // soon to be opened. We can easily fetch this data from the database
// and map the db struct to the proto response. // and map the db struct to the proto response.
pendingOpenChannels, err := r.server.remoteChanDB.FetchPendingChannels() pendingOpenChannels, err := r.server.chanStateDB.FetchPendingChannels()
if err != nil { if err != nil {
rpcsLog.Errorf("unable to fetch pending channels: %v", err) rpcsLog.Errorf("unable to fetch pending channels: %v", err)
return nil, err return nil, err
@ -3047,7 +3044,7 @@ func (r *rpcServer) PendingChannels(ctx context.Context,
// Next, we'll examine the channels that are soon to be closed so we // Next, we'll examine the channels that are soon to be closed so we
// can populate these fields within the response. // can populate these fields within the response.
pendingCloseChannels, err := r.server.remoteChanDB.FetchClosedChannels(true) pendingCloseChannels, err := r.server.chanStateDB.FetchClosedChannels(true)
if err != nil { if err != nil {
rpcsLog.Errorf("unable to fetch closed channels: %v", err) rpcsLog.Errorf("unable to fetch closed channels: %v", err)
return nil, err return nil, err
@ -3076,7 +3073,7 @@ func (r *rpcServer) PendingChannels(ctx context.Context,
// not found, or the channel itself, this channel was closed // not found, or the channel itself, this channel was closed
// in a version before we started persisting historical // in a version before we started persisting historical
// channels, so we silence the error. // channels, so we silence the error.
historical, err := r.server.remoteChanDB.FetchHistoricalChannel( historical, err := r.server.chanStateDB.FetchHistoricalChannel(
&pendingClose.ChanPoint, &pendingClose.ChanPoint,
) )
switch err { switch err {
@ -3151,7 +3148,7 @@ func (r *rpcServer) PendingChannels(ctx context.Context,
// We'll also fetch all channels that are open, but have had their // We'll also fetch all channels that are open, but have had their
// commitment broadcasted, meaning they are waiting for the closing // commitment broadcasted, meaning they are waiting for the closing
// transaction to confirm. // transaction to confirm.
waitingCloseChans, err := r.server.remoteChanDB.FetchWaitingCloseChannels() waitingCloseChans, err := r.server.chanStateDB.FetchWaitingCloseChannels()
if err != nil { if err != nil {
rpcsLog.Errorf("unable to fetch channels waiting close: %v", rpcsLog.Errorf("unable to fetch channels waiting close: %v",
err) err)
@ -3386,7 +3383,7 @@ func (r *rpcServer) ClosedChannels(ctx context.Context,
resp := &lnrpc.ClosedChannelsResponse{} resp := &lnrpc.ClosedChannelsResponse{}
dbChannels, err := r.server.remoteChanDB.FetchClosedChannels(false) dbChannels, err := r.server.chanStateDB.FetchClosedChannels(false)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -3463,9 +3460,9 @@ func (r *rpcServer) ListChannels(ctx context.Context,
resp := &lnrpc.ListChannelsResponse{} resp := &lnrpc.ListChannelsResponse{}
graph := r.server.localChanDB.ChannelGraph() graph := r.server.graphDB
dbChannels, err := r.server.remoteChanDB.FetchAllOpenChannels() dbChannels, err := r.server.chanStateDB.FetchAllOpenChannels()
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -3816,7 +3813,7 @@ func (r *rpcServer) createRPCClosedChannel(
CloseInitiator: closeInitiator, CloseInitiator: closeInitiator,
} }
reports, err := r.server.remoteChanDB.FetchChannelReports( reports, err := r.server.chanStateDB.FetchChannelReports(
*r.cfg.ActiveNetParams.GenesisHash, &dbChannel.ChanPoint, *r.cfg.ActiveNetParams.GenesisHash, &dbChannel.ChanPoint,
) )
switch err { switch err {
@ -3921,7 +3918,7 @@ func (r *rpcServer) getInitiators(chanPoint *wire.OutPoint) (
// To get the close initiator for cooperative closes, we need // To get the close initiator for cooperative closes, we need
// to get the channel status from the historical channel bucket. // to get the channel status from the historical channel bucket.
histChan, err := r.server.remoteChanDB.FetchHistoricalChannel(chanPoint) histChan, err := r.server.chanStateDB.FetchHistoricalChannel(chanPoint)
switch { switch {
// The node has upgraded from a version where we did not store // The node has upgraded from a version where we did not store
// historical channels, and has not closed a channel since. Do // historical channels, and has not closed a channel since. Do
@ -3985,7 +3982,7 @@ func (r *rpcServer) SubscribeChannelEvents(req *lnrpc.ChannelEventSubscription,
// the server, or client exits. // the server, or client exits.
defer channelEventSub.Cancel() defer channelEventSub.Cancel()
graph := r.server.localChanDB.ChannelGraph() graph := r.server.graphDB
for { for {
select { select {
@ -4823,8 +4820,8 @@ func (r *rpcServer) AddInvoice(ctx context.Context,
ChainParams: r.cfg.ActiveNetParams.Params, ChainParams: r.cfg.ActiveNetParams.Params,
NodeSigner: r.server.nodeSigner, NodeSigner: r.server.nodeSigner,
DefaultCLTVExpiry: defaultDelta, DefaultCLTVExpiry: defaultDelta,
ChanDB: r.server.remoteChanDB, ChanDB: r.server.chanStateDB,
Graph: r.server.localChanDB.ChannelGraph(), Graph: r.server.graphDB,
GenInvoiceFeatures: func() *lnwire.FeatureVector { GenInvoiceFeatures: func() *lnwire.FeatureVector {
return r.server.featureMgr.Get(feature.SetInvoice) return r.server.featureMgr.Get(feature.SetInvoice)
}, },
@ -4949,7 +4946,7 @@ func (r *rpcServer) ListInvoices(ctx context.Context,
PendingOnly: req.PendingOnly, PendingOnly: req.PendingOnly,
Reversed: req.Reversed, Reversed: req.Reversed,
} }
invoiceSlice, err := r.server.remoteChanDB.QueryInvoices(q) invoiceSlice, err := r.server.chanStateDB.QueryInvoices(q)
if err != nil { if err != nil {
return nil, fmt.Errorf("unable to query invoices: %v", err) return nil, fmt.Errorf("unable to query invoices: %v", err)
} }
@ -5124,7 +5121,7 @@ func (r *rpcServer) DescribeGraph(ctx context.Context,
// Obtain the pointer to the global singleton channel graph, this will // Obtain the pointer to the global singleton channel graph, this will
// provide a consistent view of the graph due to bolt db's // provide a consistent view of the graph due to bolt db's
// transactional model. // transactional model.
graph := r.server.localChanDB.ChannelGraph() graph := r.server.graphDB
// First iterate through all the known nodes (connected or unconnected // First iterate through all the known nodes (connected or unconnected
// within the graph), collating their current state into the RPC // within the graph), collating their current state into the RPC
@ -5263,7 +5260,7 @@ func (r *rpcServer) GetNodeMetrics(ctx context.Context,
// Obtain the pointer to the global singleton channel graph, this will // Obtain the pointer to the global singleton channel graph, this will
// provide a consistent view of the graph due to bolt db's // provide a consistent view of the graph due to bolt db's
// transactional model. // transactional model.
graph := r.server.localChanDB.ChannelGraph() graph := r.server.graphDB
// Calculate betweenness centrality if requested. Note that depending on the // Calculate betweenness centrality if requested. Note that depending on the
// graph size, this may take up to a few minutes. // graph size, this may take up to a few minutes.
@ -5302,7 +5299,7 @@ func (r *rpcServer) GetNodeMetrics(ctx context.Context,
func (r *rpcServer) GetChanInfo(ctx context.Context, func (r *rpcServer) GetChanInfo(ctx context.Context,
in *lnrpc.ChanInfoRequest) (*lnrpc.ChannelEdge, error) { in *lnrpc.ChanInfoRequest) (*lnrpc.ChannelEdge, error) {
graph := r.server.localChanDB.ChannelGraph() graph := r.server.graphDB
edgeInfo, edge1, edge2, err := graph.FetchChannelEdgesByID(in.ChanId) edgeInfo, edge1, edge2, err := graph.FetchChannelEdgesByID(in.ChanId)
if err != nil { if err != nil {
@ -5322,7 +5319,7 @@ func (r *rpcServer) GetChanInfo(ctx context.Context,
func (r *rpcServer) GetNodeInfo(ctx context.Context, func (r *rpcServer) GetNodeInfo(ctx context.Context,
in *lnrpc.NodeInfoRequest) (*lnrpc.NodeInfo, error) { in *lnrpc.NodeInfoRequest) (*lnrpc.NodeInfo, error) {
graph := r.server.localChanDB.ChannelGraph() graph := r.server.graphDB
// First, parse the hex-encoded public key into a full in-memory public // First, parse the hex-encoded public key into a full in-memory public
// key object we can work with for querying. // key object we can work with for querying.
@ -5423,7 +5420,7 @@ func (r *rpcServer) QueryRoutes(ctx context.Context,
func (r *rpcServer) GetNetworkInfo(ctx context.Context, func (r *rpcServer) GetNetworkInfo(ctx context.Context,
_ *lnrpc.NetworkInfoRequest) (*lnrpc.NetworkInfo, error) { _ *lnrpc.NetworkInfoRequest) (*lnrpc.NetworkInfo, error) {
graph := r.server.localChanDB.ChannelGraph() graph := r.server.graphDB
var ( var (
numNodes uint32 numNodes uint32
@ -5735,7 +5732,7 @@ func (r *rpcServer) ListPayments(ctx context.Context,
query.MaxPayments = math.MaxUint64 query.MaxPayments = math.MaxUint64
} }
paymentsQuerySlice, err := r.server.remoteChanDB.QueryPayments(query) paymentsQuerySlice, err := r.server.chanStateDB.QueryPayments(query)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -5770,7 +5767,7 @@ func (r *rpcServer) DeleteAllPayments(ctx context.Context,
"failed_htlcs_only=%v", req.FailedPaymentsOnly, "failed_htlcs_only=%v", req.FailedPaymentsOnly,
req.FailedHtlcsOnly) req.FailedHtlcsOnly)
err := r.server.remoteChanDB.DeletePayments( err := r.server.chanStateDB.DeletePayments(
req.FailedPaymentsOnly, req.FailedHtlcsOnly, req.FailedPaymentsOnly, req.FailedHtlcsOnly,
) )
if err != nil { if err != nil {
@ -5893,7 +5890,7 @@ func (r *rpcServer) FeeReport(ctx context.Context,
rpcsLog.Debugf("[feereport]") rpcsLog.Debugf("[feereport]")
channelGraph := r.server.localChanDB.ChannelGraph() channelGraph := r.server.graphDB
selfNode, err := channelGraph.SourceNode() selfNode, err := channelGraph.SourceNode()
if err != nil { if err != nil {
return nil, err return nil, err
@ -5932,7 +5929,7 @@ func (r *rpcServer) FeeReport(ctx context.Context,
return nil, err return nil, err
} }
fwdEventLog := r.server.remoteChanDB.ForwardingLog() fwdEventLog := r.server.chanStateDB.ForwardingLog()
// computeFeeSum is a helper function that computes the total fees for // computeFeeSum is a helper function that computes the total fees for
// a particular time slice described by a forwarding event query. // a particular time slice described by a forwarding event query.
@ -6170,7 +6167,7 @@ func (r *rpcServer) ForwardingHistory(ctx context.Context,
IndexOffset: req.IndexOffset, IndexOffset: req.IndexOffset,
NumMaxEvents: numEvents, NumMaxEvents: numEvents,
} }
timeSlice, err := r.server.remoteChanDB.ForwardingLog().Query(eventQuery) timeSlice, err := r.server.chanStateDB.ForwardingLog().Query(eventQuery)
if err != nil { if err != nil {
return nil, fmt.Errorf("unable to query forwarding log: %v", err) return nil, fmt.Errorf("unable to query forwarding log: %v", err)
} }
@ -6232,7 +6229,7 @@ func (r *rpcServer) ExportChannelBackup(ctx context.Context,
// the database. If this channel has been closed, or the outpoint is // the database. If this channel has been closed, or the outpoint is
// unknown, then we'll return an error // unknown, then we'll return an error
unpackedBackup, err := chanbackup.FetchBackupForChan( unpackedBackup, err := chanbackup.FetchBackupForChan(
chanPoint, r.server.remoteChanDB, chanPoint, r.server.chanStateDB,
) )
if err != nil { if err != nil {
return nil, err return nil, err
@ -6402,7 +6399,7 @@ func (r *rpcServer) ExportAllChannelBackups(ctx context.Context,
// First, we'll attempt to read back ups for ALL currently opened // First, we'll attempt to read back ups for ALL currently opened
// channels from disk. // channels from disk.
allUnpackedBackups, err := chanbackup.FetchStaticChanBackups( allUnpackedBackups, err := chanbackup.FetchStaticChanBackups(
r.server.remoteChanDB, r.server.chanStateDB,
) )
if err != nil { if err != nil {
return nil, fmt.Errorf("unable to fetch all static chan "+ return nil, fmt.Errorf("unable to fetch all static chan "+
@ -6425,7 +6422,7 @@ func (r *rpcServer) RestoreChannelBackups(ctx context.Context,
// restore either a set of chanbackup.Single or chanbackup.Multi // restore either a set of chanbackup.Single or chanbackup.Multi
// backups. // backups.
chanRestorer := &chanDBRestorer{ chanRestorer := &chanDBRestorer{
db: r.server.remoteChanDB, db: r.server.chanStateDB,
secretKeys: r.server.cc.KeyRing, secretKeys: r.server.cc.KeyRing,
chainArb: r.server.chainArb, chainArb: r.server.chainArb,
} }
@ -6523,7 +6520,7 @@ func (r *rpcServer) SubscribeChannelBackups(req *lnrpc.ChannelBackupSubscription
// we'll obtains the current set of single channel // we'll obtains the current set of single channel
// backups from disk. // backups from disk.
chanBackups, err := chanbackup.FetchStaticChanBackups( chanBackups, err := chanbackup.FetchStaticChanBackups(
r.server.remoteChanDB, r.server.chanStateDB,
) )
if err != nil { if err != nil {
return fmt.Errorf("unable to fetch all "+ return fmt.Errorf("unable to fetch all "+

View File

@ -220,9 +220,9 @@ type server struct {
fundingMgr *funding.Manager fundingMgr *funding.Manager
localChanDB *channeldb.DB graphDB *channeldb.ChannelGraph
remoteChanDB *channeldb.DB chanStateDB *channeldb.DB
htlcSwitch *htlcswitch.Switch htlcSwitch *htlcswitch.Switch
@ -387,7 +387,7 @@ func newServer(cfg *Config, listenAddrs []net.Addr,
// the same directory as the channel graph database. We don't need to // the same directory as the channel graph database. We don't need to
// replicate this data, so we'll store it locally. // replicate this data, so we'll store it locally.
replayLog := htlcswitch.NewDecayedLog( replayLog := htlcswitch.NewDecayedLog(
cfg.localDatabaseDir(), defaultSphinxDbName, cfg.DB.Bolt, cfg.graphDatabaseDir(), defaultSphinxDbName, cfg.DB.Bolt,
cc.ChainNotifier, cc.ChainNotifier,
) )
sphinxRouter := sphinx.NewRouter( sphinxRouter := sphinx.NewRouter(
@ -435,15 +435,15 @@ func newServer(cfg *Config, listenAddrs []net.Addr,
s := &server{ s := &server{
cfg: cfg, cfg: cfg,
localChanDB: dbs.localChanDB, graphDB: dbs.graphDB.ChannelGraph(),
remoteChanDB: dbs.remoteChanDB, chanStateDB: dbs.chanStateDB,
cc: cc, cc: cc,
sigPool: lnwallet.NewSigPool(cfg.Workers.Sig, cc.Signer), sigPool: lnwallet.NewSigPool(cfg.Workers.Sig, cc.Signer),
writePool: writePool, writePool: writePool,
readPool: readPool, readPool: readPool,
chansToRestore: chansToRestore, chansToRestore: chansToRestore,
channelNotifier: channelnotifier.New(dbs.remoteChanDB), channelNotifier: channelnotifier.New(dbs.chanStateDB),
identityECDH: nodeKeyECDH, identityECDH: nodeKeyECDH,
nodeSigner: netann.NewNodeSigner(nodeKeySigner), nodeSigner: netann.NewNodeSigner(nodeKeySigner),
@ -475,7 +475,7 @@ func newServer(cfg *Config, listenAddrs []net.Addr,
} }
s.witnessBeacon = &preimageBeacon{ s.witnessBeacon = &preimageBeacon{
wCache: dbs.remoteChanDB.NewWitnessCache(), wCache: dbs.chanStateDB.NewWitnessCache(),
subscribers: make(map[uint64]*preimageSubscriber), subscribers: make(map[uint64]*preimageSubscriber),
} }
@ -489,13 +489,13 @@ func newServer(cfg *Config, listenAddrs []net.Addr,
uint32(currentHeight), currentHash, cc.ChainNotifier, uint32(currentHeight), currentHash, cc.ChainNotifier,
) )
s.invoices = invoices.NewRegistry( s.invoices = invoices.NewRegistry(
dbs.remoteChanDB, expiryWatcher, &registryConfig, dbs.chanStateDB, expiryWatcher, &registryConfig,
) )
s.htlcNotifier = htlcswitch.NewHtlcNotifier(time.Now) s.htlcNotifier = htlcswitch.NewHtlcNotifier(time.Now)
s.htlcSwitch, err = htlcswitch.New(htlcswitch.Config{ s.htlcSwitch, err = htlcswitch.New(htlcswitch.Config{
DB: dbs.remoteChanDB, DB: dbs.chanStateDB,
LocalChannelClose: func(pubKey []byte, LocalChannelClose: func(pubKey []byte,
request *htlcswitch.ChanClose) { request *htlcswitch.ChanClose) {
@ -510,7 +510,7 @@ func newServer(cfg *Config, listenAddrs []net.Addr,
peer.HandleLocalCloseChanReqs(request) peer.HandleLocalCloseChanReqs(request)
}, },
FwdingLog: dbs.remoteChanDB.ForwardingLog(), FwdingLog: dbs.chanStateDB.ForwardingLog(),
SwitchPackager: channeldb.NewSwitchPackager(), SwitchPackager: channeldb.NewSwitchPackager(),
ExtractErrorEncrypter: s.sphinx.ExtractErrorEncrypter, ExtractErrorEncrypter: s.sphinx.ExtractErrorEncrypter,
FetchLastChannelUpdate: s.fetchLastChanUpdate(), FetchLastChannelUpdate: s.fetchLastChanUpdate(),
@ -537,8 +537,8 @@ func newServer(cfg *Config, listenAddrs []net.Addr,
MessageSigner: s.nodeSigner, MessageSigner: s.nodeSigner,
IsChannelActive: s.htlcSwitch.HasActiveLink, IsChannelActive: s.htlcSwitch.HasActiveLink,
ApplyChannelUpdate: s.applyChannelUpdate, ApplyChannelUpdate: s.applyChannelUpdate,
DB: dbs.remoteChanDB, DB: dbs.chanStateDB,
Graph: dbs.localChanDB.ChannelGraph(), Graph: dbs.graphDB.ChannelGraph(),
} }
chanStatusMgr, err := netann.NewChanStatusManager(chanStatusMgrCfg) chanStatusMgr, err := netann.NewChanStatusManager(chanStatusMgrCfg)
@ -630,7 +630,7 @@ func newServer(cfg *Config, listenAddrs []net.Addr,
// As the graph can be obtained at anytime from the network, we won't // As the graph can be obtained at anytime from the network, we won't
// replicate it, and instead it'll only be stored locally. // replicate it, and instead it'll only be stored locally.
chanGraph := dbs.localChanDB.ChannelGraph() chanGraph := dbs.graphDB.ChannelGraph()
// We'll now reconstruct a node announcement based on our current // We'll now reconstruct a node announcement based on our current
// configuration so we can send it out as a sort of heart beat within // configuration so we can send it out as a sort of heart beat within
@ -697,7 +697,7 @@ func newServer(cfg *Config, listenAddrs []net.Addr,
// The router will get access to the payment ID sequencer, such that it // The router will get access to the payment ID sequencer, such that it
// can generate unique payment IDs. // can generate unique payment IDs.
sequencer, err := htlcswitch.NewPersistentSequencer(dbs.remoteChanDB) sequencer, err := htlcswitch.NewPersistentSequencer(dbs.chanStateDB)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -742,7 +742,7 @@ func newServer(cfg *Config, listenAddrs []net.Addr,
} }
s.missionControl, err = routing.NewMissionControl( s.missionControl, err = routing.NewMissionControl(
dbs.remoteChanDB, selfNode.PubKeyBytes, dbs.chanStateDB, selfNode.PubKeyBytes,
&routing.MissionControlConfig{ &routing.MissionControlConfig{
ProbabilityEstimatorCfg: estimatorCfg, ProbabilityEstimatorCfg: estimatorCfg,
MaxMcHistory: routingConfig.MaxMcHistory, MaxMcHistory: routingConfig.MaxMcHistory,
@ -775,7 +775,7 @@ func newServer(cfg *Config, listenAddrs []net.Addr,
PathFindingConfig: pathFindingConfig, PathFindingConfig: pathFindingConfig,
} }
paymentControl := channeldb.NewPaymentControl(dbs.remoteChanDB) paymentControl := channeldb.NewPaymentControl(dbs.chanStateDB)
s.controlTower = routing.NewControlTower(paymentControl) s.controlTower = routing.NewControlTower(paymentControl)
@ -803,12 +803,12 @@ func newServer(cfg *Config, listenAddrs []net.Addr,
return nil, fmt.Errorf("can't create router: %v", err) return nil, fmt.Errorf("can't create router: %v", err)
} }
chanSeries := discovery.NewChanSeries(s.localChanDB.ChannelGraph()) chanSeries := discovery.NewChanSeries(s.graphDB)
gossipMessageStore, err := discovery.NewMessageStore(s.remoteChanDB) gossipMessageStore, err := discovery.NewMessageStore(s.chanStateDB)
if err != nil { if err != nil {
return nil, err return nil, err
} }
waitingProofStore, err := channeldb.NewWaitingProofStore(s.remoteChanDB) waitingProofStore, err := channeldb.NewWaitingProofStore(s.chanStateDB)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -848,11 +848,11 @@ func newServer(cfg *Config, listenAddrs []net.Addr,
ForAllOutgoingChannels: s.chanRouter.ForAllOutgoingChannels, ForAllOutgoingChannels: s.chanRouter.ForAllOutgoingChannels,
PropagateChanPolicyUpdate: s.authGossiper.PropagateChanPolicyUpdate, PropagateChanPolicyUpdate: s.authGossiper.PropagateChanPolicyUpdate,
UpdateForwardingPolicies: s.htlcSwitch.UpdateForwardingPolicies, UpdateForwardingPolicies: s.htlcSwitch.UpdateForwardingPolicies,
FetchChannel: s.remoteChanDB.FetchChannel, FetchChannel: s.chanStateDB.FetchChannel,
} }
utxnStore, err := newNurseryStore( utxnStore, err := newNurseryStore(
s.cfg.ActiveNetParams.GenesisHash, dbs.remoteChanDB, s.cfg.ActiveNetParams.GenesisHash, dbs.chanStateDB,
) )
if err != nil { if err != nil {
srvrLog.Errorf("unable to create nursery store: %v", err) srvrLog.Errorf("unable to create nursery store: %v", err)
@ -863,7 +863,7 @@ func newServer(cfg *Config, listenAddrs []net.Addr,
sweep.DefaultBatchWindowDuration) sweep.DefaultBatchWindowDuration)
sweeperStore, err := sweep.NewSweeperStore( sweeperStore, err := sweep.NewSweeperStore(
dbs.remoteChanDB, s.cfg.ActiveNetParams.GenesisHash, dbs.chanStateDB, s.cfg.ActiveNetParams.GenesisHash,
) )
if err != nil { if err != nil {
srvrLog.Errorf("unable to create sweeper store: %v", err) srvrLog.Errorf("unable to create sweeper store: %v", err)
@ -890,8 +890,8 @@ func newServer(cfg *Config, listenAddrs []net.Addr,
s.utxoNursery = newUtxoNursery(&NurseryConfig{ s.utxoNursery = newUtxoNursery(&NurseryConfig{
ChainIO: cc.ChainIO, ChainIO: cc.ChainIO,
ConfDepth: 1, ConfDepth: 1,
FetchClosedChannels: dbs.remoteChanDB.FetchClosedChannels, FetchClosedChannels: dbs.chanStateDB.FetchClosedChannels,
FetchClosedChannel: dbs.remoteChanDB.FetchClosedChannel, FetchClosedChannel: dbs.chanStateDB.FetchClosedChannel,
Notifier: cc.ChainNotifier, Notifier: cc.ChainNotifier,
PublishTransaction: cc.Wallet.PublishTransaction, PublishTransaction: cc.Wallet.PublishTransaction,
Store: utxnStore, Store: utxnStore,
@ -1012,18 +1012,18 @@ func newServer(cfg *Config, listenAddrs []net.Addr,
PaymentsExpirationGracePeriod: cfg.PaymentsExpirationGracePeriod, PaymentsExpirationGracePeriod: cfg.PaymentsExpirationGracePeriod,
IsForwardedHTLC: s.htlcSwitch.IsForwardedHTLC, IsForwardedHTLC: s.htlcSwitch.IsForwardedHTLC,
Clock: clock.NewDefaultClock(), Clock: clock.NewDefaultClock(),
}, dbs.remoteChanDB) }, dbs.chanStateDB)
s.breachArbiter = newBreachArbiter(&BreachConfig{ s.breachArbiter = newBreachArbiter(&BreachConfig{
CloseLink: closeLink, CloseLink: closeLink,
DB: dbs.remoteChanDB, DB: dbs.chanStateDB,
Estimator: s.cc.FeeEstimator, Estimator: s.cc.FeeEstimator,
GenSweepScript: newSweepPkScriptGen(cc.Wallet), GenSweepScript: newSweepPkScriptGen(cc.Wallet),
Notifier: cc.ChainNotifier, Notifier: cc.ChainNotifier,
PublishTransaction: cc.Wallet.PublishTransaction, PublishTransaction: cc.Wallet.PublishTransaction,
ContractBreaches: contractBreaches, ContractBreaches: contractBreaches,
Signer: cc.Wallet.Cfg.Signer, Signer: cc.Wallet.Cfg.Signer,
Store: newRetributionStore(dbs.remoteChanDB), Store: newRetributionStore(dbs.chanStateDB),
}) })
// Select the configuration and furnding parameters for Bitcoin or // Select the configuration and furnding parameters for Bitcoin or
@ -1071,7 +1071,7 @@ func newServer(cfg *Config, listenAddrs []net.Addr,
FindChannel: func(chanID lnwire.ChannelID) ( FindChannel: func(chanID lnwire.ChannelID) (
*channeldb.OpenChannel, error) { *channeldb.OpenChannel, error) {
dbChannels, err := dbs.remoteChanDB.FetchAllChannels() dbChannels, err := dbs.chanStateDB.FetchAllChannels()
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -1243,10 +1243,10 @@ func newServer(cfg *Config, listenAddrs []net.Addr,
// static backup of the latest channel state. // static backup of the latest channel state.
chanNotifier := &channelNotifier{ chanNotifier := &channelNotifier{
chanNotifier: s.channelNotifier, chanNotifier: s.channelNotifier,
addrs: s.remoteChanDB, addrs: s.chanStateDB,
} }
backupFile := chanbackup.NewMultiFile(cfg.BackupFilePath) backupFile := chanbackup.NewMultiFile(cfg.BackupFilePath)
startingChans, err := chanbackup.FetchStaticChanBackups(s.remoteChanDB) startingChans, err := chanbackup.FetchStaticChanBackups(s.chanStateDB)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -1269,10 +1269,10 @@ func newServer(cfg *Config, listenAddrs []net.Addr,
SubscribePeerEvents: func() (subscribe.Subscription, error) { SubscribePeerEvents: func() (subscribe.Subscription, error) {
return s.peerNotifier.SubscribePeerEvents() return s.peerNotifier.SubscribePeerEvents()
}, },
GetOpenChannels: s.remoteChanDB.FetchAllOpenChannels, GetOpenChannels: s.chanStateDB.FetchAllOpenChannels,
Clock: clock.NewDefaultClock(), Clock: clock.NewDefaultClock(),
ReadFlapCount: s.remoteChanDB.ReadFlapCount, ReadFlapCount: s.chanStateDB.ReadFlapCount,
WriteFlapCount: s.remoteChanDB.WriteFlapCounts, WriteFlapCount: s.chanStateDB.WriteFlapCounts,
FlapCountTicker: ticker.New(chanfitness.FlapCountFlushRate), FlapCountTicker: ticker.New(chanfitness.FlapCountFlushRate),
}) })
@ -1677,7 +1677,7 @@ func (s *server) Start() error {
// that have all the information we need to handle channel // that have all the information we need to handle channel
// recovery _before_ we even accept connections from any peers. // recovery _before_ we even accept connections from any peers.
chanRestorer := &chanDBRestorer{ chanRestorer := &chanDBRestorer{
db: s.remoteChanDB, db: s.chanStateDB,
secretKeys: s.cc.KeyRing, secretKeys: s.cc.KeyRing,
chainArb: s.chainArb, chainArb: s.chainArb,
} }
@ -1722,7 +1722,7 @@ func (s *server) Start() error {
// we'll prune our set of link nodes found within the database // we'll prune our set of link nodes found within the database
// to ensure we don't reconnect to any nodes we no longer have // to ensure we don't reconnect to any nodes we no longer have
// open channels with. // open channels with.
if err := s.remoteChanDB.PruneLinkNodes(); err != nil { if err := s.chanStateDB.PruneLinkNodes(); err != nil {
startErr = err startErr = err
return return
} }
@ -2132,7 +2132,7 @@ func initNetworkBootstrappers(s *server) ([]discovery.NetworkPeerBootstrapper, e
// First, we'll create an instance of the ChannelGraphBootstrapper as // First, we'll create an instance of the ChannelGraphBootstrapper as
// this can be used by default if we've already partially seeded the // this can be used by default if we've already partially seeded the
// network. // network.
chanGraph := autopilot.ChannelGraphFromDatabase(s.localChanDB.ChannelGraph()) chanGraph := autopilot.ChannelGraphFromDatabase(s.graphDB)
graphBootstrapper, err := discovery.NewGraphBootstrapper(chanGraph) graphBootstrapper, err := discovery.NewGraphBootstrapper(chanGraph)
if err != nil { if err != nil {
return nil, err return nil, err
@ -2469,7 +2469,7 @@ func (s *server) createNewHiddenService() error {
AuthSigBytes: newNodeAnn.Signature.ToSignatureBytes(), AuthSigBytes: newNodeAnn.Signature.ToSignatureBytes(),
} }
copy(selfNode.PubKeyBytes[:], s.identityECDH.PubKey().SerializeCompressed()) copy(selfNode.PubKeyBytes[:], s.identityECDH.PubKey().SerializeCompressed())
if err := s.localChanDB.ChannelGraph().SetSourceNode(selfNode); err != nil { if err := s.graphDB.SetSourceNode(selfNode); err != nil {
return fmt.Errorf("can't set self node: %v", err) return fmt.Errorf("can't set self node: %v", err)
} }
@ -2526,7 +2526,7 @@ func (s *server) establishPersistentConnections() error {
// Iterate through the list of LinkNodes to find addresses we should // Iterate through the list of LinkNodes to find addresses we should
// attempt to connect to based on our set of previous connections. Set // attempt to connect to based on our set of previous connections. Set
// the reconnection port to the default peer port. // the reconnection port to the default peer port.
linkNodes, err := s.remoteChanDB.FetchAllLinkNodes() linkNodes, err := s.chanStateDB.FetchAllLinkNodes()
if err != nil && err != channeldb.ErrLinkNodesNotFound { if err != nil && err != channeldb.ErrLinkNodesNotFound {
return err return err
} }
@ -2542,8 +2542,7 @@ func (s *server) establishPersistentConnections() error {
// After checking our previous connections for addresses to connect to, // After checking our previous connections for addresses to connect to,
// iterate through the nodes in our channel graph to find addresses // iterate through the nodes in our channel graph to find addresses
// that have been added via NodeAnnouncement messages. // that have been added via NodeAnnouncement messages.
chanGraph := s.localChanDB.ChannelGraph() sourceNode, err := s.graphDB.SourceNode()
sourceNode, err := chanGraph.SourceNode()
if err != nil { if err != nil {
return err return err
} }
@ -3249,8 +3248,8 @@ func (s *server) peerConnected(conn net.Conn, connReq *connmgr.ConnReq,
ReadPool: s.readPool, ReadPool: s.readPool,
Switch: s.htlcSwitch, Switch: s.htlcSwitch,
InterceptSwitch: s.interceptableSwitch, InterceptSwitch: s.interceptableSwitch,
ChannelDB: s.remoteChanDB, ChannelDB: s.chanStateDB,
ChannelGraph: s.localChanDB.ChannelGraph(), ChannelGraph: s.graphDB,
ChainArb: s.chainArb, ChainArb: s.chainArb,
AuthGossiper: s.authGossiper, AuthGossiper: s.authGossiper,
ChanStatusMgr: s.chanStatusMgr, ChanStatusMgr: s.chanStatusMgr,
@ -3908,7 +3907,7 @@ func (s *server) fetchNodeAdvertisedAddr(pub *btcec.PublicKey) (net.Addr, error)
return nil, err return nil, err
} }
node, err := s.localChanDB.ChannelGraph().FetchLightningNode(nil, vertex) node, err := s.graphDB.FetchLightningNode(nil, vertex)
if err != nil { if err != nil {
return nil, err return nil, err
} }

View File

@ -92,8 +92,8 @@ func (s *subRPCServerConfigs) PopulateDependencies(cfg *Config,
chanRouter *routing.ChannelRouter, chanRouter *routing.ChannelRouter,
routerBackend *routerrpc.RouterBackend, routerBackend *routerrpc.RouterBackend,
nodeSigner *netann.NodeSigner, nodeSigner *netann.NodeSigner,
localChanDB *channeldb.DB, graphDB *channeldb.ChannelGraph,
remoteChanDB *channeldb.DB, chanStateDB *channeldb.DB,
sweeper *sweep.UtxoSweeper, sweeper *sweep.UtxoSweeper,
tower *watchtower.Standalone, tower *watchtower.Standalone,
towerClient wtclient.Client, towerClient wtclient.Client,
@ -222,11 +222,11 @@ func (s *subRPCServerConfigs) PopulateDependencies(cfg *Config,
subCfgValue.FieldByName("DefaultCLTVExpiry").Set( subCfgValue.FieldByName("DefaultCLTVExpiry").Set(
reflect.ValueOf(defaultDelta), reflect.ValueOf(defaultDelta),
) )
subCfgValue.FieldByName("LocalChanDB").Set( subCfgValue.FieldByName("GraphDB").Set(
reflect.ValueOf(localChanDB), reflect.ValueOf(graphDB),
) )
subCfgValue.FieldByName("RemoteChanDB").Set( subCfgValue.FieldByName("ChanStateDB").Set(
reflect.ValueOf(remoteChanDB), reflect.ValueOf(chanStateDB),
) )
subCfgValue.FieldByName("GenInvoiceFeatures").Set( subCfgValue.FieldByName("GenInvoiceFeatures").Set(
reflect.ValueOf(genInvoiceFeatures), reflect.ValueOf(genInvoiceFeatures),