mirror of https://github.com/lightningnetwork/lnd.git, synced 2025-09-23
multi: further decouple graph
To further separate the channel graph from the channel state, we refactor the AddrsForNode method to use the graph's public methods instead of directly accessing any buckets. This ensures that the channel state can be cached with just its buckets, without needing a kvdb-level cache for the graph. At the same time, we refactor the graph's tests to be less dependent on the channel state DB.
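
To make the new access pattern concrete, here is a minimal sketch (not part of the commit) of resolving a peer's graph addresses purely through the graph's exported API. route.NewVertexFromBytes and FetchLightningNode come from the diff below; the helper itself and its error handling are illustrative assumptions.

package channeldb

import (
	"net"

	"github.com/btcsuite/btcd/btcec"
	"github.com/lightningnetwork/lnd/routing/route"
)

// graphAddrsForNode is a hypothetical helper: it resolves a peer's
// advertised addresses purely through the graph's exported methods,
// never touching the underlying buckets directly.
func graphAddrsForNode(graph *ChannelGraph, nodePub *btcec.PublicKey) (
	[]net.Addr, error) {

	pubKey, err := route.NewVertexFromBytes(nodePub.SerializeCompressed())
	if err != nil {
		return nil, err
	}

	node, err := graph.FetchLightningNode(pubKey)
	switch {
	case err == ErrGraphNodeNotFound:
		// The peer not being in the graph just means it has no
		// advertised addresses; that is not a failure.
		return nil, nil

	case err != nil:
		return nil, err
	}

	return node.Addresses, nil
}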
@@ -23,6 +23,7 @@ import (
 	"github.com/lightningnetwork/lnd/clock"
 	"github.com/lightningnetwork/lnd/kvdb"
 	"github.com/lightningnetwork/lnd/lnwire"
+	"github.com/lightningnetwork/lnd/routing/route"
 )
 
 const (
@@ -286,10 +287,14 @@ func CreateWithBackend(backend kvdb.Backend, modifiers ...OptionModifier) (*DB,
 	// Set the parent pointer (only used in tests).
 	chanDB.channelStateDB.parent = chanDB
 
-	chanDB.graph = newChannelGraph(
+	var err error
+	chanDB.graph, err = NewChannelGraph(
 		backend, opts.RejectCacheSize, opts.ChannelCacheSize,
 		opts.BatchCommitInterval,
 	)
+	if err != nil {
+		return nil, err
+	}
 
 	// Synchronize the version of database and apply migrations if needed.
 	if err := chanDB.syncVersions(dbVersions); err != nil {
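
Since NewChannelGraph is now exported and fallible, the graph can in principle be opened on its own backend, separate from the channel state DB. A minimal sketch under that assumption, living in package channeldb and assuming the package's Default*CacheSize constants; the helper name and interval value are illustrative:

// openGraph is a hypothetical helper showing standalone construction of
// the graph. The backend passed in does not have to be the same one that
// backs the channel state DB, which is the point of the decoupling.
func openGraph(backend kvdb.Backend) (*ChannelGraph, error) {
	graph, err := NewChannelGraph(
		backend, DefaultRejectCacheSize, DefaultChannelCacheSize,
		500*time.Millisecond, // Illustrative batch commit interval.
	)
	if err != nil {
		return nil, err
	}

	return graph, nil
}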
@@ -305,7 +310,7 @@ func (d *DB) Path() string {
 	return d.dbPath
 }
 
-var topLevelBuckets = [][]byte{
+var dbTopLevelBuckets = [][]byte{
 	openChannelBucket,
 	closedChannelBucket,
 	forwardingLogBucket,
@@ -316,10 +321,6 @@ var topLevelBuckets = [][]byte{
 	paymentsIndexBucket,
 	peersBucket,
 	nodeInfoBucket,
-	nodeBucket,
-	edgeBucket,
-	edgeIndexBucket,
-	graphMetaBucket,
 	metaBucket,
 	closeSummaryBucket,
 	outpointBucket,
@@ -330,7 +331,7 @@ var topLevelBuckets = [][]byte{
 // operation is fully atomic.
 func (d *DB) Wipe() error {
 	err := kvdb.Update(d, func(tx kvdb.RwTx) error {
-		for _, tlb := range topLevelBuckets {
+		for _, tlb := range dbTopLevelBuckets {
 			err := tx.DeleteTopLevelBucket(tlb)
 			if err != nil && err != kvdb.ErrBucketNotFound {
 				return err
@@ -358,42 +359,12 @@ func initChannelDB(db kvdb.Backend) error {
 			return nil
 		}
 
-		for _, tlb := range topLevelBuckets {
+		for _, tlb := range dbTopLevelBuckets {
 			if _, err := tx.CreateTopLevelBucket(tlb); err != nil {
 				return err
 			}
 		}
 
-		nodes := tx.ReadWriteBucket(nodeBucket)
-		_, err = nodes.CreateBucket(aliasIndexBucket)
-		if err != nil {
-			return err
-		}
-		_, err = nodes.CreateBucket(nodeUpdateIndexBucket)
-		if err != nil {
-			return err
-		}
-
-		edges := tx.ReadWriteBucket(edgeBucket)
-		if _, err := edges.CreateBucket(edgeIndexBucket); err != nil {
-			return err
-		}
-		if _, err := edges.CreateBucket(edgeUpdateIndexBucket); err != nil {
-			return err
-		}
-		if _, err := edges.CreateBucket(channelPointBucket); err != nil {
-			return err
-		}
-		if _, err := edges.CreateBucket(zombieBucket); err != nil {
-			return err
-		}
-
-		graphMeta := tx.ReadWriteBucket(graphMetaBucket)
-		_, err = graphMeta.CreateBucket(pruneLogBucket)
-		if err != nil {
-			return err
-		}
-
 		meta.DbVersionNumber = getLatestDBVersion(dbVersions)
 		return putMeta(meta, tx)
 	}, func() {})
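
The graph buckets dropped from initChannelDB above still have to be created somewhere; presumably the graph now seeds them itself when NewChannelGraph runs. A sketch of what such an initializer could look like — the helper name is an assumption, and only the kvdb bucket API already visible in this diff is used:

// initGraphBuckets is a hypothetical stand-in for wherever the graph
// now creates its own buckets. It mirrors the logic removed above,
// using CreateBucketIfNotExists so it is safe to run on reopen.
func initGraphBuckets(db kvdb.Backend) error {
	return kvdb.Update(db, func(tx kvdb.RwTx) error {
		nodes, err := tx.CreateTopLevelBucket(nodeBucket)
		if err != nil {
			return err
		}
		if _, err := nodes.CreateBucketIfNotExists(aliasIndexBucket); err != nil {
			return err
		}
		if _, err := nodes.CreateBucketIfNotExists(nodeUpdateIndexBucket); err != nil {
			return err
		}

		edges, err := tx.CreateTopLevelBucket(edgeBucket)
		if err != nil {
			return err
		}
		for _, sub := range [][]byte{
			edgeIndexBucket, edgeUpdateIndexBucket,
			channelPointBucket, zombieBucket,
		} {
			if _, err := edges.CreateBucketIfNotExists(sub); err != nil {
				return err
			}
		}

		graphMeta, err := tx.CreateTopLevelBucket(graphMetaBucket)
		if err != nil {
			return err
		}
		_, err = graphMeta.CreateBucketIfNotExists(pruneLogBucket)
		return err
	}, func() {})
}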
@@ -1157,30 +1128,21 @@ func (d *DB) AddrsForNode(nodePub *btcec.PublicKey) ([]net.Addr,
 		return nil, err
 	}
 
-	var graphNode LightningNode
-	err = kvdb.View(d, func(tx kvdb.RTx) error {
-		// We'll also query the graph for this peer to see if they have
-		// any addresses that we don't currently have stored within the
-		// link node database.
-		nodes := tx.ReadBucket(nodeBucket)
-		if nodes == nil {
-			return ErrGraphNotFound
-		}
-		compressedPubKey := nodePub.SerializeCompressed()
-		graphNode, err = fetchLightningNode(nodes, compressedPubKey)
-		if err != nil && err != ErrGraphNodeNotFound {
-			// If the node isn't found, then that's OK, as we still
-			// have the link node data.
-			return err
-		}
-
-		return nil
-	}, func() {
-		linkNode = nil
-	})
-	if err != nil {
-		return nil, err
-	}
+	// We'll also query the graph for this peer to see if they have any
+	// addresses that we don't currently have stored within the link node
+	// database.
+	pubKey, err := route.NewVertexFromBytes(nodePub.SerializeCompressed())
+	if err != nil {
+		return nil, err
+	}
+	graphNode, err := d.graph.FetchLightningNode(pubKey)
+	if err != nil && err != ErrGraphNodeNotFound {
+		return nil, err
+	} else if err == ErrGraphNodeNotFound {
+		// If the node isn't found, then that's OK, as we still have the
+		// link node data. But any other error needs to be returned.
+		graphNode = &LightningNode{}
+	}
 
 	// Now that we have both sources of addrs for this node, we'll use a
 	// map to de-duplicate any addresses between the two sources, and
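
The hunk is cut off mid-comment; the de-duplication it describes plausibly proceeds along these lines. A sketch of the pattern (not the commit's verbatim tail), which would be invoked as dedupAddrs(linkNode.Addresses, graphNode.Addresses), assuming net is imported:

// dedupAddrs is a hypothetical helper mirroring the merge step the
// truncated comment above describes: addresses from the link node DB
// and from the graph are keyed by their string form so duplicates
// collapse, then flattened back into a slice.
func dedupAddrs(linkAddrs, graphAddrs []net.Addr) []net.Addr {
	addrs := make(map[string]net.Addr)
	for _, addr := range linkAddrs {
		addrs[addr.String()] = addr
	}
	for _, addr := range graphAddrs {
		addrs[addr.String()] = addr
	}

	deduped := make([]net.Addr, 0, len(addrs))
	for _, addr := range addrs {
		deduped = append(deduped, addr)
	}

	return deduped
}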