lnd+channeldb: pre-allocate cache size
To avoid the channel map being re-grown repeatedly while we initially fill the cache, we pre-allocate it with a sensible default capacity, which decreases the number of map grow events.
Commit: a95a3728b5
Parent: bf27d05aa8
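The change leans on Go's capacity hint for maps: make(map[K]V, n) reserves space for roughly n entries up front, so filling the map does not trigger repeated grow-and-rehash cycles. A minimal, illustrative sketch of the effect (not lnd code; the names and sizes below are made up for the example):

```go
package main

import "fmt"

func main() {
	const expectedNodes = 15000

	// No size hint: the map starts small and is re-grown (and its
	// entries re-hashed) several times as it fills up.
	grown := make(map[int]string)

	// Size hint: enough space for expectedNodes entries is reserved
	// up front, so inserting them causes far fewer grow events.
	preAllocated := make(map[int]string, expectedNodes)

	for i := 0; i < expectedNodes; i++ {
		grown[i] = "node"
		preAllocated[i] = "node"
	}

	fmt.Println(len(grown), len(preAllocated))
}
```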
@@ -290,7 +290,7 @@ func CreateWithBackend(backend kvdb.Backend, modifiers ...OptionModifier) (*DB,
 	var err error
 	chanDB.graph, err = NewChannelGraph(
 		backend, opts.RejectCacheSize, opts.ChannelCacheSize,
-		opts.BatchCommitInterval,
+		opts.BatchCommitInterval, opts.PreAllocCacheNumNodes,
 	)
 	if err != nil {
 		return nil, err
@ -188,7 +188,8 @@ type ChannelGraph struct {
|
|||||||
// NewChannelGraph allocates a new ChannelGraph backed by a DB instance. The
|
// NewChannelGraph allocates a new ChannelGraph backed by a DB instance. The
|
||||||
// returned instance has its own unique reject cache and channel cache.
|
// returned instance has its own unique reject cache and channel cache.
|
||||||
func NewChannelGraph(db kvdb.Backend, rejectCacheSize, chanCacheSize int,
|
func NewChannelGraph(db kvdb.Backend, rejectCacheSize, chanCacheSize int,
|
||||||
batchCommitInterval time.Duration) (*ChannelGraph, error) {
|
batchCommitInterval time.Duration,
|
||||||
|
preAllocCacheNumNodes int) (*ChannelGraph, error) {
|
||||||
|
|
||||||
if err := initChannelGraph(db); err != nil {
|
if err := initChannelGraph(db); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@@ -198,7 +199,7 @@ func NewChannelGraph(db kvdb.Backend, rejectCacheSize, chanCacheSize int,
 		db:          db,
 		rejectCache: newRejectCache(rejectCacheSize),
 		chanCache:   newChannelCache(chanCacheSize),
-		graphCache:  NewGraphCache(),
+		graphCache:  NewGraphCache(preAllocCacheNumNodes),
 	}
 	g.chanScheduler = batch.NewTimeScheduler(
 		db, &g.cacheMu, batchCommitInterval,
@@ -175,10 +175,19 @@ type GraphCache struct {
 }
 
 // NewGraphCache creates a new graphCache.
-func NewGraphCache() *GraphCache {
+func NewGraphCache(preAllocNumNodes int) *GraphCache {
 	return &GraphCache{
-		nodeChannels: make(map[route.Vertex]map[uint64]*DirectedChannel),
-		nodeFeatures: make(map[route.Vertex]*lnwire.FeatureVector),
+		nodeChannels: make(
+			map[route.Vertex]map[uint64]*DirectedChannel,
+			// A channel connects two nodes, so we can look it up
+			// from both sides, meaning we get double the number of
+			// entries.
+			preAllocNumNodes*2,
+		),
+		nodeFeatures: make(
+			map[route.Vertex]*lnwire.FeatureVector,
+			preAllocNumNodes,
+		),
 	}
 }
 
@@ -97,7 +97,7 @@ func TestGraphCacheAddNode(t *testing.T) {
 		outPolicies: []*ChannelEdgePolicy{outPolicy1},
 		inPolicies:  []*ChannelEdgePolicy{inPolicy1},
 	}
-	cache := NewGraphCache()
+	cache := NewGraphCache(10)
 	require.NoError(t, cache.AddNode(nil, node))
 
 	var fromChannels, toChannels []*DirectedChannel
@@ -75,7 +75,7 @@ func MakeTestGraph(modifiers ...OptionModifier) (*ChannelGraph, func(), error) {
 
 	graph, err := NewChannelGraph(
 		backend, opts.RejectCacheSize, opts.ChannelCacheSize,
-		opts.BatchCommitInterval,
+		opts.BatchCommitInterval, opts.PreAllocCacheNumNodes,
 	)
 	if err != nil {
 		backendCleanup()
@@ -17,6 +17,12 @@ const (
 	// in order to reply to gossip queries. This produces a cache size of
 	// around 40MB.
 	DefaultChannelCacheSize = 20000
+
+	// DefaultPreAllocCacheNumNodes is the default number of channels we
+	// assume for mainnet for pre-allocating the graph cache. As of
+	// September 2021, there currently are 14k nodes in a strictly pruned
+	// graph, so we choose a number that is slightly higher.
+	DefaultPreAllocCacheNumNodes = 15000
 )
 
 // Options holds parameters for tuning and customizing a channeldb.DB.
@@ -35,6 +41,10 @@ type Options struct {
 	// wait before attempting to commit a pending set of updates.
 	BatchCommitInterval time.Duration
 
+	// PreAllocCacheNumNodes is the number of nodes we expect to be in the
+	// graph cache, so we can pre-allocate the map accordingly.
+	PreAllocCacheNumNodes int
+
 	// clock is the time source used by the database.
 	clock clock.Clock
 
@@ -54,6 +64,7 @@ func DefaultOptions() Options {
 		},
 		RejectCacheSize:       DefaultRejectCacheSize,
 		ChannelCacheSize:      DefaultChannelCacheSize,
+		PreAllocCacheNumNodes: DefaultPreAllocCacheNumNodes,
 		clock:                 clock.NewDefaultClock(),
 	}
 }
@@ -75,6 +86,13 @@ func OptionSetChannelCacheSize(n int) OptionModifier {
 	}
 }
 
+// OptionSetPreAllocCacheNumNodes sets the PreAllocCacheNumNodes to n.
+func OptionSetPreAllocCacheNumNodes(n int) OptionModifier {
+	return func(o *Options) {
+		o.PreAllocCacheNumNodes = n
+	}
+}
+
 // OptionSetSyncFreelist allows the database to sync its freelist.
 func OptionSetSyncFreelist(b bool) OptionModifier {
 	return func(o *Options) {
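The new setter follows the same functional-options pattern as the existing OptionSet* helpers: an OptionModifier is simply a closure that mutates the Options struct before the database is constructed. Below is a standalone sketch of that pattern with illustrative names (not channeldb's actual types), showing how a default is applied first and then overridden by a caller-supplied modifier:

```go
package example

// options collects the tunables; defaults are filled in first and then
// overridden by any caller-supplied modifiers.
type options struct {
	preAllocCacheNumNodes int
}

// modifier mutates a single field on options.
type modifier func(*options)

// withPreAllocCacheNumNodes returns a modifier that sets the pre-allocation
// hint for the cache map.
func withPreAllocCacheNumNodes(n int) modifier {
	return func(o *options) {
		o.preAllocCacheNumNodes = n
	}
}

// newCache applies the default, then every modifier, and finally uses the
// resulting hint to pre-size the map it returns.
// Example usage: newCache(withPreAllocCacheNumNodes(500)).
func newCache(mods ...modifier) map[string]struct{} {
	o := &options{preAllocCacheNumNodes: 15000}
	for _, mod := range mods {
		mod(o)
	}
	return make(map[string]struct{}, o.preAllocCacheNumNodes)
}
```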
lnd.go (22 changed lines)
@@ -22,6 +22,7 @@ import (
 	"sync"
 	"time"
 
+	"github.com/btcsuite/btcd/chaincfg"
 	"github.com/btcsuite/btcd/chaincfg/chainhash"
 	"github.com/btcsuite/btcutil"
 	"github.com/btcsuite/btcwallet/wallet"
@@ -1679,14 +1680,27 @@ func initializeDatabases(ctx context.Context,
 			"instances")
 	}
 
-	// Otherwise, we'll open two instances, one for the state we only need
-	// locally, and the other for things we want to ensure are replicated.
-	dbs.graphDB, err = channeldb.CreateWithBackend(
-		databaseBackends.GraphDB,
+	dbOptions := []channeldb.OptionModifier{
 		channeldb.OptionSetRejectCacheSize(cfg.Caches.RejectCacheSize),
 		channeldb.OptionSetChannelCacheSize(cfg.Caches.ChannelCacheSize),
 		channeldb.OptionSetBatchCommitInterval(cfg.DB.BatchCommitInterval),
 		channeldb.OptionDryRunMigration(cfg.DryRunMigration),
+	}
+
+	// We want to pre-allocate the channel graph cache according to what we
+	// expect for mainnet to speed up memory allocation.
+	if cfg.ActiveNetParams.Name == chaincfg.MainNetParams.Name {
+		dbOptions = append(
+			dbOptions, channeldb.OptionSetPreAllocCacheNumNodes(
+				channeldb.DefaultPreAllocCacheNumNodes,
+			),
+		)
+	}
+
+	// Otherwise, we'll open two instances, one for the state we only need
+	// locally, and the other for things we want to ensure are replicated.
+	dbs.graphDB, err = channeldb.CreateWithBackend(
+		databaseBackends.GraphDB, dbOptions...,
 	)
 	switch {
 	// Give the DB a chance to dry run the migration. Since we know that
@@ -171,7 +171,7 @@ func makeTestGraph() (*channeldb.ChannelGraph, kvdb.Backend, func(), error) {
 	opts := channeldb.DefaultOptions()
 	graph, err := channeldb.NewChannelGraph(
 		backend, opts.RejectCacheSize, opts.ChannelCacheSize,
-		opts.BatchCommitInterval,
+		opts.BatchCommitInterval, opts.PreAllocCacheNumNodes,
 	)
 	if err != nil {
 		cleanUp()
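For completeness, here is a hedged sketch of how a caller could opt in to the pre-allocation, mirroring what initializeDatabases now does for mainnet. It only uses identifiers that appear in this diff; the openGraphDB helper itself, the isMainnet flag, and the way the kvdb.Backend is obtained are assumptions made for the example, and the import paths reflect the lnd layout this commit targets:

```go
package example

import (
	"github.com/lightningnetwork/lnd/channeldb"
	"github.com/lightningnetwork/lnd/kvdb"
)

// openGraphDB is a hypothetical helper: it opens the graph database over an
// already-constructed kvdb.Backend and only requests cache pre-allocation on
// mainnet, where the node count is large enough for the size hint to matter.
func openGraphDB(backend kvdb.Backend, isMainnet bool) (*channeldb.DB, error) {
	dbOptions := []channeldb.OptionModifier{
		channeldb.OptionSetChannelCacheSize(
			channeldb.DefaultChannelCacheSize,
		),
	}

	// On other networks the graph is much smaller, so the default
	// (un-hinted) allocation behavior is left in place.
	if isMainnet {
		dbOptions = append(
			dbOptions, channeldb.OptionSetPreAllocCacheNumNodes(
				channeldb.DefaultPreAllocCacheNumNodes,
			),
		)
	}

	return channeldb.CreateWithBackend(backend, dbOptions...)
}
```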