rpc: optimize DescribeGraph by caching the response
Unfortunately, we can't use the graph cache directly here, since it only holds the minimal amount of information needed for path finding and not everything DescribeGraph has to return. Instead, we use a simple mutex-guarded cache that evicts its contents after a configurable interval. The default interval is set small enough that most users shouldn't notice. We also provide a way to disable the cache entirely, and disable it in the itests.

Fixes https://github.com/lightningnetwork/lnd/issues/1232
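As a minimal sketch of the idea (illustrative names only, not lnd's actual types): the assembled response lives behind a mutex and is handed back directly on a cache hit, while the eviction timer added in the diff below simply resets the pointer to nil.

package graphcache

import "sync"

// channelGraph is a stand-in for *lnrpc.ChannelGraph in this sketch.
type channelGraph struct {
	nodes, edges int
}

// cachedGraph keeps the last computed response behind a mutex; the evictor
// (see the addDeps hunk below) clears resp after the configured interval.
type cachedGraph struct {
	mu   sync.Mutex
	resp *channelGraph
}

// get returns the cached response when present, otherwise builds it and
// caches it within the same critical section.
func (c *cachedGraph) get(build func() (*channelGraph, error)) (*channelGraph, error) {
	c.mu.Lock()
	defer c.mu.Unlock()

	if c.resp != nil {
		return c.resp, nil
	}

	resp, err := build()
	if err != nil {
		return nil, err
	}
	c.resp = resp

	return resp, nil
}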
rpcserver.go | 42 lines changed (42 additions, 0 deletions)
@@ -628,6 +628,10 @@ type rpcServer struct {
	// interceptor is used to be able to request a shutdown
	interceptor signal.Interceptor

	graphCache        sync.RWMutex
	describeGraphResp *lnrpc.ChannelGraph
	graphCacheEvictor *time.Timer
}

// A compile time check to ensure that rpcServer fully implements the
@@ -813,6 +817,23 @@ func (r *rpcServer) addDeps(s *server, macService *macaroons.Service,
	r.chanPredicate = chanPredicate
	r.macService = macService
	r.selfNode = selfNode.PubKeyBytes

	graphCacheDuration := r.cfg.Caches.RPCGraphCacheDuration
	if graphCacheDuration != 0 {
		r.graphCacheEvictor = time.AfterFunc(graphCacheDuration, func() {
			// Grab the mutex and purge the current populated
			// describe graph response.
			r.graphCache.Lock()
			defer r.graphCache.Unlock()

			r.describeGraphResp = nil

			// Reset ourselves as well at the end so we run again
			// after the duration.
			r.graphCacheEvictor.Reset(graphCacheDuration)
		})
	}

	return nil
}
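The eviction above relies on a self-rearming timer: time.AfterFunc fires the callback once after the given duration, and calling Reset from inside the callback schedules the next run, so no dedicated goroutine or time.Ticker is needed. A standalone sketch of that pattern, with illustrative names only:

package evictor

import (
	"sync"
	"time"
)

// timedCache mirrors the pattern used in addDeps above: a value behind a
// mutex plus a timer that purges it and re-arms itself after each run.
type timedCache struct {
	mu      sync.Mutex
	value   []byte
	evictor *time.Timer
}

// newTimedCache starts the eviction loop; a zero ttl disables it, which
// matches how the diff guards the time.AfterFunc call.
func newTimedCache(ttl time.Duration) *timedCache {
	c := &timedCache{}
	if ttl != 0 {
		c.evictor = time.AfterFunc(ttl, func() {
			c.mu.Lock()
			defer c.mu.Unlock()

			// Drop the cached value so the next lookup rebuilds it.
			c.value = nil

			// Re-arm so the purge runs again one interval from now.
			c.evictor.Reset(ttl)
		})
	}
	return c
}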
@@ -5381,6 +5402,20 @@ func (r *rpcServer) DescribeGraph(ctx context.Context,
	resp := &lnrpc.ChannelGraph{}
	includeUnannounced := req.IncludeUnannounced

	// Check to see if the cache is already populated, if so then we can
	// just return it directly.
	//
	// TODO(roasbeef): move this to an interceptor level feature?
	graphCacheActive := r.cfg.Caches.RPCGraphCacheDuration != 0
	if graphCacheActive {
		r.graphCache.Lock()
		defer r.graphCache.Unlock()

		if r.describeGraphResp != nil {
			return r.describeGraphResp, nil
		}
	}

	// Obtain the pointer to the global singleton channel graph, this will
	// provide a consistent view of the graph due to bolt db's
	// transactional model.
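Note that the cache is toggled purely by the configured duration: a zero value skips the branch above entirely so every call rebuilds the response, which is how the itests disable it per the commit message. A hedged stand-in for the config field the diff reads (the real lnd config struct is larger, and the lnd.conf option name isn't shown here):

package graphcacheconfig

import "time"

// cachesConfig is only a stand-in for the struct behind
// r.cfg.Caches.RPCGraphCacheDuration in the diff above.
type cachesConfig struct {
	RPCGraphCacheDuration time.Duration
}

var (
	// Zero disables DescribeGraph caching altogether.
	disabled = cachesConfig{RPCGraphCacheDuration: 0}

	// Any non-zero value enables the cache and doubles as the eviction
	// interval used by the timer in addDeps.
	enabled = cachesConfig{RPCGraphCacheDuration: 30 * time.Second}
)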
@@ -5439,6 +5474,13 @@ func (r *rpcServer) DescribeGraph(ctx context.Context,
		return nil, err
	}

	// We still have the mutex held, so we can safely populate the cache
	// now to save on GC churn for this query, but only if the cache isn't
	// disabled.
	if graphCacheActive {
		r.describeGraphResp = resp
	}

	return resp, nil
}
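From a client's point of view nothing changes except latency: with the cache enabled, repeated DescribeGraph calls within one eviction interval are answered from the cached response instead of being rebuilt from the database. A hedged usage sketch; establishing conn with the node's TLS cert and macaroon is assumed to happen elsewhere.

package graphclient

import (
	"context"
	"fmt"
	"time"

	"github.com/lightningnetwork/lnd/lnrpc"
	"google.golang.org/grpc"
)

// dumpGraphTwice calls DescribeGraph twice in a row; with caching enabled the
// second call should be noticeably cheaper since it is served from the cache.
func dumpGraphTwice(ctx context.Context, conn *grpc.ClientConn) error {
	client := lnrpc.NewLightningClient(conn)
	req := &lnrpc.ChannelGraphRequest{IncludeUnannounced: false}

	for i := 0; i < 2; i++ {
		start := time.Now()
		graph, err := client.DescribeGraph(ctx, req)
		if err != nil {
			return err
		}
		fmt.Printf("call %d: %d nodes, %d channels in %v\n",
			i+1, len(graph.Nodes), len(graph.Edges), time.Since(start))
	}

	return nil
}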