lnd: update Main logs to use structured logging
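This commit replaces printf-style Infof/Errorf/Warnf/Debugf calls in Main with context-aware InfoS/ErrorS/WarnS/DebugS calls that attach their variable data as key/value attributes (either plain "key", value pairs or slog.String/slog.Any attributes). As a rough sketch of the same pattern using only the standard library's log/slog — the handler setup, address, and error values below are illustrative assumptions, not lnd's actual logger configuration:

package main

import (
	"errors"
	"log/slog"
	"os"
)

func main() {
	// A text handler renders attributes as key=value pairs, roughly how
	// the structured lnd log lines in the diff below end up looking.
	logger := slog.New(slog.NewTextHandler(os.Stdout, nil))

	// Before: variable data baked into a printf-style message, e.g.
	// ltndLog.Infof("Pprof listening on %v", addr).
	// After: a constant message plus typed key/value attributes that
	// downstream tooling can parse and filter.
	addr := "localhost:9736" // illustrative value, not lnd's default
	logger.Info("Pprof listening", slog.String("addr", addr))

	// Errors travel as attributes too, instead of being formatted into
	// the message string.
	err := errors.New("listen tcp: address already in use")
	logger.Error("Could not serve pprof server", slog.Any("err", err))
}

With a text handler the first call renders roughly as msg="Pprof listening" addr=localhost:9736, which is what makes the fields machine-filterable.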
lnd.go (80 changed lines)

--- a/lnd.go
+++ b/lnd.go
@@ -149,7 +149,7 @@ func Main(cfg *Config, lisCfg ListenerCfg, implCfg *ImplementationCfg,
 	interceptor signal.Interceptor) error {
 
 	defer func() {
-		ltndLog.Info("Shutdown complete\n")
+		ltndLog.Info("Shutdown complete")
 		err := cfg.LogRotator.Close()
 		if err != nil {
 			ltndLog.Errorf("Could not close log rotator: %v", err)
@@ -183,9 +183,11 @@ func Main(cfg *Config, lisCfg ListenerCfg, implCfg *ImplementationCfg,
 	}
 
 	// Show version at startup.
-	ltndLog.Infof("Version: %s commit=%s, build=%s, logging=%s, "+
-		"debuglevel=%s", build.Version(), build.Commit,
-		build.Deployment, build.LoggingType, cfg.DebugLevel)
+	ltndLog.InfoS(ctx, "Version Info",
+		slog.String("version", build.Version()),
+		slog.String("commit", build.Commit),
+		slog.Any("debuglevel", build.Deployment),
+		slog.String("logging", cfg.DebugLevel))
 
 	var network string
 	switch {
@@ -205,9 +207,9 @@ func Main(cfg *Config, lisCfg ListenerCfg, implCfg *ImplementationCfg,
 		network = "signet"
 	}
 
-	ltndLog.Infof("Active chain: %v (network=%v)",
-		strings.Title(BitcoinChainName), network,
-	)
+	ltndLog.InfoS(ctx, "Network Info",
+		"active_chain", strings.Title(BitcoinChainName),
+		"network", network)
 
 	// Enable http profiling server if requested.
 	if cfg.Pprof.Profile != "" {
@@ -233,7 +235,7 @@ func Main(cfg *Config, lisCfg ListenerCfg, implCfg *ImplementationCfg,
 			"/debug/pprof/", http.StatusSeeOther,
 		))
 
-		ltndLog.Infof("Pprof listening on %v", cfg.Pprof.Profile)
+		ltndLog.InfoS(ctx, "Pprof listening", "addr", cfg.Pprof.Profile)
 
 		// Create the pprof server.
 		pprofServer := &http.Server{
@@ -244,11 +246,10 @@ func Main(cfg *Config, lisCfg ListenerCfg, implCfg *ImplementationCfg,
 
 		// Shut the server down when lnd is shutting down.
 		defer func() {
-			ltndLog.Info("Stopping pprof server...")
+			ltndLog.InfoS(ctx, "Stopping pprof server...")
 			err := pprofServer.Shutdown(ctx)
 			if err != nil {
-				ltndLog.Errorf("Stop pprof server got err: %v",
-					err)
+				ltndLog.ErrorS(ctx, "Stop pprof server", err)
 			}
 		}()
 
@@ -256,7 +257,8 @@ func Main(cfg *Config, lisCfg ListenerCfg, implCfg *ImplementationCfg,
 		go func() {
 			err := pprofServer.ListenAndServe()
 			if err != nil && !errors.Is(err, http.ErrServerClosed) {
-				ltndLog.Errorf("Serving pprof got err: %v", err)
+				ltndLog.ErrorS(ctx, "Could not serve pprof "+
+					"server", err)
 			}
 		}()
 	}
@@ -420,8 +422,8 @@ func Main(cfg *Config, lisCfg ListenerCfg, implCfg *ImplementationCfg,
 			cancelElection()
 		}()
 
-		ltndLog.Infof("Using %v leader elector",
-			cfg.Cluster.LeaderElector)
+		ltndLog.InfoS(ctx, "Using leader elector",
+			"elector", cfg.Cluster.LeaderElector)
 
 		leaderElector, err = cfg.Cluster.MakeLeaderElector(
 			electionCtx, cfg.DB,
@@ -435,8 +437,8 @@ func Main(cfg *Config, lisCfg ListenerCfg, implCfg *ImplementationCfg,
 				return
 			}
 
-			ltndLog.Infof("Attempting to resign from leader role "+
-				"(%v)", cfg.Cluster.ID)
+			ltndLog.InfoS(ctx, "Attempting to resign from "+
+				"leader role", "cluster_id", cfg.Cluster.ID)
 
 			// Ensure that we don't block the shutdown process if
 			// the leader resigning process takes too long. The
@@ -454,21 +456,23 @@ func Main(cfg *Config, lisCfg ListenerCfg, implCfg *ImplementationCfg,
 			}
 		}()
 
-		ltndLog.Infof("Starting leadership campaign (%v)",
-			cfg.Cluster.ID)
+		ltndLog.InfoS(ctx, "Starting leadership campaign",
+			"cluster_id", cfg.Cluster.ID)
 
 		if err := leaderElector.Campaign(electionCtx); err != nil {
 			return mkErr("leadership campaign failed", err)
 		}
 
 		elected = true
-		ltndLog.Infof("Elected as leader (%v)", cfg.Cluster.ID)
+		ltndLog.InfoS(ctx, "Elected as leader",
+			"cluster_id", cfg.Cluster.ID)
 	}
 
 	dbs, cleanUp, err := implCfg.DatabaseBuilder.BuildDatabase(ctx)
 	switch {
 	case err == channeldb.ErrDryRunMigrationOK:
-		ltndLog.Infof("%v, exiting", err)
+		ltndLog.InfoS(ctx, "Exiting due to BuildDatabase error",
+			slog.Any("err", err))
 		return nil
 	case err != nil:
 		return mkErr("unable to open databases", err)
@@ -512,14 +516,14 @@ func Main(cfg *Config, lisCfg ListenerCfg, implCfg *ImplementationCfg,
 
 	if cfg.Tor.Active {
 		if cfg.Tor.SkipProxyForClearNetTargets {
-			srvrLog.Info("Onion services are accessible via Tor! " +
-				"NOTE: Traffic to clearnet services is not " +
-				"routed via Tor.")
+			srvrLog.InfoS(ctx, "Onion services are accessible "+
+				"via Tor! NOTE: Traffic to clearnet services "+
+				"is not routed via Tor.")
 		} else {
-			srvrLog.Infof("Proxying all network traffic via Tor "+
-				"(stream_isolation=%v)! NOTE: Ensure the "+
-				"backend node is proxying over Tor as well",
-				cfg.Tor.StreamIsolation)
+			srvrLog.InfoS(ctx, "Proxying all network traffic "+
+				"via Tor! NOTE: Ensure the backend node is "+
+				"proxying over Tor as well",
+				"stream_isolation", cfg.Tor.StreamIsolation)
 		}
 	}
 
@@ -541,8 +545,8 @@ func Main(cfg *Config, lisCfg ListenerCfg, implCfg *ImplementationCfg,
 		}
 		defer func() {
 			if err := torController.Stop(); err != nil {
-				ltndLog.Errorf("error stopping tor "+
-					"controller: %v", err)
+				ltndLog.ErrorS(ctx, "Error stopping tor "+
+					"controller", err)
 			}
 		}()
 	}
@@ -684,8 +688,8 @@ func Main(cfg *Config, lisCfg ListenerCfg, implCfg *ImplementationCfg,
 		return mkErr("unable to determine chain tip", err)
 	}
 
-	ltndLog.Infof("Waiting for chain backend to finish sync, "+
-		"start_height=%v", bestHeight)
+	ltndLog.InfoS(ctx, "Waiting for chain backend to finish sync",
+		slog.Int64("start_height", int64(bestHeight)))
 
 	type syncResult struct {
 		synced bool
@@ -715,9 +719,9 @@ func Main(cfg *Config, lisCfg ListenerCfg, implCfg *ImplementationCfg,
 				"is synced", res.err)
 		}
 
-		ltndLog.Debugf("Syncing to block timestamp: %v, is "+
-			"synced=%v", time.Unix(res.bestBlockTime, 0),
-			res.synced)
+		ltndLog.DebugS(ctx, "Syncing to block chain",
+			"best_block_time", time.Unix(res.bestBlockTime, 0),
+			"is_synced", res.synced)
 
 		if res.synced {
 			break
@@ -742,8 +746,8 @@ func Main(cfg *Config, lisCfg ListenerCfg, implCfg *ImplementationCfg,
 		return mkErr("unable to determine chain tip", err)
 	}
 
-	ltndLog.Infof("Chain backend is fully synced (end_height=%v)!",
-		bestHeight)
+	ltndLog.InfoS(ctx, "Chain backend is fully synced!",
+		"end_height", bestHeight)
 
 	// With all the relevant chains initialized, we can finally start the
 	// server itself. We start the server in an asynchronous goroutine so
@@ -757,8 +761,8 @@ func Main(cfg *Config, lisCfg ListenerCfg, implCfg *ImplementationCfg,
 	defer func() {
 		err := server.Stop()
 		if err != nil {
-			ltndLog.Warnf("Stopping the server including all "+
-				"its subsystems failed with %v", err)
+			ltndLog.WarnS(ctx, "Stopping the server including all "+
+				"its subsystems failed with", err)
 		}
 	}()
 
|