mirror of https://github.com/lightningnetwork/lnd.git
multi: enhance logging and fix logging format
Also adds a TODO for a possible bug.
This commit is contained in:
parent d793a53bba
commit 6618ab493a
@@ -497,10 +497,10 @@ func newChainSet(chanState *channeldb.OpenChannel) (*chainSet, error) {
 			"chan_point=%v", chanState.FundingOutpoint)
 	}
 
-	log.Debugf("ChannelPoint(%v): local_commit_type=%v, local_commit=%v",
+	log.Tracef("ChannelPoint(%v): local_commit_type=%v, local_commit=%v",
 		chanState.FundingOutpoint, chanState.ChanType,
 		spew.Sdump(localCommit))
-	log.Debugf("ChannelPoint(%v): remote_commit_type=%v, remote_commit=%v",
+	log.Tracef("ChannelPoint(%v): remote_commit_type=%v, remote_commit=%v",
 		chanState.FundingOutpoint, chanState.ChanType,
 		spew.Sdump(remoteCommit))
 
@@ -527,7 +527,7 @@ func newChainSet(chanState *channeldb.OpenChannel) (*chainSet, error) {
 	var remotePendingCommit *channeldb.ChannelCommitment
 	if remoteChainTip != nil {
 		remotePendingCommit = &remoteChainTip.Commitment
-		log.Debugf("ChannelPoint(%v): remote_pending_commit_type=%v, "+
+		log.Tracef("ChannelPoint(%v): remote_pending_commit_type=%v, "+
 			"remote_pending_commit=%v", chanState.FundingOutpoint,
 			chanState.ChanType,
 			spew.Sdump(remoteChainTip.Commitment))
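The two hunks above demote full commitment dumps from debug to trace level. As an illustration only (not code from this commit; the logger type below is a made-up stand-in for lnd's leveled loggers), a minimal level-gated logger shows the effect: with the minimum level at Debug, the Tracef call is dropped entirely, so large spew dumps no longer land in everyday debug output.

package main

import (
	"fmt"
	"os"
)

type level int

const (
	levelTrace level = iota
	levelDebug
)

// logger is a hypothetical stand-in for a leveled logger: messages
// below the configured minimum level are dropped.
type logger struct{ min level }

func (l logger) logf(lv level, tag, format string, args ...interface{}) {
	if lv >= l.min {
		fmt.Fprintf(os.Stdout, tag+" "+format+"\n", args...)
	}
}

func (l logger) Tracef(format string, args ...interface{}) {
	l.logf(levelTrace, "TRC", format, args...)
}

func (l logger) Debugf(format string, args ...interface{}) {
	l.logf(levelDebug, "DBG", format, args...)
}

func main() {
	log := logger{min: levelDebug} // a typical default verbosity

	log.Debugf("ChannelPoint(%v): commit summary", "abcd:1")        // printed
	log.Tracef("ChannelPoint(%v): full dump=%v", "abcd:1", "<...>") // dropped
}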
@@ -398,6 +398,7 @@ func (m *ChanStatusManager) processEnableRequest(outpoint wire.OutPoint,
 
 	// Channel is already enabled, nothing to do.
 	case ChanStatusEnabled:
+		log.Debugf("Channel(%v) already enabled, skipped announcement", outpoint)
 		return nil
 
 	// The channel is enabled, though we are now canceling the scheduled
@@ -1898,7 +1898,7 @@ func messageSummary(msg lnwire.Message) string {
 		return fmt.Sprintf("type=%d", msg.Type)
 	}
 
-	return ""
+	return fmt.Sprintf("unknown msg type=%T", msg)
 }
 
 // logWireMessage logs the receipt or sending of particular wire message. This
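The new fallback in messageSummary reports the message's dynamic type instead of returning an empty string. A small standalone example (illustrative, not from the source; unknownMsg is a made-up type) of what the %T verb produces:

package main

import "fmt"

// unknownMsg is a hypothetical message type that messageSummary has
// no dedicated case for.
type unknownMsg struct{}

func main() {
	var msg interface{} = &unknownMsg{}

	// %T prints the dynamic Go type, e.g. "*main.unknownMsg", which
	// is far more useful in a log line than an empty string.
	fmt.Printf("unknown msg type=%T\n", msg)
}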
@@ -291,6 +291,8 @@ lifecycle:
 			continue lifecycle
 		}
 
+		log.Tracef("Found route: %s", spew.Sdump(rt.Hops))
+
 		// If this route will consume the last remaining amount to send
 		// to the receiver, this will be our last shard (for now).
 		lastShard := rt.ReceiverAmt() == currentState.remainingAmt
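The added trace line uses spew.Sdump from github.com/davecgh/go-spew/spew, which renders nested structures as a readable multi-line string. A minimal usage sketch (the hop struct here is a made-up placeholder, not lnd's route.Hop):

package main

import (
	"fmt"

	"github.com/davecgh/go-spew/spew"
)

type hop struct {
	ChanID  uint64
	AmtMsat uint64
}

func main() {
	hops := []*hop{{ChanID: 123, AmtMsat: 1_000_000}}

	// Sdump follows pointers and prints field names and values, which
	// is why the call sits behind trace level: the output is large.
	fmt.Printf("Found route: %s\n", spew.Sdump(hops))
}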
@@ -105,6 +105,8 @@ func (u *unifiedEdge) amtInRange(amt lnwire.MilliSatoshi) bool {
 	if u.capacity > 0 &&
 		amt > lnwire.NewMSatFromSatoshis(u.capacity) {
 
+		log.Tracef("Not enough capacity: amt=%v, capacity=%v",
+			amt, u.capacity)
 		return false
 	}
 
@@ -112,11 +114,15 @@ func (u *unifiedEdge) amtInRange(amt lnwire.MilliSatoshi) bool {
 	if u.policy.MessageFlags.HasMaxHtlc() &&
 		amt > u.policy.MaxHTLC {
 
+		log.Tracef("Exceeds policy's MaxHTLC: amt=%v, MaxHTLC=%v",
+			amt, u.policy.MaxHTLC)
 		return false
 	}
 
 	// Skip channels for which this htlc is too small.
 	if amt < u.policy.MinHTLC {
+		log.Tracef("Below policy's MinHTLC: amt=%v, MinHTLC=%v",
+			amt, u.policy.MinHTLC)
 		return false
 	}
 
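Taken together, amtInRange applies three gates: the channel's capacity, the policy's advertised max_htlc, and its min_htlc. A self-contained sketch of the same logic, assuming stand-in types (milliSatoshi, satoshi, and policy here are hypothetical, not lnwire's):

package main

import "fmt"

type (
	milliSatoshi uint64
	satoshi      uint64
)

// mSatFromSats mirrors lnwire.NewMSatFromSatoshis: 1 sat = 1000 msat.
func mSatFromSats(s satoshi) milliSatoshi { return milliSatoshi(s) * 1000 }

type policy struct {
	minHTLC, maxHTLC milliSatoshi
	hasMaxHTLC       bool
}

func amtInRange(amt milliSatoshi, capacity satoshi, p policy) bool {
	// Gate 1: the amount cannot exceed the channel's total capacity.
	if capacity > 0 && amt > mSatFromSats(capacity) {
		return false
	}
	// Gate 2: the amount cannot exceed the policy's max_htlc.
	if p.hasMaxHTLC && amt > p.maxHTLC {
		return false
	}
	// Gate 3: the amount cannot fall below the policy's min_htlc.
	return amt >= p.minHTLC
}

func main() {
	p := policy{minHTLC: 1_000, maxHTLC: 900_000_000, hasMaxHTLC: true}

	// 1e9 msat through a 1,000,000 sat (1e9 msat) channel: the
	// capacity gate passes, but max_htlc (9e8 msat) rejects it.
	fmt.Println(amtInRange(1_000_000_000, 1_000_000, p)) // false
}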
@@ -155,6 +161,8 @@ func (u *edgeUnifier) getEdgeLocal(amt lnwire.MilliSatoshi,
 	for _, edge := range u.edges {
 		// Check valid amount range for the channel.
 		if !edge.amtInRange(amt) {
+			log.Debugf("Amount %v not in range for edge %v",
+				amt, edge.policy.ChannelID)
 			continue
 		}
 
@@ -173,11 +181,26 @@ func (u *edgeUnifier) getEdgeLocal(amt lnwire.MilliSatoshi,
 			edge.policy.ChannelID, amt,
 		)
 		if !ok {
+			log.Debugf("Cannot get bandwidth for edge %v, "+
+				"using max instead", edge.policy.ChannelID)
 			bandwidth = lnwire.MaxMilliSatoshi
 		}
 
+		// TODO(yy): if the above `!ok` branch is taken, `bandwidth`
+		// is set to the max value, which will end up giving
+		// `maxBandwidth` the largest value, and this edge will be
+		// the chosen one. This is wrong in two ways:
+		// 1. we need to understand why `availableChanBandwidth`
+		// cannot find bandwidth for this edge, as something is
+		// wrong with this channel, and,
+		// 2. this edge is likely NOT the local channel with the
+		// highest available bandwidth.
+		//
 		// Skip channels that can't carry the payment.
 		if amt > bandwidth {
+			log.Debugf("Skipped edge %v: not enough bandwidth, "+
+				"bandwidth=%v, amt=%v", edge.policy.ChannelID,
+				bandwidth, amt)
 			continue
 		}
 
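The new TODO flags a real pitfall: substituting lnwire.MaxMilliSatoshi when the bandwidth lookup fails means that edge automatically wins the later highest-bandwidth comparison. A toy demonstration of the failure mode (all names hypothetical, not lnd code):

package main

import "fmt"

const maxMilliSatoshi = ^uint64(0) // stand-in for lnwire.MaxMilliSatoshi

func main() {
	// Known bandwidths by channel ID; channel 3 is missing, as if
	// availableChanBandwidth had failed for it.
	bandwidths := map[uint64]uint64{1: 50_000, 2: 75_000}

	var chosen, maxBandwidth uint64
	for _, chanID := range []uint64{1, 2, 3} {
		bw, ok := bandwidths[chanID]
		if !ok {
			bw = maxMilliSatoshi // the fallback from the diff
		}
		// Same selection shape as getEdgeLocal: keep the edge with
		// the highest bandwidth seen so far.
		if bw < maxBandwidth {
			continue
		}
		maxBandwidth = bw
		chosen = chanID
	}

	// Channel 3 wins despite its real bandwidth being unknown.
	fmt.Println(chosen) // 3
}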
@@ -187,6 +210,9 @@ func (u *edgeUnifier) getEdgeLocal(amt lnwire.MilliSatoshi,
 		// querying the bandwidth hints and sending out the
 		// htlc.
 		if bandwidth < maxBandwidth {
+			log.Debugf("Skipped edge %v: not max bandwidth, "+
+				"bandwidth=%v, maxBandwidth=%v",
+				edge.policy.ChannelID, bandwidth, maxBandwidth)
 			continue
 		}
 		maxBandwidth = bandwidth
@@ -213,6 +239,8 @@ func (u *edgeUnifier) getEdgeNetwork(amt lnwire.MilliSatoshi) *unifiedEdge {
 	for _, edge := range u.edges {
 		// Check valid amount range for the channel.
 		if !edge.amtInRange(amt) {
+			log.Debugf("Amount %v not in range for edge %v",
+				amt, edge.policy.ChannelID)
 			continue
 		}
 
@@ -220,6 +248,8 @@ func (u *edgeUnifier) getEdgeNetwork(amt lnwire.MilliSatoshi) *unifiedEdge {
 		edgeFlags := edge.policy.ChannelFlags
 		isDisabled := edgeFlags&lnwire.ChanUpdateDisabled != 0
 		if isDisabled {
+			log.Debugf("Skipped edge %v due to it being disabled",
+				edge.policy.ChannelID)
 			continue
 		}
 
@@ -245,6 +275,9 @@ func (u *edgeUnifier) getEdgeNetwork(amt lnwire.MilliSatoshi) *unifiedEdge {
 		// specific amount.
 		fee := edge.policy.ComputeFee(amt)
 		if fee < maxFee {
+			log.Debugf("Skipped edge %v due to lower fee: "+
+				"fee=%v, maxFee=%v",
+				edge.policy.ChannelID, fee, maxFee)
 			continue
 		}
 		maxFee = fee
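Note that this loop keeps the edge with the highest fee among parallel channels, presumably so path-finding does not underestimate the cost of a node pair. The ComputeFee call follows the standard BOLT #7 routing-fee formula: a fixed base fee in msat plus fee_proportional_millionths of the forwarded amount. A hedged worked example (computeFee below is a sketch of that formula, not lnd's exact implementation):

package main

import "fmt"

// computeFee sketches the BOLT #7 routing fee:
//
//	fee = fee_base_msat + amt_msat * fee_proportional_millionths / 1e6
func computeFee(amtMsat, baseFeeMsat, feeRatePPM uint64) uint64 {
	return baseFeeMsat + amtMsat*feeRatePPM/1_000_000
}

func main() {
	// Forwarding 100,000 sat (1e8 msat) through a channel charging a
	// 1 sat base fee and 500 ppm: 1,000 + 50,000 = 51,000 msat.
	fmt.Println(computeFee(100_000_000, 1_000, 500))
}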
@@ -198,7 +198,7 @@ func (v *ValidationBarrier) WaitForDependants(job interface{}) error {
 		vertex := route.Vertex(msg.PubKeyBytes)
 		signals, ok = v.nodeAnnDependencies[vertex]
 
-		jobDesc = fmt.Sprintf("job=channeldb.LightningNode, pub=%x",
+		jobDesc = fmt.Sprintf("job=channeldb.LightningNode, pub=%s",
 			vertex)
 
 	case *lnwire.ChannelUpdate:
@@ -210,7 +210,7 @@ func (v *ValidationBarrier) WaitForDependants(job interface{}) error {
 	case *lnwire.NodeAnnouncement:
 		vertex := route.Vertex(msg.NodeID)
 		signals, ok = v.nodeAnnDependencies[vertex]
-		jobDesc = fmt.Sprintf("job=lnwire.NodeAnnouncement, pub=%x",
+		jobDesc = fmt.Sprintf("job=lnwire.NodeAnnouncement, pub=%s",
 			vertex)
 
 	// Other types of jobs can be executed immediately, so we'll just
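Both jobDesc changes swap %x for %s when printing a route.Vertex. Vertex implements fmt.Stringer by hex-encoding itself, so %s invokes that method, while the old %x verb formatted the raw byte array directly. A small illustration with a stand-in type (vertex below is hypothetical, mirroring route.Vertex's shape):

package main

import (
	"encoding/hex"
	"fmt"
)

// vertex stands in for route.Vertex: a 33-byte compressed public key
// with a hex-encoding String method.
type vertex [33]byte

func (v vertex) String() string { return hex.EncodeToString(v[:]) }

func main() {
	var v vertex
	v[0] = 0x02

	fmt.Printf("pub=%s\n", v) // uses String(): "0200...00"
	fmt.Printf("pub=%x\n", v) // hex-dumps the raw array: same digits here
}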
server.go (11 changed lines)
@@ -3188,18 +3188,16 @@ func (s *server) prunePersistentPeerConnection(compressedPubKey [33]byte) {
 func (s *server) BroadcastMessage(skips map[route.Vertex]struct{},
 	msgs ...lnwire.Message) error {
 
-	srvrLog.Debugf("Broadcasting %v messages", len(msgs))
-
 	// Filter out peers found in the skips map. We synchronize access to
 	// peersByPub throughout this process to ensure we deliver messages to
 	// exact set of peers present at the time of invocation.
 	s.mu.RLock()
 	peers := make([]*peer.Brontide, 0, len(s.peersByPub))
-	for _, sPeer := range s.peersByPub {
+	for pubStr, sPeer := range s.peersByPub {
 		if skips != nil {
 			if _, ok := skips[sPeer.PubKey()]; ok {
-				srvrLog.Tracef("Skipping %x in broadcast",
-					sPeer.PubKey())
+				srvrLog.Debugf("Skipping %x in broadcast with "+
+					"pubStr=%x", sPeer.PubKey(), pubStr)
 				continue
 			}
 		}
@@ -3212,6 +3210,9 @@ func (s *server) BroadcastMessage(skips map[route.Vertex]struct{},
 	// all messages to each of peers.
 	var wg sync.WaitGroup
 	for _, sPeer := range peers {
+		srvrLog.Debugf("Sending %v messages to peer %x", len(msgs),
+			sPeer.PubKey())
+
 		// Dispatch a go routine to enqueue all messages to this peer.
 		wg.Add(1)
 		s.wg.Add(1)
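The removed top-level "Broadcasting" line is superseded by the per-peer debug line added above. The surrounding fan-out pattern (snapshot the peer set under a read lock, skip filtered keys, dispatch one goroutine per peer, wait on a WaitGroup) can be sketched standalone (illustrative only; lnd's real version enqueues lnwire messages on peer.Brontide):

package main

import (
	"fmt"
	"sync"
)

// broadcast fans a message out to every peer not present in skips,
// waiting until all sends complete before returning.
func broadcast(peers map[string]func(string), skips map[string]struct{}, msg string) {
	var wg sync.WaitGroup
	for pub, send := range peers {
		// Skip peers the caller explicitly excluded.
		if _, ok := skips[pub]; ok {
			fmt.Printf("Skipping %s in broadcast\n", pub)
			continue
		}

		// Dispatch a goroutine to deliver the message to this peer.
		wg.Add(1)
		go func(send func(string)) {
			defer wg.Done()
			send(msg)
		}(send)
	}
	wg.Wait()
}

func main() {
	peers := map[string]func(string){
		"alice": func(m string) { fmt.Println("alice got:", m) },
		"bob":   func(m string) { fmt.Println("bob got:", m) },
	}
	broadcast(peers, map[string]struct{}{"bob": {}}, "channel_update")
}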