From 50186ba834a77974c62ebaa72d78f97cab1be4e1 Mon Sep 17 00:00:00 2001 From: nsa Date: Wed, 24 Jun 2020 23:47:07 -0400 Subject: [PATCH 01/15] lnpeer/peer.go: modifying interface comment --- lnpeer/peer.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/lnpeer/peer.go b/lnpeer/peer.go index 1b118bfb1..fb6589dd2 100644 --- a/lnpeer/peer.go +++ b/lnpeer/peer.go @@ -9,8 +9,7 @@ import ( "github.com/lightningnetwork/lnd/lnwire" ) -// Peer is an interface which represents the remote lightning node inside our -// system. +// Peer is an interface which represents a remote lightning node. type Peer interface { // SendMessage sends a variadic number of high-priority message to // remote peer. The first argument denotes if the method should block From 01415f4a5fbe0aaa28bca47d040aff828106f4b2 Mon Sep 17 00:00:00 2001 From: nsa Date: Thu, 25 Jun 2020 15:50:08 -0400 Subject: [PATCH 02/15] peer: define Config and LinkUpdater interface This commit defines a Config for initializing a peer and the LinkUpdater interface. --- peer/config.go | 228 +++++++++++++++++++++++++++++++++++++++++++++ peer/interfaces.go | 11 +++ 2 files changed, 239 insertions(+) create mode 100644 peer/config.go create mode 100644 peer/interfaces.go diff --git a/peer/config.go b/peer/config.go new file mode 100644 index 000000000..8c0f93139 --- /dev/null +++ b/peer/config.go @@ -0,0 +1,228 @@ +package peer + +import ( + "net" + "time" + + "github.com/btcsuite/btcd/btcec" + "github.com/btcsuite/btcd/connmgr" + + "github.com/lightningnetwork/lnd/chainntnfs" + "github.com/lightningnetwork/lnd/channeldb" + "github.com/lightningnetwork/lnd/channelnotifier" + "github.com/lightningnetwork/lnd/contractcourt" + "github.com/lightningnetwork/lnd/discovery" + "github.com/lightningnetwork/lnd/htlcswitch" + "github.com/lightningnetwork/lnd/htlcswitch/hodl" + "github.com/lightningnetwork/lnd/htlcswitch/hop" + "github.com/lightningnetwork/lnd/input" + "github.com/lightningnetwork/lnd/invoices" + "github.com/lightningnetwork/lnd/lnpeer" + "github.com/lightningnetwork/lnd/lnwallet" + "github.com/lightningnetwork/lnd/lnwallet/chainfee" + "github.com/lightningnetwork/lnd/lnwire" + "github.com/lightningnetwork/lnd/netann" + "github.com/lightningnetwork/lnd/pool" + "github.com/lightningnetwork/lnd/queue" + "github.com/lightningnetwork/lnd/watchtower/wtclient" +) + +// Config defines configuration fields that are necessary for a peer object +// to function. +type Config struct { + // Conn is the underlying network connection for this peer. + Conn net.Conn + + // ConnReq stores information related to the persistent connection request + // for this peer. + ConnReq *connmgr.ConnReq + + // PubKeyBytes is the serialized, compressed public key of this peer. + PubKeyBytes [33]byte + + // Addr is the network address of the peer. + Addr *lnwire.NetAddress + + // Inbound indicates whether or not the peer is an inbound peer. + Inbound bool + + // Features is the set of features that we advertise to the remote party. + Features *lnwire.FeatureVector + + // LegacyFeatures is the set of features that we advertise to the remote + // peer for backwards compatibility. Nodes that have not implemented + // flat features will still be able to read our feature bits from the + // legacy global field, but we will also advertise everything in the + // default features field. + LegacyFeatures *lnwire.FeatureVector + + // OutgoingCltvRejectDelta defines the number of blocks before expiry of + // an htlc where we don't offer it anymore. 
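+	//
+	// (Worked example with illustrative numbers: with a reject delta of 3,
+	// an htlc expiring at height 102 is no longer offered once the best
+	// height reaches 100, since fewer than 3 blocks would remain.)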
+ OutgoingCltvRejectDelta uint32 + + // ChanActiveTimeout specifies the duration the peer will wait to request + // a channel reenable, beginning from the time the peer was started. + ChanActiveTimeout time.Duration + + // ErrorBuffer stores a set of errors related to a peer. It contains error + // messages that our peer has recently sent us over the wire and records of + // unknown messages that were sent to us so that we can have a full track + // record of the communication errors we have had with our peer. If we + // choose to disconnect from a peer, it also stores the reason we had for + // disconnecting. + ErrorBuffer *queue.CircularBuffer + + // WritePool is the task pool that manages reuse of write buffers. Write + // tasks are submitted to the pool in order to conserve the total number of + // write buffers allocated at any one time, and decouple write buffer + // allocation from the peer life cycle. + WritePool *pool.Write + + // ReadPool is the task pool that manages reuse of read buffers. + ReadPool *pool.Read + + // Switch is a pointer to the htlcswitch. It is used to setup, get, and + // tear-down ChannelLinks. + Switch *htlcswitch.Switch + + // InterceptSwitch is a pointer to the InterceptableSwitch, a wrapper around + // the regular Switch. We only export it here to pass ForwardPackets to the + // ChannelLinkConfig. + InterceptSwitch *htlcswitch.InterceptableSwitch + + // ChannelDB is used to fetch opened channels, closed channels, and the + // channel graph. + ChannelDB *channeldb.DB + + // ChainArb is used to subscribe to channel events, update contract signals, + // and force close channels. + ChainArb *contractcourt.ChainArbitrator + + // AuthGossiper is needed so that the Brontide impl can register with the + // gossiper and process remote channel announcements. + AuthGossiper *discovery.AuthenticatedGossiper + + // ChanStatusMgr is used to set or un-set the disabled bit in channel + // updates. + ChanStatusMgr *netann.ChanStatusManager + + // ChainIO is used to retrieve the best block. + ChainIO lnwallet.BlockChainIO + + // FeeEstimator is used to compute our target ideal fee-per-kw when + // initializing the coop close process. + FeeEstimator chainfee.Estimator + + // Signer is used when creating *lnwallet.LightningChannel instances. + Signer input.Signer + + // SigPool is used when creating *lnwallet.LightningChannel instances. + SigPool *lnwallet.SigPool + + // Wallet is used to publish transactions and generate delivery scripts + // during the coop close process. + Wallet *lnwallet.LightningWallet + + // ChainNotifier is used to receive confirmations of a coop close + // transaction. + ChainNotifier chainntnfs.ChainNotifier + + // RoutingPolicy is used to set the forwarding policy for links created by + // the Brontide. + RoutingPolicy htlcswitch.ForwardingPolicy + + // Sphinx is used when setting up ChannelLinks so they can decode sphinx + // onion blobs. + Sphinx *hop.OnionProcessor + + // WitnessBeacon is used when setting up ChannelLinks so they can add any + // preimages that they learn. + WitnessBeacon contractcourt.WitnessBeacon + + // Invoices is passed to the ChannelLink on creation and handles all + // invoice-related logic. + Invoices *invoices.InvoiceRegistry + + // ChannelNotifier is used by the link to notify other sub-systems about + // channel-related events and by the Brontide to subscribe to + // ActiveLinkEvents. + ChannelNotifier *channelnotifier.ChannelNotifier + + // HtlcNotifier is used when creating a ChannelLink. 
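+	// (It surfaces htlc-level events, such as forwards, settles, and
+	// fails, to interested subscribers.)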
+ HtlcNotifier *htlcswitch.HtlcNotifier + + // TowerClient is used when creating a ChannelLink. + TowerClient wtclient.Client + + // DisconnectPeer is used to disconnect this peer if the cooperative close + // process fails. + DisconnectPeer func(*btcec.PublicKey) error + + // GenNodeAnnouncement is used to send our node announcement to the remote + // on startup. + GenNodeAnnouncement func(bool, + ...netann.NodeAnnModifier) (lnwire.NodeAnnouncement, error) + + // PrunePersistentPeerConnection is used to remove all internal state + // related to this peer in the server. + PrunePersistentPeerConnection func([33]byte) + + // FetchLastChanUpdate fetches our latest channel update for a target + // channel. + FetchLastChanUpdate func(lnwire.ShortChannelID) (*lnwire.ChannelUpdate, + error) + + // ProcessFundingOpen is used to hand off an OpenChannel message to the + // funding manager. + ProcessFundingOpen func(*lnwire.OpenChannel, lnpeer.Peer) + + // ProcessFundingAccept is used to hand off an AcceptChannel message to the + // funding manager. + ProcessFundingAccept func(*lnwire.AcceptChannel, lnpeer.Peer) + + // ProcessFundingCreated is used to hand off a FundingCreated message to + // the funding manager. + ProcessFundingCreated func(*lnwire.FundingCreated, lnpeer.Peer) + + // ProcessFundingSigned is used to hand off a FundingSigned message to the + // funding manager. + ProcessFundingSigned func(*lnwire.FundingSigned, lnpeer.Peer) + + // ProcessFundingLocked is used to hand off a FundingLocked message to the + // funding manager. + ProcessFundingLocked func(*lnwire.FundingLocked, lnpeer.Peer) + + // ProcessFundingError is used to hand off an Error message to the funding + // manager. + ProcessFundingError func(*lnwire.Error, *btcec.PublicKey) + + // IsPendingChannel is used to determine whether to send an Error message + // to the funding manager or not. + IsPendingChannel func([32]byte, *btcec.PublicKey) bool + + // Hodl is used when creating ChannelLinks to specify HodlFlags as + // breakpoints in dev builds. + Hodl *hodl.Config + + // UnsafeReplay is used when creating ChannelLinks to specify whether or + // not to replay adds on its commitment tx. + UnsafeReplay bool + + // MaxOutgoingCltvExpiry is used when creating ChannelLinks and is the max + // number of blocks that funds could be locked up for when forwarding + // payments. + MaxOutgoingCltvExpiry uint32 + + // MaxChannelFeeAllocation is used when creating ChannelLinks and is the + // maximum percentage of total funds that can be allocated to a channel's + // commitment fee. This only applies for the initiator of the channel. + MaxChannelFeeAllocation float64 + + // ServerPubKey is the serialized, compressed public key of our lnd node. + // It is used to determine which policy (channel edge) to pass to the + // ChannelLink. + ServerPubKey [33]byte + + // Quit is the server's quit channel. If this is closed, we halt operation. + Quit chan struct{} +} diff --git a/peer/interfaces.go b/peer/interfaces.go new file mode 100644 index 000000000..05fe4a983 --- /dev/null +++ b/peer/interfaces.go @@ -0,0 +1,11 @@ +package peer + +import "github.com/lightningnetwork/lnd/lnwire" + +// LinkUpdater is an interface implemented by most messages in BOLT 2 that are +// allowed to update the channel state. +type LinkUpdater interface { + // TargetChanID returns the channel id of the link for which this message + // is intended. 
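+	// (For example, a channel-scoped message such as lnwire.UpdateAddHTLC
+	// would return its ChanID field here.)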
+ TargetChanID() lnwire.ChannelID +} From d23b30d8893e0121329b353ff8864126da29a2dc Mon Sep 17 00:00:00 2001 From: nsa Date: Fri, 26 Jun 2020 17:55:40 -0400 Subject: [PATCH 03/15] peer.go: comment clarifications, improved formatting --- peer.go | 104 ++++++++++++++++++++++++++++++-------------------------- 1 file changed, 55 insertions(+), 49 deletions(-) diff --git a/peer.go b/peer.go index 6667fe95f..efe58fcdc 100644 --- a/peer.go +++ b/peer.go @@ -41,14 +41,16 @@ const ( // idleTimeout is the duration of inactivity before we time out a peer. idleTimeout = 5 * time.Minute - // writeMessageTimeout is the timeout used when writing a message to peer. + // writeMessageTimeout is the timeout used when writing a message to the + // peer. writeMessageTimeout = 5 * time.Second // readMessageTimeout is the timeout used when reading a message from a // peer. readMessageTimeout = 5 * time.Second - // handshakeTimeout is the timeout used when waiting for peer init message. + // handshakeTimeout is the timeout used when waiting for the peer's init + // message. handshakeTimeout = 15 * time.Second // outgoingQueueLen is the buffer size of the channel which houses @@ -70,14 +72,14 @@ type outgoingMsg struct { } // newChannelMsg packages a channeldb.OpenChannel with a channel that allows -// the receiver of the request to report when the funding transaction has been -// confirmed and the channel creation process completed. +// the receiver of the request to report when the channel creation process has +// completed. type newChannelMsg struct { channel *channeldb.OpenChannel err chan error } -// closeMsgs is a wrapper struct around any wire messages that deal with the +// closeMsg is a wrapper struct around any wire messages that deal with the // cooperative channel closure negotiation process. This struct includes the // raw channel ID targeted along with the original message. type closeMsg struct { @@ -115,18 +117,18 @@ type peer struct { started int32 disconnect int32 - // The following fields are only meant to be used *atomically* + // MUST be used atomically. bytesReceived uint64 bytesSent uint64 // pingTime is a rough estimate of the RTT (round-trip-time) between us - // and the connected peer. This time is expressed in micro seconds. + // and the connected peer. This time is expressed in microseconds. // To be used atomically. // TODO(roasbeef): also use a WMA or EMA? pingTime int64 // pingLastSend is the Unix time expressed in nanoseconds when we sent - // our last ping message. To be used atomically. + // our last ping message. To be used atomically. pingLastSend int64 cfg *Config @@ -141,9 +143,8 @@ type peer struct { // ready to process messages. activeSignal chan struct{} - // startTime is the time this peer connection was successfully - // established. It will be zero for peers that did not successfully - // Start(). + // startTime is the time this peer connection was successfully established. + // It will be zero for peers that did not successfully call Start(). startTime time.Time inbound bool @@ -157,7 +158,7 @@ type peer struct { outgoingQueue chan outgoingMsg // activeChanMtx protects access to the activeChannels and - // addeddChannels maps. + // addedChannels maps. activeChanMtx sync.RWMutex // activeChannels is a map which stores the state machines of all @@ -186,11 +187,10 @@ type peer struct { // proxy messages to individual, active links. 
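	// (For example, a message that arrives for a link that has not yet
	// started is buffered in its stream until the link becomes active.)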
activeMsgStreams map[lnwire.ChannelID]*msgStream - // activeChanCloses is a map that keep track of all the active - // cooperative channel closures that are active. Any channel closing - // messages are directed to one of these active state machines. Once - // the channel has been closed, the state machine will be delete from - // the map. + // activeChanCloses is a map that keeps track of all the active + // cooperative channel closures. Any channel closing messages are directed + // to one of these active state machines. Once the channel has been closed, + // the state machine will be deleted from the map. activeChanCloses map[lnwire.ChannelID]*chancloser.ChanCloser // localCloseChanReqs is a channel in which any local requests to close @@ -457,8 +457,8 @@ func (p *peer) initGossipSync() { srvrLog.Infof("Negotiated chan series queries with %x", p.pubKeyBytes[:]) - // Register the this peer's for gossip syncer with the gossiper. - // This is blocks synchronously to ensure the gossip syncer is + // Register the peer's gossip syncer with the gossiper. + // This blocks synchronously to ensure the gossip syncer is // registered with the gossiper before attempting to read // messages from the remote peer. // @@ -468,6 +468,7 @@ func (p *peer) initGossipSync() { // peers. p.server.authGossiper.InitSyncState(p) } + } // QuitSignal is a method that should return a channel which will be sent upon @@ -624,7 +625,7 @@ func (p *peer) loadActiveChannels(chans []*channeldb.OpenChannel) ( return msgs, nil } -// addLink creates and adds a new link from the specified channel. +// addLink creates and adds a new ChannelLink from the specified channel. func (p *peer) addLink(chanPoint *wire.OutPoint, lnChan *lnwallet.LightningChannel, forwardingPolicy *htlcswitch.ForwardingPolicy, @@ -704,7 +705,7 @@ func (p *peer) addLink(chanPoint *wire.OutPoint, } // maybeSendNodeAnn sends our node announcement to the remote peer if at least -// one confirmed advertised channel exists with them. +// one confirmed public channel exists with them. func (p *peer) maybeSendNodeAnn(channels []*channeldb.OpenChannel) { hasConfirmedPublicChan := false for _, channel := range channels { @@ -735,7 +736,7 @@ func (p *peer) maybeSendNodeAnn(channels []*channeldb.OpenChannel) { } // WaitForDisconnect waits until the peer has disconnected. A peer may be -// disconnected if the local or remote side terminating the connection, or an +// disconnected if the local or remote side terminates the connection, or an // irrecoverable protocol error has been encountered. This method will only // begin watching the peer's waitgroup after the ready channel or the peer's // quit channel are signaled. The ready channel should only be signaled if a @@ -994,7 +995,9 @@ func (ms *msgStream) AddMsg(msg lnwire.Message) { // waitUntilLinkActive waits until the target link is active and returns a // ChannelLink to pass messages to. It accomplishes this by subscribing to // an ActiveLinkEvent which is emitted by the link when it first starts up. -func waitUntilLinkActive(p *peer, cid lnwire.ChannelID) htlcswitch.ChannelLink { +func waitUntilLinkActive(p *peer, + cid lnwire.ChannelID) htlcswitch.ChannelLink { + // Subscribe to receive channel events. // // NOTE: If the link is already active by SubscribeChannelEvents, then @@ -1319,7 +1322,7 @@ func (p *peer) isActiveChannel(chanID lnwire.ChannelID) bool { // storeError stores an error in our peer's buffer of recent errors with the // current timestamp. 
Errors are only stored if we have at least one active -// channel with the peer to mitigate dos attack vectors where a peer costlessly +// channel with the peer to mitigate a dos vector where a peer costlessly // connects to us and spams us with errors. func (p *peer) storeError(err error) { var haveChannels bool @@ -1581,9 +1584,9 @@ func (p *peer) logWireMessage(msg lnwire.Message, read bool) { // writeMessage writes and flushes the target lnwire.Message to the remote peer. // If the passed message is nil, this method will only try to flush an existing -// message buffered on the connection. It is safe to recall this method with a -// nil message iff a timeout error is returned. This will continue to flush the -// pending message to the wire. +// message buffered on the connection. It is safe to call this method again +// with a nil message iff a timeout error is returned. This will continue to +// flush the pending message to the wire. func (p *peer) writeMessage(msg lnwire.Message) error { // Simply exit if we're shutting down. if atomic.LoadInt32(&p.disconnect) != 0 { @@ -1861,11 +1864,14 @@ func (p *peer) queueMsgLazy(msg lnwire.Message, errChan chan error) { // queue sends a given message to the queueHandler using the passed priority. If // the errChan is non-nil, an error is sent back if the msg failed to queue or // failed to write, and nil otherwise. -func (p *peer) queue(priority bool, msg lnwire.Message, errChan chan error) { +func (p *peer) queue(priority bool, msg lnwire.Message, + errChan chan error) { + select { case p.outgoingQueue <- outgoingMsg{priority, msg, errChan}: case <-p.quit: - peerLog.Tracef("Peer shutting down, could not enqueue msg.") + peerLog.Tracef("Peer shutting down, could not enqueue msg: %v.", + spew.Sdump(msg)) if errChan != nil { errChan <- lnpeer.ErrPeerExiting } @@ -2453,10 +2459,10 @@ func (p *peer) handleLocalCloseReq(req *htlcswitch.ChanClose) { } } -// linkFailureReport is sent to the channelManager whenever a link that was -// added to the switch reports a link failure, and is forced to exit. The report -// houses the necessary information to cleanup the channel state, send back the -// error message, and force close if necessary. +// linkFailureReport is sent to the channelManager whenever a link reports a +// link failure, and is forced to exit. The report houses the necessary +// information to clean up the channel state, send back the error message, and +// force close if necessary. type linkFailureReport struct { chanPoint wire.OutPoint chanID lnwire.ChannelID @@ -2465,7 +2471,7 @@ type linkFailureReport struct { } // handleLinkFailure processes a link failure report when a link in the switch -// fails. It handles facilitates removal of all channel state within the peer, +// fails. It facilitates the removal of all channel state within the peer, // force closing the channel depending on severity, and sending the error // message back to the remote party. func (p *peer) handleLinkFailure(failure linkFailureReport) { @@ -2617,7 +2623,7 @@ func waitForChanToClose(bestHeight uint32, notifier chainntnfs.ChainNotifier, } // WipeChannel removes the passed channel point from all indexes associated with -// the peer, and the switch. +// the peer and the switch. func (p *peer) WipeChannel(chanPoint *wire.OutPoint) { chanID := lnwire.NewChanIDFromOutPoint(chanPoint) @@ -2631,7 +2637,7 @@ func (p *peer) WipeChannel(chanPoint *wire.OutPoint) { } // handleInitMsg handles the incoming init message which contains global and -// local features vectors. 
If feature vectors are incompatible then disconnect. +// local feature vectors. If feature vectors are incompatible then disconnect. func (p *peer) handleInitMsg(msg *lnwire.Init) error { // First, merge any features from the legacy global features field into // those presented in the local features fields. @@ -2641,7 +2647,7 @@ func (p *peer) handleInitMsg(msg *lnwire.Init) error { err) } - // Then, finalize the remote feature vector providing the flatteneed + // Then, finalize the remote feature vector providing the flattened // feature bit namespace. p.remoteFeatures = lnwire.NewFeatureVector( msg.Features, lnwire.Features, @@ -2654,8 +2660,8 @@ func (p *peer) handleInitMsg(msg *lnwire.Init) error { return fmt.Errorf("invalid remote features: %v", err) } - // Ensure the remote party's feature vector contains all transistive - // dependencies. We know ours are are correct since they are validated + // Ensure the remote party's feature vector contains all transitive + // dependencies. We know ours are correct since they are validated // during the feature manager's instantiation. err = feature.ValidateDeps(p.remoteFeatures) if err != nil { @@ -2690,8 +2696,8 @@ func (p *peer) RemoteFeatures() *lnwire.FeatureVector { return p.remoteFeatures } -// sendInitMsg sends init message to remote peer which contains our currently -// supported local and global features. +// sendInitMsg sends the Init message to the remote peer. This message contains our +// currently supported local and global features. func (p *peer) sendInitMsg() error { msg := lnwire.NewInitMessage( p.legacyFeatures.RawFeatureVector, @@ -2745,20 +2751,20 @@ func (p *peer) resendChanSyncMsg(cid lnwire.ChannelID) error { return nil } -// SendMessage sends a variadic number of high-priority message to remote peer. -// The first argument denotes if the method should block until the messages have -// been sent to the remote peer or an error is returned, otherwise it returns -// immediately after queuing. +// SendMessage sends a variadic number of high-priority messages to the remote +// peer. The first argument denotes if the method should block until the +// messages have been sent to the remote peer or an error is returned, +// otherwise it returns immediately after queuing. // // NOTE: Part of the lnpeer.Peer interface. func (p *peer) SendMessage(sync bool, msgs ...lnwire.Message) error { return p.sendMessage(sync, true, msgs...) } -// SendMessageLazy sends a variadic number of low-priority message to remote -// peer. The first argument denotes if the method should block until the -// messages have been sent to the remote peer or an error is returned, otherwise -// it returns immediately after queueing. +// SendMessageLazy sends a variadic number of low-priority messages to the +// remote peer. The first argument denotes if the method should block until +// the messages have been sent to the remote peer or an error is returned, +// otherwise it returns immediately after queueing. // // NOTE: Part of the lnpeer.Peer interface. 
func (p *peer) SendMessageLazy(sync bool, msgs ...lnwire.Message) error { From bfaef46579e109e207632ad9aa240fac7d3030b5 Mon Sep 17 00:00:00 2001 From: nsa Date: Fri, 26 Jun 2020 19:21:48 -0400 Subject: [PATCH 04/15] peer_test.go: comment clarifications --- peer_test.go | 52 ++++++++++++++++++++++++++++------------------------ 1 file changed, 28 insertions(+), 24 deletions(-) diff --git a/peer_test.go b/peer_test.go index 48e2a473d..e36066dff 100644 --- a/peer_test.go +++ b/peer_test.go @@ -73,8 +73,9 @@ func TestPeerChannelClosureAcceptFeeResponder(t *testing.T) { respDeliveryScript := shutdownMsg.Address - // Alice will thereafter send a ClosingSigned message, indicating her - // proposed closing transaction fee. + // Alice will then send a ClosingSigned message, indicating her proposed + // closing transaction fee. Alice sends the ClosingSigned message as she is + // the initiator of the channel. select { case outMsg := <-responder.outgoingQueue: msg = outMsg.msg @@ -107,8 +108,8 @@ func TestPeerChannelClosureAcceptFeeResponder(t *testing.T) { msg: closingSigned, } - // The responder will now see that we agreed on the fee, and broadcast - // the closing transaction. + // Alice should now see that we agreed on the fee, and should broadcast the + // closing transaction. select { case <-broadcastTxChan: case <-time.After(timeout): @@ -137,7 +138,7 @@ func TestPeerChannelClosureAcceptFeeInitiator(t *testing.T) { } defer cleanUp() - // We make the initiator send a shutdown request. + // We make Alice send a shutdown request. updateChan := make(chan interface{}, 1) errChan := make(chan error, 1) closeCommand := &htlcswitch.ChanClose{ @@ -149,7 +150,7 @@ func TestPeerChannelClosureAcceptFeeInitiator(t *testing.T) { } initiator.localCloseChanReqs <- closeCommand - // We should now be getting the shutdown request. + // We can now pull a Shutdown message off of Alice's outgoingQueue. var msg lnwire.Message select { case outMsg := <-initiator.outgoingQueue: @@ -165,8 +166,7 @@ func TestPeerChannelClosureAcceptFeeInitiator(t *testing.T) { initiatorDeliveryScript := shutdownMsg.Address - // We'll answer the shutdown message with our own Shutdown, and then a - // ClosingSigned message. + // Bob will respond with his own Shutdown message. chanID := shutdownMsg.ChannelID initiator.chanCloseMsgs <- &closeMsg{ cid: chanID, @@ -197,8 +197,10 @@ func TestPeerChannelClosureAcceptFeeInitiator(t *testing.T) { msg: closingSigned, } - // And we expect the initiator to accept the fee, and broadcast the - // closing transaction. + // Alice should accept Bob's fee, broadcast the cooperative close tx, and + // send a ClosingSigned message back to Bob. + + // Alice should now broadcast the closing transaction. select { case outMsg := <-initiator.outgoingQueue: msg = outMsg.msg @@ -273,8 +275,8 @@ func TestPeerChannelClosureFeeNegotiationsResponder(t *testing.T) { respDeliveryScript := shutdownMsg.Address - // Alice will thereafter send a ClosingSigned message, indicating her - // proposed closing transaction fee. + // As Alice is the channel initiator, she will send her ClosingSigned + // message. select { case outMsg := <-responder.outgoingQueue: msg = outMsg.msg @@ -287,7 +289,7 @@ func TestPeerChannelClosureFeeNegotiationsResponder(t *testing.T) { t.Fatalf("expected ClosingSigned message, got %T", msg) } - // We don't agree with the fee, and will send back one that's 2.5x. + // Bob doesn't agree with the fee and will send one back that's 2.5x. 
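+	// (Illustrative arithmetic: a 5,000 sat proposal would draw a
+	// 12,500 sat counter-proposal.)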
preferredRespFee := responderClosingSigned.FeeSatoshis increasedFee := btcutil.Amount(float64(preferredRespFee) * 2.5) initiatorSig, _, _, err := initiatorChan.CreateCloseProposal( @@ -307,10 +309,10 @@ func TestPeerChannelClosureFeeNegotiationsResponder(t *testing.T) { msg: closingSigned, } - // The responder will see the new fee we propose, but with current - // settings it won't accept it immediately as it differs too much by - // its ideal fee. We should get a new proposal back, which should have - // the average fee rate proposed. + // Alice will now see the new fee we propose, but with current settings it + // won't accept it immediately as it differs too much by its ideal fee. We + // should get a new proposal back, which should have the average fee rate + // proposed. select { case outMsg := <-responder.outgoingQueue: msg = outMsg.msg @@ -351,8 +353,9 @@ func TestPeerChannelClosureFeeNegotiationsResponder(t *testing.T) { msg: closingSigned, } - // It still won't be accepted, and we should get a new proposal, the - // average of what we proposed, and what they proposed last time. + // Bob's latest proposal still won't be accepted and Alice should send over + // a new ClosingSigned message. It should be the average of what Bob and + // Alice each proposed last time. select { case outMsg := <-responder.outgoingQueue: msg = outMsg.msg @@ -396,8 +399,8 @@ func TestPeerChannelClosureFeeNegotiationsResponder(t *testing.T) { msg: closingSigned, } - // The responder will now see that we agreed on the fee, and broadcast - // the closing transaction. + // Alice will now see that Bob agreed on the fee, and broadcast the coop + // close transaction. select { case <-broadcastTxChan: case <-time.After(timeout): @@ -476,7 +479,7 @@ func TestPeerChannelClosureFeeNegotiationsInitiator(t *testing.T) { increasedFee, dummyDeliveryScript, initiatorDeliveryScript, ) if err != nil { - t.Fatalf("unable to create close proposal: %v", err) + t.Fatalf("error creating close proposal: %v", err) } parsedSig, err := lnwire.NewSigFromSignature(closeSig) if err != nil { @@ -555,8 +558,9 @@ func TestPeerChannelClosureFeeNegotiationsInitiator(t *testing.T) { msg: closingSigned, } - // It still won't be accepted, and we should get a new proposal, the - // average of what we proposed, and what they proposed last time. + // Alice won't accept Bob's new proposal, and Bob should receive a new + // proposal which is the average of what Bob proposed and Alice proposed + // last time. select { case outMsg := <-initiator.outgoingQueue: msg = outMsg.msg From ac72479b10749d4d0216845c5b006c06c117a23e Mon Sep 17 00:00:00 2001 From: nsa Date: Thu, 2 Jul 2020 18:51:28 -0400 Subject: [PATCH 05/15] server.go: log DisconnectPeer errors, use public functions to access peer internals. --- server.go | 25 ++++++++++++++++--------- 1 file changed, 16 insertions(+), 9 deletions(-) diff --git a/server.go b/server.go index 3cd357a03..f53f4170e 100644 --- a/server.go +++ b/server.go @@ -1481,7 +1481,13 @@ func (s *server) Stop() error { // Disconnect from each active peers to ensure that // peerTerminationWatchers signal completion to each peer. 
for _, peer := range s.Peers() { - s.DisconnectPeer(peer.addr.IdentityKey) + err := s.DisconnectPeer(peer.IdentityKey()) + if err != nil { + srvrLog.Warnf("could not disconnect peer: %v"+ + "received error: %v", peer.IdentityKey(), + err, + ) + } } // Now that all connections have been torn down, stop the tower @@ -1820,7 +1826,7 @@ func (s *server) peerBootstrapper(numTargetPeers uint32, s.mu.RLock() ignoreList := make(map[autopilot.NodeID]struct{}) for _, peer := range s.peersByPub { - nID := autopilot.NewNodeID(peer.addr.IdentityKey) + nID := autopilot.NewNodeID(peer.IdentityKey()) ignoreList[nID] = struct{}{} } s.mu.RUnlock() @@ -2313,9 +2319,9 @@ func (s *server) BroadcastMessage(skips map[route.Vertex]struct{}, peers := make([]*peer, 0, len(s.peersByPub)) for _, sPeer := range s.peersByPub { if skips != nil { - if _, ok := skips[sPeer.pubKeyBytes]; ok { + if _, ok := skips[sPeer.PubKey()]; ok { srvrLog.Tracef("Skipping %x in broadcast", - sPeer.pubKeyBytes[:]) + sPeer.PubKey()) continue } } @@ -2905,7 +2911,7 @@ func (s *server) peerInitializer(p *peer) { // was successful, and to begin watching the peer's wait group. close(ready) - pubStr := string(p.addr.IdentityKey.SerializeCompressed()) + pubStr := string(p.IdentityKey().SerializeCompressed()) s.mu.Lock() defer s.mu.Unlock() @@ -2952,7 +2958,7 @@ func (s *server) peerTerminationWatcher(p *peer, ready chan struct{}) { // available for use. s.fundingMgr.CancelPeerReservations(p.PubKey()) - pubKey := p.addr.IdentityKey + pubKey := p.IdentityKey() // We'll also inform the gossiper that this peer is no longer active, // so we don't need to maintain sync state for it any longer. @@ -2963,13 +2969,13 @@ func (s *server) peerTerminationWatcher(p *peer, ready chan struct{}) { // with this interface should be closed. // // TODO(roasbeef): instead add a PurgeInterfaceLinks function? - links, err := p.server.htlcSwitch.GetLinksByInterface(p.pubKeyBytes) + links, err := s.htlcSwitch.GetLinksByInterface(p.PubKey()) if err != nil && err != htlcswitch.ErrNoLinksFound { srvrLog.Errorf("Unable to get channel links for %v: %v", p, err) } for _, link := range links { - p.server.htlcSwitch.RemoveLink(link.ChanID()) + s.htlcSwitch.RemoveLink(link.ChanID()) } s.mu.Lock() @@ -3124,7 +3130,8 @@ func (s *server) removePeer(p *peer) { return } - pubSer := p.addr.IdentityKey.SerializeCompressed() + pKey := p.PubKey() + pubSer := pKey[:] pubStr := string(pubSer) delete(s.peersByPub, pubStr) From ac3d416b04346422d71ef3bee7fe2a6c0fcc53e4 Mon Sep 17 00:00:00 2001 From: nsa Date: Thu, 2 Jul 2020 18:52:38 -0400 Subject: [PATCH 06/15] peer_test.go: fixes to coop close tests, modifying variable names to either be alice or bob-prefixed. This helps an observer understand what the test is actually doing. 
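For example, the peer under test is consistently named alicePeer, and the
remote channel state machine becomes bobChan.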
--- peer_test.go | 400 ++++++++++++++++++++++++++++----------------------- 1 file changed, 218 insertions(+), 182 deletions(-) diff --git a/peer_test.go b/peer_test.go index e36066dff..1d1afbc09 100644 --- a/peer_test.go +++ b/peer_test.go @@ -7,13 +7,13 @@ import ( "testing" "time" + "github.com/btcsuite/btcd/chaincfg" "github.com/btcsuite/btcd/txscript" "github.com/btcsuite/btcd/wire" "github.com/btcsuite/btcutil" "github.com/lightningnetwork/lnd/chainntnfs" "github.com/lightningnetwork/lnd/channeldb" "github.com/lightningnetwork/lnd/htlcswitch" - "github.com/lightningnetwork/lnd/lnwallet/chainfee" "github.com/lightningnetwork/lnd/lnwallet/chancloser" "github.com/lightningnetwork/lnd/lnwire" ) @@ -40,7 +40,7 @@ func TestPeerChannelClosureAcceptFeeResponder(t *testing.T) { } broadcastTxChan := make(chan *wire.MsgTx) - responder, responderChan, initiatorChan, cleanUp, err := createTestPeer( + alicePeer, _, bobChan, cleanUp, err := createTestPeer( notifier, broadcastTxChan, noUpdate, ) if err != nil { @@ -48,19 +48,19 @@ func TestPeerChannelClosureAcceptFeeResponder(t *testing.T) { } defer cleanUp() - chanID := lnwire.NewChanIDFromOutPoint(responderChan.ChannelPoint()) + chanID := lnwire.NewChanIDFromOutPoint(bobChan.ChannelPoint()) // We send a shutdown request to Alice. She will now be the responding // node in this shutdown procedure. We first expect Alice to answer // this shutdown request with a Shutdown message. - responder.chanCloseMsgs <- &closeMsg{ + alicePeer.chanCloseMsgs <- &closeMsg{ cid: chanID, msg: lnwire.NewShutdown(chanID, dummyDeliveryScript), } var msg lnwire.Message select { - case outMsg := <-responder.outgoingQueue: + case outMsg := <-alicePeer.outgoingQueue: msg = outMsg.msg case <-time.After(timeout): t.Fatalf("did not receive shutdown message") @@ -77,33 +77,33 @@ func TestPeerChannelClosureAcceptFeeResponder(t *testing.T) { // closing transaction fee. Alice sends the ClosingSigned message as she is // the initiator of the channel. select { - case outMsg := <-responder.outgoingQueue: + case outMsg := <-alicePeer.outgoingQueue: msg = outMsg.msg case <-time.After(timeout): t.Fatalf("did not receive ClosingSigned message") } - responderClosingSigned, ok := msg.(*lnwire.ClosingSigned) + respClosingSigned, ok := msg.(*lnwire.ClosingSigned) if !ok { t.Fatalf("expected ClosingSigned message, got %T", msg) } // We accept the fee, and send a ClosingSigned with the same fee back, // so she knows we agreed. - peerFee := responderClosingSigned.FeeSatoshis - initiatorSig, _, _, err := initiatorChan.CreateCloseProposal( - peerFee, dummyDeliveryScript, respDeliveryScript, + aliceFee := respClosingSigned.FeeSatoshis + bobSig, _, _, err := bobChan.CreateCloseProposal( + aliceFee, dummyDeliveryScript, respDeliveryScript, ) if err != nil { t.Fatalf("error creating close proposal: %v", err) } - parsedSig, err := lnwire.NewSigFromSignature(initiatorSig) + parsedSig, err := lnwire.NewSigFromSignature(bobSig) if err != nil { t.Fatalf("error parsing signature: %v", err) } - closingSigned := lnwire.NewClosingSigned(chanID, peerFee, parsedSig) - responder.chanCloseMsgs <- &closeMsg{ + closingSigned := lnwire.NewClosingSigned(chanID, aliceFee, parsedSig) + alicePeer.chanCloseMsgs <- &closeMsg{ cid: chanID, msg: closingSigned, } @@ -116,7 +116,18 @@ func TestPeerChannelClosureAcceptFeeResponder(t *testing.T) { t.Fatalf("closing tx not broadcast") } - // And the initiator should be waiting for a confirmation notification. + // Need to pull the remaining message off of Alice's outgoing queue. 
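+	// (This is the ClosingSigned that Alice echoes back once the fee is
+	// agreed upon.)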
+ select { + case outMsg := <-alicePeer.outgoingQueue: + msg = outMsg.msg + case <-time.After(timeout): + t.Fatalf("did not receive ClosingSigned message") + } + if _, ok := msg.(*lnwire.ClosingSigned); !ok { + t.Fatalf("expected ClosingSigned message, got %T", msg) + } + + // Alice should be waiting in a goroutine for a confirmation. notifier.confChannel <- &chainntnfs.TxConfirmation{} } @@ -130,7 +141,7 @@ func TestPeerChannelClosureAcceptFeeInitiator(t *testing.T) { } broadcastTxChan := make(chan *wire.MsgTx) - initiator, initiatorChan, responderChan, cleanUp, err := createTestPeer( + alicePeer, _, bobChan, cleanUp, err := createTestPeer( notifier, broadcastTxChan, noUpdate, ) if err != nil { @@ -143,17 +154,17 @@ func TestPeerChannelClosureAcceptFeeInitiator(t *testing.T) { errChan := make(chan error, 1) closeCommand := &htlcswitch.ChanClose{ CloseType: htlcswitch.CloseRegular, - ChanPoint: initiatorChan.ChannelPoint(), + ChanPoint: bobChan.ChannelPoint(), Updates: updateChan, TargetFeePerKw: 12500, Err: errChan, } - initiator.localCloseChanReqs <- closeCommand + alicePeer.localCloseChanReqs <- closeCommand // We can now pull a Shutdown message off of Alice's outgoingQueue. var msg lnwire.Message select { - case outMsg := <-initiator.outgoingQueue: + case outMsg := <-alicePeer.outgoingQueue: msg = outMsg.msg case <-time.After(timeout): t.Fatalf("did not receive shutdown request") @@ -164,35 +175,45 @@ func TestPeerChannelClosureAcceptFeeInitiator(t *testing.T) { t.Fatalf("expected Shutdown message, got %T", msg) } - initiatorDeliveryScript := shutdownMsg.Address + aliceDeliveryScript := shutdownMsg.Address // Bob will respond with his own Shutdown message. chanID := shutdownMsg.ChannelID - initiator.chanCloseMsgs <- &closeMsg{ + alicePeer.chanCloseMsgs <- &closeMsg{ cid: chanID, msg: lnwire.NewShutdown(chanID, dummyDeliveryScript), } - estimator := chainfee.NewStaticEstimator(12500, 0) - feePerKw, err := estimator.EstimateFeePerKW(1) - if err != nil { - t.Fatalf("unable to query fee estimator: %v", err) + // Alice will reply with a ClosingSigned here. + select { + case outMsg := <-alicePeer.outgoingQueue: + msg = outMsg.msg + case <-time.After(timeout): + t.Fatalf("did not receive closing signed message") } - fee := responderChan.CalcFee(feePerKw) - closeSig, _, _, err := responderChan.CreateCloseProposal(fee, - dummyDeliveryScript, initiatorDeliveryScript) + closingSignedMsg, ok := msg.(*lnwire.ClosingSigned) + if !ok { + t.Fatalf("expected to receive closing signed message, got %T", msg) + } + + // Bob should reply with the exact same fee in his next ClosingSigned + // message. + bobFee := closingSignedMsg.FeeSatoshis + bobSig, _, _, err := bobChan.CreateCloseProposal( + bobFee, dummyDeliveryScript, aliceDeliveryScript, + ) if err != nil { t.Fatalf("unable to create close proposal: %v", err) } - parsedSig, err := lnwire.NewSigFromSignature(closeSig) + parsedSig, err := lnwire.NewSigFromSignature(bobSig) if err != nil { t.Fatalf("unable to parse signature: %v", err) } closingSigned := lnwire.NewClosingSigned(shutdownMsg.ChannelID, - fee, parsedSig) - initiator.chanCloseMsgs <- &closeMsg{ + bobFee, parsedSig) + alicePeer.chanCloseMsgs <- &closeMsg{ cid: chanID, msg: closingSigned, } @@ -202,31 +223,31 @@ func TestPeerChannelClosureAcceptFeeInitiator(t *testing.T) { // Alice should now broadcast the closing transaction. 
select { - case outMsg := <-initiator.outgoingQueue: - msg = outMsg.msg - case <-time.After(timeout): - t.Fatalf("did not receive closing signed message") - } - - closingSignedMsg, ok := msg.(*lnwire.ClosingSigned) - if !ok { - t.Fatalf("expected ClosingSigned message, got %T", msg) - } - - if closingSignedMsg.FeeSatoshis != fee { - t.Fatalf("expected ClosingSigned fee to be %v, instead got %v", - fee, closingSignedMsg.FeeSatoshis) - } - - // The initiator will now see that we agreed on the fee, and broadcast - // the closing transaction. - select { case <-broadcastTxChan: case <-time.After(timeout): t.Fatalf("closing tx not broadcast") } - // And the initiator should be waiting for a confirmation notification. + // Alice should respond with the ClosingSigned they both agreed upon. + + select { + case outMsg := <-alicePeer.outgoingQueue: + msg = outMsg.msg + case <-time.After(timeout): + t.Fatalf("did not receive closing signed message") + } + + closingSignedMsg, ok = msg.(*lnwire.ClosingSigned) + if !ok { + t.Fatalf("expected ClosingSigned message, got %T", msg) + } + + if closingSignedMsg.FeeSatoshis != bobFee { + t.Fatalf("expected ClosingSigned fee to be %v, instead got %v", + bobFee, closingSignedMsg.FeeSatoshis) + } + + // Alice should be waiting on a single confirmation for the coop close tx. notifier.confChannel <- &chainntnfs.TxConfirmation{} } @@ -241,7 +262,7 @@ func TestPeerChannelClosureFeeNegotiationsResponder(t *testing.T) { } broadcastTxChan := make(chan *wire.MsgTx) - responder, responderChan, initiatorChan, cleanUp, err := createTestPeer( + alicePeer, _, bobChan, cleanUp, err := createTestPeer( notifier, broadcastTxChan, noUpdate, ) if err != nil { @@ -249,12 +270,12 @@ func TestPeerChannelClosureFeeNegotiationsResponder(t *testing.T) { } defer cleanUp() - chanID := lnwire.NewChanIDFromOutPoint(responderChan.ChannelPoint()) + chanID := lnwire.NewChanIDFromOutPoint(bobChan.ChannelPoint()) - // We send a shutdown request to Alice. She will now be the responding - // node in this shutdown procedure. We first expect Alice to answer - // this shutdown request with a Shutdown message. - responder.chanCloseMsgs <- &closeMsg{ + // Bob sends a shutdown request to Alice. She will now be the responding + // node in this shutdown procedure. We first expect Alice to answer this + // Shutdown request with a Shutdown message. + alicePeer.chanCloseMsgs <- &closeMsg{ cid: chanID, msg: lnwire.NewShutdown(chanID, dummyDeliveryScript), @@ -262,7 +283,7 @@ func TestPeerChannelClosureFeeNegotiationsResponder(t *testing.T) { var msg lnwire.Message select { - case outMsg := <-responder.outgoingQueue: + case outMsg := <-alicePeer.outgoingQueue: msg = outMsg.msg case <-time.After(timeout): t.Fatalf("did not receive shutdown message") @@ -273,38 +294,38 @@ func TestPeerChannelClosureFeeNegotiationsResponder(t *testing.T) { t.Fatalf("expected Shutdown message, got %T", msg) } - respDeliveryScript := shutdownMsg.Address + aliceDeliveryScript := shutdownMsg.Address // As Alice is the channel initiator, she will send her ClosingSigned // message. select { - case outMsg := <-responder.outgoingQueue: + case outMsg := <-alicePeer.outgoingQueue: msg = outMsg.msg case <-time.After(timeout): t.Fatalf("did not receive closing signed message") } - responderClosingSigned, ok := msg.(*lnwire.ClosingSigned) + aliceClosingSigned, ok := msg.(*lnwire.ClosingSigned) if !ok { t.Fatalf("expected ClosingSigned message, got %T", msg) } // Bob doesn't agree with the fee and will send one back that's 2.5x. 
- preferredRespFee := responderClosingSigned.FeeSatoshis + preferredRespFee := aliceClosingSigned.FeeSatoshis increasedFee := btcutil.Amount(float64(preferredRespFee) * 2.5) - initiatorSig, _, _, err := initiatorChan.CreateCloseProposal( - increasedFee, dummyDeliveryScript, respDeliveryScript, + bobSig, _, _, err := bobChan.CreateCloseProposal( + increasedFee, dummyDeliveryScript, aliceDeliveryScript, ) if err != nil { t.Fatalf("error creating close proposal: %v", err) } - parsedSig, err := lnwire.NewSigFromSignature(initiatorSig) + parsedSig, err := lnwire.NewSigFromSignature(bobSig) if err != nil { t.Fatalf("error parsing signature: %v", err) } closingSigned := lnwire.NewClosingSigned(chanID, increasedFee, parsedSig) - responder.chanCloseMsgs <- &closeMsg{ + alicePeer.chanCloseMsgs <- &closeMsg{ cid: chanID, msg: closingSigned, } @@ -314,41 +335,41 @@ func TestPeerChannelClosureFeeNegotiationsResponder(t *testing.T) { // should get a new proposal back, which should have the average fee rate // proposed. select { - case outMsg := <-responder.outgoingQueue: + case outMsg := <-alicePeer.outgoingQueue: msg = outMsg.msg case <-time.After(timeout): t.Fatalf("did not receive closing signed message") } - responderClosingSigned, ok = msg.(*lnwire.ClosingSigned) + aliceClosingSigned, ok = msg.(*lnwire.ClosingSigned) if !ok { t.Fatalf("expected ClosingSigned message, got %T", msg) } - // The fee sent by the responder should be less than the fee we just - // sent as it should attempt to compromise. - peerFee := responderClosingSigned.FeeSatoshis - if peerFee > increasedFee { + // The fee sent by Alice should be less than the fee Bob just sent as Alice + // should attempt to compromise. + aliceFee := aliceClosingSigned.FeeSatoshis + if aliceFee > increasedFee { t.Fatalf("new fee should be less than our fee: new=%v, "+ - "prior=%v", peerFee, increasedFee) + "prior=%v", aliceFee, increasedFee) } - lastFeeResponder := peerFee + lastFeeResponder := aliceFee // We try negotiating a 2.1x fee, which should also be rejected. increasedFee = btcutil.Amount(float64(preferredRespFee) * 2.1) - initiatorSig, _, _, err = initiatorChan.CreateCloseProposal( - increasedFee, dummyDeliveryScript, respDeliveryScript, + bobSig, _, _, err = bobChan.CreateCloseProposal( + increasedFee, dummyDeliveryScript, aliceDeliveryScript, ) if err != nil { t.Fatalf("error creating close proposal: %v", err) } - parsedSig, err = lnwire.NewSigFromSignature(initiatorSig) + parsedSig, err = lnwire.NewSigFromSignature(bobSig) if err != nil { t.Fatalf("error parsing signature: %v", err) } closingSigned = lnwire.NewClosingSigned(chanID, increasedFee, parsedSig) - responder.chanCloseMsgs <- &closeMsg{ + alicePeer.chanCloseMsgs <- &closeMsg{ cid: chanID, msg: closingSigned, } @@ -357,44 +378,44 @@ func TestPeerChannelClosureFeeNegotiationsResponder(t *testing.T) { // a new ClosingSigned message. It should be the average of what Bob and // Alice each proposed last time. select { - case outMsg := <-responder.outgoingQueue: + case outMsg := <-alicePeer.outgoingQueue: msg = outMsg.msg case <-time.After(timeout): t.Fatalf("did not receive closing signed message") } - responderClosingSigned, ok = msg.(*lnwire.ClosingSigned) + aliceClosingSigned, ok = msg.(*lnwire.ClosingSigned) if !ok { t.Fatalf("expected ClosingSigned message, got %T", msg) } - // The peer should inch towards our fee, in order to compromise. - // Additionally, this fee should be less than the fee we sent prior. 
- peerFee = responderClosingSigned.FeeSatoshis - if peerFee < lastFeeResponder { + // Alice should inch towards Bob's fee, in order to compromise. + // Additionally, this fee should be less than the fee Bob sent before. + aliceFee = aliceClosingSigned.FeeSatoshis + if aliceFee < lastFeeResponder { t.Fatalf("new fee should be greater than prior: new=%v, "+ - "prior=%v", peerFee, lastFeeResponder) + "prior=%v", aliceFee, lastFeeResponder) } - if peerFee > increasedFee { - t.Fatalf("new fee should be less than our fee: new=%v, "+ - "prior=%v", peerFee, increasedFee) + if aliceFee > increasedFee { + t.Fatalf("new fee should be less than Bob's fee: new=%v, "+ + "prior=%v", aliceFee, increasedFee) } - // Finally, we'll accept the fee by echoing back the same fee that they - // sent to us. - initiatorSig, _, _, err = initiatorChan.CreateCloseProposal( - peerFee, dummyDeliveryScript, respDeliveryScript, + // Finally, Bob will accept the fee by echoing back the same fee that Alice + // just sent over. + bobSig, _, _, err = bobChan.CreateCloseProposal( + aliceFee, dummyDeliveryScript, aliceDeliveryScript, ) if err != nil { t.Fatalf("error creating close proposal: %v", err) } - parsedSig, err = lnwire.NewSigFromSignature(initiatorSig) + parsedSig, err = lnwire.NewSigFromSignature(bobSig) if err != nil { t.Fatalf("error parsing signature: %v", err) } - closingSigned = lnwire.NewClosingSigned(chanID, peerFee, parsedSig) - responder.chanCloseMsgs <- &closeMsg{ + closingSigned = lnwire.NewClosingSigned(chanID, aliceFee, parsedSig) + alicePeer.chanCloseMsgs <- &closeMsg{ cid: chanID, msg: closingSigned, } @@ -407,7 +428,18 @@ func TestPeerChannelClosureFeeNegotiationsResponder(t *testing.T) { t.Fatalf("closing tx not broadcast") } - // And the responder should be waiting for a confirmation notification. + // Alice should respond with the ClosingSigned they both agreed upon. + select { + case outMsg := <-alicePeer.outgoingQueue: + msg = outMsg.msg + case <-time.After(timeout): + t.Fatalf("did not receive closing signed message") + } + if _, ok := msg.(*lnwire.ClosingSigned); !ok { + t.Fatalf("expected to receive closing signed message, got %T", msg) + } + + // Alice should be waiting on a single confirmation for the coop close tx. notifier.confChannel <- &chainntnfs.TxConfirmation{} } @@ -422,7 +454,7 @@ func TestPeerChannelClosureFeeNegotiationsInitiator(t *testing.T) { } broadcastTxChan := make(chan *wire.MsgTx) - initiator, initiatorChan, responderChan, cleanUp, err := createTestPeer( + alicePeer, _, bobChan, cleanUp, err := createTestPeer( notifier, broadcastTxChan, noUpdate, ) if err != nil { @@ -435,18 +467,18 @@ func TestPeerChannelClosureFeeNegotiationsInitiator(t *testing.T) { errChan := make(chan error, 1) closeCommand := &htlcswitch.ChanClose{ CloseType: htlcswitch.CloseRegular, - ChanPoint: initiatorChan.ChannelPoint(), + ChanPoint: bobChan.ChannelPoint(), Updates: updateChan, TargetFeePerKw: 12500, Err: errChan, } - initiator.localCloseChanReqs <- closeCommand + alicePeer.localCloseChanReqs <- closeCommand - // We should now be getting the shutdown request. + // Alice should now send a Shutdown request to Bob. 
var msg lnwire.Message select { - case outMsg := <-initiator.outgoingQueue: + case outMsg := <-alicePeer.outgoingQueue: msg = outMsg.msg case <-time.After(timeout): t.Fatalf("did not receive shutdown request") @@ -457,47 +489,20 @@ func TestPeerChannelClosureFeeNegotiationsInitiator(t *testing.T) { t.Fatalf("expected Shutdown message, got %T", msg) } - initiatorDeliveryScript := shutdownMsg.Address + aliceDeliveryScript := shutdownMsg.Address - // We'll answer the shutdown message with our own Shutdown, and then a - // ClosingSigned message. - chanID := lnwire.NewChanIDFromOutPoint(initiatorChan.ChannelPoint()) + // Bob will answer the Shutdown message with his own Shutdown. + chanID := lnwire.NewChanIDFromOutPoint(bobChan.ChannelPoint()) respShutdown := lnwire.NewShutdown(chanID, dummyDeliveryScript) - initiator.chanCloseMsgs <- &closeMsg{ + alicePeer.chanCloseMsgs <- &closeMsg{ cid: chanID, msg: respShutdown, } - estimator := chainfee.NewStaticEstimator(12500, 0) - initiatorIdealFeeRate, err := estimator.EstimateFeePerKW(1) - if err != nil { - t.Fatalf("unable to query fee estimator: %v", err) - } - initiatorIdealFee := responderChan.CalcFee(initiatorIdealFeeRate) - increasedFee := btcutil.Amount(float64(initiatorIdealFee) * 2.5) - closeSig, _, _, err := responderChan.CreateCloseProposal( - increasedFee, dummyDeliveryScript, initiatorDeliveryScript, - ) - if err != nil { - t.Fatalf("error creating close proposal: %v", err) - } - parsedSig, err := lnwire.NewSigFromSignature(closeSig) - if err != nil { - t.Fatalf("unable to parse signature: %v", err) - } - - closingSigned := lnwire.NewClosingSigned( - shutdownMsg.ChannelID, increasedFee, parsedSig, - ) - initiator.chanCloseMsgs <- &closeMsg{ - cid: chanID, - msg: closingSigned, - } - - // We should get two closing signed messages, the first will be the - // ideal fee sent by the initiator in response to our shutdown request. + // Alice should now respond with a ClosingSigned message with her ideal + // fee rate. select { - case outMsg := <-initiator.outgoingQueue: + case outMsg := <-alicePeer.outgoingQueue: msg = outMsg.msg case <-time.After(timeout): t.Fatalf("did not receive closing signed") @@ -506,16 +511,35 @@ func TestPeerChannelClosureFeeNegotiationsInitiator(t *testing.T) { if !ok { t.Fatalf("expected ClosingSigned message, got %T", msg) } - if closingSignedMsg.FeeSatoshis != initiatorIdealFee { - t.Fatalf("expected ClosingSigned fee to be %v, instead got %v", - initiatorIdealFee, closingSignedMsg.FeeSatoshis) - } - lastFeeSent := closingSignedMsg.FeeSatoshis - // The second message should be the compromise fee sent in response to - // them receiving our fee proposal. + idealFeeRate := closingSignedMsg.FeeSatoshis + lastReceivedFee := idealFeeRate + + increasedFee := btcutil.Amount(float64(idealFeeRate) * 2.1) + lastSentFee := increasedFee + + bobSig, _, _, err := bobChan.CreateCloseProposal( + increasedFee, dummyDeliveryScript, aliceDeliveryScript, + ) + if err != nil { + t.Fatalf("error creating close proposal: %v", err) + } + + parsedSig, err := lnwire.NewSigFromSignature(bobSig) + if err != nil { + t.Fatalf("unable to parse signature: %v", err) + } + + closingSigned := lnwire.NewClosingSigned(chanID, increasedFee, parsedSig) + alicePeer.chanCloseMsgs <- &closeMsg{ + cid: chanID, + msg: closingSigned, + } + + // It still won't be accepted, and we should get a new proposal, the + // average of what we proposed, and what they proposed last time. 
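+	// (Illustrative arithmetic: with an ideal fee F and a 2.1×F
+	// counter-offer, the averaged proposal lands at 1.55×F.)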
select { - case outMsg := <-initiator.outgoingQueue: + case outMsg := <-alicePeer.outgoingQueue: msg = outMsg.msg case <-time.After(timeout): t.Fatalf("did not receive closing signed") @@ -525,35 +549,36 @@ func TestPeerChannelClosureFeeNegotiationsInitiator(t *testing.T) { t.Fatalf("expected ClosingSigned message, got %T", msg) } - // The peer should inch towards our fee, in order to compromise. - // Additionally, this fee should be less than the fee we sent prior. - peerFee := closingSignedMsg.FeeSatoshis - if peerFee < lastFeeSent { - t.Fatalf("new fee should be greater than prior: new=%v, "+ - "prior=%v", peerFee, lastFeeSent) + aliceFee := closingSignedMsg.FeeSatoshis + if aliceFee < lastReceivedFee { + t.Fatalf("new fee should be greater than prior: new=%v, old=%v", + aliceFee, lastReceivedFee) } - if peerFee > increasedFee { - t.Fatalf("new fee should be less than our fee: new=%v, "+ - "prior=%v", peerFee, increasedFee) + if aliceFee > lastSentFee { + t.Fatalf("new fee should be less than our fee: new=%v, old=%v", + aliceFee, lastSentFee) } - lastFeeSent = closingSignedMsg.FeeSatoshis - // We try negotiating a 2.1x fee, which should also be rejected. - increasedFee = btcutil.Amount(float64(initiatorIdealFee) * 2.1) - responderSig, _, _, err := responderChan.CreateCloseProposal( - increasedFee, dummyDeliveryScript, initiatorDeliveryScript, + lastReceivedFee = aliceFee + + // We'll try negotiating a 1.5x fee, which should also be rejected. + increasedFee = btcutil.Amount(float64(idealFeeRate) * 1.5) + lastSentFee = increasedFee + + bobSig, _, _, err = bobChan.CreateCloseProposal( + increasedFee, dummyDeliveryScript, aliceDeliveryScript, ) if err != nil { t.Fatalf("error creating close proposal: %v", err) } - parsedSig, err = lnwire.NewSigFromSignature(responderSig) + parsedSig, err = lnwire.NewSigFromSignature(bobSig) if err != nil { t.Fatalf("error parsing signature: %v", err) } closingSigned = lnwire.NewClosingSigned(chanID, increasedFee, parsedSig) - initiator.chanCloseMsgs <- &closeMsg{ + alicePeer.chanCloseMsgs <- &closeMsg{ cid: chanID, msg: closingSigned, } @@ -562,44 +587,41 @@ func TestPeerChannelClosureFeeNegotiationsInitiator(t *testing.T) { // proposal which is the average of what Bob proposed and Alice proposed // last time. select { - case outMsg := <-initiator.outgoingQueue: + case outMsg := <-alicePeer.outgoingQueue: msg = outMsg.msg case <-time.After(timeout): t.Fatalf("did not receive closing signed") } - - initiatorClosingSigned, ok := msg.(*lnwire.ClosingSigned) + closingSignedMsg, ok = msg.(*lnwire.ClosingSigned) if !ok { t.Fatalf("expected ClosingSigned message, got %T", msg) } - // Once again, the fee sent by the initiator should be greater than the - // last fee they sent, but less than the last fee we sent. - peerFee = initiatorClosingSigned.FeeSatoshis - if peerFee < lastFeeSent { - t.Fatalf("new fee should be greater than prior: new=%v, "+ - "prior=%v", peerFee, lastFeeSent) + aliceFee = closingSignedMsg.FeeSatoshis + if aliceFee < lastReceivedFee { + t.Fatalf("new fee should be greater than prior: new=%v, old=%v", + aliceFee, lastReceivedFee) } - if peerFee > increasedFee { - t.Fatalf("new fee should be less than our fee: new=%v, "+ - "prior=%v", peerFee, increasedFee) + if aliceFee > lastSentFee { + t.Fatalf("new fee should be less than Bob's fee: new=%v, old=%v", + aliceFee, lastSentFee) } - // At this point, we'll accept their fee by sending back a CloseSigned - // message with an identical fee. 
- responderSig, _, _, err = responderChan.CreateCloseProposal( - peerFee, dummyDeliveryScript, initiatorDeliveryScript, + // Bob will now accept their fee by sending back a ClosingSigned message + // with an identical fee. + bobSig, _, _, err = bobChan.CreateCloseProposal( + aliceFee, dummyDeliveryScript, aliceDeliveryScript, ) if err != nil { t.Fatalf("error creating close proposal: %v", err) } - parsedSig, err = lnwire.NewSigFromSignature(responderSig) + parsedSig, err = lnwire.NewSigFromSignature(bobSig) if err != nil { t.Fatalf("error parsing signature: %v", err) } - closingSigned = lnwire.NewClosingSigned(chanID, peerFee, parsedSig) - initiator.chanCloseMsgs <- &closeMsg{ + closingSigned = lnwire.NewClosingSigned(chanID, aliceFee, parsedSig) + alicePeer.chanCloseMsgs <- &closeMsg{ cid: chanID, msg: closingSigned, } @@ -610,6 +632,20 @@ func TestPeerChannelClosureFeeNegotiationsInitiator(t *testing.T) { case <-time.After(timeout): t.Fatalf("closing tx not broadcast") } + + // Alice should respond with the ClosingSigned they both agreed upon. + select { + case outMsg := <-alicePeer.outgoingQueue: + msg = outMsg.msg + case <-time.After(timeout): + t.Fatalf("did not receive closing signed message") + } + if _, ok := msg.(*lnwire.ClosingSigned); !ok { + t.Fatalf("expected to receive closing signed message, got %T", msg) + } + + // Alice should be waiting on a single confirmation for the coop close tx. + notifier.confChannel <- &chainntnfs.TxConfirmation{} } // TestChooseDeliveryScript tests that chooseDeliveryScript correctly errors @@ -752,7 +788,7 @@ func TestCustomShutdownScript(t *testing.T) { broadcastTxChan := make(chan *wire.MsgTx) // Open a channel. - initiator, initiatorChan, _, cleanUp, err := createTestPeer( + alicePeer, _, bobChan, cleanUp, err := createTestPeer( notifier, broadcastTxChan, test.update, ) if err != nil { @@ -764,7 +800,7 @@ func TestCustomShutdownScript(t *testing.T) { // a specified delivery address. updateChan := make(chan interface{}, 1) errChan := make(chan error, 1) - chanPoint := initiatorChan.ChannelPoint() + chanPoint := bobChan.ChannelPoint() closeCommand := htlcswitch.ChanClose{ CloseType: htlcswitch.CloseRegular, ChanPoint: chanPoint, @@ -776,11 +812,11 @@ func TestCustomShutdownScript(t *testing.T) { // Send the close command for the correct channel and check that a // shutdown message is sent. - initiator.localCloseChanReqs <- &closeCommand + alicePeer.localCloseChanReqs <- &closeCommand var msg lnwire.Message select { - case outMsg := <-initiator.outgoingQueue: + case outMsg := <-alicePeer.outgoingQueue: msg = outMsg.msg case <-time.After(timeout): t.Fatalf("did not receive shutdown message") @@ -824,7 +860,7 @@ func genScript(t *testing.T, address string) lnwire.DeliveryAddress { // Generate an address which can be used for testing. deliveryAddr, err := btcutil.DecodeAddress( address, - activeNetParams.Params, + &chaincfg.TestNet3Params, ) if err != nil { t.Fatalf("invalid delivery address: %v", err) From 0dda9e06ce5ed8cec78928435f2d6a4f09dca47b Mon Sep 17 00:00:00 2001 From: nsa Date: Fri, 26 Jun 2020 21:04:15 -0400 Subject: [PATCH 07/15] multi: exporting PendingUpdate, ChannelCloseUpdate, ErrorBufferSize --- peer.go | 32 ++++++++++++++++---------------- rpcserver.go | 18 +++++++++--------- server.go | 2 +- 3 files changed, 26 insertions(+), 26 deletions(-) diff --git a/peer.go b/peer.go index efe58fcdc..fba338c5e 100644 --- a/peer.go +++ b/peer.go @@ -58,8 +58,8 @@ const ( // this struct. 
outgoingQueueLen = 50 - // errorBufferSize is the number of historic peer errors that we store. - errorBufferSize = 10 + // ErrorBufferSize is the number of historic peer errors that we store. + ErrorBufferSize = 10 ) // outgoingMsg packages an lnwire.Message to be sent out on the wire, along with @@ -87,23 +87,23 @@ type closeMsg struct { msg lnwire.Message } -// pendingUpdate describes the pending state of a closing channel. -type pendingUpdate struct { +// PendingUpdate describes the pending state of a closing channel. +type PendingUpdate struct { Txid []byte OutputIndex uint32 } -// channelCloseUpdate contains the outcome of the close channel operation. -type channelCloseUpdate struct { +// ChannelCloseUpdate contains the outcome of the close channel operation. +type ChannelCloseUpdate struct { ClosingTxid []byte Success bool } -// timestampedError is a timestamped error that is used to store the most recent +// TimestampedError is a timestamped error that is used to store the most recent // errors we have experienced with our peers. -type timestampedError struct { - error error - timestamp time.Time +type TimestampedError struct { + Error error + Timestamp time.Time } // peer is an active peer on the Lightning Network. This struct is responsible @@ -1347,7 +1347,7 @@ func (p *peer) storeError(err error) { } p.errorBuffer.Add( - ×tampedError{timestamp: time.Now(), error: err}, + &TimestampedError{Timestamp: time.Now(), Error: err}, ) } @@ -2562,18 +2562,18 @@ func (p *peer) finalizeChanClosure(chanCloser *chancloser.ChanCloser) { // If this is a locally requested shutdown, update the caller with a // new event detailing the current pending state of this request. if closeReq != nil { - closeReq.Updates <- &pendingUpdate{ + closeReq.Updates <- &PendingUpdate{ Txid: closingTxid[:], } } - go waitForChanToClose(chanCloser.NegotiationHeight(), notifier, errChan, + go WaitForChanToClose(chanCloser.NegotiationHeight(), notifier, errChan, chanPoint, &closingTxid, closingTx.TxOut[0].PkScript, func() { // Respond to the local subsystem which requested the // channel closure. if closeReq != nil { - closeReq.Updates <- &channelCloseUpdate{ + closeReq.Updates <- &ChannelCloseUpdate{ ClosingTxid: closingTxid[:], Success: true, } @@ -2581,12 +2581,12 @@ func (p *peer) finalizeChanClosure(chanCloser *chancloser.ChanCloser) { }) } -// waitForChanToClose uses the passed notifier to wait until the channel has +// WaitForChanToClose uses the passed notifier to wait until the channel has // been detected as closed on chain and then concludes by executing the // following actions: the channel point will be sent over the settleChan, and // finally the callback will be executed. If any error is encountered within // the function, then it will be sent over the errChan. -func waitForChanToClose(bestHeight uint32, notifier chainntnfs.ChainNotifier, +func WaitForChanToClose(bestHeight uint32, notifier chainntnfs.ChainNotifier, errChan chan error, chanPoint *wire.OutPoint, closingTxID *chainhash.Hash, closeScript []byte, cb func()) { diff --git a/rpcserver.go b/rpcserver.go index 2b161f7bd..8adef1379 100644 --- a/rpcserver.go +++ b/rpcserver.go @@ -2105,17 +2105,17 @@ func (r *rpcServer) CloseChannel(in *lnrpc.CloseChannelRequest, // With the transaction broadcast, we send our first update to // the client. 
updateChan = make(chan interface{}, 2) - updateChan <- &pendingUpdate{ + updateChan <- &PendingUpdate{ Txid: closingTxid[:], } errChan = make(chan error, 1) notifier := r.server.cc.chainNotifier - go waitForChanToClose(uint32(bestHeight), notifier, errChan, chanPoint, + go WaitForChanToClose(uint32(bestHeight), notifier, errChan, chanPoint, &closingTxid, closingTx.TxOut[0].PkScript, func() { // Respond to the local subsystem which // requested the channel closure. - updateChan <- &channelCloseUpdate{ + updateChan <- &ChannelCloseUpdate{ ClosingTxid: closingTxid[:], Success: true, } @@ -2228,7 +2228,7 @@ out: // then we can break out of our dispatch loop as we no // longer need to process any further updates. switch closeUpdate := closingUpdate.(type) { - case *channelCloseUpdate: + case *ChannelCloseUpdate: h, _ := chainhash.NewHash(closeUpdate.ClosingTxid) rpcsLog.Infof("[closechannel] close completed: "+ "txid(%v)", h) @@ -2246,7 +2246,7 @@ func createRPCCloseUpdate(update interface{}) ( *lnrpc.CloseStatusUpdate, error) { switch u := update.(type) { - case *channelCloseUpdate: + case *ChannelCloseUpdate: return &lnrpc.CloseStatusUpdate{ Update: &lnrpc.CloseStatusUpdate_ChanClose{ ChanClose: &lnrpc.ChannelCloseUpdate{ @@ -2254,7 +2254,7 @@ func createRPCCloseUpdate(update interface{}) ( }, }, }, nil - case *pendingUpdate: + case *PendingUpdate: return &lnrpc.CloseStatusUpdate{ Update: &lnrpc.CloseStatusUpdate_ClosePending{ ClosePending: &lnrpc.PendingUpdate{ @@ -2601,11 +2601,11 @@ func (r *rpcServer) ListPeers(ctx context.Context, // Add the relevant peer errors to our response. for _, error := range peerErrors { - tsError := error.(*timestampedError) + tsError := error.(*TimestampedError) rpcErr := &lnrpc.TimestampedError{ - Timestamp: uint64(tsError.timestamp.Unix()), - Error: tsError.error.Error(), + Timestamp: uint64(tsError.Timestamp.Unix()), + Error: tsError.Error.Error(), } peer.Errors = append(peer.Errors, rpcErr) diff --git a/server.go b/server.go index f53f4170e..f3ac22019 100644 --- a/server.go +++ b/server.go @@ -2792,7 +2792,7 @@ func (s *server) peerConnected(conn net.Conn, connReq *connmgr.ConnReq, errBuffer, ok := s.peerErrors[pkStr] if !ok { var err error - errBuffer, err = queue.NewCircularBuffer(errorBufferSize) + errBuffer, err = queue.NewCircularBuffer(ErrorBufferSize) if err != nil { srvrLog.Errorf("unable to create peer %v", err) return From 328094d8fb1e19e044dd3c701f89634fad519001 Mon Sep 17 00:00:00 2001 From: nsa Date: Fri, 26 Jun 2020 21:05:10 -0400 Subject: [PATCH 08/15] peer.go: replace switch statements with if statements --- peer.go | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/peer.go b/peer.go index fba338c5e..3bb118799 100644 --- a/peer.go +++ b/peer.go @@ -449,11 +449,10 @@ func (p *peer) Start() error { // initGossipSync initializes either a gossip syncer or an initial routing // dump, depending on the negotiated synchronization method. func (p *peer) initGossipSync() { - switch { // If the remote peer knows of the new gossip queries feature, then // we'll create a new gossipSyncer in the AuthenticatedGossiper for it. - case p.remoteFeatures.HasFeature(lnwire.GossipQueriesOptional): + if p.remoteFeatures.HasFeature(lnwire.GossipQueriesOptional) { srvrLog.Infof("Negotiated chan series queries with %x", p.pubKeyBytes[:]) @@ -509,9 +508,8 @@ func (p *peer) loadActiveChannels(chans []*channeldb.OpenChannel) ( // Skip adding any permanently irreconcilable channels to the // htlcswitch. 
- switch { - case !dbChan.HasChanStatus(channeldb.ChanStatusDefault) && - !dbChan.HasChanStatus(channeldb.ChanStatusRestored): + if !dbChan.HasChanStatus(channeldb.ChanStatusDefault) && + !dbChan.HasChanStatus(channeldb.ChanStatusRestored) { peerLog.Warnf("ChannelPoint(%v) has status %v, won't "+ "start.", chanPoint, dbChan.ChanStatus()) @@ -2670,8 +2668,7 @@ func (p *peer) handleInitMsg(msg *lnwire.Init) error { // Now that we know we understand their requirements, we'll check to // see if they don't support anything that we deem to be mandatory. - switch { - case !p.remoteFeatures.HasFeature(lnwire.DataLossProtectRequired): + if !p.remoteFeatures.HasFeature(lnwire.DataLossProtectRequired) { return fmt.Errorf("data loss protection required") } From 4ee4a1a24e89b2a27017c3a252ddbe789d613f2f Mon Sep 17 00:00:00 2001 From: nsa Date: Fri, 26 Jun 2020 21:06:16 -0400 Subject: [PATCH 09/15] peer.go: remove height argument to addLink function Best height is not needed to create a link. --- peer.go | 20 +++----------------- 1 file changed, 3 insertions(+), 17 deletions(-) diff --git a/peer.go b/peer.go index 3bb118799..be4a20833 100644 --- a/peer.go +++ b/peer.go @@ -532,11 +532,6 @@ func (p *peer) loadActiveChannels(chans []*channeldb.OpenChannel) ( continue } - _, currentHeight, err := p.server.cc.chainIO.GetBestBlock() - if err != nil { - return nil, err - } - // Before we register this new link with the HTLC Switch, we'll // need to fetch its current link-layer forwarding policy from // the database. @@ -608,7 +603,7 @@ func (p *peer) loadActiveChannels(chans []*channeldb.OpenChannel) ( err = p.addLink( chanPoint, lnChan, forwardingPolicy, chainEvents, - currentHeight, true, + true, ) if err != nil { return nil, fmt.Errorf("unable to add link %v to "+ @@ -628,7 +623,7 @@ func (p *peer) addLink(chanPoint *wire.OutPoint, lnChan *lnwallet.LightningChannel, forwardingPolicy *htlcswitch.ForwardingPolicy, chainEvents *contractcourt.ChainEventSubscription, - currentHeight int32, syncStates bool) error { + syncStates bool) error { // onChannelFailure will be called by the link in case the channel // fails for some reason. @@ -2005,15 +2000,6 @@ out: // necessary items it needs to function. // // TODO(roasbeef): panic on below? - _, currentHeight, err := p.server.cc.chainIO.GetBestBlock() - if err != nil { - err := fmt.Errorf("unable to get best "+ - "block: %v", err) - peerLog.Errorf(err.Error()) - - newChanReq.err <- err - continue - } chainEvents, err := p.server.chainArb.SubscribeChannelEvents( *chanPoint, ) @@ -2052,7 +2038,7 @@ out: // Create the link and add it to the switch. err = p.addLink( chanPoint, lnChan, forwardingPolicy, - chainEvents, currentHeight, shouldReestablish, + chainEvents, shouldReestablish, ) if err != nil { err := fmt.Errorf("can't register new channel "+ From 02fd905f61de91dc07c56bb5c0952e954f069c59 Mon Sep 17 00:00:00 2001 From: nsa Date: Fri, 26 Jun 2020 21:07:00 -0400 Subject: [PATCH 10/15] peer.go: moving the apply function outside of the constructor This is mainly for readability so that the function isn't indented. 
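To make the motivation concrete, here is a minimal, self-contained
sketch of the same refactoring pattern. The names here (newStream,
apply) are illustrative stand-ins, not the actual lnd types:

    package main

    import "fmt"

    // stream stands in for lnd's msgStream; everything in this sketch
    // is an illustrative stand-in rather than real lnd code.
    type stream struct {
        handle func(msg string)
    }

    func newStream(handle func(msg string)) *stream {
        return &stream{handle: handle}
    }

    func main() {
        // Before: the handler body lives inline inside the constructor
        // call, so every line of it carries an extra indent level.
        inline := newStream(func(msg string) {
            fmt.Println("inline handler:", msg)
        })
        inline.handle("a")

        // After: the closure is hoisted into a named variable first,
        // as this patch does with the chan msg stream's apply function.
        apply := func(msg string) {
            fmt.Println("hoisted handler:", msg)
        }
        hoisted := newStream(apply)
        hoisted.handle("b")
    }

Hoisting the closure keeps its body at one level of indentation and lets
the constructor call read as a single short expression.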
--- peer.go | 52 +++++++++++++++++++++++++++------------------------- 1 file changed, 27 insertions(+), 25 deletions(-) diff --git a/peer.go b/peer.go index be4a20833..e5b0d75b0 100644 --- a/peer.go +++ b/peer.go @@ -1060,35 +1060,37 @@ func newChanMsgStream(p *peer, cid lnwire.ChannelID) *msgStream { var chanLink htlcswitch.ChannelLink + apply := func(msg lnwire.Message) { + // This check is fine because if the link no longer exists, it will + // be removed from the activeChannels map and subsequent messages + // shouldn't reach the chan msg stream. + if chanLink == nil { + chanLink = waitUntilLinkActive(p, cid) + + // If the link is still not active and the calling function + // errored out, just return. + if chanLink == nil { + return + } + } + + // In order to avoid unnecessarily delivering message + // as the peer is exiting, we'll check quickly to see + // if we need to exit. + select { + case <-p.quit: + return + default: + } + + chanLink.HandleChannelUpdate(msg) + } + return newMsgStream(p, fmt.Sprintf("Update stream for ChannelID(%x) created", cid[:]), fmt.Sprintf("Update stream for ChannelID(%x) exiting", cid[:]), 1000, - func(msg lnwire.Message) { - // This check is fine because if the link no longer exists, it will - // be removed from the activeChannels map and subsequent messages - // shouldn't reach the chan msg stream. - if chanLink == nil { - chanLink = waitUntilLinkActive(p, cid) - - // If the link is still not active and the calling function - // errored out, just return. - if chanLink == nil { - return - } - } - - // In order to avoid unnecessarily delivering message - // as the peer is exiting, we'll check quickly to see - // if we need to exit. - select { - case <-p.quit: - return - default: - } - - chanLink.HandleChannelUpdate(msg) - }, + apply, ) } From 1a91013c1482c093bee4a9d795d4bb0ffa723bf9 Mon Sep 17 00:00:00 2001 From: nsa Date: Fri, 26 Jun 2020 21:51:51 -0400 Subject: [PATCH 11/15] peer_test+test_utils: remove duplicate *LightningChannel return param It was already present in the peer object that was returned. 
--- peer_test.go | 10 +++++----- test_utils.go | 44 ++++++++++++++++++++++---------------------- 2 files changed, 27 insertions(+), 27 deletions(-) diff --git a/peer_test.go b/peer_test.go index 1d1afbc09..adbb65700 100644 --- a/peer_test.go +++ b/peer_test.go @@ -40,7 +40,7 @@ func TestPeerChannelClosureAcceptFeeResponder(t *testing.T) { } broadcastTxChan := make(chan *wire.MsgTx) - alicePeer, _, bobChan, cleanUp, err := createTestPeer( + alicePeer, bobChan, cleanUp, err := createTestPeer( notifier, broadcastTxChan, noUpdate, ) if err != nil { @@ -141,7 +141,7 @@ func TestPeerChannelClosureAcceptFeeInitiator(t *testing.T) { } broadcastTxChan := make(chan *wire.MsgTx) - alicePeer, _, bobChan, cleanUp, err := createTestPeer( + alicePeer, bobChan, cleanUp, err := createTestPeer( notifier, broadcastTxChan, noUpdate, ) if err != nil { @@ -262,7 +262,7 @@ func TestPeerChannelClosureFeeNegotiationsResponder(t *testing.T) { } broadcastTxChan := make(chan *wire.MsgTx) - alicePeer, _, bobChan, cleanUp, err := createTestPeer( + alicePeer, bobChan, cleanUp, err := createTestPeer( notifier, broadcastTxChan, noUpdate, ) if err != nil { @@ -454,7 +454,7 @@ func TestPeerChannelClosureFeeNegotiationsInitiator(t *testing.T) { } broadcastTxChan := make(chan *wire.MsgTx) - alicePeer, _, bobChan, cleanUp, err := createTestPeer( + alicePeer, bobChan, cleanUp, err := createTestPeer( notifier, broadcastTxChan, noUpdate, ) if err != nil { @@ -788,7 +788,7 @@ func TestCustomShutdownScript(t *testing.T) { broadcastTxChan := make(chan *wire.MsgTx) // Open a channel. - alicePeer, _, bobChan, cleanUp, err := createTestPeer( + alicePeer, bobChan, cleanUp, err := createTestPeer( notifier, broadcastTxChan, test.update, ) if err != nil { diff --git a/test_utils.go b/test_utils.go index 0e032ee09..d0f9a68d2 100644 --- a/test_utils.go +++ b/test_utils.go @@ -102,7 +102,7 @@ var noUpdate = func(a, b *channeldb.OpenChannel) {} // the channel states for each peer. 
func createTestPeer(notifier chainntnfs.ChainNotifier, publTx chan *wire.MsgTx, updateChan func(a, b *channeldb.OpenChannel)) (*peer, *lnwallet.LightningChannel, - *lnwallet.LightningChannel, func(), error) { + func(), error) { aliceKeyPriv, aliceKeyPub := btcec.PrivKeyFromBytes( btcec.S256(), alicesPrivKey, @@ -178,23 +178,23 @@ func createTestPeer(notifier chainntnfs.ChainNotifier, publTx chan *wire.MsgTx, bobRoot, err := chainhash.NewHash(bobKeyPriv.Serialize()) if err != nil { - return nil, nil, nil, nil, err + return nil, nil, nil, err } bobPreimageProducer := shachain.NewRevocationProducer(*bobRoot) bobFirstRevoke, err := bobPreimageProducer.AtIndex(0) if err != nil { - return nil, nil, nil, nil, err + return nil, nil, nil, err } bobCommitPoint := input.ComputeCommitmentPoint(bobFirstRevoke[:]) aliceRoot, err := chainhash.NewHash(aliceKeyPriv.Serialize()) if err != nil { - return nil, nil, nil, nil, err + return nil, nil, nil, err } alicePreimageProducer := shachain.NewRevocationProducer(*aliceRoot) aliceFirstRevoke, err := alicePreimageProducer.AtIndex(0) if err != nil { - return nil, nil, nil, nil, err + return nil, nil, nil, err } aliceCommitPoint := input.ComputeCommitmentPoint(aliceFirstRevoke[:]) @@ -203,33 +203,33 @@ func createTestPeer(notifier chainntnfs.ChainNotifier, publTx chan *wire.MsgTx, bobCommitPoint, *fundingTxIn, channeldb.SingleFunderTweaklessBit, ) if err != nil { - return nil, nil, nil, nil, err + return nil, nil, nil, err } alicePath, err := ioutil.TempDir("", "alicedb") if err != nil { - return nil, nil, nil, nil, err + return nil, nil, nil, err } dbAlice, err := channeldb.Open(alicePath) if err != nil { - return nil, nil, nil, nil, err + return nil, nil, nil, err } bobPath, err := ioutil.TempDir("", "bobdb") if err != nil { - return nil, nil, nil, nil, err + return nil, nil, nil, err } dbBob, err := channeldb.Open(bobPath) if err != nil { - return nil, nil, nil, nil, err + return nil, nil, nil, err } estimator := chainfee.NewStaticEstimator(12500, 0) feePerKw, err := estimator.EstimateFeePerKW(1) if err != nil { - return nil, nil, nil, nil, err + return nil, nil, nil, err } // TODO(roasbeef): need to factor in commit fee? 
@@ -254,7 +254,7 @@ func createTestPeer(notifier chainntnfs.ChainNotifier, publTx chan *wire.MsgTx, var chanIDBytes [8]byte if _, err := io.ReadFull(crand.Reader, chanIDBytes[:]); err != nil { - return nil, nil, nil, nil, err + return nil, nil, nil, err } shortChanID := lnwire.NewShortChanIDFromInt( @@ -305,7 +305,7 @@ func createTestPeer(notifier chainntnfs.ChainNotifier, publTx chan *wire.MsgTx, } if err := aliceChannelState.SyncPending(aliceAddr, 0); err != nil { - return nil, nil, nil, nil, err + return nil, nil, nil, err } bobAddr := &net.TCPAddr{ @@ -314,7 +314,7 @@ func createTestPeer(notifier chainntnfs.ChainNotifier, publTx chan *wire.MsgTx, } if err := bobChannelState.SyncPending(bobAddr, 0); err != nil { - return nil, nil, nil, nil, err + return nil, nil, nil, err } cleanUpFunc := func() { @@ -330,7 +330,7 @@ func createTestPeer(notifier chainntnfs.ChainNotifier, publTx chan *wire.MsgTx, aliceSigner, aliceChannelState, alicePool, ) if err != nil { - return nil, nil, nil, nil, err + return nil, nil, nil, err } alicePool.Start() @@ -339,7 +339,7 @@ func createTestPeer(notifier chainntnfs.ChainNotifier, publTx chan *wire.MsgTx, bobSigner, bobChannelState, bobPool, ) if err != nil { - return nil, nil, nil, nil, err + return nil, nil, nil, err } bobPool.Start() @@ -384,7 +384,7 @@ func createTestPeer(notifier chainntnfs.ChainNotifier, publTx chan *wire.MsgTx, _, currentHeight, err := s.cc.chainIO.GetBestBlock() if err != nil { - return nil, nil, nil, nil, err + return nil, nil, nil, err } htlcSwitch, err := htlcswitch.New(htlcswitch.Config{ @@ -399,10 +399,10 @@ func createTestPeer(notifier chainntnfs.ChainNotifier, publTx chan *wire.MsgTx, htlcswitch.DefaultAckInterval), }, uint32(currentHeight)) if err != nil { - return nil, nil, nil, nil, err + return nil, nil, nil, err } if err = htlcSwitch.Start(); err != nil { - return nil, nil, nil, nil, err + return nil, nil, nil, err } s.htlcSwitch = htlcSwitch @@ -422,10 +422,10 @@ func createTestPeer(notifier chainntnfs.ChainNotifier, publTx chan *wire.MsgTx, ApplyChannelUpdate: func(*lnwire.ChannelUpdate) error { return nil }, }) if err != nil { - return nil, nil, nil, nil, err + return nil, nil, nil, err } if err = chanStatusMgr.Start(); err != nil { - return nil, nil, nil, nil, err + return nil, nil, nil, err } s.chanStatusMgr = chanStatusMgr @@ -458,5 +458,5 @@ func createTestPeer(notifier chainntnfs.ChainNotifier, publTx chan *wire.MsgTx, alicePeer.wg.Add(1) go alicePeer.channelManager() - return alicePeer, channelAlice, channelBob, cleanUpFunc, nil + return alicePeer, channelBob, cleanUpFunc, nil } From e1632dd1721cbd3c791fc34b777430b1b79b2874 Mon Sep 17 00:00:00 2001 From: nsa Date: Mon, 29 Jun 2020 18:01:45 -0400 Subject: [PATCH 12/15] peer.go: handleCloseMsg function for channelManager goroutine --- peer.go | 129 ++++++++++++++++++++++++++++---------------------------- 1 file changed, 64 insertions(+), 65 deletions(-) diff --git a/peer.go b/peer.go index e5b0d75b0..d514c70b1 100644 --- a/peer.go +++ b/peer.go @@ -467,7 +467,7 @@ func (p *peer) initGossipSync() { // peers. p.server.authGossiper.InitSyncState(p) } - + } // QuitSignal is a method that should return a channel which will be sent upon @@ -1861,7 +1861,7 @@ func (p *peer) queueMsgLazy(msg lnwire.Message, errChan chan error) { // failed to write, and nil otherwise. 
func (p *peer) queue(priority bool, msg lnwire.Message, errChan chan error) { - + select { case p.outgoingQueue <- outgoingMsg{priority, msg, errChan}: case <-p.quit: @@ -2071,69 +2071,7 @@ out: // message from the remote peer, we'll use this message to // advance the chan closer state machine. case closeMsg := <-p.chanCloseMsgs: - // We'll now fetch the matching closing state machine - // in order to continue, or finalize the channel - // closure process. - chanCloser, err := p.fetchActiveChanCloser(closeMsg.cid) - if err != nil { - // If the channel is not known to us, we'll - // simply ignore this message. - if err == ErrChannelNotFound { - continue - } - - peerLog.Errorf("Unable to respond to remote "+ - "close msg: %v", err) - - errMsg := &lnwire.Error{ - ChanID: closeMsg.cid, - Data: lnwire.ErrorData(err.Error()), - } - p.queueMsg(errMsg, nil) - continue - } - - // Next, we'll process the next message using the - // target state machine. We'll either continue - // negotiation, or halt. - msgs, closeFin, err := chanCloser.ProcessCloseMsg( - closeMsg.msg, - ) - if err != nil { - err := fmt.Errorf("unable to process close "+ - "msg: %v", err) - peerLog.Error(err) - - // As the negotiations failed, we'll reset the - // channel state to ensure we act to on-chain - // events as normal. - chanCloser.Channel().ResetState() - - if chanCloser.CloseRequest() != nil { - chanCloser.CloseRequest().Err <- err - } - delete(p.activeChanCloses, closeMsg.cid) - continue - } - - // Queue any messages to the remote peer that need to - // be sent as a part of this latest round of - // negotiations. - for _, msg := range msgs { - p.queueMsg(msg, nil) - } - - // If we haven't finished close negotiations, then - // we'll continue as we can't yet finalize the closure. - if !closeFin { - continue - } - - // Otherwise, we've agreed on a closing fee! In this - // case, we'll wrap up the channel closure by notifying - // relevant sub-systems and launching a goroutine to - // wait for close tx conf. - p.finalizeChanClosure(chanCloser) + p.handleCloseMsg(closeMsg) // The channel reannounce delay has elapsed, broadcast the // reenabled channel updates to the network. This should only @@ -2858,6 +2796,67 @@ func (p *peer) StartTime() time.Time { return p.startTime } +// handleCloseMsg is called when a new cooperative channel closure related +// message is received from the remote peer. We'll use this message to advance +// the chan closer state machine. +func (p *peer) handleCloseMsg(msg *closeMsg) { + // We'll now fetch the matching closing state machine in order to continue, + // or finalize the channel closure process. + chanCloser, err := p.fetchActiveChanCloser(msg.cid) + if err != nil { + // If the channel is not known to us, we'll simply ignore this message. + if err == ErrChannelNotFound { + return + } + + peerLog.Errorf("Unable to respond to remote close msg: %v", err) + + errMsg := &lnwire.Error{ + ChanID: msg.cid, + Data: lnwire.ErrorData(err.Error()), + } + p.queueMsg(errMsg, nil) + return + } + + // Next, we'll process the next message using the target state machine. + // We'll either continue negotiation, or halt. + msgs, closeFin, err := chanCloser.ProcessCloseMsg( + msg.msg, + ) + if err != nil { + err := fmt.Errorf("unable to process close msg: %v", err) + peerLog.Error(err) + + // As the negotiations failed, we'll reset the channel state machine to + // ensure we act to on-chain events as normal. 
+ chanCloser.Channel().ResetState() + + if chanCloser.CloseRequest() != nil { + chanCloser.CloseRequest().Err <- err + } + delete(p.activeChanCloses, msg.cid) + return + } + + // Queue any messages to the remote peer that need to be sent as a part of + // this latest round of negotiations. + for _, msg := range msgs { + p.queueMsg(msg, nil) + } + + // If we haven't finished close negotiations, then we'll continue as we + // can't yet finalize the closure. + if !closeFin { + return + } + + // Otherwise, we've agreed on a closing fee! In this case, we'll wrap up + // the channel closure by notifying relevant sub-systems and launching a + // goroutine to wait for close tx conf. + p.finalizeChanClosure(chanCloser) +} + // LinkUpdater is an interface implemented by most messages in BOLT 2 that are // allowed to update the channel state. type LinkUpdater interface { From bf161c98358ed8a07fda2fdb86bcdc608ce26554 Mon Sep 17 00:00:00 2001 From: nsa Date: Mon, 29 Jun 2020 20:42:06 -0400 Subject: [PATCH 13/15] peer+server: add HandleLocalCloseChanReqs to peer, call from server --- peer.go | 14 ++++++++++++++ server.go | 10 +--------- 2 files changed, 15 insertions(+), 9 deletions(-) diff --git a/peer.go b/peer.go index d514c70b1..0bd3eff00 100644 --- a/peer.go +++ b/peer.go @@ -2857,6 +2857,20 @@ func (p *peer) handleCloseMsg(msg *closeMsg) { p.finalizeChanClosure(chanCloser) } +// HandleLocalCloseChanReqs accepts a *htlcswitch.ChanClose and passes it onto +// the channelManager goroutine, which will shut down the link and possibly +// close the channel. +func (p *peer) HandleLocalCloseChanReqs(req *htlcswitch.ChanClose) { + select { + case p.localCloseChanReqs <- req: + peerLog.Infof("Local close channel request delivered to peer: %v", + p.PubKey()) + case <-p.quit: + peerLog.Infof("Unable to deliver local close channel request to peer "+ + "%x", p.PubKey()) + } +} + // LinkUpdater is an interface implemented by most messages in BOLT 2 that are // allowed to update the channel state. type LinkUpdater interface { diff --git a/server.go b/server.go index f3ac22019..e0e1084b8 100644 --- a/server.go +++ b/server.go @@ -491,15 +491,7 @@ func newServer(cfg *Config, listenAddrs []net.Addr, chanDB *channeldb.DB, return } - select { - case peer.localCloseChanReqs <- request: - srvrLog.Infof("Local close channel request "+ - "delivered to peer: %x", pubKey[:]) - case <-peer.quit: - srvrLog.Errorf("Unable to deliver local close "+ - "channel request to peer %x, err: %v", - pubKey[:], err) - } + peer.HandleLocalCloseChanReqs(request) }, FwdingLog: chanDB.ForwardingLog(), SwitchPackager: channeldb.NewSwitchPackager(), From 9be9d6934943765a725cf4b5111d690a18ec0064 Mon Sep 17 00:00:00 2001 From: nsa Date: Mon, 29 Jun 2020 21:29:22 -0400 Subject: [PATCH 14/15] multi: initialize peer with peer.Config --- peer.go | 303 ++++++++++++++++++++++---------------------------- rpcserver.go | 12 +- server.go | 88 +++++++++++---- test_utils.go | 93 ++++++---------- 4 files changed, 243 insertions(+), 253 deletions(-) diff --git a/peer.go b/peer.go index 0bd3eff00..4852f56fb 100644 --- a/peer.go +++ b/peer.go @@ -29,7 +29,7 @@ import ( "github.com/lightningnetwork/lnd/lnwallet" "github.com/lightningnetwork/lnd/lnwallet/chancloser" "github.com/lightningnetwork/lnd/lnwire" - "github.com/lightningnetwork/lnd/pool" + ppeer "github.com/lightningnetwork/lnd/peer" "github.com/lightningnetwork/lnd/queue" "github.com/lightningnetwork/lnd/ticker" ) @@ -131,13 +131,7 @@ type peer struct { // our last ping message. To be used atomically. 
pingLastSend int64 - cfg *Config - - connReq *connmgr.ConnReq - conn net.Conn - - addr *lnwire.NetAddress - pubKeyBytes [33]byte + cfg ppeer.Config // activeSignal when closed signals that the peer is now active and // ready to process messages. @@ -147,8 +141,6 @@ type peer struct { // It will be zero for peers that did not successfully call Start(). startTime time.Time - inbound bool - // sendQueue is the channel which is used to queue outgoing to be // written onto the wire. Note that this channel is unbuffered. sendQueue chan outgoingMsg @@ -206,28 +198,6 @@ type peer struct { // well as lnwire.ClosingSigned messages. chanCloseMsgs chan *closeMsg - // chanActiveTimeout specifies the duration the peer will wait to - // request a channel reenable, beginning from the time the peer was - // started. - chanActiveTimeout time.Duration - - server *server - - // features is the set of features that we advertised to the remote - // node. - features *lnwire.FeatureVector - - // legacyFeatures is the set of features that we advertised to the remote - // node for backwards compatibility. Nodes that have not implemented - // flat featurs will still be able to read our feature bits from the - // legacy global field, but we will also advertise everything in the - // default features field. - legacyFeatures *lnwire.FeatureVector - - // outgoingCltvRejectDelta defines the number of blocks before expiry of - // an htlc where we don't offer an htlc anymore. - outgoingCltvRejectDelta uint32 - // remoteFeatures is the feature vector received from the peer during // the connection handshake. remoteFeatures *lnwire.FeatureVector @@ -238,22 +208,6 @@ type peer struct { // peer's chansync message with its own over and over again. resentChanSyncMsg map[lnwire.ChannelID]struct{} - // errorBuffer stores a set of errors related to a peer. It contains - // error messages that our peer has recently sent us over the wire and - // records of unknown messages that were sent to us and, so that we can - // track a full record of the communication errors we have had with our - // peer. If we choose to disconnect from a peer, it also stores the - // reason we had for disconnecting. - errorBuffer *queue.CircularBuffer - - // writePool is the task pool to that manages reuse of write buffers. - // Write tasks are submitted to the pool in order to conserve the total - // number of write buffers allocated at any one time, and decouple write - // buffer allocation from the peer life cycle. - writePool *pool.Write - - readPool *pool.Read - queueQuit chan struct{} quit chan struct{} wg sync.WaitGroup @@ -262,41 +216,14 @@ type peer struct { // A compile-time check to ensure that peer satisfies the lnpeer.Peer interface. var _ lnpeer.Peer = (*peer)(nil) -// newPeer creates a new peer from an establish connection object, and a -// pointer to the main server. It takes an error buffer which may contain errors -// from a previous connection with the peer if we have been connected to them -// before. -func newPeer(cfg *Config, conn net.Conn, connReq *connmgr.ConnReq, server *server, - addr *lnwire.NetAddress, inbound bool, - features, legacyFeatures *lnwire.FeatureVector, - chanActiveTimeout time.Duration, - outgoingCltvRejectDelta uint32, - errBuffer *queue.CircularBuffer) ( - *peer, error) { - - nodePub := addr.IdentityKey +// newPeer creates a new peer from a peer.Config object. 
+func newPeer(cfg ppeer.Config) *peer { p := &peer{ - conn: conn, - addr: addr, - - cfg: cfg, - - activeSignal: make(chan struct{}), - - inbound: inbound, - connReq: connReq, - - server: server, - - features: features, - legacyFeatures: legacyFeatures, - - outgoingCltvRejectDelta: outgoingCltvRejectDelta, - - sendQueue: make(chan outgoingMsg), - outgoingQueue: make(chan outgoingMsg), - + cfg: cfg, + activeSignal: make(chan struct{}), + sendQueue: make(chan outgoingMsg), + outgoingQueue: make(chan outgoingMsg), addedChannels: make(map[lnwire.ChannelID]struct{}), activeChannels: make(map[lnwire.ChannelID]*lnwallet.LightningChannel), newChannels: make(chan *newChannelMsg, 1), @@ -308,20 +235,11 @@ func newPeer(cfg *Config, conn net.Conn, connReq *connmgr.ConnReq, server *serve linkFailures: make(chan linkFailureReport), chanCloseMsgs: make(chan *closeMsg), resentChanSyncMsg: make(map[lnwire.ChannelID]struct{}), - - chanActiveTimeout: chanActiveTimeout, - - errorBuffer: errBuffer, - - writePool: server.writePool, - readPool: server.readPool, - - queueQuit: make(chan struct{}), - quit: make(chan struct{}), + queueQuit: make(chan struct{}), + quit: make(chan struct{}), } - copy(p.pubKeyBytes[:], nodePub.SerializeCompressed()) - return p, nil + return p } // Start starts all helper goroutines the peer needs for normal operations. In @@ -385,7 +303,7 @@ func (p *peer) Start() error { // Fetch and then load all the active channels we have with this remote // peer from the database. - activeChans, err := p.server.chanDB.FetchOpenChannels(p.addr.IdentityKey) + activeChans, err := p.cfg.ChannelDB.FetchOpenChannels(p.cfg.Addr.IdentityKey) if err != nil { peerLog.Errorf("unable to fetch active chans "+ "for peer %v: %v", p, err) @@ -393,7 +311,7 @@ func (p *peer) Start() error { } if len(activeChans) == 0 { - p.server.prunePersistentPeerConnection(p.pubKeyBytes) + p.cfg.PrunePersistentPeerConnection(p.cfg.PubKeyBytes) } // Next, load all the active channels we have with this peer, @@ -454,7 +372,7 @@ func (p *peer) initGossipSync() { // we'll create a new gossipSyncer in the AuthenticatedGossiper for it. if p.remoteFeatures.HasFeature(lnwire.GossipQueriesOptional) { srvrLog.Infof("Negotiated chan series queries with %x", - p.pubKeyBytes[:]) + p.cfg.PubKeyBytes[:]) // Register the peer's gossip syncer with the gossiper. // This blocks synchronously to ensure the gossip syncer is @@ -465,7 +383,7 @@ func (p *peer) initGossipSync() { // requires an improved version of the current network // bootstrapper to ensure we can find and connect to non-channel // peers. - p.server.authGossiper.InitSyncState(p) + p.cfg.AuthGossiper.InitSyncState(p) } } @@ -493,7 +411,7 @@ func (p *peer) loadActiveChannels(chans []*channeldb.OpenChannel) ( for _, dbChan := range chans { lnChan, err := lnwallet.NewLightningChannel( - p.server.cc.signer, dbChan, p.server.sigPool, + p.cfg.Signer, dbChan, p.cfg.SigPool, ) if err != nil { return nil, err @@ -535,7 +453,7 @@ func (p *peer) loadActiveChannels(chans []*channeldb.OpenChannel) ( // Before we register this new link with the HTLC Switch, we'll // need to fetch its current link-layer forwarding policy from // the database. - graph := p.server.chanDB.ChannelGraph() + graph := p.cfg.ChannelDB.ChannelGraph() info, p1, p2, err := graph.FetchChannelEdgesByOutpoint(chanPoint) if err != nil && err != channeldb.ErrEdgeNotFound { return nil, err @@ -550,7 +468,7 @@ func (p *peer) loadActiveChannels(chans []*channeldb.OpenChannel) ( // particular channel. 
var selfPolicy *channeldb.ChannelEdgePolicy if info != nil && bytes.Equal(info.NodeKey1Bytes[:], - p.server.identityECDH.PubKey().SerializeCompressed()) { + p.cfg.ServerPubKey[:]) { selfPolicy = p1 } else { @@ -573,7 +491,7 @@ func (p *peer) loadActiveChannels(chans []*channeldb.OpenChannel) ( peerLog.Warnf("Unable to find our forwarding policy "+ "for channel %v, using default values", chanPoint) - forwardingPolicy = &p.server.cc.routingPolicy + forwardingPolicy = &p.cfg.RoutingPolicy } peerLog.Tracef("Using link policy of: %v", @@ -594,7 +512,7 @@ func (p *peer) loadActiveChannels(chans []*channeldb.OpenChannel) ( } // Subscribe to the set of on-chain events for this channel. - chainEvents, err := p.server.chainArb.SubscribeChannelEvents( + chainEvents, err := p.cfg.ChainArb.SubscribeChannelEvents( *chanPoint, ) if err != nil { @@ -641,26 +559,26 @@ func (p *peer) addLink(chanPoint *wire.OutPoint, select { case p.linkFailures <- failure: case <-p.quit: - case <-p.server.quit: + case <-p.cfg.Quit: } } linkCfg := htlcswitch.ChannelLinkConfig{ Peer: p, - DecodeHopIterators: p.server.sphinx.DecodeHopIterators, - ExtractErrorEncrypter: p.server.sphinx.ExtractErrorEncrypter, - FetchLastChannelUpdate: p.server.fetchLastChanUpdate(), + DecodeHopIterators: p.cfg.Sphinx.DecodeHopIterators, + ExtractErrorEncrypter: p.cfg.Sphinx.ExtractErrorEncrypter, + FetchLastChannelUpdate: p.cfg.FetchLastChanUpdate, HodlMask: p.cfg.Hodl.Mask(), - Registry: p.server.invoices, - Switch: p.server.htlcSwitch, - Circuits: p.server.htlcSwitch.CircuitModifier(), - ForwardPackets: p.server.interceptableSwitch.ForwardPackets, + Registry: p.cfg.Invoices, + Switch: p.cfg.Switch, + Circuits: p.cfg.Switch.CircuitModifier(), + ForwardPackets: p.cfg.InterceptSwitch.ForwardPackets, FwrdingPolicy: *forwardingPolicy, - FeeEstimator: p.server.cc.feeEstimator, - PreimageCache: p.server.witnessBeacon, + FeeEstimator: p.cfg.FeeEstimator, + PreimageCache: p.cfg.WitnessBeacon, ChainEvents: chainEvents, UpdateContractSignals: func(signals *contractcourt.ContractSignals) error { - return p.server.chainArb.UpdateContractSignals( + return p.cfg.ChainArb.UpdateContractSignals( *chanPoint, signals, ) }, @@ -673,14 +591,14 @@ func (p *peer) addLink(chanPoint *wire.OutPoint, UnsafeReplay: p.cfg.UnsafeReplay, MinFeeUpdateTimeout: htlcswitch.DefaultMinLinkFeeUpdateTimeout, MaxFeeUpdateTimeout: htlcswitch.DefaultMaxLinkFeeUpdateTimeout, - OutgoingCltvRejectDelta: p.outgoingCltvRejectDelta, - TowerClient: p.server.towerClient, + OutgoingCltvRejectDelta: p.cfg.OutgoingCltvRejectDelta, + TowerClient: p.cfg.TowerClient, MaxOutgoingCltvExpiry: p.cfg.MaxOutgoingCltvExpiry, MaxFeeAllocation: p.cfg.MaxChannelFeeAllocation, - NotifyActiveLink: p.server.channelNotifier.NotifyActiveLinkEvent, - NotifyActiveChannel: p.server.channelNotifier.NotifyActiveChannelEvent, - NotifyInactiveChannel: p.server.channelNotifier.NotifyInactiveChannelEvent, - HtlcNotifier: p.server.htlcNotifier, + NotifyActiveLink: p.cfg.ChannelNotifier.NotifyActiveLinkEvent, + NotifyActiveChannel: p.cfg.ChannelNotifier.NotifyActiveChannelEvent, + NotifyInactiveChannel: p.cfg.ChannelNotifier.NotifyInactiveChannelEvent, + HtlcNotifier: p.cfg.HtlcNotifier, } link := htlcswitch.NewChannelLink(linkCfg, lnChan) @@ -689,12 +607,12 @@ func (p *peer) addLink(chanPoint *wire.OutPoint, // links going by the same channel id. If one is found, we'll shut it // down to ensure that the mailboxes are only ever under the control of // one link. 
- p.server.htlcSwitch.RemoveLink(link.ChanID()) + p.cfg.Switch.RemoveLink(link.ChanID()) // With the channel link created, we'll now notify the htlc switch so // this channel can be used to dispatch local payments and also // passively forward payments. - return p.server.htlcSwitch.AddLink(link) + return p.cfg.Switch.AddLink(link) } // maybeSendNodeAnn sends our node announcement to the remote peer if at least @@ -716,7 +634,7 @@ func (p *peer) maybeSendNodeAnn(channels []*channeldb.OpenChannel) { return } - ourNodeAnn, err := p.server.genNodeAnnouncement(false) + ourNodeAnn, err := p.cfg.GenNodeAnnouncement(false) if err != nil { srvrLog.Debugf("Unable to retrieve node announcement: %v", err) return @@ -724,7 +642,7 @@ func (p *peer) maybeSendNodeAnn(channels []*channeldb.OpenChannel) { if err := p.SendMessageLazy(false, &ourNodeAnn); err != nil { srvrLog.Debugf("Unable to resend node announcement to %x: %v", - p.pubKeyBytes, err) + p.cfg.PubKeyBytes, err) } } @@ -759,20 +677,20 @@ func (p *peer) Disconnect(reason error) { peerLog.Infof(err.Error()) // Ensure that the TCP connection is properly closed before continuing. - p.conn.Close() + p.cfg.Conn.Close() close(p.quit) } // String returns the string representation of this peer. func (p *peer) String() string { - return fmt.Sprintf("%x@%s", p.pubKeyBytes, p.conn.RemoteAddr()) + return fmt.Sprintf("%x@%s", p.cfg.PubKeyBytes, p.cfg.Conn.RemoteAddr()) } // readNextMessage reads, and returns the next message on the wire along with // any additional raw payload. func (p *peer) readNextMessage() (lnwire.Message, error) { - noiseConn, ok := p.conn.(*brontide.Conn) + noiseConn, ok := p.cfg.Conn.(*brontide.Conn) if !ok { return nil, fmt.Errorf("brontide.Conn required to read messages") } @@ -792,7 +710,7 @@ func (p *peer) readNextMessage() (lnwire.Message, error) { // is message oriented and allows nodes to pad on additional data to // the message stream. var rawMsg []byte - err = p.readPool.Submit(func(buf *buffer.Read) error { + err = p.cfg.ReadPool.Submit(func(buf *buffer.Read) error { // Before reading the body of the message, set the read timeout // accordingly to ensure we don't block other readers using the // pool. We do so only after the task has been scheduled to @@ -1000,7 +918,7 @@ func waitUntilLinkActive(p *peer, // we will get an ActiveLinkEvent notification and retrieve the link. If // the call to GetLink is before SubscribeChannelEvents, however, there // will be a race condition. - sub, err := p.server.channelNotifier.SubscribeChannelEvents() + sub, err := p.cfg.ChannelNotifier.SubscribeChannelEvents() if err != nil { // If we have a non-nil error, then the server is shutting down and we // can exit here and return nil. This means no message will be delivered @@ -1011,7 +929,7 @@ func waitUntilLinkActive(p *peer, // The link may already be active by this point, and we may have missed the // ActiveLinkEvent. Check if the link exists. - link, _ := p.server.htlcSwitch.GetLink(cid) + link, _ := p.cfg.Switch.GetLink(cid) if link != nil { return link } @@ -1041,7 +959,7 @@ func waitUntilLinkActive(p *peer, // The link shouldn't be nil as we received an // ActiveLinkEvent. If it is nil, we return nil and the // calling function should catch it. 
- link, _ = p.server.htlcSwitch.GetLink(cid) + link, _ = p.cfg.Switch.GetLink(cid) return link case <-p.quit: @@ -1103,7 +1021,7 @@ func newDiscMsgStream(p *peer) *msgStream { "Update stream for gossiper exited", 1000, func(msg lnwire.Message) { - p.server.authGossiper.ProcessRemoteAnnouncement(msg, p) + p.cfg.AuthGossiper.ProcessRemoteAnnouncement(msg, p) }, ) } @@ -1207,15 +1125,15 @@ out: p.queueMsg(lnwire.NewPong(pongBytes), nil) case *lnwire.OpenChannel: - p.server.fundingMgr.processFundingOpen(msg, p) + p.cfg.ProcessFundingOpen(msg, p) case *lnwire.AcceptChannel: - p.server.fundingMgr.processFundingAccept(msg, p) + p.cfg.ProcessFundingAccept(msg, p) case *lnwire.FundingCreated: - p.server.fundingMgr.processFundingCreated(msg, p) + p.cfg.ProcessFundingCreated(msg, p) case *lnwire.FundingSigned: - p.server.fundingMgr.processFundingSigned(msg, p) + p.cfg.ProcessFundingSigned(msg, p) case *lnwire.FundingLocked: - p.server.fundingMgr.processFundingLocked(msg, p) + p.cfg.ProcessFundingLocked(msg, p) case *lnwire.Shutdown: select { @@ -1341,7 +1259,7 @@ func (p *peer) storeError(err error) { return } - p.errorBuffer.Add( + p.cfg.ErrorBuffer.Add( &TimestampedError{Timestamp: time.Now(), Error: err}, ) } @@ -1353,7 +1271,7 @@ func (p *peer) storeError(err error) { // // NOTE: This method should only be called from within the readHandler. func (p *peer) handleError(msg *lnwire.Error) bool { - key := p.addr.IdentityKey + key := p.cfg.Addr.IdentityKey // Store the error we have received. p.storeError(msg) @@ -1370,8 +1288,8 @@ func (p *peer) handleError(msg *lnwire.Error) bool { // If the channel ID for the error message corresponds to a pending // channel, then the funding manager will handle the error. - case p.server.fundingMgr.IsPendingChannel(msg.ChanID, key): - p.server.fundingMgr.processFundingError(msg, key) + case p.cfg.IsPendingChannel(msg.ChanID, key): + p.cfg.ProcessFundingError(msg, key) return false // If not we hand the error to the channel link for this channel. @@ -1593,7 +1511,7 @@ func (p *peer) writeMessage(msg lnwire.Message) error { p.logWireMessage(msg, false) } - noiseConn, ok := p.conn.(*brontide.Conn) + noiseConn, ok := p.cfg.Conn.(*brontide.Conn) if !ok { return fmt.Errorf("brontide.Conn required to write messages") } @@ -1629,7 +1547,7 @@ func (p *peer) writeMessage(msg lnwire.Message) error { // Otherwise, this is a new message. We'll acquire a write buffer to // serialize the message and buffer the ciphertext on the connection. - err := p.writePool.Submit(func(buf *bytes.Buffer) error { + err := p.cfg.WritePool.Submit(func(buf *bytes.Buffer) error { // Using a buffer allocated by the write pool, encode the // message directly into the buffer. _, writeErr := lnwire.WriteMessage(buf, msg, 0) @@ -1902,7 +1820,7 @@ func (p *peer) ChannelSnapshots() []*channeldb.ChannelSnapshot { // genDeliveryScript returns a new script to be used to send our funds to in // the case of a cooperative channel close negotiation. func (p *peer) genDeliveryScript() ([]byte, error) { - deliveryAddr, err := p.server.cc.wallet.NewAddress( + deliveryAddr, err := p.cfg.Wallet.NewAddress( lnwallet.WitnessPubKey, false, ) if err != nil { @@ -1925,7 +1843,7 @@ func (p *peer) channelManager() { // reenableTimeout will fire once after the configured channel status // interval has elapsed. This will trigger us to sign new channel // updates and broadcast them with the "disabled" flag unset. 
- reenableTimeout := time.After(p.chanActiveTimeout) + reenableTimeout := time.After(p.cfg.ChanActiveTimeout) out: for { @@ -1977,7 +1895,7 @@ out: // set of active channels, so we can look it up later // easily according to its channel ID. lnChan, err := lnwallet.NewLightningChannel( - p.server.cc.signer, newChan, p.server.sigPool, + p.cfg.Signer, newChan, p.cfg.SigPool, ) if err != nil { p.activeChanMtx.Unlock() @@ -2002,7 +1920,7 @@ out: // necessary items it needs to function. // // TODO(roasbeef): panic on below? - chainEvents, err := p.server.chainArb.SubscribeChannelEvents( + chainEvents, err := p.cfg.ChainArb.SubscribeChannelEvents( *chanPoint, ) if err != nil { @@ -2021,7 +1939,7 @@ out: // at initial channel creation. Note that the maximum HTLC value // defaults to the cap on the total value of outstanding HTLCs. fwdMinHtlc := lnChan.FwdMinHtlc() - defaultPolicy := p.server.cc.routingPolicy + defaultPolicy := p.cfg.RoutingPolicy forwardingPolicy := &htlcswitch.ForwardingPolicy{ MinHTLCOut: fwdMinHtlc, MaxHTLC: newChan.LocalChanCfg.MaxPendingAmount, @@ -2151,7 +2069,7 @@ func (p *peer) reenableActiveChannels() { // disabled bit to false and send out a new ChannelUpdate. If this // channel is already active, the update won't be sent. for _, chanPoint := range activePublicChans { - err := p.server.chanStatusMgr.RequestEnable(chanPoint) + err := p.cfg.ChanStatusMgr.RequestEnable(chanPoint) if err != nil { srvrLog.Errorf("Unable to enable channel %v: %v", chanPoint, err) @@ -2208,14 +2126,14 @@ func (p *peer) fetchActiveChanCloser(chanID lnwire.ChannelID) ( // In order to begin fee negotiations, we'll first compute our // target ideal fee-per-kw. We'll set this to a lax value, as // we weren't the ones that initiated the channel closure. - feePerKw, err := p.server.cc.feeEstimator.EstimateFeePerKW(6) + feePerKw, err := p.cfg.FeeEstimator.EstimateFeePerKW(6) if err != nil { peerLog.Errorf("unable to query fee estimator: %v", err) return nil, fmt.Errorf("unable to estimate fee") } - _, startingHeight, err := p.server.cc.chainIO.GetBestBlock() + _, startingHeight, err := p.cfg.ChainIO.GetBestBlock() if err != nil { peerLog.Errorf("unable to obtain best block: %v", err) return nil, fmt.Errorf("cannot obtain best block") @@ -2224,11 +2142,11 @@ func (p *peer) fetchActiveChanCloser(chanID lnwire.ChannelID) ( chanCloser = chancloser.NewChanCloser( chancloser.ChanCloseCfg{ Channel: channel, - UnregisterChannel: p.server.htlcSwitch.RemoveLink, - BroadcastTx: p.server.cc.wallet.PublishTransaction, - DisableChannel: p.server.chanStatusMgr.RequestDisable, + UnregisterChannel: p.cfg.Switch.RemoveLink, + BroadcastTx: p.cfg.Wallet.PublishTransaction, + DisableChannel: p.cfg.ChanStatusMgr.RequestDisable, Disconnect: func() error { - return p.server.DisconnectPeer(p.IdentityKey()) + return p.cfg.DisconnectPeer(p.IdentityKey()) }, Quit: p.quit, }, @@ -2330,7 +2248,7 @@ func (p *peer) handleLocalCloseReq(req *htlcswitch.ChanClose) { // Next, we'll create a new channel closer state machine to // handle the close negotiation. 
- _, startingHeight, err := p.server.cc.chainIO.GetBestBlock() + _, startingHeight, err := p.cfg.ChainIO.GetBestBlock() if err != nil { peerLog.Errorf(err.Error()) req.Err <- err @@ -2340,11 +2258,11 @@ func (p *peer) handleLocalCloseReq(req *htlcswitch.ChanClose) { chanCloser := chancloser.NewChanCloser( chancloser.ChanCloseCfg{ Channel: channel, - UnregisterChannel: p.server.htlcSwitch.RemoveLink, - BroadcastTx: p.server.cc.wallet.PublishTransaction, - DisableChannel: p.server.chanStatusMgr.RequestDisable, + UnregisterChannel: p.cfg.Switch.RemoveLink, + BroadcastTx: p.cfg.Wallet.PublishTransaction, + DisableChannel: p.cfg.ChanStatusMgr.RequestDisable, Disconnect: func() error { - return p.server.DisconnectPeer(p.IdentityKey()) + return p.cfg.DisconnectPeer(p.IdentityKey()) }, Quit: p.quit, }, @@ -2414,7 +2332,7 @@ func (p *peer) handleLinkFailure(failure linkFailureReport) { peerLog.Warnf("Force closing link(%v)", failure.shortChanID) - closeTx, err := p.server.chainArb.ForceCloseContract( + closeTx, err := p.cfg.ChainArb.ForceCloseContract( failure.chanPoint, ) if err != nil { @@ -2463,7 +2381,7 @@ func (p *peer) finalizeChanClosure(chanCloser *chancloser.ChanCloser) { // Next, we'll launch a goroutine which will request to be notified by // the ChainNotifier once the closure transaction obtains a single // confirmation. - notifier := p.server.cc.chainNotifier + notifier := p.cfg.ChainNotifier // If any error happens during waitForChanToClose, forward it to // closeReq. If this channel closure is not locally initiated, closeReq @@ -2557,7 +2475,7 @@ func (p *peer) WipeChannel(chanPoint *wire.OutPoint) { // Instruct the HtlcSwitch to close this link as the channel is no // longer active. - p.server.htlcSwitch.RemoveLink(chanID) + p.cfg.Switch.RemoveLink(chanID) } // handleInitMsg handles the incoming init message which contains global and @@ -2607,7 +2525,7 @@ func (p *peer) handleInitMsg(msg *lnwire.Init) error { // // NOTE: Part of the lnpeer.Peer interface. func (p *peer) LocalFeatures() *lnwire.FeatureVector { - return p.features + return p.cfg.Features } // RemoteFeatures returns the set of global features that has been advertised by @@ -2623,8 +2541,8 @@ func (p *peer) RemoteFeatures() *lnwire.FeatureVector { // currently supported local and global features. func (p *peer) sendInitMsg() error { msg := lnwire.NewInitMessage( - p.legacyFeatures.RawFeatureVector, - p.features.RawFeatureVector, + p.cfg.LegacyFeatures.RawFeatureVector, + p.cfg.Features.RawFeatureVector, ) return p.writeMessage(msg) @@ -2640,7 +2558,7 @@ func (p *peer) resendChanSyncMsg(cid lnwire.ChannelID) error { } // Check if we have any channel sync messages stored for this channel. - c, err := p.server.chanDB.FetchClosedChannelForID(cid) + c, err := p.cfg.ChannelDB.FetchClosedChannelForID(cid) if err != nil { return fmt.Errorf("unable to fetch channel sync messages for "+ "peer %v: %v", p, err) @@ -2730,7 +2648,7 @@ func (p *peer) sendMessage(sync, priority bool, msgs ...lnwire.Message) error { return err case <-p.quit: return lnpeer.ErrPeerExiting - case <-p.server.quit: + case <-p.cfg.Quit: return lnpeer.ErrPeerExiting } } @@ -2742,21 +2660,21 @@ func (p *peer) sendMessage(sync, priority bool, msgs ...lnwire.Message) error { // // NOTE: Part of the lnpeer.Peer interface. func (p *peer) PubKey() [33]byte { - return p.pubKeyBytes + return p.cfg.PubKeyBytes } // IdentityKey returns the public key of the remote peer. // // NOTE: Part of the lnpeer.Peer interface. 
func (p *peer) IdentityKey() *btcec.PublicKey { - return p.addr.IdentityKey + return p.cfg.Addr.IdentityKey } // Address returns the network address of the remote peer. // // NOTE: Part of the lnpeer.Peer interface. func (p *peer) Address() net.Addr { - return p.addr.Address + return p.cfg.Addr.Address } // AddNewChannel adds a new channel to the peer. The channel should fail to be @@ -2871,6 +2789,51 @@ func (p *peer) HandleLocalCloseChanReqs(req *htlcswitch.ChanClose) { } } +// NetAddress returns the network of the remote peer as an lnwire.NetAddress. +func (p *peer) NetAddress() *lnwire.NetAddress { + return p.cfg.Addr +} + +// Inbound returns cfg.Inbound. +func (p *peer) Inbound() bool { + return p.cfg.Inbound +} + +// ConnReq returns cfg.ConnReq. +func (p *peer) ConnReq() *connmgr.ConnReq { + return p.cfg.ConnReq +} + +// ErrorBuffer returns cfg.ErrorBuffer. +func (p *peer) ErrorBuffer() *queue.CircularBuffer { + return p.cfg.ErrorBuffer +} + +// SetAddress sets the remote peer's address given an address. +func (p *peer) SetAddress(address net.Addr) { + p.cfg.Addr.Address = address +} + +// ActiveSignal returns the peer's active signal. +func (p *peer) ActiveSignal() chan struct{} { + return p.activeSignal +} + +// Conn returns a pointer to the peer's connection struct. +func (p *peer) Conn() net.Conn { + return p.cfg.Conn +} + +// BytesReceived returns the number of bytes received from the peer. +func (p *peer) BytesReceived() uint64 { + return atomic.LoadUint64(&p.bytesReceived) +} + +// BytesSent returns the number of bytes sent to the peer. +func (p *peer) BytesSent() uint64 { + return atomic.LoadUint64(&p.bytesSent) +} + // LinkUpdater is an interface implemented by most messages in BOLT 2 that are // allowed to update the channel state. type LinkUpdater interface { diff --git a/rpcserver.go b/rpcserver.go index 8adef1379..3517eb22c 100644 --- a/rpcserver.go +++ b/rpcserver.go @@ -2573,10 +2573,10 @@ func (r *rpcServer) ListPeers(ctx context.Context, peer := &lnrpc.Peer{ PubKey: hex.EncodeToString(nodePub[:]), - Address: serverPeer.conn.RemoteAddr().String(), - Inbound: serverPeer.inbound, - BytesRecv: atomic.LoadUint64(&serverPeer.bytesReceived), - BytesSent: atomic.LoadUint64(&serverPeer.bytesSent), + Address: serverPeer.Conn().RemoteAddr().String(), + Inbound: serverPeer.Inbound(), + BytesRecv: serverPeer.BytesReceived(), + BytesSent: serverPeer.BytesSent(), SatSent: satSent, SatRecv: satRecv, PingTime: serverPeer.PingTime(), @@ -2591,12 +2591,12 @@ func (r *rpcServer) ListPeers(ctx context.Context, // it is non-nil. If we want all the stored errors, simply // add the full list to our set of errors. if in.LatestError { - latestErr := serverPeer.errorBuffer.Latest() + latestErr := serverPeer.ErrorBuffer().Latest() if latestErr != nil { peerErrors = []interface{}{latestErr} } } else { - peerErrors = serverPeer.errorBuffer.List() + peerErrors = serverPeer.ErrorBuffer().List() } // Add the relevant peer errors to our response. 
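The rpcserver changes above read peer state through accessors rather
than struct fields. As a rough sketch of the pattern this series moves
toward, with hypothetical, trimmed-down types rather than the real lnd
definitions:

    package main

    import "fmt"

    // peerConfig carries every dependency and setting a peer needs, so
    // the constructor no longer reaches back into a server instance.
    // These are hypothetical, trimmed-down types for illustration.
    type peerConfig struct {
        inbound bool
        addr    string
    }

    type peer struct {
        cfg peerConfig
    }

    func newPeer(cfg peerConfig) *peer {
        return &peer{cfg: cfg}
    }

    // Accessors expose config-backed state so callers such as the RPC
    // server read it through methods instead of struct fields.
    func (p *peer) Inbound() bool {
        return p.cfg.inbound
    }

    func (p *peer) Address() string {
        return p.cfg.addr
    }

    func main() {
        p := newPeer(peerConfig{inbound: true, addr: "127.0.0.1:9735"})
        fmt.Println(p.Inbound(), p.Address())
    }

Routing every dependency through one config value is what lets the
server build the config below and lets tests construct a peer without a
full server.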
diff --git a/server.go b/server.go index e0e1084b8..28852df2d 100644 --- a/server.go +++ b/server.go @@ -52,6 +52,7 @@ import ( "github.com/lightningnetwork/lnd/lnwire" "github.com/lightningnetwork/lnd/nat" "github.com/lightningnetwork/lnd/netann" + ppeer "github.com/lightningnetwork/lnd/peer" "github.com/lightningnetwork/lnd/peernotifier" "github.com/lightningnetwork/lnd/pool" "github.com/lightningnetwork/lnd/queue" @@ -2563,7 +2564,7 @@ func (s *server) InboundPeerConnected(conn net.Conn) { // we'll close out the new connection s.t there's only a single // connection between us. localPub := s.identityECDH.PubKey() - if !connectedPeer.inbound && + if !connectedPeer.Inbound() && !shouldDropLocalConnection(localPub, nodePub) { srvrLog.Warnf("Received inbound connection from "+ @@ -2674,7 +2675,7 @@ func (s *server) OutboundPeerConnected(connReq *connmgr.ConnReq, conn net.Conn) // we'll close out the new connection s.t there's only a single // connection between us. localPub := s.identityECDH.PubKey() - if connectedPeer.inbound && + if connectedPeer.Inbound() && shouldDropLocalConnection(localPub, nodePub) { srvrLog.Warnf("Established outbound connection to "+ @@ -2797,16 +2798,63 @@ func (s *server) peerConnected(conn net.Conn, connReq *connmgr.ConnReq, // offered that would trigger channel closure. In case of outgoing // htlcs, an extra block is added to prevent the channel from being // closed when the htlc is outstanding and a new block comes in. - p, err := newPeer( - s.cfg, conn, connReq, s, peerAddr, inbound, initFeatures, - legacyFeatures, s.cfg.ChanEnableTimeout, - lncfg.DefaultOutgoingCltvRejectDelta, errBuffer, - ) - if err != nil { - srvrLog.Errorf("unable to create peer %v", err) - return + pCfg := ppeer.Config{ + Conn: conn, + ConnReq: connReq, + Addr: peerAddr, + Inbound: inbound, + Features: initFeatures, + LegacyFeatures: legacyFeatures, + OutgoingCltvRejectDelta: lncfg.DefaultOutgoingCltvRejectDelta, + ChanActiveTimeout: s.cfg.ChanEnableTimeout, + ErrorBuffer: errBuffer, + WritePool: s.writePool, + ReadPool: s.readPool, + Switch: s.htlcSwitch, + InterceptSwitch: s.interceptableSwitch, + ChannelDB: s.chanDB, + ChainArb: s.chainArb, + AuthGossiper: s.authGossiper, + ChanStatusMgr: s.chanStatusMgr, + ChainIO: s.cc.chainIO, + FeeEstimator: s.cc.feeEstimator, + Signer: s.cc.wallet.Cfg.Signer, + SigPool: s.sigPool, + Wallet: s.cc.wallet, + ChainNotifier: s.cc.chainNotifier, + RoutingPolicy: s.cc.routingPolicy, + Sphinx: s.sphinx, + WitnessBeacon: s.witnessBeacon, + Invoices: s.invoices, + ChannelNotifier: s.channelNotifier, + HtlcNotifier: s.htlcNotifier, + TowerClient: s.towerClient, + DisconnectPeer: s.DisconnectPeer, + GenNodeAnnouncement: s.genNodeAnnouncement, + + PrunePersistentPeerConnection: s.prunePersistentPeerConnection, + + FetchLastChanUpdate: s.fetchLastChanUpdate(), + ProcessFundingOpen: s.fundingMgr.processFundingOpen, + ProcessFundingAccept: s.fundingMgr.processFundingAccept, + ProcessFundingCreated: s.fundingMgr.processFundingCreated, + ProcessFundingSigned: s.fundingMgr.processFundingSigned, + ProcessFundingLocked: s.fundingMgr.processFundingLocked, + ProcessFundingError: s.fundingMgr.processFundingError, + IsPendingChannel: s.fundingMgr.IsPendingChannel, + + Hodl: s.cfg.Hodl, + UnsafeReplay: s.cfg.UnsafeReplay, + MaxOutgoingCltvExpiry: s.cfg.MaxOutgoingCltvExpiry, + MaxChannelFeeAllocation: s.cfg.MaxChannelFeeAllocation, + Quit: s.quit, } + copy(pCfg.PubKeyBytes[:], peerAddr.IdentityKey.SerializeCompressed()) + copy(pCfg.ServerPubKey[:], 
s.identityECDH.PubKey().SerializeCompressed()) + + p := newPeer(pCfg) + // TODO(roasbeef): update IP address for link-node // * also mark last-seen, do it one single transaction? @@ -2842,12 +2890,12 @@ func (s *server) addPeer(p *peer) { // TODO(roasbeef): pipe all requests through to the // queryHandler/peerManager - pubSer := p.addr.IdentityKey.SerializeCompressed() + pubSer := p.NetAddress().IdentityKey.SerializeCompressed() pubStr := string(pubSer) s.peersByPub[pubStr] = p - if p.inbound { + if p.Inbound() { s.inboundPeers[pubStr] = p } else { s.outboundPeers[pubStr] = p @@ -3020,12 +3068,12 @@ func (s *server) peerTerminationWatcher(p *peer, ready chan struct{}) { // within the peer's address for reconnection purposes. // // TODO(roasbeef): use them all? - if p.inbound { + if p.Inbound() { advertisedAddr, err := s.fetchNodeAdvertisedAddr(pubKey) switch { // We found an advertised address, so use it. case err == nil: - p.addr.Address = advertisedAddr + p.SetAddress(advertisedAddr) // The peer doesn't have an advertised address. case err == errNoAdvertisedAddr: @@ -3058,7 +3106,7 @@ func (s *server) peerTerminationWatcher(p *peer, ready chan struct{}) { // Otherwise, we'll launch a new connection request in order to // attempt to maintain a persistent connection with this peer. connReq := &connmgr.ConnReq{ - Addr: p.addr, + Addr: p.NetAddress(), Permanent: true, } s.persistentConnReqs[pubStr] = append( @@ -3113,8 +3161,8 @@ func (s *server) removePeer(p *peer) { p.Disconnect(fmt.Errorf("server: disconnecting peer %v", p)) // If this peer had an active persistent connection request, remove it. - if p.connReq != nil { - s.connMgr.Remove(p.connReq.ID()) + if p.ConnReq() != nil { + s.connMgr.Remove(p.ConnReq().ID()) } // Ignore deleting peers if we're shutting down. @@ -3128,7 +3176,7 @@ func (s *server) removePeer(p *peer) { delete(s.peersByPub, pubStr) - if p.inbound { + if p.Inbound() { delete(s.inboundPeers, pubStr) } else { delete(s.outboundPeers, pubStr) @@ -3136,8 +3184,8 @@ func (s *server) removePeer(p *peer) { // Copy the peer's error buffer across to the server if it has any items // in it so that we can restore peer errors across connections. 
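The peerConnected hunk above replaces newPeer's positional arguments with a single Config literal. Condensed to its essentials, the pattern looks like the hypothetical helper below (the field set is heavily abbreviated; the real literal supplies every dependency shown in the diff):

// buildPeerConfig is a hypothetical distillation of the wiring above: each
// collaborator becomes one Config field, and the compressed identity key
// (always 33 bytes for secp256k1) is copied into the fixed-size array.
func buildPeerConfig(conn net.Conn, connReq *connmgr.ConnReq,
	peerAddr *lnwire.NetAddress, inbound bool,
	errBuffer *queue.CircularBuffer) ppeer.Config {

	pCfg := ppeer.Config{
		Conn:        conn,
		ConnReq:     connReq,
		Addr:        peerAddr,
		Inbound:     inbound,
		ErrorBuffer: errBuffer,
		// ... remaining dependencies exactly as in the diff above ...
	}
	copy(pCfg.PubKeyBytes[:], peerAddr.IdentityKey.SerializeCompressed())

	return pCfg
}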
- if p.errorBuffer.Total() > 0 { - s.peerErrors[pubStr] = p.errorBuffer + if p.ErrorBuffer().Total() > 0 { + s.peerErrors[pubStr] = p.ErrorBuffer() } // Inform the peer notifier of a peer offline event so that it can be diff --git a/test_utils.go b/test_utils.go index d0f9a68d2..c9d6f653e 100644 --- a/test_utils.go +++ b/test_utils.go @@ -17,16 +17,15 @@ import ( "github.com/btcsuite/btcutil" "github.com/lightningnetwork/lnd/chainntnfs" "github.com/lightningnetwork/lnd/channeldb" - "github.com/lightningnetwork/lnd/clock" - "github.com/lightningnetwork/lnd/contractcourt" "github.com/lightningnetwork/lnd/htlcswitch" "github.com/lightningnetwork/lnd/input" "github.com/lightningnetwork/lnd/keychain" "github.com/lightningnetwork/lnd/lnwallet" "github.com/lightningnetwork/lnd/lnwallet/chainfee" - "github.com/lightningnetwork/lnd/lnwallet/chancloser" "github.com/lightningnetwork/lnd/lnwire" "github.com/lightningnetwork/lnd/netann" + ppeer "github.com/lightningnetwork/lnd/peer" + "github.com/lightningnetwork/lnd/queue" "github.com/lightningnetwork/lnd/shachain" "github.com/lightningnetwork/lnd/ticker" ) @@ -352,37 +351,8 @@ func createTestPeer(notifier chainntnfs.ChainNotifier, publTx chan *wire.MsgTx, publishedTransactions: publTx, }, } - cc := &chainControl{ - feeEstimator: estimator, - chainIO: chainIO, - chainNotifier: notifier, - wallet: wallet, - } - breachArbiter := &breachArbiter{} - - chainArb := contractcourt.NewChainArbitrator( - contractcourt.ChainArbitratorConfig{ - Notifier: notifier, - ChainIO: chainIO, - IsForwardedHTLC: func(chanID lnwire.ShortChannelID, - htlcIndex uint64) bool { - - return true - }, - Clock: clock.NewDefaultClock(), - }, dbAlice, - ) - chainArb.WatchNewChannel(aliceChannelState) - - s := &server{ - chanDB: dbAlice, - cc: cc, - breachArbiter: breachArbiter, - chainArb: chainArb, - } - - _, currentHeight, err := s.cc.chainIO.GetBestBlock() + _, currentHeight, err := chainIO.GetBestBlock() if err != nil { return nil, nil, nil, err } @@ -404,7 +374,6 @@ func createTestPeer(notifier chainntnfs.ChainNotifier, publTx chan *wire.MsgTx, if err = htlcSwitch.Start(); err != nil { return nil, nil, nil, err } - s.htlcSwitch = htlcSwitch nodeSignerAlice := netann.NewNodeSigner(aliceKeySigner) @@ -418,7 +387,7 @@ func createTestPeer(notifier chainntnfs.ChainNotifier, publTx chan *wire.MsgTx, Graph: dbAlice.ChannelGraph(), MessageSigner: nodeSignerAlice, OurPubKey: aliceKeyPub, - IsChannelActive: s.htlcSwitch.HasActiveLink, + IsChannelActive: htlcSwitch.HasActiveLink, ApplyChannelUpdate: func(*lnwire.ChannelUpdate) error { return nil }, }) if err != nil { @@ -427,31 +396,41 @@ func createTestPeer(notifier chainntnfs.ChainNotifier, publTx chan *wire.MsgTx, if err = chanStatusMgr.Start(); err != nil { return nil, nil, nil, err } - s.chanStatusMgr = chanStatusMgr - alicePeer := &peer{ - addr: &lnwire.NetAddress{ - IdentityKey: aliceKeyPub, - Address: aliceAddr, - }, - - server: s, - sendQueue: make(chan outgoingMsg, 1), - outgoingQueue: make(chan outgoingMsg, outgoingQueueLen), - - activeChannels: make(map[lnwire.ChannelID]*lnwallet.LightningChannel), - newChannels: make(chan *newChannelMsg, 1), - - activeChanCloses: make(map[lnwire.ChannelID]*chancloser.ChanCloser), - localCloseChanReqs: make(chan *htlcswitch.ChanClose), - chanCloseMsgs: make(chan *closeMsg), - - chanActiveTimeout: chanActiveTimeout, - - queueQuit: make(chan struct{}), - quit: make(chan struct{}), + errBuffer, err := queue.NewCircularBuffer(ErrorBufferSize) + if err != nil { + return nil, nil, nil, err } + var 
pubKey [33]byte + copy(pubKey[:], aliceKeyPub.SerializeCompressed()) + + cfgAddr := &lnwire.NetAddress{ + IdentityKey: aliceKeyPub, + Address: aliceAddr, + ChainNet: wire.SimNet, + } + + pCfg := ppeer.Config{ + Addr: cfgAddr, + PubKeyBytes: pubKey, + ErrorBuffer: errBuffer, + ChainIO: chainIO, + Switch: htlcSwitch, + + ChanActiveTimeout: chanActiveTimeout, + InterceptSwitch: htlcswitch.NewInterceptableSwitch(htlcSwitch), + + ChannelDB: dbAlice, + FeeEstimator: estimator, + Wallet: wallet, + ChainNotifier: notifier, + ChanStatusMgr: chanStatusMgr, + DisconnectPeer: func(b *btcec.PublicKey) error { return nil }, + } + + alicePeer := newPeer(pCfg) + chanID := lnwire.NewChanIDFromOutPoint(channelAlice.ChannelPoint()) alicePeer.activeChannels[chanID] = channelAlice From cbd54101f8ae73884576aeb329b85535d27587d2 Mon Sep 17 00:00:00 2001 From: nsa Date: Thu, 2 Jul 2020 17:46:06 -0400 Subject: [PATCH 15/15] multi: remove peer.go, change all references to point to peer pkg --- fundingmanager_test.go | 5 + lnwire/commit_sig.go | 2 +- lnwire/revoke_and_ack.go | 2 +- lnwire/update_add_htlc.go | 2 +- lnwire/update_fail_htlc.go | 2 +- lnwire/update_fail_malformed_htlc.go | 2 +- lnwire/update_fee.go | 2 +- lnwire/update_fulfill_htlc.go | 2 +- log.go | 3 +- peer.go => peer/brontide.go | 228 ++++----- peer_test.go => peer/brontide_test.go | 15 +- peer/log.go | 40 ++ peer/test_utils.go | 695 ++++++++++++++++++++++++++ rpcserver.go | 21 +- server.go | 52 +- test_utils.go | 377 -------------- 16 files changed, 903 insertions(+), 547 deletions(-) rename peer.go => peer/brontide.go (94%) rename peer_test.go => peer/brontide_test.go (99%) create mode 100644 peer/log.go create mode 100644 peer/test_utils.go diff --git a/fundingmanager_test.go b/fundingmanager_test.go index 96e2ddc2c..ec8a0df92 100644 --- a/fundingmanager_test.go +++ b/fundingmanager_test.go @@ -166,6 +166,11 @@ func (m *mockChanEvent) NotifyPendingOpenChannelEvent(outpoint wire.OutPoint, } } +type newChannelMsg struct { + channel *channeldb.OpenChannel + err chan error +} + type testNode struct { privKey *btcec.PrivateKey addr *lnwire.NetAddress diff --git a/lnwire/commit_sig.go b/lnwire/commit_sig.go index 72c235b34..2455c0165 100644 --- a/lnwire/commit_sig.go +++ b/lnwire/commit_sig.go @@ -89,7 +89,7 @@ func (c *CommitSig) MaxPayloadLength(uint32) uint32 { // TargetChanID returns the channel id of the link for which this message is // intended. // -// NOTE: Part of lnd.LinkUpdater interface. +// NOTE: Part of peer.LinkUpdater interface. func (c *CommitSig) TargetChanID() ChannelID { return c.ChanID } diff --git a/lnwire/revoke_and_ack.go b/lnwire/revoke_and_ack.go index f63951083..d685f0f32 100644 --- a/lnwire/revoke_and_ack.go +++ b/lnwire/revoke_and_ack.go @@ -85,7 +85,7 @@ func (c *RevokeAndAck) MaxPayloadLength(uint32) uint32 { // TargetChanID returns the channel id of the link for which this message is // intended. // -// NOTE: Part of lnd.LinkUpdater interface. +// NOTE: Part of peer.LinkUpdater interface. func (c *RevokeAndAck) TargetChanID() ChannelID { return c.ChanID } diff --git a/lnwire/update_add_htlc.go b/lnwire/update_add_htlc.go index b3add950b..028c6320d 100644 --- a/lnwire/update_add_htlc.go +++ b/lnwire/update_add_htlc.go @@ -113,7 +113,7 @@ func (c *UpdateAddHTLC) MaxPayloadLength(uint32) uint32 { // TargetChanID returns the channel id of the link for which this message is // intended. // -// NOTE: Part of lnd.LinkUpdater interface. +// NOTE: Part of peer.LinkUpdater interface. 
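The NOTE just above, and its siblings through the rest of these lnwire files, all point at the same one-method interface that the earlier peer/interfaces.go commit introduced. A sketch of the dispatch this enables, assuming the interface shape these NOTEs reference (the surrounding names are illustrative):

// dispatchToChannel routes an incoming wire message to its per-channel
// stream with a single type assertion, instead of a case per message type.
// LinkUpdater and msgStream are the peer package's types; the map is a
// stand-in for the peer's activeMsgStreams index.
func dispatchToChannel(msg lnwire.Message,
	streams map[lnwire.ChannelID]*msgStream) bool {

	updater, ok := msg.(LinkUpdater)
	if !ok {
		// Not a BOLT 2 channel update; handled elsewhere.
		return false
	}

	stream, ok := streams[updater.TargetChanID()]
	if !ok {
		return false
	}

	stream.AddMsg(msg)
	return true
}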
func (c *UpdateAddHTLC) TargetChanID() ChannelID { return c.ChanID } diff --git a/lnwire/update_fail_htlc.go b/lnwire/update_fail_htlc.go index 17fc3cd4c..194f2ecd0 100644 --- a/lnwire/update_fail_htlc.go +++ b/lnwire/update_fail_htlc.go @@ -89,7 +89,7 @@ func (c *UpdateFailHTLC) MaxPayloadLength(uint32) uint32 { // TargetChanID returns the channel id of the link for which this message is // intended. // -// NOTE: Part of lnd.LinkUpdater interface. +// NOTE: Part of peer.LinkUpdater interface. func (c *UpdateFailHTLC) TargetChanID() ChannelID { return c.ChanID } diff --git a/lnwire/update_fail_malformed_htlc.go b/lnwire/update_fail_malformed_htlc.go index 68f0a61b8..39d4b8709 100644 --- a/lnwire/update_fail_malformed_htlc.go +++ b/lnwire/update_fail_malformed_htlc.go @@ -77,7 +77,7 @@ func (c *UpdateFailMalformedHTLC) MaxPayloadLength(uint32) uint32 { // TargetChanID returns the channel id of the link for which this message is // intended. // -// NOTE: Part of lnd.LinkUpdater interface. +// NOTE: Part of peer.LinkUpdater interface. func (c *UpdateFailMalformedHTLC) TargetChanID() ChannelID { return c.ChanID } diff --git a/lnwire/update_fee.go b/lnwire/update_fee.go index 5657633b0..2d27c3772 100644 --- a/lnwire/update_fee.go +++ b/lnwire/update_fee.go @@ -72,7 +72,7 @@ func (c *UpdateFee) MaxPayloadLength(uint32) uint32 { // TargetChanID returns the channel id of the link for which this message is // intended. // -// NOTE: Part of lnd.LinkUpdater interface. +// NOTE: Part of peer.LinkUpdater interface. func (c *UpdateFee) TargetChanID() ChannelID { return c.ChanID } diff --git a/lnwire/update_fulfill_htlc.go b/lnwire/update_fulfill_htlc.go index 49344008a..6c0e6339f 100644 --- a/lnwire/update_fulfill_htlc.go +++ b/lnwire/update_fulfill_htlc.go @@ -82,7 +82,7 @@ func (c *UpdateFulfillHTLC) MaxPayloadLength(uint32) uint32 { // TargetChanID returns the channel id of the link for which this message is // intended. // -// NOTE: Part of lnd.LinkUpdater interface. +// NOTE: Part of peer.LinkUpdater interface. func (c *UpdateFulfillHTLC) TargetChanID() ChannelID { return c.ChanID } diff --git a/log.go b/log.go index bf1949693..f809acdf8 100644 --- a/log.go +++ b/log.go @@ -30,6 +30,7 @@ import ( "github.com/lightningnetwork/lnd/lnwallet/chanfunding" "github.com/lightningnetwork/lnd/monitoring" "github.com/lightningnetwork/lnd/netann" + "github.com/lightningnetwork/lnd/peer" "github.com/lightningnetwork/lnd/peernotifier" "github.com/lightningnetwork/lnd/routing" "github.com/lightningnetwork/lnd/routing/localchans" @@ -75,7 +76,6 @@ var ( // function should always be called as soon as possible to finish // setting them up properly with a root logger. 
ltndLog = addLndPkgLogger("LTND") - peerLog = addLndPkgLogger("PEER") rpcsLog = addLndPkgLogger("RPCS") srvrLog = addLndPkgLogger("SRVR") fndgLog = addLndPkgLogger("FNDG") @@ -122,6 +122,7 @@ func SetupLoggers(root *build.RotatingLogWriter) { AddSubLogger(root, "WTCL", wtclient.UseLogger) AddSubLogger(root, "PRNF", peernotifier.UseLogger) AddSubLogger(root, "CHFD", chanfunding.UseLogger) + AddSubLogger(root, "PEER", peer.UseLogger) AddSubLogger(root, "CHCL", chancloser.UseLogger) AddSubLogger(root, routing.Subsystem, routing.UseLogger, localchans.UseLogger) diff --git a/peer.go b/peer/brontide.go similarity index 94% rename from peer.go rename to peer/brontide.go index 4852f56fb..948900f18 100644 --- a/peer.go +++ b/peer/brontide.go @@ -1,4 +1,4 @@ -package lnd +package peer import ( "bytes" @@ -29,7 +29,6 @@ import ( "github.com/lightningnetwork/lnd/lnwallet" "github.com/lightningnetwork/lnd/lnwallet/chancloser" "github.com/lightningnetwork/lnd/lnwire" - ppeer "github.com/lightningnetwork/lnd/peer" "github.com/lightningnetwork/lnd/queue" "github.com/lightningnetwork/lnd/ticker" ) @@ -62,6 +61,13 @@ const ( ErrorBufferSize = 10 ) +var ( + // ErrChannelNotFound is an error returned when a channel is queried and + // either the Brontide doesn't know of it, or the channel in question + // is pending. + ErrChannelNotFound = fmt.Errorf("channel not found") +) + // outgoingMsg packages an lnwire.Message to be sent out on the wire, along with // a buffered channel which will be sent upon once the write is complete. This // buffered channel acts as a semaphore to be used for synchronization purposes. @@ -106,13 +112,13 @@ type TimestampedError struct { Timestamp time.Time } -// peer is an active peer on the Lightning Network. This struct is responsible +// Brontide is an active peer on the Lightning Network. This struct is responsible // for managing any channel state related to this peer. To do so, it has // several helper goroutines to handle events such as HTLC timeouts, new // funding workflow, and detecting an uncooperative closure of any active // channels. // TODO(roasbeef): proper reconnection logic -type peer struct { +type Brontide struct { // MUST be used atomically. started int32 disconnect int32 @@ -131,7 +137,7 @@ type peer struct { // our last ping message. To be used atomically. pingLastSend int64 - cfg ppeer.Config + cfg Config // activeSignal when closed signals that the peer is now active and // ready to process messages. @@ -141,7 +147,7 @@ type peer struct { // It will be zero for peers that did not successfully call Start(). startTime time.Time - // sendQueue is the channel which is used to queue outgoing to be + // sendQueue is the channel which is used to queue outgoing messages to be // written onto the wire. Note that this channel is unbuffered. sendQueue chan outgoingMsg @@ -213,13 +219,12 @@ type peer struct { wg sync.WaitGroup } -// A compile-time check to ensure that peer satisfies the lnpeer.Peer interface. -var _ lnpeer.Peer = (*peer)(nil) +// A compile-time check to ensure that Brontide satisfies the lnpeer.Peer interface. +var _ lnpeer.Peer = (*Brontide)(nil) -// newPeer creates a new peer from a peer.Config object. -func newPeer(cfg ppeer.Config) *peer { - - p := &peer{ +// NewBrontide creates a new Brontide from a peer.Config struct. 
+func NewBrontide(cfg Config) *Brontide { + p := &Brontide{ cfg: cfg, activeSignal: make(chan struct{}), sendQueue: make(chan outgoingMsg), @@ -228,8 +233,7 @@ func newPeer(cfg ppeer.Config) *peer { activeChannels: make(map[lnwire.ChannelID]*lnwallet.LightningChannel), newChannels: make(chan *newChannelMsg, 1), - activeMsgStreams: make(map[lnwire.ChannelID]*msgStream), - + activeMsgStreams: make(map[lnwire.ChannelID]*msgStream), activeChanCloses: make(map[lnwire.ChannelID]*chancloser.ChanCloser), localCloseChanReqs: make(chan *htlcswitch.ChanClose), linkFailures: make(chan linkFailureReport), @@ -244,7 +248,7 @@ func newPeer(cfg ppeer.Config) *peer { // Start starts all helper goroutines the peer needs for normal operations. In // the case this peer has already been started, then this function is a loop. -func (p *peer) Start() error { +func (p *Brontide) Start() error { if atomic.AddInt32(&p.started, 1) != 1 { return nil } @@ -366,12 +370,12 @@ func (p *peer) Start() error { // initGossipSync initializes either a gossip syncer or an initial routing // dump, depending on the negotiated synchronization method. -func (p *peer) initGossipSync() { +func (p *Brontide) initGossipSync() { // If the remote peer knows of the new gossip queries feature, then // we'll create a new gossipSyncer in the AuthenticatedGossiper for it. if p.remoteFeatures.HasFeature(lnwire.GossipQueriesOptional) { - srvrLog.Infof("Negotiated chan series queries with %x", + peerLog.Infof("Negotiated chan series queries with %x", p.cfg.PubKeyBytes[:]) // Register the peer's gossip syncer with the gossiper. @@ -385,7 +389,6 @@ func (p *peer) initGossipSync() { // peers. p.cfg.AuthGossiper.InitSyncState(p) } - } // QuitSignal is a method that should return a channel which will be sent upon @@ -394,7 +397,7 @@ func (p *peer) initGossipSync() { // exits. // // NOTE: Part of the lnpeer.Peer interface. -func (p *peer) QuitSignal() <-chan struct{} { +func (p *Brontide) QuitSignal() <-chan struct{} { return p.quit } @@ -402,7 +405,7 @@ func (p *peer) QuitSignal() <-chan struct{} { // channels returned by the database. It returns a slice of channel reestablish // messages that should be sent to the peer immediately, in case we have borked // channels that haven't been closed yet. -func (p *peer) loadActiveChannels(chans []*channeldb.OpenChannel) ( +func (p *Brontide) loadActiveChannels(chans []*channeldb.OpenChannel) ( []lnwire.Message, error) { // Return a slice of messages to send to the peers in case the channel @@ -537,7 +540,7 @@ func (p *peer) loadActiveChannels(chans []*channeldb.OpenChannel) ( } // addLink creates and adds a new ChannelLink from the specified channel. 
-func (p *peer) addLink(chanPoint *wire.OutPoint, +func (p *Brontide) addLink(chanPoint *wire.OutPoint, lnChan *lnwallet.LightningChannel, forwardingPolicy *htlcswitch.ForwardingPolicy, chainEvents *contractcourt.ChainEventSubscription, @@ -563,25 +566,25 @@ func (p *peer) addLink(chanPoint *wire.OutPoint, } } + updateContractSignals := func(signals *contractcourt.ContractSignals) error { + return p.cfg.ChainArb.UpdateContractSignals(*chanPoint, signals) + } + linkCfg := htlcswitch.ChannelLinkConfig{ - Peer: p, - DecodeHopIterators: p.cfg.Sphinx.DecodeHopIterators, - ExtractErrorEncrypter: p.cfg.Sphinx.ExtractErrorEncrypter, - FetchLastChannelUpdate: p.cfg.FetchLastChanUpdate, - HodlMask: p.cfg.Hodl.Mask(), - Registry: p.cfg.Invoices, - Switch: p.cfg.Switch, - Circuits: p.cfg.Switch.CircuitModifier(), - ForwardPackets: p.cfg.InterceptSwitch.ForwardPackets, - FwrdingPolicy: *forwardingPolicy, - FeeEstimator: p.cfg.FeeEstimator, - PreimageCache: p.cfg.WitnessBeacon, - ChainEvents: chainEvents, - UpdateContractSignals: func(signals *contractcourt.ContractSignals) error { - return p.cfg.ChainArb.UpdateContractSignals( - *chanPoint, signals, - ) - }, + Peer: p, + DecodeHopIterators: p.cfg.Sphinx.DecodeHopIterators, + ExtractErrorEncrypter: p.cfg.Sphinx.ExtractErrorEncrypter, + FetchLastChannelUpdate: p.cfg.FetchLastChanUpdate, + HodlMask: p.cfg.Hodl.Mask(), + Registry: p.cfg.Invoices, + Switch: p.cfg.Switch, + Circuits: p.cfg.Switch.CircuitModifier(), + ForwardPackets: p.cfg.InterceptSwitch.ForwardPackets, + FwrdingPolicy: *forwardingPolicy, + FeeEstimator: p.cfg.FeeEstimator, + PreimageCache: p.cfg.WitnessBeacon, + ChainEvents: chainEvents, + UpdateContractSignals: updateContractSignals, OnChannelFailure: onChannelFailure, SyncStates: syncStates, BatchTicker: ticker.New(50 * time.Millisecond), @@ -617,7 +620,7 @@ func (p *peer) addLink(chanPoint *wire.OutPoint, // maybeSendNodeAnn sends our node announcement to the remote peer if at least // one confirmed public channel exists with them. -func (p *peer) maybeSendNodeAnn(channels []*channeldb.OpenChannel) { +func (p *Brontide) maybeSendNodeAnn(channels []*channeldb.OpenChannel) { hasConfirmedPublicChan := false for _, channel := range channels { if channel.IsPending { @@ -636,12 +639,12 @@ func (p *peer) maybeSendNodeAnn(channels []*channeldb.OpenChannel) { ourNodeAnn, err := p.cfg.GenNodeAnnouncement(false) if err != nil { - srvrLog.Debugf("Unable to retrieve node announcement: %v", err) + peerLog.Debugf("Unable to retrieve node announcement: %v", err) return } if err := p.SendMessageLazy(false, &ourNodeAnn); err != nil { - srvrLog.Debugf("Unable to resend node announcement to %x: %v", + peerLog.Debugf("Unable to resend node announcement to %x: %v", p.cfg.PubKeyBytes, err) } } @@ -654,7 +657,7 @@ func (p *peer) maybeSendNodeAnn(channels []*channeldb.OpenChannel) { // call to Start returns no error. Otherwise, if the peer fails to start, // calling Disconnect will signal the quit channel and the method will not // block, since no goroutines were spawned. -func (p *peer) WaitForDisconnect(ready chan struct{}) { +func (p *Brontide) WaitForDisconnect(ready chan struct{}) { select { case <-ready: case <-p.quit: @@ -666,7 +669,7 @@ func (p *peer) WaitForDisconnect(ready chan struct{}) { // Disconnect terminates the connection with the remote peer. Additionally, a // signal is sent to the server and htlcSwitch indicating the resources // allocated to the peer can now be cleaned up. 
-func (p *peer) Disconnect(reason error) { +func (p *Brontide) Disconnect(reason error) { if !atomic.CompareAndSwapInt32(&p.disconnect, 0, 1) { return } @@ -683,13 +686,13 @@ func (p *peer) Disconnect(reason error) { } // String returns the string representation of this peer. -func (p *peer) String() string { +func (p *Brontide) String() string { return fmt.Sprintf("%x@%s", p.cfg.PubKeyBytes, p.cfg.Conn.RemoteAddr()) } // readNextMessage reads, and returns the next message on the wire along with // any additional raw payload. -func (p *peer) readNextMessage() (lnwire.Message, error) { +func (p *Brontide) readNextMessage() (lnwire.Message, error) { noiseConn, ok := p.cfg.Conn.(*brontide.Conn) if !ok { return nil, fmt.Errorf("brontide.Conn required to read messages") @@ -725,7 +728,6 @@ func (p *peer) readNextMessage() (lnwire.Message, error) { rawMsg, readErr = noiseConn.ReadNextBody(buf[:pktLen]) return readErr }) - atomic.AddUint64(&p.bytesReceived, uint64(len(rawMsg))) if err != nil { return nil, err @@ -752,7 +754,7 @@ func (p *peer) readNextMessage() (lnwire.Message, error) { type msgStream struct { streamShutdown int32 // To be used atomically. - peer *peer + peer *Brontide apply func(lnwire.Message) @@ -775,7 +777,7 @@ type msgStream struct { // that should be buffered in the internal queue. Callers should set this to a // sane value that avoids blocking unnecessarily, but doesn't allow an // unbounded amount of memory to be allocated to buffer incoming messages. -func newMsgStream(p *peer, startMsg, stopMsg string, bufSize uint32, +func newMsgStream(p *Brontide, startMsg, stopMsg string, bufSize uint32, apply func(lnwire.Message)) *msgStream { stream := &msgStream{ @@ -906,7 +908,7 @@ func (ms *msgStream) AddMsg(msg lnwire.Message) { // waitUntilLinkActive waits until the target link is active and returns a // ChannelLink to pass messages to. It accomplishes this by subscribing to // an ActiveLinkEvent which is emitted by the link when it first starts up. -func waitUntilLinkActive(p *peer, +func waitUntilLinkActive(p *Brontide, cid lnwire.ChannelID) htlcswitch.ChannelLink { // Subscribe to receive channel events. @@ -974,7 +976,7 @@ func waitUntilLinkActive(p *peer, // dispatch a message to a channel before it is fully active. A reference to the // channel this stream forwards to his held in scope to prevent unnecessary // lookups. -func newChanMsgStream(p *peer, cid lnwire.ChannelID) *msgStream { +func newChanMsgStream(p *Brontide, cid lnwire.ChannelID) *msgStream { var chanLink htlcswitch.ChannelLink @@ -1015,14 +1017,17 @@ func newChanMsgStream(p *peer, cid lnwire.ChannelID) *msgStream { // newDiscMsgStream is used to setup a msgStream between the peer and the // authenticated gossiper. This stream should be used to forward all remote // channel announcements. -func newDiscMsgStream(p *peer) *msgStream { - return newMsgStream(p, +func newDiscMsgStream(p *Brontide) *msgStream { + apply := func(msg lnwire.Message) { + p.cfg.AuthGossiper.ProcessRemoteAnnouncement(msg, p) + } + + return newMsgStream( + p, "Update stream for gossiper created", "Update stream for gossiper exited", 1000, - func(msg lnwire.Message) { - p.cfg.AuthGossiper.ProcessRemoteAnnouncement(msg, p) - }, + apply, ) } @@ -1030,7 +1035,7 @@ func newDiscMsgStream(p *peer) *msgStream { // properly dispatching the handling of the message to the proper subsystem. // // NOTE: This method MUST be run as a goroutine. 
-func (p *peer) readHandler() { +func (p *Brontide) readHandler() { defer p.wg.Done() // We'll stop the timer after a new messages is received, and also @@ -1226,7 +1231,7 @@ out: // isActiveChannel returns true if the provided channel id is active, otherwise // returns false. -func (p *peer) isActiveChannel(chanID lnwire.ChannelID) bool { +func (p *Brontide) isActiveChannel(chanID lnwire.ChannelID) bool { p.activeChanMtx.RLock() _, ok := p.activeChannels[chanID] p.activeChanMtx.RUnlock() @@ -1237,7 +1242,7 @@ func (p *peer) isActiveChannel(chanID lnwire.ChannelID) bool { // current timestamp. Errors are only stored if we have at least one active // channel with the peer to mitigate a dos vector where a peer costlessly // connects to us and spams us with errors. -func (p *peer) storeError(err error) { +func (p *Brontide) storeError(err error) { var haveChannels bool p.activeChanMtx.RLock() @@ -1270,7 +1275,7 @@ func (p *peer) storeError(err error) { // open with the peer. // // NOTE: This method should only be called from within the readHandler. -func (p *peer) handleError(msg *lnwire.Error) bool { +func (p *Brontide) handleError(msg *lnwire.Error) bool { key := p.cfg.Addr.IdentityKey // Store the error we have received. @@ -1438,7 +1443,7 @@ func messageSummary(msg lnwire.Message) string { // less spammy log messages in trace mode by setting the 'Curve" parameter to // nil. Doing this avoids printing out each of the field elements in the curve // parameters for secp256k1. -func (p *peer) logWireMessage(msg lnwire.Message, read bool) { +func (p *Brontide) logWireMessage(msg lnwire.Message, read bool) { summaryPrefix := "Received" if !read { summaryPrefix = "Sending" @@ -1500,7 +1505,7 @@ func (p *peer) logWireMessage(msg lnwire.Message, read bool) { // message buffered on the connection. It is safe to call this method again // with a nil message iff a timeout error is returned. This will continue to // flush the pending message to the wire. -func (p *peer) writeMessage(msg lnwire.Message) error { +func (p *Brontide) writeMessage(msg lnwire.Message) error { // Simply exit if we're shutting down. if atomic.LoadInt32(&p.disconnect) != 0 { return lnpeer.ErrPeerExiting @@ -1574,7 +1579,7 @@ func (p *peer) writeMessage(msg lnwire.Message) error { // drained. // // NOTE: This method MUST be run as a goroutine. -func (p *peer) writeHandler() { +func (p *Brontide) writeHandler() { // We'll stop the timer after a new messages is sent, and also reset it // after we process the next message. idleTimer := time.AfterFunc(idleTimeout, func() { @@ -1667,7 +1672,7 @@ out: // to be eventually sent out on the wire by the writeHandler. // // NOTE: This method MUST be run as a goroutine. -func (p *peer) queueHandler() { +func (p *Brontide) queueHandler() { defer p.wg.Done() // priorityMsgs holds an in order list of messages deemed high-priority @@ -1735,7 +1740,7 @@ func (p *peer) queueHandler() { // connection is still active. // // NOTE: This method MUST be run as a goroutine. -func (p *peer) pingHandler() { +func (p *Brontide) pingHandler() { defer p.wg.Done() pingTicker := time.NewTicker(pingInterval) @@ -1756,28 +1761,28 @@ out: } // PingTime returns the estimated ping time to the peer in microseconds. -func (p *peer) PingTime() int64 { +func (p *Brontide) PingTime() int64 { return atomic.LoadInt64(&p.pingTime) } // queueMsg adds the lnwire.Message to the back of the high priority send queue. 
// If the errChan is non-nil, an error is sent back if the msg failed to queue // or failed to write, and nil otherwise. -func (p *peer) queueMsg(msg lnwire.Message, errChan chan error) { +func (p *Brontide) queueMsg(msg lnwire.Message, errChan chan error) { p.queue(true, msg, errChan) } // queueMsgLazy adds the lnwire.Message to the back of the low priority send // queue. If the errChan is non-nil, an error is sent back if the msg failed to // queue or failed to write, and nil otherwise. -func (p *peer) queueMsgLazy(msg lnwire.Message, errChan chan error) { +func (p *Brontide) queueMsgLazy(msg lnwire.Message, errChan chan error) { p.queue(false, msg, errChan) } // queue sends a given message to the queueHandler using the passed priority. If // the errChan is non-nil, an error is sent back if the msg failed to queue or // failed to write, and nil otherwise. -func (p *peer) queue(priority bool, msg lnwire.Message, +func (p *Brontide) queue(priority bool, msg lnwire.Message, errChan chan error) { select { @@ -1793,7 +1798,7 @@ func (p *peer) queue(priority bool, msg lnwire.Message, // ChannelSnapshots returns a slice of channel snapshots detailing all // currently active channels maintained with the remote peer. -func (p *peer) ChannelSnapshots() []*channeldb.ChannelSnapshot { +func (p *Brontide) ChannelSnapshots() []*channeldb.ChannelSnapshot { p.activeChanMtx.RLock() defer p.activeChanMtx.RUnlock() @@ -1819,7 +1824,7 @@ func (p *peer) ChannelSnapshots() []*channeldb.ChannelSnapshot { // genDeliveryScript returns a new script to be used to send our funds to in // the case of a cooperative channel close negotiation. -func (p *peer) genDeliveryScript() ([]byte, error) { +func (p *Brontide) genDeliveryScript() ([]byte, error) { deliveryAddr, err := p.cfg.Wallet.NewAddress( lnwallet.WitnessPubKey, false, ) @@ -1837,7 +1842,7 @@ func (p *peer) genDeliveryScript() ([]byte, error) { // channels maintained with the remote peer. // // NOTE: This method MUST be run as a goroutine. -func (p *peer) channelManager() { +func (p *Brontide) channelManager() { defer p.wg.Done() // reenableTimeout will fire once after the configured channel status @@ -2010,7 +2015,6 @@ out: reenableTimeout = nil case <-p.quit: - // As, we've been signalled to exit, we'll reset all // our active channel back to their default state. p.activeChanMtx.Lock() @@ -2033,7 +2037,7 @@ out: // peer, and reenables each public, non-pending channel. This is done at the // gossip level by broadcasting a new ChannelUpdate with the disabled bit unset. // No message will be sent if the channel is already enabled. -func (p *peer) reenableActiveChannels() { +func (p *Brontide) reenableActiveChannels() { // First, filter all known channels with this peer for ones that are // both public and not pending. var activePublicChans []wire.OutPoint @@ -2071,7 +2075,7 @@ func (p *peer) reenableActiveChannels() { for _, chanPoint := range activePublicChans { err := p.cfg.ChanStatusMgr.RequestEnable(chanPoint) if err != nil { - srvrLog.Errorf("Unable to enable channel %v: %v", + peerLog.Errorf("Unable to enable channel %v: %v", chanPoint, err) } } @@ -2081,7 +2085,7 @@ func (p *peer) reenableActiveChannels() { // for the target channel ID. If the channel isn't active an error is returned. // Otherwise, either an existing state machine will be returned, or a new one // will be created. 
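queueMsg's errChan parameter above is the synchronization hook described at outgoingMsg: the write handler sends exactly one error (possibly nil) once the message is written or dropped. A hypothetical wrapper showing the contract; a buffer of one keeps the writer from ever blocking on the reply:

// sendPingSync queues a high-priority ping and waits for the write result,
// falling out early if the peer is shutting down. Illustrative only.
func (p *Brontide) sendPingSync(numPongBytes uint16) error {
	errChan := make(chan error, 1)
	p.queueMsg(lnwire.NewPing(numPongBytes), errChan)

	select {
	case err := <-errChan:
		return err
	case <-p.quit:
		return lnpeer.ErrPeerExiting
	}
}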
-func (p *peer) fetchActiveChanCloser(chanID lnwire.ChannelID) ( +func (p *Brontide) fetchActiveChanCloser(chanID lnwire.ChannelID) ( *chancloser.ChanCloser, error) { // First, we'll ensure that we actually know of the target channel. If @@ -2195,7 +2199,7 @@ func chooseDeliveryScript(upfront, // handleLocalCloseReq kicks-off the workflow to execute a cooperative or // forced unilateral closure of the channel initiated by a local subsystem. -func (p *peer) handleLocalCloseReq(req *htlcswitch.ChanClose) { +func (p *Brontide) handleLocalCloseReq(req *htlcswitch.ChanClose) { chanID := lnwire.NewChanIDFromOutPoint(req.ChanPoint) p.activeChanMtx.RLock() @@ -2316,7 +2320,7 @@ type linkFailureReport struct { // fails. It facilitates the removal of all channel state within the peer, // force closing the channel depending on severity, and sending the error // message back to the remote party. -func (p *peer) handleLinkFailure(failure linkFailureReport) { +func (p *Brontide) handleLinkFailure(failure linkFailureReport) { // We begin by wiping the link, which will remove it from the switch, // such that it won't be attempted used for any more updates. // @@ -2371,7 +2375,7 @@ func (p *peer) handleLinkFailure(failure linkFailureReport) { // machine should be passed in. Once the transaction has been sufficiently // confirmed, the channel will be marked as fully closed within the database, // and any clients will be notified of updates to the closing state. -func (p *peer) finalizeChanClosure(chanCloser *chancloser.ChanCloser) { +func (p *Brontide) finalizeChanClosure(chanCloser *chancloser.ChanCloser) { closeReq := chanCloser.CloseRequest() // First, we'll clear all indexes related to the channel in question. @@ -2466,7 +2470,7 @@ func WaitForChanToClose(bestHeight uint32, notifier chainntnfs.ChainNotifier, // WipeChannel removes the passed channel point from all indexes associated with // the peer and the switch. -func (p *peer) WipeChannel(chanPoint *wire.OutPoint) { +func (p *Brontide) WipeChannel(chanPoint *wire.OutPoint) { chanID := lnwire.NewChanIDFromOutPoint(chanPoint) p.activeChanMtx.Lock() @@ -2480,7 +2484,7 @@ func (p *peer) WipeChannel(chanPoint *wire.OutPoint) { // handleInitMsg handles the incoming init message which contains global and // local feature vectors. If feature vectors are incompatible then disconnect. -func (p *peer) handleInitMsg(msg *lnwire.Init) error { +func (p *Brontide) handleInitMsg(msg *lnwire.Init) error { // First, merge any features from the legacy global features field into // those presented in the local features fields. err := msg.Features.Merge(msg.GlobalFeatures) @@ -2524,7 +2528,7 @@ func (p *peer) handleInitMsg(msg *lnwire.Init) error { // behavior off the set of negotiated feature bits. // // NOTE: Part of the lnpeer.Peer interface. -func (p *peer) LocalFeatures() *lnwire.FeatureVector { +func (p *Brontide) LocalFeatures() *lnwire.FeatureVector { return p.cfg.Features } @@ -2533,13 +2537,13 @@ func (p *peer) LocalFeatures() *lnwire.FeatureVector { // their behavior off the set of negotiated feature bits. // // NOTE: Part of the lnpeer.Peer interface. -func (p *peer) RemoteFeatures() *lnwire.FeatureVector { +func (p *Brontide) RemoteFeatures() *lnwire.FeatureVector { return p.remoteFeatures } // sendInitMsg sends the Init message to the remote peer. This message contains our // currently supported local and global features. 
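handleInitMsg above folds the legacy global feature bits into the modern field before storing the remote vector, so downstream code can key its behavior off a single merged set. A hedged example of such a check (the chosen feature bit is arbitrary):

// supportsStaticRemoteKey is a hypothetical helper: once the init exchange
// has populated remoteFeatures, feature-dependent behavior reduces to a
// HasFeature query on the merged vector.
func (p *Brontide) supportsStaticRemoteKey() bool {
	return p.RemoteFeatures().HasFeature(lnwire.StaticRemoteKeyOptional)
}

The same shape works for required bits, whose absence is what makes the vectors incompatible and, per the comment above, grounds for disconnecting during handleInitMsg.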
-func (p *peer) sendInitMsg() error { +func (p *Brontide) sendInitMsg() error { msg := lnwire.NewInitMessage( p.cfg.LegacyFeatures.RawFeatureVector, p.cfg.Features.RawFeatureVector, @@ -2550,7 +2554,7 @@ func (p *peer) sendInitMsg() error { // resendChanSyncMsg will attempt to find a channel sync message for the closed // channel and resend it to our peer. -func (p *peer) resendChanSyncMsg(cid lnwire.ChannelID) error { +func (p *Brontide) resendChanSyncMsg(cid lnwire.ChannelID) error { // If we already re-sent the mssage for this channel, we won't do it // again. if _, ok := p.resentChanSyncMsg[cid]; ok { @@ -2598,7 +2602,7 @@ func (p *peer) resendChanSyncMsg(cid lnwire.ChannelID) error { // otherwise it returns immediately after queuing. // // NOTE: Part of the lnpeer.Peer interface. -func (p *peer) SendMessage(sync bool, msgs ...lnwire.Message) error { +func (p *Brontide) SendMessage(sync bool, msgs ...lnwire.Message) error { return p.sendMessage(sync, true, msgs...) } @@ -2608,7 +2612,7 @@ func (p *peer) SendMessage(sync bool, msgs ...lnwire.Message) error { // otherwise it returns immediately after queueing. // // NOTE: Part of the lnpeer.Peer interface. -func (p *peer) SendMessageLazy(sync bool, msgs ...lnwire.Message) error { +func (p *Brontide) SendMessageLazy(sync bool, msgs ...lnwire.Message) error { return p.sendMessage(sync, false, msgs...) } @@ -2616,7 +2620,7 @@ func (p *peer) SendMessageLazy(sync bool, msgs ...lnwire.Message) error { // to the remote peer. If sync is true, this method will block until the // messages have been sent to the remote peer or an error is returned, otherwise // it returns immediately after queueing. -func (p *peer) sendMessage(sync, priority bool, msgs ...lnwire.Message) error { +func (p *Brontide) sendMessage(sync, priority bool, msgs ...lnwire.Message) error { // Add all incoming messages to the outgoing queue. A list of error // chans is populated for each message if the caller requested a sync // send. @@ -2659,21 +2663,21 @@ func (p *peer) sendMessage(sync, priority bool, msgs ...lnwire.Message) error { // PubKey returns the pubkey of the peer in compressed serialized format. // // NOTE: Part of the lnpeer.Peer interface. -func (p *peer) PubKey() [33]byte { +func (p *Brontide) PubKey() [33]byte { return p.cfg.PubKeyBytes } // IdentityKey returns the public key of the remote peer. // // NOTE: Part of the lnpeer.Peer interface. -func (p *peer) IdentityKey() *btcec.PublicKey { +func (p *Brontide) IdentityKey() *btcec.PublicKey { return p.cfg.Addr.IdentityKey } // Address returns the network address of the remote peer. // // NOTE: Part of the lnpeer.Peer interface. -func (p *peer) Address() net.Addr { +func (p *Brontide) Address() net.Addr { return p.cfg.Addr.Address } @@ -2681,7 +2685,7 @@ func (p *peer) Address() net.Addr { // added if the cancel channel is closed. // // NOTE: Part of the lnpeer.Peer interface. -func (p *peer) AddNewChannel(channel *channeldb.OpenChannel, +func (p *Brontide) AddNewChannel(channel *channeldb.OpenChannel, cancel <-chan struct{}) error { errChan := make(chan error, 1) @@ -2710,14 +2714,14 @@ func (p *peer) AddNewChannel(channel *channeldb.OpenChannel, // StartTime returns the time at which the connection was established if the // peer started successfully, and zero otherwise. -func (p *peer) StartTime() time.Time { +func (p *Brontide) StartTime() time.Time { return p.startTime } // handleCloseMsg is called when a new cooperative channel closure related // message is received from the remote peer. 
We'll use this message to advance // the chan closer state machine. -func (p *peer) handleCloseMsg(msg *closeMsg) { +func (p *Brontide) handleCloseMsg(msg *closeMsg) { // We'll now fetch the matching closing state machine in order to continue, // or finalize the channel closure process. chanCloser, err := p.fetchActiveChanCloser(msg.cid) @@ -2778,7 +2782,7 @@ func (p *peer) handleCloseMsg(msg *closeMsg) { // HandleLocalCloseChanReqs accepts a *htlcswitch.ChanClose and passes it onto // the channelManager goroutine, which will shut down the link and possibly // close the channel. -func (p *peer) HandleLocalCloseChanReqs(req *htlcswitch.ChanClose) { +func (p *Brontide) HandleLocalCloseChanReqs(req *htlcswitch.ChanClose) { select { case p.localCloseChanReqs <- req: peerLog.Infof("Local close channel request delivered to peer: %v", @@ -2790,56 +2794,46 @@ func (p *peer) HandleLocalCloseChanReqs(req *htlcswitch.ChanClose) { } // NetAddress returns the network of the remote peer as an lnwire.NetAddress. -func (p *peer) NetAddress() *lnwire.NetAddress { +func (p *Brontide) NetAddress() *lnwire.NetAddress { return p.cfg.Addr } -// Inbound returns cfg.Inbound. -func (p *peer) Inbound() bool { +// Inbound is a getter for the Brontide's Inbound boolean in cfg. +func (p *Brontide) Inbound() bool { return p.cfg.Inbound } -// ConnReq returns cfg.ConnReq. -func (p *peer) ConnReq() *connmgr.ConnReq { +// ConnReq is a getter for the Brontide's connReq in cfg. +func (p *Brontide) ConnReq() *connmgr.ConnReq { return p.cfg.ConnReq } -// ErrorBuffer returns cfg.ErrorBuffer. -func (p *peer) ErrorBuffer() *queue.CircularBuffer { +// ErrorBuffer is a getter for the Brontide's errorBuffer in cfg. +func (p *Brontide) ErrorBuffer() *queue.CircularBuffer { return p.cfg.ErrorBuffer } // SetAddress sets the remote peer's address given an address. -func (p *peer) SetAddress(address net.Addr) { +func (p *Brontide) SetAddress(address net.Addr) { p.cfg.Addr.Address = address } // ActiveSignal returns the peer's active signal. -func (p *peer) ActiveSignal() chan struct{} { +func (p *Brontide) ActiveSignal() chan struct{} { return p.activeSignal } // Conn returns a pointer to the peer's connection struct. -func (p *peer) Conn() net.Conn { +func (p *Brontide) Conn() net.Conn { return p.cfg.Conn } // BytesReceived returns the number of bytes received from the peer. -func (p *peer) BytesReceived() uint64 { +func (p *Brontide) BytesReceived() uint64 { return atomic.LoadUint64(&p.bytesReceived) } // BytesSent returns the number of bytes sent to the peer. -func (p *peer) BytesSent() uint64 { +func (p *Brontide) BytesSent() uint64 { return atomic.LoadUint64(&p.bytesSent) } - -// LinkUpdater is an interface implemented by most messages in BOLT 2 that are -// allowed to update the channel state. -type LinkUpdater interface { - // TargetChanID returns the channel id of the link for which this - // message is intended. 
- TargetChanID() lnwire.ChannelID -} - -// TODO(roasbeef): make all start/stop mutexes a CAS diff --git a/peer_test.go b/peer/brontide_test.go similarity index 99% rename from peer_test.go rename to peer/brontide_test.go index adbb65700..78800b115 100644 --- a/peer_test.go +++ b/peer/brontide_test.go @@ -1,6 +1,4 @@ -// +build !rpctest - -package lnd +package peer import ( "bytes" @@ -35,7 +33,7 @@ var ( func TestPeerChannelClosureAcceptFeeResponder(t *testing.T) { t.Parallel() - notifier := &mockNotfier{ + notifier := &mockNotifier{ confChannel: make(chan *chainntnfs.TxConfirmation), } broadcastTxChan := make(chan *wire.MsgTx) @@ -136,7 +134,7 @@ func TestPeerChannelClosureAcceptFeeResponder(t *testing.T) { func TestPeerChannelClosureAcceptFeeInitiator(t *testing.T) { t.Parallel() - notifier := &mockNotfier{ + notifier := &mockNotifier{ confChannel: make(chan *chainntnfs.TxConfirmation), } broadcastTxChan := make(chan *wire.MsgTx) @@ -229,7 +227,6 @@ func TestPeerChannelClosureAcceptFeeInitiator(t *testing.T) { } // Alice should respond with the ClosingSigned they both agreed upon. - select { case outMsg := <-alicePeer.outgoingQueue: msg = outMsg.msg @@ -257,7 +254,7 @@ func TestPeerChannelClosureAcceptFeeInitiator(t *testing.T) { func TestPeerChannelClosureFeeNegotiationsResponder(t *testing.T) { t.Parallel() - notifier := &mockNotfier{ + notifier := &mockNotifier{ confChannel: make(chan *chainntnfs.TxConfirmation), } broadcastTxChan := make(chan *wire.MsgTx) @@ -449,7 +446,7 @@ func TestPeerChannelClosureFeeNegotiationsResponder(t *testing.T) { func TestPeerChannelClosureFeeNegotiationsInitiator(t *testing.T) { t.Parallel() - notifier := &mockNotfier{ + notifier := &mockNotifier{ confChannel: make(chan *chainntnfs.TxConfirmation), } broadcastTxChan := make(chan *wire.MsgTx) @@ -782,7 +779,7 @@ func TestCustomShutdownScript(t *testing.T) { test := test t.Run(test.name, func(t *testing.T) { - notifier := &mockNotfier{ + notifier := &mockNotifier{ confChannel: make(chan *chainntnfs.TxConfirmation), } broadcastTxChan := make(chan *wire.MsgTx) diff --git a/peer/log.go b/peer/log.go new file mode 100644 index 000000000..a1f9bda3e --- /dev/null +++ b/peer/log.go @@ -0,0 +1,40 @@ +package peer + +import ( + "github.com/btcsuite/btclog" + "github.com/lightningnetwork/lnd/build" +) + +// peerLog is a logger that is initialized with the btclog.Disabled logger. +var peerLog btclog.Logger + +// The default amount of logging is none. +func init() { + UseLogger(build.NewSubLogger("PEER", nil)) +} + +// DisableLog disables all logging output. +func DisableLog() { + UseLogger(btclog.Disabled) +} + +// UseLogger uses a specified Logger to output package logging info. +func UseLogger(logger btclog.Logger) { + peerLog = logger +} + +// logClosure is used to provide a closure over expensive logging operations +// so they aren't performed when the logging level doesn't warrant it. +type logClosure func() string + +// String invokes the underlying function and returns the result. +func (c logClosure) String() string { + return c() +} + +// newLogClosure returns a new closure over a function that returns a string +// which itself provides a Stringer interface so that it can be used with the +// logging system. 
+func newLogClosure(c func() string) logClosure { + return logClosure(c) +} diff --git a/peer/test_utils.go b/peer/test_utils.go new file mode 100644 index 000000000..df74eda11 --- /dev/null +++ b/peer/test_utils.go @@ -0,0 +1,695 @@ +package peer + +import ( + "bytes" + crand "crypto/rand" + "encoding/binary" + "fmt" + "io" + "io/ioutil" + "math/rand" + "net" + "os" + "time" + + "github.com/btcsuite/btcd/btcec" + "github.com/btcsuite/btcd/chaincfg" + "github.com/btcsuite/btcd/chaincfg/chainhash" + "github.com/btcsuite/btcd/txscript" + "github.com/btcsuite/btcd/wire" + "github.com/btcsuite/btcutil" + "github.com/btcsuite/btcwallet/wallet/txauthor" + "github.com/btcsuite/btcwallet/wtxmgr" + "github.com/lightningnetwork/lnd/chainntnfs" + "github.com/lightningnetwork/lnd/channeldb" + "github.com/lightningnetwork/lnd/htlcswitch" + "github.com/lightningnetwork/lnd/input" + "github.com/lightningnetwork/lnd/keychain" + "github.com/lightningnetwork/lnd/lnwallet" + "github.com/lightningnetwork/lnd/lnwallet/chainfee" + "github.com/lightningnetwork/lnd/lnwire" + "github.com/lightningnetwork/lnd/netann" + "github.com/lightningnetwork/lnd/queue" + "github.com/lightningnetwork/lnd/shachain" + "github.com/lightningnetwork/lnd/ticker" +) + +const ( + broadcastHeight = 100 +) + +var ( + alicesPrivKey = []byte{ + 0x2b, 0xd8, 0x06, 0xc9, 0x7f, 0x0e, 0x00, 0xaf, + 0x1a, 0x1f, 0xc3, 0x32, 0x8f, 0xa7, 0x63, 0xa9, + 0x26, 0x97, 0x23, 0xc8, 0xdb, 0x8f, 0xac, 0x4f, + 0x93, 0xaf, 0x71, 0xdb, 0x18, 0x6d, 0x6e, 0x90, + } + + bobsPrivKey = []byte{ + 0x81, 0xb6, 0x37, 0xd8, 0xfc, 0xd2, 0xc6, 0xda, + 0x63, 0x59, 0xe6, 0x96, 0x31, 0x13, 0xa1, 0x17, + 0xd, 0xe7, 0x95, 0xe4, 0xb7, 0x25, 0xb8, 0x4d, + 0x1e, 0xb, 0x4c, 0xfd, 0x9e, 0xc5, 0x8c, 0xe9, + } + + // Use a hard-coded HD seed. + testHdSeed = [32]byte{ + 0xb7, 0x94, 0x38, 0x5f, 0x2d, 0x1e, 0xf7, 0xab, + 0x4d, 0x92, 0x73, 0xd1, 0x90, 0x63, 0x81, 0xb4, + 0x4f, 0x2f, 0x6f, 0x25, 0x88, 0xa3, 0xef, 0xb9, + 0x6a, 0x49, 0x18, 0x83, 0x31, 0x98, 0x47, 0x53, + } + + // Just use some arbitrary bytes as delivery script. + dummyDeliveryScript = alicesPrivKey + + // testTx is used as the default funding txn for single-funder channels. + testTx = &wire.MsgTx{ + Version: 1, + TxIn: []*wire.TxIn{ + { + PreviousOutPoint: wire.OutPoint{ + Hash: chainhash.Hash{}, + Index: 0xffffffff, + }, + SignatureScript: []byte{0x04, 0x31, 0xdc, 0x00, 0x1b, 0x01, 0x62}, + Sequence: 0xffffffff, + }, + }, + TxOut: []*wire.TxOut{ + { + Value: 5000000000, + PkScript: []byte{ + 0x41, // OP_DATA_65 + 0x04, 0xd6, 0x4b, 0xdf, 0xd0, 0x9e, 0xb1, 0xc5, + 0xfe, 0x29, 0x5a, 0xbd, 0xeb, 0x1d, 0xca, 0x42, + 0x81, 0xbe, 0x98, 0x8e, 0x2d, 0xa0, 0xb6, 0xc1, + 0xc6, 0xa5, 0x9d, 0xc2, 0x26, 0xc2, 0x86, 0x24, + 0xe1, 0x81, 0x75, 0xe8, 0x51, 0xc9, 0x6b, 0x97, + 0x3d, 0x81, 0xb0, 0x1c, 0xc3, 0x1f, 0x04, 0x78, + 0x34, 0xbc, 0x06, 0xd6, 0xd6, 0xed, 0xf6, 0x20, + 0xd1, 0x84, 0x24, 0x1a, 0x6a, 0xed, 0x8b, 0x63, + 0xa6, // 65-byte signature + 0xac, // OP_CHECKSIG + }, + }, + }, + LockTime: 5, + } +) + +// noUpdate is a function which can be used as a parameter in createTestPeer to +// call the setup code with no custom values on the channels set up. 
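newLogClosure above exists so that expensive formatting only happens when the log level actually emits. A typical call site, in the spirit of the package's trace logging (spew and the msg variable are assumed to be in scope):

// The spew dump below is only computed if trace logging is enabled: the
// logger stringifies its arguments lazily, via the closure's String method.
peerLog.Tracef("Received message: %v", newLogClosure(func() string {
	return spew.Sdump(msg)
}))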
+var noUpdate = func(a, b *channeldb.OpenChannel) {} + +type mockSigner struct { + key *btcec.PrivateKey +} + +func (m *mockSigner) SignOutputRaw(tx *wire.MsgTx, + signDesc *input.SignDescriptor) (input.Signature, error) { + amt := signDesc.Output.Value + witnessScript := signDesc.WitnessScript + privKey := m.key + + if !privKey.PubKey().IsEqual(signDesc.KeyDesc.PubKey) { + return nil, fmt.Errorf("incorrect key passed") + } + + switch { + case signDesc.SingleTweak != nil: + privKey = input.TweakPrivKey(privKey, + signDesc.SingleTweak) + case signDesc.DoubleTweak != nil: + privKey = input.DeriveRevocationPrivKey(privKey, + signDesc.DoubleTweak) + } + + sig, err := txscript.RawTxInWitnessSignature(tx, signDesc.SigHashes, + signDesc.InputIndex, amt, witnessScript, signDesc.HashType, + privKey) + if err != nil { + return nil, err + } + + return btcec.ParseDERSignature(sig[:len(sig)-1], btcec.S256()) +} + +func (m *mockSigner) ComputeInputScript(tx *wire.MsgTx, + signDesc *input.SignDescriptor) (*input.Script, error) { + + // TODO(roasbeef): expose tweaked signer from lnwallet so don't need to + // duplicate this code? + + privKey := m.key + + switch { + case signDesc.SingleTweak != nil: + privKey = input.TweakPrivKey(privKey, + signDesc.SingleTweak) + case signDesc.DoubleTweak != nil: + privKey = input.DeriveRevocationPrivKey(privKey, + signDesc.DoubleTweak) + } + + witnessScript, err := txscript.WitnessSignature(tx, signDesc.SigHashes, + signDesc.InputIndex, signDesc.Output.Value, signDesc.Output.PkScript, + signDesc.HashType, privKey, true) + if err != nil { + return nil, err + } + + return &input.Script{ + Witness: witnessScript, + }, nil +} + +var _ input.Signer = (*mockSigner)(nil) + +type mockChainIO struct { + bestHeight int32 +} + +func (m *mockChainIO) GetBestBlock() (*chainhash.Hash, int32, error) { + return nil, m.bestHeight, nil +} + +func (*mockChainIO) GetUtxo(op *wire.OutPoint, _ []byte, + heightHint uint32, _ <-chan struct{}) (*wire.TxOut, error) { + return nil, nil +} + +func (*mockChainIO) GetBlockHash(blockHeight int64) (*chainhash.Hash, error) { + return nil, nil +} + +func (*mockChainIO) GetBlock(blockHash *chainhash.Hash) (*wire.MsgBlock, error) { + return nil, nil +} + +var _ lnwallet.BlockChainIO = (*mockChainIO)(nil) + +type mockWalletController struct { + rootKey *btcec.PrivateKey + publishedTxns chan *wire.MsgTx +} + +func (*mockWalletController) FetchInputInfo(prevOut *wire.OutPoint) ( + *lnwallet.Utxo, error) { + + return nil, nil +} + +func (*mockWalletController) ConfirmedBalance(confs int32) (btcutil.Amount, + error) { + + return 0, nil +} + +func (m *mockWalletController) NewAddress(addrType lnwallet.AddressType, + change bool) (btcutil.Address, error) { + + addr, _ := btcutil.NewAddressPubKey( + m.rootKey.PubKey().SerializeCompressed(), &chaincfg.MainNetParams, + ) + return addr, nil +} + +func (*mockWalletController) LastUnusedAddress(addrType lnwallet.AddressType) ( + btcutil.Address, error) { + + return nil, nil +} + +func (*mockWalletController) IsOurAddress(a btcutil.Address) bool { + return false +} + +func (*mockWalletController) SendOutputs(outputs []*wire.TxOut, + feeRate chainfee.SatPerKWeight, label string) (*wire.MsgTx, error) { + + return nil, nil +} + +func (*mockWalletController) CreateSimpleTx(outputs []*wire.TxOut, + feeRate chainfee.SatPerKWeight, dryRun bool) (*txauthor.AuthoredTx, error) { + + return nil, nil +} + +func (*mockWalletController) ListUnspentWitness(minconfirms, + maxconfirms int32) ([]*lnwallet.Utxo, error) { + + return nil, 
nil +} + +func (*mockWalletController) ListTransactionDetails(startHeight, + endHeight int32) ([]*lnwallet.TransactionDetail, error) { + + return nil, nil +} + +func (*mockWalletController) LockOutpoint(o wire.OutPoint) {} + +func (*mockWalletController) UnlockOutpoint(o wire.OutPoint) {} + +func (m *mockWalletController) PublishTransaction(tx *wire.MsgTx, + label string) error { + m.publishedTxns <- tx + return nil +} + +func (*mockWalletController) LabelTransaction(hash chainhash.Hash, + label string, overwrite bool) error { + + return nil +} + +func (*mockWalletController) SubscribeTransactions() ( + lnwallet.TransactionSubscription, error) { + + return nil, nil +} + +func (*mockWalletController) IsSynced() (bool, int64, error) { + return false, 0, nil +} + +func (*mockWalletController) Start() error { + return nil +} + +func (*mockWalletController) Stop() error { + return nil +} + +func (*mockWalletController) BackEnd() string { + return "" +} + +func (*mockWalletController) LeaseOutput(wtxmgr.LockID, + wire.OutPoint) (time.Time, error) { + + return time.Now(), nil +} + +func (*mockWalletController) ReleaseOutput(wtxmgr.LockID, wire.OutPoint) error { + return nil +} + +func (*mockWalletController) GetRecoveryInfo() (bool, float64, error) { + return false, 0, nil +} + +var _ lnwallet.WalletController = (*mockWalletController)(nil) + +type mockNotifier struct { + confChannel chan *chainntnfs.TxConfirmation +} + +func (m *mockNotifier) RegisterConfirmationsNtfn(txid *chainhash.Hash, + _ []byte, numConfs, heightHint uint32) (*chainntnfs.ConfirmationEvent, + error) { + + return &chainntnfs.ConfirmationEvent{ + Confirmed: m.confChannel, + }, nil +} + +func (m *mockNotifier) RegisterSpendNtfn(outpoint *wire.OutPoint, _ []byte, + heightHint uint32) (*chainntnfs.SpendEvent, error) { + + return &chainntnfs.SpendEvent{ + Spend: make(chan *chainntnfs.SpendDetail), + Cancel: func() {}, + }, nil +} + +func (m *mockNotifier) RegisterBlockEpochNtfn( + bestBlock *chainntnfs.BlockEpoch) (*chainntnfs.BlockEpochEvent, error) { + + return &chainntnfs.BlockEpochEvent{ + Epochs: make(chan *chainntnfs.BlockEpoch), + Cancel: func() {}, + }, nil +} + +func (m *mockNotifier) Start() error { + return nil +} + +func (m *mockNotifier) Stop() error { + return nil +} + +func (m *mockNotifier) Started() bool { + return true +} + +var _ chainntnfs.ChainNotifier = (*mockNotifier)(nil) + +// createTestPeer creates a channel between two nodes, and returns a peer for +// one of the nodes, together with the channel seen from both nodes. It takes +// an updateChan function which can be used to modify the default values on +// the channel states for each peer. 
+func createTestPeer(notifier chainntnfs.ChainNotifier, + publTx chan *wire.MsgTx, updateChan func(a, b *channeldb.OpenChannel)) ( + *Brontide, *lnwallet.LightningChannel, func(), error) { + + aliceKeyPriv, aliceKeyPub := btcec.PrivKeyFromBytes( + btcec.S256(), alicesPrivKey, + ) + aliceKeySigner := &keychain.PrivKeyDigestSigner{PrivKey: aliceKeyPriv} + bobKeyPriv, bobKeyPub := btcec.PrivKeyFromBytes( + btcec.S256(), bobsPrivKey, + ) + + channelCapacity := btcutil.Amount(10 * 1e8) + channelBal := channelCapacity / 2 + aliceDustLimit := btcutil.Amount(200) + bobDustLimit := btcutil.Amount(1300) + csvTimeoutAlice := uint32(5) + csvTimeoutBob := uint32(4) + + prevOut := &wire.OutPoint{ + Hash: chainhash.Hash(testHdSeed), + Index: 0, + } + fundingTxIn := wire.NewTxIn(prevOut, nil, nil) + + aliceCfg := channeldb.ChannelConfig{ + ChannelConstraints: channeldb.ChannelConstraints{ + DustLimit: aliceDustLimit, + MaxPendingAmount: lnwire.MilliSatoshi(rand.Int63()), + ChanReserve: btcutil.Amount(rand.Int63()), + MinHTLC: lnwire.MilliSatoshi(rand.Int63()), + MaxAcceptedHtlcs: uint16(rand.Int31()), + CsvDelay: uint16(csvTimeoutAlice), + }, + MultiSigKey: keychain.KeyDescriptor{ + PubKey: aliceKeyPub, + }, + RevocationBasePoint: keychain.KeyDescriptor{ + PubKey: aliceKeyPub, + }, + PaymentBasePoint: keychain.KeyDescriptor{ + PubKey: aliceKeyPub, + }, + DelayBasePoint: keychain.KeyDescriptor{ + PubKey: aliceKeyPub, + }, + HtlcBasePoint: keychain.KeyDescriptor{ + PubKey: aliceKeyPub, + }, + } + bobCfg := channeldb.ChannelConfig{ + ChannelConstraints: channeldb.ChannelConstraints{ + DustLimit: bobDustLimit, + MaxPendingAmount: lnwire.MilliSatoshi(rand.Int63()), + ChanReserve: btcutil.Amount(rand.Int63()), + MinHTLC: lnwire.MilliSatoshi(rand.Int63()), + MaxAcceptedHtlcs: uint16(rand.Int31()), + CsvDelay: uint16(csvTimeoutBob), + }, + MultiSigKey: keychain.KeyDescriptor{ + PubKey: bobKeyPub, + }, + RevocationBasePoint: keychain.KeyDescriptor{ + PubKey: bobKeyPub, + }, + PaymentBasePoint: keychain.KeyDescriptor{ + PubKey: bobKeyPub, + }, + DelayBasePoint: keychain.KeyDescriptor{ + PubKey: bobKeyPub, + }, + HtlcBasePoint: keychain.KeyDescriptor{ + PubKey: bobKeyPub, + }, + } + + bobRoot, err := chainhash.NewHash(bobKeyPriv.Serialize()) + if err != nil { + return nil, nil, nil, err + } + bobPreimageProducer := shachain.NewRevocationProducer(*bobRoot) + bobFirstRevoke, err := bobPreimageProducer.AtIndex(0) + if err != nil { + return nil, nil, nil, err + } + bobCommitPoint := input.ComputeCommitmentPoint(bobFirstRevoke[:]) + + aliceRoot, err := chainhash.NewHash(aliceKeyPriv.Serialize()) + if err != nil { + return nil, nil, nil, err + } + alicePreimageProducer := shachain.NewRevocationProducer(*aliceRoot) + aliceFirstRevoke, err := alicePreimageProducer.AtIndex(0) + if err != nil { + return nil, nil, nil, err + } + aliceCommitPoint := input.ComputeCommitmentPoint(aliceFirstRevoke[:]) + + aliceCommitTx, bobCommitTx, err := lnwallet.CreateCommitmentTxns( + channelBal, channelBal, &aliceCfg, &bobCfg, aliceCommitPoint, + bobCommitPoint, *fundingTxIn, channeldb.SingleFunderTweaklessBit, + ) + if err != nil { + return nil, nil, nil, err + } + + alicePath, err := ioutil.TempDir("", "alicedb") + if err != nil { + return nil, nil, nil, err + } + + dbAlice, err := channeldb.Open(alicePath) + if err != nil { + return nil, nil, nil, err + } + + bobPath, err := ioutil.TempDir("", "bobdb") + if err != nil { + return nil, nil, nil, err + } + + dbBob, err := channeldb.Open(bobPath) + if err != nil { + return nil, nil, nil, err + 
+ }
+
+ estimator := chainfee.NewStaticEstimator(12500, 0)
+ feePerKw, err := estimator.EstimateFeePerKW(1)
+ if err != nil {
+ return nil, nil, nil, err
+ }
+
+ // TODO(roasbeef): need to factor in commit fee?
+ aliceCommit := channeldb.ChannelCommitment{
+ CommitHeight: 0,
+ LocalBalance: lnwire.NewMSatFromSatoshis(channelBal),
+ RemoteBalance: lnwire.NewMSatFromSatoshis(channelBal),
+ FeePerKw: btcutil.Amount(feePerKw),
+ CommitFee: feePerKw.FeeForWeight(input.CommitWeight),
+ CommitTx: aliceCommitTx,
+ CommitSig: bytes.Repeat([]byte{1}, 71),
+ }
+ bobCommit := channeldb.ChannelCommitment{
+ CommitHeight: 0,
+ LocalBalance: lnwire.NewMSatFromSatoshis(channelBal),
+ RemoteBalance: lnwire.NewMSatFromSatoshis(channelBal),
+ FeePerKw: btcutil.Amount(feePerKw),
+ CommitFee: feePerKw.FeeForWeight(input.CommitWeight),
+ CommitTx: bobCommitTx,
+ CommitSig: bytes.Repeat([]byte{1}, 71),
+ }
+
+ // Draw a random short channel ID for the test channel.
+ var chanIDBytes [8]byte
+ if _, err := io.ReadFull(crand.Reader, chanIDBytes[:]); err != nil {
+ return nil, nil, nil, err
+ }
+
+ shortChanID := lnwire.NewShortChanIDFromInt(
+ binary.BigEndian.Uint64(chanIDBytes[:]),
+ )
+
+ // Assemble the on-disk channel state for each side.
+ aliceChannelState := &channeldb.OpenChannel{
+ LocalChanCfg: aliceCfg,
+ RemoteChanCfg: bobCfg,
+ IdentityPub: aliceKeyPub,
+ FundingOutpoint: *prevOut,
+ ShortChannelID: shortChanID,
+ ChanType: channeldb.SingleFunderTweaklessBit,
+ IsInitiator: true,
+ Capacity: channelCapacity,
+ RemoteCurrentRevocation: bobCommitPoint,
+ RevocationProducer: alicePreimageProducer,
+ RevocationStore: shachain.NewRevocationStore(),
+ LocalCommitment: aliceCommit,
+ RemoteCommitment: aliceCommit,
+ Db: dbAlice,
+ Packager: channeldb.NewChannelPackager(shortChanID),
+ FundingTxn: testTx,
+ }
+ bobChannelState := &channeldb.OpenChannel{
+ LocalChanCfg: bobCfg,
+ RemoteChanCfg: aliceCfg,
+ IdentityPub: bobKeyPub,
+ FundingOutpoint: *prevOut,
+ ChanType: channeldb.SingleFunderTweaklessBit,
+ IsInitiator: false,
+ Capacity: channelCapacity,
+ RemoteCurrentRevocation: aliceCommitPoint,
+ RevocationProducer: bobPreimageProducer,
+ RevocationStore: shachain.NewRevocationStore(),
+ LocalCommitment: bobCommit,
+ RemoteCommitment: bobCommit,
+ Db: dbBob,
+ Packager: channeldb.NewChannelPackager(shortChanID),
+ }
+
+ // Set custom values on the channel states.
+ updateChan(aliceChannelState, bobChannelState)
+
+ aliceAddr := &net.TCPAddr{
+ IP: net.ParseIP("127.0.0.1"),
+ Port: 18555,
+ }
+
+ if err := aliceChannelState.SyncPending(aliceAddr, 0); err != nil {
+ return nil, nil, nil, err
+ }
+
+ bobAddr := &net.TCPAddr{
+ IP: net.ParseIP("127.0.0.1"),
+ Port: 18556,
+ }
+
+ if err := bobChannelState.SyncPending(bobAddr, 0); err != nil {
+ return nil, nil, nil, err
+ }
+
+ cleanUpFunc := func() {
+ os.RemoveAll(bobPath)
+ os.RemoveAll(alicePath)
+ }
+
+ aliceSigner := &mockSigner{aliceKeyPriv}
+ bobSigner := &mockSigner{bobKeyPriv}
+
+ alicePool := lnwallet.NewSigPool(1, aliceSigner)
+ channelAlice, err := lnwallet.NewLightningChannel(
+ aliceSigner, aliceChannelState, alicePool,
+ )
+ if err != nil {
+ return nil, nil, nil, err
+ }
+ if err := alicePool.Start(); err != nil {
+ return nil, nil, nil, err
+ }
+
+ bobPool := lnwallet.NewSigPool(1, bobSigner)
+ channelBob, err := lnwallet.NewLightningChannel(
+ bobSigner, bobChannelState, bobPool,
+ )
+ if err != nil {
+ return nil, nil, nil, err
+ }
+ if err := bobPool.Start(); err != nil {
+ return nil, nil, nil, err
+ }
+
+ chainIO := &mockChainIO{
+ bestHeight: broadcastHeight,
+ }
+ wallet := &lnwallet.LightningWallet{
+ WalletController: &mockWalletController{
+ rootKey: aliceKeyPriv,
+ publishedTxns: publTx,
+ },
+ }
+
+ _, currentHeight, err := chainIO.GetBestBlock()
+ if err != nil {
+ return nil, nil, nil, err
+ }
+
+ // Spin up a minimal htlcswitch instance backed by Alice's database.
+ htlcSwitch, err := htlcswitch.New(htlcswitch.Config{
+ DB: dbAlice,
+ SwitchPackager: channeldb.NewSwitchPackager(),
+ Notifier: notifier,
+ FwdEventTicker: ticker.New(
+ htlcswitch.DefaultFwdEventInterval),
+ LogEventTicker: ticker.New(
+ htlcswitch.DefaultLogInterval),
+ AckEventTicker: ticker.New(
+ htlcswitch.DefaultAckInterval),
+ }, uint32(currentHeight))
+ if err != nil {
+ return nil, nil, nil, err
+ }
+ if err = htlcSwitch.Start(); err != nil {
+ return nil, nil, nil, err
+ }
+
+ nodeSignerAlice := netann.NewNodeSigner(aliceKeySigner)
+
+ const chanActiveTimeout = time.Minute
+
+ chanStatusMgr, err := netann.NewChanStatusManager(&netann.ChanStatusConfig{
+ ChanStatusSampleInterval: 30 * time.Second,
+ ChanEnableTimeout: chanActiveTimeout,
+ ChanDisableTimeout: 2 * time.Minute,
+ DB: dbAlice,
+ Graph: dbAlice.ChannelGraph(),
+ MessageSigner: nodeSignerAlice,
+ OurPubKey: aliceKeyPub,
+ IsChannelActive: htlcSwitch.HasActiveLink,
+ ApplyChannelUpdate: func(*lnwire.ChannelUpdate) error { return nil },
+ })
+ if err != nil {
+ return nil, nil, nil, err
+ }
+ if err = chanStatusMgr.Start(); err != nil {
+ return nil, nil, nil, err
+ }
+
+ errBuffer, err := queue.NewCircularBuffer(ErrorBufferSize)
+ if err != nil {
+ return nil, nil, nil, err
+ }
+
+ var pubKey [33]byte
+ copy(pubKey[:], aliceKeyPub.SerializeCompressed())
+
+ cfgAddr := &lnwire.NetAddress{
+ IdentityKey: aliceKeyPub,
+ Address: aliceAddr,
+ ChainNet: wire.SimNet,
+ }
+
+ cfg := &Config{
+ Addr: cfgAddr,
+ PubKeyBytes: pubKey,
+ ErrorBuffer: errBuffer,
+ ChainIO: chainIO,
+ Switch: htlcSwitch,
+
+ ChanActiveTimeout: chanActiveTimeout,
+ InterceptSwitch: htlcswitch.NewInterceptableSwitch(htlcSwitch),
+
+ ChannelDB: dbAlice,
+ FeeEstimator: estimator,
+ Wallet: wallet,
+ ChainNotifier: notifier,
+ ChanStatusMgr: chanStatusMgr,
+ DisconnectPeer: func(b *btcec.PublicKey) error { return nil },
+ }
+
+ alicePeer := NewBrontide(*cfg)
+
+ // Register the channel with the peer and start its channel manager.
+ chanID := lnwire.NewChanIDFromOutPoint(channelAlice.ChannelPoint())
+ alicePeer.activeChannels[chanID] = channelAlice
+
+ alicePeer.wg.Add(1)
+ go alicePeer.channelManager()
+
+ return alicePeer, channelBob, cleanUpFunc, nil
+}
diff --git a/rpcserver.go b/rpcserver.go
index 
3517eb22c..be27c48ab 100644 --- a/rpcserver.go +++ b/rpcserver.go @@ -57,6 +57,7 @@ import ( "github.com/lightningnetwork/lnd/lnwire" "github.com/lightningnetwork/lnd/macaroons" "github.com/lightningnetwork/lnd/monitoring" + "github.com/lightningnetwork/lnd/peer" "github.com/lightningnetwork/lnd/peernotifier" "github.com/lightningnetwork/lnd/record" "github.com/lightningnetwork/lnd/routing" @@ -2105,17 +2106,17 @@ func (r *rpcServer) CloseChannel(in *lnrpc.CloseChannelRequest, // With the transaction broadcast, we send our first update to // the client. updateChan = make(chan interface{}, 2) - updateChan <- &PendingUpdate{ + updateChan <- &peer.PendingUpdate{ Txid: closingTxid[:], } errChan = make(chan error, 1) notifier := r.server.cc.chainNotifier - go WaitForChanToClose(uint32(bestHeight), notifier, errChan, chanPoint, + go peer.WaitForChanToClose(uint32(bestHeight), notifier, errChan, chanPoint, &closingTxid, closingTx.TxOut[0].PkScript, func() { // Respond to the local subsystem which // requested the channel closure. - updateChan <- &ChannelCloseUpdate{ + updateChan <- &peer.ChannelCloseUpdate{ ClosingTxid: closingTxid[:], Success: true, } @@ -2228,7 +2229,7 @@ out: // then we can break out of our dispatch loop as we no // longer need to process any further updates. switch closeUpdate := closingUpdate.(type) { - case *ChannelCloseUpdate: + case *peer.ChannelCloseUpdate: h, _ := chainhash.NewHash(closeUpdate.ClosingTxid) rpcsLog.Infof("[closechannel] close completed: "+ "txid(%v)", h) @@ -2246,7 +2247,7 @@ func createRPCCloseUpdate(update interface{}) ( *lnrpc.CloseStatusUpdate, error) { switch u := update.(type) { - case *ChannelCloseUpdate: + case *peer.ChannelCloseUpdate: return &lnrpc.CloseStatusUpdate{ Update: &lnrpc.CloseStatusUpdate_ChanClose{ ChanClose: &lnrpc.ChannelCloseUpdate{ @@ -2254,7 +2255,7 @@ func createRPCCloseUpdate(update interface{}) ( }, }, }, nil - case *PendingUpdate: + case *peer.PendingUpdate: return &lnrpc.CloseStatusUpdate{ Update: &lnrpc.CloseStatusUpdate_ClosePending{ ClosePending: &lnrpc.PendingUpdate{ @@ -2571,7 +2572,7 @@ func (r *rpcServer) ListPeers(ctx context.Context, serverPeer.RemoteFeatures(), ) - peer := &lnrpc.Peer{ + rpcPeer := &lnrpc.Peer{ PubKey: hex.EncodeToString(nodePub[:]), Address: serverPeer.Conn().RemoteAddr().String(), Inbound: serverPeer.Inbound(), @@ -2601,17 +2602,17 @@ func (r *rpcServer) ListPeers(ctx context.Context, // Add the relevant peer errors to our response. 
for _, error := range peerErrors { - tsError := error.(*TimestampedError) + tsError := error.(*peer.TimestampedError) rpcErr := &lnrpc.TimestampedError{ Timestamp: uint64(tsError.Timestamp.Unix()), Error: tsError.Error.Error(), } - peer.Errors = append(peer.Errors, rpcErr) + rpcPeer.Errors = append(rpcPeer.Errors, rpcErr) } - resp.Peers = append(resp.Peers, peer) + resp.Peers = append(resp.Peers, rpcPeer) } rpcsLog.Debugf("[listpeers] yielded %v peers", serverPeers) diff --git a/server.go b/server.go index 28852df2d..0d1138311 100644 --- a/server.go +++ b/server.go @@ -52,7 +52,7 @@ import ( "github.com/lightningnetwork/lnd/lnwire" "github.com/lightningnetwork/lnd/nat" "github.com/lightningnetwork/lnd/netann" - ppeer "github.com/lightningnetwork/lnd/peer" + "github.com/lightningnetwork/lnd/peer" "github.com/lightningnetwork/lnd/peernotifier" "github.com/lightningnetwork/lnd/pool" "github.com/lightningnetwork/lnd/queue" @@ -114,7 +114,7 @@ var ( // errPeerAlreadyConnected is an error returned by the server when we're // commanded to connect to a peer, but they're already connected. type errPeerAlreadyConnected struct { - peer *peer + peer *peer.Brontide } // Error returns the human readable version of this error type. @@ -168,10 +168,10 @@ type server struct { lastDetectedIP net.IP mu sync.RWMutex - peersByPub map[string]*peer + peersByPub map[string]*peer.Brontide - inboundPeers map[string]*peer - outboundPeers map[string]*peer + inboundPeers map[string]*peer.Brontide + outboundPeers map[string]*peer.Brontide peerConnectedListeners map[string][]chan<- lnpeer.Peer peerDisconnectedListeners map[string][]chan<- struct{} @@ -191,7 +191,7 @@ type server struct { // a disconnect. Adding a peer to this map causes the peer termination // watcher to short circuit in the event that peers are purposefully // disconnected. - ignorePeerTermination map[*peer]struct{} + ignorePeerTermination map[*peer.Brontide]struct{} // scheduledPeerConnection maps a pubkey string to a callback that // should be executed in the peerTerminationWatcher the prior peer with @@ -453,12 +453,12 @@ func newServer(cfg *Config, listenAddrs []net.Addr, chanDB *channeldb.DB, persistentConnReqs: make(map[string][]*connmgr.ConnReq), persistentRetryCancels: make(map[string]chan struct{}), peerErrors: make(map[string]*queue.CircularBuffer), - ignorePeerTermination: make(map[*peer]struct{}), + ignorePeerTermination: make(map[*peer.Brontide]struct{}), scheduledPeerConnection: make(map[string]func()), - peersByPub: make(map[string]*peer), - inboundPeers: make(map[string]*peer), - outboundPeers: make(map[string]*peer), + peersByPub: make(map[string]*peer.Brontide), + inboundPeers: make(map[string]*peer.Brontide), + outboundPeers: make(map[string]*peer.Brontide), peerConnectedListeners: make(map[string][]chan<- lnpeer.Peer), peerDisconnectedListeners: make(map[string][]chan<- struct{}), @@ -2309,7 +2309,7 @@ func (s *server) BroadcastMessage(skips map[route.Vertex]struct{}, // peersByPub throughout this process to ensure we deliver messages to // exact set of peers present at the time of invocation. s.mu.RLock() - peers := make([]*peer, 0, len(s.peersByPub)) + peers := make([]*peer.Brontide, 0, len(s.peersByPub)) for _, sPeer := range s.peersByPub { if skips != nil { if _, ok := skips[sPeer.PubKey()]; ok { @@ -2412,7 +2412,7 @@ func (s *server) NotifyWhenOffline(peerPubKey [33]byte) <-chan struct{} { // daemon's local representation of the remote peer. // // NOTE: This function is safe for concurrent access. 
-func (s *server) FindPeer(peerKey *btcec.PublicKey) (*peer, error) {
+func (s *server) FindPeer(peerKey *btcec.PublicKey) (*peer.Brontide, error) {
s.mu.RLock()
defer s.mu.RUnlock()
@@ -2426,7 +2426,7 @@ func (s *server) FindPeer(peerKey *btcec.PublicKey) (*peer, error) {
// public key.
//
// NOTE: This function is safe for concurrent access.
-func (s *server) FindPeerByPubStr(pubStr string) (*peer, error) {
+func (s *server) FindPeerByPubStr(pubStr string) (*peer.Brontide, error) {
s.mu.RLock()
defer s.mu.RUnlock()
@@ -2435,7 +2435,7 @@
// findPeerByPubStr is an internal method that retrieves the specified peer from
-// the server's internal state using.
-func (s *server) findPeerByPubStr(pubStr string) (*peer, error) {
+// the server's internal state using the target public key string.
+func (s *server) findPeerByPubStr(pubStr string) (*peer.Brontide, error) {
peer, ok := s.peersByPub[pubStr]
if !ok {
return nil, ErrPeerNotConnected
@@ -2785,7 +2785,7 @@ func (s *server) peerConnected(conn net.Conn, connReq *connmgr.ConnReq,
errBuffer, ok := s.peerErrors[pkStr]
if !ok {
var err error
- errBuffer, err = queue.NewCircularBuffer(ErrorBufferSize)
+ errBuffer, err = queue.NewCircularBuffer(peer.ErrorBufferSize)
if err != nil {
srvrLog.Errorf("unable to create peer %v", err)
return
@@ -2798,7 +2798,7 @@ func (s *server) peerConnected(conn net.Conn, connReq *connmgr.ConnReq,
// offered that would trigger channel closure. In case of outgoing
// htlcs, an extra block is added to prevent the channel from being
// closed when the htlc is outstanding and a new block comes in.
- pCfg := ppeer.Config{
+ pCfg := peer.Config{
Conn: conn,
ConnReq: connReq,
Addr: peerAddr,
@@ -2853,7 +2853,7 @@ func (s *server) peerConnected(conn net.Conn, connReq *connmgr.ConnReq,
copy(pCfg.PubKeyBytes[:], peerAddr.IdentityKey.SerializeCompressed())
copy(pCfg.ServerPubKey[:], s.identityECDH.PubKey().SerializeCompressed())
- p := newPeer(pCfg)
+ p := peer.NewBrontide(pCfg)
// TODO(roasbeef): update IP address for link-node
// * also mark last-seen, do it one single transaction?
@@ -2874,7 +2874,7 @@ func (s *server) peerConnected(conn net.Conn, connReq *connmgr.ConnReq,
// addPeer adds the passed peer to the server's global state of all active
// peers.
-func (s *server) addPeer(p *peer) {
+func (s *server) addPeer(p *peer.Brontide) {
if p == nil {
return
}
@@ -2890,7 +2890,7 @@ func (s *server) addPeer(p *peer) {
// TODO(roasbeef): pipe all requests through to the
// queryHandler/peerManager
- pubSer := p.NetAddress().IdentityKey.SerializeCompressed()
+ pubSer := p.IdentityKey().SerializeCompressed()
pubStr := string(pubSer)
s.peersByPub[pubStr] = p
@@ -2918,7 +2918,7 @@ func (s *server) addPeer(p *peer) {
// be signaled of the new peer once the method returns.
//
// NOTE: This MUST be launched as a goroutine.
-func (s *server) peerInitializer(p *peer) {
+func (s *server) peerInitializer(p *peer.Brontide) {
defer s.wg.Done()
// Avoid initializing peers while the server is exiting.
@@ -2979,7 +2979,7 @@ func (s *server) peerInitializer(p *peer) {
// successfully, otherwise the peer should be disconnected instead.
//
// NOTE: This MUST be launched as a goroutine.
-func (s *server) peerTerminationWatcher(p *peer, ready chan struct{}) { +func (s *server) peerTerminationWatcher(p *peer.Brontide, ready chan struct{}) { defer s.wg.Done() p.WaitForDisconnect(ready) @@ -3149,7 +3149,7 @@ func (s *server) peerTerminationWatcher(p *peer, ready chan struct{}) { // removePeer removes the passed peer from the server's state of all active // peers. -func (s *server) removePeer(p *peer) { +func (s *server) removePeer(p *peer.Brontide) { if p == nil { return } @@ -3405,8 +3405,8 @@ func (s *server) OpenChannel( // We'll wait until the peer is active before beginning the channel // opening process. select { - case <-peer.activeSignal: - case <-peer.quit: + case <-peer.ActiveSignal(): + case <-peer.QuitSignal(): req.err <- fmt.Errorf("peer %x disconnected", pubKeyBytes) return req.updates, req.err case <-s.quit: @@ -3438,11 +3438,11 @@ func (s *server) OpenChannel( // Peers returns a slice of all active peers. // // NOTE: This function is safe for concurrent access. -func (s *server) Peers() []*peer { +func (s *server) Peers() []*peer.Brontide { s.mu.RLock() defer s.mu.RUnlock() - peers := make([]*peer, 0, len(s.peersByPub)) + peers := make([]*peer.Brontide, 0, len(s.peersByPub)) for _, peer := range s.peersByPub { peers = append(peers, peer) } diff --git a/test_utils.go b/test_utils.go index c9d6f653e..78c2601e8 100644 --- a/test_utils.go +++ b/test_utils.go @@ -1,33 +1,8 @@ package lnd import ( - "bytes" - crand "crypto/rand" - "encoding/binary" - "io" - "io/ioutil" - "math/rand" - "net" - "os" - "time" - - "github.com/btcsuite/btcd/btcec" "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcd/wire" - "github.com/btcsuite/btcutil" - "github.com/lightningnetwork/lnd/chainntnfs" - "github.com/lightningnetwork/lnd/channeldb" - "github.com/lightningnetwork/lnd/htlcswitch" - "github.com/lightningnetwork/lnd/input" - "github.com/lightningnetwork/lnd/keychain" - "github.com/lightningnetwork/lnd/lnwallet" - "github.com/lightningnetwork/lnd/lnwallet/chainfee" - "github.com/lightningnetwork/lnd/lnwire" - "github.com/lightningnetwork/lnd/netann" - ppeer "github.com/lightningnetwork/lnd/peer" - "github.com/lightningnetwork/lnd/queue" - "github.com/lightningnetwork/lnd/shachain" - "github.com/lightningnetwork/lnd/ticker" ) var ( @@ -53,9 +28,6 @@ var ( 0x6a, 0x49, 0x18, 0x83, 0x31, 0x98, 0x47, 0x53, } - // Just use some arbitrary bytes as delivery script. - dummyDeliveryScript = alicesPrivKey[:] - // testTx is used as the default funding txn for single-funder channels. testTx = &wire.MsgTx{ Version: 1, @@ -90,352 +62,3 @@ var ( LockTime: 5, } ) - -// noUpdate is a function which can be used as a parameter in createTestPeer to -// call the setup code with no custom values on the channels set up. -var noUpdate = func(a, b *channeldb.OpenChannel) {} - -// createTestPeer creates a channel between two nodes, and returns a peer for -// one of the nodes, together with the channel seen from both nodes. It takes -// an updateChan function which can be used to modify the default values on -// the channel states for each peer. 
-func createTestPeer(notifier chainntnfs.ChainNotifier, publTx chan *wire.MsgTx, - updateChan func(a, b *channeldb.OpenChannel)) (*peer, *lnwallet.LightningChannel, - func(), error) { - - aliceKeyPriv, aliceKeyPub := btcec.PrivKeyFromBytes( - btcec.S256(), alicesPrivKey, - ) - aliceKeySigner := &keychain.PrivKeyDigestSigner{PrivKey: aliceKeyPriv} - bobKeyPriv, bobKeyPub := btcec.PrivKeyFromBytes( - btcec.S256(), bobsPrivKey, - ) - - channelCapacity := btcutil.Amount(10 * 1e8) - channelBal := channelCapacity / 2 - aliceDustLimit := btcutil.Amount(200) - bobDustLimit := btcutil.Amount(1300) - csvTimeoutAlice := uint32(5) - csvTimeoutBob := uint32(4) - - prevOut := &wire.OutPoint{ - Hash: chainhash.Hash(testHdSeed), - Index: 0, - } - fundingTxIn := wire.NewTxIn(prevOut, nil, nil) - - aliceCfg := channeldb.ChannelConfig{ - ChannelConstraints: channeldb.ChannelConstraints{ - DustLimit: aliceDustLimit, - MaxPendingAmount: lnwire.MilliSatoshi(rand.Int63()), - ChanReserve: btcutil.Amount(rand.Int63()), - MinHTLC: lnwire.MilliSatoshi(rand.Int63()), - MaxAcceptedHtlcs: uint16(rand.Int31()), - CsvDelay: uint16(csvTimeoutAlice), - }, - MultiSigKey: keychain.KeyDescriptor{ - PubKey: aliceKeyPub, - }, - RevocationBasePoint: keychain.KeyDescriptor{ - PubKey: aliceKeyPub, - }, - PaymentBasePoint: keychain.KeyDescriptor{ - PubKey: aliceKeyPub, - }, - DelayBasePoint: keychain.KeyDescriptor{ - PubKey: aliceKeyPub, - }, - HtlcBasePoint: keychain.KeyDescriptor{ - PubKey: aliceKeyPub, - }, - } - bobCfg := channeldb.ChannelConfig{ - ChannelConstraints: channeldb.ChannelConstraints{ - DustLimit: bobDustLimit, - MaxPendingAmount: lnwire.MilliSatoshi(rand.Int63()), - ChanReserve: btcutil.Amount(rand.Int63()), - MinHTLC: lnwire.MilliSatoshi(rand.Int63()), - MaxAcceptedHtlcs: uint16(rand.Int31()), - CsvDelay: uint16(csvTimeoutBob), - }, - MultiSigKey: keychain.KeyDescriptor{ - PubKey: bobKeyPub, - }, - RevocationBasePoint: keychain.KeyDescriptor{ - PubKey: bobKeyPub, - }, - PaymentBasePoint: keychain.KeyDescriptor{ - PubKey: bobKeyPub, - }, - DelayBasePoint: keychain.KeyDescriptor{ - PubKey: bobKeyPub, - }, - HtlcBasePoint: keychain.KeyDescriptor{ - PubKey: bobKeyPub, - }, - } - - bobRoot, err := chainhash.NewHash(bobKeyPriv.Serialize()) - if err != nil { - return nil, nil, nil, err - } - bobPreimageProducer := shachain.NewRevocationProducer(*bobRoot) - bobFirstRevoke, err := bobPreimageProducer.AtIndex(0) - if err != nil { - return nil, nil, nil, err - } - bobCommitPoint := input.ComputeCommitmentPoint(bobFirstRevoke[:]) - - aliceRoot, err := chainhash.NewHash(aliceKeyPriv.Serialize()) - if err != nil { - return nil, nil, nil, err - } - alicePreimageProducer := shachain.NewRevocationProducer(*aliceRoot) - aliceFirstRevoke, err := alicePreimageProducer.AtIndex(0) - if err != nil { - return nil, nil, nil, err - } - aliceCommitPoint := input.ComputeCommitmentPoint(aliceFirstRevoke[:]) - - aliceCommitTx, bobCommitTx, err := lnwallet.CreateCommitmentTxns( - channelBal, channelBal, &aliceCfg, &bobCfg, aliceCommitPoint, - bobCommitPoint, *fundingTxIn, channeldb.SingleFunderTweaklessBit, - ) - if err != nil { - return nil, nil, nil, err - } - - alicePath, err := ioutil.TempDir("", "alicedb") - if err != nil { - return nil, nil, nil, err - } - - dbAlice, err := channeldb.Open(alicePath) - if err != nil { - return nil, nil, nil, err - } - - bobPath, err := ioutil.TempDir("", "bobdb") - if err != nil { - return nil, nil, nil, err - } - - dbBob, err := channeldb.Open(bobPath) - if err != nil { - return nil, nil, nil, err - } - 
- estimator := chainfee.NewStaticEstimator(12500, 0) - feePerKw, err := estimator.EstimateFeePerKW(1) - if err != nil { - return nil, nil, nil, err - } - - // TODO(roasbeef): need to factor in commit fee? - aliceCommit := channeldb.ChannelCommitment{ - CommitHeight: 0, - LocalBalance: lnwire.NewMSatFromSatoshis(channelBal), - RemoteBalance: lnwire.NewMSatFromSatoshis(channelBal), - FeePerKw: btcutil.Amount(feePerKw), - CommitFee: feePerKw.FeeForWeight(input.CommitWeight), - CommitTx: aliceCommitTx, - CommitSig: bytes.Repeat([]byte{1}, 71), - } - bobCommit := channeldb.ChannelCommitment{ - CommitHeight: 0, - LocalBalance: lnwire.NewMSatFromSatoshis(channelBal), - RemoteBalance: lnwire.NewMSatFromSatoshis(channelBal), - FeePerKw: btcutil.Amount(feePerKw), - CommitFee: feePerKw.FeeForWeight(input.CommitWeight), - CommitTx: bobCommitTx, - CommitSig: bytes.Repeat([]byte{1}, 71), - } - - var chanIDBytes [8]byte - if _, err := io.ReadFull(crand.Reader, chanIDBytes[:]); err != nil { - return nil, nil, nil, err - } - - shortChanID := lnwire.NewShortChanIDFromInt( - binary.BigEndian.Uint64(chanIDBytes[:]), - ) - - aliceChannelState := &channeldb.OpenChannel{ - LocalChanCfg: aliceCfg, - RemoteChanCfg: bobCfg, - IdentityPub: aliceKeyPub, - FundingOutpoint: *prevOut, - ShortChannelID: shortChanID, - ChanType: channeldb.SingleFunderTweaklessBit, - IsInitiator: true, - Capacity: channelCapacity, - RemoteCurrentRevocation: bobCommitPoint, - RevocationProducer: alicePreimageProducer, - RevocationStore: shachain.NewRevocationStore(), - LocalCommitment: aliceCommit, - RemoteCommitment: aliceCommit, - Db: dbAlice, - Packager: channeldb.NewChannelPackager(shortChanID), - FundingTxn: testTx, - } - bobChannelState := &channeldb.OpenChannel{ - LocalChanCfg: bobCfg, - RemoteChanCfg: aliceCfg, - IdentityPub: bobKeyPub, - FundingOutpoint: *prevOut, - ChanType: channeldb.SingleFunderTweaklessBit, - IsInitiator: false, - Capacity: channelCapacity, - RemoteCurrentRevocation: aliceCommitPoint, - RevocationProducer: bobPreimageProducer, - RevocationStore: shachain.NewRevocationStore(), - LocalCommitment: bobCommit, - RemoteCommitment: bobCommit, - Db: dbBob, - Packager: channeldb.NewChannelPackager(shortChanID), - } - - // Set custom values on the channel states. 
- updateChan(aliceChannelState, bobChannelState) - - aliceAddr := &net.TCPAddr{ - IP: net.ParseIP("127.0.0.1"), - Port: 18555, - } - - if err := aliceChannelState.SyncPending(aliceAddr, 0); err != nil { - return nil, nil, nil, err - } - - bobAddr := &net.TCPAddr{ - IP: net.ParseIP("127.0.0.1"), - Port: 18556, - } - - if err := bobChannelState.SyncPending(bobAddr, 0); err != nil { - return nil, nil, nil, err - } - - cleanUpFunc := func() { - os.RemoveAll(bobPath) - os.RemoveAll(alicePath) - } - - aliceSigner := &mockSigner{aliceKeyPriv} - bobSigner := &mockSigner{bobKeyPriv} - - alicePool := lnwallet.NewSigPool(1, aliceSigner) - channelAlice, err := lnwallet.NewLightningChannel( - aliceSigner, aliceChannelState, alicePool, - ) - if err != nil { - return nil, nil, nil, err - } - alicePool.Start() - - bobPool := lnwallet.NewSigPool(1, bobSigner) - channelBob, err := lnwallet.NewLightningChannel( - bobSigner, bobChannelState, bobPool, - ) - if err != nil { - return nil, nil, nil, err - } - bobPool.Start() - - chainIO := &mockChainIO{ - bestHeight: fundingBroadcastHeight, - } - wallet := &lnwallet.LightningWallet{ - WalletController: &mockWalletController{ - rootKey: aliceKeyPriv, - publishedTransactions: publTx, - }, - } - - _, currentHeight, err := chainIO.GetBestBlock() - if err != nil { - return nil, nil, nil, err - } - - htlcSwitch, err := htlcswitch.New(htlcswitch.Config{ - DB: dbAlice, - SwitchPackager: channeldb.NewSwitchPackager(), - Notifier: notifier, - FwdEventTicker: ticker.New( - htlcswitch.DefaultFwdEventInterval), - LogEventTicker: ticker.New( - htlcswitch.DefaultLogInterval), - AckEventTicker: ticker.New( - htlcswitch.DefaultAckInterval), - }, uint32(currentHeight)) - if err != nil { - return nil, nil, nil, err - } - if err = htlcSwitch.Start(); err != nil { - return nil, nil, nil, err - } - - nodeSignerAlice := netann.NewNodeSigner(aliceKeySigner) - - const chanActiveTimeout = time.Minute - - chanStatusMgr, err := netann.NewChanStatusManager(&netann.ChanStatusConfig{ - ChanStatusSampleInterval: 30 * time.Second, - ChanEnableTimeout: chanActiveTimeout, - ChanDisableTimeout: 2 * time.Minute, - DB: dbAlice, - Graph: dbAlice.ChannelGraph(), - MessageSigner: nodeSignerAlice, - OurPubKey: aliceKeyPub, - IsChannelActive: htlcSwitch.HasActiveLink, - ApplyChannelUpdate: func(*lnwire.ChannelUpdate) error { return nil }, - }) - if err != nil { - return nil, nil, nil, err - } - if err = chanStatusMgr.Start(); err != nil { - return nil, nil, nil, err - } - - errBuffer, err := queue.NewCircularBuffer(ErrorBufferSize) - if err != nil { - return nil, nil, nil, err - } - - var pubKey [33]byte - copy(pubKey[:], aliceKeyPub.SerializeCompressed()) - - cfgAddr := &lnwire.NetAddress{ - IdentityKey: aliceKeyPub, - Address: aliceAddr, - ChainNet: wire.SimNet, - } - - pCfg := ppeer.Config{ - Addr: cfgAddr, - PubKeyBytes: pubKey, - ErrorBuffer: errBuffer, - ChainIO: chainIO, - Switch: htlcSwitch, - - ChanActiveTimeout: chanActiveTimeout, - InterceptSwitch: htlcswitch.NewInterceptableSwitch(htlcSwitch), - - ChannelDB: dbAlice, - FeeEstimator: estimator, - Wallet: wallet, - ChainNotifier: notifier, - ChanStatusMgr: chanStatusMgr, - DisconnectPeer: func(b *btcec.PublicKey) error { return nil }, - } - - alicePeer := newPeer(pCfg) - - chanID := lnwire.NewChanIDFromOutPoint(channelAlice.ChannelPoint()) - alicePeer.activeChannels[chanID] = channelAlice - - alicePeer.wg.Add(1) - go alicePeer.channelManager() - - return alicePeer, channelBob, cleanUpFunc, nil -}
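
Editor's note (not part of the patch series): the sketch below illustrates the exported surface this refactor introduces, namely populating a peer.Config and handing it to peer.NewBrontide, as server.go's peerConnected now does. Everything concrete in it (the net.Pipe transport, the constant throwaway key, the helper name newMinimalBrontide) is an illustrative assumption rather than code from the series, and a peer wired this sparsely is suitable only for construction-level tests; a functional peer also needs Switch, ChannelDB, ChainIO, and the other Config fields populated.

package lnd_test

import (
	"bytes"
	"net"

	"github.com/btcsuite/btcd/btcec"
	"github.com/btcsuite/btcd/wire"

	"github.com/lightningnetwork/lnd/lnwire"
	"github.com/lightningnetwork/lnd/peer"
)

// newMinimalBrontide builds a peer.Brontide from a minimal, hypothetical
// configuration. It mirrors the wiring done in server.go's peerConnected,
// but with stub values that are only appropriate in tests.
func newMinimalBrontide() *peer.Brontide {
	// An in-memory pipe stands in for the encrypted transport that a
	// real peer receives from the connection manager.
	localConn, _ := net.Pipe()

	// A fixed, throwaway identity key; never reuse a constant key
	// outside of tests.
	_, remotePub := btcec.PrivKeyFromBytes(
		btcec.S256(), bytes.Repeat([]byte{0x01}, 32),
	)

	var pubKeyBytes [33]byte
	copy(pubKeyBytes[:], remotePub.SerializeCompressed())

	pCfg := peer.Config{
		Conn: localConn,
		Addr: &lnwire.NetAddress{
			IdentityKey: remotePub,
			Address:     localConn.RemoteAddr(),
			ChainNet:    wire.SimNet,
		},
		PubKeyBytes: pubKeyBytes,
		Inbound:     true,
		// Switch, ChannelDB, ChainIO, and the remaining fields are
		// omitted here; they must be set before the peer is started.
	}

	return peer.NewBrontide(pCfg)
}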