Merge pull request #2004 from wpaulino/spend-tx-notifier

chainntnfs: handle spend notifications within TxConfNotifier
Olaoluwa Osuntokun 2018-10-30 20:24:17 -07:00 committed by GitHub
commit 9f0b008773
13 changed files with 4005 additions and 2774 deletions

View File

@ -12,7 +12,6 @@ import (
"github.com/btcsuite/btcd/wire"
"github.com/btcsuite/btcutil"
"github.com/btcsuite/btcwallet/chain"
"github.com/btcsuite/btcwallet/wtxmgr"
"github.com/lightningnetwork/lnd/chainntnfs"
"github.com/lightningnetwork/lnd/queue"
)
@ -69,9 +68,7 @@ type BitcoindNotifier struct {
notificationCancels chan interface{}
notificationRegistry chan interface{}
spendNotifications map[wire.OutPoint]map[uint64]*spendNotification
txConfNotifier *chainntnfs.TxConfNotifier
txNotifier *chainntnfs.TxNotifier
blockEpochClients map[uint64]*blockEpochRegistration
@ -107,8 +104,6 @@ func New(chainConn *chain.BitcoindConn, spendHintCache chainntnfs.SpendHintCache
blockEpochClients: make(map[uint64]*blockEpochRegistration),
spendNotifications: make(map[wire.OutPoint]map[uint64]*spendNotification),
spendHintCache: spendHintCache,
confirmHintCache: confirmHintCache,
@ -142,8 +137,9 @@ func (b *BitcoindNotifier) Start() error {
return err
}
b.txConfNotifier = chainntnfs.NewTxConfNotifier(
b.txNotifier = chainntnfs.NewTxNotifier(
uint32(currentHeight), reorgSafetyLimit, b.confirmHintCache,
b.spendHintCache,
)
b.bestBlock = chainntnfs.BlockEpoch{
@ -173,18 +169,13 @@ func (b *BitcoindNotifier) Stop() error {
// Notify all pending clients of our shutdown by closing the related
// notification channels.
for _, spendClients := range b.spendNotifications {
for _, spendClient := range spendClients {
close(spendClient.spendChan)
}
}
for _, epochClient := range b.blockEpochClients {
close(epochClient.cancelChan)
epochClient.wg.Wait()
close(epochClient.epochChan)
}
b.txConfNotifier.TearDown()
b.txNotifier.TearDown()
return nil
}
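Taken together, the Start and Stop hunks above reduce the notifier's bookkeeping to a single object. A minimal lifecycle sketch, assuming currentHeight and both hint caches are in scope:

// Lifecycle sketch: one TxNotifier now owns both conf and spend state.
// currentHeight, confirmHintCache, and spendHintCache are assumed to be
// in scope; reorgSafetyLimit is the package-level constant used above.
txNotifier := chainntnfs.NewTxNotifier(
	uint32(currentHeight), reorgSafetyLimit, confirmHintCache,
	spendHintCache,
)

// ... dispatch notifications while running ...

// On Stop, TearDown closes all registered client channels, which is why
// the manual spendNotifications cleanup above could be deleted.
txNotifier.TearDown()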
@ -204,19 +195,6 @@ out:
select {
case cancelMsg := <-b.notificationCancels:
switch msg := cancelMsg.(type) {
case *spendCancel:
chainntnfs.Log.Infof("Cancelling spend "+
"notification for out_point=%v, "+
"spend_id=%v", msg.op, msg.spendID)
// Before we attempt to close the spendChan,
// ensure that the notification hasn't already
// been dispatched.
if outPointClients, ok := b.spendNotifications[msg.op]; ok {
close(outPointClients[msg.spendID].spendChan)
delete(b.spendNotifications[msg.op], msg.spendID)
}
case *epochCancel:
chainntnfs.Log.Infof("Cancelling epoch "+
"notification, epoch_id=%v", msg.epochID)
@ -244,21 +222,13 @@ out:
}
case registerMsg := <-b.notificationRegistry:
switch msg := registerMsg.(type) {
case *spendNotification:
chainntnfs.Log.Infof("New spend subscription: "+
"utxo=%v", msg.targetOutpoint)
op := *msg.targetOutpoint
if _, ok := b.spendNotifications[op]; !ok {
b.spendNotifications[op] = make(map[uint64]*spendNotification)
}
b.spendNotifications[op][msg.spendID] = msg
case *chainntnfs.HistoricalConfDispatch:
// Look up whether the transaction is already
// included in the active chain. We'll do this
// in a goroutine to prevent blocking
// potentially long rescans.
//
// TODO(wilmer): add retry logic if rescan fails?
b.wg.Add(1)
go func() {
defer b.wg.Done()
@ -278,7 +248,7 @@ out:
// begin safely updating the height hint
// cache at tip, since any pending
// rescans have now completed.
err = b.txConfNotifier.UpdateConfDetails(
err = b.txNotifier.UpdateConfDetails(
*msg.TxID, confDetails,
)
if err != nil {
@ -286,6 +256,25 @@ out:
}
}()
case *chainntnfs.HistoricalSpendDispatch:
// In order to ensure we don't block the caller
// on what may be a long rescan, we'll launch a
// goroutine to do so in the background.
//
// TODO(wilmer): add retry logic if rescan fails?
b.wg.Add(1)
go func() {
defer b.wg.Done()
err := b.dispatchSpendDetailsManually(msg)
if err != nil {
chainntnfs.Log.Errorf("Rescan to "+
"determine the spend "+
"details of %v failed: %v",
msg.OutPoint, err)
}
}()
case *blockEpochRegistration:
chainntnfs.Log.Infof("New block epoch subscription")
b.blockEpochClients[msg.epochID] = msg
@ -305,9 +294,6 @@ out:
}
}
msg.errorChan <- nil
case chain.RelevantTx:
b.handleRelevantTx(msg, b.bestBlock.Height)
}
case ntfn := <-b.chainConn.Notifications():
@ -330,7 +316,7 @@ out:
newBestBlock, missedBlocks, err :=
chainntnfs.HandleMissedBlocks(
b.chainConn,
b.txConfNotifier,
b.txNotifier,
b.bestBlock, item.Height,
true,
)
@ -369,7 +355,7 @@ out:
}
newBestBlock, err := chainntnfs.RewindChain(
b.chainConn, b.txConfNotifier,
b.chainConn, b.txNotifier,
b.bestBlock, item.Height-1,
)
if err != nil {
@ -383,7 +369,23 @@ out:
b.bestBlock = newBestBlock
case chain.RelevantTx:
b.handleRelevantTx(item, b.bestBlock.Height)
// We only care about notifying on confirmed
// spends, so if this is a mempool spend, we can
// ignore it and wait for the spend to appear
// on-chain.
if item.Block == nil {
continue
}
tx := &item.TxRecord.MsgTx
err := b.txNotifier.ProcessRelevantSpendTx(
tx, item.Block.Height,
)
if err != nil {
chainntnfs.Log.Errorf("Unable to "+
"process transaction %v: %v",
tx.TxHash(), err)
}
}
case <-b.quit:
@ -393,55 +395,6 @@ out:
b.wg.Done()
}
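The removed handleRelevantTx below is the logic that ProcessRelevantSpendTx now subsumes. As a rough sketch (mirroring the removed code rather than the TxNotifier's actual internals), the per-transaction matching looks like this:

// Rough sketch of per-transaction spend matching, mirroring the removed
// handleRelevantTx below; the real logic now lives inside TxNotifier.
// Uses github.com/btcsuite/btcd/wire and lnd/chainntnfs.
func matchSpends(tx *wire.MsgTx, height int32,
	watched map[wire.OutPoint]struct{}) []*chainntnfs.SpendDetail {

	txHash := tx.TxHash()
	var details []*chainntnfs.SpendDetail
	for i, txIn := range tx.TxIn {
		prevOut := txIn.PreviousOutPoint
		if _, ok := watched[prevOut]; !ok {
			continue
		}
		details = append(details, &chainntnfs.SpendDetail{
			SpentOutPoint:     &prevOut,
			SpenderTxHash:     &txHash,
			SpendingTx:        tx,
			SpenderInputIndex: uint32(i),
			SpendingHeight:    height,
		})
	}
	return details
}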
// handleRelevantTx notifies any clients of a relevant transaction.
func (b *BitcoindNotifier) handleRelevantTx(tx chain.RelevantTx, bestHeight int32) {
msgTx := tx.TxRecord.MsgTx
// We only care about notifying on confirmed spends, so in case this is
// a mempool spend, we can continue, and wait for the spend to appear
// in chain.
if tx.Block == nil {
return
}
// First, check if this transaction spends an output
// that has an existing spend notification for it.
for i, txIn := range msgTx.TxIn {
prevOut := txIn.PreviousOutPoint
// If this transaction indeed does spend an
// output which we have a registered
// notification for, then create a spend
// summary, finally sending off the details to
// the notification subscriber.
if clients, ok := b.spendNotifications[prevOut]; ok {
spenderSha := msgTx.TxHash()
spendDetails := &chainntnfs.SpendDetail{
SpentOutPoint: &prevOut,
SpenderTxHash: &spenderSha,
SpendingTx: &msgTx,
SpenderInputIndex: uint32(i),
}
spendDetails.SpendingHeight = tx.Block.Height
for _, ntfn := range clients {
chainntnfs.Log.Infof("Dispatching confirmed "+
"spend notification for outpoint=%v "+
"at height %v", ntfn.targetOutpoint,
spendDetails.SpendingHeight)
ntfn.spendChan <- spendDetails
// Close spendChan to ensure that any calls to
// Cancel will not block. This is safe to do
// since the channel is buffered, and the
// message can still be read by the receiver.
close(ntfn.spendChan)
}
delete(b.spendNotifications, prevOut)
}
}
}
// historicalConfDetails looks up whether a transaction is already included in a
// block in the active chain and, if so, returns details about the confirmation.
func (b *BitcoindNotifier) historicalConfDetails(txid *chainhash.Hash,
@ -621,7 +574,7 @@ func (b *BitcoindNotifier) handleBlockConnected(block chainntnfs.BlockEpoch) err
}
txns := btcutil.NewBlock(rawBlock).Transactions()
err = b.txConfNotifier.ConnectTip(
err = b.txNotifier.ConnectTip(
block.Hash, uint32(block.Height), txns)
if err != nil {
return fmt.Errorf("unable to connect tip: %v", err)
@ -630,27 +583,6 @@ func (b *BitcoindNotifier) handleBlockConnected(block chainntnfs.BlockEpoch) err
chainntnfs.Log.Infof("New block: height=%v, sha=%v", block.Height,
block.Hash)
// Finally, we'll update the spend height hint for all of our watched
// outpoints that have not been spent yet. This is safe to do as we do
// not watch already spent outpoints for spend notifications.
ops := make([]wire.OutPoint, 0, len(b.spendNotifications))
for op := range b.spendNotifications {
ops = append(ops, op)
}
if len(ops) > 0 {
err := b.spendHintCache.CommitSpendHint(
uint32(block.Height), ops...,
)
if err != nil {
// The error is not fatal since we are connecting a
// block, and advancing the spend hint is an optimistic
// optimization.
chainntnfs.Log.Errorf("Unable to update spend hint to "+
"%d for %v: %v", block.Height, ops, err)
}
}
// We want to set the best block before dispatching notifications so
// if any subscribers make queries based on their received block epoch,
// our state is fully updated in time.
@ -687,28 +619,6 @@ func (b *BitcoindNotifier) notifyBlockEpochClient(epochClient *blockEpochRegistr
}
}
// spendNotification couples a target outpoint along with the channel used for
// notifications once a spend of the outpoint has been detected.
type spendNotification struct {
targetOutpoint *wire.OutPoint
spendChan chan *chainntnfs.SpendDetail
spendID uint64
heightHint uint32
}
// spendCancel is a message sent to the BitcoindNotifier when a client wishes
// to cancel an outstanding spend notification that has yet to be dispatched.
type spendCancel struct {
// op is the target outpoint of the notification to be cancelled.
op wire.OutPoint
// spendID the ID of the notification to cancel.
spendID uint64
}
// RegisterSpendNtfn registers an intent to be notified once the target
// outpoint has been spent by a transaction on-chain. Once a spend of the target
// outpoint has been detected, the details of the spending event will be sent
@ -717,167 +627,120 @@ type spendCancel struct {
func (b *BitcoindNotifier) RegisterSpendNtfn(outpoint *wire.OutPoint,
pkScript []byte, heightHint uint32) (*chainntnfs.SpendEvent, error) {
// Before proceeding to register the notification, we'll query our
// height hint cache to determine whether a better one exists.
if hint, err := b.spendHintCache.QuerySpendHint(*outpoint); err == nil {
if hint > heightHint {
chainntnfs.Log.Debugf("Using height hint %d retrieved "+
"from cache for %v", hint, outpoint)
heightHint = hint
}
// First, we'll construct a spend notification request and hand it off
// to the txNotifier.
spendID := atomic.AddUint64(&b.spendClientCounter, 1)
cancel := func() {
b.txNotifier.CancelSpend(*outpoint, spendID)
}
// Construct a notification request for the outpoint and send it to the
// main event loop.
ntfn := &spendNotification{
targetOutpoint: outpoint,
spendChan: make(chan *chainntnfs.SpendDetail, 1),
spendID: atomic.AddUint64(&b.spendClientCounter, 1),
ntfn := &chainntnfs.SpendNtfn{
SpendID: spendID,
OutPoint: *outpoint,
PkScript: pkScript,
Event: chainntnfs.NewSpendEvent(cancel),
HeightHint: heightHint,
}
select {
case <-b.quit:
return nil, ErrChainNotifierShuttingDown
case b.notificationRegistry <- ntfn:
}
if err := b.chainConn.NotifySpent([]*wire.OutPoint{outpoint}); err != nil {
return nil, err
}
// The following conditional checks to ensure that when a spend
// notification is registered, the output hasn't already been spent. If
// the output is no longer in the UTXO set, the chain will be rescanned
// from the point where the output was added. The rescan will dispatch
// the notification.
txOut, err := b.chainConn.GetTxOut(&outpoint.Hash, outpoint.Index, true)
historicalDispatch, err := b.txNotifier.RegisterSpend(ntfn)
if err != nil {
return nil, err
}
// If the output is unspent, then we'll write it to the cache with the
// given height hint. This allows us to increase the height hint as the
// chain extends and the output remains unspent.
// If the txNotifier didn't return any details to perform a historical
// scan of the chain, then we can return early as there's nothing left
// for us to do.
if historicalDispatch == nil {
return ntfn.Event, nil
}
// We'll then request the backend to notify us when it has detected the
// outpoint as spent.
if err := b.chainConn.NotifySpent([]*wire.OutPoint{outpoint}); err != nil {
return nil, err
}
// In addition to the check above, we'll also check the backend's UTXO
// set to determine whether the outpoint has been spent. If it hasn't,
// we can return to the caller as well.
txOut, err := b.chainConn.GetTxOut(&outpoint.Hash, outpoint.Index, true)
if err != nil {
return nil, err
}
if txOut != nil {
err := b.spendHintCache.CommitSpendHint(heightHint, *outpoint)
// We'll let the txNotifier know the outpoint is still unspent
// in order to begin updating its spend hint.
err := b.txNotifier.UpdateSpendDetails(*outpoint, nil)
if err != nil {
// The error is not fatal, so we should not return an
// error to the caller.
chainntnfs.Log.Errorf("Unable to update spend hint to "+
"%d for %v: %v", heightHint, *outpoint, err)
}
} else {
// Otherwise, we'll determine when the output was spent.
//
// First, we'll attempt to retrieve the transaction's block hash
// using the backend's transaction index.
tx, err := b.chainConn.GetRawTransactionVerbose(&outpoint.Hash)
if err != nil {
// Avoid returning an error if the transaction was not
// found to proceed with fallback methods.
jsonErr, ok := err.(*btcjson.RPCError)
if !ok || jsonErr.Code != btcjson.ErrRPCNoTxInfo {
return nil, fmt.Errorf("unable to query for "+
"txid %v: %v", outpoint.Hash, err)
}
return nil, err
}
var blockHash *chainhash.Hash
if tx != nil && tx.BlockHash != "" {
// If we're able to retrieve a valid block hash from the
// transaction, then we'll use it as our rescan starting
// point.
blockHash, err = chainhash.NewHashFromStr(tx.BlockHash)
if err != nil {
return nil, err
}
} else {
// Otherwise, we'll attempt to retrieve the hash for the
// block at the heightHint.
blockHash, err = b.chainConn.GetBlockHash(
int64(heightHint),
)
if err != nil {
return nil, fmt.Errorf("unable to retrieve "+
"hash for block with height %d: %v",
heightHint, err)
}
}
return ntfn.Event, nil
}
// We'll only scan old blocks if the transaction has actually
// been included within a block. Otherwise, we'll encounter an
// error when scanning for blocks. This can happen in the case
// of a race condition, wherein the output itself is unspent,
// and only arrives in the mempool after the gettxout call.
if blockHash != nil {
// Rescan all the blocks until the current one.
startHeight, err := b.chainConn.GetBlockHeight(
blockHash,
)
if err != nil {
return nil, err
}
_, endHeight, err := b.chainConn.GetBestBlock()
if err != nil {
return nil, err
}
// In order to ensure we don't block the caller on what
// may be a long rescan, we'll launch a goroutine to do
// so in the background.
b.wg.Add(1)
go func() {
defer b.wg.Done()
err := b.dispatchSpendDetailsManually(
*outpoint, startHeight, endHeight,
)
if err != nil {
chainntnfs.Log.Errorf("Rescan for spend "+
"notification txout(%x) "+
"failed: %v", outpoint, err)
}
}()
// Otherwise, we'll determine when the output was spent by scanning the
// chain. We'll begin by determining where to start our historical
// rescan.
//
// As a minimal optimization, we'll query the backend's transaction
// index (if enabled) to determine if we have a better rescan starting
// height. We can do this as the GetRawTransaction call will return the
// hash of the block it was included in within the chain.
tx, err := b.chainConn.GetRawTransactionVerbose(&outpoint.Hash)
if err != nil {
// Avoid returning an error if the transaction was not found to
// proceed with fallback methods.
jsonErr, ok := err.(*btcjson.RPCError)
if !ok || jsonErr.Code != btcjson.ErrRPCNoTxInfo {
return nil, fmt.Errorf("unable to query for "+
"txid %v: %v", outpoint.Hash, err)
}
}
return &chainntnfs.SpendEvent{
Spend: ntfn.spendChan,
Cancel: func() {
cancel := &spendCancel{
op: *outpoint,
spendID: ntfn.spendID,
}
// If the transaction index was enabled, we'll use the block's hash to
// retrieve its height and check whether it provides a better starting
// point for our rescan.
if tx != nil {
// If the transaction containing the outpoint hasn't confirmed
// on-chain, then there's no need to perform a rescan.
if tx.BlockHash == "" {
return ntfn.Event, nil
}
// Submit spend cancellation to notification dispatcher.
select {
case b.notificationCancels <- cancel:
// Cancellation is being handled, drain the
// spend chan until it is closed before yielding
// to the caller.
for {
select {
case _, ok := <-ntfn.spendChan:
if !ok {
return
}
case <-b.quit:
return
}
}
case <-b.quit:
}
},
}, nil
blockHash, err := chainhash.NewHashFromStr(tx.BlockHash)
if err != nil {
return nil, err
}
blockHeight, err := b.chainConn.GetBlockHeight(blockHash)
if err != nil {
return nil, err
}
if uint32(blockHeight) > historicalDispatch.StartHeight {
historicalDispatch.StartHeight = uint32(blockHeight)
}
}
// Now that we've determined the starting point of our rescan, we can
// dispatch it.
select {
case b.notificationRegistry <- historicalDispatch:
return ntfn.Event, nil
case <-b.quit:
return nil, ErrChainNotifierShuttingDown
}
}
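From the client's side, the event returned above now carries a Reorg channel in addition to Spend (see the interface changes later in this diff). A minimal consumer sketch, assuming notifier, outpoint, pkScript, and heightHint are in scope:

// Consumer sketch: both channels are buffered by NewSpendEvent, so the
// notifier never blocks on a slow client.
spendEvent, err := notifier.RegisterSpendNtfn(outpoint, pkScript, heightHint)
if err != nil {
	return err
}
defer spendEvent.Cancel()

select {
case details := <-spendEvent.Spend:
	fmt.Printf("outpoint %v spent by %v at height %d\n",
		details.SpentOutPoint, details.SpenderTxHash,
		details.SpendingHeight)
case <-spendEvent.Reorg:
	// The spending transaction was reorged out. As the testReorgSpend
	// case below exercises, a fresh Spend notification follows once
	// the outpoint is spent again.
}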
// dispatchSpendDetailsManually attempts to manually scan the chain within the
// given height range for a transaction that spends the given outpoint. If one
// is found, its spending details are sent to the notifier dispatcher, which
// will then dispatch the notification to all of its clients.
func (b *BitcoindNotifier) dispatchSpendDetailsManually(op wire.OutPoint,
startHeight, endHeight int32) error {
func (b *BitcoindNotifier) dispatchSpendDetailsManually(
historicalDispatchDetails *chainntnfs.HistoricalSpendDispatch) error {
op := historicalDispatchDetails.OutPoint
startHeight := historicalDispatchDetails.StartHeight
endHeight := historicalDispatchDetails.EndHeight
// Begin scanning blocks at every height to determine if the outpoint
// was spent.
@ -890,6 +753,7 @@ func (b *BitcoindNotifier) dispatchSpendDetailsManually(op wire.OutPoint,
default:
}
// First, we'll fetch the block for the current height.
blockHash, err := b.chainConn.GetBlockHash(int64(height))
if err != nil {
return fmt.Errorf("unable to retrieve hash for block "+
@ -901,38 +765,30 @@ func (b *BitcoindNotifier) dispatchSpendDetailsManually(op wire.OutPoint,
"%v: %v", blockHash, err)
}
// Then, we'll manually go over every transaction in it and
// determine whether it spends the outpoint in question.
for _, tx := range block.Transactions {
for _, in := range tx.TxIn {
if in.PreviousOutPoint != op {
for i, txIn := range tx.TxIn {
if txIn.PreviousOutPoint != op {
continue
}
// If this transaction input spends the
// outpoint, we'll gather the details of the
// spending transaction and dispatch a spend
// notification to our clients.
relTx := chain.RelevantTx{
TxRecord: &wtxmgr.TxRecord{
MsgTx: *tx,
Hash: tx.TxHash(),
Received: block.Header.Timestamp,
},
Block: &wtxmgr.BlockMeta{
Block: wtxmgr.Block{
Hash: *blockHash,
Height: height,
},
Time: block.Header.Timestamp,
},
// If it does, we'll construct its spend details
// and hand them over to the TxNotifier so that
// it can properly notify its registered
// clients.
txHash := tx.TxHash()
details := &chainntnfs.SpendDetail{
SpentOutPoint: &op,
SpenderTxHash: &txHash,
SpendingTx: tx,
SpenderInputIndex: uint32(i),
SpendingHeight: int32(height),
}
select {
case b.notificationRegistry <- relTx:
case <-b.quit:
return ErrChainNotifierShuttingDown
}
return nil
return b.txNotifier.UpdateSpendDetails(
op, details,
)
}
}
}
@ -959,11 +815,11 @@ func (b *BitcoindNotifier) RegisterConfirmationsNtfn(txid *chainhash.Hash,
chainntnfs.Log.Infof("New confirmation subscription: "+
"txid=%v, numconfs=%v", txid, numConfs)
// Register the conf notification with txconfnotifier. A non-nil value
// Register the conf notification with the TxNotifier. A non-nil value
// for `dispatch` will be returned if we are required to perform a
// manual scan for the confirmation. Otherwise the notifier will begin
// watching at tip for the transaction to confirm.
dispatch, err := b.txConfNotifier.Register(ntfn)
dispatch, err := b.txNotifier.RegisterConf(ntfn)
if err != nil {
return nil, err
}
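The same register-then-dispatch shape applies to confirmations. A sketch of the flow this hunk describes, assuming ConfNtfn exposes its ConfirmationEvent as an Event field (as SpendNtfn does):

// Sketch of the conf registration flow, assuming ntfn is a constructed
// *chainntnfs.ConfNtfn with an Event field, as SpendNtfn has.
dispatch, err := b.txNotifier.RegisterConf(ntfn)
if err != nil {
	return nil, err
}
// nil dispatch: the TxNotifier is already watching at tip.
if dispatch == nil {
	return ntfn.Event, nil
}
// Otherwise, hand the historical scan off to the main event loop.
select {
case b.notificationRegistry <- dispatch:
	return ntfn.Event, nil
case <-b.quit:
	return nil, ErrChainNotifierShuttingDown
}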

View File

@ -12,12 +12,11 @@ import (
)
// UnsafeStart starts the notifier with a specified best height and optional
// best hash. Its bestBlock and txConfNotifier are initialized with
// bestHeight and optionally bestHash. The parameter generateBlocks is
// necessary for the bitcoind notifier to ensure we drain all notifications up
// to syncHeight, since if they are generated ahead of UnsafeStart the chainConn
// may start up with an outdated best block and miss sending ntfns. Used for
// testing.
// best hash. Its bestBlock and txNotifier are initialized with bestHeight and
// optionally bestHash. The parameter generateBlocks is necessary for the
// bitcoind notifier to ensure we drain all notifications up to syncHeight,
// since if they are generated ahead of UnsafeStart the chainConn may start up
// with an outdated best block and miss sending ntfns. Used for testing.
func (b *BitcoindNotifier) UnsafeStart(bestHeight int32, bestHash *chainhash.Hash,
syncHeight int32, generateBlocks func() error) error {
@ -30,8 +29,9 @@ func (b *BitcoindNotifier) UnsafeStart(bestHeight int32, bestHash *chainhash.Has
return err
}
b.txConfNotifier = chainntnfs.NewTxConfNotifier(
b.txNotifier = chainntnfs.NewTxNotifier(
uint32(bestHeight), reorgSafetyLimit, b.confirmHintCache,
b.spendHintCache,
)
if generateBlocks != nil {

View File

@ -74,9 +74,7 @@ type BtcdNotifier struct {
notificationCancels chan interface{}
notificationRegistry chan interface{}
spendNotifications map[wire.OutPoint]map[uint64]*spendNotification
txConfNotifier *chainntnfs.TxConfNotifier
txNotifier *chainntnfs.TxNotifier
blockEpochClients map[uint64]*blockEpochRegistration
@ -114,8 +112,6 @@ func New(config *rpcclient.ConnConfig, spendHintCache chainntnfs.SpendHintCache,
blockEpochClients: make(map[uint64]*blockEpochRegistration),
spendNotifications: make(map[wire.OutPoint]map[uint64]*spendNotification),
chainUpdates: queue.NewConcurrentQueue(10),
txUpdates: queue.NewConcurrentQueue(10),
@ -166,8 +162,9 @@ func (b *BtcdNotifier) Start() error {
return err
}
b.txConfNotifier = chainntnfs.NewTxConfNotifier(
b.txNotifier = chainntnfs.NewTxNotifier(
uint32(currentHeight), reorgSafetyLimit, b.confirmHintCache,
b.spendHintCache,
)
b.bestBlock = chainntnfs.BlockEpoch{
@ -203,18 +200,13 @@ func (b *BtcdNotifier) Stop() error {
// Notify all pending clients of our shutdown by closing the related
// notification channels.
for _, spendClients := range b.spendNotifications {
for _, spendClient := range spendClients {
close(spendClient.spendChan)
}
}
for _, epochClient := range b.blockEpochClients {
close(epochClient.cancelChan)
epochClient.wg.Wait()
close(epochClient.epochChan)
}
b.txConfNotifier.TearDown()
b.txNotifier.TearDown()
return nil
}
@ -275,19 +267,6 @@ out:
select {
case cancelMsg := <-b.notificationCancels:
switch msg := cancelMsg.(type) {
case *spendCancel:
chainntnfs.Log.Infof("Cancelling spend "+
"notification for out_point=%v, "+
"spend_id=%v", msg.op, msg.spendID)
// Before we attempt to close the spendChan,
// ensure that the notification hasn't already
// been dispatched.
if outPointClients, ok := b.spendNotifications[msg.op]; ok {
close(outPointClients[msg.spendID].spendChan)
delete(b.spendNotifications[msg.op], msg.spendID)
}
case *epochCancel:
chainntnfs.Log.Infof("Cancelling epoch "+
"notification, epoch_id=%v", msg.epochID)
@ -314,21 +293,13 @@ out:
}
case registerMsg := <-b.notificationRegistry:
switch msg := registerMsg.(type) {
case *spendNotification:
chainntnfs.Log.Infof("New spend subscription: "+
"utxo=%v", msg.targetOutpoint)
op := *msg.targetOutpoint
if _, ok := b.spendNotifications[op]; !ok {
b.spendNotifications[op] = make(map[uint64]*spendNotification)
}
b.spendNotifications[op][msg.spendID] = msg
case *chainntnfs.HistoricalConfDispatch:
// Look up whether the transaction is already
// included in the active chain. We'll do this
// in a goroutine to prevent blocking
// potentially long rescans.
//
// TODO(wilmer): add retry logic if rescan fails?
b.wg.Add(1)
go func() {
defer b.wg.Done()
@ -348,7 +319,7 @@ out:
// begin safely updating the height hint
// cache at tip, since any pending
// rescans have now completed.
err = b.txConfNotifier.UpdateConfDetails(
err = b.txNotifier.UpdateConfDetails(
*msg.TxID, confDetails,
)
if err != nil {
@ -398,7 +369,7 @@ out:
newBestBlock, missedBlocks, err :=
chainntnfs.HandleMissedBlocks(
b.chainConn,
b.txConfNotifier,
b.txNotifier,
b.bestBlock,
update.blockHeight,
true,
@ -436,7 +407,7 @@ out:
}
newBestBlock, err := chainntnfs.RewindChain(
b.chainConn, b.txConfNotifier, b.bestBlock,
b.chainConn, b.txNotifier, b.bestBlock,
update.blockHeight-1,
)
if err != nil {
@ -449,60 +420,23 @@ out:
// partially completed.
b.bestBlock = newBestBlock
// NOTE: we currently only use txUpdates for mempool spends and
// rescan spends. It might get removed entirely in the future.
case item := <-b.txUpdates.ChanOut():
newSpend := item.(*txUpdate)
// We only care about notifying on confirmed spends, so
// in case this is a mempool spend, we can continue,
// and wait for the spend to appear in chain.
// if this is a mempool spend, we can ignore it and wait
// for the spend to appear on-chain.
if newSpend.details == nil {
continue
}
spendingTx := newSpend.tx
// First, check if this transaction spends an output
// that has an existing spend notification for it.
for i, txIn := range spendingTx.MsgTx().TxIn {
prevOut := txIn.PreviousOutPoint
// If this transaction indeed does spend an
// output which we have a registered
// notification for, then create a spend
// summary, finally sending off the details to
// the notification subscriber.
if clients, ok := b.spendNotifications[prevOut]; ok {
spenderSha := newSpend.tx.Hash()
spendDetails := &chainntnfs.SpendDetail{
SpentOutPoint: &prevOut,
SpenderTxHash: spenderSha,
SpendingTx: spendingTx.MsgTx(),
SpenderInputIndex: uint32(i),
}
spendDetails.SpendingHeight = newSpend.details.Height
for _, ntfn := range clients {
chainntnfs.Log.Infof("Dispatching "+
"confirmed spend "+
"notification for "+
"outpoint=%v at height %v",
ntfn.targetOutpoint,
spendDetails.SpendingHeight)
ntfn.spendChan <- spendDetails
// Close spendChan to ensure
// that any calls to Cancel
// will not block. This is safe
// to do since the channel is
// buffered, and the message
// can still be read by the
// receiver.
close(ntfn.spendChan)
}
delete(b.spendNotifications, prevOut)
}
tx := newSpend.tx.MsgTx()
err := b.txNotifier.ProcessRelevantSpendTx(
tx, newSpend.details.Height,
)
if err != nil {
chainntnfs.Log.Errorf("Unable to process "+
"transaction %v: %v", tx.TxHash(), err)
}
case <-b.quit:
@ -703,7 +637,7 @@ func (b *BtcdNotifier) handleBlockConnected(epoch chainntnfs.BlockEpoch) error {
connect: true,
}
err = b.txConfNotifier.ConnectTip(
err = b.txNotifier.ConnectTip(
&newBlock.hash, newBlock.height, newBlock.txns,
)
if err != nil {
@ -713,94 +647,12 @@ func (b *BtcdNotifier) handleBlockConnected(epoch chainntnfs.BlockEpoch) error {
chainntnfs.Log.Infof("New block: height=%v, sha=%v", epoch.Height,
epoch.Hash)
// Define a helper struct for coalescing the spend notifications we will
// dispatch after trying to commit the spend hints.
type spendNtfnBatch struct {
details *chainntnfs.SpendDetail
clients map[uint64]*spendNotification
}
// Scan over the list of relevant transactions and possibly dispatch
// notifications for spends.
spendBatches := make(map[wire.OutPoint]spendNtfnBatch)
for _, tx := range newBlock.txns {
mtx := tx.MsgTx()
txSha := mtx.TxHash()
for i, txIn := range mtx.TxIn {
prevOut := txIn.PreviousOutPoint
// If this transaction indeed does spend an output which
// we have a registered notification for, then create a
// spend summary, finally sending off the details to the
// notification subscriber.
clients, ok := b.spendNotifications[prevOut]
if !ok {
continue
}
delete(b.spendNotifications, prevOut)
spendDetails := &chainntnfs.SpendDetail{
SpentOutPoint: &prevOut,
SpenderTxHash: &txSha,
SpendingTx: mtx,
SpenderInputIndex: uint32(i),
SpendingHeight: int32(newBlock.height),
}
spendBatches[prevOut] = spendNtfnBatch{
details: spendDetails,
clients: clients,
}
}
}
// Finally, we'll update the spend height hint for all of our watched
// outpoints that have not been spent yet. This is safe to do as we do
// not watch already spent outpoints for spend notifications.
ops := make([]wire.OutPoint, 0, len(b.spendNotifications))
for op := range b.spendNotifications {
ops = append(ops, op)
}
if len(ops) > 0 {
err := b.spendHintCache.CommitSpendHint(
uint32(epoch.Height), ops...,
)
if err != nil {
// The error is not fatal since we are connecting a
// block, and advancing the spend hint is an optimistic
// optimization.
chainntnfs.Log.Errorf("Unable to update spend hint to "+
"%d for %v: %v", epoch.Height, ops, err)
}
}
// We want to set the best block before dispatching notifications
// so if any subscribers make queries based on their received
// block epoch, our state is fully updated in time.
// We want to set the best block before dispatching notifications so if
// any subscribers make queries based on their received block epoch, our
// state is fully updated in time.
b.bestBlock = epoch
// Next we'll notify any subscribed clients of the block.
b.notifyBlockEpochs(int32(newBlock.height), &newBlock.hash)
// Finally, send off the spend details to the notification subscribers.
for _, batch := range spendBatches {
for _, ntfn := range batch.clients {
chainntnfs.Log.Infof("Dispatching spend "+
"notification for outpoint=%v",
ntfn.targetOutpoint)
ntfn.spendChan <- batch.details
// Close spendChan to ensure that any calls to
// Cancel will not block. This is safe to do
// since the channel is buffered, and the
// message can still be read by the receiver.
close(ntfn.spendChan)
}
}
return nil
}
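After this change, the btcd handleBlockConnected collapses to the same three steps as the bitcoind version: extend the TxNotifier, update the best block, then fan out epochs. A condensed sketch:

// Condensed sketch of handleBlockConnected after this diff: ConnectTip
// performs the spend matching and hint updates that the removed
// batching code above handled manually.
err := b.txNotifier.ConnectTip(
	&newBlock.hash, newBlock.height, newBlock.txns,
)
if err != nil {
	return fmt.Errorf("unable to connect tip: %v", err)
}

// Set the best block before dispatching so subscribers that query on
// their received epoch see fully updated state.
b.bestBlock = epoch
b.notifyBlockEpochs(int32(newBlock.height), &newBlock.hash)
return nil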
@ -829,28 +681,6 @@ func (b *BtcdNotifier) notifyBlockEpochClient(epochClient *blockEpochRegistratio
}
}
// spendNotification couples a target outpoint along with the channel used for
// notifications once a spend of the outpoint has been detected.
type spendNotification struct {
targetOutpoint *wire.OutPoint
spendChan chan *chainntnfs.SpendDetail
spendID uint64
heightHint uint32
}
// spendCancel is a message sent to the BtcdNotifier when a client wishes to
// cancel an outstanding spend notification that has yet to be dispatched.
type spendCancel struct {
// op is the target outpoint of the notification to be cancelled.
op wire.OutPoint
// spendID the ID of the notification to cancel.
spendID uint64
}
// RegisterSpendNtfn registers an intent to be notified once the target
// outpoint has been spent by a transaction on-chain. Once a spend of the target
// outpoint has been detected, the details of the spending event will be sent
@ -859,145 +689,131 @@ type spendCancel struct {
func (b *BtcdNotifier) RegisterSpendNtfn(outpoint *wire.OutPoint,
pkScript []byte, heightHint uint32) (*chainntnfs.SpendEvent, error) {
// Before proceeding to register the notification, we'll query our
// height hint cache to determine whether a better one exists.
if hint, err := b.spendHintCache.QuerySpendHint(*outpoint); err == nil {
if hint > heightHint {
chainntnfs.Log.Debugf("Using height hint %d retrieved "+
"from cache for %v", hint, outpoint)
heightHint = hint
}
// First, we'll construct a spend notification request and hand it off
// to the txNotifier.
spendID := atomic.AddUint64(&b.spendClientCounter, 1)
cancel := func() {
b.txNotifier.CancelSpend(*outpoint, spendID)
}
ntfn := &chainntnfs.SpendNtfn{
SpendID: spendID,
OutPoint: *outpoint,
PkScript: pkScript,
Event: chainntnfs.NewSpendEvent(cancel),
HeightHint: heightHint,
}
// Construct a notification request for the outpoint and send it to the
// main event loop.
ntfn := &spendNotification{
targetOutpoint: outpoint,
spendChan: make(chan *chainntnfs.SpendDetail, 1),
spendID: atomic.AddUint64(&b.spendClientCounter, 1),
heightHint: heightHint,
}
select {
case <-b.quit:
return nil, ErrChainNotifierShuttingDown
case b.notificationRegistry <- ntfn:
}
// TODO(roasbeef): update btcd rescan logic to also use both
if err := b.chainConn.NotifySpent([]*wire.OutPoint{outpoint}); err != nil {
return nil, err
}
// The following conditional checks to ensure that when a spend
// notification is registered, the output hasn't already been spent. If
// the output is no longer in the UTXO set, the chain will be rescanned
// from the point where the output was added. The rescan will dispatch
// the notification.
txOut, err := b.chainConn.GetTxOut(&outpoint.Hash, outpoint.Index, true)
historicalDispatch, err := b.txNotifier.RegisterSpend(ntfn)
if err != nil {
return nil, err
}
// If the output is unspent, then we'll write it to the cache with the
// given height hint. This allows us to increase the height hint as the
// chain extends and the output remains unspent.
// If the txNotifier didn't return any details to perform a historical
// scan of the chain, then we can return early as there's nothing left
// for us to do.
if historicalDispatch == nil {
return ntfn.Event, nil
}
// We'll then request the backend to notify us when it has detected the
// outpoint as spent.
ops := []*wire.OutPoint{outpoint}
if err := b.chainConn.NotifySpent(ops); err != nil {
return nil, err
}
// In addition to the check above, we'll also check the backend's UTXO
// set to determine whether the outpoint has been spent. If it hasn't,
// we can return to the caller as well.
txOut, err := b.chainConn.GetTxOut(&outpoint.Hash, outpoint.Index, true)
if err != nil {
return nil, err
}
if txOut != nil {
err := b.spendHintCache.CommitSpendHint(heightHint, *outpoint)
// We'll let the txNotifier know the outpoint is still unspent
// in order to begin updating its spend hint.
err := b.txNotifier.UpdateSpendDetails(*outpoint, nil)
if err != nil {
// The error is not fatal, so we should not return an
// error to the caller.
chainntnfs.Log.Errorf("Unable to update spend hint to "+
"%d for %v: %v", heightHint, *outpoint, err)
}
} else {
// Otherwise, we'll determine when the output was spent.
//
// First, we'll attempt to retrieve the transaction's block hash
// using the backend's transaction index.
tx, err := b.chainConn.GetRawTransactionVerbose(&outpoint.Hash)
if err != nil {
// Avoid returning an error if the transaction was not
// found to proceed with fallback methods.
jsonErr, ok := err.(*btcjson.RPCError)
if !ok || jsonErr.Code != btcjson.ErrRPCNoTxInfo {
return nil, fmt.Errorf("unable to query for "+
"txid %v: %v", outpoint.Hash, err)
}
return nil, err
}
var blockHash *chainhash.Hash
if tx != nil && tx.BlockHash != "" {
// If we're able to retrieve a valid block hash from the
// transaction, then we'll use it as our rescan starting
// point.
blockHash, err = chainhash.NewHashFromStr(tx.BlockHash)
if err != nil {
return nil, err
}
} else {
// Otherwise, we'll attempt to retrieve the hash for the
// block at the heightHint.
blockHash, err = b.chainConn.GetBlockHash(
int64(heightHint),
)
if err != nil {
return nil, err
}
}
return ntfn.Event, nil
}
// We'll only request a rescan if the transaction has actually
// been included within a block. Otherwise, we'll encounter an
// error when scanning for blocks. This can happen in the case
// of a race condition, wherein the output itself is unspent,
// and only arrives in the mempool after the gettxout call.
if blockHash != nil {
ops := []*wire.OutPoint{outpoint}
// Otherwise, we'll determine when the output was spent by scanning the
// chain. We'll begin by determining where to start our historical
// rescan.
startHash, err := b.chainConn.GetBlockHash(
int64(historicalDispatch.StartHeight),
)
if err != nil {
return nil, fmt.Errorf("unable to get block hash for height "+
"%d: %v", historicalDispatch.StartHeight, err)
}
// In order to ensure that we don't block the caller on
// what may be a long rescan, we'll launch a new
// goroutine to handle the async result of the rescan.
asyncResult := b.chainConn.RescanAsync(
blockHash, nil, ops,
)
go func() {
rescanErr := asyncResult.Receive()
if rescanErr != nil {
chainntnfs.Log.Errorf("Rescan for spend "+
"notification txout(%x) "+
"failed: %v", outpoint, rescanErr)
}
}()
// As a minimal optimization, we'll query the backend's transaction
// index (if enabled) to determine if we have a better rescan starting
// height. We can do this as the GetRawTransaction call will return the
// hash of the block it was included in within the chain.
tx, err := b.chainConn.GetRawTransactionVerbose(&outpoint.Hash)
if err != nil {
// Avoid returning an error if the transaction was not found to
// proceed with fallback methods.
jsonErr, ok := err.(*btcjson.RPCError)
if !ok || jsonErr.Code != btcjson.ErrRPCNoTxInfo {
return nil, fmt.Errorf("unable to query for "+
"txid %v: %v", outpoint.Hash, err)
}
}
return &chainntnfs.SpendEvent{
Spend: ntfn.spendChan,
Cancel: func() {
cancel := &spendCancel{
op: *outpoint,
spendID: ntfn.spendID,
}
// If the transaction index was enabled, we'll use the block's hash to
// retrieve its height and check whether it provides a better starting
// point for our rescan.
if tx != nil {
// If the transaction containing the outpoint hasn't confirmed
// on-chain, then there's no need to perform a rescan.
if tx.BlockHash == "" {
return ntfn.Event, nil
}
// Submit spend cancellation to notification dispatcher.
select {
case b.notificationCancels <- cancel:
// Cancellation is being handled, drain the spend chan until it is
// closed before yielding to the caller.
for {
select {
case _, ok := <-ntfn.spendChan:
if !ok {
return
}
case <-b.quit:
return
}
}
case <-b.quit:
blockHash, err := chainhash.NewHashFromStr(tx.BlockHash)
if err != nil {
return nil, err
}
blockHeader, err := b.chainConn.GetBlockHeaderVerbose(blockHash)
if err != nil {
return nil, fmt.Errorf("unable to get header for "+
"block %v: %v", blockHash, err)
}
if uint32(blockHeader.Height) > historicalDispatch.StartHeight {
startHash, err = b.chainConn.GetBlockHash(
int64(blockHeader.Height),
)
if err != nil {
return nil, fmt.Errorf("unable to get block "+
"hash for height %d: %v",
blockHeader.Height, err)
}
},
}, nil
}
}
// In order to ensure that we don't block the caller on what may be a
// long rescan, we'll launch a new goroutine to handle the async result
// of the rescan. We purposefully avoid adding this goroutine to
// the WaitGroup as we cannot wait for a quit signal due to the
// asyncResult channel not being exposed.
//
// TODO(wilmer): add retry logic if rescan fails?
asyncResult := b.chainConn.RescanAsync(startHash, nil, ops)
go func() {
if rescanErr := asyncResult.Receive(); rescanErr != nil {
chainntnfs.Log.Errorf("Rescan to determine the spend "+
"details of %v failed: %v", outpoint, rescanErr)
}
}()
return ntfn.Event, nil
}
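Stepping back, RegisterSpendNtfn in both backends now follows one decision tree. A compact sketch using the names from this file (not a drop-in replacement):

// Compact sketch of the registration decision tree above.
dispatch, err := b.txNotifier.RegisterSpend(ntfn)
if err != nil {
	return nil, err
}
if dispatch == nil {
	// Nothing left to scan historically; watch at tip only.
	return ntfn.Event, nil
}
if err := b.chainConn.NotifySpent([]*wire.OutPoint{outpoint}); err != nil {
	return nil, err
}
txOut, err := b.chainConn.GetTxOut(&outpoint.Hash, outpoint.Index, true)
if err != nil {
	return nil, err
}
if txOut != nil {
	// Still unspent: report that so spend hint updates can begin at
	// tip. As above, a failure here is non-fatal.
	if err := b.txNotifier.UpdateSpendDetails(*outpoint, nil); err != nil {
		chainntnfs.Log.Errorf("Unable to update spend details: %v", err)
	}
	return ntfn.Event, nil
}
// Already spent somewhere in the past: run the historical rescan over
// [dispatch.StartHeight, dispatch.EndHeight], as each backend does in
// its own way above.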
// RegisterConfirmationsNtfn registers a notification with BtcdNotifier
@ -1019,11 +835,11 @@ func (b *BtcdNotifier) RegisterConfirmationsNtfn(txid *chainhash.Hash, _ []byte,
chainntnfs.Log.Infof("New confirmation subscription: "+
"txid=%v, numconfs=%v", txid, numConfs)
// Register the conf notification with txconfnotifier. A non-nil value
// Register the conf notification with the TxNotifier. A non-nil value
// for `dispatch` will be returned if we are required to perform a
// manual scan for the confirmation. Otherwise the notifier will begin
// watching at tip for the transaction to confirm.
dispatch, err := b.txConfNotifier.Register(ntfn)
dispatch, err := b.txNotifier.RegisterConf(ntfn)
if err != nil {
return nil, err
}

View File

@ -11,12 +11,11 @@ import (
)
// UnsafeStart starts the notifier with a specified best height and optional
// best hash. Its bestBlock and txConfNotifier are initialized with
// bestHeight and optionally bestHash. The parameter generateBlocks is
// necessary for the bitcoind notifier to ensure we drain all notifications up
// to syncHeight, since if they are generated ahead of UnsafeStart the chainConn
// may start up with an outdated best block and miss sending ntfns. Used for
// testing.
// best hash. Its bestBlock and txNotifier are initialized with bestHeight and
// optionally bestHash. The parameter generateBlocks is necessary for the
// btcd notifier to ensure we drain all notifications up to syncHeight,
// since if they are generated ahead of UnsafeStart the chainConn may start up
// with an outdated best block and miss sending ntfns. Used for testing.
func (b *BtcdNotifier) UnsafeStart(bestHeight int32, bestHash *chainhash.Hash,
syncHeight int32, generateBlocks func() error) error {
@ -29,8 +28,9 @@ func (b *BtcdNotifier) UnsafeStart(bestHeight int32, bestHash *chainhash.Hash,
return err
}
b.txConfNotifier = chainntnfs.NewTxConfNotifier(
b.txNotifier = chainntnfs.NewTxNotifier(
uint32(bestHeight), reorgSafetyLimit, b.confirmHintCache,
b.spendHintCache,
)
b.chainUpdates.Start()

View File

@ -127,6 +127,10 @@ func (c *HeightHintCache) CommitSpendHint(height uint32, ops ...wire.OutPoint) e
return nil
}
if len(ops) == 0 {
return nil
}
Log.Tracef("Updating spend hint to height %d for %v", height, ops)
return c.db.Batch(func(tx *bolt.Tx) error {
@ -197,6 +201,10 @@ func (c *HeightHintCache) PurgeSpendHint(ops ...wire.OutPoint) error {
return nil
}
if len(ops) == 0 {
return nil
}
Log.Tracef("Removing spend hints for %v", ops)
return c.db.Batch(func(tx *bolt.Tx) error {
@ -228,6 +236,10 @@ func (c *HeightHintCache) CommitConfirmHint(height uint32, txids ...chainhash.Ha
return nil
}
if len(txids) == 0 {
return nil
}
Log.Tracef("Updating confirm hints to height %d for %v", height, txids)
return c.db.Batch(func(tx *bolt.Tx) error {
@ -299,6 +311,10 @@ func (c *HeightHintCache) PurgeConfirmHint(txids ...chainhash.Hash) error {
return nil
}
if len(txids) == 0 {
return nil
}
Log.Tracef("Removing confirm hints for %v", txids)
return c.db.Batch(func(tx *bolt.Tx) error {
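These guards make empty batches a safe no-op, so callers no longer need the len() checks the removed handleBlockConnected code carried. A minimal sketch, assuming cache is a *HeightHintCache and height and ops are in scope:

// Minimal sketch: committing an empty batch of spend hints is now a
// no-op, so this is safe even when no outpoints are being watched.
var ops []wire.OutPoint // possibly empty
if err := cache.CommitSpendHint(height, ops...); err != nil {
	// Non-fatal: hints are an optimistic optimization.
	Log.Errorf("Unable to update spend hint to %d for %v: %v",
		height, ops, err)
}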

View File

@ -159,19 +159,39 @@ type ConfirmationEvent struct {
// Confirmed is a channel that will be sent upon once the transaction
// has been fully confirmed. The struct sent will contain all the
// details of the channel's confirmation.
Confirmed chan *TxConfirmation // MUST be buffered.
//
// NOTE: This channel must be buffered.
Confirmed chan *TxConfirmation
// Updates is a channel that will be sent upon, at every incremental
// confirmation, how many confirmations are left to declare the
// transaction as fully confirmed.
Updates chan uint32 // MUST be buffered.
//
// NOTE: This channel must be buffered with the number of required
// confirmations.
Updates chan uint32
// TODO(roasbeef): all goroutines on ln channel updates should also
// have a struct chan that's closed if funding gets re-org out. Need
// to sync, to request another confirmation event ntfn, then re-open
// channel after confs.
NegativeConf chan int32 // MUST be buffered.
// NegativeConf is a channel that will be sent upon if the transaction
// confirms, but is later reorged out of the chain. The integer sent
// through the channel represents the reorg depth.
//
// NOTE: This channel must be buffered.
NegativeConf chan int32
}
// NewConfirmationEvent constructs a new ConfirmationEvent with newly opened
// channels.
func NewConfirmationEvent(numConfs uint32) *ConfirmationEvent {
return &ConfirmationEvent{
Confirmed: make(chan *TxConfirmation, 1),
Updates: make(chan uint32, numConfs),
NegativeConf: make(chan int32, 1),
}
}
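Since Updates is buffered to numConfs and the other two channels to one, the notifier can always send without blocking. A consumer sketch, assuming confEvent came from RegisterConfirmationsNtfn and that TxConfirmation carries the confirming block hash:

// Consumer sketch: drain incremental updates until the transaction is
// fully confirmed or reorged out.
for {
	select {
	case confsLeft := <-confEvent.Updates:
		fmt.Printf("%d confirmations remaining\n", confsLeft)
	case conf := <-confEvent.Confirmed:
		fmt.Printf("confirmed in block %v\n", conf.BlockHash)
		return
	case depth := <-confEvent.NegativeConf:
		fmt.Printf("reorged out at depth %d\n", depth)
		return
	}
}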
// SpendDetail contains details pertaining to a spent output. This struct itself
@ -196,7 +216,16 @@ type SpendDetail struct {
type SpendEvent struct {
// Spend is a receive only channel which will be sent upon once the
// target outpoint has been spent.
Spend <-chan *SpendDetail // MUST be buffered.
//
// NOTE: This channel must be buffered.
Spend chan *SpendDetail
// Reorg is a channel that will be sent upon once we detect the spending
// transaction of the outpoint in question has been reorged out of the
// chain.
//
// NOTE: This channel must be buffered.
Reorg chan struct{}
// Cancel is a closure that should be executed by the caller in the
// case that they wish to prematurely abandon their registered spend
@ -204,6 +233,15 @@ type SpendEvent struct {
Cancel func()
}
// NewSpendEvent constructs a new SpendEvent with newly opened channels.
func NewSpendEvent(cancel func()) *SpendEvent {
return &SpendEvent{
Spend: make(chan *SpendDetail, 1),
Reorg: make(chan struct{}, 1),
Cancel: cancel,
}
}
// BlockEpoch represents metadata concerning each new block connected to the
// main chain.
type BlockEpoch struct {
@ -225,7 +263,9 @@ type BlockEpoch struct {
type BlockEpochEvent struct {
// Epochs is a receive only channel that will be sent upon each time a
// new block is connected to the end of the main chain.
Epochs <-chan *BlockEpoch // MUST be buffered.
//
// NOTE: This channel must be buffered.
Epochs <-chan *BlockEpoch
// Cancel is a closure that should be executed by the caller in the
// case that they wish to abandon their registered spend notification.
@ -392,10 +432,10 @@ func GetClientMissedBlocks(chainConn ChainConn, clientBestBlock *BlockEpoch,
return missedBlocks, nil
}
// RewindChain handles internal state updates for the notifier's TxConfNotifier
// It has no effect if given a height greater than or equal to our current best
// RewindChain handles internal state updates for the notifier's TxNotifier. It
// has no effect if given a height greater than or equal to our current best
// known height. It returns the new best block for the notifier.
func RewindChain(chainConn ChainConn, txConfNotifier *TxConfNotifier,
func RewindChain(chainConn ChainConn, txNotifier *TxNotifier,
currBestBlock BlockEpoch, targetHeight int32) (BlockEpoch, error) {
newBestBlock := BlockEpoch{
@ -414,7 +454,7 @@ func RewindChain(chainConn ChainConn, txConfNotifier *TxConfNotifier,
Log.Infof("Block disconnected from main chain: "+
"height=%v, sha=%v", height, newBestBlock.Hash)
err = txConfNotifier.DisconnectTip(uint32(height))
err = txNotifier.DisconnectTip(uint32(height))
if err != nil {
return newBestBlock, fmt.Errorf("unable to "+
" disconnect tip for height=%d: %v",
@ -436,7 +476,7 @@ func RewindChain(chainConn ChainConn, txConfNotifier *TxConfNotifier,
// returned in case a chain rewind occurs and partially completes before
// erroring. In the case where there is no rewind, the notifier's
// current best block is returned.
func HandleMissedBlocks(chainConn ChainConn, txConfNotifier *TxConfNotifier,
func HandleMissedBlocks(chainConn ChainConn, txNotifier *TxNotifier,
currBestBlock BlockEpoch, newHeight int32,
backendStoresReorgs bool) (BlockEpoch, []BlockEpoch, error) {
@ -462,7 +502,7 @@ func HandleMissedBlocks(chainConn ChainConn, txConfNotifier *TxConfNotifier,
"common ancestor: %v", err)
}
currBestBlock, err = RewindChain(chainConn, txConfNotifier,
currBestBlock, err = RewindChain(chainConn, txNotifier,
currBestBlock, startingHeight)
if err != nil {
return currBestBlock, nil, fmt.Errorf("unable to "+
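RewindChain's core is a simple walk backwards, one DisconnectTip per height, so conf and spend state rewind together. A condensed sketch consistent with the hunks above, assuming the ChainConn interface exposes GetBlockHash as the notifiers use:

// Condensed sketch of RewindChain's loop: disconnect one height at a
// time until targetHeight, keeping the TxNotifier in sync.
for height := currBestBlock.Height; height > targetHeight; height-- {
	hash, err := chainConn.GetBlockHash(int64(height - 1))
	if err != nil {
		return newBestBlock, err
	}
	newBestBlock.Height = height - 1
	newBestBlock.Hash = hash

	if err := txNotifier.DisconnectTip(uint32(height)); err != nil {
		return newBestBlock, fmt.Errorf("unable to disconnect tip "+
			"for height=%d: %v", height, err)
	}
}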

View File

@ -232,6 +232,8 @@ func checkNotificationFields(ntfn *chainntnfs.SpendDetail,
outpoint *wire.OutPoint, spenderSha *chainhash.Hash,
height int32, t *testing.T) {
t.Helper()
if *ntfn.SpentOutPoint != *outpoint {
t.Fatalf("ntfn includes wrong output, reports "+
"%v instead of %v",
@ -756,6 +758,8 @@ func testSpendBeforeNtfnRegistration(miner *rpctest.Harness,
// already happened. The notifier should dispatch a spend notification
// immediately.
checkSpends := func() {
t.Helper()
const numClients = 2
spendClients := make([]*chainntnfs.SpendEvent, numClients)
for i := 0; i < numClients; i++ {
@ -1108,6 +1112,156 @@ func testReorgConf(miner *rpctest.Harness, notifier chainntnfs.TestChainNotifier
}
}
// testReorgSpend ensures that the different ChainNotifier implementations
// correctly handle outpoints whose spending transaction has been reorged out of
// the chain.
func testReorgSpend(miner *rpctest.Harness,
notifier chainntnfs.TestChainNotifier, t *testing.T) {
// We'll start by creating an output and registering a spend
// notification for it.
outpoint, pkScript := chainntnfs.CreateSpendableOutput(t, miner)
_, currentHeight, err := miner.Node.GetBestBlock()
if err != nil {
t.Fatalf("unable to retrieve current height: %v", err)
}
spendIntent, err := notifier.RegisterSpendNtfn(
outpoint, pkScript, uint32(currentHeight),
)
if err != nil {
t.Fatalf("unable to register for spend: %v", err)
}
// Set up a new miner that we can use to cause a reorg.
miner2, err := rpctest.New(chainntnfs.NetParams, nil, []string{"--txindex"})
if err != nil {
t.Fatalf("unable to create mining node: %v", err)
}
if err := miner2.SetUp(false, 0); err != nil {
t.Fatalf("unable to set up mining node: %v", err)
}
defer miner2.TearDown()
// We start by connecting the new miner to our original miner, in order
// to have a consistent view of the chain from both miners. They should
// be on the same block height.
if err := rpctest.ConnectNode(miner, miner2); err != nil {
t.Fatalf("unable to connect miners: %v", err)
}
nodeSlice := []*rpctest.Harness{miner, miner2}
if err := rpctest.JoinNodes(nodeSlice, rpctest.Blocks); err != nil {
t.Fatalf("unable to sync miners: %v", err)
}
_, minerHeight1, err := miner.Node.GetBestBlock()
if err != nil {
t.Fatalf("unable to get miner1's current height: %v", err)
}
_, minerHeight2, err := miner2.Node.GetBestBlock()
if err != nil {
t.Fatalf("unable to get miner2's current height: %v", err)
}
if minerHeight1 != minerHeight2 {
t.Fatalf("expected both miners to be on the same height: "+
"%v vs %v", minerHeight1, minerHeight2)
}
// We disconnect the two nodes, such that we can start mining on them
// individually without the other one learning about the new blocks.
err = miner.Node.AddNode(miner2.P2PAddress(), rpcclient.ANRemove)
if err != nil {
t.Fatalf("unable to disconnect miners: %v", err)
}
// Craft the spending transaction for the outpoint created above and
// confirm it under the chain of the original miner.
spendTx := chainntnfs.CreateSpendTx(t, outpoint, pkScript)
spendTxHash, err := miner.Node.SendRawTransaction(spendTx, true)
if err != nil {
t.Fatalf("unable to broadcast spend tx: %v", err)
}
if err := chainntnfs.WaitForMempoolTx(miner, spendTxHash); err != nil {
t.Fatalf("spend tx not relayed to miner: %v", err)
}
const numBlocks = 1
if _, err := miner.Node.Generate(numBlocks); err != nil {
t.Fatalf("unable to generate blocks: %v", err)
}
// We should see a spend notification dispatched with the correct spend
// details.
select {
case spendDetails := <-spendIntent.Spend:
checkNotificationFields(
spendDetails, outpoint, spendTxHash,
currentHeight+numBlocks, t,
)
case <-time.After(5 * time.Second):
t.Fatal("expected spend notification to be dispatched")
}
// Now, with the other miner, we'll generate one more block than the
// other miner and connect them to cause a reorg.
if _, err := miner2.Node.Generate(numBlocks + 1); err != nil {
t.Fatalf("unable to generate blocks: %v", err)
}
if err := rpctest.ConnectNode(miner, miner2); err != nil {
t.Fatalf("unable to connect miners: %v", err)
}
nodeSlice = []*rpctest.Harness{miner2, miner}
if err := rpctest.JoinNodes(nodeSlice, rpctest.Blocks); err != nil {
t.Fatalf("unable to sync miners: %v", err)
}
_, minerHeight1, err = miner.Node.GetBestBlock()
if err != nil {
t.Fatalf("unable to get miner1's current height: %v", err)
}
_, minerHeight2, err = miner2.Node.GetBestBlock()
if err != nil {
t.Fatalf("unable to get miner2's current height: %v", err)
}
if minerHeight1 != minerHeight2 {
t.Fatalf("expected both miners to be on the same height: "+
"%v vs %v", minerHeight1, minerHeight2)
}
// We should receive a reorg notification.
select {
case _, ok := <-spendIntent.Reorg:
if !ok {
t.Fatal("unexpected reorg channel closed")
}
case <-time.After(5 * time.Second):
t.Fatal("expected to receive reorg notification")
}
// Now that both miners are on the same chain, we'll confirm the
// spending transaction of the outpoint and receive a notification for
// it.
if _, err = miner2.Node.SendRawTransaction(spendTx, true); err != nil {
t.Fatalf("unable to broadcast spend tx: %v", err)
}
if err := chainntnfs.WaitForMempoolTx(miner, spendTxHash); err != nil {
t.Fatalf("tx not relayed to miner: %v", err)
}
_, currentHeight, err = miner.Node.GetBestBlock()
if err != nil {
t.Fatalf("unable to retrieve current height: %v", err)
}
if _, err := miner.Node.Generate(numBlocks); err != nil {
t.Fatalf("unable to generate single block: %v", err)
}
select {
case spendDetails := <-spendIntent.Spend:
checkNotificationFields(
spendDetails, outpoint, spendTxHash,
currentHeight+numBlocks, t,
)
case <-time.After(5 * time.Second):
t.Fatal("expected spend notification to be dispatched")
}
}
// testCatchUpClientOnMissedBlocks tests the case of multiple registered client
// receiving historical block epoch notifications due to their best known block
// being out of date.
@ -1134,11 +1288,11 @@ func testCatchUpClientOnMissedBlocks(miner *rpctest.Harness,
// the notifier's best block is at the tip of the chain. If it isn't, the
// client may not receive all historical notifications.
bestHeight := outdatedHeight + numBlocks
if err := notifier.UnsafeStart(
bestHeight, nil, bestHeight, generateBlocks); err != nil {
t.Fatalf("Unable to unsafe start the notifier: %v", err)
err = notifier.UnsafeStart(bestHeight, nil, bestHeight, generateBlocks)
if err != nil {
t.Fatalf("unable to unsafe start the notifier: %v", err)
}
defer notifier.Stop()
// Create numClients clients whose best known block is 10 blocks behind
// the tip of the chain. We expect each client to receive numBlocks
@ -1223,11 +1377,13 @@ func testCatchUpOnMissedBlocks(miner *rpctest.Harness,
}
// Next, start the notifier with outdated best block information.
if err := notifier.UnsafeStart(bestHeight,
nil, bestHeight+numBlocks, generateBlocks); err != nil {
t.Fatalf("Unable to unsafe start the notifier: %v", err)
err = notifier.UnsafeStart(
bestHeight, nil, bestHeight+numBlocks, generateBlocks,
)
if err != nil {
t.Fatalf("unable to unsafe start the notifier: %v", err)
}
defer notifier.Stop()
// Create numClients clients who will listen for block notifications.
clients := make([]*chainntnfs.BlockEpochEvent, 0, numClients)
@ -1396,11 +1552,13 @@ func testCatchUpOnMissedBlocksWithReorg(miner1 *rpctest.Harness,
// shorter chain, to test that the notifier correctly rewinds to
// the common ancestor between the two chains.
syncHeight := nodeHeight1 + numBlocks + 1
if err := notifier.UnsafeStart(nodeHeight1+numBlocks,
blocks[numBlocks-1], syncHeight, nil); err != nil {
err = notifier.UnsafeStart(
nodeHeight1+numBlocks, blocks[numBlocks-1], syncHeight, nil,
)
if err != nil {
t.Fatalf("Unable to unsafe start the notifier: %v", err)
}
defer notifier.Stop()
// Create numClients clients who will listen for block notifications.
clients := make([]*chainntnfs.BlockEpochEvent, 0, numClients)
@ -1545,6 +1703,10 @@ var ntfnTests = []testCase{
name: "reorg conf",
test: testReorgConf,
},
{
name: "reorg spend",
test: testReorgSpend,
},
}
var blockCatchupTests = []blockCatchupTestCase{
@ -1680,9 +1842,6 @@ func TestInterfaces(t *testing.T) {
success := t.Run(testName, func(t *testing.T) {
blockCatchupTest.test(miner, notifier, t)
})
notifier.Stop()
if !success {
break
}

View File

@ -68,9 +68,7 @@ type NeutrinoNotifier struct {
notificationCancels chan interface{}
notificationRegistry chan interface{}
spendNotifications map[wire.OutPoint]map[uint64]*spendNotification
txConfNotifier *chainntnfs.TxConfNotifier
txNotifier *chainntnfs.TxNotifier
blockEpochClients map[uint64]*blockEpochRegistration
@ -109,8 +107,6 @@ func New(node *neutrino.ChainService, spendHintCache chainntnfs.SpendHintCache,
blockEpochClients: make(map[uint64]*blockEpochRegistration),
spendNotifications: make(map[wire.OutPoint]map[uint64]*spendNotification),
p2pNode: node,
rescanErr: make(chan error),
@ -162,8 +158,9 @@ func (n *NeutrinoNotifier) Start() error {
neutrino.WatchInputs(zeroInput),
}
n.txConfNotifier = chainntnfs.NewTxConfNotifier(
n.txNotifier = chainntnfs.NewTxNotifier(
n.bestHeight, reorgSafetyLimit, n.confirmHintCache,
n.spendHintCache,
)
n.chainConn = &NeutrinoChainConn{n.p2pNode}
@ -195,18 +192,13 @@ func (n *NeutrinoNotifier) Stop() error {
// Notify all pending clients of our shutdown by closing the related
// notification channels.
for _, spendClients := range n.spendNotifications {
for _, spendClient := range spendClients {
close(spendClient.spendChan)
}
}
for _, epochClient := range n.blockEpochClients {
close(epochClient.cancelChan)
epochClient.wg.Wait()
close(epochClient.epochChan)
}
n.txConfNotifier.TearDown()
n.txNotifier.TearDown()
return nil
}
@ -225,6 +217,14 @@ type filteredBlock struct {
connect bool
}
// rescanFilterUpdate represents a request that will be sent to the
// notificationRegistry in order to prevent race conditions between the filter
// update and new block notifications.
type rescanFilterUpdate struct {
updateOptions []neutrino.UpdateOption
errChan chan error
}
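The sending side pairs with the handler added to the event loop below: wrap the neutrino update options, push them through the registry, and wait on errChan. A sketch reusing the update options from the removed conf path above (errShuttingDown is an assumed shutdown error value, as in the other backends):

// Sending side of a rescanFilterUpdate; the handler in the event loop
// below applies the options and replies on errChan. Buffering errChan
// keeps the event loop from blocking on a departed caller.
errChan := make(chan error, 1)
update := &rescanFilterUpdate{
	updateOptions: []neutrino.UpdateOption{
		neutrino.AddAddrs(addrs...),
		neutrino.Rewind(height),
		neutrino.DisableDisconnectedNtfns(true),
	},
	errChan: errChan,
}
select {
case n.notificationRegistry <- update:
case <-n.quit:
	return errShuttingDown // shutdown error value assumed
}
select {
case err := <-errChan:
	return err
case <-n.quit:
	return errShuttingDown
}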
// onFilteredBlockConnected is a callback which is executed each time a new
// block is connected to the end of the main chain.
func (n *NeutrinoNotifier) onFilteredBlockConnected(height int32,
@ -263,19 +263,6 @@ out:
select {
case cancelMsg := <-n.notificationCancels:
switch msg := cancelMsg.(type) {
case *spendCancel:
chainntnfs.Log.Infof("Cancelling spend "+
"notification for out_point=%v, "+
"spend_id=%v", msg.op, msg.spendID)
// Before we attempt to close the spendChan,
// ensure that the notification hasn't already
// yet been dispatched.
if outPointClients, ok := n.spendNotifications[msg.op]; ok {
close(outPointClients[msg.spendID].spendChan)
delete(n.spendNotifications[msg.op], msg.spendID)
}
case *epochCancel:
chainntnfs.Log.Infof("Cancelling epoch "+
"notification, epoch_id=%v", msg.epochID)
@ -303,21 +290,9 @@ out:
case registerMsg := <-n.notificationRegistry:
switch msg := registerMsg.(type) {
case *spendNotification:
chainntnfs.Log.Infof("New spend subscription: "+
"utxo=%v, height_hint=%v",
msg.targetOutpoint, msg.heightHint)
op := *msg.targetOutpoint
if _, ok := n.spendNotifications[op]; !ok {
n.spendNotifications[op] = make(map[uint64]*spendNotification)
}
n.spendNotifications[op][msg.spendID] = msg
case *chainntnfs.HistoricalConfDispatch:
// Look up whether the transaction is already
// included in the active chain. We'll do this
// in a goroutine to prevent blocking
// We'll start a historical rescan of the
// chain asynchronously to prevent blocking
// potentially long rescans.
n.wg.Add(1)
go func() {
@ -331,18 +306,6 @@ out:
chainntnfs.Log.Error(err)
}
// We'll map the script into an address
// type so we can instruct neutrino to
// match if the transaction containing
// the script is found in a block.
params := n.p2pNode.ChainParams()
_, addrs, _, err := txscript.ExtractPkScriptAddrs(
msg.PkScript, &params,
)
if err != nil {
chainntnfs.Log.Error(err)
}
// If the historical dispatch finished
// without error, we will invoke
// UpdateConfDetails even if none were
@ -350,31 +313,12 @@ out:
// begin safely updating the height hint
// cache at tip, since any pending
// rescans have now completed.
err = n.txConfNotifier.UpdateConfDetails(
err = n.txNotifier.UpdateConfDetails(
*msg.TxID, confDetails,
)
if err != nil {
chainntnfs.Log.Error(err)
}
if confDetails != nil {
return
}
// If we can't fully dispatch
// confirmation, then we'll update our
// filter so we can be notified of its
// future initial confirmation.
rescanUpdate := []neutrino.UpdateOption{
neutrino.AddAddrs(addrs...),
neutrino.Rewind(msg.EndHeight),
neutrino.DisableDisconnectedNtfns(true),
}
err = n.chainView.Update(rescanUpdate...)
if err != nil {
chainntnfs.Log.Errorf("Unable to update rescan: %v",
err)
}
}()
case *blockEpochRegistration:
@ -399,6 +343,14 @@ out:
}
}
msg.errorChan <- nil
case *rescanFilterUpdate:
err := n.chainView.Update(msg.updateOptions...)
if err != nil {
chainntnfs.Log.Errorf("Unable to "+
"update rescan filter: %v", err)
}
msg.errChan <- err
}
case item := <-n.chainUpdates.ChanOut():
@ -426,7 +378,7 @@ out:
_, missedBlocks, err :=
chainntnfs.HandleMissedBlocks(
n.chainConn,
n.txConfNotifier,
n.txNotifier,
bestBlock,
int32(update.height),
false,
@ -482,7 +434,7 @@ out:
Hash: hash,
}
newBestBlock, err := chainntnfs.RewindChain(
n.chainConn, n.txConfNotifier, notifierBestBlock,
n.chainConn, n.txNotifier, notifierBestBlock,
int32(update.height-1),
)
if err != nil {
@ -593,7 +545,7 @@ func (n *NeutrinoNotifier) handleBlockConnected(newBlock *filteredBlock) error {
// First process the block for our internal state. A new block has
// been connected to the main chain. Send out any N confirmation
// notifications which may have been triggered by this new block.
err := n.txConfNotifier.ConnectTip(
err := n.txNotifier.ConnectTip(
&newBlock.hash, newBlock.height, newBlock.txns,
)
if err != nil {
@ -603,68 +555,6 @@ func (n *NeutrinoNotifier) handleBlockConnected(newBlock *filteredBlock) error {
chainntnfs.Log.Infof("New block: height=%v, sha=%v", newBlock.height,
newBlock.hash)
// Create a helper struct for coalescing spend notifications triggered
// by this block.
type spendNtfnBatch struct {
details *chainntnfs.SpendDetail
clients map[uint64]*spendNotification
}
// Scan over the list of relevant transactions and assemble the
// possible spend notifications we need to dispatch.
spendBatches := make(map[wire.OutPoint]spendNtfnBatch)
for _, tx := range newBlock.txns {
mtx := tx.MsgTx()
txSha := mtx.TxHash()
for i, txIn := range mtx.TxIn {
prevOut := txIn.PreviousOutPoint
// If this transaction indeed does spend an output which
// we have a registered notification for, then create a
// spend summary and add it to our batch of spend
// notifications to be delivered.
clients, ok := n.spendNotifications[prevOut]
if !ok {
continue
}
delete(n.spendNotifications, prevOut)
spendDetails := &chainntnfs.SpendDetail{
SpentOutPoint: &prevOut,
SpenderTxHash: &txSha,
SpendingTx: mtx,
SpenderInputIndex: uint32(i),
SpendingHeight: int32(newBlock.height),
}
spendBatches[prevOut] = spendNtfnBatch{
details: spendDetails,
clients: clients,
}
}
}
// Now, we'll update the spend height hint for all of our watched
// outpoints that have not been spent yet. This is safe to do as we do
// not watch already spent outpoints for spend notifications.
ops := make([]wire.OutPoint, 0, len(n.spendNotifications))
for op := range n.spendNotifications {
ops = append(ops, op)
}
if len(ops) > 0 {
err := n.spendHintCache.CommitSpendHint(newBlock.height, ops...)
if err != nil {
// The error is not fatal since we are connecting a
// block, and advancing the spend hint is an optimistic
// optimization.
chainntnfs.Log.Errorf("Unable to update spend hint to "+
"%d for %v: %v", newBlock.height, ops, err)
}
}
// We want to set the best block before dispatching notifications
// so if any subscribers make queries based on their received
// block epoch, our state is fully updated in time.
@ -674,23 +564,6 @@ func (n *NeutrinoNotifier) handleBlockConnected(newBlock *filteredBlock) error {
// of the block.
n.notifyBlockEpochs(int32(newBlock.height), &newBlock.hash)
// Finally, send off the spend details to the notification subscribers.
for _, batch := range spendBatches {
for _, ntfn := range batch.clients {
chainntnfs.Log.Infof("Dispatching spend "+
"notification for outpoint=%v",
ntfn.targetOutpoint)
ntfn.spendChan <- batch.details
// Close spendChan to ensure that any calls to
// Cancel will not block. This is safe to do
// since the channel is buffered, and the
// message can still be read by the receiver.
close(ntfn.spendChan)
}
}
return nil
}
@ -737,28 +610,6 @@ func (n *NeutrinoNotifier) notifyBlockEpochClient(epochClient *blockEpochRegistr
}
}
// spendNotification couples a target outpoint along with the channel used for
// notifications once a spend of the outpoint has been detected.
type spendNotification struct {
targetOutpoint *wire.OutPoint
spendChan chan *chainntnfs.SpendDetail
spendID uint64
heightHint uint32
}
// spendCancel is a message sent to the NeutrinoNotifier when a client wishes
// to cancel an outstanding spend notification that has yet to be dispatched.
type spendCancel struct {
// op is the target outpoint of the notification to be cancelled.
op wire.OutPoint
// spendID the ID of the notification to cancel.
spendID uint64
}
// RegisterSpendNtfn registers an intent to be notified once the target
// outpoint has been spent by a transaction on-chain. Once a spend of the
// target outpoint has been detected, the details of the spending event will be
@ -766,86 +617,91 @@ type spendCancel struct {
func (n *NeutrinoNotifier) RegisterSpendNtfn(outpoint *wire.OutPoint,
pkScript []byte, heightHint uint32) (*chainntnfs.SpendEvent, error) {
n.heightMtx.RLock()
currentHeight := n.bestHeight
n.heightMtx.RUnlock()
// Before proceeding to register the notification, we'll query our
// height hint cache to determine whether a better one exists.
if hint, err := n.spendHintCache.QuerySpendHint(*outpoint); err == nil {
if hint > heightHint {
chainntnfs.Log.Debugf("Using height hint %d retrieved "+
"from cache for %v", hint, outpoint)
heightHint = hint
}
// First, we'll construct a spend notification request and hand it off
// to the txNotifier.
spendID := atomic.AddUint64(&n.spendClientCounter, 1)
cancel := func() {
n.txNotifier.CancelSpend(*outpoint, spendID)
}
ntfn := &chainntnfs.SpendNtfn{
SpendID: spendID,
OutPoint: *outpoint,
Event: chainntnfs.NewSpendEvent(cancel),
HeightHint: heightHint,
}
// Construct a notification request for the outpoint. We'll defer
// sending it to the main event loop until after we've guaranteed that
// the outpoint has not been spent.
ntfn := &spendNotification{
targetOutpoint: outpoint,
spendChan: make(chan *chainntnfs.SpendDetail, 1),
spendID: atomic.AddUint64(&n.spendClientCounter, 1),
heightHint: heightHint,
historicalDispatch, err := n.txNotifier.RegisterSpend(ntfn)
if err != nil {
return nil, err
}
spendEvent := &chainntnfs.SpendEvent{
Spend: ntfn.spendChan,
Cancel: func() {
cancel := &spendCancel{
op: *outpoint,
spendID: ntfn.spendID,
}
// Submit spend cancellation to notification dispatcher.
select {
case n.notificationCancels <- cancel:
// Cancellation is being handled, drain the
// spend chan until it is closed before yielding
// to the caller.
for {
select {
case _, ok := <-ntfn.spendChan:
if !ok {
return
}
case <-n.quit:
return
}
}
case <-n.quit:
}
},
}
// Ensure that neutrino is caught up to the height hint before we
// attempt to fetch the utxo from the chain. If we're behind, then we
// may miss a notification dispatch.
for {
n.heightMtx.RLock()
currentHeight = n.bestHeight
n.heightMtx.RUnlock()
if currentHeight < heightHint {
time.Sleep(time.Millisecond * 200)
continue
}
break
// If the txNotifier didn't return any details to perform a historical
// scan of the chain, then we can return early as there's nothing left
// for us to do.
if historicalDispatch == nil {
return ntfn.Event, nil
}
// To determine whether this outpoint has been spent on-chain, we'll
// update our filter to watch for the transaction at tip and we'll also
// dispatch a historical rescan to determine if it has been spent in the
// past.
//
// We'll update our filter first to ensure we can immediately detect the
// spend at tip. To do so, we'll instruct neutrino to watch the outpoint
// itself, along with its script, and match if the spending transaction
// is found in a block.
inputToWatch := neutrino.InputWithScript{
OutPoint: *outpoint,
PkScript: pkScript,
}
errChan := make(chan error, 1)
select {
case n.notificationRegistry <- &rescanFilterUpdate{
updateOptions: []neutrino.UpdateOption{
neutrino.AddInputs(inputToWatch),
neutrino.Rewind(historicalDispatch.EndHeight),
neutrino.DisableDisconnectedNtfns(true),
},
errChan: errChan,
}:
case <-n.quit:
return nil, ErrChainNotifierShuttingDown
}
select {
case err = <-errChan:
case <-n.quit:
return nil, ErrChainNotifierShuttingDown
}
if err != nil {
return nil, fmt.Errorf("unable to update filter: %v", err)
}
// With the filter updated, we'll dispatch our historical rescan to
// ensure we detect the spend if it happened in the past. We'll ensure
// that neutrino is caught up to the starting height before we attempt
// to fetch the UTXO from the chain. If we're behind, then we may miss a
// notification dispatch.
for {
n.heightMtx.RLock()
currentHeight := n.bestHeight
n.heightMtx.RUnlock()
if currentHeight >= historicalDispatch.StartHeight {
break
}
time.Sleep(time.Millisecond * 200)
}
// Before sending off the notification request, we'll attempt to see if
// this output is still spent or not at this point in the chain.
spendReport, err := n.p2pNode.GetUtxo(
neutrino.WatchInputs(inputToWatch),
neutrino.StartBlock(&waddrmgr.BlockStamp{
Height: int32(heightHint),
Height: int32(historicalDispatch.StartHeight),
}),
neutrino.EndBlock(&waddrmgr.BlockStamp{
Height: int32(historicalDispatch.EndHeight),
}),
)
if err != nil && !strings.Contains(err.Error(), "not found") {
@ -854,57 +710,28 @@ func (n *NeutrinoNotifier) RegisterSpendNtfn(outpoint *wire.OutPoint,
// If a spend report was returned, and the transaction is present, then
// this means that the output is already spent.
var spendDetails *chainntnfs.SpendDetail
if spendReport != nil && spendReport.SpendingTx != nil {
// As a result, we'll launch a goroutine to immediately
// dispatch the notification with a normal response.
go func() {
txSha := spendReport.SpendingTx.TxHash()
select {
case ntfn.spendChan <- &chainntnfs.SpendDetail{
SpentOutPoint: outpoint,
SpenderTxHash: &txSha,
SpendingTx: spendReport.SpendingTx,
SpenderInputIndex: spendReport.SpendingInputIndex,
SpendingHeight: int32(spendReport.SpendingTxHeight),
}:
case <-n.quit:
return
}
}()
return spendEvent, nil
spendingTxHash := spendReport.SpendingTx.TxHash()
spendDetails = &chainntnfs.SpendDetail{
SpentOutPoint: outpoint,
SpenderTxHash: &spendingTxHash,
SpendingTx: spendReport.SpendingTx,
SpenderInputIndex: spendReport.SpendingInputIndex,
SpendingHeight: int32(spendReport.SpendingTxHeight),
}
}
// If the output is still unspent, then we'll update our rescan's
// filter, and send the request to the dispatcher goroutine.
rescanUpdate := []neutrino.UpdateOption{
neutrino.AddInputs(inputToWatch),
neutrino.Rewind(currentHeight),
neutrino.DisableDisconnectedNtfns(true),
}
if err := n.chainView.Update(rescanUpdate...); err != nil {
// Finally, no matter whether the rescan found a spend in the past or
// not, we'll mark our historical rescan as complete to ensure the
// outpoint's spend hint gets updated upon connected/disconnected
// blocks.
err = n.txNotifier.UpdateSpendDetails(*outpoint, spendDetails)
if err != nil {
return nil, err
}
select {
case n.notificationRegistry <- ntfn:
case <-n.quit:
return nil, ErrChainNotifierShuttingDown
}
// Finally, we'll add a spent hint with the current height to the cache
// in order to better keep track of when this outpoint is spent.
err = n.spendHintCache.CommitSpendHint(currentHeight, *outpoint)
if err != nil {
// The error is not fatal, so we should not return an error to
// the caller.
chainntnfs.Log.Errorf("Unable to update spend hint to %d for "+
"%v: %v", currentHeight, outpoint, err)
}
return spendEvent, nil
return ntfn.Event, nil
}
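As a hedged usage sketch (not part of this change), a client of the new spend path could look like the following; only the Spend channel and Cancel closure visible in this file are assumed.

package main

import (
    "log"

    "github.com/btcsuite/btcd/wire"
    "github.com/lightningnetwork/lnd/chainntnfs"
)

// waitForSpend consumes a single spend notification for the given outpoint.
func waitForSpend(notifier chainntnfs.ChainNotifier, op *wire.OutPoint,
    pkScript []byte, heightHint uint32) {

    spendEvent, err := notifier.RegisterSpendNtfn(op, pkScript, heightHint)
    if err != nil {
        log.Fatalf("unable to register spend ntfn: %v", err)
    }
    defer spendEvent.Cancel()

    // The channel is closed on shutdown, so check the second return value.
    details, ok := <-spendEvent.Spend
    if !ok {
        return
    }
    log.Printf("outpoint %v spent by %v at height %v",
        details.SpentOutPoint, details.SpenderTxHash,
        details.SpendingHeight)
}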
// RegisterConfirmationsNtfn registers a notification with NeutrinoNotifier
@ -928,11 +755,11 @@ func (n *NeutrinoNotifier) RegisterConfirmationsNtfn(txid *chainhash.Hash,
chainntnfs.Log.Infof("New confirmation subscription: "+
"txid=%v, numconfs=%v", txid, numConfs)
// Register the conf notification with txconfnotifier. A non-nil value
// Register the conf notification with the TxNotifier. A non-nil value
// for `dispatch` will be returned if we are required to perform a
// manual scan for the confirmation. Otherwise the notifier will begin
// watching at tip for the transaction to confirm.
dispatch, err := n.txConfNotifier.Register(ntfn)
dispatch, err := n.txNotifier.RegisterConf(ntfn)
if err != nil {
return nil, err
}
@ -941,12 +768,55 @@ func (n *NeutrinoNotifier) RegisterConfirmationsNtfn(txid *chainhash.Hash,
return ntfn.Event, nil
}
// To determine whether this transaction has confirmed on-chain, we'll
// update our filter to watch for the transaction at tip and we'll also
// dispatch a historical rescan to determine if it has confirmed in the
// past.
//
// We'll update our filter first to ensure we can immediately detect the
// confirmation at tip. To do so, we'll map the script into an address
// type so we can instruct neutrino to match if the transaction
// containing the script is found in a block.
params := n.p2pNode.ChainParams()
_, addrs, _, err := txscript.ExtractPkScriptAddrs(pkScript, &params)
if err != nil {
return nil, fmt.Errorf("unable to extract script: %v", err)
}
// We'll send the filter update request to the notifier's main event
// handler and wait for its response.
errChan := make(chan error, 1)
select {
case n.notificationRegistry <- dispatch:
return ntfn.Event, nil
case n.notificationRegistry <- &rescanFilterUpdate{
updateOptions: []neutrino.UpdateOption{
neutrino.AddAddrs(addrs...),
neutrino.Rewind(dispatch.EndHeight),
neutrino.DisableDisconnectedNtfns(true),
},
errChan: errChan,
}:
case <-n.quit:
return nil, ErrChainNotifierShuttingDown
}
select {
case err = <-errChan:
case <-n.quit:
return nil, ErrChainNotifierShuttingDown
}
if err != nil {
return nil, fmt.Errorf("unable to update filter: %v", err)
}
// Finally, with the filter updated, we can dispatch the historical
// rescan to ensure we can detect if the event happened in the past.
select {
case n.notificationRegistry <- dispatch:
case <-n.quit:
return nil, ErrChainNotifierShuttingDown
}
return ntfn.Event, nil
}
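Similarly, a hedged sketch of a confirmation client; the channel names are taken from the ConfirmationEvent usage elsewhere in this package.

package main

import (
    "log"

    "github.com/btcsuite/btcd/chaincfg/chainhash"
    "github.com/lightningnetwork/lnd/chainntnfs"
)

// waitForConf blocks until txid reaches numConfs confirmations, logging
// progress updates and any reorg (negative conf) along the way.
func waitForConf(notifier chainntnfs.ChainNotifier, txid *chainhash.Hash,
    pkScript []byte, numConfs, heightHint uint32) {

    confEvent, err := notifier.RegisterConfirmationsNtfn(
        txid, pkScript, numConfs, heightHint,
    )
    if err != nil {
        log.Fatalf("unable to register conf ntfn: %v", err)
    }

    for {
        select {
        case confsLeft := <-confEvent.Updates:
            log.Printf("%d confirmations left for %v", confsLeft, txid)

        case conf := <-confEvent.Confirmed:
            log.Printf("%v confirmed in block %v", txid, conf.BlockHash)
            return

        case depth := <-confEvent.NegativeConf:
            log.Printf("%v reorged out at depth %d", txid, depth)
        }
    }
}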
// blockEpochRegistration represents a client's intent to receive a

View File

@ -13,13 +13,14 @@ import (
)
// UnsafeStart starts the notifier with a specified best height and optional
// best hash. Its bestHeight, txConfNotifier and neutrino node are initialized
// with bestHeight. The parameter generateBlocks is necessary for the
// bitcoind notifier to ensure we drain all notifications up to syncHeight,
// since if they are generated ahead of UnsafeStart the chainConn may start
// up with an outdated best block and miss sending ntfns. Used for testing.
func (n *NeutrinoNotifier) UnsafeStart(bestHeight int32, bestHash *chainhash.Hash,
syncHeight int32, generateBlocks func() error) error {
// best hash. Its bestHeight, txNotifier and neutrino node are initialized with
// bestHeight. The parameter generateBlocks is necessary for the bitcoind
// notifier to ensure we drain all notifications up to syncHeight, since if they
// are generated ahead of UnsafeStart the chainConn may start up with an
// outdated best block and miss sending ntfns. Used for testing.
func (n *NeutrinoNotifier) UnsafeStart(bestHeight int32,
bestHash *chainhash.Hash, syncHeight int32,
generateBlocks func() error) error {
// We'll obtain the latest block height of the p2p node. We'll
// start the auto-rescan from this point. Once a caller actually wishes
@ -47,8 +48,9 @@ func (n *NeutrinoNotifier) UnsafeStart(bestHeight int32, bestHash *chainhash.Has
neutrino.WatchInputs(zeroInput),
}
n.txConfNotifier = chainntnfs.NewTxConfNotifier(
n.txNotifier = chainntnfs.NewTxNotifier(
uint32(bestHeight), reorgSafetyLimit, n.confirmHintCache,
n.spendHintCache,
)
n.chainConn = &NeutrinoChainConn{n.p2pNode}

View File

@ -1,806 +0,0 @@
package chainntnfs
import (
"errors"
"fmt"
"sync"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcutil"
)
var (
// ErrTxConfNotifierExiting is an error returned when attempting to
// interact with the TxConfNotifier after it has been shut down.
ErrTxConfNotifierExiting = errors.New("TxConfNotifier is exiting")
// ErrTxMaxConfs signals that the user requested a number of
// confirmations beyond the reorg safety limit.
ErrTxMaxConfs = errors.New("too many confirmations requested")
)
// ConfNtfn represents a notifier client's request to receive a notification
// once the target transaction gets sufficient confirmations. The client is
// asynchronously notified via the ConfirmationEvent channels.
type ConfNtfn struct {
// ConfID uniquely identifies the confirmation notification request for
// the specified transaction.
ConfID uint64
// TxID is the hash of the transaction for which confirmation notifications
// are requested.
TxID *chainhash.Hash
// PkScript is the public key script of an outpoint created in this
// transaction.
//
// NOTE: This value MUST be set when the dispatch is to be performed
// using compact filters.
PkScript []byte
// NumConfirmations is the number of confirmations after which the
// notification is to be sent.
NumConfirmations uint32
// Event contains references to the channels that the notifications are to
// be sent over.
Event *ConfirmationEvent
// HeightHint is the minimum height in the chain that we expect to find
// this txid.
HeightHint uint32
// dispatched is false if the confirmed notification has not been sent yet.
dispatched bool
}
// HistoricalConfDispatch parameterizes a manual rescan for a particular
// transaction identifier. The parameters include the start and end block
// heights specifying the range of blocks to scan.
type HistoricalConfDispatch struct {
// TxID is the transaction ID to search for in the historical dispatch.
TxID *chainhash.Hash
// PkScript is a public key script from an output created by this
// transaction.
//
// NOTE: This value MUST be set when the dispatch is to be performed
// using compact filters.
PkScript []byte
// StartHeight specifies the block height at which to begin the
// historical rescan.
StartHeight uint32
// EndHeight specifies the last block height (inclusive) that the
// historical scan should consider.
EndHeight uint32
}
// NewConfirmationEvent constructs a new ConfirmationEvent with newly opened
// channels.
func NewConfirmationEvent(numConfs uint32) *ConfirmationEvent {
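// The channels below are buffered so the notifier can deliver without
// blocking: one terminal confirmation, up to numConfs "confirmations left"
// updates, and one negative-conf (reorg) signal.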
return &ConfirmationEvent{
Confirmed: make(chan *TxConfirmation, 1),
Updates: make(chan uint32, numConfs),
NegativeConf: make(chan int32, 1),
}
}
// TxConfNotifier is used to register transaction confirmation notifications and
// dispatch them as the transactions confirm. A client can request to be
// notified when a particular transaction has sufficient on-chain confirmations
// (or be notified immediately if the tx already does), and the TxConfNotifier
// will watch changes to the blockchain in order to satisfy these requests.
type TxConfNotifier struct {
// currentHeight is the height of the tracked blockchain. It is used to
// determine the number of confirmations a tx has and ensure blocks are
// connected and disconnected in order.
currentHeight uint32
// reorgSafetyLimit is the chain depth beyond which it is assumed a block
// will not be reorganized out of the chain. This is used to determine when
// to prune old confirmation requests so that reorgs are handled correctly.
// The coinbase maturity period is a reasonable value to use.
reorgSafetyLimit uint32
// reorgDepth is the depth of a chain reorganization that this system is being
// informed of. This is incremented as long as a sequence of blocks are
// disconnected without being interrupted by a new block.
reorgDepth uint32
// confNotifications is an index of notification requests by transaction
// hash.
confNotifications map[chainhash.Hash]*confNtfnSet
// txsByInitialHeight is an index of watched transactions by the height
// that they are included at in the blockchain. This is tracked so that
// incorrect notifications are not sent if a transaction is reorganized
// out of the chain and so that negative confirmations can be recognized.
txsByInitialHeight map[uint32]map[chainhash.Hash]struct{}
// ntfnsByConfirmHeight is an index of notification requests by the height
// at which the transaction will have sufficient confirmations.
ntfnsByConfirmHeight map[uint32]map[*ConfNtfn]struct{}
// hintCache is a cache used to maintain the latest height hints for
// transactions. Each height hint represents the earliest height at
// which the transactions could have been confirmed within the chain.
hintCache ConfirmHintCache
// quit is closed in order to signal that the notifier is gracefully
// exiting.
quit chan struct{}
sync.Mutex
}
// rescanState indicates the progression of a registration before the notifier
// can begin dispatching confirmations at tip.
type rescanState uint8
const (
// rescanNotStarted is the initial state, denoting that a historical
// dispatch may be required.
rescanNotStarted rescanState = iota
// rescanPending indicates that a dispatch has already been made, and we
// are waiting for its completion. No other rescans should be dispatched
// while in this state.
rescanPending
// rescanComplete signals either that a rescan was dispatched and has
// completed, or that we began watching at tip immediately. In either
// case, the notifier can only dispatch notifications from tip when in
// this state.
rescanComplete
)
// confNtfnSet holds all known, registered confirmation notifications for a
// single txid. If duplicate notifications are requested, only one historical
// dispatch will be spawned to ensure redundant scans are not permitted. A
// single conf detail will be constructed and dispatched to all interested
// clients.
type confNtfnSet struct {
ntfns map[uint64]*ConfNtfn
rescanStatus rescanState
details *TxConfirmation
}
// newConfNtfnSet constructs a fresh confNtfnSet for a group of clients
// interested in a notification for a particular txid.
func newConfNtfnSet() *confNtfnSet {
return &confNtfnSet{
ntfns: make(map[uint64]*ConfNtfn),
rescanStatus: rescanNotStarted,
}
}
// NewTxConfNotifier creates a TxConfNotifier. The current height of the
// blockchain is accepted as a parameter.
func NewTxConfNotifier(startHeight uint32, reorgSafetyLimit uint32,
hintCache ConfirmHintCache) *TxConfNotifier {
return &TxConfNotifier{
currentHeight: startHeight,
reorgSafetyLimit: reorgSafetyLimit,
confNotifications: make(map[chainhash.Hash]*confNtfnSet),
txsByInitialHeight: make(map[uint32]map[chainhash.Hash]struct{}),
ntfnsByConfirmHeight: make(map[uint32]map[*ConfNtfn]struct{}),
hintCache: hintCache,
quit: make(chan struct{}),
}
}
// Register handles a new notification request. The client will be notified when
// the transaction gets a sufficient number of confirmations on the blockchain.
// The registration succeeds if no error is returned. If the returned
// HistoricalConfDispatch is non-nil, the caller is responsible for attempting
// to manually rescan blocks for the txid between the start and end heights.
//
// NOTE: If the transaction has already been included in a block on the chain,
// the confirmation details must be provided with the UpdateConfDetails method,
// otherwise we will wait for the transaction to confirm even though it already
// has.
func (tcn *TxConfNotifier) Register(
ntfn *ConfNtfn) (*HistoricalConfDispatch, error) {
select {
case <-tcn.quit:
return nil, ErrTxConfNotifierExiting
default:
}
// Enforce that we will not dispatch confirmations beyond the reorg
// safety limit.
if ntfn.NumConfirmations > tcn.reorgSafetyLimit {
return nil, ErrTxMaxConfs
}
// Before proceeding to register the notification, we'll query our
// height hint cache to determine whether a better one exists.
//
// TODO(conner): verify that all submitted height hints are identical.
startHeight := ntfn.HeightHint
hint, err := tcn.hintCache.QueryConfirmHint(*ntfn.TxID)
if err == nil {
if hint > startHeight {
Log.Debugf("Using height hint %d retrieved "+
"from cache for %v", hint, *ntfn.TxID)
startHeight = hint
}
} else if err != ErrConfirmHintNotFound {
Log.Errorf("Unable to query confirm hint for %v: %v",
*ntfn.TxID, err)
}
tcn.Lock()
defer tcn.Unlock()
confSet, ok := tcn.confNotifications[*ntfn.TxID]
if !ok {
// If this is the first registration for this txid, construct a
// confSet to coalesce all notifications for the same txid.
confSet = newConfNtfnSet()
tcn.confNotifications[*ntfn.TxID] = confSet
}
confSet.ntfns[ntfn.ConfID] = ntfn
switch confSet.rescanStatus {
// A prior rescan has already completed and we are actively watching at
// tip for this txid.
case rescanComplete:
// If conf details for this set of notifications have already
// been found, we'll attempt to deliver them immediately to this
// client.
Log.Debugf("Attempting to dispatch conf for txid=%v "+
"on registration since rescan has finished", ntfn.TxID)
return nil, tcn.dispatchConfDetails(ntfn, confSet.details)
// A rescan is already in progress, return here to prevent dispatching
// another. When the scan returns, this notification's details will be
// updated as well.
case rescanPending:
Log.Debugf("Waiting for pending rescan to finish before "+
"notifying txid=%v at tip", ntfn.TxID)
return nil, nil
// If no rescan has been dispatched, attempt to do so now.
case rescanNotStarted:
}
// If the provided or cached height hint indicates that the transaction
// is to be confirmed at a height greater than the conf notifier's
// current height, we'll refrain from spawning a historical dispatch.
if startHeight > tcn.currentHeight {
Log.Debugf("Height hint is above current height, not dispatching "+
"historical rescan for txid=%v ", ntfn.TxID)
// Set the rescan status to complete, which will allow the conf
// notifier to start delivering messages for this set
// immediately.
confSet.rescanStatus = rescanComplete
return nil, nil
}
Log.Debugf("Dispatching historical rescan for txid=%v ", ntfn.TxID)
// Construct the parameters for historical dispatch, scanning the range
// of blocks between our best known height hint and the notifier's
// current height. The notifier will also begin watching for
// confirmations at tip starting with the next block.
dispatch := &HistoricalConfDispatch{
TxID: ntfn.TxID,
PkScript: ntfn.PkScript,
StartHeight: startHeight,
EndHeight: tcn.currentHeight,
}
// Set this confSet's status to pending, ensuring subsequent
// registrations don't also attempt a dispatch.
confSet.rescanStatus = rescanPending
return dispatch, nil
}
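The caller-side half of this contract can be sketched as follows (within this package, against the API being removed here); scanChain is a hypothetical stand-in for the backend-specific historical rescan.

// scanChain is a hypothetical rescan over the inclusive height range
// [start, end], returning nil details when the transaction isn't found.
func scanChain(txid *chainhash.Hash, start, end uint32) (*TxConfirmation, error) {
    // Backend-specific implementation elided.
    return nil, nil
}

// registerAndRescan sketches the contract around Register: a non-nil
// dispatch obliges the caller to rescan and report back, even when no
// confirmation was found.
func registerAndRescan(tcn *TxConfNotifier, ntfn *ConfNtfn) error {
    dispatch, err := tcn.Register(ntfn)
    if err != nil {
        return err
    }

    // A nil dispatch means the notifier is already watching at tip.
    if dispatch == nil {
        return nil
    }

    details, err := scanChain(
        dispatch.TxID, dispatch.StartHeight, dispatch.EndHeight,
    )
    if err != nil {
        return err
    }

    // Report the result even when details is nil so the notifier marks
    // the rescan complete and keeps watching at tip.
    return tcn.UpdateConfDetails(*dispatch.TxID, details)
}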
// UpdateConfDetails attempts to update the confirmation details for an active
// notification within the notifier. This should only be used in the case of a
// transaction that has confirmed before the notifier's current height.
//
// NOTE: The notification should be registered first to ensure notifications are
// dispatched correctly.
func (tcn *TxConfNotifier) UpdateConfDetails(txid chainhash.Hash,
details *TxConfirmation) error {
select {
case <-tcn.quit:
return ErrTxConfNotifierExiting
default:
}
// Ensure we hold the lock throughout handling the notification to
// prevent the notifier from advancing its height underneath us.
tcn.Lock()
defer tcn.Unlock()
// First, we'll determine whether we have an active notification for
// this transaction with the given ID.
confSet, ok := tcn.confNotifications[txid]
if !ok {
return fmt.Errorf("no notification found with TxID %v", txid)
}
// If the conf details were already found at tip, all existing
// notifications will have been dispatched or queued for dispatch. We
// can exit early to avoid sending too many notifications on the
// buffered channels.
if confSet.details != nil {
return nil
}
// The historical dispatch has been completed for this confSet. We'll
// update the rescan status and cache any details that were found. If
// the details are nil, that implies we did not find them and will
// continue to watch for them at tip.
confSet.rescanStatus = rescanComplete
// The notifier has yet to reach the height at which the transaction was
// included in a block, so we'll defer handling the notification until
// that height is reached within ConnectTip.
if details == nil {
Log.Debugf("Conf details for txid=%v not found during "+
"historical dispatch, waiting to dispatch at tip", txid)
return nil
}
if details.BlockHeight > tcn.currentHeight {
Log.Debugf("Conf details for txid=%v found above current "+
"height, waiting to dispatch at tip", txid)
return nil
}
Log.Debugf("Updating conf details for txid=%v details", txid)
err := tcn.hintCache.CommitConfirmHint(details.BlockHeight, txid)
if err != nil {
// The error is not fatal, so we should not return an error to
// the caller.
Log.Errorf("Unable to update confirm hint to %d for %v: %v",
details.BlockHeight, txid, err)
}
// Cache the details found in the rescan and attempt to dispatch any
// notifications that have not yet been delivered.
confSet.details = details
for _, ntfn := range confSet.ntfns {
err = tcn.dispatchConfDetails(ntfn, details)
if err != nil {
return err
}
}
return nil
}
// dispatchConfDetails attempts to cache and dispatch details to a particular
// client if the transaction has sufficiently confirmed. If the provided details
// are nil, this method will be a no-op.
func (tcn *TxConfNotifier) dispatchConfDetails(
ntfn *ConfNtfn, details *TxConfirmation) error {
// If no details are provided, return early as we can't dispatch.
if details == nil {
Log.Debugf("Unable to dispatch %v, no details provided",
ntfn.TxID)
return nil
}
// Now, we'll examine whether the transaction of this
// notification request has reached its required number of
// confirmations. If it has, we'll dispatch a confirmation
// notification to the caller.
confHeight := details.BlockHeight + ntfn.NumConfirmations - 1
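// For example, a transaction included at height 100,000 with
// NumConfirmations=6 yields confHeight=100,005: dispatch happens once five
// more blocks are stacked on top of the inclusion block.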
if confHeight <= tcn.currentHeight {
Log.Infof("Dispatching %v conf notification for %v",
ntfn.NumConfirmations, ntfn.TxID)
// We'll send a 0 value to the Updates channel,
// indicating that the transaction has already been
// confirmed.
select {
case ntfn.Event.Updates <- 0:
case <-tcn.quit:
return ErrTxConfNotifierExiting
}
select {
case ntfn.Event.Confirmed <- details:
ntfn.dispatched = true
case <-tcn.quit:
return ErrTxConfNotifierExiting
}
} else {
Log.Debugf("Queueing %v conf notification for %v at tip ",
ntfn.NumConfirmations, ntfn.TxID)
// Otherwise, we'll keep track of the notification
// request by the height at which we should dispatch the
// confirmation notification.
ntfnSet, exists := tcn.ntfnsByConfirmHeight[confHeight]
if !exists {
ntfnSet = make(map[*ConfNtfn]struct{})
tcn.ntfnsByConfirmHeight[confHeight] = ntfnSet
}
ntfnSet[ntfn] = struct{}{}
// We'll also send an update to the client of how many
// confirmations are left for the transaction to be
// confirmed.
numConfsLeft := confHeight - tcn.currentHeight
select {
case ntfn.Event.Updates <- numConfsLeft:
case <-tcn.quit:
return ErrTxConfNotifierExiting
}
}
// As a final check, we'll also watch the transaction if it's
// still possible for it to get reorged out of the chain.
blockHeight := details.BlockHeight
reorgSafeHeight := blockHeight + tcn.reorgSafetyLimit
if reorgSafeHeight > tcn.currentHeight {
txSet, exists := tcn.txsByInitialHeight[blockHeight]
if !exists {
txSet = make(map[chainhash.Hash]struct{})
tcn.txsByInitialHeight[blockHeight] = txSet
}
txSet[*ntfn.TxID] = struct{}{}
}
return nil
}
// ConnectTip handles a new block extending the current chain. This checks each
// transaction in the block to see if any watched transactions are included.
// Also, if any watched transactions now have the required number of
// confirmations as a result of this block being connected, this dispatches
// notifications.
func (tcn *TxConfNotifier) ConnectTip(blockHash *chainhash.Hash,
blockHeight uint32, txns []*btcutil.Tx) error {
select {
case <-tcn.quit:
return ErrTxConfNotifierExiting
default:
}
tcn.Lock()
defer tcn.Unlock()
if blockHeight != tcn.currentHeight+1 {
return fmt.Errorf("Received blocks out of order: "+
"current height=%d, new height=%d",
tcn.currentHeight, blockHeight)
}
tcn.currentHeight++
tcn.reorgDepth = 0
// Record any newly confirmed transactions by their confirmed height so
// that notifications get dispatched when the transactions reach their
// required number of confirmations. We'll also watch these transactions
// at the height they were included in the chain so reorgs can be
// handled correctly.
for _, tx := range txns {
txHash := tx.Hash()
// Check if we have any pending notifications for this txid. If
// none are found, we can proceed to the next transaction.
confSet, ok := tcn.confNotifications[*txHash]
if !ok {
continue
}
Log.Debugf("Block contains txid=%v, constructing details",
txHash)
// If we have any, we'll record its confirmed height so that
// notifications get dispatched when the transaction reaches the
// clients' desired number of confirmations.
details := &TxConfirmation{
BlockHash: blockHash,
BlockHeight: blockHeight,
TxIndex: uint32(tx.Index()),
}
confSet.rescanStatus = rescanComplete
confSet.details = details
for _, ntfn := range confSet.ntfns {
// In the event that this notification was aware that
// the transaction was reorged out of the chain, we'll
// consume the reorg notification if it hasn't been done
// yet already.
select {
case <-ntfn.Event.NegativeConf:
default:
}
// We'll note this client's required number of
// confirmations so that we can notify them when
// expected.
confHeight := blockHeight + ntfn.NumConfirmations - 1
ntfnSet, exists := tcn.ntfnsByConfirmHeight[confHeight]
if !exists {
ntfnSet = make(map[*ConfNtfn]struct{})
tcn.ntfnsByConfirmHeight[confHeight] = ntfnSet
}
ntfnSet[ntfn] = struct{}{}
// We'll also note the initial confirmation height in
// order to correctly handle dispatching notifications
// when the transaction gets reorged out of the chain.
txSet, exists := tcn.txsByInitialHeight[blockHeight]
if !exists {
txSet = make(map[chainhash.Hash]struct{})
tcn.txsByInitialHeight[blockHeight] = txSet
}
txSet[*txHash] = struct{}{}
}
}
// In order to update the height hint for all the required transactions
// under one database transaction, we'll gather the set of unconfirmed
// transactions along with the ones that confirmed at the current
// height. To do so, we'll iterate over the confNotifications map, which
// contains the transactions we currently have notifications for. Since
// this map doesn't tell us whether the transaction has confirmed or
// not, we'll need to look at txsByInitialHeight to determine so.
var txsToUpdateHints []chainhash.Hash
for confirmedTx := range tcn.txsByInitialHeight[tcn.currentHeight] {
txsToUpdateHints = append(txsToUpdateHints, confirmedTx)
}
out:
for maybeUnconfirmedTx, confSet := range tcn.confNotifications {
// We shouldn't update the confirm hints if we still have a
// pending rescan in progress. We'll skip writing any for
// notification sets that haven't reached rescanComplete.
if confSet.rescanStatus != rescanComplete {
continue
}
for height, confirmedTxs := range tcn.txsByInitialHeight {
// Skip the transactions that confirmed at the new block
// height as those have already been added.
if height == blockHeight {
continue
}
// If the transaction was found within the set of
// confirmed transactions at this height, we'll skip it.
if _, ok := confirmedTxs[maybeUnconfirmedTx]; ok {
continue out
}
}
txsToUpdateHints = append(txsToUpdateHints, maybeUnconfirmedTx)
}
if len(txsToUpdateHints) > 0 {
err := tcn.hintCache.CommitConfirmHint(
tcn.currentHeight, txsToUpdateHints...,
)
if err != nil {
// The error is not fatal, so we should not return an
// error to the caller.
Log.Errorf("Unable to update confirm hint to %d for "+
"%v: %v", tcn.currentHeight, txsToUpdateHints,
err)
}
}
// Next, we'll dispatch an update to all of the notification clients for
// our watched transactions with the number of confirmations left at
// this new height.
for _, txHashes := range tcn.txsByInitialHeight {
for txHash := range txHashes {
confSet := tcn.confNotifications[txHash]
for _, ntfn := range confSet.ntfns {
txConfHeight := confSet.details.BlockHeight +
ntfn.NumConfirmations - 1
numConfsLeft := txConfHeight - blockHeight
// Since we don't clear notifications until
// transactions are no longer under the risk of
// being reorganized out of the chain, we'll
// skip sending updates for transactions that
// have already been confirmed.
if int32(numConfsLeft) < 0 {
continue
}
select {
case ntfn.Event.Updates <- numConfsLeft:
case <-tcn.quit:
return ErrTxConfNotifierExiting
}
}
}
}
// Then, we'll dispatch notifications for all the transactions that have
// become confirmed at this new block height.
for ntfn := range tcn.ntfnsByConfirmHeight[blockHeight] {
confSet := tcn.confNotifications[*ntfn.TxID]
Log.Infof("Dispatching %v conf notification for %v",
ntfn.NumConfirmations, ntfn.TxID)
select {
case ntfn.Event.Confirmed <- confSet.details:
ntfn.dispatched = true
case <-tcn.quit:
return ErrTxConfNotifierExiting
}
}
delete(tcn.ntfnsByConfirmHeight, tcn.currentHeight)
// Clear entries from confNotifications and txsByInitialHeight. We
// assume that reorgs deeper than the reorg safety limit do not happen,
// so we can clear out entries for the block that is now mature.
if tcn.currentHeight >= tcn.reorgSafetyLimit {
matureBlockHeight := tcn.currentHeight - tcn.reorgSafetyLimit
for txHash := range tcn.txsByInitialHeight[matureBlockHeight] {
delete(tcn.confNotifications, txHash)
}
delete(tcn.txsByInitialHeight, matureBlockHeight)
}
return nil
}
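As a minimal sketch (helper name hypothetical, btcutil already imported by this file), a backend feeds blocks strictly in order:

// applyBlock forwards one connected block to the notifier; out-of-order
// heights are rejected by ConnectTip as shown above. The block's height is
// assumed to have been populated via btcutil's SetHeight, since Height
// otherwise returns a placeholder value.
func applyBlock(tcn *TxConfNotifier, block *btcutil.Block) error {
    return tcn.ConnectTip(
        block.Hash(), uint32(block.Height()), block.Transactions(),
    )
}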
// DisconnectTip handles the tip of the current chain being disconnected during
// a chain reorganization. If any watched transactions were included in this
// block, internal structures are updated to ensure a confirmation notification
// is not sent unless the transaction is included in the new chain.
func (tcn *TxConfNotifier) DisconnectTip(blockHeight uint32) error {
select {
case <-tcn.quit:
return ErrTxConfNotifierExiting
default:
}
tcn.Lock()
defer tcn.Unlock()
if blockHeight != tcn.currentHeight {
return fmt.Errorf("Received blocks out of order: "+
"current height=%d, disconnected height=%d",
tcn.currentHeight, blockHeight)
}
tcn.currentHeight--
tcn.reorgDepth++
// Rewind the height hint for all watched transactions.
var txs []chainhash.Hash
for tx := range tcn.confNotifications {
txs = append(txs, tx)
}
err := tcn.hintCache.CommitConfirmHint(tcn.currentHeight, txs...)
if err != nil {
Log.Errorf("Unable to update confirm hint to %d for %v: %v",
tcn.currentHeight, txs, err)
return err
}
// We'll go through all of our watched transactions and attempt to drain
// their notification channels to ensure sending notifications to the
// clients is always non-blocking.
for initialHeight, txHashes := range tcn.txsByInitialHeight {
for txHash := range txHashes {
// If the transaction has been reorged out of the chain,
// we'll make sure to remove the cached confirmation
// details to prevent notifying clients with old
// information.
confSet := tcn.confNotifications[txHash]
if initialHeight == blockHeight {
confSet.details = nil
}
for _, ntfn := range confSet.ntfns {
// First, we'll attempt to drain an update
// from each notification to ensure sends to the
// Updates channel are always non-blocking.
select {
case <-ntfn.Event.Updates:
case <-tcn.quit:
return ErrTxConfNotifierExiting
default:
}
// Then, we'll check if the current transaction
// was included in the block currently being
// disconnected. If it was, we'll need to
// dispatch a reorg notification to the client.
if initialHeight == blockHeight {
err := tcn.dispatchConfReorg(
ntfn, blockHeight,
)
if err != nil {
return err
}
}
}
}
}
// Finally, we can remove the transactions we're currently watching that
// were included in this block height.
delete(tcn.txsByInitialHeight, blockHeight)
return nil
}
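The reverse direction during a reorg, again as a hypothetical sketch: stale blocks are disconnected from the old tip downward so each call matches the height check above.

// unwindReorg disconnects depth stale blocks, walking from the old tip
// downward.
func unwindReorg(tcn *TxConfNotifier, oldTip, depth uint32) error {
    for height := oldTip; height > oldTip-depth; height-- {
        if err := tcn.DisconnectTip(height); err != nil {
            return err
        }
    }
    return nil
}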
// dispatchConfReorg dispatches a reorg notification to the client if the
// confirmation notification was already delivered.
//
// NOTE: This must be called with the TxNotifier's lock held.
func (tcn *TxConfNotifier) dispatchConfReorg(
ntfn *ConfNtfn, heightDisconnected uint32) error {
// If the transaction's confirmation notification has yet to be
// dispatched, we'll need to clear its entry within the
// ntfnsByConfirmHeight index to prevent notifying the client once
// the notifier reaches the confirmation height.
if !ntfn.dispatched {
confHeight := heightDisconnected + ntfn.NumConfirmations - 1
ntfnSet, exists := tcn.ntfnsByConfirmHeight[confHeight]
if exists {
delete(ntfnSet, ntfn)
}
return nil
}
// Otherwise, the entry within the ntfnsByConfirmHeight has already been
// deleted, so we'll attempt to drain the confirmation notification to
// ensure sends to the Confirmed channel are always non-blocking.
select {
case <-ntfn.Event.Confirmed:
case <-tcn.quit:
return ErrTxConfNotifierExiting
default:
}
ntfn.dispatched = false
// Send a negative confirmation notification to the client indicating
// how many blocks have been disconnected successively.
select {
case ntfn.Event.NegativeConf <- int32(tcn.reorgDepth):
case <-tcn.quit:
return ErrTxConfNotifierExiting
}
return nil
}
// TearDown is to be called when the owner of the TxConfNotifier is exiting.
// This closes the event channels of all registered notifications that have
// not been dispatched yet.
func (tcn *TxConfNotifier) TearDown() {
tcn.Lock()
defer tcn.Unlock()
close(tcn.quit)
for _, confSet := range tcn.confNotifications {
for _, ntfn := range confSet.ntfns {
if ntfn.dispatched {
continue
}
close(ntfn.Event.Confirmed)
close(ntfn.Event.Updates)
close(ntfn.Event.NegativeConf)
}
}
}

File diff suppressed because it is too large

chainntnfs/txnotifier.go (new file, 1388 lines)

File diff suppressed because it is too large

File diff suppressed because it is too large