github.com/decred/dcrlnd@v0.7.6/chainntnfs/dcrdnotify/dcrd.go (about)

     1  package dcrdnotify
     2  
     3  import (
     4  	"context"
     5  	"encoding/hex"
     6  	"errors"
     7  	"fmt"
     8  	"sync"
     9  	"sync/atomic"
    10  	"time"
    11  
    12  	"github.com/decred/dcrd/chaincfg/chainhash"
    13  	"github.com/decred/dcrd/chaincfg/v3"
    14  	"github.com/decred/dcrd/dcrjson/v4"
    15  	"github.com/decred/dcrd/dcrutil/v4"
    16  	jsontypes "github.com/decred/dcrd/rpc/jsonrpc/types/v4"
    17  	"github.com/decred/dcrd/rpcclient/v8"
    18  	"github.com/decred/dcrd/txscript/v4/stdaddr"
    19  	"github.com/decred/dcrd/txscript/v4/stdscript"
    20  	"github.com/decred/dcrd/wire"
    21  	"github.com/decred/dcrlnd/blockcache"
    22  	"github.com/decred/dcrlnd/chainntnfs"
    23  	"github.com/decred/dcrlnd/chainscan"
    24  	"github.com/decred/dcrlnd/queue"
    25  )
    26  
const (
	// notifierType uniquely identifies this concrete implementation of the
	// ChainNotifier interface.
	//
	// NOTE(review): not referenced within this chunk; presumably used by a
	// backend-registration mechanism elsewhere in the package — confirm.
	notifierType = "dcrd"
)
    32  
    33  var (
    34  	// ErrChainNotifierShuttingDown is used when we are trying to
    35  	// measure a spend notification when notifier is already stopped.
    36  	ErrChainNotifierShuttingDown = errors.New("chainntnfs: system interrupt " +
    37  		"while attempting to register for spend notification")
    38  
    39  	// errInefficientRescanTxNotFound is used when manually calling the
    40  	// inefficient rescan method.
    41  	errInefficientRescanTxNotFound = errors.New("chainntnfs: tx not found " +
    42  		"after inneficient rescan")
    43  )
    44  
// chainConnAdaptor adapts the context-taking rpcclient.Client methods to the
// context-less signatures expected by the chainntnfs helper functions (it is
// passed as n.cca to GetClientMissedBlocks, HandleMissedBlocks, etc.).
type chainConnAdaptor struct {
	// c is the underlying RPC connection to the dcrd node.
	c *rpcclient.Client

	// ctx is the context supplied to every forwarded call. NOTE(review):
	// currently always context.TODO() (set in New).
	ctx context.Context
}
    49  
    50  func (cca *chainConnAdaptor) GetBlockHeader(blockHash *chainhash.Hash) (*wire.BlockHeader, error) {
    51  	return cca.c.GetBlockHeader(cca.ctx, blockHash)
    52  }
    53  
    54  func (cca *chainConnAdaptor) GetBlockHash(blockHeight int64) (*chainhash.Hash, error) {
    55  	return cca.c.GetBlockHash(cca.ctx, blockHeight)
    56  }
    57  
    58  func (cca *chainConnAdaptor) GetBlockVerbose(hash *chainhash.Hash, b bool) (*jsontypes.GetBlockVerboseResult, error) {
    59  	return cca.c.GetBlockVerbose(cca.ctx, hash, b)
    60  }
    61  
    62  func (cca *chainConnAdaptor) GetRawTransactionVerbose(hash *chainhash.Hash) (*jsontypes.TxRawResult, error) {
    63  	return cca.c.GetRawTransactionVerbose(cca.ctx, hash)
    64  }
    65  
    66  // TODO(roasbeef): generalize struct below: * move chans to config, allow
    67  // outside callers to handle send conditions
    68  
// DcrdNotifier implements the ChainNotifier interface using dcrd's websockets
// notifications. Multiple concurrent clients are supported. All notifications
// are achieved via non-blocking sends on client channels.
type DcrdNotifier struct {
	// epochClientCounter hands out unique ids to block epoch clients.
	epochClientCounter uint64 // To be used atomically.

	// start guarantees startNotifier runs at most once.
	start   sync.Once
	active  int32 // To be used atomically.
	stopped int32 // To be used atomically.

	// chainConn is the websocket RPC connection to the dcrd node.
	chainConn *rpcclient.Client

	// cca adapts chainConn to the context-less interface consumed by the
	// chainntnfs helper functions.
	cca *chainConnAdaptor

	// chainParams identifies which Decred network is being watched.
	chainParams *chaincfg.Params

	// blockCache caches fetched blocks to avoid redundant GetBlock RPCs.
	blockCache *blockcache.BlockCache

	// notificationCancels and notificationRegistry carry client
	// cancellation/registration messages into notificationDispatcher.
	notificationCancels  chan interface{}
	notificationRegistry chan interface{}

	// txNotifier tracks confirmation and spend registrations against
	// incoming blocks. Only set after a successful Start().
	txNotifier *chainntnfs.TxNotifier

	// blockEpochClients maps epoch ids to registered epoch clients.
	blockEpochClients map[uint64]*blockEpochRegistration

	// bestBlock is the notifier's current view of the chain tip.
	bestBlock chainntnfs.BlockEpoch

	// chainUpdates queues filteredBlock updates produced by the
	// OnBlockConnected/OnBlockDisconnected callbacks for the dispatcher.
	chainUpdates *queue.ConcurrentQueue

	// spendHintCache is a cache used to query and update the latest height
	// hints for an outpoint. Each height hint represents the earliest
	// height at which the outpoint could have been spent within the chain.
	spendHintCache chainntnfs.SpendHintCache

	// confirmHintCache is a cache used to query the latest height hints for
	// a transaction. Each height hint represents the earliest height at
	// which the transaction could have confirmed within the chain.
	confirmHintCache chainntnfs.ConfirmHintCache

	// wg tracks the dispatcher and historical-rescan goroutines; quit
	// signals them to exit.
	wg   sync.WaitGroup
	quit chan struct{}
}
   108  
   109  // Ensure DcrdNotifier implements the ChainNotifier interface at compile time.
   110  var _ chainntnfs.ChainNotifier = (*DcrdNotifier)(nil)
   111  
   112  // New returns a new DcrdNotifier instance. This function assumes the dcrd node
   113  // detailed in the passed configuration is already running, and willing to
   114  // accept new websockets clients.
   115  func New(config *rpcclient.ConnConfig, chainParams *chaincfg.Params,
   116  	spendHintCache chainntnfs.SpendHintCache,
   117  	confirmHintCache chainntnfs.ConfirmHintCache,
   118  	blockCache *blockcache.BlockCache) (*DcrdNotifier, error) {
   119  
   120  	notifier := &DcrdNotifier{
   121  		chainParams: chainParams,
   122  		blockCache:  blockCache,
   123  
   124  		notificationCancels:  make(chan interface{}),
   125  		notificationRegistry: make(chan interface{}),
   126  
   127  		blockEpochClients: make(map[uint64]*blockEpochRegistration),
   128  
   129  		chainUpdates: queue.NewConcurrentQueue(10),
   130  
   131  		spendHintCache:   spendHintCache,
   132  		confirmHintCache: confirmHintCache,
   133  
   134  		quit: make(chan struct{}),
   135  	}
   136  
   137  	ntfnCallbacks := &rpcclient.NotificationHandlers{
   138  		OnBlockConnected:    notifier.onBlockConnected,
   139  		OnBlockDisconnected: notifier.onBlockDisconnected,
   140  	}
   141  
   142  	// Disable connecting to dcrd within the rpcclient.New method. We defer
   143  	// establishing the connection to our .Start() method.
   144  	config.DisableConnectOnNew = true
   145  	config.DisableAutoReconnect = false
   146  	chainConn, err := rpcclient.New(config, ntfnCallbacks)
   147  	if err != nil {
   148  		return nil, err
   149  	}
   150  	notifier.chainConn = chainConn
   151  	notifier.cca = &chainConnAdaptor{c: chainConn, ctx: context.TODO()}
   152  
   153  	return notifier, nil
   154  }
   155  
   156  // Start connects to the running dcrd node over websockets, registers for block
   157  // notifications, and finally launches all related helper goroutines.
   158  func (n *DcrdNotifier) Start() error {
   159  	var startErr error
   160  	n.start.Do(func() {
   161  		startErr = n.startNotifier()
   162  	})
   163  	return startErr
   164  }
   165  
   166  func (n *DcrdNotifier) startNotifier() error {
   167  	chainntnfs.Log.Infof("Starting dcrd notifier")
   168  
   169  	n.chainUpdates.Start()
   170  
   171  	// TODO(decred): Handle 20 retries...
   172  	//
   173  	// Connect to dcrd, and register for notifications on connected, and
   174  	// disconnected blocks.
   175  	if err := n.chainConn.Connect(context.Background(), true); err != nil {
   176  		n.chainUpdates.Stop()
   177  		return err
   178  	}
   179  	if err := n.chainConn.NotifyBlocks(context.TODO()); err != nil {
   180  		n.chainUpdates.Stop()
   181  		return err
   182  	}
   183  
   184  	currentHash, currentHeight, err := n.chainConn.GetBestBlock(context.TODO())
   185  	if err != nil {
   186  		n.chainUpdates.Stop()
   187  		return err
   188  	}
   189  
   190  	currentHeader, err := n.chainConn.GetBlockHeader(context.TODO(), currentHash)
   191  	if err != nil {
   192  		n.chainUpdates.Stop()
   193  		return err
   194  	}
   195  
   196  	n.txNotifier = chainntnfs.NewTxNotifier(
   197  		uint32(currentHeight), chainntnfs.ReorgSafetyLimit,
   198  		n.confirmHintCache, n.spendHintCache, n.chainParams,
   199  	)
   200  
   201  	n.bestBlock = chainntnfs.BlockEpoch{
   202  		Height:      int32(currentHeight),
   203  		Hash:        currentHash,
   204  		BlockHeader: currentHeader,
   205  	}
   206  
   207  	n.wg.Add(1)
   208  	go n.notificationDispatcher()
   209  
   210  	// Set the active flag now that we've completed the full
   211  	// startup.
   212  	atomic.StoreInt32(&n.active, 1)
   213  
   214  	return nil
   215  }
   216  
   217  // Started returns true if this instance has been started, and false otherwise.
   218  func (n *DcrdNotifier) Started() bool {
   219  	return atomic.LoadInt32(&n.active) != 0
   220  }
   221  
   222  // Stop shutsdown the DcrdNotifier.
   223  func (n *DcrdNotifier) Stop() error {
   224  	// Already shutting down?
   225  	if atomic.AddInt32(&n.stopped, 1) != 1 {
   226  		return nil
   227  	}
   228  
   229  	chainntnfs.Log.Info("dcrd notifier shutting down")
   230  
   231  	// Shutdown the rpc client, this gracefully disconnects from btcd, and
   232  	// cleans up all related resources.
   233  	n.chainConn.Shutdown()
   234  
   235  	close(n.quit)
   236  	n.wg.Wait()
   237  
   238  	n.chainUpdates.Stop()
   239  
   240  	// Notify all pending clients of our shutdown by closing the related
   241  	// notification channels.
   242  	for _, epochClient := range n.blockEpochClients {
   243  		close(epochClient.cancelChan)
   244  		epochClient.wg.Wait()
   245  
   246  		close(epochClient.epochChan)
   247  	}
   248  	n.txNotifier.TearDown()
   249  
   250  	return nil
   251  }
   252  
   253  func (n *DcrdNotifier) getBlock(ctx context.Context, bh *chainhash.Hash) (*wire.MsgBlock, error) {
   254  	return n.blockCache.GetBlock(ctx, bh, n.chainConn.GetBlock)
   255  }
   256  
// filteredBlock represents a new block which has been connected to the main
// chain. The slice of transactions will only be populated if the block
// includes a transaction that confirmed one of our watched txids, or spends
// one of the outputs currently being watched.
type filteredBlock struct {
	// header is the wire header of the block in question.
	header *wire.BlockHeader

	// txns holds the (regular-tree) transactions carried by the update;
	// may be empty for disconnected blocks.
	txns []*dcrutil.Tx

	// connect is true if this update is a new block and false if it is a
	// disconnected block.
	connect bool
}
   269  
   270  // onBlockConnected implements on OnBlockConnected callback for rpcclient.
   271  func (n *DcrdNotifier) onBlockConnected(blockHeader []byte, transactions [][]byte) {
   272  	var header wire.BlockHeader
   273  	if err := header.FromBytes(blockHeader); err != nil {
   274  		chainntnfs.Log.Warnf("Received block connected with malformed "+
   275  			"header: %v", err)
   276  		return
   277  	}
   278  
   279  	txns := make([]*dcrutil.Tx, 0, len(transactions))
   280  	for _, txBytes := range transactions {
   281  		var tx wire.MsgTx
   282  		if err := tx.FromBytes(txBytes); err != nil {
   283  			chainntnfs.Log.Warnf("Received block connected with malformed "+
   284  				"transaction: %v", err)
   285  			return
   286  		}
   287  
   288  		txns = append(txns, dcrutil.NewTx(&tx))
   289  	}
   290  
   291  	// Append this new chain update to the end of the queue of new chain
   292  	// updates.
   293  	select {
   294  	case n.chainUpdates.ChanIn() <- &filteredBlock{
   295  		header:  &header,
   296  		txns:    txns,
   297  		connect: true,
   298  	}:
   299  	case <-n.quit:
   300  		return
   301  	}
   302  }
   303  
   304  // onBlockDisconnected implements on OnBlockDisconnected callback for rpcclient.
   305  func (n *DcrdNotifier) onBlockDisconnected(blockHeader []byte) {
   306  	var header wire.BlockHeader
   307  	if err := header.FromBytes(blockHeader); err != nil {
   308  		chainntnfs.Log.Warnf("Received block disconnected with malformed "+
   309  			"header: %v", err)
   310  		return
   311  	}
   312  
   313  	// Append this new chain update to the end of the queue of new chain
   314  	// updates.
   315  	select {
   316  	case n.chainUpdates.ChanIn() <- &filteredBlock{
   317  		header:  &header,
   318  		connect: false,
   319  	}:
   320  	case <-n.quit:
   321  		return
   322  	}
   323  }
   324  
   325  // notificationDispatcher is the primary goroutine which handles client
   326  // notification registrations, as well as notification dispatches.
   327  func (n *DcrdNotifier) notificationDispatcher() {
   328  	defer n.wg.Done()
   329  
   330  out:
   331  	for {
   332  		select {
   333  		case cancelMsg := <-n.notificationCancels:
   334  			switch msg := cancelMsg.(type) {
   335  			case *epochCancel:
   336  				chainntnfs.Log.Infof("Cancelling epoch "+
   337  					"notification, epoch_id=%v", msg.epochID)
   338  
   339  				// First, we'll lookup the original
   340  				// registration in order to stop the active
   341  				// queue goroutine.
   342  				reg := n.blockEpochClients[msg.epochID]
   343  				reg.epochQueue.Stop()
   344  
   345  				// Next, close the cancel channel for this
   346  				// specific client, and wait for the client to
   347  				// exit.
   348  				close(n.blockEpochClients[msg.epochID].cancelChan)
   349  				n.blockEpochClients[msg.epochID].wg.Wait()
   350  
   351  				// Once the client has exited, we can then
   352  				// safely close the channel used to send epoch
   353  				// notifications, in order to notify any
   354  				// listeners that the intent has been
   355  				// canceled.
   356  				close(n.blockEpochClients[msg.epochID].epochChan)
   357  				delete(n.blockEpochClients, msg.epochID)
   358  			}
   359  
   360  		case registerMsg := <-n.notificationRegistry:
   361  			switch msg := registerMsg.(type) {
   362  			case *chainntnfs.HistoricalConfDispatch:
   363  				// Look up whether the transaction/output script
   364  				// has already confirmed in the active chain.
   365  				// We'll do this in a goroutine to prevent
   366  				// blocking potentially long rescans.
   367  				//
   368  				// TODO(wilmer): add retry logic if rescan fails?
   369  				n.wg.Add(1)
   370  				go func() {
   371  					defer n.wg.Done()
   372  
   373  					confDetails, _, err := n.historicalConfDetails(
   374  						msg.ConfRequest,
   375  						msg.StartHeight, msg.EndHeight,
   376  					)
   377  					if err != nil {
   378  						chainntnfs.Log.Error(err)
   379  						return
   380  					}
   381  
   382  					// If the historical dispatch finished
   383  					// without error, we will invoke
   384  					// UpdateConfDetails even if none were
   385  					// found. This allows the notifier to
   386  					// begin safely updating the height hint
   387  					// cache at tip, since any pending
   388  					// rescans have now completed.
   389  					err = n.txNotifier.UpdateConfDetails(
   390  						msg.ConfRequest, confDetails,
   391  					)
   392  					if err != nil {
   393  						chainntnfs.Log.Error(err)
   394  					}
   395  				}()
   396  
   397  			case *blockEpochRegistration:
   398  				chainntnfs.Log.Infof("New block epoch subscription")
   399  
   400  				n.blockEpochClients[msg.epochID] = msg
   401  
   402  				// If the client did not provide their best
   403  				// known block, then we'll immediately dispatch
   404  				// a notification for the current tip.
   405  				if msg.bestBlock == nil {
   406  					n.notifyBlockEpochClient(
   407  						msg, n.bestBlock.Height,
   408  						n.bestBlock.Hash,
   409  						n.bestBlock.BlockHeader,
   410  					)
   411  
   412  					msg.errorChan <- nil
   413  					continue
   414  				}
   415  
   416  				// Otherwise, we'll attempt to deliver the
   417  				// backlog of notifications from their best
   418  				// known block.
   419  				missedBlocks, err := chainntnfs.GetClientMissedBlocks(
   420  					n.cca, msg.bestBlock,
   421  					n.bestBlock.Height, true,
   422  				)
   423  				if err != nil {
   424  					msg.errorChan <- err
   425  					continue
   426  				}
   427  
   428  				for _, block := range missedBlocks {
   429  					n.notifyBlockEpochClient(
   430  						msg, block.Height, block.Hash, block.BlockHeader,
   431  					)
   432  				}
   433  
   434  				msg.errorChan <- nil
   435  			}
   436  
   437  		case item := <-n.chainUpdates.ChanOut():
   438  			update := item.(*filteredBlock)
   439  			header := update.header
   440  			if update.connect {
   441  				if header.PrevBlock != *n.bestBlock.Hash {
   442  					// Handle the case where the notifier
   443  					// missed some blocks from its chain
   444  					// backend
   445  					chainntnfs.Log.Infof("Missed blocks, " +
   446  						"attempting to catch up")
   447  					newBestBlock, missedBlocks, err :=
   448  						chainntnfs.HandleMissedBlocks(
   449  							n.cca,
   450  							n.txNotifier,
   451  							n.bestBlock,
   452  							int32(header.Height),
   453  							true,
   454  						)
   455  					if err != nil {
   456  						// Set the bestBlock here in case
   457  						// a catch up partially completed.
   458  						n.bestBlock = newBestBlock
   459  						chainntnfs.Log.Error(err)
   460  						continue
   461  					}
   462  
   463  					for _, block := range missedBlocks {
   464  						filteredBlock, err := n.fetchFilteredBlock(block)
   465  						if err != nil {
   466  							chainntnfs.Log.Error(err)
   467  							continue out
   468  						}
   469  
   470  						err = n.handleBlockConnected(filteredBlock)
   471  						if err != nil {
   472  							chainntnfs.Log.Error(err)
   473  							continue out
   474  						}
   475  					}
   476  				}
   477  
   478  				// TODO(decred) Discuss and decide how to do this.
   479  				// This is necessary because in dcrd, OnBlockConnected will
   480  				// only return filtered transactions, so we need to actually
   481  				// load a watched transaction using LoadTxFilter (which is
   482  				// currently not done in RegisterConfirmationsNtfn).
   483  				bh := update.header.BlockHash()
   484  				filteredBlock, err := n.fetchFilteredBlockForBlockHash(&bh)
   485  				if err != nil {
   486  					chainntnfs.Log.Error(err)
   487  					continue
   488  				}
   489  
   490  				if err := n.handleBlockConnected(filteredBlock); err != nil {
   491  					chainntnfs.Log.Error(err)
   492  				}
   493  				continue
   494  			}
   495  
   496  			if header.Height != uint32(n.bestBlock.Height) {
   497  				chainntnfs.Log.Infof("Missed disconnected" +
   498  					"blocks, attempting to catch up")
   499  			}
   500  
   501  			newBestBlock, err := chainntnfs.RewindChain(
   502  				n.cca, n.txNotifier, n.bestBlock,
   503  				int32(header.Height-1),
   504  			)
   505  			if err != nil {
   506  				chainntnfs.Log.Errorf("Unable to rewind chain "+
   507  					"from height %d to height %d: %v",
   508  					n.bestBlock.Height, int32(header.Height-1), err)
   509  			}
   510  
   511  			// Set the bestBlock here in case a chain rewind
   512  			// partially completed.
   513  			n.bestBlock = newBestBlock
   514  
   515  		case <-n.quit:
   516  			break out
   517  		}
   518  	}
   519  }
   520  
   521  // historicalConfDetails looks up whether a confirmation request (txid/output
   522  // script) has already been included in a block in the active chain and, if so,
   523  // returns details about said block.
   524  func (n *DcrdNotifier) historicalConfDetails(confRequest chainntnfs.ConfRequest,
   525  	startHeight, endHeight uint32) (*chainntnfs.TxConfirmation,
   526  	chainntnfs.TxConfStatus, error) {
   527  
   528  	// If a txid was not provided, then we should dispatch upon seeing the
   529  	// script on-chain, so we'll short-circuit straight to scanning manually
   530  	// as there doesn't exist a script index to query.
   531  	if confRequest.TxID == chainntnfs.ZeroHash {
   532  		return n.confDetailsManually(
   533  			confRequest, startHeight, endHeight,
   534  		)
   535  	}
   536  
   537  	// Otherwise, we'll dispatch upon seeing a transaction on-chain with the
   538  	// given hash.
   539  	//
   540  	// We'll first attempt to retrieve the transaction using the node's
   541  	// txindex.
   542  	txNotFoundErr := "No information available about transaction"
   543  	txConf, txStatus, err := chainntnfs.ConfDetailsFromTxIndex(
   544  		n.cca, confRequest, txNotFoundErr,
   545  	)
   546  
   547  	// We'll then check the status of the transaction lookup returned to
   548  	// determine whether we should proceed with any fallback methods.
   549  	switch {
   550  
   551  	// We failed querying the index for the transaction, fall back to
   552  	// scanning manually.
   553  	case err != nil:
   554  		chainntnfs.Log.Debugf("Unable to determine confirmation of %v "+
   555  			"through the backend's txindex (%v), scanning manually",
   556  			confRequest.TxID, err)
   557  
   558  		return n.confDetailsManually(
   559  			confRequest, startHeight, endHeight,
   560  		)
   561  
   562  	// The transaction was found within the node's mempool.
   563  	case txStatus == chainntnfs.TxFoundMempool:
   564  
   565  	// The transaction was found within the node's txindex.
   566  	case txStatus == chainntnfs.TxFoundIndex:
   567  
   568  	// The transaction was not found within the node's mempool or txindex.
   569  	case txStatus == chainntnfs.TxNotFoundIndex:
   570  
   571  	// Unexpected txStatus returned.
   572  	default:
   573  		return nil, txStatus,
   574  			fmt.Errorf("got unexpected txConfStatus: %v", txStatus)
   575  	}
   576  
   577  	return txConf, txStatus, nil
   578  }
   579  
   580  // confDetailsManually looks up whether a transaction/output script has already
   581  // been included in a block in the active chain by scanning the chain's blocks
   582  // within the given range. If the transaction/output script is found, its
   583  // confirmation details are returned. Otherwise, nil is returned.
   584  func (n *DcrdNotifier) confDetailsManually(confRequest chainntnfs.ConfRequest,
   585  	startHeight, endHeight uint32) (*chainntnfs.TxConfirmation,
   586  	chainntnfs.TxConfStatus, error) {
   587  
   588  	// Begin scanning blocks at every height to determine where the
   589  	// transaction was included in.
   590  	for height := endHeight; height >= startHeight && height > 0; height-- {
   591  		// Ensure we haven't been requested to shut down before
   592  		// processing the next height.
   593  		select {
   594  		case <-n.quit:
   595  			return nil, chainntnfs.TxNotFoundManually,
   596  				chainntnfs.ErrChainNotifierShuttingDown
   597  		default:
   598  		}
   599  
   600  		blockHash, err := n.chainConn.GetBlockHash(context.TODO(), int64(height))
   601  		if err != nil {
   602  			return nil, chainntnfs.TxNotFoundManually,
   603  				fmt.Errorf("unable to get hash from block "+
   604  					"with height %d", height)
   605  		}
   606  
   607  		// TODO: fetch the neutrino filters instead.
   608  		block, err := n.getBlock(context.TODO(), blockHash)
   609  		if err != nil {
   610  			return nil, chainntnfs.TxNotFoundManually,
   611  				fmt.Errorf("unable to get block with hash "+
   612  					"%v: %v", blockHash, err)
   613  		}
   614  
   615  		// For every transaction in the block, check which one matches
   616  		// our request. If we find one that does, we can dispatch its
   617  		// confirmation details.
   618  		for txIndex, tx := range block.Transactions {
   619  			if !confRequest.MatchesTx(tx) {
   620  				continue
   621  			}
   622  
   623  			return &chainntnfs.TxConfirmation{
   624  				Tx:          tx,
   625  				BlockHash:   blockHash,
   626  				BlockHeight: height,
   627  				TxIndex:     uint32(txIndex),
   628  			}, chainntnfs.TxFoundManually, nil
   629  		}
   630  	}
   631  
   632  	// If we reach here, then we were not able to find the transaction
   633  	// within a block, so we avoid returning an error.
   634  	return nil, chainntnfs.TxNotFoundManually, nil
   635  }
   636  
   637  // handleBlockConnected applies a chain update for a new block. Any watched
   638  // transactions included this block will processed to either send notifications
   639  // now or after numConfirmations confs.
   640  func (n *DcrdNotifier) handleBlockConnected(newBlock *filteredBlock) error {
   641  	// We'll then extend the txNotifier's height with the information of
   642  	// this new block, which will handle all of the notification logic for
   643  	// us.
   644  	newBlockHash := newBlock.header.BlockHash()
   645  	newBlockHeight := newBlock.header.Height
   646  	err := n.txNotifier.ConnectTip(
   647  		&newBlockHash, newBlockHeight, newBlock.txns,
   648  	)
   649  	if err != nil {
   650  		return fmt.Errorf("unable to connect tip: %v", err)
   651  	}
   652  
   653  	chainntnfs.Log.Infof("New block: height=%v, hash=%v", newBlockHeight,
   654  		newBlockHash)
   655  
   656  	// Now that we've guaranteed the new block extends the txNotifier's
   657  	// current tip, we'll proceed to dispatch notifications to all of our
   658  	// registered clients whom have had notifications fulfilled. Before
   659  	// doing so, we'll make sure update our in memory state in order to
   660  	// satisfy any client requests based upon the new block.
   661  	n.bestBlock.Hash = &newBlockHash
   662  	n.bestBlock.Height = int32(newBlockHeight)
   663  	n.bestBlock.BlockHeader = newBlock.header
   664  
   665  	n.notifyBlockEpochs(int32(newBlockHeight), &newBlockHash, newBlock.header)
   666  
   667  	// Delay spend/confirm notifications until the block epoch ntfn has
   668  	// (likely) been processed. This helps prevent classes of errors that
   669  	// happen due to racing the spend/conf ntfn and tracking the current
   670  	// block height in some subsystems.
   671  	select {
   672  	case <-time.After(5 * time.Millisecond):
   673  	case <-n.quit:
   674  	}
   675  
   676  	return n.txNotifier.NotifyHeight(newBlockHeight)
   677  }
   678  
   679  // fetchFilteredBlock is a utility to retrieve the full filtered block from a
   680  // block epoch.
   681  func (n *DcrdNotifier) fetchFilteredBlock(epoch chainntnfs.BlockEpoch) (*filteredBlock, error) {
   682  	return n.fetchFilteredBlockForBlockHash(epoch.Hash)
   683  }
   684  
   685  // fetchFilteredBlockForBlockHash is a utility to retrieve the full filtered
   686  // block (including _all_ transactions, not just the watched ones) for the
   687  // block identified by the provided block hash.
   688  func (n *DcrdNotifier) fetchFilteredBlockForBlockHash(bh *chainhash.Hash) (*filteredBlock, error) {
   689  	rawBlock, err := n.getBlock(context.TODO(), bh)
   690  	if err != nil {
   691  		return nil, fmt.Errorf("unable to get block: %v", err)
   692  	}
   693  
   694  	txns := make([]*dcrutil.Tx, 0, len(rawBlock.Transactions))
   695  	for i := range rawBlock.Transactions {
   696  		tx := dcrutil.NewTx(rawBlock.Transactions[i])
   697  		tx.SetIndex(i)
   698  		tx.SetTree(wire.TxTreeRegular)
   699  		txns = append(txns, tx)
   700  	}
   701  
   702  	block := &filteredBlock{
   703  		header:  &rawBlock.Header,
   704  		txns:    txns,
   705  		connect: true,
   706  	}
   707  	return block, nil
   708  }
   709  
   710  // notifyBlockEpochs notifies all registered block epoch clients of the newly
   711  // connected block to the main chain.
   712  func (n *DcrdNotifier) notifyBlockEpochs(newHeight int32, newHash *chainhash.Hash,
   713  	newHeader *wire.BlockHeader) {
   714  	for _, client := range n.blockEpochClients {
   715  		n.notifyBlockEpochClient(client, newHeight, newHash, newHeader)
   716  	}
   717  }
   718  
   719  // notifyBlockEpochClient sends a registered block epoch client a notification
   720  // about a specific block.
   721  func (n *DcrdNotifier) notifyBlockEpochClient(epochClient *blockEpochRegistration,
   722  	height int32, hash *chainhash.Hash, header *wire.BlockHeader) {
   723  
   724  	epoch := &chainntnfs.BlockEpoch{
   725  		Height:      height,
   726  		Hash:        hash,
   727  		BlockHeader: header,
   728  	}
   729  
   730  	select {
   731  	case epochClient.epochQueue.ChanIn() <- epoch:
   732  	case <-epochClient.cancelChan:
   733  	case <-n.quit:
   734  	}
   735  }
   736  
   737  // RegisterSpendNtfn registers an intent to be notified once the target
   738  // outpoint/output script has been spent by a transaction on-chain. When
   739  // intending to be notified of the spend of an output script, a nil outpoint
   740  // must be used. The heightHint should represent the earliest height in the
   741  // chain of the transaction that spent the outpoint/output script.
   742  //
   743  // Once a spend of has been detected, the details of the spending event will be
   744  // sent across the 'Spend' channel.
   745  func (n *DcrdNotifier) RegisterSpendNtfn(outpoint *wire.OutPoint,
   746  	pkScript []byte, heightHint uint32) (*chainntnfs.SpendEvent, error) {
   747  
   748  	// Register the conf notification with the TxNotifier. A non-nil value
   749  	// for `dispatch` will be returned if we are required to perform a
   750  	// manual scan for the confirmation. Otherwise the notifier will begin
   751  	// watching at tip for the transaction to confirm.
   752  	ntfn, err := n.txNotifier.RegisterSpend(outpoint, pkScript, heightHint)
   753  	if err != nil {
   754  		return nil, err
   755  	}
   756  
   757  	// If the txNotifier didn't return any details to perform a historical
   758  	// scan of the chain, then we can return early as there's nothing left
   759  	// for us to do.
   760  	if ntfn.HistoricalDispatch == nil {
   761  		return ntfn.Event, nil
   762  	}
   763  
   764  	// TODO(decred) This currently always only adds to the tx filter, which
   765  	// will make it grow unboundedly. Ideally this should be reloaded with
   766  	// the specific set we're interested in, but that would require
   767  	// rebuilding the tx filter every time this is called.
   768  	//
   769  	// We'll then request the backend to notify us when it has detected the
   770  	// outpoint or a script was spent.
   771  	var ops []wire.OutPoint
   772  	var addrs []stdaddr.Address
   773  
   774  	// Otherwise, we'll determine when the output was spent by scanning the
   775  	// chain.  We'll begin by determining where to start our historical
   776  	// rescan.
   777  	startHeight := ntfn.HistoricalDispatch.StartHeight
   778  
   779  	emptyOutPoint := outpoint == nil || *outpoint == chainntnfs.ZeroOutPoint
   780  	if emptyOutPoint {
   781  		_, addrs = stdscript.ExtractAddrs(
   782  			0, pkScript, n.chainParams,
   783  		)
   784  	} else {
   785  		ops = []wire.OutPoint{*outpoint}
   786  	}
   787  
   788  	// Ensure we'll receive any new notifications for either the outpoint
   789  	// or the address from now on.
   790  	if err := n.chainConn.LoadTxFilter(context.TODO(), false, addrs, ops); err != nil {
   791  		return nil, err
   792  	}
   793  
   794  	if !emptyOutPoint {
   795  		// When dispatching spends of outpoints, there are a number of checks
   796  		// we can make to start our rescan from a better height or completely
   797  		// avoid it.
   798  		//
   799  		// We'll start by checking the backend's UTXO set to determine whether
   800  		// the outpoint has been spent. If it hasn't, we can return to the
   801  		// caller as well.
   802  		txOut, err := n.chainConn.GetTxOut(
   803  			context.TODO(), &outpoint.Hash, outpoint.Index, outpoint.Tree, true,
   804  		)
   805  		if err != nil {
   806  			return nil, err
   807  		}
   808  		if txOut != nil {
   809  			// We'll let the txNotifier know the outpoint is still
   810  			// unspent in order to begin updating its spend hint.
   811  			err := n.txNotifier.UpdateSpendDetails(
   812  				ntfn.HistoricalDispatch.SpendRequest, nil,
   813  			)
   814  			if err != nil {
   815  				return nil, err
   816  			}
   817  
   818  			return ntfn.Event, nil
   819  		}
   820  
   821  		// As a minimal optimization, we'll query the backend's
   822  		// transaction index (if enabled) to determine if we have a
   823  		// better rescan starting height. We can do this as the
   824  		// GetRawTransaction call will return the hash of the block it
   825  		// was included in within the chain.
   826  		tx, err := n.chainConn.GetRawTransactionVerbose(context.TODO(), &outpoint.Hash)
   827  		if err != nil {
   828  			// Avoid returning an error if the transaction was not found to
   829  			// proceed with fallback methods.
   830  			isNoTxIndexErr := chainntnfs.IsTxIndexDisabledError(err)
   831  			jsonErr, ok := err.(*dcrjson.RPCError)
   832  			if !isNoTxIndexErr && (!ok || jsonErr.Code != dcrjson.ErrRPCNoTxInfo) {
   833  				return nil, fmt.Errorf("unable to query for "+
   834  					"txid %v: %v", outpoint.Hash, err)
   835  			}
   836  		}
   837  
   838  		// If the transaction index was enabled, we'll use the block's
   839  		// hash to retrieve its height and check whether it provides a
   840  		// better starting point for our rescan.
   841  		if tx != nil {
   842  			// If the transaction containing the outpoint hasn't confirmed
   843  			// on-chain, then there's no need to perform a rescan.
   844  			if tx.BlockHash == "" {
   845  				return ntfn.Event, nil
   846  			}
   847  
   848  			blockHash, err := chainhash.NewHashFromStr(tx.BlockHash)
   849  			if err != nil {
   850  				return nil, err
   851  			}
   852  			blockHeader, err := n.chainConn.GetBlockHeader(context.TODO(), blockHash)
   853  			if err != nil {
   854  				return nil, fmt.Errorf("unable to get header for "+
   855  					"block %v: %v", blockHash, err)
   856  			}
   857  
   858  			if blockHeader.Height > ntfn.HistoricalDispatch.StartHeight {
   859  				startHeight = blockHeader.Height
   860  			}
   861  		}
   862  	}
   863  
   864  	// TODO(decred): Fix!
   865  	//
   866  	// In order to ensure that we don't block the caller on what may be a
   867  	// long rescan, we'll launch a new goroutine to handle the async result
   868  	// of the rescan. We purposefully prevent from adding this goroutine to
   869  	// the WaitGroup as we cannot wait for a quit signal due to the
   870  	// asyncResult channel not being exposed.
   871  	//
   872  	// TODO(wilmer): add retry logic if rescan fails?
   873  	go n.inefficientSpendRescan(startHeight, ntfn.HistoricalDispatch)
   874  
   875  	return ntfn.Event, nil
   876  }
   877  
   878  // txSpendsSpendRequest returns the index where the given spendRequest was
   879  // spent by the transaction or -1 if no inputs spend the given spendRequest.
   880  func txSpendsSpendRequest(tx *wire.MsgTx, spendRequest *chainntnfs.SpendRequest,
   881  	addrParams stdaddr.AddressParams) int {
   882  
   883  	if spendRequest.OutPoint != chainntnfs.ZeroOutPoint {
   884  		// Matching by outpoint.
   885  		for i, in := range tx.TxIn {
   886  			if in.PreviousOutPoint == spendRequest.OutPoint {
   887  				return i
   888  			}
   889  		}
   890  		return -1
   891  	}
   892  
   893  	// Matching by script.
   894  	for i, in := range tx.TxIn {
   895  		// Ignore the errors here, due to them definitely not being a
   896  		// match.
   897  		pkScript, _ := chainscan.ComputePkScript(
   898  			spendRequest.PkScript.ScriptVersion(), in.SignatureScript,
   899  		)
   900  		if spendRequest.PkScript.Equal(&pkScript) {
   901  			return i
   902  		}
   903  	}
   904  	return -1
   905  }
   906  
   907  // inefficientSpendRescan is a utility function to RegisterSpendNtfn. It performs
   908  // a (very) inefficient rescan over the full mined block database, looking
   909  // for the spending of the passed ntfn outpoint.
   910  //
   911  // This needs to be executed in its own goroutine, as it blocks.
   912  //
   913  // TODO(decred) This _needs_ to be improved into a proper rescan procedure or
   914  // an index.
   915  func (n *DcrdNotifier) inefficientSpendRescan(startHeight uint32,
   916  	histDispatch *chainntnfs.HistoricalSpendDispatch) (*chainntnfs.SpendDetail, error) {
   917  
   918  	endHeight := int64(histDispatch.EndHeight)
   919  
   920  	for height := int64(startHeight); height <= endHeight; height++ {
   921  		scanHash, err := n.chainConn.GetBlockHash(context.TODO(), height)
   922  		if err != nil {
   923  			chainntnfs.Log.Errorf("Error determining next block to scan for "+
   924  				"outpoint spender", err)
   925  			return nil, err
   926  		}
   927  
   928  		res, err := n.chainConn.Rescan(context.TODO(), []chainhash.Hash{*scanHash})
   929  		if err != nil {
   930  			chainntnfs.Log.Errorf("Rescan to determine the spend "+
   931  				"details of %v failed: %v", histDispatch.SpendRequest.OutPoint, err)
   932  			return nil, err
   933  		}
   934  
   935  		if len(res.DiscoveredData) == 0 {
   936  			// No data found for this block, so go on to the next.
   937  			continue
   938  		}
   939  
   940  		// We need to check individual txs since the active tx filter
   941  		// might have multiple transactions, and they may be repeatedly
   942  		// encountered.
   943  		for _, data := range res.DiscoveredData {
   944  			for _, hexTx := range data.Transactions {
   945  				bytesTx, err := hex.DecodeString(hexTx)
   946  				if err != nil {
   947  					chainntnfs.Log.Errorf("Error converting hexTx to "+
   948  						"bytes during spend rescan: %v", err)
   949  					return nil, err
   950  				}
   951  
   952  				var tx wire.MsgTx
   953  				err = tx.FromBytes(bytesTx)
   954  				if err != nil {
   955  					chainntnfs.Log.Errorf("Error decoding tx from bytes "+
   956  						"during spend rescan: %v", err)
   957  				}
   958  
   959  				spenderIndex := txSpendsSpendRequest(
   960  					&tx, &histDispatch.SpendRequest,
   961  					n.chainParams,
   962  				)
   963  				if spenderIndex == -1 {
   964  					// This tx is not a match, so go on to
   965  					// the next.
   966  					continue
   967  				}
   968  
   969  				// Found the spender tx! Update the spend
   970  				// status (which will emit the notification)
   971  				// and finish the scan.
   972  				txHash := tx.TxHash()
   973  				details := &chainntnfs.SpendDetail{
   974  					SpentOutPoint:     &histDispatch.SpendRequest.OutPoint,
   975  					SpenderTxHash:     &txHash,
   976  					SpendingTx:        &tx,
   977  					SpenderInputIndex: uint32(spenderIndex),
   978  					SpendingHeight:    int32(height),
   979  				}
   980  				err = n.txNotifier.UpdateSpendDetails(histDispatch.SpendRequest, details)
   981  				return details, err
   982  			}
   983  		}
   984  	}
   985  
   986  	return nil, errInefficientRescanTxNotFound
   987  }
   988  
   989  // RegisterConfirmationsNtfn registers an intent to be notified once the target
   990  // txid/output script has reached numConfs confirmations on-chain. When
   991  // intending to be notified of the confirmation of an output script, a nil txid
   992  // must be used. The heightHint should represent the earliest height at which
   993  // the txid/output script could have been included in the chain.
   994  //
   995  // Progress on the number of confirmations left can be read from the 'Updates'
   996  // channel. Once it has reached all of its confirmations, a notification will be
   997  // sent across the 'Confirmed' channel.
   998  func (n *DcrdNotifier) RegisterConfirmationsNtfn(txid *chainhash.Hash,
   999  	pkScript []byte,
  1000  	numConfs, heightHint uint32) (*chainntnfs.ConfirmationEvent, error) {
  1001  
  1002  	// Register the conf notification with the TxNotifier. A non-nil value
  1003  	// for `dispatch` will be returned if we are required to perform a
  1004  	// manual scan for the confirmation. Otherwise the notifier will begin
  1005  	// watching at tip for the transaction to confirm.
  1006  	ntfn, err := n.txNotifier.RegisterConf(
  1007  		txid, pkScript, numConfs, heightHint,
  1008  	)
  1009  	if err != nil {
  1010  		return nil, err
  1011  	}
  1012  
  1013  	if ntfn.HistoricalDispatch == nil {
  1014  		return ntfn.Event, nil
  1015  	}
  1016  
  1017  	select {
  1018  	case n.notificationRegistry <- ntfn.HistoricalDispatch:
  1019  		return ntfn.Event, nil
  1020  	case <-n.quit:
  1021  		return nil, chainntnfs.ErrChainNotifierShuttingDown
  1022  	}
  1023  }
  1024  
// blockEpochRegistration represents a client's intent to receive a
// notification with each newly connected block.
type blockEpochRegistration struct {
	// epochID uniquely identifies this registration so it can later be
	// targeted by an epochCancel message.
	epochID uint64

	// epochChan is the channel over which block epochs are delivered to
	// the client.
	epochChan chan *chainntnfs.BlockEpoch

	// epochQueue buffers epochs between the notifier's dispatch loop and
	// the per-client proxy goroutine, preserving order.
	epochQueue *queue.ConcurrentQueue

	// bestBlock is the client's best known block; if non-nil the notifier
	// uses it to catch the client up on any missed blocks.
	bestBlock *chainntnfs.BlockEpoch

	// errorChan reports registration errors back to the client.
	errorChan chan error

	// cancelChan is closed to tear down the proxy goroutine when the
	// registration is cancelled.
	cancelChan chan struct{}

	// wg tracks the proxy goroutine so cancellation can wait for it.
	wg sync.WaitGroup
}
  1042  
// epochCancel is a message sent to the DcrdNotifier when a client wishes to
// cancel an outstanding epoch notification that has yet to be dispatched.
type epochCancel struct {
	// epochID identifies the blockEpochRegistration to cancel.
	epochID uint64
}
  1048  
// RegisterBlockEpochNtfn returns a BlockEpochEvent which subscribes the
// caller to receive notifications, of each new block connected to the main
// chain. Clients have the option of passing in their best known block, which
// the notifier uses to check if they are behind on blocks and catch them up.
// If they do not provide one, then a notification will be dispatched
// immediately for the current tip of the chain upon a successful registration.
func (n *DcrdNotifier) RegisterBlockEpochNtfn(
	bestBlock *chainntnfs.BlockEpoch) (*chainntnfs.BlockEpochEvent, error) {

	reg := &blockEpochRegistration{
		epochQueue: queue.NewConcurrentQueue(20),
		epochChan:  make(chan *chainntnfs.BlockEpoch, 20),
		cancelChan: make(chan struct{}),
		// Atomically assign the next unique client ID.
		epochID:    atomic.AddUint64(&n.epochClientCounter, 1),
		bestBlock:  bestBlock,
		errorChan:  make(chan error, 1),
	}

	reg.epochQueue.Start()

	// Before we send the request to the main goroutine, we'll launch a new
	// goroutine to proxy items added to our queue to the client itself.
	// This ensures that all notifications are received *in order*.
	reg.wg.Add(1)
	go func() {
		defer reg.wg.Done()

		for {
			select {
			case ntfn := <-reg.epochQueue.ChanOut():
				blockNtfn := ntfn.(*chainntnfs.BlockEpoch)
				// Forward the epoch to the client, but remain
				// responsive to cancellation/shutdown while
				// the client is slow to receive.
				select {
				case reg.epochChan <- blockNtfn:

				case <-reg.cancelChan:
					return

				case <-n.quit:
					return
				}

			case <-reg.cancelChan:
				return

			case <-n.quit:
				return
			}
		}
	}()

	select {
	case <-n.quit:
		// As we're exiting before the registration could be sent,
		// we'll stop the queue now ourselves.
		reg.epochQueue.Stop()

		return nil, errors.New("chainntnfs: system interrupt while " +
			"attempting to register for block epoch notification")
	case n.notificationRegistry <- reg:
		return &chainntnfs.BlockEpochEvent{
			Epochs: reg.epochChan,
			// Cancel tears down this registration; it may be
			// called at most once by the client.
			Cancel: func() {
				cancel := &epochCancel{
					epochID: reg.epochID,
				}

				// Submit epoch cancellation to notification dispatcher.
				select {
				case n.notificationCancels <- cancel:
					// Cancellation is being handled, drain
					// the epoch channel until it is closed
					// before yielding to caller.
					for {
						select {
						case _, ok := <-reg.epochChan:
							if !ok {
								return
							}
						case <-n.quit:
							return
						}
					}
				case <-n.quit:
				}
			},
		}, nil
	}
}