github.com/btcsuite/btcd@v0.24.0/netsync/manager.go

     1  // Copyright (c) 2013-2017 The btcsuite developers
     2  // Use of this source code is governed by an ISC
     3  // license that can be found in the LICENSE file.
     4  
     5  package netsync
     6  
     7  import (
     8  	"container/list"
     9  	"math/rand"
    10  	"net"
    11  	"sync"
    12  	"sync/atomic"
    13  	"time"
    14  
    15  	"github.com/btcsuite/btcd/blockchain"
    16  	"github.com/btcsuite/btcd/btcutil"
    17  	"github.com/btcsuite/btcd/chaincfg"
    18  	"github.com/btcsuite/btcd/chaincfg/chainhash"
    19  	"github.com/btcsuite/btcd/database"
    20  	"github.com/btcsuite/btcd/mempool"
    21  	peerpkg "github.com/btcsuite/btcd/peer"
    22  	"github.com/btcsuite/btcd/wire"
    23  )
    24  
    25  const (
    26  	// minInFlightBlocks is the minimum number of blocks that should be
    27  	// in the request queue for headers-first mode before requesting
    28  	// more.
    29  	minInFlightBlocks = 10
    30  
     31  	// maxRejectedTxns is the maximum number of rejected transaction
     32  	// hashes to store in memory.
    33  	maxRejectedTxns = 1000
    34  
    35  	// maxRequestedBlocks is the maximum number of requested block
    36  	// hashes to store in memory.
    37  	maxRequestedBlocks = wire.MaxInvPerMsg
    38  
     39  	// maxRequestedTxns is the maximum number of requested transaction
     40  	// hashes to store in memory.
    41  	maxRequestedTxns = wire.MaxInvPerMsg
    42  
    43  	// maxStallDuration is the time after which we will disconnect our
    44  	// current sync peer if we haven't made progress.
    45  	maxStallDuration = 3 * time.Minute
    46  
     47  	// stallSampleInterval is the interval at which we will check to see
     48  	// if our sync has stalled.
    49  	stallSampleInterval = 30 * time.Second
    50  )
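
// A quick sketch of how the two stall constants above interact, derived
// directly from their values: the stall check fires every
// stallSampleInterval, so a stalled sync peer is detected at most one
// sample interval after the timeout elapses.
//
//	const samplesPerTimeout = maxStallDuration / stallSampleInterval  // 6
//	const worstCaseDetection = maxStallDuration + stallSampleInterval // 3m30s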
    51  
    52  // zeroHash is the zero value hash (all zeros).  It is defined as a convenience.
    53  var zeroHash chainhash.Hash
    54  
    55  // newPeerMsg signifies a newly connected peer to the block handler.
    56  type newPeerMsg struct {
    57  	peer *peerpkg.Peer
    58  }
    59  
    60  // blockMsg packages a bitcoin block message and the peer it came from together
    61  // so the block handler has access to that information.
    62  type blockMsg struct {
    63  	block *btcutil.Block
    64  	peer  *peerpkg.Peer
    65  	reply chan struct{}
    66  }
    67  
    68  // invMsg packages a bitcoin inv message and the peer it came from together
    69  // so the block handler has access to that information.
    70  type invMsg struct {
    71  	inv  *wire.MsgInv
    72  	peer *peerpkg.Peer
    73  }
    74  
    75  // headersMsg packages a bitcoin headers message and the peer it came from
    76  // together so the block handler has access to that information.
    77  type headersMsg struct {
    78  	headers *wire.MsgHeaders
    79  	peer    *peerpkg.Peer
    80  }
    81  
    82  // notFoundMsg packages a bitcoin notfound message and the peer it came from
    83  // together so the block handler has access to that information.
    84  type notFoundMsg struct {
    85  	notFound *wire.MsgNotFound
    86  	peer     *peerpkg.Peer
    87  }
    88  
    89  // donePeerMsg signifies a newly disconnected peer to the block handler.
    90  type donePeerMsg struct {
    91  	peer *peerpkg.Peer
    92  }
    93  
    94  // txMsg packages a bitcoin tx message and the peer it came from together
    95  // so the block handler has access to that information.
    96  type txMsg struct {
    97  	tx    *btcutil.Tx
    98  	peer  *peerpkg.Peer
    99  	reply chan struct{}
   100  }
   101  
   102  // getSyncPeerMsg is a message type to be sent across the message channel for
   103  // retrieving the current sync peer.
   104  type getSyncPeerMsg struct {
   105  	reply chan int32
   106  }
   107  
   108  // processBlockResponse is a response sent to the reply channel of a
   109  // processBlockMsg.
   110  type processBlockResponse struct {
   111  	isOrphan bool
   112  	err      error
   113  }
   114  
   115  // processBlockMsg is a message type to be sent across the message channel
    116  // for requesting that a block be processed.  Note this call differs from blockMsg
   117  // above in that blockMsg is intended for blocks that came from peers and have
   118  // extra handling whereas this message essentially is just a concurrent safe
   119  // way to call ProcessBlock on the internal block chain instance.
   120  type processBlockMsg struct {
   121  	block *btcutil.Block
   122  	flags blockchain.BehaviorFlags
   123  	reply chan processBlockResponse
   124  }
   125  
   126  // isCurrentMsg is a message type to be sent across the message channel for
   127  // requesting whether or not the sync manager believes it is synced with the
   128  // currently connected peers.
   129  type isCurrentMsg struct {
   130  	reply chan bool
   131  }
   132  
   133  // pauseMsg is a message type to be sent across the message channel for
   134  // pausing the sync manager.  This effectively provides the caller with
   135  // exclusive access over the manager until a receive is performed on the
   136  // unpause channel.
   137  type pauseMsg struct {
   138  	unpause <-chan struct{}
   139  }
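
// A minimal in-package usage sketch for pauseMsg (hypothetical caller
// code, assuming direct access to the manager's message channel): the
// block handler blocks on unpause, so the caller holds exclusive access
// until it closes the channel, which satisfies the receive.
//
//	unpause := make(chan struct{})
//	sm.msgChan <- pauseMsg{unpause: unpause}
//	// ... safely inspect state normally owned by the block handler ...
//	close(unpause) // resumes the block handler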
   140  
   141  // headerNode is used as a node in a list of headers that are linked together
   142  // between checkpoints.
   143  type headerNode struct {
   144  	height int32
   145  	hash   *chainhash.Hash
   146  }
   147  
   148  // peerSyncState stores additional information that the SyncManager tracks
   149  // about a peer.
   150  type peerSyncState struct {
   151  	syncCandidate   bool
   152  	requestQueue    []*wire.InvVect
   153  	requestedTxns   map[chainhash.Hash]struct{}
   154  	requestedBlocks map[chainhash.Hash]struct{}
   155  }
   156  
    157  // limitAdd is a helper function for maps that enforce a maximum limit by
    158  // evicting a random entry when adding the new value would cause the map
    159  // to overflow the maximum allowed size.
   160  func limitAdd(m map[chainhash.Hash]struct{}, hash chainhash.Hash, limit int) {
   161  	if len(m)+1 > limit {
   162  		// Remove a random entry from the map.  For most compilers, Go's
   163  		// range statement iterates starting at a random item although
   164  		// that is not 100% guaranteed by the spec.  The iteration order
   165  		// is not important here because an adversary would have to be
   166  		// able to pull off preimage attacks on the hashing function in
   167  		// order to target eviction of specific entries anyways.
   168  		for txHash := range m {
   169  			delete(m, txHash)
   170  			break
   171  		}
   172  	}
   173  	m[hash] = struct{}{}
   174  }
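
// For example, with a limit of 2 and three distinct hashes hashA, hashB
// and hashC (placeholders here), the third call evicts one random prior
// entry so the map never exceeds the cap:
//
//	m := make(map[chainhash.Hash]struct{})
//	limitAdd(m, hashA, 2)
//	limitAdd(m, hashB, 2)
//	limitAdd(m, hashC, 2) // len(m) == 2; hashA or hashB was evicted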
   175  
   176  // SyncManager is used to communicate block related messages with peers. The
    177  // SyncManager is started by executing Start() in a goroutine. Once started,
   178  // it selects peers to sync from and starts the initial block download. Once the
   179  // chain is in sync, the SyncManager handles incoming block and header
   180  // notifications and relays announcements of new blocks to peers.
   181  type SyncManager struct {
   182  	peerNotifier   PeerNotifier
   183  	started        int32
   184  	shutdown       int32
   185  	chain          *blockchain.BlockChain
   186  	txMemPool      *mempool.TxPool
   187  	chainParams    *chaincfg.Params
   188  	progressLogger *blockProgressLogger
   189  	msgChan        chan interface{}
   190  	wg             sync.WaitGroup
   191  	quit           chan struct{}
   192  
   193  	// These fields should only be accessed from the blockHandler thread
   194  	rejectedTxns     map[chainhash.Hash]struct{}
   195  	requestedTxns    map[chainhash.Hash]struct{}
   196  	requestedBlocks  map[chainhash.Hash]struct{}
   197  	syncPeer         *peerpkg.Peer
   198  	peerStates       map[*peerpkg.Peer]*peerSyncState
   199  	lastProgressTime time.Time
   200  
   201  	// The following fields are used for headers-first mode.
   202  	headersFirstMode bool
   203  	headerList       *list.List
   204  	startHeader      *list.Element
   205  	nextCheckpoint   *chaincfg.Checkpoint
   206  
   207  	// An optional fee estimator.
   208  	feeEstimator *mempool.FeeEstimator
   209  }
   210  
   211  // resetHeaderState sets the headers-first mode state to values appropriate for
   212  // syncing from a new peer.
   213  func (sm *SyncManager) resetHeaderState(newestHash *chainhash.Hash, newestHeight int32) {
   214  	sm.headersFirstMode = false
   215  	sm.headerList.Init()
   216  	sm.startHeader = nil
   217  
   218  	// When there is a next checkpoint, add an entry for the latest known
   219  	// block into the header pool.  This allows the next downloaded header
   220  	// to prove it links to the chain properly.
   221  	if sm.nextCheckpoint != nil {
   222  		node := headerNode{height: newestHeight, hash: newestHash}
   223  		sm.headerList.PushBack(&node)
   224  	}
   225  }
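
// Illustrative call sequence: after a reset while checkpoints remain,
// the list holds a single anchor node for the best known block, which
// the first header received afterwards must reference via PrevBlock.
//
//	best := sm.chain.BestSnapshot()
//	sm.resetHeaderState(&best.Hash, best.Height)
//	// sm.headerList.Len() == 1 if sm.nextCheckpoint != nil, else 0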
   226  
   227  // findNextHeaderCheckpoint returns the next checkpoint after the passed height.
    228  // It returns nil when there is not one, either because the height is already
    229  // later than the final checkpoint or for some other reason such as disabled
   230  // checkpoints.
   231  func (sm *SyncManager) findNextHeaderCheckpoint(height int32) *chaincfg.Checkpoint {
   232  	checkpoints := sm.chain.Checkpoints()
   233  	if len(checkpoints) == 0 {
   234  		return nil
   235  	}
   236  
   237  	// There is no next checkpoint if the height is already after the final
   238  	// checkpoint.
   239  	finalCheckpoint := &checkpoints[len(checkpoints)-1]
   240  	if height >= finalCheckpoint.Height {
   241  		return nil
   242  	}
   243  
   244  	// Find the next checkpoint.
   245  	nextCheckpoint := finalCheckpoint
   246  	for i := len(checkpoints) - 2; i >= 0; i-- {
   247  		if height >= checkpoints[i].Height {
   248  			break
   249  		}
   250  		nextCheckpoint = &checkpoints[i]
   251  	}
   252  	return nextCheckpoint
   253  }
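
// A worked example with hypothetical checkpoints at heights 1000, 2000
// and 3000: height 999 returns the checkpoint at 1000, height 1500 the
// one at 2000, height 2999 the one at 3000, and height 3000 or above
// returns nil.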
   254  
   255  // startSync will choose the best peer among the available candidate peers to
   256  // download/sync the blockchain from.  When syncing is already running, it
   257  // simply returns.  It also examines the candidates for any which are no longer
   258  // candidates and removes them as needed.
   259  func (sm *SyncManager) startSync() {
   260  	// Return now if we're already syncing.
   261  	if sm.syncPeer != nil {
   262  		return
   263  	}
   264  
   265  	// Once the segwit soft-fork package has activated, we only
   266  	// want to sync from peers which are witness enabled to ensure
   267  	// that we fully validate all blockchain data.
   268  	segwitActive, err := sm.chain.IsDeploymentActive(chaincfg.DeploymentSegwit)
   269  	if err != nil {
   270  		log.Errorf("Unable to query for segwit soft-fork state: %v", err)
   271  		return
   272  	}
   273  
   274  	best := sm.chain.BestSnapshot()
   275  	var higherPeers, equalPeers []*peerpkg.Peer
   276  	for peer, state := range sm.peerStates {
   277  		if !state.syncCandidate {
   278  			continue
   279  		}
   280  
   281  		if segwitActive && !peer.IsWitnessEnabled() {
   282  			log.Debugf("peer %v not witness enabled, skipping", peer)
   283  			continue
   284  		}
   285  
   286  		// Remove sync candidate peers that are no longer candidates due
   287  		// to passing their latest known block.  NOTE: The < is
   288  		// intentional as opposed to <=.  While technically the peer
   289  		// doesn't have a later block when it's equal, it will likely
   290  		// have one soon so it is a reasonable choice.  It also allows
   291  		// the case where both are at 0 such as during regression test.
   292  		if peer.LastBlock() < best.Height {
   293  			state.syncCandidate = false
   294  			continue
   295  		}
   296  
    297  		// If the peer is at the same height as us, we'll add it to a set
   298  		// of backup peers in case we do not find one with a higher
   299  		// height. If we are synced up with all of our peers, all of
   300  		// them will be in this set.
   301  		if peer.LastBlock() == best.Height {
   302  			equalPeers = append(equalPeers, peer)
   303  			continue
   304  		}
   305  
   306  		// This peer has a height greater than our own, we'll consider
   307  		// it in the set of better peers from which we'll randomly
   308  		// select.
   309  		higherPeers = append(higherPeers, peer)
   310  	}
   311  
   312  	// Pick randomly from the set of peers greater than our block height,
   313  	// falling back to a random peer of the same height if none are greater.
   314  	//
    315  	// TODO(conner): Use a better algorithm to rank peers based on
   316  	// observed metrics and/or sync in parallel.
   317  	var bestPeer *peerpkg.Peer
   318  	switch {
   319  	case len(higherPeers) > 0:
   320  		bestPeer = higherPeers[rand.Intn(len(higherPeers))]
   321  
   322  	case len(equalPeers) > 0:
   323  		bestPeer = equalPeers[rand.Intn(len(equalPeers))]
   324  	}
   325  
   326  	// Start syncing from the best peer if one was selected.
   327  	if bestPeer != nil {
   328  		// Clear the requestedBlocks if the sync peer changes, otherwise
   329  		// we may ignore blocks we need that the last sync peer failed
   330  		// to send.
   331  		sm.requestedBlocks = make(map[chainhash.Hash]struct{})
   332  
   333  		locator, err := sm.chain.LatestBlockLocator()
   334  		if err != nil {
   335  			log.Errorf("Failed to get block locator for the "+
   336  				"latest block: %v", err)
   337  			return
   338  		}
   339  
   340  		log.Infof("Syncing to block height %d from peer %v",
   341  			bestPeer.LastBlock(), bestPeer.Addr())
   342  
   343  		// When the current height is less than a known checkpoint we
   344  		// can use block headers to learn about which blocks comprise
   345  		// the chain up to the checkpoint and perform less validation
   346  		// for them.  This is possible since each header contains the
   347  		// hash of the previous header and a merkle root.  Therefore if
   348  		// we validate all of the received headers link together
   349  		// properly and the checkpoint hashes match, we can be sure the
   350  		// hashes for the blocks in between are accurate.  Further, once
   351  		// the full blocks are downloaded, the merkle root is computed
   352  		// and compared against the value in the header which proves the
   353  		// full block hasn't been tampered with.
   354  		//
   355  		// Once we have passed the final checkpoint, or checkpoints are
    356  		// disabled, use standard inv messages to learn about the blocks
   357  		// and fully validate them.  Finally, regression test mode does
   358  		// not support the headers-first approach so do normal block
   359  		// downloads when in regression test mode.
   360  		if sm.nextCheckpoint != nil &&
   361  			best.Height < sm.nextCheckpoint.Height &&
   362  			sm.chainParams != &chaincfg.RegressionNetParams {
   363  
   364  			bestPeer.PushGetHeadersMsg(locator, sm.nextCheckpoint.Hash)
   365  			sm.headersFirstMode = true
   366  			log.Infof("Downloading headers for blocks %d to "+
   367  				"%d from peer %s", best.Height+1,
   368  				sm.nextCheckpoint.Height, bestPeer.Addr())
   369  		} else {
   370  			bestPeer.PushGetBlocksMsg(locator, &zeroHash)
   371  		}
   372  		sm.syncPeer = bestPeer
   373  
   374  		// Reset the last progress time now that we have a non-nil
   375  		// syncPeer to avoid instantly detecting it as stalled in the
   376  		// event the progress time hasn't been updated recently.
   377  		sm.lastProgressTime = time.Now()
   378  	} else {
   379  		log.Warnf("No sync peer candidates available")
   380  	}
   381  }
   382  
   383  // isSyncCandidate returns whether or not the peer is a candidate to consider
   384  // syncing from.
   385  func (sm *SyncManager) isSyncCandidate(peer *peerpkg.Peer) bool {
    386  	// Typically a peer is not a candidate for sync if it's not a full node;
    387  	// however, regression test is special in that the regression tool is
   388  	// not a full node and still needs to be considered a sync candidate.
   389  	if sm.chainParams == &chaincfg.RegressionNetParams {
   390  		// The peer is not a candidate if it's not coming from localhost
   391  		// or the hostname can't be determined for some reason.
   392  		host, _, err := net.SplitHostPort(peer.Addr())
   393  		if err != nil {
   394  			return false
   395  		}
   396  
   397  		if host != "127.0.0.1" && host != "localhost" {
   398  			return false
   399  		}
   400  
   401  		// Candidate if all checks passed.
   402  		return true
   403  	}
   404  
   405  	// If the segwit soft-fork package has activated, then the peer must
   406  	// also be upgraded.
   407  	segwitActive, err := sm.chain.IsDeploymentActive(
   408  		chaincfg.DeploymentSegwit,
   409  	)
   410  	if err != nil {
   411  		log.Errorf("Unable to query for segwit soft-fork state: %v",
   412  			err)
   413  	}
   414  
   415  	if segwitActive && !peer.IsWitnessEnabled() {
   416  		return false
   417  	}
   418  
   419  	var (
   420  		nodeServices = peer.Services()
   421  		fullNode     = nodeServices.HasFlag(wire.SFNodeNetwork)
   422  		prunedNode   = nodeServices.HasFlag(wire.SFNodeNetworkLimited)
   423  	)
   424  
   425  	switch {
   426  	case fullNode:
   427  		// Node is a sync candidate if it has all the blocks.
   428  
   429  	case prunedNode:
   430  		// Even if the peer is pruned, if they have the node network
   431  		// limited flag, they are able to serve 2 days worth of blocks
   432  		// from the current tip. Therefore, check if our chaintip is
   433  		// within that range.
   434  		bestHeight := sm.chain.BestSnapshot().Height
   435  		peerLastBlock := peer.LastBlock()
   436  
   437  		// bestHeight+1 as we need the peer to serve us the next block,
   438  		// not the one we already have.
   439  		if bestHeight+1 <=
   440  			peerLastBlock-wire.NodeNetworkLimitedBlockThreshold {
   441  
   442  			return false
   443  		}
   444  
   445  	default:
   446  		// If the peer isn't an archival node, and it's not signaling
   447  		// NODE_NETWORK_LIMITED, we can't sync off of this node.
   448  		return false
   449  	}
   450  
   451  	// Candidate if all checks passed.
   452  	return true
   453  }
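
// A worked example for the pruned-peer branch above, assuming the wire
// package's NODE_NETWORK_LIMITED window of 288 blocks (roughly two
// days): a peer whose LastBlock is 1000 can serve blocks 713 through
// 1000, so it remains a candidate while our best height is 712 or more
// (the next block we need is within its window) and is rejected at 711
// or below.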
   454  
   455  // handleNewPeerMsg deals with new peers that have signalled they may
   456  // be considered as a sync peer (they have already successfully negotiated).  It
   457  // also starts syncing if needed.  It is invoked from the syncHandler goroutine.
   458  func (sm *SyncManager) handleNewPeerMsg(peer *peerpkg.Peer) {
   459  	// Ignore if in the process of shutting down.
   460  	if atomic.LoadInt32(&sm.shutdown) != 0 {
   461  		return
   462  	}
   463  
   464  	log.Infof("New valid peer %s (%s)", peer, peer.UserAgent())
   465  
   466  	// Initialize the peer state.
   467  	isSyncCandidate := sm.isSyncCandidate(peer)
   468  	sm.peerStates[peer] = &peerSyncState{
   469  		syncCandidate:   isSyncCandidate,
   470  		requestedTxns:   make(map[chainhash.Hash]struct{}),
   471  		requestedBlocks: make(map[chainhash.Hash]struct{}),
   472  	}
   473  
   474  	// Start syncing by choosing the best candidate if needed.
   475  	if isSyncCandidate && sm.syncPeer == nil {
   476  		sm.startSync()
   477  	}
   478  }
   479  
   480  // handleStallSample will switch to a new sync peer if the current one has
    481  // stalled. This is detected by comparing the last progress timestamp with
    482  // the current time, and the peer is disconnected if we stalled before reaching
   483  // their highest advertised block.
   484  func (sm *SyncManager) handleStallSample() {
   485  	if atomic.LoadInt32(&sm.shutdown) != 0 {
   486  		return
   487  	}
   488  
   489  	// If we don't have an active sync peer, exit early.
   490  	if sm.syncPeer == nil {
   491  		return
   492  	}
   493  
   494  	// If the stall timeout has not elapsed, exit early.
   495  	if time.Since(sm.lastProgressTime) <= maxStallDuration {
   496  		return
   497  	}
   498  
   499  	// Check to see that the peer's sync state exists.
   500  	state, exists := sm.peerStates[sm.syncPeer]
   501  	if !exists {
   502  		return
   503  	}
   504  
   505  	sm.clearRequestedState(state)
   506  
   507  	disconnectSyncPeer := sm.shouldDCStalledSyncPeer()
   508  	sm.updateSyncPeer(disconnectSyncPeer)
   509  }
   510  
   511  // shouldDCStalledSyncPeer determines whether or not we should disconnect a
   512  // stalled sync peer. If the peer has stalled and its reported height is greater
   513  // than our own best height, we will disconnect it. Otherwise, we will keep the
   514  // peer connected in case we are already at tip.
   515  func (sm *SyncManager) shouldDCStalledSyncPeer() bool {
   516  	lastBlock := sm.syncPeer.LastBlock()
   517  	startHeight := sm.syncPeer.StartingHeight()
   518  
   519  	var peerHeight int32
   520  	if lastBlock > startHeight {
   521  		peerHeight = lastBlock
   522  	} else {
   523  		peerHeight = startHeight
   524  	}
   525  
    526  	// If we've stalled out, yet the sync peer reports having more blocks for
    527  	// us, we will disconnect them. This allows us at tip to not disconnect
   528  	// peers when we are equal or they temporarily lag behind us.
   529  	best := sm.chain.BestSnapshot()
   530  	return peerHeight > best.Height
   531  }
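
// In short, peerHeight is max(LastBlock, StartingHeight) and the stalled
// peer is only disconnected when it still claims blocks we lack.  For
// example, at a best height of 800,000 a stalled sync peer reporting
// 800,010 is dropped, while one reporting 800,000 or less is kept since
// the stall then more likely means we are simply at tip.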
   532  
   533  // handleDonePeerMsg deals with peers that have signalled they are done.  It
   534  // removes the peer as a candidate for syncing and in the case where it was
   535  // the current sync peer, attempts to select a new best peer to sync from.  It
   536  // is invoked from the syncHandler goroutine.
   537  func (sm *SyncManager) handleDonePeerMsg(peer *peerpkg.Peer) {
   538  	state, exists := sm.peerStates[peer]
   539  	if !exists {
   540  		log.Warnf("Received done peer message for unknown peer %s", peer)
   541  		return
   542  	}
   543  
   544  	// Remove the peer from the list of candidate peers.
   545  	delete(sm.peerStates, peer)
   546  
   547  	log.Infof("Lost peer %s", peer)
   548  
   549  	sm.clearRequestedState(state)
   550  
   551  	if peer == sm.syncPeer {
   552  		// Update the sync peer. The server has already disconnected the
   553  		// peer before signaling to the sync manager.
   554  		sm.updateSyncPeer(false)
   555  	}
   556  }
   557  
   558  // clearRequestedState wipes all expected transactions and blocks from the sync
    559  // manager's requested maps that were requested under a peer's sync state.  This
   560  // allows them to be rerequested by a subsequent sync peer.
   561  func (sm *SyncManager) clearRequestedState(state *peerSyncState) {
   562  	// Remove requested transactions from the global map so that they will
   563  	// be fetched from elsewhere next time we get an inv.
   564  	for txHash := range state.requestedTxns {
   565  		delete(sm.requestedTxns, txHash)
   566  	}
   567  
   568  	// Remove requested blocks from the global map so that they will be
   569  	// fetched from elsewhere next time we get an inv.
    570  	// TODO: we could possibly check here which peers have these blocks
   571  	// and request them now to speed things up a little.
   572  	for blockHash := range state.requestedBlocks {
   573  		delete(sm.requestedBlocks, blockHash)
   574  	}
   575  }
   576  
    577  // updateSyncPeer chooses a new sync peer to replace the current one. If
    578  // dcSyncPeer is true, this method will also disconnect the current sync peer.
    579  // If we are in headers-first mode, any header state related to prefetching is
   580  // also reset in preparation for the next sync peer.
   581  func (sm *SyncManager) updateSyncPeer(dcSyncPeer bool) {
   582  	log.Debugf("Updating sync peer, no progress for: %v",
   583  		time.Since(sm.lastProgressTime))
   584  
   585  	// First, disconnect the current sync peer if requested.
   586  	if dcSyncPeer {
   587  		sm.syncPeer.Disconnect()
   588  	}
   589  
   590  	// Reset any header state before we choose our next active sync peer.
   591  	if sm.headersFirstMode {
   592  		best := sm.chain.BestSnapshot()
   593  		sm.resetHeaderState(&best.Hash, best.Height)
   594  	}
   595  
   596  	sm.syncPeer = nil
   597  	sm.startSync()
   598  }
   599  
   600  // handleTxMsg handles transaction messages from all peers.
   601  func (sm *SyncManager) handleTxMsg(tmsg *txMsg) {
   602  	peer := tmsg.peer
   603  	state, exists := sm.peerStates[peer]
   604  	if !exists {
   605  		log.Warnf("Received tx message from unknown peer %s", peer)
   606  		return
   607  	}
   608  
   609  	// NOTE:  BitcoinJ, and possibly other wallets, don't follow the spec of
   610  	// sending an inventory message and allowing the remote peer to decide
   611  	// whether or not they want to request the transaction via a getdata
   612  	// message.  Unfortunately, the reference implementation permits
   613  	// unrequested data, so it has allowed wallets that don't follow the
   614  	// spec to proliferate.  While this is not ideal, there is no check here
   615  	// to disconnect peers for sending unsolicited transactions to provide
   616  	// interoperability.
   617  	txHash := tmsg.tx.Hash()
   618  
   619  	// Ignore transactions that we have already rejected.  Do not
   620  	// send a reject message here because if the transaction was already
   621  	// rejected, the transaction was unsolicited.
   622  	if _, exists = sm.rejectedTxns[*txHash]; exists {
   623  		log.Debugf("Ignoring unsolicited previously rejected "+
   624  			"transaction %v from %s", txHash, peer)
   625  		return
   626  	}
   627  
   628  	// Process the transaction to include validation, insertion in the
   629  	// memory pool, orphan handling, etc.
   630  	acceptedTxs, err := sm.txMemPool.ProcessTransaction(tmsg.tx,
   631  		true, true, mempool.Tag(peer.ID()))
   632  
   633  	// Remove transaction from request maps. Either the mempool/chain
   634  	// already knows about it and as such we shouldn't have any more
   635  	// instances of trying to fetch it, or we failed to insert and thus
   636  	// we'll retry next time we get an inv.
   637  	delete(state.requestedTxns, *txHash)
   638  	delete(sm.requestedTxns, *txHash)
   639  
   640  	if err != nil {
   641  		// Do not request this transaction again until a new block
   642  		// has been processed.
   643  		limitAdd(sm.rejectedTxns, *txHash, maxRejectedTxns)
   644  
   645  		// When the error is a rule error, it means the transaction was
   646  		// simply rejected as opposed to something actually going wrong,
   647  		// so log it as such.  Otherwise, something really did go wrong,
   648  		// so log it as an actual error.
   649  		if _, ok := err.(mempool.RuleError); ok {
   650  			log.Debugf("Rejected transaction %v from %s: %v",
   651  				txHash, peer, err)
   652  		} else {
   653  			log.Errorf("Failed to process transaction %v: %v",
   654  				txHash, err)
   655  		}
   656  
   657  		// Convert the error into an appropriate reject message and
   658  		// send it.
   659  		code, reason := mempool.ErrToRejectErr(err)
   660  		peer.PushRejectMsg(wire.CmdTx, code, reason, txHash, false)
   661  		return
   662  	}
   663  
   664  	sm.peerNotifier.AnnounceNewTransactions(acceptedTxs)
   665  }
   666  
   667  // current returns true if we believe we are synced with our peers, false if we
    668  // still have blocks to check.
   669  func (sm *SyncManager) current() bool {
   670  	if !sm.chain.IsCurrent() {
   671  		return false
   672  	}
   673  
   674  	// if blockChain thinks we are current and we have no syncPeer it
   675  	// is probably right.
   676  	if sm.syncPeer == nil {
   677  		return true
   678  	}
   679  
   680  	// No matter what chain thinks, if we are below the block we are syncing
   681  	// to we are not current.
   682  	if sm.chain.BestSnapshot().Height < sm.syncPeer.LastBlock() {
   683  		return false
   684  	}
   685  	return true
   686  }
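
// Example of the final check: with a sync peer whose LastBlock is
// 850,000, current() reports false at a best height of 849,990 even if
// the chain itself believes it is current, and true once the best
// height reaches 850,000.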
   687  
   688  // handleBlockMsg handles block messages from all peers.
   689  func (sm *SyncManager) handleBlockMsg(bmsg *blockMsg) {
   690  	peer := bmsg.peer
   691  	state, exists := sm.peerStates[peer]
   692  	if !exists {
   693  		log.Warnf("Received block message from unknown peer %s", peer)
   694  		return
   695  	}
   696  
   697  	// If we didn't ask for this block then the peer is misbehaving.
   698  	blockHash := bmsg.block.Hash()
   699  	if _, exists = state.requestedBlocks[*blockHash]; !exists {
   700  		// The regression test intentionally sends some blocks twice
   701  		// to test duplicate block insertion fails.  Don't disconnect
   702  		// the peer or ignore the block when we're in regression test
   703  		// mode in this case so the chain code is actually fed the
   704  		// duplicate blocks.
   705  		if sm.chainParams != &chaincfg.RegressionNetParams {
   706  			log.Warnf("Got unrequested block %v from %s -- "+
   707  				"disconnecting", blockHash, peer.Addr())
   708  			peer.Disconnect()
   709  			return
   710  		}
   711  	}
   712  
   713  	// When in headers-first mode, if the block matches the hash of the
   714  	// first header in the list of headers that are being fetched, it's
   715  	// eligible for less validation since the headers have already been
   716  	// verified to link together and are valid up to the next checkpoint.
   717  	// Also, remove the list entry for all blocks except the checkpoint
   718  	// since it is needed to verify the next round of headers links
   719  	// properly.
   720  	isCheckpointBlock := false
   721  	behaviorFlags := blockchain.BFNone
   722  	if sm.headersFirstMode {
   723  		firstNodeEl := sm.headerList.Front()
   724  		if firstNodeEl != nil {
   725  			firstNode := firstNodeEl.Value.(*headerNode)
   726  			if blockHash.IsEqual(firstNode.hash) {
   727  				behaviorFlags |= blockchain.BFFastAdd
   728  				if firstNode.hash.IsEqual(sm.nextCheckpoint.Hash) {
   729  					isCheckpointBlock = true
   730  				} else {
   731  					sm.headerList.Remove(firstNodeEl)
   732  				}
   733  			}
   734  		}
   735  	}
   736  
   737  	// Remove block from request maps. Either chain will know about it and
   738  	// so we shouldn't have any more instances of trying to fetch it, or we
   739  	// will fail the insert and thus we'll retry next time we get an inv.
   740  	delete(state.requestedBlocks, *blockHash)
   741  	delete(sm.requestedBlocks, *blockHash)
   742  
   743  	// Process the block to include validation, best chain selection, orphan
   744  	// handling, etc.
   745  	_, isOrphan, err := sm.chain.ProcessBlock(bmsg.block, behaviorFlags)
   746  	if err != nil {
   747  		// When the error is a rule error, it means the block was simply
   748  		// rejected as opposed to something actually going wrong, so log
   749  		// it as such.  Otherwise, something really did go wrong, so log
   750  		// it as an actual error.
   751  		if _, ok := err.(blockchain.RuleError); ok {
   752  			log.Infof("Rejected block %v from %s: %v", blockHash,
   753  				peer, err)
   754  		} else {
   755  			log.Errorf("Failed to process block %v: %v",
   756  				blockHash, err)
   757  		}
   758  		if dbErr, ok := err.(database.Error); ok && dbErr.ErrorCode ==
   759  			database.ErrCorruption {
   760  			panic(dbErr)
   761  		}
   762  
   763  		// Convert the error into an appropriate reject message and
   764  		// send it.
   765  		code, reason := mempool.ErrToRejectErr(err)
   766  		peer.PushRejectMsg(wire.CmdBlock, code, reason, blockHash, false)
   767  		return
   768  	}
   769  
   770  	// Meta-data about the new block this peer is reporting. We use this
   771  	// below to update this peer's latest block height and the heights of
   772  	// other peers based on their last announced block hash. This allows us
   773  	// to dynamically update the block heights of peers, avoiding stale
   774  	// heights when looking for a new sync peer. Upon acceptance of a block
   775  	// or recognition of an orphan, we also use this information to update
    776  	// the block heights of other peers whose invs may have been ignored
    777  	// if we are actively syncing while the chain is not yet current or
    778  	// who may have lost the block announcement race.
   779  	var heightUpdate int32
   780  	var blkHashUpdate *chainhash.Hash
   781  
   782  	// Request the parents for the orphan block from the peer that sent it.
   783  	if isOrphan {
   784  		// We've just received an orphan block from a peer. In order
   785  		// to update the height of the peer, we try to extract the
   786  		// block height from the scriptSig of the coinbase transaction.
   787  		// Extraction is only attempted if the block's version is
   788  		// high enough (ver 2+).
   789  		header := &bmsg.block.MsgBlock().Header
   790  		if blockchain.ShouldHaveSerializedBlockHeight(header) {
   791  			coinbaseTx := bmsg.block.Transactions()[0]
   792  			cbHeight, err := blockchain.ExtractCoinbaseHeight(coinbaseTx)
   793  			if err != nil {
   794  				log.Warnf("Unable to extract height from "+
   795  					"coinbase tx: %v", err)
   796  			} else {
   797  				log.Debugf("Extracted height of %v from "+
   798  					"orphan block", cbHeight)
   799  				heightUpdate = cbHeight
   800  				blkHashUpdate = blockHash
   801  			}
   802  		}
   803  
   804  		orphanRoot := sm.chain.GetOrphanRoot(blockHash)
   805  		locator, err := sm.chain.LatestBlockLocator()
   806  		if err != nil {
   807  			log.Warnf("Failed to get block locator for the "+
   808  				"latest block: %v", err)
   809  		} else {
   810  			peer.PushGetBlocksMsg(locator, orphanRoot)
   811  		}
   812  	} else {
   813  		if peer == sm.syncPeer {
   814  			sm.lastProgressTime = time.Now()
   815  		}
   816  
   817  		// When the block is not an orphan, log information about it and
   818  		// update the chain state.
   819  		sm.progressLogger.LogBlockHeight(bmsg.block, sm.chain)
   820  
   821  		// Update this peer's latest block height, for future
   822  		// potential sync node candidacy.
   823  		best := sm.chain.BestSnapshot()
   824  		heightUpdate = best.Height
   825  		blkHashUpdate = &best.Hash
   826  
   827  		// Clear the rejected transactions.
   828  		sm.rejectedTxns = make(map[chainhash.Hash]struct{})
   829  	}
   830  
   831  	// Update the block height for this peer. But only send a message to
   832  	// the server for updating peer heights if this is an orphan or our
   833  	// chain is "current". This avoids sending a spammy amount of messages
   834  	// if we're syncing the chain from scratch.
   835  	if blkHashUpdate != nil && heightUpdate != 0 {
   836  		peer.UpdateLastBlockHeight(heightUpdate)
   837  		if isOrphan || sm.current() {
   838  			go sm.peerNotifier.UpdatePeerHeights(blkHashUpdate, heightUpdate,
   839  				peer)
   840  		}
   841  	}
   842  
    843  	// If we are not in headers-first mode, it's a good time to periodically
   844  	// flush the blockchain cache because we don't expect new blocks immediately.
   845  	// After that, there is nothing more to do.
   846  	if !sm.headersFirstMode {
   847  		if err := sm.chain.FlushUtxoCache(blockchain.FlushPeriodic); err != nil {
   848  			log.Errorf("Error while flushing the blockchain cache: %v", err)
   849  		}
   850  		return
   851  	}
   852  
    853  	// This is headers-first mode, so if the block is not a checkpoint,
    854  	// request more blocks using the header list when the request queue is
   855  	// getting short.
   856  	if !isCheckpointBlock {
   857  		if sm.startHeader != nil &&
   858  			len(state.requestedBlocks) < minInFlightBlocks {
   859  			sm.fetchHeaderBlocks()
   860  		}
   861  		return
   862  	}
   863  
   864  	// This is headers-first mode and the block is a checkpoint.  When
   865  	// there is a next checkpoint, get the next round of headers by asking
   866  	// for headers starting from the block after this one up to the next
   867  	// checkpoint.
   868  	prevHeight := sm.nextCheckpoint.Height
   869  	prevHash := sm.nextCheckpoint.Hash
   870  	sm.nextCheckpoint = sm.findNextHeaderCheckpoint(prevHeight)
   871  	if sm.nextCheckpoint != nil {
   872  		locator := blockchain.BlockLocator([]*chainhash.Hash{prevHash})
   873  		err := peer.PushGetHeadersMsg(locator, sm.nextCheckpoint.Hash)
   874  		if err != nil {
   875  			log.Warnf("Failed to send getheaders message to "+
   876  				"peer %s: %v", peer.Addr(), err)
   877  			return
   878  		}
   879  		log.Infof("Downloading headers for blocks %d to %d from "+
   880  			"peer %s", prevHeight+1, sm.nextCheckpoint.Height,
   881  			sm.syncPeer.Addr())
   882  		return
   883  	}
   884  
   885  	// This is headers-first mode, the block is a checkpoint, and there are
   886  	// no more checkpoints, so switch to normal mode by requesting blocks
   887  	// from the block after this one up to the end of the chain (zero hash).
   888  	sm.headersFirstMode = false
   889  	sm.headerList.Init()
   890  	log.Infof("Reached the final checkpoint -- switching to normal mode")
   891  	locator := blockchain.BlockLocator([]*chainhash.Hash{blockHash})
   892  	err = peer.PushGetBlocksMsg(locator, &zeroHash)
   893  	if err != nil {
   894  		log.Warnf("Failed to send getblocks message to peer %s: %v",
   895  			peer.Addr(), err)
   896  		return
   897  	}
   898  }
   899  
   900  // fetchHeaderBlocks creates and sends a request to the syncPeer for the next
   901  // list of blocks to be downloaded based on the current list of headers.
   902  func (sm *SyncManager) fetchHeaderBlocks() {
   903  	// Nothing to do if there is no start header.
   904  	if sm.startHeader == nil {
   905  		log.Warnf("fetchHeaderBlocks called with no start header")
   906  		return
   907  	}
   908  
   909  	// Build up a getdata request for the list of blocks the headers
   910  	// describe.  The size hint will be limited to wire.MaxInvPerMsg by
   911  	// the function, so no need to double check it here.
   912  	gdmsg := wire.NewMsgGetDataSizeHint(uint(sm.headerList.Len()))
   913  	numRequested := 0
   914  	for e := sm.startHeader; e != nil; e = e.Next() {
   915  		node, ok := e.Value.(*headerNode)
   916  		if !ok {
   917  			log.Warn("Header list node type is not a headerNode")
   918  			continue
   919  		}
   920  
   921  		iv := wire.NewInvVect(wire.InvTypeBlock, node.hash)
   922  		haveInv, err := sm.haveInventory(iv)
   923  		if err != nil {
   924  			log.Warnf("Unexpected failure when checking for "+
   925  				"existing inventory during header block "+
   926  				"fetch: %v", err)
   927  		}
   928  		if !haveInv {
   929  			syncPeerState := sm.peerStates[sm.syncPeer]
   930  
   931  			sm.requestedBlocks[*node.hash] = struct{}{}
   932  			syncPeerState.requestedBlocks[*node.hash] = struct{}{}
   933  
   934  			// If we're fetching from a witness enabled peer
   935  			// post-fork, then ensure that we receive all the
   936  			// witness data in the blocks.
   937  			if sm.syncPeer.IsWitnessEnabled() {
   938  				iv.Type = wire.InvTypeWitnessBlock
   939  			}
   940  
   941  			gdmsg.AddInvVect(iv)
   942  			numRequested++
   943  		}
   944  		sm.startHeader = e.Next()
   945  		if numRequested >= wire.MaxInvPerMsg {
   946  			break
   947  		}
   948  	}
   949  	if len(gdmsg.InvList) > 0 {
   950  		sm.syncPeer.QueueMessage(gdmsg, nil)
   951  	}
   952  }
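
// Illustrative note on batching, assuming the wire package caps each
// getdata at wire.MaxInvPerMsg (50000) entries: a checkpoint interval
// larger than the cap is fetched across several calls, since the loop
// above stops at the cap and startHeader marks where the next call
// resumes.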
   953  
   954  // handleHeadersMsg handles block header messages from all peers.  Headers are
   955  // requested when performing a headers-first sync.
   956  func (sm *SyncManager) handleHeadersMsg(hmsg *headersMsg) {
   957  	peer := hmsg.peer
   958  	_, exists := sm.peerStates[peer]
   959  	if !exists {
   960  		log.Warnf("Received headers message from unknown peer %s", peer)
   961  		return
   962  	}
   963  
   964  	// The remote peer is misbehaving if we didn't request headers.
   965  	msg := hmsg.headers
   966  	numHeaders := len(msg.Headers)
   967  	if !sm.headersFirstMode {
   968  		log.Warnf("Got %d unrequested headers from %s -- "+
   969  			"disconnecting", numHeaders, peer.Addr())
   970  		peer.Disconnect()
   971  		return
   972  	}
   973  
   974  	// Nothing to do for an empty headers message.
   975  	if numHeaders == 0 {
   976  		return
   977  	}
   978  
   979  	// Process all of the received headers ensuring each one connects to the
   980  	// previous and that checkpoints match.
   981  	receivedCheckpoint := false
   982  	var finalHash *chainhash.Hash
   983  	for _, blockHeader := range msg.Headers {
   984  		blockHash := blockHeader.BlockHash()
   985  		finalHash = &blockHash
   986  
   987  		// Ensure there is a previous header to compare against.
   988  		prevNodeEl := sm.headerList.Back()
   989  		if prevNodeEl == nil {
    990  			log.Warnf("Header list does not contain a previous " +
   991  				"element as expected -- disconnecting peer")
   992  			peer.Disconnect()
   993  			return
   994  		}
   995  
   996  		// Ensure the header properly connects to the previous one and
   997  		// add it to the list of headers.
   998  		node := headerNode{hash: &blockHash}
   999  		prevNode := prevNodeEl.Value.(*headerNode)
  1000  		if prevNode.hash.IsEqual(&blockHeader.PrevBlock) {
  1001  			node.height = prevNode.height + 1
  1002  			e := sm.headerList.PushBack(&node)
  1003  			if sm.startHeader == nil {
  1004  				sm.startHeader = e
  1005  			}
  1006  		} else {
  1007  			log.Warnf("Received block header that does not "+
  1008  				"properly connect to the chain from peer %s "+
  1009  				"-- disconnecting", peer.Addr())
  1010  			peer.Disconnect()
  1011  			return
  1012  		}
  1013  
  1014  		// Verify the header at the next checkpoint height matches.
  1015  		if node.height == sm.nextCheckpoint.Height {
  1016  			if node.hash.IsEqual(sm.nextCheckpoint.Hash) {
  1017  				receivedCheckpoint = true
  1018  				log.Infof("Verified downloaded block "+
  1019  					"header against checkpoint at height "+
  1020  					"%d/hash %s", node.height, node.hash)
  1021  			} else {
  1022  				log.Warnf("Block header at height %d/hash "+
  1023  					"%s from peer %s does NOT match "+
  1024  					"expected checkpoint hash of %s -- "+
  1025  					"disconnecting", node.height,
  1026  					node.hash, peer.Addr(),
  1027  					sm.nextCheckpoint.Hash)
  1028  				peer.Disconnect()
  1029  				return
  1030  			}
  1031  			break
  1032  		}
  1033  	}
  1034  
  1035  	// When this header is a checkpoint, switch to fetching the blocks for
  1036  	// all of the headers since the last checkpoint.
  1037  	if receivedCheckpoint {
  1038  		// Since the first entry of the list is always the final block
  1039  		// that is already in the database and is only used to ensure
  1040  		// the next header links properly, it must be removed before
  1041  		// fetching the blocks.
  1042  		sm.headerList.Remove(sm.headerList.Front())
  1043  		log.Infof("Received %v block headers: Fetching blocks",
  1044  			sm.headerList.Len())
  1045  		sm.progressLogger.SetLastLogTime(time.Now())
  1046  		sm.fetchHeaderBlocks()
  1047  		return
  1048  	}
  1049  
  1050  	// This header is not a checkpoint, so request the next batch of
  1051  	// headers starting from the latest known header and ending with the
  1052  	// next checkpoint.
  1053  	locator := blockchain.BlockLocator([]*chainhash.Hash{finalHash})
  1054  	err := peer.PushGetHeadersMsg(locator, sm.nextCheckpoint.Hash)
  1055  	if err != nil {
  1056  		log.Warnf("Failed to send getheaders message to "+
  1057  			"peer %s: %v", peer.Addr(), err)
  1058  		return
  1059  	}
  1060  }
  1061  
  1062  // handleNotFoundMsg handles notfound messages from all peers.
  1063  func (sm *SyncManager) handleNotFoundMsg(nfmsg *notFoundMsg) {
  1064  	peer := nfmsg.peer
  1065  	state, exists := sm.peerStates[peer]
  1066  	if !exists {
  1067  		log.Warnf("Received notfound message from unknown peer %s", peer)
  1068  		return
  1069  	}
  1070  	for _, inv := range nfmsg.notFound.InvList {
  1071  		// verify the hash was actually announced by the peer
  1072  		// before deleting from the global requested maps.
  1073  		switch inv.Type {
  1074  		case wire.InvTypeWitnessBlock:
  1075  			fallthrough
  1076  		case wire.InvTypeBlock:
  1077  			if _, exists := state.requestedBlocks[inv.Hash]; exists {
  1078  				delete(state.requestedBlocks, inv.Hash)
  1079  				delete(sm.requestedBlocks, inv.Hash)
  1080  			}
  1081  
  1082  		case wire.InvTypeWitnessTx:
  1083  			fallthrough
  1084  		case wire.InvTypeTx:
  1085  			if _, exists := state.requestedTxns[inv.Hash]; exists {
  1086  				delete(state.requestedTxns, inv.Hash)
  1087  				delete(sm.requestedTxns, inv.Hash)
  1088  			}
  1089  		}
  1090  	}
  1091  }
  1092  
  1093  // haveInventory returns whether or not the inventory represented by the passed
  1094  // inventory vector is known.  This includes checking all of the various places
  1095  // inventory can be when it is in different states such as blocks that are part
  1096  // of the main chain, on a side chain, in the orphan pool, and transactions that
  1097  // are in the memory pool (either the main pool or orphan pool).
  1098  func (sm *SyncManager) haveInventory(invVect *wire.InvVect) (bool, error) {
  1099  	switch invVect.Type {
  1100  	case wire.InvTypeWitnessBlock:
  1101  		fallthrough
  1102  	case wire.InvTypeBlock:
  1103  		// Ask chain if the block is known to it in any form (main
  1104  		// chain, side chain, or orphan).
  1105  		return sm.chain.HaveBlock(&invVect.Hash)
  1106  
  1107  	case wire.InvTypeWitnessTx:
  1108  		fallthrough
  1109  	case wire.InvTypeTx:
  1110  		// Ask the transaction memory pool if the transaction is known
  1111  		// to it in any form (main pool or orphan).
  1112  		if sm.txMemPool.HaveTransaction(&invVect.Hash) {
  1113  			return true, nil
  1114  		}
  1115  
  1116  		// Check if the transaction exists from the point of view of the
  1117  		// end of the main chain.  Note that this is only a best effort
  1118  		// since it is expensive to check existence of every output and
  1119  		// the only purpose of this check is to avoid downloading
  1120  		// already known transactions.  Only the first two outputs are
  1121  		// checked because the vast majority of transactions consist of
  1122  		// two outputs where one is some form of "pay-to-somebody-else"
  1123  		// and the other is a change output.
  1124  		prevOut := wire.OutPoint{Hash: invVect.Hash}
  1125  		for i := uint32(0); i < 2; i++ {
  1126  			prevOut.Index = i
  1127  			entry, err := sm.chain.FetchUtxoEntry(prevOut)
  1128  			if err != nil {
  1129  				return false, err
  1130  			}
  1131  			if entry != nil && !entry.IsSpent() {
  1132  				return true, nil
  1133  			}
  1134  		}
  1135  
  1136  		return false, nil
  1137  	}
  1138  
   1139  	// The requested inventory is an unsupported type, so just claim
  1140  	// it is known to avoid requesting it.
  1141  	return true, nil
  1142  }
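
// A hypothetical call sketch for the transaction path above: the
// mempool is consulted first, then outpoints {hash, 0} and {hash, 1}
// are probed in the UTXO set.  A known transaction whose first two
// outputs are already spent can therefore still be re-downloaded; that
// is the accepted cost of keeping the check cheap.
//
//	iv := wire.NewInvVect(wire.InvTypeTx, txHash)
//	have, err := sm.haveInventory(iv)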
  1143  
  1144  // handleInvMsg handles inv messages from all peers.
  1145  // We examine the inventory advertised by the remote peer and act accordingly.
  1146  func (sm *SyncManager) handleInvMsg(imsg *invMsg) {
  1147  	peer := imsg.peer
  1148  	state, exists := sm.peerStates[peer]
  1149  	if !exists {
  1150  		log.Warnf("Received inv message from unknown peer %s", peer)
  1151  		return
  1152  	}
  1153  
  1154  	// Attempt to find the final block in the inventory list.  There may
  1155  	// not be one.
  1156  	lastBlock := -1
  1157  	invVects := imsg.inv.InvList
  1158  	for i := len(invVects) - 1; i >= 0; i-- {
  1159  		if invVects[i].Type == wire.InvTypeBlock {
  1160  			lastBlock = i
  1161  			break
  1162  		}
  1163  	}
  1164  
  1165  	// If this inv contains a block announcement, and this isn't coming from
  1166  	// our current sync peer or we're current, then update the last
  1167  	// announced block for this peer. We'll use this information later to
  1168  	// update the heights of peers based on blocks we've accepted that they
  1169  	// previously announced.
  1170  	if lastBlock != -1 && (peer != sm.syncPeer || sm.current()) {
  1171  		peer.UpdateLastAnnouncedBlock(&invVects[lastBlock].Hash)
  1172  	}
  1173  
   1174  	// Ignore invs from peers that aren't the sync peer if we are not
   1175  	// current.  Helps prevent fetching a mass of orphans.
  1176  	if peer != sm.syncPeer && !sm.current() {
  1177  		return
  1178  	}
  1179  
  1180  	// If our chain is current and a peer announces a block we already
  1181  	// know of, then update their current block height.
  1182  	if lastBlock != -1 && sm.current() {
  1183  		blkHeight, err := sm.chain.BlockHeightByHash(&invVects[lastBlock].Hash)
  1184  		if err == nil {
  1185  			peer.UpdateLastBlockHeight(blkHeight)
  1186  		}
  1187  	}
  1188  
  1189  	// Request the advertised inventory if we don't already have it.  Also,
  1190  	// request parent blocks of orphans if we receive one we already have.
  1191  	// Finally, attempt to detect potential stalls due to long side chains
  1192  	// we already have and request more blocks to prevent them.
  1193  	for i, iv := range invVects {
  1194  		// Ignore unsupported inventory types.
  1195  		switch iv.Type {
  1196  		case wire.InvTypeBlock:
  1197  		case wire.InvTypeTx:
  1198  		case wire.InvTypeWitnessBlock:
  1199  		case wire.InvTypeWitnessTx:
  1200  		default:
  1201  			continue
  1202  		}
  1203  
  1204  		// Add the inventory to the cache of known inventory
  1205  		// for the peer.
  1206  		peer.AddKnownInventory(iv)
  1207  
  1208  		// Ignore inventory when we're in headers-first mode.
  1209  		if sm.headersFirstMode {
  1210  			continue
  1211  		}
  1212  
  1213  		// Request the inventory if we don't already have it.
  1214  		haveInv, err := sm.haveInventory(iv)
  1215  		if err != nil {
  1216  			log.Warnf("Unexpected failure when checking for "+
  1217  				"existing inventory during inv message "+
  1218  				"processing: %v", err)
  1219  			continue
  1220  		}
  1221  		if !haveInv {
  1222  			if iv.Type == wire.InvTypeTx {
  1223  				// Skip the transaction if it has already been
  1224  				// rejected.
  1225  				if _, exists := sm.rejectedTxns[iv.Hash]; exists {
  1226  					continue
  1227  				}
  1228  			}
  1229  
   1230  			// Ignore block invs from non-witness enabled
  1231  			// peers, as after segwit activation we only want to
  1232  			// download from peers that can provide us full witness
  1233  			// data for blocks.
  1234  			if !peer.IsWitnessEnabled() && iv.Type == wire.InvTypeBlock {
  1235  				continue
  1236  			}
  1237  
  1238  			// Add it to the request queue.
  1239  			state.requestQueue = append(state.requestQueue, iv)
  1240  			continue
  1241  		}
  1242  
  1243  		if iv.Type == wire.InvTypeBlock {
  1244  			// The block is an orphan block that we already have.
  1245  			// When the existing orphan was processed, it requested
  1246  			// the missing parent blocks.  When this scenario
  1247  			// happens, it means there were more blocks missing
  1248  			// than are allowed into a single inventory message.  As
  1249  			// a result, once this peer requested the final
  1250  			// advertised block, the remote peer noticed and is now
  1251  			// resending the orphan block as an available block
  1252  			// to signal there are more missing blocks that need to
  1253  			// be requested.
  1254  			if sm.chain.IsKnownOrphan(&iv.Hash) {
  1255  				// Request blocks starting at the latest known
  1256  				// up to the root of the orphan that just came
  1257  				// in.
  1258  				orphanRoot := sm.chain.GetOrphanRoot(&iv.Hash)
  1259  				locator, err := sm.chain.LatestBlockLocator()
  1260  				if err != nil {
  1261  					log.Errorf("PEER: Failed to get block "+
  1262  						"locator for the latest block: "+
  1263  						"%v", err)
  1264  					continue
  1265  				}
  1266  				peer.PushGetBlocksMsg(locator, orphanRoot)
  1267  				continue
  1268  			}
  1269  
  1270  			// We already have the final block advertised by this
  1271  			// inventory message, so force a request for more.  This
  1272  			// should only happen if we're on a really long side
  1273  			// chain.
  1274  			if i == lastBlock {
  1275  				// Request blocks after this one up to the
  1276  				// final one the remote peer knows about (zero
  1277  				// stop hash).
  1278  				locator := sm.chain.BlockLocatorFromHash(&iv.Hash)
  1279  				peer.PushGetBlocksMsg(locator, &zeroHash)
  1280  			}
  1281  		}
  1282  	}
  1283  
  1284  	// Request as much as possible at once.  Anything that won't fit into
  1285  	// the request will be requested on the next inv message.
  1286  	numRequested := 0
  1287  	gdmsg := wire.NewMsgGetData()
  1288  	requestQueue := state.requestQueue
  1289  	for len(requestQueue) != 0 {
  1290  		iv := requestQueue[0]
  1291  		requestQueue[0] = nil
  1292  		requestQueue = requestQueue[1:]
  1293  
  1294  		switch iv.Type {
  1295  		case wire.InvTypeWitnessBlock:
  1296  			fallthrough
  1297  		case wire.InvTypeBlock:
  1298  			// Request the block if there is not already a pending
  1299  			// request.
  1300  			if _, exists := sm.requestedBlocks[iv.Hash]; !exists {
  1301  				limitAdd(sm.requestedBlocks, iv.Hash, maxRequestedBlocks)
  1302  				limitAdd(state.requestedBlocks, iv.Hash, maxRequestedBlocks)
  1303  
  1304  				if peer.IsWitnessEnabled() {
  1305  					iv.Type = wire.InvTypeWitnessBlock
  1306  				}
  1307  
  1308  				gdmsg.AddInvVect(iv)
  1309  				numRequested++
  1310  			}
  1311  
  1312  		case wire.InvTypeWitnessTx:
  1313  			fallthrough
  1314  		case wire.InvTypeTx:
  1315  			// Request the transaction if there is not already a
  1316  			// pending request.
  1317  			if _, exists := sm.requestedTxns[iv.Hash]; !exists {
  1318  				limitAdd(sm.requestedTxns, iv.Hash, maxRequestedTxns)
  1319  				limitAdd(state.requestedTxns, iv.Hash, maxRequestedTxns)
  1320  
  1321  				// If the peer is capable, request the txn
  1322  				// including all witness data.
  1323  				if peer.IsWitnessEnabled() {
  1324  					iv.Type = wire.InvTypeWitnessTx
  1325  				}
  1326  
  1327  				gdmsg.AddInvVect(iv)
  1328  				numRequested++
  1329  			}
  1330  		}
  1331  
  1332  		if numRequested >= wire.MaxInvPerMsg {
  1333  			break
  1334  		}
  1335  	}
  1336  	state.requestQueue = requestQueue
  1337  	if len(gdmsg.InvList) > 0 {
  1338  		peer.QueueMessage(gdmsg, nil)
  1339  	}
  1340  }
  1341  
  1342  // blockHandler is the main handler for the sync manager.  It must be run as a
  1343  // goroutine.  It processes block and inv messages in a separate goroutine
  1344  // from the peer handlers so the block (MsgBlock) messages are handled by a
  1345  // single thread without needing to lock memory data structures.  This is
  1346  // important because the sync manager controls which blocks are needed and how
  1347  // the fetching should proceed.
  1348  func (sm *SyncManager) blockHandler() {
  1349  	stallTicker := time.NewTicker(stallSampleInterval)
  1350  	defer stallTicker.Stop()
  1351  
  1352  out:
  1353  	for {
  1354  		select {
  1355  		case m := <-sm.msgChan:
  1356  			switch msg := m.(type) {
  1357  			case *newPeerMsg:
  1358  				sm.handleNewPeerMsg(msg.peer)
  1359  
  1360  			case *txMsg:
  1361  				sm.handleTxMsg(msg)
  1362  				msg.reply <- struct{}{}
  1363  
  1364  			case *blockMsg:
  1365  				sm.handleBlockMsg(msg)
  1366  				msg.reply <- struct{}{}
  1367  
  1368  			case *invMsg:
  1369  				sm.handleInvMsg(msg)
  1370  
  1371  			case *headersMsg:
  1372  				sm.handleHeadersMsg(msg)
  1373  
  1374  			case *notFoundMsg:
  1375  				sm.handleNotFoundMsg(msg)
  1376  
  1377  			case *donePeerMsg:
  1378  				sm.handleDonePeerMsg(msg.peer)
  1379  
  1380  			case getSyncPeerMsg:
  1381  				var peerID int32
  1382  				if sm.syncPeer != nil {
  1383  					peerID = sm.syncPeer.ID()
  1384  				}
  1385  				msg.reply <- peerID
  1386  
  1387  			case processBlockMsg:
  1388  				_, isOrphan, err := sm.chain.ProcessBlock(
  1389  					msg.block, msg.flags)
  1390  				if err != nil {
  1391  					msg.reply <- processBlockResponse{
  1392  						isOrphan: false,
  1393  						err:      err,
  1394  					}
					continue
  1395  				}
  1396  
  1397  				msg.reply <- processBlockResponse{
  1398  					isOrphan: isOrphan,
  1399  					err:      nil,
  1400  				}
  1401  
  1402  			case isCurrentMsg:
  1403  				msg.reply <- sm.current()
  1404  
  1405  			case pauseMsg:
  1406  				// Wait until the sender unpauses the manager.
  1407  				<-msg.unpause
  1408  
  1409  			default:
  1410  				log.Warnf("Invalid message type in block "+
  1411  					"handler: %T", msg)
  1412  			}
  1413  
  1414  		case <-stallTicker.C:
  1415  			sm.handleStallSample()
  1416  
  1417  		case <-sm.quit:
  1418  			break out
  1419  		}
  1420  	}
  1421  
  1422  	log.Debug("Block handler shutting down: flushing blockchain caches...")
  1423  	if err := sm.chain.FlushUtxoCache(blockchain.FlushRequired); err != nil {
  1424  		log.Errorf("Error while flushing blockchain caches: %v", err)
  1425  	}
  1426  
  1427  	sm.wg.Done()
  1428  	log.Trace("Block handler done")
  1429  }
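
// The exported methods below funnel work into blockHandler via sm.msgChan,
// which is what allows the handler to mutate its maps and header list
// without locks.  From the caller's side the contract is: send a message,
// then (for tx and block messages) wait on the reply channel.  Illustrative
// sketch:
//
//	done := make(chan struct{})
//	sm.QueueBlock(block, peer, done)
//	<-done // blockHandler has finished processing the block.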
  1430  
  1431  // handleBlockchainNotification handles notifications from blockchain.  It does
  1432  // things such as request orphan block parents and relay accepted blocks to
  1433  // connected peers.
  1434  func (sm *SyncManager) handleBlockchainNotification(notification *blockchain.Notification) {
  1435  	switch notification.Type {
  1436  	// A block has been accepted into the block chain.  Relay it to other
  1437  	// peers.
  1438  	case blockchain.NTBlockAccepted:
  1439  		// Don't relay if we are not current. Other peers that are
  1440  		// current should already know about it.
  1441  		if !sm.current() {
  1442  			return
  1443  		}
  1444  
  1445  		block, ok := notification.Data.(*btcutil.Block)
  1446  		if !ok {
  1447  			log.Warnf("Chain accepted notification is not a block.")
  1448  			break
  1449  		}
  1450  
  1451  		// Generate the inventory vector and relay it.
  1452  		iv := wire.NewInvVect(wire.InvTypeBlock, block.Hash())
  1453  		sm.peerNotifier.RelayInventory(iv, block.MsgBlock().Header)
  1454  
  1455  	// A block has been connected to the main block chain.
  1456  	case blockchain.NTBlockConnected:
  1457  		block, ok := notification.Data.(*btcutil.Block)
  1458  		if !ok {
  1459  			log.Warnf("Chain connected notification is not a block.")
  1460  			break
  1461  		}
  1462  
  1463  		// First, remove all of the transactions (except the coinbase)
  1464  		// in the connected block from the transaction pool.  Second,
  1465  		// remove any transactions which are now double spends as a
  1466  		// result of these new transactions.  Finally, remove any
  1467  		// transaction that is no longer an orphan.  Transactions which
  1468  		// depend on a confirmed transaction are NOT removed recursively
  1469  		// because they are still valid.
  1470  		for _, tx := range block.Transactions()[1:] {
  1471  			sm.txMemPool.RemoveTransaction(tx, false)
  1472  			sm.txMemPool.RemoveDoubleSpends(tx)
  1473  			sm.txMemPool.RemoveOrphan(tx)
  1474  			sm.peerNotifier.TransactionConfirmed(tx)
  1475  			acceptedTxs := sm.txMemPool.ProcessOrphans(tx)
  1476  			sm.peerNotifier.AnnounceNewTransactions(acceptedTxs)
  1477  		}
  1478  
  1479  		// Register block with the fee estimator, if it exists.
  1480  		if sm.feeEstimator != nil {
  1481  			err := sm.feeEstimator.RegisterBlock(block)
  1482  
  1483  			// If an error is somehow generated then the fee estimator
  1484  			// has entered an invalid state. Since it doesn't know how
  1485  			// to recover, create a new one.
  1486  			if err != nil {
  1487  				sm.feeEstimator = mempool.NewFeeEstimator(
  1488  					mempool.DefaultEstimateFeeMaxRollback,
  1489  					mempool.DefaultEstimateFeeMinRegisteredBlocks)
  1490  			}
  1491  		}
  1492  
  1493  	// A block has been disconnected from the main block chain.
  1494  	case blockchain.NTBlockDisconnected:
  1495  		block, ok := notification.Data.(*btcutil.Block)
  1496  		if !ok {
  1497  			log.Warnf("Chain disconnected notification is not a block.")
  1498  			break
  1499  		}
  1500  
  1501  		// Reinsert all of the transactions (except the coinbase) into
  1502  		// the transaction pool.
  1503  		for _, tx := range block.Transactions()[1:] {
  1504  			_, _, err := sm.txMemPool.MaybeAcceptTransaction(tx,
  1505  				false, false)
  1506  			if err != nil {
  1507  				// Remove the transaction and all transactions
  1508  				// that depend on it if it wasn't accepted into
  1509  				// the transaction pool.
  1510  				sm.txMemPool.RemoveTransaction(tx, true)
  1511  			}
  1512  		}
  1513  
  1514  		// Rollback previous block recorded by the fee estimator.
  1515  		if sm.feeEstimator != nil {
  1516  			sm.feeEstimator.Rollback(block.Hash())
  1517  		}
  1518  	}
  1519  }
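
// The handler above is registered with the chain instance via Subscribe (see
// New below).  A minimal standalone subscriber follows the same shape
// (illustrative sketch; chain is assumed to be an already-constructed
// *blockchain.BlockChain):
//
//	chain.Subscribe(func(n *blockchain.Notification) {
//		switch n.Type {
//		case blockchain.NTBlockConnected:
//			if block, ok := n.Data.(*btcutil.Block); ok {
//				log.Infof("Connected block %v", block.Hash())
//			}
//		}
//	})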
  1520  
  1521  // NewPeer informs the sync manager of a newly active peer.
  1522  func (sm *SyncManager) NewPeer(peer *peerpkg.Peer) {
  1523  	// Ignore if we are shutting down.
  1524  	if atomic.LoadInt32(&sm.shutdown) != 0 {
  1525  		return
  1526  	}
  1527  	sm.msgChan <- &newPeerMsg{peer: peer}
  1528  }
  1529  
  1530  // QueueTx adds the passed transaction message and peer to the block handling
  1531  // queue. Responds to the done channel argument after the tx message is
  1532  // processed.
  1533  func (sm *SyncManager) QueueTx(tx *btcutil.Tx, peer *peerpkg.Peer, done chan struct{}) {
  1534  	// Don't accept more transactions if we're shutting down.
  1535  	if atomic.LoadInt32(&sm.shutdown) != 0 {
  1536  		done <- struct{}{}
  1537  		return
  1538  	}
  1539  
  1540  	sm.msgChan <- &txMsg{tx: tx, peer: peer, reply: done}
  1541  }
  1542  
  1543  // QueueBlock adds the passed block message and peer to the block handling
  1544  // queue. Responds to the done channel argument after the block message is
  1545  // processed.
  1546  func (sm *SyncManager) QueueBlock(block *btcutil.Block, peer *peerpkg.Peer, done chan struct{}) {
  1547  	// Don't accept more blocks if we're shutting down.
  1548  	if atomic.LoadInt32(&sm.shutdown) != 0 {
  1549  		done <- struct{}{}
  1550  		return
  1551  	}
  1552  
  1553  	sm.msgChan <- &blockMsg{block: block, peer: peer, reply: done}
  1554  }
  1555  
  1556  // QueueInv adds the passed inv message and peer to the block handling queue.
  1557  func (sm *SyncManager) QueueInv(inv *wire.MsgInv, peer *peerpkg.Peer) {
  1558  	// No channel handling here because peers do not need to block on inv
  1559  	// messages.
  1560  	if atomic.LoadInt32(&sm.shutdown) != 0 {
  1561  		return
  1562  	}
  1563  
  1564  	sm.msgChan <- &invMsg{inv: inv, peer: peer}
  1565  }
  1566  
  1567  // QueueHeaders adds the passed headers message and peer to the block handling
  1568  // queue.
  1569  func (sm *SyncManager) QueueHeaders(headers *wire.MsgHeaders, peer *peerpkg.Peer) {
  1570  	// No channel handling here because peers do not need to block on
  1571  	// headers messages.
  1572  	if atomic.LoadInt32(&sm.shutdown) != 0 {
  1573  		return
  1574  	}
  1575  
  1576  	sm.msgChan <- &headersMsg{headers: headers, peer: peer}
  1577  }
  1578  
  1579  // QueueNotFound adds the passed notfound message and peer to the block handling
  1580  // queue.
  1581  func (sm *SyncManager) QueueNotFound(notFound *wire.MsgNotFound, peer *peerpkg.Peer) {
  1582  	// No channel handling here because peers do not need to block on
  1583  	// notfound messages.
  1584  	if atomic.LoadInt32(&sm.shutdown) != 0 {
  1585  		return
  1586  	}
  1587  
  1588  	sm.msgChan <- &notFoundMsg{notFound: notFound, peer: peer}
  1589  }
  1590  
  1591  // DonePeer informs the sync manager that a peer has disconnected.
  1592  func (sm *SyncManager) DonePeer(peer *peerpkg.Peer) {
  1593  	// Ignore if we are shutting down.
  1594  	if atomic.LoadInt32(&sm.shutdown) != 0 {
  1595  		return
  1596  	}
  1597  
  1598  	sm.msgChan <- &donePeerMsg{peer: peer}
  1599  }
  1600  
  1601  // Start begins the core block handler which processes block and inv messages.
  1602  func (sm *SyncManager) Start() {
  1603  	// Already started?
  1604  	if atomic.AddInt32(&sm.started, 1) != 1 {
  1605  		return
  1606  	}
  1607  
  1608  	log.Trace("Starting sync manager")
  1609  	sm.wg.Add(1)
  1610  	go sm.blockHandler()
  1611  }
  1612  
  1613  // Stop gracefully shuts down the sync manager by stopping all asynchronous
  1614  // handlers and waiting for them to finish.
  1615  func (sm *SyncManager) Stop() error {
  1616  	if atomic.AddInt32(&sm.shutdown, 1) != 1 {
  1617  		log.Warnf("Sync manager is already in the process of " +
  1618  			"shutting down")
  1619  		return nil
  1620  	}
  1621  
  1622  	log.Infof("Sync manager shutting down")
  1623  	close(sm.quit)
  1624  	sm.wg.Wait()
  1625  	return nil
  1626  }
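
// Start and Stop are guarded by atomic counters, so calling either more than
// once is harmless: only the first Start launches blockHandler, and only the
// first Stop closes the quit channel and waits.  Typical lifecycle
// (illustrative sketch; sm is an already-constructed *SyncManager):
//
//	sm.Start()
//	// ... node runs ...
//	if err := sm.Stop(); err != nil {
//		log.Errorf("Failed to stop sync manager: %v", err)
//	}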
  1627  
  1628  // SyncPeerID returns the ID of the current sync peer, or 0 if there is none.
  1629  func (sm *SyncManager) SyncPeerID() int32 {
  1630  	reply := make(chan int32)
  1631  	sm.msgChan <- getSyncPeerMsg{reply: reply}
  1632  	return <-reply
  1633  }
  1634  
  1635  // ProcessBlock processes the provided block using the internal block chain
  1636  // instance and returns whether or not the block is an orphan.
  1637  func (sm *SyncManager) ProcessBlock(block *btcutil.Block, flags blockchain.BehaviorFlags) (bool, error) {
  1638  	reply := make(chan processBlockResponse, 1)
  1639  	sm.msgChan <- processBlockMsg{block: block, flags: flags, reply: reply}
  1640  	response := <-reply
  1641  	return response.isOrphan, response.err
  1642  }
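
// A caller-side sketch of ProcessBlock (illustrative only; blockchain.BFNone
// requests default validation behavior):
//
//	isOrphan, err := sm.ProcessBlock(block, blockchain.BFNone)
//	if err != nil {
//		log.Errorf("Block rejected: %v", err)
//	} else if isOrphan {
//		log.Infof("Block %v is an orphan", block.Hash())
//	}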
  1643  
  1644  // IsCurrent returns whether or not the sync manager believes it is synced with
  1645  // the connected peers.
  1646  func (sm *SyncManager) IsCurrent() bool {
  1647  	reply := make(chan bool)
  1648  	sm.msgChan <- isCurrentMsg{reply: reply}
  1649  	return <-reply
  1650  }
  1651  
  1652  // Pause pauses the sync manager until the returned channel is closed.
  1653  //
  1654  // Note that while paused, all peer and block processing is halted.  The
  1655  // message sender should avoid pausing the sync manager for long durations.
  1656  func (sm *SyncManager) Pause() chan<- struct{} {
  1657  	c := make(chan struct{})
  1658  	sm.msgChan <- pauseMsg{c}
  1659  	return c
  1660  }
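
// Because Pause returns a send/close-only channel, the caller, not the sync
// manager, decides when processing resumes, and forgetting to close the
// channel deadlocks the handler.  The typical pattern is (illustrative
// sketch):
//
//	unpause := sm.Pause()
//	// ... perform work that requires the manager to be quiescent ...
//	close(unpause)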
  1661  
  1662  // New constructs a new SyncManager. Use Start to begin processing asynchronous
  1663  // block, tx, and inv updates.
  1664  func New(config *Config) (*SyncManager, error) {
  1665  	sm := SyncManager{
  1666  		peerNotifier:    config.PeerNotifier,
  1667  		chain:           config.Chain,
  1668  		txMemPool:       config.TxMemPool,
  1669  		chainParams:     config.ChainParams,
  1670  		rejectedTxns:    make(map[chainhash.Hash]struct{}),
  1671  		requestedTxns:   make(map[chainhash.Hash]struct{}),
  1672  		requestedBlocks: make(map[chainhash.Hash]struct{}),
  1673  		peerStates:      make(map[*peerpkg.Peer]*peerSyncState),
  1674  		progressLogger:  newBlockProgressLogger("Processed", log),
  1675  		msgChan:         make(chan interface{}, config.MaxPeers*3),
  1676  		headerList:      list.New(),
  1677  		quit:            make(chan struct{}),
  1678  		feeEstimator:    config.FeeEstimator,
  1679  	}
  1680  
  1681  	best := sm.chain.BestSnapshot()
  1682  	if !config.DisableCheckpoints {
  1683  		// Initialize the next checkpoint based on the current height.
  1684  		sm.nextCheckpoint = sm.findNextHeaderCheckpoint(best.Height)
  1685  		if sm.nextCheckpoint != nil {
  1686  			sm.resetHeaderState(&best.Hash, best.Height)
  1687  		}
  1688  	} else {
  1689  		log.Info("Checkpoints are disabled")
  1690  	}
  1691  
  1692  	sm.chain.Subscribe(sm.handleBlockchainNotification)
  1693  
  1694  	return &sm, nil
  1695  }
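
// An end-to-end construction sketch (illustrative only; every value below is
// a placeholder the caller must supply from its own subsystems):
//
//	sm, err := netsync.New(&netsync.Config{
//		PeerNotifier:       server,       // implements PeerNotifier
//		Chain:              chain,        // *blockchain.BlockChain
//		TxMemPool:          txPool,       // *mempool.TxPool
//		ChainParams:        &chaincfg.MainNetParams,
//		DisableCheckpoints: false,
//		MaxPeers:           125,
//		FeeEstimator:       feeEstimator, // may be nil
//	})
//	if err != nil {
//		// Handle the error.
//	}
//	sm.Start()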