github.com/lbryio/lbcd@v0.22.119/netsync/manager.go

// Copyright (c) 2013-2017 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package netsync

import (
	"container/list"
	"math/rand"
	"net"
	"sync"
	"sync/atomic"
	"time"

	"github.com/lbryio/lbcd/blockchain"
	"github.com/lbryio/lbcd/chaincfg"
	"github.com/lbryio/lbcd/chaincfg/chainhash"
	"github.com/lbryio/lbcd/database"
	"github.com/lbryio/lbcd/fees"
	"github.com/lbryio/lbcd/mempool"
	peerpkg "github.com/lbryio/lbcd/peer"
	"github.com/lbryio/lbcd/wire"
	btcutil "github.com/lbryio/lbcutil"
)

const (
	// minInFlightBlocks is the minimum number of blocks that should be
	// in the request queue for headers-first mode before requesting
	// more.
	minInFlightBlocks = 10

	// maxRejectedTxns is the maximum number of rejected transaction
	// hashes to store in memory.
	maxRejectedTxns = 1000

	// maxRequestedBlocks is the maximum number of requested block
	// hashes to store in memory.
	maxRequestedBlocks = wire.MaxInvPerMsg

	// maxRequestedTxns is the maximum number of requested transaction
	// hashes to store in memory.
	maxRequestedTxns = wire.MaxInvPerMsg

	// maxStallDuration is the time after which we will disconnect our
	// current sync peer if we haven't made progress.
	maxStallDuration = 3 * time.Minute

	// stallSampleInterval is the interval at which we will check to see
	// if our sync has stalled.
	stallSampleInterval = 30 * time.Second
)

// zeroHash is the zero value hash (all zeros).  It is defined as a convenience.
var zeroHash chainhash.Hash

// newPeerMsg signifies a newly connected peer to the block handler.
type newPeerMsg struct {
	peer *peerpkg.Peer
}

// blockMsg packages a bitcoin block message and the peer it came from together
// so the block handler has access to that information.
type blockMsg struct {
	block *btcutil.Block
	peer  *peerpkg.Peer
	reply chan struct{}
}

// invMsg packages a bitcoin inv message and the peer it came from together
// so the block handler has access to that information.
type invMsg struct {
	inv  *wire.MsgInv
	peer *peerpkg.Peer
}

// headersMsg packages a bitcoin headers message and the peer it came from
// together so the block handler has access to that information.
type headersMsg struct {
	headers *wire.MsgHeaders
	peer    *peerpkg.Peer
}

// notFoundMsg packages a bitcoin notfound message and the peer it came from
// together so the block handler has access to that information.
type notFoundMsg struct {
	notFound *wire.MsgNotFound
	peer     *peerpkg.Peer
}

// donePeerMsg signifies a newly disconnected peer to the block handler.
type donePeerMsg struct {
	peer *peerpkg.Peer
}

// txMsg packages a bitcoin tx message and the peer it came from together
// so the block handler has access to that information.
type txMsg struct {
	tx    *btcutil.Tx
	peer  *peerpkg.Peer
	reply chan struct{}
}

// getSyncPeerMsg is a message type to be sent across the message channel for
// retrieving the current sync peer.
type getSyncPeerMsg struct {
	reply chan int32
}

// processBlockResponse is a response sent to the reply channel of a
// processBlockMsg.
type processBlockResponse struct {
	isOrphan bool
	err      error
}

// processBlockMsg is a message type to be sent across the message channel
// for requesting that a block be processed.  Note this call differs from
// blockMsg above in that blockMsg is intended for blocks that came from peers
// and have extra handling whereas this message essentially is just a
// concurrent safe way to call ProcessBlock on the internal block chain
// instance.
type processBlockMsg struct {
	block *btcutil.Block
	flags blockchain.BehaviorFlags
	reply chan processBlockResponse
}

// isCurrentMsg is a message type to be sent across the message channel for
// requesting whether or not the sync manager believes it is synced with the
// currently connected peers.
type isCurrentMsg struct {
	reply chan bool
}

// pauseMsg is a message type to be sent across the message channel for
// pausing the sync manager.  This effectively provides the caller with
// exclusive access over the manager until a receive is performed on the
// unpause channel.
type pauseMsg struct {
	unpause <-chan struct{}
}

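// pauseBriefly is a minimal sketch of how a caller might use pauseMsg; it is
// illustrative only and not part of the original file.  Once blockHandler
// receives the message it blocks on the unpause channel, so fn runs with
// exclusive access to the manager's state, and closing the channel resumes
// normal processing.
func pauseBriefly(sm *SyncManager, fn func()) {
	unpause := make(chan struct{})
	sm.msgChan <- pauseMsg{unpause: unpause}
	fn()
	close(unpause)
}
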
// headerNode is used as a node in a list of headers that are linked together
// between checkpoints.
type headerNode struct {
	height int32
	hash   *chainhash.Hash
}

// peerSyncState stores additional information that the SyncManager tracks
// about a peer.
type peerSyncState struct {
	syncCandidate   bool
	requestQueue    []*wire.InvVect
	requestedTxns   map[chainhash.Hash]struct{}
	requestedBlocks map[chainhash.Hash]struct{}
}

// limitAdd is a helper function for maps that are limited to a maximum number
// of entries.  It evicts a random value if adding the new value would cause
// the map to overflow the maximum allowed.
func limitAdd(m map[chainhash.Hash]struct{}, hash chainhash.Hash, limit int) {
	if len(m)+1 > limit {
		// Remove a random entry from the map.  For most compilers, Go's
		// range statement iterates starting at a random item although
		// that is not 100% guaranteed by the spec.  The iteration order
		// is not important here because an adversary would have to be
		// able to pull off preimage attacks on the hashing function in
		// order to target eviction of specific entries anyways.
		for txHash := range m {
			delete(m, txHash)
			break
		}
	}
	m[hash] = struct{}{}
}

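// limitAddExample is a minimal sketch, not part of the original file,
// demonstrating the bounded-map behavior of limitAdd above: the map never
// grows beyond the limit, with a random entry evicted on each overflow.  The
// hash values are arbitrary placeholders.
func limitAddExample() {
	m := make(map[chainhash.Hash]struct{})
	for i := 0; i < 10; i++ {
		var h chainhash.Hash
		h[0] = byte(i) // give each hash a distinct first byte
		limitAdd(m, h, 3)
	}
	// len(m) == 3 here; seven of the ten entries were evicted at random.
}
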
// SyncManager is used to communicate block related messages with peers. The
// SyncManager is started by executing Start() in a goroutine. Once started,
// it selects peers to sync from and starts the initial block download. Once the
// chain is in sync, the SyncManager handles incoming block and header
// notifications and relays announcements of new blocks to peers.
type SyncManager struct {
	peerNotifier   PeerNotifier
	started        int32
	shutdown       int32
	chain          *blockchain.BlockChain
	txMemPool      *mempool.TxPool
	chainParams    *chaincfg.Params
	progressLogger *blockProgressLogger
	msgChan        chan interface{}
	wg             sync.WaitGroup
	quit           chan struct{}

	// These fields should only be accessed from the blockHandler thread
	rejectedTxns     map[chainhash.Hash]struct{}
	requestedTxns    map[chainhash.Hash]struct{}
	requestedBlocks  map[chainhash.Hash]struct{}
	syncPeer         *peerpkg.Peer
	peerStates       map[*peerpkg.Peer]*peerSyncState
	lastProgressTime time.Time

	// The following fields are used for headers-first mode.
	headersFirstMode bool
	headerList       *list.List
	startHeader      *list.Element
	nextCheckpoint   *chaincfg.Checkpoint

	// An optional fee estimator.
	feeEstimator *fees.Estimator
}

// resetHeaderState sets the headers-first mode state to values appropriate for
// syncing from a new peer.
func (sm *SyncManager) resetHeaderState(newestHash *chainhash.Hash, newestHeight int32) {
	sm.headersFirstMode = false
	sm.headerList.Init()
	sm.startHeader = nil

	// When there is a next checkpoint, add an entry for the latest known
	// block into the header pool.  This allows the next downloaded header
	// to prove it links to the chain properly.
	if sm.nextCheckpoint != nil {
		node := headerNode{height: newestHeight, hash: newestHash}
		sm.headerList.PushBack(&node)
	}
}

// findNextHeaderCheckpoint returns the next checkpoint after the passed height.
// It returns nil when there is not one, either because the height is already
// later than the final checkpoint or for some other reason such as disabled
// checkpoints.
func (sm *SyncManager) findNextHeaderCheckpoint(height int32) *chaincfg.Checkpoint {
	checkpoints := sm.chain.Checkpoints()
	if len(checkpoints) == 0 {
		return nil
	}

	// There is no next checkpoint if the height is already after the final
	// checkpoint.
	finalCheckpoint := &checkpoints[len(checkpoints)-1]
	if height >= finalCheckpoint.Height {
		return nil
	}

	// Find the next checkpoint.
	nextCheckpoint := finalCheckpoint
	for i := len(checkpoints) - 2; i >= 0; i-- {
		if height >= checkpoints[i].Height {
			break
		}
		nextCheckpoint = &checkpoints[i]
	}
	return nextCheckpoint
}

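// nextCheckpointExample is an illustrative sketch, not part of the original
// file, of the selection logic above using a hypothetical checkpoint list:
// walk backward from the final checkpoint to find the earliest one whose
// height is still ahead of the given height.
func nextCheckpointExample() *chaincfg.Checkpoint {
	checkpoints := []chaincfg.Checkpoint{
		{Height: 100, Hash: &zeroHash}, // placeholder hashes
		{Height: 200, Hash: &zeroHash},
		{Height: 300, Hash: &zeroHash},
	}
	height := int32(150)
	next := &checkpoints[len(checkpoints)-1]
	for i := len(checkpoints) - 2; i >= 0; i-- {
		if height >= checkpoints[i].Height {
			break
		}
		next = &checkpoints[i]
	}
	return next // Height 200 for a current height of 150.
}
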
// startSync will choose the best peer among the available candidate peers to
// download/sync the blockchain from.  When syncing is already running, it
// simply returns.  It also examines the candidates for any which are no longer
// candidates and removes them as needed.
func (sm *SyncManager) startSync() {
	// Return now if we're already syncing.
	if sm.syncPeer != nil {
		return
	}

	// Once the segwit soft-fork package has activated, we only
	// want to sync from peers which are witness enabled to ensure
	// that we fully validate all blockchain data.
	segwitActive, err := sm.chain.IsDeploymentActive(chaincfg.DeploymentSegwit)
	if err != nil {
		log.Errorf("Unable to query for segwit soft-fork state: %v", err)
		return
	}

	best := sm.chain.BestSnapshot()
	var higherPeers, equalPeers []*peerpkg.Peer
	for peer, state := range sm.peerStates {
		if !state.syncCandidate {
			continue
		}

		if segwitActive && !peer.IsWitnessEnabled() {
			log.Debugf("peer %v not witness enabled, skipping", peer)
			continue
		}

		// Remove sync candidate peers that are no longer candidates due
		// to passing their latest known block.  NOTE: The < is
		// intentional as opposed to <=.  While technically the peer
		// doesn't have a later block when it's equal, it will likely
		// have one soon so it is a reasonable choice.  It also allows
		// the case where both are at 0 such as during regression test.
		if peer.LastBlock() < best.Height {
			state.syncCandidate = false
			continue
		}

		// If the peer is at the same height as us, we'll add it to a
		// set of backup peers in case we do not find one with a higher
		// height. If we are synced up with all of our peers, all of
		// them will be in this set.
		if peer.LastBlock() == best.Height {
			equalPeers = append(equalPeers, peer)
			continue
		}

		// This peer has a height greater than our own, we'll consider
		// it in the set of better peers from which we'll randomly
		// select.
		higherPeers = append(higherPeers, peer)
	}

	// Pick randomly from the set of peers greater than our block height,
	// falling back to a random peer of the same height if none are greater.
	//
	// TODO(conner): Use a better algorithm to rank peers based on
	// observed metrics and/or sync in parallel.
	var bestPeer *peerpkg.Peer
	switch {
	case len(higherPeers) > 0:
		bestPeer = higherPeers[rand.Intn(len(higherPeers))]

	case len(equalPeers) > 0:
		bestPeer = equalPeers[rand.Intn(len(equalPeers))]
	}

	// Start syncing from the best peer if one was selected.
	if bestPeer != nil {
		// Clear the requestedBlocks if the sync peer changes, otherwise
		// we may ignore blocks we need that the last sync peer failed
		// to send.
		sm.requestedBlocks = make(map[chainhash.Hash]struct{})

		locator, err := sm.chain.LatestBlockLocator()
		if err != nil {
			log.Errorf("Failed to get block locator for the "+
				"latest block: %v", err)
			return
		}

		log.Infof("Syncing to block height %d from peer %v",
			bestPeer.LastBlock(), bestPeer.Addr())

		// When the current height is less than a known checkpoint we
		// can use block headers to learn about which blocks comprise
		// the chain up to the checkpoint and perform less validation
		// for them.  This is possible since each header contains the
		// hash of the previous header and a merkle root.  Therefore if
		// we validate all of the received headers link together
		// properly and the checkpoint hashes match, we can be sure the
		// hashes for the blocks in between are accurate.  Further, once
		// the full blocks are downloaded, the merkle root is computed
		// and compared against the value in the header which proves the
		// full block hasn't been tampered with.
		//
		// Once we have passed the final checkpoint, or checkpoints are
		// disabled, use standard inv messages to learn about the blocks
		// and fully validate them.  Finally, regression test mode does
		// not support the headers-first approach so do normal block
		// downloads when in regression test mode.
		if sm.nextCheckpoint != nil &&
			best.Height < sm.nextCheckpoint.Height &&
			sm.chainParams != &chaincfg.RegressionNetParams {

			bestPeer.PushGetHeadersMsg(locator, sm.nextCheckpoint.Hash)
			sm.headersFirstMode = true
			log.Infof("Downloading headers for blocks %d to "+
				"%d from peer %s", best.Height+1,
				sm.nextCheckpoint.Height, bestPeer.Addr())
		} else {
			bestPeer.PushGetBlocksMsg(locator, &zeroHash)
		}
		sm.syncPeer = bestPeer

		// Reset the last progress time now that we have a non-nil
		// syncPeer to avoid instantly detecting it as stalled in the
		// event the progress time hasn't been updated recently.
		sm.lastProgressTime = time.Now()
	} else {
		log.Warnf("No sync peer candidates available")
	}
}

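// pickSyncPeerExample is an illustrative sketch, not part of the original
// file, of the selection policy above: prefer a random peer that is strictly
// ahead of our best height, falling back to a random peer at the same height,
// and returning nil when neither set has candidates.
func pickSyncPeerExample(higherPeers, equalPeers []*peerpkg.Peer) *peerpkg.Peer {
	switch {
	case len(higherPeers) > 0:
		return higherPeers[rand.Intn(len(higherPeers))]
	case len(equalPeers) > 0:
		return equalPeers[rand.Intn(len(equalPeers))]
	}
	return nil
}
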
// isSyncCandidate returns whether or not the peer is a candidate to consider
// syncing from.
func (sm *SyncManager) isSyncCandidate(peer *peerpkg.Peer) bool {
	// Typically a peer is not a candidate for sync if it's not a full node,
	// however regression test is special in that the regression tool is
	// not a full node and still needs to be considered a sync candidate.
	if sm.chainParams == &chaincfg.RegressionNetParams {
		// The peer is not a candidate if it's not coming from localhost
		// or the hostname can't be determined for some reason.
		host, _, err := net.SplitHostPort(peer.Addr())
		if err != nil {
			return false
		}

		if host != "127.0.0.1" && host != "localhost" {
			return false
		}
	} else {
		// The peer is not a candidate for sync if it's not a full
		// node. Additionally, if the segwit soft-fork package has
		// activated, then the peer must also be upgraded.
		segwitActive, err := sm.chain.IsDeploymentActive(chaincfg.DeploymentSegwit)
		if err != nil {
			log.Errorf("Unable to query for segwit "+
				"soft-fork state: %v", err)
		}
		nodeServices := peer.Services()
		if nodeServices&wire.SFNodeNetwork != wire.SFNodeNetwork ||
			(segwitActive && !peer.IsWitnessEnabled()) {
			return false
		}
	}

	// Candidate if all checks passed.
	return true
}

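// hasFullNodeServicesExample is a minimal sketch, not part of the original
// file, of the service-flag test used above: a peer advertises a bitmask of
// wire.ServiceFlag bits, and sync candidacy requires the full-node bit.
func hasFullNodeServicesExample(services wire.ServiceFlag) bool {
	return services&wire.SFNodeNetwork == wire.SFNodeNetwork
}
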
// handleNewPeerMsg deals with new peers that have signalled they may
// be considered as a sync peer (they have already successfully negotiated).  It
// also starts syncing if needed.  It is invoked from the blockHandler goroutine.
func (sm *SyncManager) handleNewPeerMsg(peer *peerpkg.Peer) {
	// Ignore if in the process of shutting down.
	if atomic.LoadInt32(&sm.shutdown) != 0 {
		return
	}

	log.Infof("New valid peer %s (%s)", peer, peer.UserAgent())

	// Initialize the peer state
	isSyncCandidate := sm.isSyncCandidate(peer)
	sm.peerStates[peer] = &peerSyncState{
		syncCandidate:   isSyncCandidate,
		requestedTxns:   make(map[chainhash.Hash]struct{}),
		requestedBlocks: make(map[chainhash.Hash]struct{}),
	}

	// Start syncing by choosing the best candidate if needed.
	if isSyncCandidate && sm.syncPeer == nil {
		sm.startSync()
	}
}

// handleStallSample will switch to a new sync peer if the current one has
// stalled. This is detected by comparing the last progress timestamp with
// the current time, and disconnecting the peer if we stalled before reaching
// their highest advertised block.
func (sm *SyncManager) handleStallSample() {
	if atomic.LoadInt32(&sm.shutdown) != 0 {
		return
	}

	// If we don't have an active sync peer, exit early.
	if sm.syncPeer == nil {
		return
	}

	// If the stall timeout has not elapsed, exit early.
	if time.Since(sm.lastProgressTime) <= maxStallDuration {
		return
	}

	// Check to see that the peer's sync state exists.
	state, exists := sm.peerStates[sm.syncPeer]
	if !exists {
		return
	}

	sm.clearRequestedState(state)

	disconnectSyncPeer := sm.shouldDCStalledSyncPeer()
	sm.updateSyncPeer(disconnectSyncPeer)
}

// shouldDCStalledSyncPeer determines whether or not we should disconnect a
// stalled sync peer. If the peer has stalled and its reported height is greater
// than our own best height, we will disconnect it. Otherwise, we will keep the
// peer connected in case we are already at tip.
func (sm *SyncManager) shouldDCStalledSyncPeer() bool {
	lastBlock := sm.syncPeer.LastBlock()
	startHeight := sm.syncPeer.StartingHeight()

	var peerHeight int32
	if lastBlock > startHeight {
		peerHeight = lastBlock
	} else {
		peerHeight = startHeight
	}

	// If we've stalled out yet the sync peer reports having more blocks for
	// us we will disconnect them. This allows us at tip to not disconnect
	// peers when we are equal or they temporarily lag behind us.
	best := sm.chain.BestSnapshot()
	return peerHeight > best.Height
}

// handleDonePeerMsg deals with peers that have signalled they are done.  It
// removes the peer as a candidate for syncing and in the case where it was
// the current sync peer, attempts to select a new best peer to sync from.  It
// is invoked from the blockHandler goroutine.
func (sm *SyncManager) handleDonePeerMsg(peer *peerpkg.Peer) {
	state, exists := sm.peerStates[peer]
	if !exists {
		log.Warnf("Received done peer message for unknown peer %s", peer)
		return
	}

	// Remove the peer from the list of candidate peers.
	delete(sm.peerStates, peer)

	log.Infof("Lost peer %s", peer)

	sm.clearRequestedState(state)

	if peer == sm.syncPeer {
		// Update the sync peer. The server has already disconnected the
		// peer before signaling to the sync manager.
		sm.updateSyncPeer(false)
	}
}

// clearRequestedState wipes all expected transactions and blocks from the sync
// manager's requested maps that were requested under a peer's sync state.
// This allows them to be re-requested by a subsequent sync peer.
func (sm *SyncManager) clearRequestedState(state *peerSyncState) {
	// Remove requested transactions from the global map so that they will
	// be fetched from elsewhere next time we get an inv.
	for txHash := range state.requestedTxns {
		delete(sm.requestedTxns, txHash)
	}

	// Remove requested blocks from the global map so that they will be
	// fetched from elsewhere next time we get an inv.
	// TODO: we could possibly here check which peers have these blocks
	// and request them now to speed things up a little.
	for blockHash := range state.requestedBlocks {
		delete(sm.requestedBlocks, blockHash)
	}
}

// updateSyncPeer chooses a new sync peer to replace the current one. If
// dcSyncPeer is true, this method will also disconnect the current sync peer.
// If we are in headers-first mode, any header state related to prefetching is
// also reset in preparation for the next sync peer.
func (sm *SyncManager) updateSyncPeer(dcSyncPeer bool) {
	log.Debugf("Updating sync peer, no progress for: %v",
		time.Since(sm.lastProgressTime))

	// First, disconnect the current sync peer if requested.
	if dcSyncPeer {
		sm.syncPeer.Disconnect()
	}

	// Reset any header state before we choose our next active sync peer.
	if sm.headersFirstMode {
		best := sm.chain.BestSnapshot()
		sm.resetHeaderState(&best.Hash, best.Height)
	}

	sm.syncPeer = nil
	sm.startSync()
}

// handleTxMsg handles transaction messages from all peers.
func (sm *SyncManager) handleTxMsg(tmsg *txMsg) {
	peer := tmsg.peer
	state, exists := sm.peerStates[peer]
	if !exists {
		log.Warnf("Received tx message from unknown peer %s", peer)
		return
	}

	// NOTE:  BitcoinJ, and possibly other wallets, don't follow the spec of
	// sending an inventory message and allowing the remote peer to decide
	// whether or not they want to request the transaction via a getdata
	// message.  Unfortunately, the reference implementation permits
	// unrequested data, so it has allowed wallets that don't follow the
	// spec to proliferate.  While this is not ideal, there is no check here
	// to disconnect peers for sending unsolicited transactions to provide
	// interoperability.
	txHash := tmsg.tx.Hash()

	// Ignore transactions that we have already rejected.  Do not
	// send a reject message here because if the transaction was already
	// rejected, the transaction was unsolicited.
	if _, exists = sm.rejectedTxns[*txHash]; exists {
		log.Debugf("Ignoring unsolicited previously rejected "+
			"transaction %v from %s", txHash, peer)
		return
	}

	// Process the transaction to include validation, insertion in the
	// memory pool, orphan handling, etc.
	acceptedTxs, err := sm.txMemPool.ProcessTransaction(tmsg.tx,
		true, true, mempool.Tag(peer.ID()))

	// Remove transaction from request maps. Either the mempool/chain
	// already knows about it and as such we shouldn't have any more
	// instances of trying to fetch it, or we failed to insert and thus
	// we'll retry next time we get an inv.
	delete(state.requestedTxns, *txHash)
	delete(sm.requestedTxns, *txHash)

	if err != nil {
		// Do not request this transaction again until a new block
		// has been processed.
		limitAdd(sm.rejectedTxns, *txHash, maxRejectedTxns)

		// When the error is a rule error, it means the transaction was
		// simply rejected as opposed to something actually going wrong,
		// so log it as such.  Otherwise, something really did go wrong,
		// so log it as an actual error.
		if _, ok := err.(mempool.RuleError); ok {
			log.Debugf("Rejected transaction %v from %s: %v",
				txHash, peer, err)
		} else {
			log.Errorf("Failed to process transaction %v: %v",
				txHash, err)
		}

		// Convert the error into an appropriate reject message and
		// send it.
		code, reason := mempool.ErrToRejectErr(err)
		peer.PushRejectMsg(wire.CmdTx, code, reason, txHash, false)
		return
	}

	sm.peerNotifier.AnnounceNewTransactions(acceptedTxs)
}

// current returns true if we believe we are synced with our peers, false if we
// still have blocks to check.
func (sm *SyncManager) current() bool {
	if !sm.chain.IsCurrent() {
		return false
	}

	// if blockChain thinks we are current and we have no syncPeer it
	// is probably right.
	if sm.syncPeer == nil {
		return true
	}

	// No matter what chain thinks, if we are below the block we are syncing
	// to we are not current.
	if sm.chain.BestSnapshot().Height < sm.syncPeer.LastBlock() {
		return false
	}
	return true
}

// handleBlockMsg handles block messages from all peers.
func (sm *SyncManager) handleBlockMsg(bmsg *blockMsg) {
	peer := bmsg.peer
	state, exists := sm.peerStates[peer]
	if !exists {
		log.Warnf("Received block message from unknown peer %s", peer)
		return
	}

	// If we didn't ask for this block then the peer is misbehaving.
	blockHash := bmsg.block.Hash()
	if _, exists = state.requestedBlocks[*blockHash]; !exists {
		// The regression test intentionally sends some blocks twice
		// to test duplicate block insertion fails.  Don't disconnect
		// the peer or ignore the block when we're in regression test
		// mode in this case so the chain code is actually fed the
		// duplicate blocks.
		if sm.chainParams != &chaincfg.RegressionNetParams {
			log.Warnf("Got unrequested block %v from %s -- "+
				"disconnecting", blockHash, peer.Addr())
			peer.Disconnect()
			return
		}
	}

	// When in headers-first mode, if the block matches the hash of the
	// first header in the list of headers that are being fetched, it's
	// eligible for less validation since the headers have already been
	// verified to link together and are valid up to the next checkpoint.
	// Also, remove the list entry for all blocks except the checkpoint
	// since it is needed to verify the next round of headers links
	// properly.
	isCheckpointBlock := false
	behaviorFlags := blockchain.BFNone
	if sm.headersFirstMode {
		firstNodeEl := sm.headerList.Front()
		if firstNodeEl != nil {
			firstNode := firstNodeEl.Value.(*headerNode)
			if blockHash.IsEqual(firstNode.hash) {
				behaviorFlags |= blockchain.BFFastAdd
				if firstNode.hash.IsEqual(sm.nextCheckpoint.Hash) {
					isCheckpointBlock = true
				} else {
					sm.headerList.Remove(firstNodeEl)
				}
			}
		}
	}

	// Remove block from request maps. Either chain will know about it and
	// so we shouldn't have any more instances of trying to fetch it, or we
	// will fail the insert and thus we'll retry next time we get an inv.
	delete(state.requestedBlocks, *blockHash)
	delete(sm.requestedBlocks, *blockHash)

	// Process the block to include validation, best chain selection, orphan
	// handling, etc.
	_, isOrphan, err := sm.chain.ProcessBlock(bmsg.block, behaviorFlags)
	if err != nil {
		// When the error is a rule error, it means the block was simply
		// rejected as opposed to something actually going wrong, so log
		// it as such.  Otherwise, something really did go wrong, so log
		// it as an actual error.
		if _, ok := err.(blockchain.RuleError); ok {
			log.Infof("Rejected block %v from %s: %v", blockHash,
				peer, err)
		} else {
			log.Errorf("Failed to process block %v: %v",
				blockHash, err)
		}
		if dbErr, ok := err.(database.Error); ok && dbErr.ErrorCode ==
			database.ErrCorruption {
			panic(dbErr)
		}

		// Convert the error into an appropriate reject message and
		// send it.
		code, reason := mempool.ErrToRejectErr(err)
		peer.PushRejectMsg(wire.CmdBlock, code, reason, blockHash, false)
		return
	}

	// Meta-data about the new block this peer is reporting. We use this
	// below to update this peer's latest block height and the heights of
	// other peers based on their last announced block hash. This allows us
	// to dynamically update the block heights of peers, avoiding stale
	// heights when looking for a new sync peer. Upon acceptance of a block
	// or recognition of an orphan, we also use this information to update
	// the block heights of other peers whose invs may have been ignored
	// if we are actively syncing while the chain is not yet current or
	// who may have lost the block announcement race.
	var heightUpdate int32
	var blkHashUpdate *chainhash.Hash

	// Request the parents for the orphan block from the peer that sent it.
	if isOrphan {
		// We've just received an orphan block from a peer. In order
		// to update the height of the peer, we try to extract the
		// block height from the scriptSig of the coinbase transaction.
		// Extraction is only attempted if the block's version is
		// high enough (ver 2+).
		header := &bmsg.block.MsgBlock().Header
		if blockchain.ShouldHaveSerializedBlockHeight(header) {
			coinbaseTx := bmsg.block.Transactions()[0]
			cbHeight, err := blockchain.ExtractCoinbaseHeight(coinbaseTx)
			if err != nil {
				log.Warnf("Unable to extract height from "+
					"coinbase tx: %v", err)
			} else {
				log.Debugf("Extracted height of %v from "+
					"orphan block", cbHeight)
				heightUpdate = cbHeight
				blkHashUpdate = blockHash
			}
		}

		orphanRoot := sm.chain.GetOrphanRoot(blockHash)
		locator, err := sm.chain.LatestBlockLocator()
		if err != nil {
			log.Warnf("Failed to get block locator for the "+
				"latest block: %v", err)
		} else {
			peer.PushGetBlocksMsg(locator, orphanRoot)
		}
	} else {
		if peer == sm.syncPeer {
			sm.lastProgressTime = time.Now()
		}

		// When the block is not an orphan, log information about it and
		// update the chain state.
		sm.progressLogger.LogBlockHeight(bmsg.block)

		// Update this peer's latest block height, for future
		// potential sync node candidacy.
		best := sm.chain.BestSnapshot()
		heightUpdate = best.Height
		blkHashUpdate = &best.Hash

		// Clear the rejected transactions.
		sm.rejectedTxns = make(map[chainhash.Hash]struct{})
	}

	// Update the block height for this peer. But only send a message to
	// the server for updating peer heights if this is an orphan or our
	// chain is "current". This avoids sending a spammy amount of messages
	// if we're syncing the chain from scratch.
	if blkHashUpdate != nil && heightUpdate != 0 {
		peer.UpdateLastBlockHeight(heightUpdate)
		if isOrphan || sm.current() {
			go sm.peerNotifier.UpdatePeerHeights(blkHashUpdate, heightUpdate,
				peer)
		}
	}

	// Nothing more to do if we aren't in headers-first mode.
	if !sm.headersFirstMode {
		return
	}

	// This is headers-first mode, so if the block is not a checkpoint
	// request more blocks using the header list when the request queue is
	// getting short.
	if !isCheckpointBlock {
		if sm.startHeader != nil &&
			len(state.requestedBlocks) < minInFlightBlocks {
			sm.fetchHeaderBlocks()
		}
		return
	}

	// This is headers-first mode and the block is a checkpoint.  When
	// there is a next checkpoint, get the next round of headers by asking
	// for headers starting from the block after this one up to the next
	// checkpoint.
	prevHeight := sm.nextCheckpoint.Height
	prevHash := sm.nextCheckpoint.Hash
	sm.nextCheckpoint = sm.findNextHeaderCheckpoint(prevHeight)
	if sm.nextCheckpoint != nil {
		locator := blockchain.BlockLocator([]*chainhash.Hash{prevHash})
		err := peer.PushGetHeadersMsg(locator, sm.nextCheckpoint.Hash)
		if err != nil {
			log.Warnf("Failed to send getheaders message to "+
				"peer %s: %v", peer.Addr(), err)
			return
		}
		log.Infof("Downloading headers for blocks %d to %d from "+
			"peer %s", prevHeight+1, sm.nextCheckpoint.Height,
			sm.syncPeer.Addr())
		return
	}

	// This is headers-first mode, the block is a checkpoint, and there are
	// no more checkpoints, so switch to normal mode by requesting blocks
	// from the block after this one up to the end of the chain (zero hash).
	sm.headersFirstMode = false
	sm.headerList.Init()
	log.Infof("Reached the final checkpoint -- switching to normal mode")
	locator := blockchain.BlockLocator([]*chainhash.Hash{blockHash})
	err = peer.PushGetBlocksMsg(locator, &zeroHash)
	if err != nil {
		log.Warnf("Failed to send getblocks message to peer %s: %v",
			peer.Addr(), err)
		return
	}
}

// fetchHeaderBlocks creates and sends a request to the syncPeer for the next
// list of blocks to be downloaded based on the current list of headers.
func (sm *SyncManager) fetchHeaderBlocks() {
	// Nothing to do if there is no start header.
	if sm.startHeader == nil {
		log.Warnf("fetchHeaderBlocks called with no start header")
		return
	}

	// Build up a getdata request for the list of blocks the headers
	// describe.  The size hint will be limited to wire.MaxInvPerMsg by
	// the function, so no need to double check it here.
	gdmsg := wire.NewMsgGetDataSizeHint(uint(sm.headerList.Len()))
	numRequested := 0
	for e := sm.startHeader; e != nil; e = e.Next() {
		node, ok := e.Value.(*headerNode)
		if !ok {
			log.Warn("Header list node type is not a headerNode")
			continue
		}

		iv := wire.NewInvVect(wire.InvTypeBlock, node.hash)
		haveInv, err := sm.haveInventory(iv)
		if err != nil {
			log.Warnf("Unexpected failure when checking for "+
				"existing inventory during header block "+
				"fetch: %v", err)
		}
		if !haveInv {
			syncPeerState := sm.peerStates[sm.syncPeer]

			sm.requestedBlocks[*node.hash] = struct{}{}
			syncPeerState.requestedBlocks[*node.hash] = struct{}{}

			// If we're fetching from a witness enabled peer
			// post-fork, then ensure that we receive all the
			// witness data in the blocks.
			if sm.syncPeer.IsWitnessEnabled() {
				iv.Type = wire.InvTypeWitnessBlock
			}

			gdmsg.AddInvVect(iv)
			numRequested++
		}
		sm.startHeader = e.Next()
		if numRequested >= wire.MaxInvPerMsg {
			break
		}
	}
	if len(gdmsg.InvList) > 0 {
		sm.syncPeer.QueueMessage(gdmsg, nil)
	}
}

// handleHeadersMsg handles block header messages from all peers.  Headers are
// requested when performing a headers-first sync.
func (sm *SyncManager) handleHeadersMsg(hmsg *headersMsg) {
	peer := hmsg.peer
	_, exists := sm.peerStates[peer]
	if !exists {
		log.Warnf("Received headers message from unknown peer %s", peer)
		return
	}

	// The remote peer is misbehaving if we didn't request headers.
	msg := hmsg.headers
	numHeaders := len(msg.Headers)
	if !sm.headersFirstMode {
		log.Warnf("Got %d unrequested headers from %s -- "+
			"disconnecting", numHeaders, peer.Addr())
		peer.Disconnect()
		return
	}

	// Nothing to do for an empty headers message.
	if numHeaders == 0 {
		return
	}

	// Process all of the received headers ensuring each one connects to the
	// previous and that checkpoints match.
	receivedCheckpoint := false
	var finalHash *chainhash.Hash
	for _, blockHeader := range msg.Headers {
		blockHash := blockHeader.BlockHash()
		finalHash = &blockHash

		// Ensure there is a previous header to compare against.
		prevNodeEl := sm.headerList.Back()
		if prevNodeEl == nil {
			log.Warnf("Header list does not contain a previous " +
				"element as expected -- disconnecting peer")
			peer.Disconnect()
			return
		}

		// Ensure the header properly connects to the previous one and
		// add it to the list of headers.
		node := headerNode{hash: &blockHash}
		prevNode := prevNodeEl.Value.(*headerNode)
		if prevNode.hash.IsEqual(&blockHeader.PrevBlock) {
			node.height = prevNode.height + 1
			e := sm.headerList.PushBack(&node)
			if sm.startHeader == nil {
				sm.startHeader = e
			}
		} else {
			log.Warnf("Received block header that does not "+
				"properly connect to the chain from peer %s "+
				"-- disconnecting", peer.Addr())
			peer.Disconnect()
			return
		}

		// Verify the header at the next checkpoint height matches.
		if node.height == sm.nextCheckpoint.Height {
			if node.hash.IsEqual(sm.nextCheckpoint.Hash) {
				receivedCheckpoint = true
				log.Infof("Verified downloaded block "+
					"header against checkpoint at height "+
					"%d/hash %s", node.height, node.hash)
			} else {
				log.Warnf("Block header at height %d/hash "+
					"%s from peer %s does NOT match "+
					"expected checkpoint hash of %s -- "+
					"disconnecting", node.height,
					node.hash, peer.Addr(),
					sm.nextCheckpoint.Hash)
				peer.Disconnect()
				return
			}
			break
		}
	}

	// When this header is a checkpoint, switch to fetching the blocks for
	// all of the headers since the last checkpoint.
	if receivedCheckpoint {
		// Since the first entry of the list is always the final block
		// that is already in the database and is only used to ensure
		// the next header links properly, it must be removed before
		// fetching the blocks.
		sm.headerList.Remove(sm.headerList.Front())
		log.Infof("Received %v block headers: Fetching blocks",
			sm.headerList.Len())
		sm.progressLogger.SetLastLogTime(time.Now())
		sm.fetchHeaderBlocks()
		return
	}

	// This header is not a checkpoint, so request the next batch of
	// headers starting from the latest known header and ending with the
	// next checkpoint.
	locator := blockchain.BlockLocator([]*chainhash.Hash{finalHash})
	err := peer.PushGetHeadersMsg(locator, sm.nextCheckpoint.Hash)
	if err != nil {
		log.Warnf("Failed to send getheaders message to "+
			"peer %s: %v", peer.Addr(), err)
		return
	}
}

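// headersLinkExample is an illustrative sketch, not part of the original
// file, of the connectivity check above: each header must reference the hash
// of its predecessor via its PrevBlock field.
func headersLinkExample(headers []*wire.BlockHeader) bool {
	for i := 1; i < len(headers); i++ {
		prevHash := headers[i-1].BlockHash()
		if !headers[i].PrevBlock.IsEqual(&prevHash) {
			return false
		}
	}
	return true
}
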
// handleNotFoundMsg handles notfound messages from all peers.
func (sm *SyncManager) handleNotFoundMsg(nfmsg *notFoundMsg) {
	peer := nfmsg.peer
	state, exists := sm.peerStates[peer]
	if !exists {
		log.Warnf("Received notfound message from unknown peer %s", peer)
		return
	}
	for _, inv := range nfmsg.notFound.InvList {
		// verify the hash was actually announced by the peer
		// before deleting from the global requested maps.
		switch inv.Type {
		case wire.InvTypeWitnessBlock:
			fallthrough
		case wire.InvTypeBlock:
			if _, exists := state.requestedBlocks[inv.Hash]; exists {
				delete(state.requestedBlocks, inv.Hash)
				delete(sm.requestedBlocks, inv.Hash)
			}

		case wire.InvTypeWitnessTx:
			fallthrough
		case wire.InvTypeTx:
			if _, exists := state.requestedTxns[inv.Hash]; exists {
				delete(state.requestedTxns, inv.Hash)
				delete(sm.requestedTxns, inv.Hash)
			}
		}
	}
}

// haveInventory returns whether or not the inventory represented by the passed
// inventory vector is known.  This includes checking all of the various places
// inventory can be when it is in different states such as blocks that are part
// of the main chain, on a side chain, in the orphan pool, and transactions that
// are in the memory pool (either the main pool or orphan pool).
func (sm *SyncManager) haveInventory(invVect *wire.InvVect) (bool, error) {
	switch invVect.Type {
	case wire.InvTypeWitnessBlock:
		fallthrough
	case wire.InvTypeBlock:
		// Ask chain if the block is known to it in any form (main
		// chain, side chain, or orphan).
		return sm.chain.HaveBlock(&invVect.Hash)

	case wire.InvTypeWitnessTx:
		fallthrough
	case wire.InvTypeTx:
		// Ask the transaction memory pool if the transaction is known
		// to it in any form (main pool or orphan).
		if sm.txMemPool.HaveTransaction(&invVect.Hash) {
			return true, nil
		}

		// Check if the transaction exists from the point of view of the
		// end of the main chain.  Note that this is only a best effort
		// since it is expensive to check existence of every output and
		// the only purpose of this check is to avoid downloading
		// already known transactions.  Only the first two outputs are
		// checked because the vast majority of transactions consist of
		// two outputs where one is some form of "pay-to-somebody-else"
		// and the other is a change output.
		prevOut := wire.OutPoint{Hash: invVect.Hash}
		for i := uint32(0); i < 2; i++ {
			prevOut.Index = i
			entry, err := sm.chain.FetchUtxoEntry(prevOut)
			if err != nil {
				return false, err
			}
			if entry != nil && !entry.IsSpent() {
				return true, nil
			}
		}

		return false, nil
	}

	// The requested inventory is an unsupported type, so just claim
	// it is known to avoid requesting it.
	return true, nil
}

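// haveTxOutputsExample isolates the best-effort transaction check above as an
// illustrative sketch; it is not part of the original file.  Only the first
// two outputs are probed in the utxo view, and any unspent entry is treated
// as proof the transaction is already known.
func haveTxOutputsExample(sm *SyncManager, txHash chainhash.Hash) (bool, error) {
	prevOut := wire.OutPoint{Hash: txHash}
	for i := uint32(0); i < 2; i++ {
		prevOut.Index = i
		entry, err := sm.chain.FetchUtxoEntry(prevOut)
		if err != nil {
			return false, err
		}
		if entry != nil && !entry.IsSpent() {
			return true, nil
		}
	}
	return false, nil
}
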
// handleInvMsg handles inv messages from all peers.
// We examine the inventory advertised by the remote peer and act accordingly.
func (sm *SyncManager) handleInvMsg(imsg *invMsg) {
	peer := imsg.peer
	state, exists := sm.peerStates[peer]
	if !exists {
		log.Warnf("Received inv message from unknown peer %s", peer)
		return
	}

	// Attempt to find the final block in the inventory list.  There may
	// not be one.
	lastBlock := -1
	invVects := imsg.inv.InvList
	for i := len(invVects) - 1; i >= 0; i-- {
		if invVects[i].Type == wire.InvTypeBlock {
			lastBlock = i
			break
		}
	}

	// If this inv contains a block announcement, and this isn't coming from
	// our current sync peer or we're current, then update the last
	// announced block for this peer. We'll use this information later to
	// update the heights of peers based on blocks we've accepted that they
	// previously announced.
	if lastBlock != -1 && (peer != sm.syncPeer || sm.current()) {
		peer.UpdateLastAnnouncedBlock(&invVects[lastBlock].Hash)
	}

	// Ignore invs from peers that aren't the sync if we are not current.
	// Helps prevent fetching a mass of orphans.
	if peer != sm.syncPeer && !sm.current() {
		return
	}

	// If our chain is current and a peer announces a block we already
	// know of, then update their current block height.
	if lastBlock != -1 && sm.current() {
		blkHeight, err := sm.chain.BlockHeightByHash(&invVects[lastBlock].Hash)
		if err == nil {
			peer.UpdateLastBlockHeight(blkHeight)
		}
	}

	// Request the advertised inventory if we don't already have it.  Also,
	// request parent blocks of orphans if we receive one we already have.
	// Finally, attempt to detect potential stalls due to long side chains
	// we already have and request more blocks to prevent them.
	for i, iv := range invVects {
		// Ignore unsupported inventory types.
		switch iv.Type {
		case wire.InvTypeBlock:
		case wire.InvTypeTx:
		case wire.InvTypeWitnessBlock:
		case wire.InvTypeWitnessTx:
		default:
			continue
		}

		// Add the inventory to the cache of known inventory
		// for the peer.
		peer.AddKnownInventory(iv)

		// Ignore inventory when we're in headers-first mode.
		if sm.headersFirstMode {
			continue
		}

		// Request the inventory if we don't already have it.
		haveInv, err := sm.haveInventory(iv)
		if err != nil {
			log.Warnf("Unexpected failure when checking for "+
				"existing inventory during inv message "+
				"processing: %v", err)
			continue
		}
		if !haveInv {
			if iv.Type == wire.InvTypeTx {
				// Skip the transaction if it has already been
				// rejected.
				if _, exists := sm.rejectedTxns[iv.Hash]; exists {
					continue
				}
			}

			// Ignore block invs from non-witness enabled
			// peers, as after segwit activation we only want to
			// download from peers that can provide us full witness
			// data for blocks.
			if !peer.IsWitnessEnabled() && iv.Type == wire.InvTypeBlock {
				continue
			}

			// Add it to the request queue.
			state.requestQueue = append(state.requestQueue, iv)
			continue
		}

		if iv.Type == wire.InvTypeBlock {
			// The block is an orphan block that we already have.
			// When the existing orphan was processed, it requested
			// the missing parent blocks.  When this scenario
			// happens, it means there were more blocks missing
			// than are allowed into a single inventory message.  As
			// a result, once this peer requested the final
			// advertised block, the remote peer noticed and is now
			// resending the orphan block as an available block
			// to signal there are more missing blocks that need to
			// be requested.
			if sm.chain.IsKnownOrphan(&iv.Hash) {
				// Request blocks starting at the latest known
				// up to the root of the orphan that just came
				// in.
				orphanRoot := sm.chain.GetOrphanRoot(&iv.Hash)
				locator, err := sm.chain.LatestBlockLocator()
				if err != nil {
					log.Errorf("PEER: Failed to get block "+
						"locator for the latest block: "+
						"%v", err)
					continue
				}
				peer.PushGetBlocksMsg(locator, orphanRoot)
				continue
			}

			// We already have the final block advertised by this
			// inventory message, so force a request for more.  This
			// should only happen if we're on a really long side
			// chain.
			if i == lastBlock {
				// Request blocks after this one up to the
				// final one the remote peer knows about (zero
				// stop hash).
				locator := sm.chain.BlockLocatorFromHash(&iv.Hash)
				peer.PushGetBlocksMsg(locator, &zeroHash)
			}
		}
	}

	// Request as much as possible at once.  Anything that won't fit into
	// the request will be requested on the next inv message.
	numRequested := 0
	gdmsg := wire.NewMsgGetData()
	requestQueue := state.requestQueue
	for len(requestQueue) != 0 {
		iv := requestQueue[0]
		requestQueue[0] = nil
		requestQueue = requestQueue[1:]

		switch iv.Type {
		case wire.InvTypeWitnessBlock:
			fallthrough
		case wire.InvTypeBlock:
			// Request the block if there is not already a pending
			// request.
			if _, exists := sm.requestedBlocks[iv.Hash]; !exists {
				limitAdd(sm.requestedBlocks, iv.Hash, maxRequestedBlocks)
				limitAdd(state.requestedBlocks, iv.Hash, maxRequestedBlocks)

				if peer.IsWitnessEnabled() {
					iv.Type = wire.InvTypeWitnessBlock
				}

				gdmsg.AddInvVect(iv)
				numRequested++
			}

		case wire.InvTypeWitnessTx:
			fallthrough
		case wire.InvTypeTx:
			// Request the transaction if there is not already a
			// pending request.
			if _, exists := sm.requestedTxns[iv.Hash]; !exists {
				limitAdd(sm.requestedTxns, iv.Hash, maxRequestedTxns)
				limitAdd(state.requestedTxns, iv.Hash, maxRequestedTxns)

				// If the peer is capable, request the txn
				// including all witness data.
				if peer.IsWitnessEnabled() {
					iv.Type = wire.InvTypeWitnessTx
				}

				gdmsg.AddInvVect(iv)
				numRequested++
			}
		}

		if numRequested >= wire.MaxInvPerMsg {
			break
		}
	}
	state.requestQueue = requestQueue
	if len(gdmsg.InvList) > 0 {
		peer.QueueMessage(gdmsg, nil)
	}
}

// blockHandler is the main handler for the sync manager.  It must be run as a
// goroutine.  It processes block and inv messages in a separate goroutine
// from the peer handlers so the block (MsgBlock) messages are handled by a
// single thread without needing to lock memory data structures.  This is
// important because the sync manager controls which blocks are needed and how
// the fetching should proceed.
func (sm *SyncManager) blockHandler() {
	stallTicker := time.NewTicker(stallSampleInterval)
	defer stallTicker.Stop()

out:
	for {
		select {
		case m := <-sm.msgChan:
			switch msg := m.(type) {
			case *newPeerMsg:
				sm.handleNewPeerMsg(msg.peer)

			case *txMsg:
				sm.handleTxMsg(msg)
				msg.reply <- struct{}{}

			case *blockMsg:
				sm.handleBlockMsg(msg)
				msg.reply <- struct{}{}

			case *invMsg:
				sm.handleInvMsg(msg)

			case *headersMsg:
				sm.handleHeadersMsg(msg)

			case *notFoundMsg:
				sm.handleNotFoundMsg(msg)

			case *donePeerMsg:
				sm.handleDonePeerMsg(msg.peer)

			case getSyncPeerMsg:
				var peerID int32
				if sm.syncPeer != nil {
					peerID = sm.syncPeer.ID()
				}
				msg.reply <- peerID

			case processBlockMsg:
				_, isOrphan, err := sm.chain.ProcessBlock(
					msg.block, msg.flags)
				if err != nil {
					msg.reply <- processBlockResponse{
						isOrphan: false,
						err:      err,
					}
					continue
				}

				msg.reply <- processBlockResponse{
					isOrphan: isOrphan,
					err:      nil,
				}

			case isCurrentMsg:
				msg.reply <- sm.current()

			case pauseMsg:
				// Wait until the sender unpauses the manager.
				<-msg.unpause

			default:
				log.Warnf("Invalid message type in block "+
					"handler: %T", msg)
			}

		case <-stallTicker.C:
			sm.handleStallSample()

		case <-sm.quit:
			break out
		}
	}

	sm.wg.Done()
	log.Trace("Block handler done")
}

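// syncPeerIDExample is a minimal sketch, not part of the original file, of
// the request/reply pattern blockHandler serves: a caller sends a message
// carrying a reply channel and blocks until the handler answers, which keeps
// all state access on the handler goroutine.
func syncPeerIDExample(sm *SyncManager) int32 {
	reply := make(chan int32)
	sm.msgChan <- getSyncPeerMsg{reply: reply}
	return <-reply
}
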
  1387  // handleBlockchainNotification handles notifications from blockchain.  It does
  1388  // things such as request orphan block parents and relay accepted blocks to
  1389  // connected peers.
  1390  func (sm *SyncManager) handleBlockchainNotification(notification *blockchain.Notification) {
  1391  	switch notification.Type {
  1392  	// A block has been accepted into the block chain.  Relay it to other
  1393  	// peers.
  1394  	case blockchain.NTBlockAccepted:
  1395  		// Don't relay if we are not current. Other peers that are
  1396  		// current should already know about it.
  1397  		if !sm.current() {
  1398  			return
  1399  		}
  1400  
  1401  		block, ok := notification.Data.(*btcutil.Block)
  1402  		if !ok {
  1403  			log.Warnf("Chain accepted notification is not a block.")
  1404  			break
  1405  		}
  1406  
  1407  		// Generate the inventory vector and relay it.
  1408  		iv := wire.NewInvVect(wire.InvTypeBlock, block.Hash())
  1409  		sm.peerNotifier.RelayInventory(iv, block.MsgBlock().Header)
  1410  
  1411  		if !sm.feeEstimator.IsEnabled() {
  1412  			// Fee estimation can only start after an initial sync has
  1413  			// completed; otherwise mempool transactions would be recorded
  1414  			// at the wrong height.
  1415  			sm.feeEstimator.Enable(block.Height())
  1416  		}
  1417  
  1418  	// A block has been connected to the main block chain.
  1419  	case blockchain.NTBlockConnected:
  1420  		block, ok := notification.Data.(*btcutil.Block)
  1421  		if !ok {
  1422  			log.Warnf("Chain connected notification is not a block.")
  1423  			break
  1424  		}
  1425  
  1426  		// Account for transactions mined in the newly connected block for fee
  1427  		// estimation. This must be done before attempting to remove
  1428  		// transactions from the mempool because the mempool will notify
  1429  		// the estimator of the transactions that are leaving it.
  1430  		sm.feeEstimator.ProcessBlock(block)
  1431  
  1432  		// Remove all of the transactions (except the coinbase) in the
  1433  		// connected block from the transaction pool.  Secondly, remove any
  1434  		// transactions which are now double spends as a result of these
  1435  		// new transactions.  Finally, remove any transaction that is
  1436  		// no longer an orphan. Transactions which depend on a confirmed
  1437  		// transaction are NOT removed recursively because they are still
  1438  		// valid.
  1439  		for _, tx := range block.Transactions()[1:] {
  1440  			sm.txMemPool.RemoveTransaction(tx, false)
  1441  			sm.txMemPool.RemoveDoubleSpends(tx)
  1442  			sm.txMemPool.RemoveOrphan(tx)
  1443  			sm.peerNotifier.TransactionConfirmed(tx)
  1444  			acceptedTxs := sm.txMemPool.ProcessOrphans(tx)
  1445  			sm.peerNotifier.AnnounceNewTransactions(acceptedTxs)
  1446  		}
  1447  
  1448  	// A block has been disconnected from the main block chain.
  1449  	case blockchain.NTBlockDisconnected:
  1450  		block, ok := notification.Data.(*btcutil.Block)
  1451  		if !ok {
  1452  			log.Warnf("Chain disconnected notification is not a block.")
  1453  			break
  1454  		}
  1455  
  1456  		// Reinsert all of the transactions (except the coinbase) into
  1457  		// the transaction pool.
  1458  		for _, tx := range block.Transactions()[1:] {
  1459  			_, _, err := sm.txMemPool.MaybeAcceptTransaction(tx,
  1460  				false, false)
  1461  			if err != nil {
  1462  				// Remove the transaction and all transactions
  1463  				// that depend on it if it wasn't accepted into
  1464  				// the transaction pool.
  1465  				sm.txMemPool.RemoveTransaction(tx, true)
  1466  			}
  1467  		}
  1468  
  1469  	}
  1470  }
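
        // exampleNotificationLogger is an illustrative sketch, not part of the
        // original file: any component can observe the chain the same way the
        // sync manager does, by registering a callback via sm.chain.Subscribe.
        // The logging here is a placeholder action.
        func exampleNotificationLogger(notification *blockchain.Notification) {
        	switch notification.Type {
        	case blockchain.NTBlockConnected:
        		if block, ok := notification.Data.(*btcutil.Block); ok {
        			log.Infof("Connected block %v (height %d)",
        				block.Hash(), block.Height())
        		}
        	case blockchain.NTBlockDisconnected:
        		if block, ok := notification.Data.(*btcutil.Block); ok {
        			log.Infof("Disconnected block %v", block.Hash())
        		}
        	}
        }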
  1471  
  1472  // NewPeer informs the sync manager of a newly active peer.
  1473  func (sm *SyncManager) NewPeer(peer *peerpkg.Peer) {
  1474  	// Ignore if we are shutting down.
  1475  	if atomic.LoadInt32(&sm.shutdown) != 0 {
  1476  		return
  1477  	}
  1478  	sm.msgChan <- &newPeerMsg{peer: peer}
  1479  }
  1480  
  1481  // QueueTx adds the passed transaction message and peer to the block handling
  1482  // queue. Responds to the done channel argument after the tx message is
  1483  // processed.
  1484  func (sm *SyncManager) QueueTx(tx *btcutil.Tx, peer *peerpkg.Peer, done chan struct{}) {
  1485  	// Don't accept more transactions if we're shutting down.
  1486  	if atomic.LoadInt32(&sm.shutdown) != 0 {
  1487  		done <- struct{}{}
  1488  		return
  1489  	}
  1490  
  1491  	sm.msgChan <- &txMsg{tx: tx, peer: peer, reply: done}
  1492  }
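
        // The snippet below is an illustrative sketch, not part of the
        // original file: a caller that must not proceed until the transaction
        // has been handled blocks on the done channel it supplied. Note that
        // QueueTx signals done immediately when the manager is shutting down.
        //
        //	done := make(chan struct{})
        //	sm.QueueTx(tx, peer, done)
        //	<-done // handleTxMsg has finished (or the manager is shutting down)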
  1493  
  1494  // QueueBlock adds the passed block message and peer to the block handling
  1495  // queue. Responds to the done channel argument after the block message is
  1496  // processed.
  1497  func (sm *SyncManager) QueueBlock(block *btcutil.Block, peer *peerpkg.Peer, done chan struct{}) {
  1498  	// Don't accept more blocks if we're shutting down.
  1499  	if atomic.LoadInt32(&sm.shutdown) != 0 {
  1500  		done <- struct{}{}
  1501  		return
  1502  	}
  1503  
  1504  	sm.msgChan <- &blockMsg{block: block, peer: peer, reply: done}
  1505  }
  1506  
  1507  // QueueInv adds the passed inv message and peer to the block handling queue.
  1508  func (sm *SyncManager) QueueInv(inv *wire.MsgInv, peer *peerpkg.Peer) {
  1509  	// No channel handling here because peers do not need to block on inv
  1510  	// messages.
  1511  	if atomic.LoadInt32(&sm.shutdown) != 0 {
  1512  		return
  1513  	}
  1514  
  1515  	sm.msgChan <- &invMsg{inv: inv, peer: peer}
  1516  }
  1517  
  1518  // QueueHeaders adds the passed headers message and peer to the block handling
  1519  // queue.
  1520  func (sm *SyncManager) QueueHeaders(headers *wire.MsgHeaders, peer *peerpkg.Peer) {
  1521  	// No channel handling here because peers do not need to block on
  1522  	// headers messages.
  1523  	if atomic.LoadInt32(&sm.shutdown) != 0 {
  1524  		return
  1525  	}
  1526  
  1527  	sm.msgChan <- &headersMsg{headers: headers, peer: peer}
  1528  }
  1529  
  1530  // QueueNotFound adds the passed notfound message and peer to the block handling
  1531  // queue.
  1532  func (sm *SyncManager) QueueNotFound(notFound *wire.MsgNotFound, peer *peerpkg.Peer) {
  1533  	// No channel handling here because peers do not need to block on
  1534  	// notfound messages.
  1535  	if atomic.LoadInt32(&sm.shutdown) != 0 {
  1536  		return
  1537  	}
  1538  
  1539  	sm.msgChan <- &notFoundMsg{notFound: notFound, peer: peer}
  1540  }
  1541  
  1542  // DonePeer informs the sync manager that a peer has disconnected.
  1543  func (sm *SyncManager) DonePeer(peer *peerpkg.Peer) {
  1544  	// Ignore if we are shutting down.
  1545  	if atomic.LoadInt32(&sm.shutdown) != 0 {
  1546  		return
  1547  	}
  1548  
  1549  	sm.msgChan <- &donePeerMsg{peer: peer}
  1550  }
  1551  
  1552  // Start begins the core block handler which processes block and inv messages.
  1553  func (sm *SyncManager) Start() {
  1554  	// Already started?
  1555  	if atomic.AddInt32(&sm.started, 1) != 1 {
  1556  		return
  1557  	}
  1558  
  1559  	log.Trace("Starting sync manager")
  1560  	sm.wg.Add(1)
  1561  	go sm.blockHandler()
  1562  }
  1563  
  1564  // Stop gracefully shuts down the sync manager by stopping all asynchronous
  1565  // handlers and waiting for them to finish.
  1566  func (sm *SyncManager) Stop() error {
  1567  	if atomic.AddInt32(&sm.shutdown, 1) != 1 {
  1568  		log.Warnf("Sync manager is already in the process of " +
  1569  			"shutting down")
  1570  		return nil
  1571  	}
  1572  
  1573  	log.Infof("Sync manager shutting down")
  1574  	close(sm.quit)
  1575  	sm.wg.Wait()
  1576  	return nil
  1577  }
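
        // Illustrative sketch, not part of the original file: Start and Stop
        // bracket the manager's lifetime, and the atomic counters above make
        // repeated calls harmless no-ops.
        //
        //	sm.Start()      // spawns the blockHandler goroutine
        //	defer sm.Stop() // closes quit and waits for blockHandler to exit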
  1578  
  1579  // SyncPeerID returns the ID of the current sync peer, or 0 if there is none.
  1580  func (sm *SyncManager) SyncPeerID() int32 {
  1581  	reply := make(chan int32)
  1582  	sm.msgChan <- getSyncPeerMsg{reply: reply}
  1583  	return <-reply
  1584  }
  1585  
  1586  // ProcessBlock submits the passed block to the internal block chain
  1587  // instance for processing and returns whether it is an orphan.
  1588  func (sm *SyncManager) ProcessBlock(block *btcutil.Block, flags blockchain.BehaviorFlags) (bool, error) {
  1589  	reply := make(chan processBlockResponse, 1)
  1590  	sm.msgChan <- processBlockMsg{block: block, flags: flags, reply: reply}
  1591  	response := <-reply
  1592  	return response.isOrphan, response.err
  1593  }
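
        // processBlockExample is an illustrative sketch, not part of the
        // original file: routing a block through SyncManager.ProcessBlock
        // keeps chain access on the blockHandler goroutine. blockchain.BFNone
        // requests the default validation behavior.
        func processBlockExample(sm *SyncManager, block *btcutil.Block) {
        	isOrphan, err := sm.ProcessBlock(block, blockchain.BFNone)
        	if err != nil {
        		log.Errorf("Failed to process block %v: %v", block.Hash(), err)
        		return
        	}
        	if isOrphan {
        		// The block's parent is not yet known to the chain.
        		log.Infof("Block %v is an orphan", block.Hash())
        	}
        }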
  1594  
  1595  // IsCurrent returns whether or not the sync manager believes it is synced with
  1596  // the connected peers.
  1597  func (sm *SyncManager) IsCurrent() bool {
  1598  	reply := make(chan bool)
  1599  	sm.msgChan <- isCurrentMsg{reply: reply}
  1600  	return <-reply
  1601  }
  1602  
  1603  // Pause pauses the sync manager until the returned channel is closed.
  1604  //
  1605  // Note that while paused, all peer and block processing is halted.  The
  1606  // message sender should avoid pausing the sync manager for long durations.
  1607  func (sm *SyncManager) Pause() chan<- struct{} {
  1608  	c := make(chan struct{})
  1609  	sm.msgChan <- pauseMsg{c}
  1610  	return c
  1611  }
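
        // Illustrative sketch, not part of the original file: the channel
        // returned by Pause is the unpause signal, so the typical shape is to
        // resume the manager as soon as the critical section is over.
        //
        //	unpause := sm.Pause()
        //	// ... work that requires the manager to be quiescent ...
        //	close(unpause) // blockHandler resumes processing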
  1612  
  1613  // New constructs a new SyncManager. Use Start to begin processing asynchronous
  1614  // block, tx, and inv updates.
  1615  func New(config *Config) (*SyncManager, error) {
  1616  	sm := SyncManager{
  1617  		peerNotifier:    config.PeerNotifier,
  1618  		chain:           config.Chain,
  1619  		txMemPool:       config.TxMemPool,
  1620  		chainParams:     config.ChainParams,
  1621  		rejectedTxns:    make(map[chainhash.Hash]struct{}),
  1622  		requestedTxns:   make(map[chainhash.Hash]struct{}),
  1623  		requestedBlocks: make(map[chainhash.Hash]struct{}),
  1624  		peerStates:      make(map[*peerpkg.Peer]*peerSyncState),
  1625  		progressLogger:  newBlockProgressLogger("Processed", log),
  1626  		msgChan:         make(chan interface{}, config.MaxPeers*3),
  1627  		headerList:      list.New(),
  1628  		quit:            make(chan struct{}),
  1629  		feeEstimator:    config.FeeEstimator,
  1630  	}
  1631  
  1632  	best := sm.chain.BestSnapshot()
  1633  	if !config.DisableCheckpoints {
  1634  		// Initialize the next checkpoint based on the current height.
  1635  		sm.nextCheckpoint = sm.findNextHeaderCheckpoint(best.Height)
  1636  		if sm.nextCheckpoint != nil {
  1637  			sm.resetHeaderState(&best.Hash, best.Height)
  1638  		}
  1639  	} else {
  1640  		log.Info("Checkpoints are disabled")
  1641  	}
  1642  
  1643  	sm.chain.Subscribe(sm.handleBlockchainNotification)
  1644  
  1645  	return &sm, nil
  1646  }
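
        // The snippet below is an illustrative sketch, not part of the
        // original file: wiring a SyncManager at startup from a hypothetical
        // caller. The notifier, chain, txMemPool, and feeEstimator values are
        // assumed to have been constructed elsewhere.
        //
        //	sm, err := netsync.New(&netsync.Config{
        //		PeerNotifier:       notifier,
        //		Chain:              chain,
        //		TxMemPool:          txMemPool,
        //		ChainParams:        &chaincfg.MainNetParams,
        //		DisableCheckpoints: false,
        //		MaxPeers:           125,
        //		FeeEstimator:       feeEstimator,
        //	})
        //	if err != nil {
        //		return err
        //	}
        //	sm.Start()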