github.com/palcoin-project/palcd@v1.0.0/netsync/manager.go

// Copyright (c) 2013-2017 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package netsync

import (
	"container/list"
	"math/rand"
	"net"
	"sync"
	"sync/atomic"
	"time"

	"github.com/palcoin-project/palcd/blockchain"
	"github.com/palcoin-project/palcd/chaincfg"
	"github.com/palcoin-project/palcd/chaincfg/chainhash"
	"github.com/palcoin-project/palcd/database"
	"github.com/palcoin-project/palcd/mempool"
	peerpkg "github.com/palcoin-project/palcd/peer"
	"github.com/palcoin-project/palcd/wire"
	"github.com/palcoin-project/palcutil"
)

const (
	// minInFlightBlocks is the minimum number of blocks that should be
	// in the request queue for headers-first mode before requesting
	// more.
	minInFlightBlocks = 10

	// maxRejectedTxns is the maximum number of rejected transaction
	// hashes to store in memory.
	maxRejectedTxns = 1000

	// maxRequestedBlocks is the maximum number of requested block
	// hashes to store in memory.
	maxRequestedBlocks = wire.MaxInvPerMsg

	// maxRequestedTxns is the maximum number of requested transaction
	// hashes to store in memory.
	maxRequestedTxns = wire.MaxInvPerMsg

	// maxStallDuration is the time after which we will disconnect our
	// current sync peer if we haven't made progress.
	maxStallDuration = 3 * time.Minute

	// stallSampleInterval is the interval at which we will check to see
	// if our sync has stalled.
	stallSampleInterval = 30 * time.Second
)

// zeroHash is the zero value hash (all zeros).  It is defined as a convenience.
var zeroHash chainhash.Hash

// newPeerMsg signifies a newly connected peer to the block handler.
type newPeerMsg struct {
	peer *peerpkg.Peer
}

// blockMsg packages a bitcoin block message and the peer it came from together
// so the block handler has access to that information.
type blockMsg struct {
	block *palcutil.Block
	peer  *peerpkg.Peer
	reply chan struct{}
}

// invMsg packages a bitcoin inv message and the peer it came from together
// so the block handler has access to that information.
type invMsg struct {
	inv  *wire.MsgInv
	peer *peerpkg.Peer
}

// headersMsg packages a bitcoin headers message and the peer it came from
// together so the block handler has access to that information.
type headersMsg struct {
	headers *wire.MsgHeaders
	peer    *peerpkg.Peer
}

// notFoundMsg packages a bitcoin notfound message and the peer it came from
// together so the block handler has access to that information.
type notFoundMsg struct {
	notFound *wire.MsgNotFound
	peer     *peerpkg.Peer
}

// donePeerMsg signifies a newly disconnected peer to the block handler.
type donePeerMsg struct {
	peer *peerpkg.Peer
}

// txMsg packages a bitcoin tx message and the peer it came from together
// so the block handler has access to that information.
type txMsg struct {
	tx    *palcutil.Tx
	peer  *peerpkg.Peer
	reply chan struct{}
}

// getSyncPeerMsg is a message type to be sent across the message channel for
// retrieving the current sync peer.
type getSyncPeerMsg struct {
	reply chan int32
}

// processBlockResponse is a response sent to the reply channel of a
// processBlockMsg.
type processBlockResponse struct {
	isOrphan bool
	err      error
}

// processBlockMsg is a message type to be sent across the message channel
// for requesting a block to be processed.  Note this call differs from blockMsg
// above in that blockMsg is intended for blocks that came from peers and have
// extra handling whereas this message essentially is just a concurrent safe
// way to call ProcessBlock on the internal block chain instance.
type processBlockMsg struct {
	block *palcutil.Block
	flags blockchain.BehaviorFlags
	reply chan processBlockResponse
}

// isCurrentMsg is a message type to be sent across the message channel for
// requesting whether or not the sync manager believes it is synced with the
// currently connected peers.
type isCurrentMsg struct {
	reply chan bool
}

// pauseMsg is a message type to be sent across the message channel for
// pausing the sync manager.  This effectively provides the caller with
// exclusive access over the manager until a receive is performed on the
// unpause channel.
type pauseMsg struct {
	unpause <-chan struct{}
}

// headerNode is used as a node in a list of headers that are linked together
// between checkpoints.
type headerNode struct {
	height int32
	hash   *chainhash.Hash
}

// peerSyncState stores additional information that the SyncManager tracks
// about a peer.
type peerSyncState struct {
	syncCandidate   bool
	requestQueue    []*wire.InvVect
	requestedTxns   map[chainhash.Hash]struct{}
	requestedBlocks map[chainhash.Hash]struct{}
}

// limitAdd is a helper function for maps that require a maximum limit.  It
// evicts a random entry from the map when adding the new value would cause
// it to overflow the maximum allowed.
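//
// For illustration, with a limit of 2 (the hashes here are hypothetical
// values):
//
//	limitAdd(m, hashA, 2) // len(m) == 1
//	limitAdd(m, hashB, 2) // len(m) == 2
//	limitAdd(m, hashC, 2) // a random entry is evicted first; len(m) == 2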
func limitAdd(m map[chainhash.Hash]struct{}, hash chainhash.Hash, limit int) {
	if len(m)+1 > limit {
		// Remove a random entry from the map.  For most compilers, Go's
		// range statement iterates starting at a random item although
		// that is not 100% guaranteed by the spec.  The iteration order
		// is not important here because an adversary would have to be
		// able to pull off preimage attacks on the hashing function in
		// order to target eviction of specific entries anyways.
		for txHash := range m {
			delete(m, txHash)
			break
		}
	}
	m[hash] = struct{}{}
}

// SyncManager is used to communicate block related messages with peers. The
// SyncManager is started by executing Start() in a goroutine. Once started,
// it selects peers to sync from and starts the initial block download. Once the
// chain is in sync, the SyncManager handles incoming block and header
// notifications and relays announcements of new blocks to peers.
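//
// A minimal wiring sketch (the field values are illustrative; see the Config
// type in this package for the full set of options):
//
//	sm, err := netsync.New(&netsync.Config{
//		PeerNotifier: notifier,
//		Chain:        chain,
//		TxMemPool:    txPool,
//		ChainParams:  &chaincfg.MainNetParams,
//	})
//	if err != nil {
//		// handle the error
//	}
//	sm.Start()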
type SyncManager struct {
	peerNotifier   PeerNotifier
	started        int32
	shutdown       int32
	chain          *blockchain.BlockChain
	txMemPool      *mempool.TxPool
	chainParams    *chaincfg.Params
	progressLogger *blockProgressLogger
	msgChan        chan interface{}
	wg             sync.WaitGroup
	quit           chan struct{}

	// These fields should only be accessed from the blockHandler thread
	rejectedTxns     map[chainhash.Hash]struct{}
	requestedTxns    map[chainhash.Hash]struct{}
	requestedBlocks  map[chainhash.Hash]struct{}
	syncPeer         *peerpkg.Peer
	peerStates       map[*peerpkg.Peer]*peerSyncState
	lastProgressTime time.Time

	// The following fields are used for headers-first mode.
	headersFirstMode bool
	headerList       *list.List
	startHeader      *list.Element
	nextCheckpoint   *chaincfg.Checkpoint

	// An optional fee estimator.
	feeEstimator *mempool.FeeEstimator
}

// resetHeaderState sets the headers-first mode state to values appropriate for
// syncing from a new peer.
func (sm *SyncManager) resetHeaderState(newestHash *chainhash.Hash, newestHeight int32) {
	sm.headersFirstMode = false
	sm.headerList.Init()
	sm.startHeader = nil

	// When there is a next checkpoint, add an entry for the latest known
	// block into the header pool.  This allows the next downloaded header
	// to prove it links to the chain properly.
	if sm.nextCheckpoint != nil {
		node := headerNode{height: newestHeight, hash: newestHash}
		sm.headerList.PushBack(&node)
	}
}

// findNextHeaderCheckpoint returns the next checkpoint after the passed height.
// It returns nil when there is not one, either because the height is already
// later than the final checkpoint or for some other reason such as disabled
// checkpoints.
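//
// For example, with hypothetical checkpoints at heights 100, 200, and 300, a
// height of 150 yields the checkpoint at 200, a height of 250 yields the
// checkpoint at 300, and a height of 300 or greater yields nil.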
func (sm *SyncManager) findNextHeaderCheckpoint(height int32) *chaincfg.Checkpoint {
	checkpoints := sm.chain.Checkpoints()
	if len(checkpoints) == 0 {
		return nil
	}

	// There is no next checkpoint if the height is already after the final
	// checkpoint.
	finalCheckpoint := &checkpoints[len(checkpoints)-1]
	if height >= finalCheckpoint.Height {
		return nil
	}

	// Find the next checkpoint.
	nextCheckpoint := finalCheckpoint
	for i := len(checkpoints) - 2; i >= 0; i-- {
		if height >= checkpoints[i].Height {
			break
		}
		nextCheckpoint = &checkpoints[i]
	}
	return nextCheckpoint
}

// startSync will choose the best peer among the available candidate peers to
// download/sync the blockchain from.  When syncing is already running, it
// simply returns.  It also examines the candidates for any which are no longer
// candidates and removes them as needed.
func (sm *SyncManager) startSync() {
	// Return now if we're already syncing.
	if sm.syncPeer != nil {
		return
	}

	// Once the segwit soft-fork package has activated, we only
	// want to sync from peers which are witness enabled to ensure
	// that we fully validate all blockchain data.
	segwitActive, err := sm.chain.IsDeploymentActive(chaincfg.DeploymentSegwit)
	if err != nil {
		log.Errorf("Unable to query for segwit soft-fork state: %v", err)
		return
	}

	best := sm.chain.BestSnapshot()
	var higherPeers, equalPeers []*peerpkg.Peer
	for peer, state := range sm.peerStates {
		if !state.syncCandidate {
			continue
		}

		if segwitActive && !peer.IsWitnessEnabled() {
			log.Debugf("peer %v not witness enabled, skipping", peer)
			continue
		}

		// Remove sync candidate peers that are no longer candidates due
		// to passing their latest known block.  NOTE: The < is
		// intentional as opposed to <=.  While technically the peer
		// doesn't have a later block when it's equal, it will likely
		// have one soon so it is a reasonable choice.  It also allows
		// the case where both are at 0 such as during regression test.
		if peer.LastBlock() < best.Height {
			state.syncCandidate = false
			continue
		}

		// If the peer is at the same height as us, we'll add it to a
		// set of backup peers in case we do not find one with a higher
		// height. If we are synced up with all of our peers, all of
		// them will be in this set.
		if peer.LastBlock() == best.Height {
			equalPeers = append(equalPeers, peer)
			continue
		}

		// This peer has a height greater than our own, we'll consider
		// it in the set of better peers from which we'll randomly
		// select.
		higherPeers = append(higherPeers, peer)
	}

	// Pick randomly from the set of peers greater than our block height,
	// falling back to a random peer of the same height if none are greater.
	//
	// TODO(conner): Use a better algorithm for ranking peers based on
	// observed metrics and/or sync in parallel.
	var bestPeer *peerpkg.Peer
	switch {
	case len(higherPeers) > 0:
		bestPeer = higherPeers[rand.Intn(len(higherPeers))]

	case len(equalPeers) > 0:
		bestPeer = equalPeers[rand.Intn(len(equalPeers))]
	}

	// Start syncing from the best peer if one was selected.
	if bestPeer != nil {
		// Clear the requestedBlocks if the sync peer changes, otherwise
		// we may ignore blocks we need that the last sync peer failed
		// to send.
		sm.requestedBlocks = make(map[chainhash.Hash]struct{})

		locator, err := sm.chain.LatestBlockLocator()
		if err != nil {
			log.Errorf("Failed to get block locator for the "+
				"latest block: %v", err)
			return
		}

		log.Infof("Syncing to block height %d from peer %v",
			bestPeer.LastBlock(), bestPeer.Addr())

		// When the current height is less than a known checkpoint we
		// can use block headers to learn about which blocks comprise
		// the chain up to the checkpoint and perform less validation
		// for them.  This is possible since each header contains the
		// hash of the previous header and a merkle root.  Therefore if
		// we validate all of the received headers link together
		// properly and the checkpoint hashes match, we can be sure the
		// hashes for the blocks in between are accurate.  Further, once
		// the full blocks are downloaded, the merkle root is computed
		// and compared against the value in the header which proves the
		// full block hasn't been tampered with.
		//
		// Once we have passed the final checkpoint, or checkpoints are
		// disabled, use standard inv messages to learn about the blocks
		// and fully validate them.  Finally, regression test mode does
		// not support the headers-first approach so do normal block
		// downloads when in regression test mode.
		if sm.nextCheckpoint != nil &&
			best.Height < sm.nextCheckpoint.Height &&
			sm.chainParams != &chaincfg.RegressionNetParams {

			bestPeer.PushGetHeadersMsg(locator, sm.nextCheckpoint.Hash)
			sm.headersFirstMode = true
			log.Infof("Downloading headers for blocks %d to "+
				"%d from peer %s", best.Height+1,
				sm.nextCheckpoint.Height, bestPeer.Addr())
		} else {
			bestPeer.PushGetBlocksMsg(locator, &zeroHash)
		}
		sm.syncPeer = bestPeer

		// Reset the last progress time now that we have a non-nil
		// syncPeer to avoid instantly detecting it as stalled in the
		// event the progress time hasn't been updated recently.
		sm.lastProgressTime = time.Now()
	} else {
		log.Warnf("No sync peer candidates available")
	}
}

// isSyncCandidate returns whether or not the peer is a candidate to consider
// syncing from.
func (sm *SyncManager) isSyncCandidate(peer *peerpkg.Peer) bool {
	// Typically a peer is not a candidate for sync if it's not a full
	// node; however, regression test is special in that the regression
	// tool is not a full node and still needs to be considered a sync
	// candidate.
	if sm.chainParams == &chaincfg.RegressionNetParams {
		// The peer is not a candidate if it's not coming from localhost
		// or the hostname can't be determined for some reason.
		host, _, err := net.SplitHostPort(peer.Addr())
		if err != nil {
			return false
		}

		if host != "127.0.0.1" && host != "localhost" {
			return false
		}
	} else {
		// The peer is not a candidate for sync if it's not a full
		// node. Additionally, if the segwit soft-fork package has
		// activated, then the peer must also be upgraded.
		segwitActive, err := sm.chain.IsDeploymentActive(chaincfg.DeploymentSegwit)
		if err != nil {
			log.Errorf("Unable to query for segwit "+
				"soft-fork state: %v", err)
		}
		nodeServices := peer.Services()
		if nodeServices&wire.SFNodeNetwork != wire.SFNodeNetwork ||
			(segwitActive && !peer.IsWitnessEnabled()) {
			return false
		}
	}

	// Candidate if all checks passed.
	return true
}

// handleNewPeerMsg deals with new peers that have signalled they may
// be considered as a sync peer (they have already successfully negotiated).  It
// also starts syncing if needed.  It is invoked from the syncHandler goroutine.
func (sm *SyncManager) handleNewPeerMsg(peer *peerpkg.Peer) {
	// Ignore if in the process of shutting down.
	if atomic.LoadInt32(&sm.shutdown) != 0 {
		return
	}

	log.Infof("New valid peer %s (%s)", peer, peer.UserAgent())

	// Initialize the peer state
	isSyncCandidate := sm.isSyncCandidate(peer)
	sm.peerStates[peer] = &peerSyncState{
		syncCandidate:   isSyncCandidate,
		requestedTxns:   make(map[chainhash.Hash]struct{}),
		requestedBlocks: make(map[chainhash.Hash]struct{}),
	}

	// Start syncing by choosing the best candidate if needed.
	if isSyncCandidate && sm.syncPeer == nil {
		sm.startSync()
	}
}

// handleStallSample will switch to a new sync peer if the current one has
// stalled. This is detected by comparing the last progress timestamp with
// the current time, and disconnecting the peer if we stalled before reaching
// their highest advertised block.
func (sm *SyncManager) handleStallSample() {
	if atomic.LoadInt32(&sm.shutdown) != 0 {
		return
	}

	// If we don't have an active sync peer, exit early.
	if sm.syncPeer == nil {
		return
	}

	// If the stall timeout has not elapsed, exit early.
	if time.Since(sm.lastProgressTime) <= maxStallDuration {
		return
	}

	// Check to see that the peer's sync state exists.
	state, exists := sm.peerStates[sm.syncPeer]
	if !exists {
		return
	}

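	// Clear the stalled peer's request state so any blocks and
	// transactions it failed to deliver can be re-requested from the next
	// sync peer.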
	sm.clearRequestedState(state)

	disconnectSyncPeer := sm.shouldDCStalledSyncPeer()
	sm.updateSyncPeer(disconnectSyncPeer)
}

// shouldDCStalledSyncPeer determines whether or not we should disconnect a
// stalled sync peer. If the peer has stalled and its reported height is greater
// than our own best height, we will disconnect it. Otherwise, we will keep the
// peer connected in case we are already at tip.
func (sm *SyncManager) shouldDCStalledSyncPeer() bool {
	lastBlock := sm.syncPeer.LastBlock()
	startHeight := sm.syncPeer.StartingHeight()

	var peerHeight int32
	if lastBlock > startHeight {
		peerHeight = lastBlock
	} else {
		peerHeight = startHeight
	}

	// If we've stalled out yet the sync peer reports having more blocks
	// for us, we will disconnect it. This allows us, when at tip, to avoid
	// disconnecting peers that are equal to us or temporarily lagging
	// behind us.
	best := sm.chain.BestSnapshot()
	return peerHeight > best.Height
}

// handleDonePeerMsg deals with peers that have signalled they are done.  It
// removes the peer as a candidate for syncing and in the case where it was
// the current sync peer, attempts to select a new best peer to sync from.  It
// is invoked from the syncHandler goroutine.
func (sm *SyncManager) handleDonePeerMsg(peer *peerpkg.Peer) {
	state, exists := sm.peerStates[peer]
	if !exists {
		log.Warnf("Received done peer message for unknown peer %s", peer)
		return
	}

	// Remove the peer from the list of candidate peers.
	delete(sm.peerStates, peer)

	log.Infof("Lost peer %s", peer)

	sm.clearRequestedState(state)

	if peer == sm.syncPeer {
		// Update the sync peer. The server has already disconnected the
		// peer before signaling to the sync manager.
		sm.updateSyncPeer(false)
	}
}

// clearRequestedState wipes all expected transactions and blocks from the sync
// manager's requested maps that were requested under a peer's sync state.
// This allows them to be re-requested by a subsequent sync peer.
func (sm *SyncManager) clearRequestedState(state *peerSyncState) {
	// Remove requested transactions from the global map so that they will
	// be fetched from elsewhere next time we get an inv.
	for txHash := range state.requestedTxns {
		delete(sm.requestedTxns, txHash)
	}

	// Remove requested blocks from the global map so that they will be
	// fetched from elsewhere next time we get an inv.
	// TODO: we could possibly here check which peers have these blocks
	// and request them now to speed things up a little.
	for blockHash := range state.requestedBlocks {
		delete(sm.requestedBlocks, blockHash)
	}
}

// updateSyncPeer chooses a new sync peer to replace the current one. If
// dcSyncPeer is true, this method will also disconnect the current sync peer.
// If we are in headers-first mode, any header state related to prefetching is
// also reset in preparation for the next sync peer.
func (sm *SyncManager) updateSyncPeer(dcSyncPeer bool) {
	log.Debugf("Updating sync peer, no progress for: %v",
		time.Since(sm.lastProgressTime))

	// First, disconnect the current sync peer if requested.
	if dcSyncPeer {
		sm.syncPeer.Disconnect()
	}

	// Reset any header state before we choose our next active sync peer.
	if sm.headersFirstMode {
		best := sm.chain.BestSnapshot()
		sm.resetHeaderState(&best.Hash, best.Height)
	}

	sm.syncPeer = nil
	sm.startSync()
}

// handleTxMsg handles transaction messages from all peers.
func (sm *SyncManager) handleTxMsg(tmsg *txMsg) {
	peer := tmsg.peer
	state, exists := sm.peerStates[peer]
	if !exists {
		log.Warnf("Received tx message from unknown peer %s", peer)
		return
	}

	// NOTE:  BitcoinJ, and possibly other wallets, don't follow the spec of
	// sending an inventory message and allowing the remote peer to decide
	// whether or not they want to request the transaction via a getdata
	// message.  Unfortunately, the reference implementation permits
	// unrequested data, so it has allowed wallets that don't follow the
	// spec to proliferate.  While this is not ideal, there is no check here
	// to disconnect peers for sending unsolicited transactions to provide
	// interoperability.
	txHash := tmsg.tx.Hash()

	// Ignore transactions that we have already rejected.  Do not
	// send a reject message here because if the transaction was already
	// rejected, the transaction was unsolicited.
	if _, exists = sm.rejectedTxns[*txHash]; exists {
		log.Debugf("Ignoring unsolicited previously rejected "+
			"transaction %v from %s", txHash, peer)
		return
	}

	// Process the transaction to include validation, insertion in the
	// memory pool, orphan handling, etc.
	acceptedTxs, err := sm.txMemPool.ProcessTransaction(tmsg.tx,
		true, true, mempool.Tag(peer.ID()))

	// Remove transaction from request maps. Either the mempool/chain
	// already knows about it and as such we shouldn't have any more
	// instances of trying to fetch it, or we failed to insert and thus
	// we'll retry next time we get an inv.
	delete(state.requestedTxns, *txHash)
	delete(sm.requestedTxns, *txHash)

	if err != nil {
		// Do not request this transaction again until a new block
		// has been processed.
		limitAdd(sm.rejectedTxns, *txHash, maxRejectedTxns)

		// When the error is a rule error, it means the transaction was
		// simply rejected as opposed to something actually going wrong,
		// so log it as such.  Otherwise, something really did go wrong,
		// so log it as an actual error.
		if _, ok := err.(mempool.RuleError); ok {
			log.Debugf("Rejected transaction %v from %s: %v",
				txHash, peer, err)
		} else {
			log.Errorf("Failed to process transaction %v: %v",
				txHash, err)
		}

		// Convert the error into an appropriate reject message and
		// send it.
		code, reason := mempool.ErrToRejectErr(err)
		peer.PushRejectMsg(wire.CmdTx, code, reason, txHash, false)
		return
	}

	sm.peerNotifier.AnnounceNewTransactions(acceptedTxs)
}

// current returns true if we believe we are synced with our peers, false if we
// still have blocks to check.
func (sm *SyncManager) current() bool {
	if !sm.chain.IsCurrent() {
		return false
	}

	// If blockChain thinks we are current and we have no syncPeer it
	// is probably right.
	if sm.syncPeer == nil {
		return true
	}

	// No matter what chain thinks, if we are below the block we are syncing
	// to we are not current.
	if sm.chain.BestSnapshot().Height < sm.syncPeer.LastBlock() {
		return false
	}
	return true
}

// handleBlockMsg handles block messages from all peers.
func (sm *SyncManager) handleBlockMsg(bmsg *blockMsg) {
	peer := bmsg.peer
	state, exists := sm.peerStates[peer]
	if !exists {
		log.Warnf("Received block message from unknown peer %s", peer)
		return
	}

	// If we didn't ask for this block then the peer is misbehaving.
	blockHash := bmsg.block.Hash()
	if _, exists = state.requestedBlocks[*blockHash]; !exists {
		// The regression test intentionally sends some blocks twice
		// to test duplicate block insertion fails.  Don't disconnect
		// the peer or ignore the block when we're in regression test
		// mode in this case so the chain code is actually fed the
		// duplicate blocks.
		if sm.chainParams != &chaincfg.RegressionNetParams {
			log.Warnf("Got unrequested block %v from %s -- "+
				"disconnecting", blockHash, peer.Addr())
			peer.Disconnect()
			return
		}
	}

	// When in headers-first mode, if the block matches the hash of the
	// first header in the list of headers that are being fetched, it's
	// eligible for less validation since the headers have already been
	// verified to link together and are valid up to the next checkpoint.
	// Also, remove the list entry for all blocks except the checkpoint
	// since it is needed to verify the next round of headers links
	// properly.
	isCheckpointBlock := false
	behaviorFlags := blockchain.BFNone
	if sm.headersFirstMode {
		firstNodeEl := sm.headerList.Front()
		if firstNodeEl != nil {
			firstNode := firstNodeEl.Value.(*headerNode)
			if blockHash.IsEqual(firstNode.hash) {
				behaviorFlags |= blockchain.BFFastAdd
				if firstNode.hash.IsEqual(sm.nextCheckpoint.Hash) {
					isCheckpointBlock = true
				} else {
					sm.headerList.Remove(firstNodeEl)
				}
			}
		}
	}

	// Remove block from request maps. Either chain will know about it and
	// so we shouldn't have any more instances of trying to fetch it, or we
	// will fail the insert and thus we'll retry next time we get an inv.
	delete(state.requestedBlocks, *blockHash)
	delete(sm.requestedBlocks, *blockHash)

	// Process the block to include validation, best chain selection, orphan
	// handling, etc.
	_, isOrphan, err := sm.chain.ProcessBlock(bmsg.block, behaviorFlags)
	if err != nil {
		// When the error is a rule error, it means the block was simply
		// rejected as opposed to something actually going wrong, so log
		// it as such.  Otherwise, something really did go wrong, so log
		// it as an actual error.
		if _, ok := err.(blockchain.RuleError); ok {
			log.Infof("Rejected block %v from %s: %v", blockHash,
				peer, err)
		} else {
			log.Errorf("Failed to process block %v: %v",
				blockHash, err)
		}
		if dbErr, ok := err.(database.Error); ok && dbErr.ErrorCode ==
			database.ErrCorruption {
			panic(dbErr)
		}

		// Convert the error into an appropriate reject message and
		// send it.
		code, reason := mempool.ErrToRejectErr(err)
		peer.PushRejectMsg(wire.CmdBlock, code, reason, blockHash, false)
		return
	}

	// Meta-data about the new block this peer is reporting. We use this
	// below to update this peer's latest block height and the heights of
	// other peers based on their last announced block hash. This allows us
	// to dynamically update the block heights of peers, avoiding stale
	// heights when looking for a new sync peer. Upon acceptance of a block
	// or recognition of an orphan, we also use this information to update
	// the block heights of other peers whose invs may have been ignored
	// if we are actively syncing while the chain is not yet current or
	// who may have lost the block announcement race.
	var heightUpdate int32
	var blkHashUpdate *chainhash.Hash

	// Request the parents for the orphan block from the peer that sent it.
	if isOrphan {
		// We've just received an orphan block from a peer. In order
		// to update the height of the peer, we try to extract the
		// block height from the scriptSig of the coinbase transaction.
		// Extraction is only attempted if the block's version is
		// high enough (ver 2+).
		header := &bmsg.block.MsgBlock().Header
		if blockchain.ShouldHaveSerializedBlockHeight(header) {
			coinbaseTx := bmsg.block.Transactions()[0]
			cbHeight, err := blockchain.ExtractCoinbaseHeight(coinbaseTx)
			if err != nil {
				log.Warnf("Unable to extract height from "+
					"coinbase tx: %v", err)
			} else {
				log.Debugf("Extracted height of %v from "+
					"orphan block", cbHeight)
				heightUpdate = cbHeight
				blkHashUpdate = blockHash
			}
		}

		orphanRoot := sm.chain.GetOrphanRoot(blockHash)
		locator, err := sm.chain.LatestBlockLocator()
		if err != nil {
			log.Warnf("Failed to get block locator for the "+
				"latest block: %v", err)
		} else {
			peer.PushGetBlocksMsg(locator, orphanRoot)
		}
	} else {
		if peer == sm.syncPeer {
			sm.lastProgressTime = time.Now()
		}

		// When the block is not an orphan, log information about it and
		// update the chain state.
		sm.progressLogger.LogBlockHeight(bmsg.block)

		// Update this peer's latest block height, for future
		// potential sync node candidacy.
		best := sm.chain.BestSnapshot()
		heightUpdate = best.Height
		blkHashUpdate = &best.Hash

		// Clear the rejected transactions.
		sm.rejectedTxns = make(map[chainhash.Hash]struct{})
	}

	// Update the block height for this peer. But only send a message to
	// the server for updating peer heights if this is an orphan or our
	// chain is "current". This avoids sending a spammy amount of messages
	// if we're syncing the chain from scratch.
	if blkHashUpdate != nil && heightUpdate != 0 {
		peer.UpdateLastBlockHeight(heightUpdate)
		if isOrphan || sm.current() {
			go sm.peerNotifier.UpdatePeerHeights(blkHashUpdate, heightUpdate,
				peer)
		}
	}

	// Nothing more to do if we aren't in headers-first mode.
	if !sm.headersFirstMode {
		return
	}

	// This is headers-first mode, so if the block is not a checkpoint,
	// request more blocks using the header list when the request queue is
	// getting short.
	if !isCheckpointBlock {
		if sm.startHeader != nil &&
			len(state.requestedBlocks) < minInFlightBlocks {
			sm.fetchHeaderBlocks()
		}
		return
	}

	// This is headers-first mode and the block is a checkpoint.  When
	// there is a next checkpoint, get the next round of headers by asking
	// for headers starting from the block after this one up to the next
	// checkpoint.
	prevHeight := sm.nextCheckpoint.Height
	prevHash := sm.nextCheckpoint.Hash
	sm.nextCheckpoint = sm.findNextHeaderCheckpoint(prevHeight)
	if sm.nextCheckpoint != nil {
		locator := blockchain.BlockLocator([]*chainhash.Hash{prevHash})
		err := peer.PushGetHeadersMsg(locator, sm.nextCheckpoint.Hash)
		if err != nil {
			log.Warnf("Failed to send getheaders message to "+
				"peer %s: %v", peer.Addr(), err)
			return
		}
		log.Infof("Downloading headers for blocks %d to %d from "+
			"peer %s", prevHeight+1, sm.nextCheckpoint.Height,
			sm.syncPeer.Addr())
		return
	}

	// This is headers-first mode, the block is a checkpoint, and there are
	// no more checkpoints, so switch to normal mode by requesting blocks
	// from the block after this one up to the end of the chain (zero hash).
	sm.headersFirstMode = false
	sm.headerList.Init()
	log.Infof("Reached the final checkpoint -- switching to normal mode")
	locator := blockchain.BlockLocator([]*chainhash.Hash{blockHash})
	err = peer.PushGetBlocksMsg(locator, &zeroHash)
	if err != nil {
		log.Warnf("Failed to send getblocks message to peer %s: %v",
			peer.Addr(), err)
		return
	}
}

// fetchHeaderBlocks creates and sends a request to the syncPeer for the next
// list of blocks to be downloaded based on the current list of headers.
func (sm *SyncManager) fetchHeaderBlocks() {
	// Nothing to do if there is no start header.
	if sm.startHeader == nil {
		log.Warnf("fetchHeaderBlocks called with no start header")
		return
	}

	// Build up a getdata request for the list of blocks the headers
	// describe.  The size hint will be limited to wire.MaxInvPerMsg by
	// the function, so no need to double check it here.
	gdmsg := wire.NewMsgGetDataSizeHint(uint(sm.headerList.Len()))
	numRequested := 0
	for e := sm.startHeader; e != nil; e = e.Next() {
		node, ok := e.Value.(*headerNode)
		if !ok {
			log.Warn("Header list node type is not a headerNode")
			continue
		}

		iv := wire.NewInvVect(wire.InvTypeBlock, node.hash)
		haveInv, err := sm.haveInventory(iv)
		if err != nil {
			log.Warnf("Unexpected failure when checking for "+
				"existing inventory during header block "+
				"fetch: %v", err)
		}
		if !haveInv {
			syncPeerState := sm.peerStates[sm.syncPeer]

			sm.requestedBlocks[*node.hash] = struct{}{}
			syncPeerState.requestedBlocks[*node.hash] = struct{}{}

			// If we're fetching from a witness enabled peer
			// post-fork, then ensure that we receive all the
			// witness data in the blocks.
			if sm.syncPeer.IsWitnessEnabled() {
				iv.Type = wire.InvTypeWitnessBlock
			}

			gdmsg.AddInvVect(iv)
			numRequested++
		}
		sm.startHeader = e.Next()
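		// Stop adding to the request once it reaches the maximum
		// inventory allowed in a single getdata message.  Anything
		// left over is requested on a subsequent call.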
		if numRequested >= wire.MaxInvPerMsg {
			break
		}
	}
	if len(gdmsg.InvList) > 0 {
		sm.syncPeer.QueueMessage(gdmsg, nil)
	}
}

// handleHeadersMsg handles block header messages from all peers.  Headers are
// requested when performing a headers-first sync.
func (sm *SyncManager) handleHeadersMsg(hmsg *headersMsg) {
	peer := hmsg.peer
	_, exists := sm.peerStates[peer]
	if !exists {
		log.Warnf("Received headers message from unknown peer %s", peer)
		return
	}

	// The remote peer is misbehaving if we didn't request headers.
	msg := hmsg.headers
	numHeaders := len(msg.Headers)
	if !sm.headersFirstMode {
		log.Warnf("Got %d unrequested headers from %s -- "+
			"disconnecting", numHeaders, peer.Addr())
		peer.Disconnect()
		return
	}

	// Nothing to do for an empty headers message.
	if numHeaders == 0 {
		return
	}

	// Process all of the received headers ensuring each one connects to the
	// previous and that checkpoints match.
	receivedCheckpoint := false
	var finalHash *chainhash.Hash
	for _, blockHeader := range msg.Headers {
		blockHash := blockHeader.BlockHash()
		finalHash = &blockHash

		// Ensure there is a previous header to compare against.
		prevNodeEl := sm.headerList.Back()
		if prevNodeEl == nil {
			log.Warnf("Header list does not contain a previous " +
				"element as expected -- disconnecting peer")
			peer.Disconnect()
			return
		}

		// Ensure the header properly connects to the previous one and
		// add it to the list of headers.
		node := headerNode{hash: &blockHash}
		prevNode := prevNodeEl.Value.(*headerNode)
		if prevNode.hash.IsEqual(&blockHeader.PrevBlock) {
			node.height = prevNode.height + 1
			e := sm.headerList.PushBack(&node)
			if sm.startHeader == nil {
				sm.startHeader = e
			}
		} else {
			log.Warnf("Received block header that does not "+
				"properly connect to the chain from peer %s "+
				"-- disconnecting", peer.Addr())
			peer.Disconnect()
			return
		}

		// Verify the header at the next checkpoint height matches.
		if node.height == sm.nextCheckpoint.Height {
			if node.hash.IsEqual(sm.nextCheckpoint.Hash) {
				receivedCheckpoint = true
				log.Infof("Verified downloaded block "+
					"header against checkpoint at height "+
					"%d/hash %s", node.height, node.hash)
			} else {
				log.Warnf("Block header at height %d/hash "+
					"%s from peer %s does NOT match "+
					"expected checkpoint hash of %s -- "+
					"disconnecting", node.height,
					node.hash, peer.Addr(),
					sm.nextCheckpoint.Hash)
				peer.Disconnect()
				return
			}
			break
		}
	}

	// When this header is a checkpoint, switch to fetching the blocks for
	// all of the headers since the last checkpoint.
	if receivedCheckpoint {
		// Since the first entry of the list is always the final block
		// that is already in the database and is only used to ensure
		// the next header links properly, it must be removed before
		// fetching the blocks.
		sm.headerList.Remove(sm.headerList.Front())
		log.Infof("Received %v block headers: Fetching blocks",
			sm.headerList.Len())
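		// Reset the progress logger's reference time so block fetch
		// progress is measured from this point rather than from when
		// the headers were requested.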
		sm.progressLogger.SetLastLogTime(time.Now())
		sm.fetchHeaderBlocks()
		return
	}

	// This header is not a checkpoint, so request the next batch of
	// headers starting from the latest known header and ending with the
	// next checkpoint.
	locator := blockchain.BlockLocator([]*chainhash.Hash{finalHash})
	err := peer.PushGetHeadersMsg(locator, sm.nextCheckpoint.Hash)
	if err != nil {
		log.Warnf("Failed to send getheaders message to "+
			"peer %s: %v", peer.Addr(), err)
		return
	}
}

// handleNotFoundMsg handles notfound messages from all peers.
func (sm *SyncManager) handleNotFoundMsg(nfmsg *notFoundMsg) {
	peer := nfmsg.peer
	state, exists := sm.peerStates[peer]
	if !exists {
		log.Warnf("Received notfound message from unknown peer %s", peer)
		return
	}
	for _, inv := range nfmsg.notFound.InvList {
		// verify the hash was actually announced by the peer
		// before deleting from the global requested maps.
		switch inv.Type {
		case wire.InvTypeWitnessBlock:
			fallthrough
		case wire.InvTypeBlock:
			if _, exists := state.requestedBlocks[inv.Hash]; exists {
				delete(state.requestedBlocks, inv.Hash)
				delete(sm.requestedBlocks, inv.Hash)
			}

		case wire.InvTypeWitnessTx:
			fallthrough
		case wire.InvTypeTx:
			if _, exists := state.requestedTxns[inv.Hash]; exists {
				delete(state.requestedTxns, inv.Hash)
				delete(sm.requestedTxns, inv.Hash)
			}
		}
	}
}

// haveInventory returns whether or not the inventory represented by the passed
// inventory vector is known.  This includes checking all of the various places
// inventory can be when it is in different states such as blocks that are part
// of the main chain, on a side chain, in the orphan pool, and transactions that
// are in the memory pool (either the main pool or orphan pool).
func (sm *SyncManager) haveInventory(invVect *wire.InvVect) (bool, error) {
	switch invVect.Type {
	case wire.InvTypeWitnessBlock:
		fallthrough
	case wire.InvTypeBlock:
		// Ask chain if the block is known to it in any form (main
		// chain, side chain, or orphan).
		return sm.chain.HaveBlock(&invVect.Hash)

	case wire.InvTypeWitnessTx:
		fallthrough
	case wire.InvTypeTx:
		// Ask the transaction memory pool if the transaction is known
		// to it in any form (main pool or orphan).
		if sm.txMemPool.HaveTransaction(&invVect.Hash) {
			return true, nil
		}

		// Check if the transaction exists from the point of view of the
		// end of the main chain.  Note that this is only a best effort
		// since it is expensive to check existence of every output and
		// the only purpose of this check is to avoid downloading
		// already known transactions.  Only the first two outputs are
		// checked because the vast majority of transactions consist of
		// two outputs where one is some form of "pay-to-somebody-else"
		// and the other is a change output.
		prevOut := wire.OutPoint{Hash: invVect.Hash}
		for i := uint32(0); i < 2; i++ {
			prevOut.Index = i
			entry, err := sm.chain.FetchUtxoEntry(prevOut)
			if err != nil {
				return false, err
			}
			if entry != nil && !entry.IsSpent() {
				return true, nil
			}
		}

		return false, nil
	}

	// The requested inventory is an unsupported type, so just claim
	// it is known to avoid requesting it.
	return true, nil
}

// handleInvMsg handles inv messages from all peers.
// We examine the inventory advertised by the remote peer and act accordingly.
func (sm *SyncManager) handleInvMsg(imsg *invMsg) {
	peer := imsg.peer
	state, exists := sm.peerStates[peer]
	if !exists {
		log.Warnf("Received inv message from unknown peer %s", peer)
		return
	}

	// Attempt to find the final block in the inventory list.  There may
	// not be one.
	lastBlock := -1
	invVects := imsg.inv.InvList
	for i := len(invVects) - 1; i >= 0; i-- {
		if invVects[i].Type == wire.InvTypeBlock {
			lastBlock = i
			break
		}
	}

	// If this inv contains a block announcement, and this isn't coming from
	// our current sync peer or we're current, then update the last
	// announced block for this peer. We'll use this information later to
	// update the heights of peers based on blocks we've accepted that they
	// previously announced.
	if lastBlock != -1 && (peer != sm.syncPeer || sm.current()) {
		peer.UpdateLastAnnouncedBlock(&invVects[lastBlock].Hash)
	}

	// Ignore invs from peers that aren't the sync peer if we are not
	// current.  Helps prevent fetching a mass of orphans.
	if peer != sm.syncPeer && !sm.current() {
		return
	}

	// If our chain is current and a peer announces a block we already
	// know of, then update their current block height.
	if lastBlock != -1 && sm.current() {
		blkHeight, err := sm.chain.BlockHeightByHash(&invVects[lastBlock].Hash)
		if err == nil {
			peer.UpdateLastBlockHeight(blkHeight)
		}
	}

	// Request the advertised inventory if we don't already have it.  Also,
	// request parent blocks of orphans if we receive one we already have.
	// Finally, attempt to detect potential stalls due to long side chains
	// we already have and request more blocks to prevent them.
	for i, iv := range invVects {
		// Ignore unsupported inventory types.
		switch iv.Type {
		case wire.InvTypeBlock:
		case wire.InvTypeTx:
		case wire.InvTypeWitnessBlock:
		case wire.InvTypeWitnessTx:
		default:
			continue
		}

		// Add the inventory to the cache of known inventory
		// for the peer.
		peer.AddKnownInventory(iv)

		// Ignore inventory when we're in headers-first mode.
		if sm.headersFirstMode {
			continue
		}

		// Request the inventory if we don't already have it.
		haveInv, err := sm.haveInventory(iv)
		if err != nil {
			log.Warnf("Unexpected failure when checking for "+
				"existing inventory during inv message "+
				"processing: %v", err)
			continue
		}
		if !haveInv {
			if iv.Type == wire.InvTypeTx {
				// Skip the transaction if it has already been
				// rejected.
				if _, exists := sm.rejectedTxns[iv.Hash]; exists {
					continue
				}
			}
			// Ignore block invs from non-witness enabled
			// peers, as after segwit activation we only want to
			// download from peers that can provide us full witness
			// data for blocks.
			if !peer.IsWitnessEnabled() && iv.Type == wire.InvTypeBlock {
				continue
			}

			// Add it to the request queue.
			state.requestQueue = append(state.requestQueue, iv)
			continue
		}
		if iv.Type == wire.InvTypeBlock {
			// The block is an orphan block that we already have.
			// When the existing orphan was processed, it requested
			// the missing parent blocks.  When this scenario
			// happens, it means there were more blocks missing
			// than are allowed into a single inventory message.  As
			// a result, once this peer requested the final
			// advertised block, the remote peer noticed and is now
			// resending the orphan block as an available block
			// to signal there are more missing blocks that need to
			// be requested.
			if sm.chain.IsKnownOrphan(&iv.Hash) {
				// Request blocks starting at the latest known
				// up to the root of the orphan that just came
				// in.
				orphanRoot := sm.chain.GetOrphanRoot(&iv.Hash)
				locator, err := sm.chain.LatestBlockLocator()
				if err != nil {
					log.Errorf("PEER: Failed to get block "+
						"locator for the latest block: "+
						"%v", err)
					continue
				}
				peer.PushGetBlocksMsg(locator, orphanRoot)
				continue
			}

			// We already have the final block advertised by this
			// inventory message, so force a request for more.  This
			// should only happen if we're on a really long side
			// chain.
			if i == lastBlock {
				// Request blocks after this one up to the
				// final one the remote peer knows about (zero
				// stop hash).
				locator := sm.chain.BlockLocatorFromHash(&iv.Hash)
				peer.PushGetBlocksMsg(locator, &zeroHash)
			}
		}
	}

	// Request as much as possible at once.  Anything that won't fit into
	// the request will be requested on the next inv message.
	numRequested := 0
	gdmsg := wire.NewMsgGetData()
	requestQueue := state.requestQueue
	for len(requestQueue) != 0 {
		iv := requestQueue[0]
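		// Nil out the consumed entry so the backing array does not
		// keep the inv vector alive.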
		requestQueue[0] = nil
		requestQueue = requestQueue[1:]

		switch iv.Type {
		case wire.InvTypeWitnessBlock:
			fallthrough
		case wire.InvTypeBlock:
			// Request the block if there is not already a pending
			// request.
			if _, exists := sm.requestedBlocks[iv.Hash]; !exists {
				limitAdd(sm.requestedBlocks, iv.Hash, maxRequestedBlocks)
				limitAdd(state.requestedBlocks, iv.Hash, maxRequestedBlocks)

				if peer.IsWitnessEnabled() {
					iv.Type = wire.InvTypeWitnessBlock
				}

				gdmsg.AddInvVect(iv)
				numRequested++
			}

		case wire.InvTypeWitnessTx:
			fallthrough
		case wire.InvTypeTx:
			// Request the transaction if there is not already a
			// pending request.
			if _, exists := sm.requestedTxns[iv.Hash]; !exists {
				limitAdd(sm.requestedTxns, iv.Hash, maxRequestedTxns)
				limitAdd(state.requestedTxns, iv.Hash, maxRequestedTxns)

				// If the peer is capable, request the txn
				// including all witness data.
				if peer.IsWitnessEnabled() {
					iv.Type = wire.InvTypeWitnessTx
				}

				gdmsg.AddInvVect(iv)
				numRequested++
			}
		}

		if numRequested >= wire.MaxInvPerMsg {
			break
		}
	}
	state.requestQueue = requestQueue
	if len(gdmsg.InvList) > 0 {
		peer.QueueMessage(gdmsg, nil)
	}
}

// blockHandler is the main handler for the sync manager.  It must be run as a
// goroutine.  It processes block and inv messages in a separate goroutine
// from the peer handlers so the block (MsgBlock) messages are handled by a
// single thread without needing to lock memory data structures.  This is
// important because the sync manager controls which blocks are needed and how
// the fetching should proceed.
func (sm *SyncManager) blockHandler() {
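	// Periodically sample for a stalled sync peer so a replacement can be
	// chosen before the download grinds to a halt.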
	stallTicker := time.NewTicker(stallSampleInterval)
	defer stallTicker.Stop()

out:
	for {
		select {
		case m := <-sm.msgChan:
			switch msg := m.(type) {
			case *newPeerMsg:
				sm.handleNewPeerMsg(msg.peer)

			case *txMsg:
				sm.handleTxMsg(msg)
				msg.reply <- struct{}{}

			case *blockMsg:
				sm.handleBlockMsg(msg)
				msg.reply <- struct{}{}

			case *invMsg:
				sm.handleInvMsg(msg)

			case *headersMsg:
				sm.handleHeadersMsg(msg)

			case *notFoundMsg:
				sm.handleNotFoundMsg(msg)

			case *donePeerMsg:
				sm.handleDonePeerMsg(msg.peer)

			case getSyncPeerMsg:
				var peerID int32
				if sm.syncPeer != nil {
					peerID = sm.syncPeer.ID()
				}
				msg.reply <- peerID

			case processBlockMsg:
				_, isOrphan, err := sm.chain.ProcessBlock(
					msg.block, msg.flags)
				if err != nil {
					msg.reply <- processBlockResponse{
						isOrphan: false,
						err:      err,
					}
					continue
				}

				msg.reply <- processBlockResponse{
					isOrphan: isOrphan,
					err:      nil,
				}

			case isCurrentMsg:
				msg.reply <- sm.current()

			case pauseMsg:
				// Wait until the sender unpauses the manager.
				<-msg.unpause

			default:
				log.Warnf("Invalid message type in block "+
					"handler: %T", msg)
			}

		case <-stallTicker.C:
			sm.handleStallSample()

		case <-sm.quit:
			break out
		}
	}

	sm.wg.Done()
	log.Trace("Block handler done")
}

  1386  // handleBlockchainNotification handles notifications from blockchain.  It does
  1387  // things such as request orphan block parents and relay accepted blocks to
  1388  // connected peers.
  1389  func (sm *SyncManager) handleBlockchainNotification(notification *blockchain.Notification) {
  1390  	switch notification.Type {
  1391  	// A block has been accepted into the block chain.  Relay it to other
  1392  	// peers.
  1393  	case blockchain.NTBlockAccepted:
  1394  		// Don't relay if we are not current. Other peers that are
  1395  		// current should already know about it.
  1396  		if !sm.current() {
  1397  			return
  1398  		}
  1399  
  1400  		block, ok := notification.Data.(*palcutil.Block)
  1401  		if !ok {
  1402  			log.Warnf("Chain accepted notification is not a block.")
  1403  			break
  1404  		}
  1405  
  1406  		// Generate the inventory vector and relay it.
  1407  		iv := wire.NewInvVect(wire.InvTypeBlock, block.Hash())
  1408  		sm.peerNotifier.RelayInventory(iv, block.MsgBlock().Header)
  1409  
  1410  	// A block has been connected to the main block chain.
  1411  	case blockchain.NTBlockConnected:
  1412  		block, ok := notification.Data.(*palcutil.Block)
  1413  		if !ok {
  1414  			log.Warnf("Chain connected notification is not a block.")
  1415  			break
  1416  		}
  1417  
  1418  		// Remove all of the transactions (except the coinbase) in the
  1419  		// connected block from the transaction pool.  Secondly, remove any
  1420  		// transactions which are now double spends as a result of these
  1421  		// new transactions.  Finally, remove any transaction that is
  1422  		// no longer an orphan. Transactions which depend on a confirmed
  1423  		// transaction are NOT removed recursively because they are still
  1424  		// valid.
  1425  		for _, tx := range block.Transactions()[1:] {
  1426  			sm.txMemPool.RemoveTransaction(tx, false)
  1427  			sm.txMemPool.RemoveDoubleSpends(tx)
  1428  			sm.txMemPool.RemoveOrphan(tx)
  1429  			sm.peerNotifier.TransactionConfirmed(tx)
  1430  			acceptedTxs := sm.txMemPool.ProcessOrphans(tx)
  1431  			sm.peerNotifier.AnnounceNewTransactions(acceptedTxs)
  1432  		}
  1433  
  1434  		// Register block with the fee estimator, if it exists.
  1435  		if sm.feeEstimator != nil {
  1436  			err := sm.feeEstimator.RegisterBlock(block)
  1437  
  1438  			// If an error is somehow generated then the fee estimator
  1439  			// has entered an invalid state. Since it doesn't know how
  1440  			// to recover, create a new one.
  1441  			if err != nil {
  1442  				sm.feeEstimator = mempool.NewFeeEstimator(
  1443  					mempool.DefaultEstimateFeeMaxRollback,
  1444  					mempool.DefaultEstimateFeeMinRegisteredBlocks)
  1445  			}
  1446  		}
  1447  
  1448  	// A block has been disconnected from the main block chain.
  1449  	case blockchain.NTBlockDisconnected:
  1450  		block, ok := notification.Data.(*palcutil.Block)
  1451  		if !ok {
  1452  			log.Warnf("Chain disconnected notification is not a block.")
  1453  			break
  1454  		}
  1455  
  1456  		// Reinsert all of the transactions (except the coinbase) into
  1457  		// the transaction pool.
  1458  		for _, tx := range block.Transactions()[1:] {
  1459  			_, _, err := sm.txMemPool.MaybeAcceptTransaction(tx,
  1460  				false, false)
  1461  			if err != nil {
  1462  				// Remove the transaction and all transactions
  1463  				// that depend on it if it wasn't accepted into
  1464  				// the transaction pool.
  1465  				sm.txMemPool.RemoveTransaction(tx, true)
  1466  			}
  1467  		}
  1468  
  1469  		// Rollback previous block recorded by the fee estimator.
  1470  		if sm.feeEstimator != nil {
  1471  			sm.feeEstimator.Rollback(block.Hash())
  1472  		}
  1473  	}
  1474  }

// NewPeer informs the sync manager of a newly active peer.
func (sm *SyncManager) NewPeer(peer *peerpkg.Peer) {
	// Ignore if we are shutting down.
	if atomic.LoadInt32(&sm.shutdown) != 0 {
		return
	}
	sm.msgChan <- &newPeerMsg{peer: peer}
}

// QueueTx adds the passed transaction message and peer to the block handling
// queue. Responds to the done channel argument after the tx message is
// processed.
func (sm *SyncManager) QueueTx(tx *palcutil.Tx, peer *peerpkg.Peer, done chan struct{}) {
	// Don't accept more transactions if we're shutting down.
	if atomic.LoadInt32(&sm.shutdown) != 0 {
		done <- struct{}{}
		return
	}

	sm.msgChan <- &txMsg{tx: tx, peer: peer, reply: done}
}

// QueueBlock adds the passed block message and peer to the block handling
// queue. Responds to the done channel argument after the block message is
// processed.
func (sm *SyncManager) QueueBlock(block *palcutil.Block, peer *peerpkg.Peer, done chan struct{}) {
	// Don't accept more blocks if we're shutting down.
	if atomic.LoadInt32(&sm.shutdown) != 0 {
		done <- struct{}{}
		return
	}

	sm.msgChan <- &blockMsg{block: block, peer: peer, reply: done}
}
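
// queueBlockAndWait is an editorial sketch (not part of the original file)
// showing the calling convention QueueTx and QueueBlock expect: the caller
// supplies a done channel and blocks until the sync manager has finished
// processing the message, which applies back-pressure to the peer's read
// loop.
func queueBlockAndWait(sm *SyncManager, block *palcutil.Block, p *peerpkg.Peer) {
	done := make(chan struct{})
	sm.QueueBlock(block, p, done)

	// Block until the sync manager signals that processing is complete.
	<-done
}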

// QueueInv adds the passed inv message and peer to the block handling queue.
func (sm *SyncManager) QueueInv(inv *wire.MsgInv, peer *peerpkg.Peer) {
	// No channel handling here because peers do not need to block on inv
	// messages.
	if atomic.LoadInt32(&sm.shutdown) != 0 {
		return
	}

	sm.msgChan <- &invMsg{inv: inv, peer: peer}
}

// QueueHeaders adds the passed headers message and peer to the block handling
// queue.
func (sm *SyncManager) QueueHeaders(headers *wire.MsgHeaders, peer *peerpkg.Peer) {
	// No channel handling here because peers do not need to block on
	// headers messages.
	if atomic.LoadInt32(&sm.shutdown) != 0 {
		return
	}

	sm.msgChan <- &headersMsg{headers: headers, peer: peer}
}

// QueueNotFound adds the passed notfound message and peer to the block handling
// queue.
func (sm *SyncManager) QueueNotFound(notFound *wire.MsgNotFound, peer *peerpkg.Peer) {
	// No channel handling here because peers do not need to block on
	// notfound messages.
	if atomic.LoadInt32(&sm.shutdown) != 0 {
		return
	}

	sm.msgChan <- &notFoundMsg{notFound: notFound, peer: peer}
}

// DonePeer informs the sync manager that a peer has disconnected.
func (sm *SyncManager) DonePeer(peer *peerpkg.Peer) {
	// Ignore if we are shutting down.
	if atomic.LoadInt32(&sm.shutdown) != 0 {
		return
	}

	sm.msgChan <- &donePeerMsg{peer: peer}
}

// Start begins the core block handler which processes block and inv messages.
func (sm *SyncManager) Start() {
	// Already started?
	if atomic.AddInt32(&sm.started, 1) != 1 {
		return
	}

	log.Trace("Starting sync manager")
	sm.wg.Add(1)
	go sm.blockHandler()
}

// Stop gracefully shuts down the sync manager by stopping all asynchronous
// handlers and waiting for them to finish.
func (sm *SyncManager) Stop() error {
	if atomic.AddInt32(&sm.shutdown, 1) != 1 {
		log.Warnf("Sync manager is already in the process of " +
			"shutting down")
		return nil
	}

	log.Infof("Sync manager shutting down")
	close(sm.quit)
	sm.wg.Wait()
	return nil
}
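
// runSyncManagerSketch is an editorial sketch (not part of the original file)
// of the expected lifecycle: Start launches the blockHandler goroutine
// exactly once, and Stop closes the quit channel and waits for the handler
// to drain before returning.  The shutdown channel is a placeholder for the
// caller's own termination signal.
func runSyncManagerSketch(sm *SyncManager, shutdown <-chan struct{}) error {
	sm.Start()

	// Wait for an external shutdown signal, then stop gracefully.
	<-shutdown
	return sm.Stop()
}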

// SyncPeerID returns the ID of the current sync peer, or 0 if there is none.
func (sm *SyncManager) SyncPeerID() int32 {
	reply := make(chan int32)
	sm.msgChan <- getSyncPeerMsg{reply: reply}
	return <-reply
}

// ProcessBlock submits the passed block to the internal block chain instance
// for processing by the block handler goroutine and returns whether the block
// is an orphan, along with any processing error.
func (sm *SyncManager) ProcessBlock(block *palcutil.Block, flags blockchain.BehaviorFlags) (bool, error) {
	reply := make(chan processBlockResponse, 1)
	sm.msgChan <- processBlockMsg{block: block, flags: flags, reply: reply}
	response := <-reply
	return response.isOrphan, response.err
}
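
// processLocalBlockSketch is an editorial sketch (not part of the original
// file) of how a caller such as an RPC server might use ProcessBlock for a
// locally submitted block; blockchain.BFNone requests full validation.
func processLocalBlockSketch(sm *SyncManager, block *palcutil.Block) error {
	isOrphan, err := sm.ProcessBlock(block, blockchain.BFNone)
	if err != nil {
		return err
	}
	if isOrphan {
		// The block was accepted as an orphan; its parent is not yet
		// known to the chain.
		log.Infof("Block %v is an orphan", block.Hash())
	}
	return nil
}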

// IsCurrent returns whether or not the sync manager believes it is synced with
// the connected peers.
func (sm *SyncManager) IsCurrent() bool {
	reply := make(chan bool)
	sm.msgChan <- isCurrentMsg{reply: reply}
	return <-reply
}

// Pause pauses the sync manager until the returned channel is closed.
//
// Note that while paused, all peer and block processing is halted.  The
// message sender should avoid pausing the sync manager for long durations.
func (sm *SyncManager) Pause() chan<- struct{} {
	c := make(chan struct{})
	sm.msgChan <- pauseMsg{c}
	return c
}
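
// withSyncPaused is an editorial sketch (not part of the original file) of
// the intended use of Pause: run a short critical section while the sync
// manager is halted, then close the returned channel to resume it.
func withSyncPaused(sm *SyncManager, criticalSection func()) {
	unpause := sm.Pause()

	// Resume the sync manager even if the critical section panics.
	defer close(unpause)

	criticalSection()
}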

// New constructs a new SyncManager. Use Start to begin processing asynchronous
// block, tx, and inv updates.
func New(config *Config) (*SyncManager, error) {
	sm := SyncManager{
		peerNotifier:    config.PeerNotifier,
		chain:           config.Chain,
		txMemPool:       config.TxMemPool,
		chainParams:     config.ChainParams,
		rejectedTxns:    make(map[chainhash.Hash]struct{}),
		requestedTxns:   make(map[chainhash.Hash]struct{}),
		requestedBlocks: make(map[chainhash.Hash]struct{}),
		peerStates:      make(map[*peerpkg.Peer]*peerSyncState),
		progressLogger:  newBlockProgressLogger("Processed", log),
		msgChan:         make(chan interface{}, config.MaxPeers*3),
		headerList:      list.New(),
		quit:            make(chan struct{}),
		feeEstimator:    config.FeeEstimator,
	}

	best := sm.chain.BestSnapshot()
	if !config.DisableCheckpoints {
		// Initialize the next checkpoint based on the current height.
		sm.nextCheckpoint = sm.findNextHeaderCheckpoint(best.Height)
		if sm.nextCheckpoint != nil {
			sm.resetHeaderState(&best.Hash, best.Height)
		}
	} else {
		log.Info("Checkpoints are disabled")
	}

	sm.chain.Subscribe(sm.handleBlockchainNotification)

	return &sm, nil
}
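
// newSyncManagerSketch is an editorial sketch (not part of the original file)
// showing how a caller might wire up New.  The notifier, chain, txPool, and
// params arguments are placeholders for the caller's own instances, and the
// MaxPeers value of 125 is only an example; as seen above, it sizes the
// buffered message channel.
func newSyncManagerSketch(notifier PeerNotifier, chain *blockchain.BlockChain,
	txPool *mempool.TxPool, params *chaincfg.Params) (*SyncManager, error) {

	return New(&Config{
		PeerNotifier: notifier,
		Chain:        chain,
		TxMemPool:    txPool,
		ChainParams:  params,
		MaxPeers:     125,
	})
}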