github.com/BlockABC/godash@v0.0.0-20191112120524-f4aa3a32c566/blockmanager.go (about)

     1  // Copyright (c) 2013-2016 The btcsuite developers
     2  // Copyright (c) 2016 The Dash developers
     3  // Use of this source code is governed by an ISC
     4  // license that can be found in the LICENSE file.
     5  
     6  package main
     7  
     8  import (
     9  	"container/list"
    10  	"net"
    11  	"os"
    12  	"path/filepath"
    13  	"sync"
    14  	"sync/atomic"
    15  	"time"
    16  
    17  	"github.com/BlockABC/godash/blockchain"
    18  	"github.com/BlockABC/godash/chaincfg"
    19  	"github.com/BlockABC/godash/database"
    20  	"github.com/BlockABC/godash/wire"
    21  	"github.com/BlockABC/godashutil"
    22  )
    23  
const (
	// chanBufferSize is the buffer size used for the block manager's
	// message channel.
	chanBufferSize = 50

	// minInFlightBlocks is the minimum number of blocks that should be
	// in the request queue for headers-first mode before requesting
	// more.
	minInFlightBlocks = 10

	// blockDbNamePrefix is the prefix for the block database name.  The
	// database type is appended to this value to form the full block
	// database name.
	blockDbNamePrefix = "blocks"

	// maxRejectedTxns is the maximum number of rejected transactions
	// shas to store in memory.
	maxRejectedTxns = 1000

	// maxRequestedBlocks is the maximum number of requested block
	// shas to store in memory.
	maxRequestedBlocks = wire.MaxInvPerMsg

	// maxRequestedTxns is the maximum number of requested transactions
	// shas to store in memory.
	maxRequestedTxns = wire.MaxInvPerMsg
)
    49  
// zeroHash is the zero value hash (all zeros).  It is defined as a convenience.
var zeroHash wire.ShaHash

// The following message types are sent across the block manager's message
// channel so that all block/tx/inv/header processing happens on a single
// goroutine.

// newPeerMsg signifies a newly connected peer to the block handler.
type newPeerMsg struct {
	peer *serverPeer
}

// blockMsg packages a bitcoin block message and the peer it came from together
// so the block handler has access to that information.
type blockMsg struct {
	block *godashutil.Block
	peer  *serverPeer
}

// invMsg packages a bitcoin inv message and the peer it came from together
// so the block handler has access to that information.
type invMsg struct {
	inv  *wire.MsgInv
	peer *serverPeer
}

// headersMsg packages a bitcoin headers message and the peer it came from
// together so the block handler has access to that information.
type headersMsg struct {
	headers *wire.MsgHeaders
	peer    *serverPeer
}

// donePeerMsg signifies a newly disconnected peer to the block handler.
type donePeerMsg struct {
	peer *serverPeer
}

// txMsg packages a bitcoin tx message and the peer it came from together
// so the block handler has access to that information.
type txMsg struct {
	tx   *godashutil.Tx
	peer *serverPeer
}
    90  
// getSyncPeerMsg is a message type to be sent across the message channel for
// retrieving the current sync peer.
type getSyncPeerMsg struct {
	reply chan *serverPeer
}

// processBlockResponse is a response sent to the reply channel of a
// processBlockMsg.
type processBlockResponse struct {
	isOrphan bool
	err      error
}

// processBlockMsg is a message type to be sent across the message channel
// for requesting a block be processed.  Note this call differs from blockMsg
// above in that blockMsg is intended for blocks that came from peers and have
// extra handling whereas this message essentially is just a concurrent safe
// way to call ProcessBlock on the internal block chain instance.
type processBlockMsg struct {
	block *godashutil.Block
	flags blockchain.BehaviorFlags
	reply chan processBlockResponse
}

// isCurrentMsg is a message type to be sent across the message channel for
// requesting whether or not the block manager believes it is synced with
// the currently connected peers.
type isCurrentMsg struct {
	reply chan bool
}

// pauseMsg is a message type to be sent across the message channel for
// pausing the block manager.  This effectively provides the caller with
// exclusive access over the manager until a receive is performed on the
// unpause channel.
type pauseMsg struct {
	unpause <-chan struct{}
}
   129  
// headerNode is used as a node in a list of headers that are linked together
// between checkpoints.
type headerNode struct {
	height int32         // block height the header corresponds to
	sha    *wire.ShaHash // hash of the header's block
}
   136  
// chainState tracks the state of the best chain as blocks are inserted.  This
// is done because btcchain is currently not safe for concurrent access and the
// block manager is typically quite busy processing block and inventory.
// Therefore, requesting this information from chain through the block manager
// would not be anywhere near as efficient as simply updating it as each block
// is inserted and protecting it with a mutex.
type chainState struct {
	sync.Mutex                          // guards all fields below
	newestHash        *wire.ShaHash     // hash of the best chain tip
	newestHeight      int32             // height of the best chain tip
	pastMedianTime    time.Time         // cached result of CalcPastMedianTime
	pastMedianTimeErr error             // last error from CalcPastMedianTime, if any
}
   150  
   151  // Best returns the block hash and height known for the tip of the best known
   152  // chain.
   153  //
   154  // This function is safe for concurrent access.
   155  func (c *chainState) Best() (*wire.ShaHash, int32) {
   156  	c.Lock()
   157  	defer c.Unlock()
   158  
   159  	return c.newestHash, c.newestHeight
   160  }
   161  
// blockManager provides a concurrency safe block manager for handling all
// incoming blocks.
type blockManager struct {
	server            *server
	started           int32 // accessed atomically
	shutdown          int32 // accessed atomically
	chain             *blockchain.BlockChain
	rejectedTxns      map[wire.ShaHash]struct{} // txns rejected since the last block
	requestedTxns     map[wire.ShaHash]struct{} // txns requested across all peers
	requestedBlocks   map[wire.ShaHash]struct{} // blocks requested across all peers
	progressLogger    *blockProgressLogger
	receivedLogBlocks int64
	receivedLogTx     int64
	processingReqs    bool
	syncPeer          *serverPeer      // peer currently being synced from, if any
	msgChan           chan interface{} // incoming message channel (see message types above)
	chainState        chainState
	wg                sync.WaitGroup
	quit              chan struct{}

	// The following fields are used for headers-first mode.
	headersFirstMode bool
	headerList       *list.List
	startHeader      *list.Element
	nextCheckpoint   *chaincfg.Checkpoint
}
   188  
   189  // resetHeaderState sets the headers-first mode state to values appropriate for
   190  // syncing from a new peer.
   191  func (b *blockManager) resetHeaderState(newestHash *wire.ShaHash, newestHeight int32) {
   192  	b.headersFirstMode = false
   193  	b.headerList.Init()
   194  	b.startHeader = nil
   195  
   196  	// When there is a next checkpoint, add an entry for the latest known
   197  	// block into the header pool.  This allows the next downloaded header
   198  	// to prove it links to the chain properly.
   199  	if b.nextCheckpoint != nil {
   200  		node := headerNode{height: newestHeight, sha: newestHash}
   201  		b.headerList.PushBack(&node)
   202  	}
   203  }
   204  
   205  // updateChainState updates the chain state associated with the block manager.
   206  // This allows fast access to chain information since btcchain is currently not
   207  // safe for concurrent access and the block manager is typically quite busy
   208  // processing block and inventory.
   209  func (b *blockManager) updateChainState(newestHash *wire.ShaHash, newestHeight int32) {
   210  	b.chainState.Lock()
   211  	defer b.chainState.Unlock()
   212  
   213  	b.chainState.newestHash = newestHash
   214  	b.chainState.newestHeight = newestHeight
   215  	medianTime, err := b.chain.CalcPastMedianTime()
   216  	if err != nil {
   217  		b.chainState.pastMedianTimeErr = err
   218  	} else {
   219  		b.chainState.pastMedianTime = medianTime
   220  	}
   221  }
   222  
   223  // findNextHeaderCheckpoint returns the next checkpoint after the passed height.
   224  // It returns nil when there is not one either because the height is already
   225  // later than the final checkpoint or some other reason such as disabled
   226  // checkpoints.
   227  func (b *blockManager) findNextHeaderCheckpoint(height int32) *chaincfg.Checkpoint {
   228  	// There is no next checkpoint if checkpoints are disabled or there are
   229  	// none for this current network.
   230  	if cfg.DisableCheckpoints {
   231  		return nil
   232  	}
   233  	checkpoints := b.server.chainParams.Checkpoints
   234  	if len(checkpoints) == 0 {
   235  		return nil
   236  	}
   237  
   238  	// There is no next checkpoint if the height is already after the final
   239  	// checkpoint.
   240  	finalCheckpoint := &checkpoints[len(checkpoints)-1]
   241  	if height >= finalCheckpoint.Height {
   242  		return nil
   243  	}
   244  
   245  	// Find the next checkpoint.
   246  	nextCheckpoint := finalCheckpoint
   247  	for i := len(checkpoints) - 2; i >= 0; i-- {
   248  		if height >= checkpoints[i].Height {
   249  			break
   250  		}
   251  		nextCheckpoint = &checkpoints[i]
   252  	}
   253  	return nextCheckpoint
   254  }
   255  
// startSync will choose the best peer among the available candidate peers to
// download/sync the blockchain from.  When syncing is already running, it
// simply returns.  It also examines the candidates for any which are no longer
// candidates and removes them as needed.
func (b *blockManager) startSync(peers *list.List) {
	// Return now if we're already syncing.
	if b.syncPeer != nil {
		return
	}

	best := b.chain.BestSnapshot()
	var bestPeer *serverPeer
	// The next element is captured before any removal so iteration stays
	// valid when the current element is removed from the list.
	var enext *list.Element
	for e := peers.Front(); e != nil; e = enext {
		enext = e.Next()
		sp := e.Value.(*serverPeer)

		// Remove sync candidate peers that are no longer candidates due
		// to passing their latest known block.  NOTE: The < is
		// intentional as opposed to <=.  While technically the peer
		// doesn't have a later block when it's equal, it will likely
		// have one soon so it is a reasonable choice.  It also allows
		// the case where both are at 0 such as during regression test.
		if sp.LastBlock() < best.Height {
			peers.Remove(e)
			continue
		}

		// TODO(davec): Use a better algorithm to choose the best peer.
		// For now, the loop overwrites bestPeer on each eligible
		// candidate, so the last one examined is used.
		bestPeer = sp
	}

	// Start syncing from the best peer if one was selected.
	if bestPeer != nil {
		// Clear the requestedBlocks if the sync peer changes, otherwise
		// we may ignore blocks we need that the last sync peer failed
		// to send.
		b.requestedBlocks = make(map[wire.ShaHash]struct{})

		locator, err := b.chain.LatestBlockLocator()
		if err != nil {
			bmgrLog.Errorf("Failed to get block locator for the "+
				"latest block: %v", err)
			return
		}

		bmgrLog.Infof("Syncing to block height %d from peer %v",
			bestPeer.LastBlock(), bestPeer.Addr())

		// When the current height is less than a known checkpoint we
		// can use block headers to learn about which blocks comprise
		// the chain up to the checkpoint and perform less validation
		// for them.  This is possible since each header contains the
		// hash of the previous header and a merkle root.  Therefore if
		// we validate all of the received headers link together
		// properly and the checkpoint hashes match, we can be sure the
		// hashes for the blocks in between are accurate.  Further, once
		// the full blocks are downloaded, the merkle root is computed
		// and compared against the value in the header which proves the
		// full block hasn't been tampered with.
		//
		// Once we have passed the final checkpoint, or checkpoints are
		// disabled, use standard inv messages to learn about the blocks
		// and fully validate them.  Finally, regression test mode does
		// not support the headers-first approach so do normal block
		// downloads when in regression test mode.
		if b.nextCheckpoint != nil &&
			best.Height < b.nextCheckpoint.Height &&
			!cfg.RegressionTest && !cfg.DisableCheckpoints {

			bestPeer.PushGetHeadersMsg(locator, b.nextCheckpoint.Hash)
			b.headersFirstMode = true
			bmgrLog.Infof("Downloading headers for blocks %d to "+
				"%d from peer %s", best.Height+1,
				b.nextCheckpoint.Height, bestPeer.Addr())
		} else {
			bestPeer.PushGetBlocksMsg(locator, &zeroHash)
		}
		b.syncPeer = bestPeer
	} else {
		bmgrLog.Warnf("No sync peer candidates available")
	}
}
   340  
   341  // isSyncCandidate returns whether or not the peer is a candidate to consider
   342  // syncing from.
   343  func (b *blockManager) isSyncCandidate(sp *serverPeer) bool {
   344  	// Typically a peer is not a candidate for sync if it's not a full node,
   345  	// however regression test is special in that the regression tool is
   346  	// not a full node and still needs to be considered a sync candidate.
   347  	if cfg.RegressionTest {
   348  		// The peer is not a candidate if it's not coming from localhost
   349  		// or the hostname can't be determined for some reason.
   350  		host, _, err := net.SplitHostPort(sp.Addr())
   351  		if err != nil {
   352  			return false
   353  		}
   354  
   355  		if host != "127.0.0.1" && host != "localhost" {
   356  			return false
   357  		}
   358  	} else {
   359  		// The peer is not a candidate for sync if it's not a full node.
   360  		if sp.Services()&wire.SFNodeNetwork != wire.SFNodeNetwork {
   361  			return false
   362  		}
   363  	}
   364  
   365  	// Candidate if all checks passed.
   366  	return true
   367  }
   368  
   369  // handleNewPeerMsg deals with new peers that have signalled they may
   370  // be considered as a sync peer (they have already successfully negotiated).  It
   371  // also starts syncing if needed.  It is invoked from the syncHandler goroutine.
   372  func (b *blockManager) handleNewPeerMsg(peers *list.List, sp *serverPeer) {
   373  	// Ignore if in the process of shutting down.
   374  	if atomic.LoadInt32(&b.shutdown) != 0 {
   375  		return
   376  	}
   377  
   378  	bmgrLog.Infof("New valid peer %s (%s)", sp, sp.UserAgent())
   379  
   380  	// Ignore the peer if it's not a sync candidate.
   381  	if !b.isSyncCandidate(sp) {
   382  		return
   383  	}
   384  
   385  	// Add the peer as a candidate to sync from.
   386  	peers.PushBack(sp)
   387  
   388  	// Start syncing by choosing the best candidate if needed.
   389  	b.startSync(peers)
   390  }
   391  
// handleDonePeerMsg deals with peers that have signalled they are done.  It
// removes the peer as a candidate for syncing and in the case where it was
// the current sync peer, attempts to select a new best peer to sync from.  It
// is invoked from the syncHandler goroutine.
func (b *blockManager) handleDonePeerMsg(peers *list.List, sp *serverPeer) {
	// Remove the peer from the list of candidate peers.
	for e := peers.Front(); e != nil; e = e.Next() {
		if e.Value == sp {
			peers.Remove(e)
			break
		}
	}

	bmgrLog.Infof("Lost peer %s", sp)

	// Remove requested transactions from the global map so that they will
	// be fetched from elsewhere next time we get an inv.
	for k := range sp.requestedTxns {
		delete(b.requestedTxns, k)
	}

	// Remove requested blocks from the global map so that they will be
	// fetched from elsewhere next time we get an inv.
	// TODO(oga) we could possibly here check which peers have these blocks
	// and request them now to speed things up a little.
	for k := range sp.requestedBlocks {
		delete(b.requestedBlocks, k)
	}

	// Attempt to find a new peer to sync from if the quitting peer is the
	// sync peer.  Also, reset the headers-first state if in headers-first
	// mode so the header list can be rebuilt from the new sync peer.
	if b.syncPeer != nil && b.syncPeer == sp {
		b.syncPeer = nil
		if b.headersFirstMode {
			best := b.chain.BestSnapshot()
			b.resetHeaderState(best.Hash, best.Height)
		}
		b.startSync(peers)
	}
}
   433  
// handleTxMsg handles transaction messages from all peers.  It runs the
// transaction through the mempool, tracks rejected transactions so they are
// not re-requested, and announces accepted transactions to the rest of the
// server.
func (b *blockManager) handleTxMsg(tmsg *txMsg) {
	// NOTE:  BitcoinJ, and possibly other wallets, don't follow the spec of
	// sending an inventory message and allowing the remote peer to decide
	// whether or not they want to request the transaction via a getdata
	// message.  Unfortunately, the reference implementation permits
	// unrequested data, so it has allowed wallets that don't follow the
	// spec to proliferate.  While this is not ideal, there is no check here
	// to disconnect peers for sending unsolicited transactions to provide
	// interoperability.
	txHash := tmsg.tx.Sha()

	// Ignore transactions that we have already rejected.  Do not
	// send a reject message here because if the transaction was already
	// rejected, the transaction was unsolicited.
	if _, exists := b.rejectedTxns[*txHash]; exists {
		bmgrLog.Debugf("Ignoring unsolicited previously rejected "+
			"transaction %v from %s", txHash, tmsg.peer)
		return
	}

	// Process the transaction to include validation, insertion in the
	// memory pool, orphan handling, etc.
	allowOrphans := cfg.MaxOrphanTxs > 0
	acceptedTxs, err := b.server.txMemPool.ProcessTransaction(tmsg.tx,
		allowOrphans, true)

	// Remove transaction from request maps. Either the mempool/chain
	// already knows about it and as such we shouldn't have any more
	// instances of trying to fetch it, or we failed to insert and thus
	// we'll retry next time we get an inv.
	delete(tmsg.peer.requestedTxns, *txHash)
	delete(b.requestedTxns, *txHash)

	if err != nil {
		// Do not request this transaction again until a new block
		// has been processed.  The rejected map is bounded so it
		// cannot grow without limit.
		b.rejectedTxns[*txHash] = struct{}{}
		b.limitMap(b.rejectedTxns, maxRejectedTxns)

		// When the error is a rule error, it means the transaction was
		// simply rejected as opposed to something actually going wrong,
		// so log it as such.  Otherwise, something really did go wrong,
		// so log it as an actual error.
		if _, ok := err.(RuleError); ok {
			bmgrLog.Debugf("Rejected transaction %v from %s: %v",
				txHash, tmsg.peer, err)
		} else {
			bmgrLog.Errorf("Failed to process transaction %v: %v",
				txHash, err)
		}

		// Convert the error into an appropriate reject message and
		// send it.
		code, reason := errToRejectErr(err)
		tmsg.peer.PushRejectMsg(wire.CmdTx, code, reason, txHash,
			false)
		return
	}

	b.server.AnnounceNewTransactions(acceptedTxs)
}
   496  
   497  // current returns true if we believe we are synced with our peers, false if we
   498  // still have blocks to check
   499  func (b *blockManager) current() bool {
   500  	if !b.chain.IsCurrent() {
   501  		return false
   502  	}
   503  
   504  	// if blockChain thinks we are current and we have no syncPeer it
   505  	// is probably right.
   506  	if b.syncPeer == nil {
   507  		return true
   508  	}
   509  
   510  	// No matter what chain thinks, if we are below the block we are syncing
   511  	// to we are not current.
   512  	if b.chain.BestSnapshot().Height < b.syncPeer.LastBlock() {
   513  		return false
   514  	}
   515  	return true
   516  }
   517  
   518  // handleBlockMsg handles block messages from all peers.
   519  func (b *blockManager) handleBlockMsg(bmsg *blockMsg) {
   520  	// If we didn't ask for this block then the peer is misbehaving.
   521  	blockSha := bmsg.block.Sha()
   522  	if _, exists := bmsg.peer.requestedBlocks[*blockSha]; !exists {
   523  		// The regression test intentionally sends some blocks twice
   524  		// to test duplicate block insertion fails.  Don't disconnect
   525  		// the peer or ignore the block when we're in regression test
   526  		// mode in this case so the chain code is actually fed the
   527  		// duplicate blocks.
   528  		if !cfg.RegressionTest {
   529  			bmgrLog.Warnf("Got unrequested block %v from %s -- "+
   530  				"disconnecting", blockSha, bmsg.peer.Addr())
   531  			bmsg.peer.Disconnect()
   532  			return
   533  		}
   534  	}
   535  
   536  	// When in headers-first mode, if the block matches the hash of the
   537  	// first header in the list of headers that are being fetched, it's
   538  	// eligible for less validation since the headers have already been
   539  	// verified to link together and are valid up to the next checkpoint.
   540  	// Also, remove the list entry for all blocks except the checkpoint
   541  	// since it is needed to verify the next round of headers links
   542  	// properly.
   543  	isCheckpointBlock := false
   544  	behaviorFlags := blockchain.BFNone
   545  	if b.headersFirstMode {
   546  		firstNodeEl := b.headerList.Front()
   547  		if firstNodeEl != nil {
   548  			firstNode := firstNodeEl.Value.(*headerNode)
   549  			if blockSha.IsEqual(firstNode.sha) {
   550  				behaviorFlags |= blockchain.BFFastAdd
   551  				if firstNode.sha.IsEqual(b.nextCheckpoint.Hash) {
   552  					isCheckpointBlock = true
   553  				} else {
   554  					b.headerList.Remove(firstNodeEl)
   555  				}
   556  			}
   557  		}
   558  	}
   559  
   560  	// Remove block from request maps. Either chain will know about it and
   561  	// so we shouldn't have any more instances of trying to fetch it, or we
   562  	// will fail the insert and thus we'll retry next time we get an inv.
   563  	delete(bmsg.peer.requestedBlocks, *blockSha)
   564  	delete(b.requestedBlocks, *blockSha)
   565  
   566  	// Process the block to include validation, best chain selection, orphan
   567  	// handling, etc.
   568  	isOrphan, err := b.chain.ProcessBlock(bmsg.block, behaviorFlags)
   569  	if err != nil {
   570  		// When the error is a rule error, it means the block was simply
   571  		// rejected as opposed to something actually going wrong, so log
   572  		// it as such.  Otherwise, something really did go wrong, so log
   573  		// it as an actual error.
   574  		if _, ok := err.(blockchain.RuleError); ok {
   575  			bmgrLog.Infof("Rejected block %v from %s: %v", blockSha,
   576  				bmsg.peer, err)
   577  		} else {
   578  			bmgrLog.Errorf("Failed to process block %v: %v",
   579  				blockSha, err)
   580  		}
   581  		if dbErr, ok := err.(database.Error); ok && dbErr.ErrorCode ==
   582  			database.ErrCorruption {
   583  			panic(dbErr)
   584  		}
   585  
   586  		// Convert the error into an appropriate reject message and
   587  		// send it.
   588  		code, reason := errToRejectErr(err)
   589  		bmsg.peer.PushRejectMsg(wire.CmdBlock, code, reason,
   590  			blockSha, false)
   591  		return
   592  	}
   593  
   594  	// Meta-data about the new block this peer is reporting. We use this
   595  	// below to update this peer's lastest block height and the heights of
   596  	// other peers based on their last announced block sha. This allows us
   597  	// to dynamically update the block heights of peers, avoiding stale heights
   598  	// when looking for a new sync peer. Upon acceptance of a block or
   599  	// recognition of an orphan, we also use this information to update
   600  	// the block heights over other peers who's invs may have been ignored
   601  	// if we are actively syncing while the chain is not yet current or
   602  	// who may have lost the lock announcment race.
   603  	var heightUpdate int32
   604  	var blkShaUpdate *wire.ShaHash
   605  
   606  	// Request the parents for the orphan block from the peer that sent it.
   607  	if isOrphan {
   608  		// We've just received an orphan block from a peer. In order
   609  		// to update the height of the peer, we try to extract the
   610  		// block height from the scriptSig of the coinbase transaction.
   611  		// Extraction is only attempted if the block's version is
   612  		// high enough (ver 2+).
   613  		header := &bmsg.block.MsgBlock().Header
   614  		if blockchain.ShouldHaveSerializedBlockHeight(header) {
   615  			coinbaseTx := bmsg.block.Transactions()[0]
   616  			cbHeight, err := blockchain.ExtractCoinbaseHeight(coinbaseTx)
   617  			if err != nil {
   618  				bmgrLog.Warnf("Unable to extract height from "+
   619  					"coinbase tx: %v", err)
   620  			} else {
   621  				bmgrLog.Debugf("Extracted height of %v from "+
   622  					"orphan block", cbHeight)
   623  				heightUpdate = int32(cbHeight)
   624  				blkShaUpdate = blockSha
   625  			}
   626  		}
   627  
   628  		orphanRoot := b.chain.GetOrphanRoot(blockSha)
   629  		locator, err := b.chain.LatestBlockLocator()
   630  		if err != nil {
   631  			bmgrLog.Warnf("Failed to get block locator for the "+
   632  				"latest block: %v", err)
   633  		} else {
   634  			bmsg.peer.PushGetBlocksMsg(locator, orphanRoot)
   635  		}
   636  	} else {
   637  		// When the block is not an orphan, log information about it and
   638  		// update the chain state.
   639  		b.progressLogger.LogBlockHeight(bmsg.block)
   640  
   641  		// Query the chain for the latest best block since the block
   642  		// that was processed could be on a side chain or have caused
   643  		// a reorg.
   644  		best := b.chain.BestSnapshot()
   645  		b.updateChainState(best.Hash, best.Height)
   646  
   647  		// Update this peer's latest block height, for future
   648  		// potential sync node candidacy.
   649  		heightUpdate = best.Height
   650  		blkShaUpdate = best.Hash
   651  
   652  		// Clear the rejected transactions.
   653  		b.rejectedTxns = make(map[wire.ShaHash]struct{})
   654  
   655  		// Allow any clients performing long polling via the
   656  		// getblocktemplate RPC to be notified when the new block causes
   657  		// their old block template to become stale.
   658  		rpcServer := b.server.rpcServer
   659  		if rpcServer != nil {
   660  			rpcServer.gbtWorkState.NotifyBlockConnected(blockSha)
   661  		}
   662  	}
   663  
   664  	// Update the block height for this peer. But only send a message to
   665  	// the server for updating peer heights if this is an orphan or our
   666  	// chain is "current". This avoids sending a spammy amount of messages
   667  	// if we're syncing the chain from scratch.
   668  	if blkShaUpdate != nil && heightUpdate != 0 {
   669  		bmsg.peer.UpdateLastBlockHeight(heightUpdate)
   670  		if isOrphan || b.current() {
   671  			go b.server.UpdatePeerHeights(blkShaUpdate, int32(heightUpdate), bmsg.peer)
   672  		}
   673  	}
   674  
   675  	// Nothing more to do if we aren't in headers-first mode.
   676  	if !b.headersFirstMode {
   677  		return
   678  	}
   679  
   680  	// This is headers-first mode, so if the block is not a checkpoint
   681  	// request more blocks using the header list when the request queue is
   682  	// getting short.
   683  	if !isCheckpointBlock {
   684  		if b.startHeader != nil &&
   685  			len(bmsg.peer.requestedBlocks) < minInFlightBlocks {
   686  			b.fetchHeaderBlocks()
   687  		}
   688  		return
   689  	}
   690  
   691  	// This is headers-first mode and the block is a checkpoint.  When
   692  	// there is a next checkpoint, get the next round of headers by asking
   693  	// for headers starting from the block after this one up to the next
   694  	// checkpoint.
   695  	prevHeight := b.nextCheckpoint.Height
   696  	prevHash := b.nextCheckpoint.Hash
   697  	b.nextCheckpoint = b.findNextHeaderCheckpoint(prevHeight)
   698  	if b.nextCheckpoint != nil {
   699  		locator := blockchain.BlockLocator([]*wire.ShaHash{prevHash})
   700  		err := bmsg.peer.PushGetHeadersMsg(locator, b.nextCheckpoint.Hash)
   701  		if err != nil {
   702  			bmgrLog.Warnf("Failed to send getheaders message to "+
   703  				"peer %s: %v", bmsg.peer.Addr(), err)
   704  			return
   705  		}
   706  		bmgrLog.Infof("Downloading headers for blocks %d to %d from "+
   707  			"peer %s", prevHeight+1, b.nextCheckpoint.Height,
   708  			b.syncPeer.Addr())
   709  		return
   710  	}
   711  
   712  	// This is headers-first mode, the block is a checkpoint, and there are
   713  	// no more checkpoints, so switch to normal mode by requesting blocks
   714  	// from the block after this one up to the end of the chain (zero hash).
   715  	b.headersFirstMode = false
   716  	b.headerList.Init()
   717  	bmgrLog.Infof("Reached the final checkpoint -- switching to normal mode")
   718  	locator := blockchain.BlockLocator([]*wire.ShaHash{blockSha})
   719  	err = bmsg.peer.PushGetBlocksMsg(locator, &zeroHash)
   720  	if err != nil {
   721  		bmgrLog.Warnf("Failed to send getblocks message to peer %s: %v",
   722  			bmsg.peer.Addr(), err)
   723  		return
   724  	}
   725  }
   726  
// fetchHeaderBlocks creates and sends a request to the syncPeer for the next
// list of blocks to be downloaded based on the current list of headers.
func (b *blockManager) fetchHeaderBlocks() {
	// Nothing to do if there is no start header.
	if b.startHeader == nil {
		bmgrLog.Warnf("fetchHeaderBlocks called with no start header")
		return
	}

	// Build up a getdata request for the list of blocks the headers
	// describe.  The size hint will be limited to wire.MaxInvPerMsg by
	// the function, so no need to double check it here.
	gdmsg := wire.NewMsgGetDataSizeHint(uint(b.headerList.Len()))
	numRequested := 0
	for e := b.startHeader; e != nil; e = e.Next() {
		node, ok := e.Value.(*headerNode)
		if !ok {
			bmgrLog.Warn("Header list node type is not a headerNode")
			continue
		}

		iv := wire.NewInvVect(wire.InvTypeBlock, node.sha)
		haveInv, err := b.haveInventory(iv)
		if err != nil {
			bmgrLog.Warnf("Unexpected failure when checking for "+
				"existing inventory during header block "+
				"fetch: %v", err)
		}
		// NOTE: When haveInventory fails, haveInv is false, so the
		// block is requested anyway as a best-effort fallback.
		if !haveInv {
			b.requestedBlocks[*node.sha] = struct{}{}
			b.syncPeer.requestedBlocks[*node.sha] = struct{}{}
			gdmsg.AddInvVect(iv)
			numRequested++
		}
		// Advance past this header so subsequent calls continue from
		// the next unrequested block.
		b.startHeader = e.Next()
		if numRequested >= wire.MaxInvPerMsg {
			break
		}
	}
	if len(gdmsg.InvList) > 0 {
		b.syncPeer.QueueMessage(gdmsg, nil)
	}
}
   770  
   771  // handleHeadersMsghandles headers messages from all peers.
   772  func (b *blockManager) handleHeadersMsg(hmsg *headersMsg) {
   773  	// The remote peer is misbehaving if we didn't request headers.
   774  	msg := hmsg.headers
   775  	numHeaders := len(msg.Headers)
   776  	if !b.headersFirstMode {
   777  		bmgrLog.Warnf("Got %d unrequested headers from %s -- "+
   778  			"disconnecting", numHeaders, hmsg.peer.Addr())
   779  		hmsg.peer.Disconnect()
   780  		return
   781  	}
   782  
   783  	// Nothing to do for an empty headers message.
   784  	if numHeaders == 0 {
   785  		return
   786  	}
   787  
   788  	// Process all of the received headers ensuring each one connects to the
   789  	// previous and that checkpoints match.
   790  	receivedCheckpoint := false
   791  	var finalHash *wire.ShaHash
   792  	for _, blockHeader := range msg.Headers {
   793  		blockHash := blockHeader.BlockSha()
   794  		finalHash = &blockHash
   795  
   796  		// Ensure there is a previous header to compare against.
   797  		prevNodeEl := b.headerList.Back()
   798  		if prevNodeEl == nil {
   799  			bmgrLog.Warnf("Header list does not contain a previous" +
   800  				"element as expected -- disconnecting peer")
   801  			hmsg.peer.Disconnect()
   802  			return
   803  		}
   804  
   805  		// Ensure the header properly connects to the previous one and
   806  		// add it to the list of headers.
   807  		node := headerNode{sha: &blockHash}
   808  		prevNode := prevNodeEl.Value.(*headerNode)
   809  		if prevNode.sha.IsEqual(&blockHeader.PrevBlock) {
   810  			node.height = prevNode.height + 1
   811  			e := b.headerList.PushBack(&node)
   812  			if b.startHeader == nil {
   813  				b.startHeader = e
   814  			}
   815  		} else {
   816  			bmgrLog.Warnf("Received block header that does not "+
   817  				"properly connect to the chain from peer %s "+
   818  				"-- disconnecting", hmsg.peer.Addr())
   819  			hmsg.peer.Disconnect()
   820  			return
   821  		}
   822  
   823  		// Verify the header at the next checkpoint height matches.
   824  		if node.height == b.nextCheckpoint.Height {
   825  			if node.sha.IsEqual(b.nextCheckpoint.Hash) {
   826  				receivedCheckpoint = true
   827  				bmgrLog.Infof("Verified downloaded block "+
   828  					"header against checkpoint at height "+
   829  					"%d/hash %s", node.height, node.sha)
   830  			} else {
   831  				bmgrLog.Warnf("Block header at height %d/hash "+
   832  					"%s from peer %s does NOT match "+
   833  					"expected checkpoint hash of %s -- "+
   834  					"disconnecting", node.height,
   835  					node.sha, hmsg.peer.Addr(),
   836  					b.nextCheckpoint.Hash)
   837  				hmsg.peer.Disconnect()
   838  				return
   839  			}
   840  			break
   841  		}
   842  	}
   843  
   844  	// When this header is a checkpoint, switch to fetching the blocks for
   845  	// all of the headers since the last checkpoint.
   846  	if receivedCheckpoint {
   847  		// Since the first entry of the list is always the final block
   848  		// that is already in the database and is only used to ensure
   849  		// the next header links properly, it must be removed before
   850  		// fetching the blocks.
   851  		b.headerList.Remove(b.headerList.Front())
   852  		bmgrLog.Infof("Received %v block headers: Fetching blocks",
   853  			b.headerList.Len())
   854  		b.progressLogger.SetLastLogTime(time.Now())
   855  		b.fetchHeaderBlocks()
   856  		return
   857  	}
   858  
   859  	// This header is not a checkpoint, so request the next batch of
   860  	// headers starting from the latest known header and ending with the
   861  	// next checkpoint.
   862  	locator := blockchain.BlockLocator([]*wire.ShaHash{finalHash})
   863  	err := hmsg.peer.PushGetHeadersMsg(locator, b.nextCheckpoint.Hash)
   864  	if err != nil {
   865  		bmgrLog.Warnf("Failed to send getheaders message to "+
   866  			"peer %s: %v", hmsg.peer.Addr(), err)
   867  		return
   868  	}
   869  }
   870  
   871  // haveInventory returns whether or not the inventory represented by the passed
   872  // inventory vector is known.  This includes checking all of the various places
   873  // inventory can be when it is in different states such as blocks that are part
   874  // of the main chain, on a side chain, in the orphan pool, and transactions that
   875  // are in the memory pool (either the main pool or orphan pool).
   876  func (b *blockManager) haveInventory(invVect *wire.InvVect) (bool, error) {
   877  	switch invVect.Type {
   878  	case wire.InvTypeBlock:
   879  		// Ask chain if the block is known to it in any form (main
   880  		// chain, side chain, or orphan).
   881  		return b.chain.HaveBlock(&invVect.Hash)
   882  
   883  	case wire.InvTypeTx:
   884  		// Ask the transaction memory pool if the transaction is known
   885  		// to it in any form (main pool or orphan).
   886  		if b.server.txMemPool.HaveTransaction(&invVect.Hash) {
   887  			return true, nil
   888  		}
   889  
   890  		// Check if the transaction exists from the point of view of the
   891  		// end of the main chain.
   892  		entry, err := b.chain.FetchUtxoEntry(&invVect.Hash)
   893  		if err != nil {
   894  			return false, err
   895  		}
   896  		return entry != nil && !entry.IsFullySpent(), nil
   897  	}
   898  
   899  	// The requested inventory is is an unsupported type, so just claim
   900  	// it is known to avoid requesting it.
   901  	return true, nil
   902  }
   903  
// handleInvMsg handles inv messages from all peers.
// We examine the inventory advertised by the remote peer and act accordingly.
// Unknown inventory is queued for request; blocks we already have trigger
// either an orphan-parent fetch or a stall-recovery getblocks request.
func (b *blockManager) handleInvMsg(imsg *invMsg) {
	// Attempt to find the final block in the inventory list.  There may
	// not be one.
	lastBlock := -1
	invVects := imsg.inv.InvList
	for i := len(invVects) - 1; i >= 0; i-- {
		if invVects[i].Type == wire.InvTypeBlock {
			lastBlock = i
			break
		}
	}

	// If this inv contains a block announcement, and this isn't coming from
	// our current sync peer or we're current, then update the last
	// announced block for this peer. We'll use this information later to
	// update the heights of peers based on blocks we've accepted that they
	// previously announced.
	if lastBlock != -1 && (imsg.peer != b.syncPeer || b.current()) {
		imsg.peer.UpdateLastAnnouncedBlock(&invVects[lastBlock].Hash)
	}

	// Ignore invs from peers that aren't the sync if we are not current.
	// Helps prevent fetching a mass of orphans.
	if imsg.peer != b.syncPeer && !b.current() {
		return
	}

	// If our chain is current and a peer announces a block we already
	// know of, then update their current block height.
	if lastBlock != -1 && b.current() {
		blkHeight, err := b.chain.BlockHeightByHash(&invVects[lastBlock].Hash)
		if err == nil {
			imsg.peer.UpdateLastBlockHeight(int32(blkHeight))
		}
	}

	// Request the advertised inventory if we don't already have it.  Also,
	// request parent blocks of orphans if we receive one we already have.
	// Finally, attempt to detect potential stalls due to long side chains
	// we already have and request more blocks to prevent them.
	for i, iv := range invVects {
		// Ignore unsupported inventory types.
		if iv.Type != wire.InvTypeBlock && iv.Type != wire.InvTypeTx {
			continue
		}

		// Add the inventory to the cache of known inventory
		// for the peer.
		imsg.peer.AddKnownInventory(iv)

		// Ignore inventory when we're in headers-first mode; blocks
		// are fetched via fetchHeaderBlocks instead.
		if b.headersFirstMode {
			continue
		}

		// Request the inventory if we don't already have it.
		haveInv, err := b.haveInventory(iv)
		if err != nil {
			bmgrLog.Warnf("Unexpected failure when checking for "+
				"existing inventory during inv message "+
				"processing: %v", err)
			continue
		}
		if !haveInv {
			if iv.Type == wire.InvTypeTx {
				// Skip the transaction if it has already been
				// rejected.
				if _, exists := b.rejectedTxns[iv.Hash]; exists {
					continue
				}
			}

			// Add it to the request queue.
			imsg.peer.requestQueue = append(imsg.peer.requestQueue, iv)
			continue
		}

		if iv.Type == wire.InvTypeBlock {
			// The block is an orphan block that we already have.
			// When the existing orphan was processed, it requested
			// the missing parent blocks.  When this scenario
			// happens, it means there were more blocks missing
			// than are allowed into a single inventory message.  As
			// a result, once this peer requested the final
			// advertised block, the remote peer noticed and is now
			// resending the orphan block as an available block
			// to signal there are more missing blocks that need to
			// be requested.
			if b.chain.IsKnownOrphan(&iv.Hash) {
				// Request blocks starting at the latest known
				// up to the root of the orphan that just came
				// in.
				orphanRoot := b.chain.GetOrphanRoot(&iv.Hash)
				locator, err := b.chain.LatestBlockLocator()
				if err != nil {
					bmgrLog.Errorf("PEER: Failed to get block "+
						"locator for the latest block: "+
						"%v", err)
					continue
				}
				// NOTE(review): the send error is ignored here;
				// presumably failures surface via the peer's
				// normal disconnect handling — confirm.
				imsg.peer.PushGetBlocksMsg(locator, orphanRoot)
				continue
			}

			// We already have the final block advertised by this
			// inventory message, so force a request for more.  This
			// should only happen if we're on a really long side
			// chain.
			if i == lastBlock {
				// Request blocks after this one up to the
				// final one the remote peer knows about (zero
				// stop hash).
				locator := b.chain.BlockLocatorFromHash(&iv.Hash)
				imsg.peer.PushGetBlocksMsg(locator, &zeroHash)
			}
		}
	}

	// Request as much as possible at once.  Anything that won't fit into
	// the request will be requested on the next inv message.
	numRequested := 0
	gdmsg := wire.NewMsgGetData()
	requestQueue := imsg.peer.requestQueue
	for len(requestQueue) != 0 {
		iv := requestQueue[0]
		// Nil the consumed slot so the inv vector can be collected.
		requestQueue[0] = nil
		requestQueue = requestQueue[1:]

		switch iv.Type {
		case wire.InvTypeBlock:
			// Request the block if there is not already a pending
			// request.
			if _, exists := b.requestedBlocks[iv.Hash]; !exists {
				b.requestedBlocks[iv.Hash] = struct{}{}
				b.limitMap(b.requestedBlocks, maxRequestedBlocks)
				imsg.peer.requestedBlocks[iv.Hash] = struct{}{}
				gdmsg.AddInvVect(iv)
				numRequested++
			}

		case wire.InvTypeTx:
			// Request the transaction if there is not already a
			// pending request.
			if _, exists := b.requestedTxns[iv.Hash]; !exists {
				b.requestedTxns[iv.Hash] = struct{}{}
				b.limitMap(b.requestedTxns, maxRequestedTxns)
				imsg.peer.requestedTxns[iv.Hash] = struct{}{}
				gdmsg.AddInvVect(iv)
				numRequested++
			}
		}

		if numRequested >= wire.MaxInvPerMsg {
			break
		}
	}
	// Persist whatever remains for the next inv message.
	imsg.peer.requestQueue = requestQueue
	if len(gdmsg.InvList) > 0 {
		imsg.peer.QueueMessage(gdmsg, nil)
	}
}
  1067  
  1068  // limitMap is a helper function for maps that require a maximum limit by
  1069  // evicting a random transaction if adding a new value would cause it to
  1070  // overflow the maximum allowed.
  1071  func (b *blockManager) limitMap(m map[wire.ShaHash]struct{}, limit int) {
  1072  	if len(m)+1 > limit {
  1073  		// Remove a random entry from the map.  For most compilers, Go's
  1074  		// range statement iterates starting at a random item although
  1075  		// that is not 100% guaranteed by the spec.  The iteration order
  1076  		// is not important here because an adversary would have to be
  1077  		// able to pull off preimage attacks on the hashing function in
  1078  		// order to target eviction of specific entries anyways.
  1079  		for txHash := range m {
  1080  			delete(m, txHash)
  1081  			return
  1082  		}
  1083  	}
  1084  }
  1085  
  1086  // blockHandler is the main handler for the block manager.  It must be run
  1087  // as a goroutine.  It processes block and inv messages in a separate goroutine
  1088  // from the peer handlers so the block (MsgBlock) messages are handled by a
  1089  // single thread without needing to lock memory data structures.  This is
  1090  // important because the block manager controls which blocks are needed and how
  1091  // the fetching should proceed.
  1092  func (b *blockManager) blockHandler() {
  1093  	candidatePeers := list.New()
  1094  out:
  1095  	for {
  1096  		select {
  1097  		case m := <-b.msgChan:
  1098  			switch msg := m.(type) {
  1099  			case *newPeerMsg:
  1100  				b.handleNewPeerMsg(candidatePeers, msg.peer)
  1101  
  1102  			case *txMsg:
  1103  				b.handleTxMsg(msg)
  1104  				msg.peer.txProcessed <- struct{}{}
  1105  
  1106  			case *blockMsg:
  1107  				b.handleBlockMsg(msg)
  1108  				msg.peer.blockProcessed <- struct{}{}
  1109  
  1110  			case *invMsg:
  1111  				b.handleInvMsg(msg)
  1112  
  1113  			case *headersMsg:
  1114  				b.handleHeadersMsg(msg)
  1115  
  1116  			case *donePeerMsg:
  1117  				b.handleDonePeerMsg(candidatePeers, msg.peer)
  1118  
  1119  			case getSyncPeerMsg:
  1120  				msg.reply <- b.syncPeer
  1121  
  1122  			case processBlockMsg:
  1123  				isOrphan, err := b.chain.ProcessBlock(msg.block,
  1124  					msg.flags)
  1125  				if err != nil {
  1126  					msg.reply <- processBlockResponse{
  1127  						isOrphan: false,
  1128  						err:      err,
  1129  					}
  1130  				}
  1131  
  1132  				// Query the chain for the latest best block
  1133  				// since the block that was processed could be
  1134  				// on a side chain or have caused a reorg.
  1135  				best := b.chain.BestSnapshot()
  1136  				b.updateChainState(best.Hash, best.Height)
  1137  
  1138  				// Allow any clients performing long polling via the
  1139  				// getblocktemplate RPC to be notified when the new block causes
  1140  				// their old block template to become stale.
  1141  				rpcServer := b.server.rpcServer
  1142  				if rpcServer != nil {
  1143  					rpcServer.gbtWorkState.NotifyBlockConnected(msg.block.Sha())
  1144  				}
  1145  
  1146  				msg.reply <- processBlockResponse{
  1147  					isOrphan: isOrphan,
  1148  					err:      nil,
  1149  				}
  1150  
  1151  			case isCurrentMsg:
  1152  				msg.reply <- b.current()
  1153  
  1154  			case pauseMsg:
  1155  				// Wait until the sender unpauses the manager.
  1156  				<-msg.unpause
  1157  
  1158  			default:
  1159  				bmgrLog.Warnf("Invalid message type in block "+
  1160  					"handler: %T", msg)
  1161  			}
  1162  
  1163  		case <-b.quit:
  1164  			break out
  1165  		}
  1166  	}
  1167  
  1168  	b.wg.Done()
  1169  	bmgrLog.Trace("Block handler done")
  1170  }
  1171  
// handleNotifyMsg handles notifications from blockchain.  It does things such
// as request orphan block parents and relay accepted blocks to connected
// peers, and keeps the transaction memory pool in sync with connects and
// disconnects on the main chain.
func (b *blockManager) handleNotifyMsg(notification *blockchain.Notification) {
	switch notification.Type {
	// A block has been accepted into the block chain.  Relay it to other
	// peers.
	case blockchain.NTBlockAccepted:
		// Don't relay if we are not current. Other peers that are
		// current should already know about it.
		if !b.current() {
			return
		}

		block, ok := notification.Data.(*godashutil.Block)
		if !ok {
			bmgrLog.Warnf("Chain accepted notification is not a block.")
			break
		}

		// Generate the inventory vector and relay it.
		iv := wire.NewInvVect(wire.InvTypeBlock, block.Sha())
		b.server.RelayInventory(iv, block.MsgBlock().Header)

	// A block has been connected to the main block chain.
	case blockchain.NTBlockConnected:
		block, ok := notification.Data.(*godashutil.Block)
		if !ok {
			bmgrLog.Warnf("Chain connected notification is not a block.")
			break
		}

		// Remove all of the transactions (except the coinbase) in the
		// connected block from the transaction pool.  Secondly, remove any
		// transactions which are now double spends as a result of these
		// new transactions.  Finally, remove any transaction that is
		// no longer an orphan. Transactions which depend on a confirmed
		// transaction are NOT removed recursively because they are still
		// valid.
		for _, tx := range block.Transactions()[1:] {
			b.server.txMemPool.RemoveTransaction(tx, false)
			b.server.txMemPool.RemoveDoubleSpends(tx)
			b.server.txMemPool.RemoveOrphan(tx.Sha())
			// Orphans that became valid due to this tx are
			// announced to peers.
			acceptedTxs := b.server.txMemPool.ProcessOrphans(tx.Sha())
			b.server.AnnounceNewTransactions(acceptedTxs)
		}

		if r := b.server.rpcServer; r != nil {
			// Now that this block is in the blockchain we can mark
			// all the transactions (except the coinbase) as no
			// longer needing rebroadcasting.
			for _, tx := range block.Transactions()[1:] {
				iv := wire.NewInvVect(wire.InvTypeTx, tx.Sha())
				b.server.RemoveRebroadcastInventory(iv)
			}

			// Notify registered websocket clients of incoming block.
			r.ntfnMgr.NotifyBlockConnected(block)
		}

	// A block has been disconnected from the main block chain.
	case blockchain.NTBlockDisconnected:
		block, ok := notification.Data.(*godashutil.Block)
		if !ok {
			bmgrLog.Warnf("Chain disconnected notification is not a block.")
			break
		}

		// Reinsert all of the transactions (except the coinbase) into
		// the transaction pool.
		for _, tx := range block.Transactions()[1:] {
			_, err := b.server.txMemPool.MaybeAcceptTransaction(tx,
				false, false)
			if err != nil {
				// Remove the transaction and all transactions
				// that depend on it if it wasn't accepted into
				// the transaction pool.
				b.server.txMemPool.RemoveTransaction(tx, true)
			}
		}

		// Notify registered websocket clients.
		if r := b.server.rpcServer; r != nil {
			r.ntfnMgr.NotifyBlockDisconnected(block)
		}
	}
}
  1258  
  1259  // NewPeer informs the block manager of a newly active peer.
  1260  func (b *blockManager) NewPeer(sp *serverPeer) {
  1261  	// Ignore if we are shutting down.
  1262  	if atomic.LoadInt32(&b.shutdown) != 0 {
  1263  		return
  1264  	}
  1265  	b.msgChan <- &newPeerMsg{peer: sp}
  1266  }
  1267  
  1268  // QueueTx adds the passed transaction message and peer to the block handling
  1269  // queue.
  1270  func (b *blockManager) QueueTx(tx *godashutil.Tx, sp *serverPeer) {
  1271  	// Don't accept more transactions if we're shutting down.
  1272  	if atomic.LoadInt32(&b.shutdown) != 0 {
  1273  		sp.txProcessed <- struct{}{}
  1274  		return
  1275  	}
  1276  
  1277  	b.msgChan <- &txMsg{tx: tx, peer: sp}
  1278  }
  1279  
  1280  // QueueBlock adds the passed block message and peer to the block handling queue.
  1281  func (b *blockManager) QueueBlock(block *godashutil.Block, sp *serverPeer) {
  1282  	// Don't accept more blocks if we're shutting down.
  1283  	if atomic.LoadInt32(&b.shutdown) != 0 {
  1284  		sp.blockProcessed <- struct{}{}
  1285  		return
  1286  	}
  1287  
  1288  	b.msgChan <- &blockMsg{block: block, peer: sp}
  1289  }
  1290  
  1291  // QueueInv adds the passed inv message and peer to the block handling queue.
  1292  func (b *blockManager) QueueInv(inv *wire.MsgInv, sp *serverPeer) {
  1293  	// No channel handling here because peers do not need to block on inv
  1294  	// messages.
  1295  	if atomic.LoadInt32(&b.shutdown) != 0 {
  1296  		return
  1297  	}
  1298  
  1299  	b.msgChan <- &invMsg{inv: inv, peer: sp}
  1300  }
  1301  
  1302  // QueueHeaders adds the passed headers message and peer to the block handling
  1303  // queue.
  1304  func (b *blockManager) QueueHeaders(headers *wire.MsgHeaders, sp *serverPeer) {
  1305  	// No channel handling here because peers do not need to block on
  1306  	// headers messages.
  1307  	if atomic.LoadInt32(&b.shutdown) != 0 {
  1308  		return
  1309  	}
  1310  
  1311  	b.msgChan <- &headersMsg{headers: headers, peer: sp}
  1312  }
  1313  
  1314  // DonePeer informs the blockmanager that a peer has disconnected.
  1315  func (b *blockManager) DonePeer(sp *serverPeer) {
  1316  	// Ignore if we are shutting down.
  1317  	if atomic.LoadInt32(&b.shutdown) != 0 {
  1318  		return
  1319  	}
  1320  
  1321  	b.msgChan <- &donePeerMsg{peer: sp}
  1322  }
  1323  
  1324  // Start begins the core block handler which processes block and inv messages.
  1325  func (b *blockManager) Start() {
  1326  	// Already started?
  1327  	if atomic.AddInt32(&b.started, 1) != 1 {
  1328  		return
  1329  	}
  1330  
  1331  	bmgrLog.Trace("Starting block manager")
  1332  	b.wg.Add(1)
  1333  	go b.blockHandler()
  1334  }
  1335  
  1336  // Stop gracefully shuts down the block manager by stopping all asynchronous
  1337  // handlers and waiting for them to finish.
  1338  func (b *blockManager) Stop() error {
  1339  	if atomic.AddInt32(&b.shutdown, 1) != 1 {
  1340  		bmgrLog.Warnf("Block manager is already in the process of " +
  1341  			"shutting down")
  1342  		return nil
  1343  	}
  1344  
  1345  	bmgrLog.Infof("Block manager shutting down")
  1346  	close(b.quit)
  1347  	b.wg.Wait()
  1348  	return nil
  1349  }
  1350  
  1351  // SyncPeer returns the current sync peer.
  1352  func (b *blockManager) SyncPeer() *serverPeer {
  1353  	reply := make(chan *serverPeer)
  1354  	b.msgChan <- getSyncPeerMsg{reply: reply}
  1355  	return <-reply
  1356  }
  1357  
  1358  // ProcessBlock makes use of ProcessBlock on an internal instance of a block
  1359  // chain.  It is funneled through the block manager since btcchain is not safe
  1360  // for concurrent access.
  1361  func (b *blockManager) ProcessBlock(block *godashutil.Block, flags blockchain.BehaviorFlags) (bool, error) {
  1362  	reply := make(chan processBlockResponse, 1)
  1363  	b.msgChan <- processBlockMsg{block: block, flags: flags, reply: reply}
  1364  	response := <-reply
  1365  	return response.isOrphan, response.err
  1366  }
  1367  
  1368  // IsCurrent returns whether or not the block manager believes it is synced with
  1369  // the connected peers.
  1370  func (b *blockManager) IsCurrent() bool {
  1371  	reply := make(chan bool)
  1372  	b.msgChan <- isCurrentMsg{reply: reply}
  1373  	return <-reply
  1374  }
  1375  
  1376  // Pause pauses the block manager until the returned channel is closed.
  1377  //
  1378  // Note that while paused, all peer and block processing is halted.  The
  1379  // message sender should avoid pausing the block manager for long durations.
  1380  func (b *blockManager) Pause() chan<- struct{} {
  1381  	c := make(chan struct{})
  1382  	b.msgChan <- pauseMsg{c}
  1383  	return c
  1384  }
  1385  
// newBlockManager returns a new bitcoin block manager.
// Use Start to begin processing asynchronous block and inv updates.
func newBlockManager(s *server, indexManager blockchain.IndexManager) (*blockManager, error) {
	bm := blockManager{
		server:          s,
		rejectedTxns:    make(map[wire.ShaHash]struct{}),
		requestedTxns:   make(map[wire.ShaHash]struct{}),
		requestedBlocks: make(map[wire.ShaHash]struct{}),
		progressLogger:  newBlockProgressLogger("Processed", bmgrLog),
		// Sized to three messages per allowed peer so bursts from all
		// peers don't immediately block senders.
		msgChan:    make(chan interface{}, cfg.MaxPeers*3),
		headerList: list.New(),
		quit:       make(chan struct{}),
	}

	// Create a new block chain instance with the appropriate configuration.
	var err error
	bm.chain, err = blockchain.New(&blockchain.Config{
		DB:            s.db,
		ChainParams:   s.chainParams,
		TimeSource:    s.timeSource,
		Notifications: bm.handleNotifyMsg,
		SigCache:      s.sigCache,
		IndexManager:  indexManager,
	})
	if err != nil {
		return nil, err
	}
	// Snapshot the best chain state before configuring checkpoints so the
	// initial checkpoint and header state reflect the current tip.
	best := bm.chain.BestSnapshot()
	bm.chain.DisableCheckpoints(cfg.DisableCheckpoints)
	if !cfg.DisableCheckpoints {
		// Initialize the next checkpoint based on the current height.
		bm.nextCheckpoint = bm.findNextHeaderCheckpoint(best.Height)
		if bm.nextCheckpoint != nil {
			// Headers-first sync requires the header state to be
			// seeded with the current best block.
			bm.resetHeaderState(best.Hash, best.Height)
		}
	} else {
		bmgrLog.Info("Checkpoints are disabled")
	}

	// Initialize the chain state now that the initial block node index has
	// been generated.
	bm.updateChainState(best.Hash, best.Height)

	return &bm, nil
}
  1431  
  1432  // removeRegressionDB removes the existing regression test database if running
  1433  // in regression test mode and it already exists.
  1434  func removeRegressionDB(dbPath string) error {
  1435  	// Don't do anything if not in regression test mode.
  1436  	if !cfg.RegressionTest {
  1437  		return nil
  1438  	}
  1439  
  1440  	// Remove the old regression test database if it already exists.
  1441  	fi, err := os.Stat(dbPath)
  1442  	if err == nil {
  1443  		btcdLog.Infof("Removing regression test database from '%s'", dbPath)
  1444  		if fi.IsDir() {
  1445  			err := os.RemoveAll(dbPath)
  1446  			if err != nil {
  1447  				return err
  1448  			}
  1449  		} else {
  1450  			err := os.Remove(dbPath)
  1451  			if err != nil {
  1452  				return err
  1453  			}
  1454  		}
  1455  	}
  1456  
  1457  	return nil
  1458  }
  1459  
  1460  // dbPath returns the path to the block database given a database type.
  1461  func blockDbPath(dbType string) string {
  1462  	// The database name is based on the database type.
  1463  	dbName := blockDbNamePrefix + "_" + dbType
  1464  	if dbType == "sqlite" {
  1465  		dbName = dbName + ".db"
  1466  	}
  1467  	dbPath := filepath.Join(cfg.DataDir, dbName)
  1468  	return dbPath
  1469  }
  1470  
  1471  // warnMultipeDBs shows a warning if multiple block database types are detected.
  1472  // This is not a situation most users want.  It is handy for development however
  1473  // to support multiple side-by-side databases.
  1474  func warnMultipeDBs() {
  1475  	// This is intentionally not using the known db types which depend
  1476  	// on the database types compiled into the binary since we want to
  1477  	// detect legacy db types as well.
  1478  	dbTypes := []string{"ffldb", "leveldb", "sqlite"}
  1479  	duplicateDbPaths := make([]string, 0, len(dbTypes)-1)
  1480  	for _, dbType := range dbTypes {
  1481  		if dbType == cfg.DbType {
  1482  			continue
  1483  		}
  1484  
  1485  		// Store db path as a duplicate db if it exists.
  1486  		dbPath := blockDbPath(dbType)
  1487  		if fileExists(dbPath) {
  1488  			duplicateDbPaths = append(duplicateDbPaths, dbPath)
  1489  		}
  1490  	}
  1491  
  1492  	// Warn if there are extra databases.
  1493  	if len(duplicateDbPaths) > 0 {
  1494  		selectedDbPath := blockDbPath(cfg.DbType)
  1495  		btcdLog.Warnf("WARNING: There are multiple block chain databases "+
  1496  			"using different database types.\nYou probably don't "+
  1497  			"want to waste disk space by having more than one.\n"+
  1498  			"Your current database is located at [%v].\nThe "+
  1499  			"additional database is located at %v", selectedDbPath,
  1500  			duplicateDbPaths)
  1501  	}
  1502  }
  1503  
  1504  // loadBlockDB loads (or creates when needed) the block database taking into
  1505  // account the selected database backend and returns a handle to it.  It also
  1506  // contains additional logic such warning the user if there are multiple
  1507  // databases which consume space on the file system and ensuring the regression
  1508  // test database is clean when in regression test mode.
  1509  func loadBlockDB() (database.DB, error) {
  1510  	// The memdb backend does not have a file path associated with it, so
  1511  	// handle it uniquely.  We also don't want to worry about the multiple
  1512  	// database type warnings when running with the memory database.
  1513  	if cfg.DbType == "memdb" {
  1514  		btcdLog.Infof("Creating block database in memory.")
  1515  		db, err := database.Create(cfg.DbType)
  1516  		if err != nil {
  1517  			return nil, err
  1518  		}
  1519  		return db, nil
  1520  	}
  1521  
  1522  	warnMultipeDBs()
  1523  
  1524  	// The database name is based on the database type.
  1525  	dbPath := blockDbPath(cfg.DbType)
  1526  
  1527  	// The regression test is special in that it needs a clean database for
  1528  	// each run, so remove it now if it already exists.
  1529  	removeRegressionDB(dbPath)
  1530  
  1531  	btcdLog.Infof("Loading block database from '%s'", dbPath)
  1532  	db, err := database.Open(cfg.DbType, dbPath, activeNetParams.Net)
  1533  	if err != nil {
  1534  		// Return the error if it's not because the database doesn't
  1535  		// exist.
  1536  		if dbErr, ok := err.(database.Error); !ok || dbErr.ErrorCode !=
  1537  			database.ErrDbDoesNotExist {
  1538  
  1539  			return nil, err
  1540  		}
  1541  
  1542  		// Create the db if it does not exist.
  1543  		err = os.MkdirAll(cfg.DataDir, 0700)
  1544  		if err != nil {
  1545  			return nil, err
  1546  		}
  1547  		db, err = database.Create(cfg.DbType, dbPath, activeNetParams.Net)
  1548  		if err != nil {
  1549  			return nil, err
  1550  		}
  1551  	}
  1552  
  1553  	btcdLog.Info("Block database loaded")
  1554  	return db, nil
  1555  }