github.com/deso-protocol/core@v1.2.9/lib/server.go

     1  package lib
     2  
     3  import (
     4  	"encoding/hex"
     5  	"fmt"
     6  	"net"
     7  	"runtime"
     8  	"strings"
     9  	"time"
    10  
    11  	"github.com/decred/dcrd/lru"
    12  
    13  	"github.com/DataDog/datadog-go/statsd"
    14  
    15  	"github.com/btcsuite/btcd/addrmgr"
    16  	chainlib "github.com/btcsuite/btcd/blockchain"
    17  	"github.com/btcsuite/btcd/wire"
    18  	"github.com/davecgh/go-spew/spew"
    19  	"github.com/deso-protocol/go-deadlock"
    20  	"github.com/dgraph-io/badger/v3"
    21  	"github.com/golang/glog"
    22  	"github.com/pkg/errors"
    23  )
    24  
    25  // ServerMessage is the core data structure processed by the Server in its main
    26  // loop.
    27  type ServerMessage struct {
    28  	Peer      *Peer
    29  	Msg       DeSoMessage
    30  	ReplyChan chan *ServerReply
    31  }
    32  
    33  // GetDataRequestInfo is a data structure used to keep track of which transactions
    34  // we've requested from a Peer.
    35  type GetDataRequestInfo struct {
    36  	PeerWhoSentInv *Peer
    37  	TimeRequested  time.Time
    38  }
    39  
    40  // ServerReply is used to signal to outside programs that a particular ServerMessage
    41  // they may have been waiting on has been processed.
    42  type ServerReply struct {
    43  }
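
        // A hypothetical sketch of the request/reply pattern these two types support.
        // In the real flow the ConnectionManager performs the channel send; this
        // block is illustrative only and not part of the package:
        //
        //	replyChan := make(chan *ServerReply)
        //	srv.incomingMessages <- &ServerMessage{
        //		Peer:      pp,
        //		Msg:       msg,
        //		ReplyChan: replyChan,
        //	}
        //	<-replyChan // block until the Server signals that the message was processed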
    44  
    45  // Server is the core of the DeSo node. It effectively runs a single-threaded
    46  // main loop that processes transactions from other peers and responds to them
    47  // accordingly. Probably the best place to start looking is the messageHandler
    48  // function.
    49  type Server struct {
    50  	cmgr          *ConnectionManager
    51  	blockchain    *Blockchain
    52  	mempool       *DeSoMempool
    53  	miner         *DeSoMiner
    54  	blockProducer *DeSoBlockProducer
    55  	eventManager  *EventManager
    56  
    57  	// All messages received from peers get sent from the ConnectionManager to the
    58  	// Server through this channel.
    59  	//
    60  	// Generally, the ConnectionManager is responsible for managing the
    61  	// connections to all the peers, but when it receives a message from one of
    62  	// them, it forwards it to the Server on this channel to actually process it
    63  	// (acting as a router in that way).
    64  	//
    65  	// In addition to messages from peers, the ConnectionManager will also send control
    66  	// messages to notify the Server e.g. when a Peer connects or disconnects so that
    67  	// the Server can take action appropriately.
    68  	incomingMessages chan *ServerMessage
    69  	// inventoryBeingProcessed keeps track of the inventory (hashes of blocks and
    70  	// transactions) that we've recently processed from peers. It is useful for
    71  	// avoiding situations in which we re-fetch the same data from many peers.
    72  	// For example, if we get the same Block inv message from multiple peers,
    73  	// adding it to this map and checking this map before replying will make it
    74  	// so that we only send a reply to the first peer that sent us the inv, which
    75  	// is more efficient.
    76  	inventoryBeingProcessed lru.Cache
    77  	// hasRequestedSync indicates whether we've bootstrapped our mempool by
    78  	// requesting all mempool transactions from a peer. It's initially false when
    79  	// the server boots up but gets set to true after we make a Mempool request
    80  	// once we're fully synced.
    81  	//
    82  	// The waitGroup is used to manage the cleanup of the Server.
    83  	waitGroup deadlock.WaitGroup
    84  
    85  	// During initial block download, we request headers and blocks from a single
    86  	// peer. Note: These fields should only be accessed from the messageHandler thread.
    87  	//
    88  	// TODO: This could be much faster if we were to download blocks in parallel
    89  	// rather than from a single peer but it won't be a problem until later, at which
    90  	// point we can make the optimization.
    91  	SyncPeer *Peer
    92  	// How long we wait on a transaction we're fetching before giving
    93  	// up on it. Note this doesn't apply to blocks because they have their own
    94  	// process for retrying that differs from transactions, which are
    95  	// more best-effort than blocks.
    96  	requestTimeoutSeconds uint32
    97  
    98  	// dataLock protects requestedTransactionsMap and requestedBlocks.
    99  	dataLock deadlock.Mutex
   100  
   101  	// requestedTransactionsMap contains hashes of transactions for which we have
   102  	// requested data but have not yet received a response.
   103  	requestedTransactionsMap map[BlockHash]*GetDataRequestInfo
   104  
   105  	// addrsToBroadcast is a list of all the addresses we've received from valid addr
   106  	// messages that we intend to broadcast to our peers. It is organized as:
   107  	// <recipient address> -> <list of addresses we received from that recipient>.
   108  	//
   109  	// It is organized in this way so that we can limit the number of addresses we
   110  	// are distributing for a single peer to avoid a DOS attack.
   111  	addrsToBroadcastLock deadlock.RWMutex
   112  	addrsToBroadcastt    map[string][]*SingleAddr
   113  
   114  	// When set to true, we disable the ConnectionManager
   115  	disableNetworking bool
   116  
   117  	// When set to true, transactions created on this node will be ignored.
   118  	readOnlyMode                 bool
   119  	ignoreInboundPeerInvMessages bool
   120  
   121  	// Becomes true after the node has processed its first transaction bundle from
   122  	// any peer. This is useful in a deployment setting because it makes it so that
   123  	// a health check can wait until this value becomes true.
   124  	hasProcessedFirstTransactionBundle bool
   125  
   126  	statsdClient *statsd.Client
   127  
   128  	Notifier *Notifier
   129  }
   130  
   131  func (srv *Server) HasProcessedFirstTransactionBundle() bool {
   132  	return srv.hasProcessedFirstTransactionBundle
   133  }
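
        // A minimal health-check sketch. The caller (e.g. a deployment readiness
        // probe) and the one-second polling interval are assumptions for
        // illustration; they are not part of this package:
        //
        //	for !srv.HasProcessedFirstTransactionBundle() {
        //		time.Sleep(time.Second)
        //	}
        //	// The node has seen its first transaction bundle and can be
        //	// considered healthy.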
   134  
   135  // ResetRequestQueues resets all the request queues.
   136  func (srv *Server) ResetRequestQueues() {
   137  	srv.dataLock.Lock()
   138  	defer srv.dataLock.Unlock()
   139  
   140  	glog.V(2).Infof("Server.ResetRequestQueues: Resetting request queues")
   141  
   142  	srv.requestedTransactionsMap = make(map[BlockHash]*GetDataRequestInfo)
   143  }
   144  
   145  // dataLock must be acquired for writing before calling this function.
   146  func (srv *Server) _removeRequest(hash *BlockHash) {
   147  	// Just be lazy and remove the hash from everything indiscriminately to
   148  	// make sure it's good and purged.
   149  	delete(srv.requestedTransactionsMap, *hash)
   150  
   151  	invVect := &InvVect{
   152  		Type: InvTypeTx,
   153  		Hash: *hash,
   154  	}
   155  	srv.inventoryBeingProcessed.Delete(*invVect)
   156  }
   157  
   158  // dataLock must be acquired for writing before calling this function.
   159  func (srv *Server) _expireRequests() {
   160  	// TODO: It could in theory get slow to do brute force iteration over everything
   161  	// we've requested but not yet received, which is what we do below. But we'll
   162  	// wait until we actually have an issue with it before optimizing it, since it
   163  	// could also be fine. Just watch out for it.
   164  
   165  	timeout := time.Duration(int64(srv.requestTimeoutSeconds) * int64(time.Second))
   166  	for hashIter, requestInfo := range srv.requestedTransactionsMap {
   167  		// Note that it's safe to use the hash iterator here because _removeRequest
   168  		// doesn't take a reference to it.
   169  		if requestInfo.TimeRequested.Add(timeout).Before(time.Now()) {
   170  			srv._removeRequest(&hashIter)
   171  		}
   172  	}
   173  }
   174  
   175  // ExpireRequests checks to see if any requests have expired and removes them if so.
   176  func (srv *Server) ExpireRequests() {
   177  	srv.dataLock.Lock()
   178  	defer srv.dataLock.Unlock()
   179  
   180  	srv._expireRequests()
   181  }
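
        // A hypothetical sketch of how a caller might run expiry periodically.
        // Nothing in this file mandates an interval; ten seconds is an assumption
        // chosen to mirror requestTimeoutSeconds:
        //
        //	go func() {
        //		for range time.Tick(10 * time.Second) {
        //			srv.ExpireRequests()
        //		}
        //	}()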
   182  
   183  // TODO: The hallmark of a messy non-law-of-demeter-following interface...
   184  func (srv *Server) GetBlockchain() *Blockchain {
   185  	return srv.blockchain
   186  }
   187  
   188  // TODO: The hallmark of a messy non-law-of-demeter-following interface...
   189  func (srv *Server) GetMempool() *DeSoMempool {
   190  	return srv.mempool
   191  }
   192  
   193  // TODO: The hallmark of a messy non-law-of-demeter-following interface...
   194  func (srv *Server) GetBlockProducer() *DeSoBlockProducer {
   195  	return srv.blockProducer
   196  }
   197  
   198  // TODO: The hallmark of a messy non-law-of-demeter-following interface...
   199  func (srv *Server) GetConnectionManager() *ConnectionManager {
   200  	return srv.cmgr
   201  }
   202  
   203  // TODO: The hallmark of a messy non-law-of-demeter-following interface...
   204  func (srv *Server) GetMiner() *DeSoMiner {
   205  	return srv.miner
   206  }
   207  
   208  func (srv *Server) BroadcastTransaction(txn *MsgDeSoTxn) ([]*MempoolTx, error) {
   209  	// Use the backendServer to add the transaction to the mempool and
   210  	// relay it to peers. When a transaction is created by the user there
   211  	// is no need to consider a rateLimit and also no need to verifySignatures
   212  	// because we generally will have done that already.
   213  	mempoolTxs, err := srv._addNewTxn(nil /*peer*/, txn, false /*rateLimit*/, false /*verifySignatures*/)
   214  	if err != nil {
   215  		return nil, errors.Wrapf(err, "BroadcastTransaction: ")
   216  	}
   217  
   218  	// At this point, we know the transaction has been run through the mempool.
   219  	// Now wait for an update of the ReadOnlyUtxoView so we don't break anything.
   220  	srv.mempool.BlockUntilReadOnlyViewRegenerated()
   221  
   222  	return mempoolTxs, nil
   223  }
   224  
   225  func (srv *Server) VerifyAndBroadcastTransaction(txn *MsgDeSoTxn) error {
   226  	// Grab the block tip and use it as the height for validation.
   227  	blockHeight := srv.blockchain.BlockTip().Height
   228  	err := srv.blockchain.ValidateTransaction(
   229  		txn,
   230  		// blockHeight is set to the next block since that's where this
   231  		// transaction will be mined at the earliest.
   232  		blockHeight+1,
   233  		true,
   234  		srv.mempool)
   235  	if err != nil {
   236  		return fmt.Errorf("VerifyAndBroadcastTransaction: Problem validating txn: %v", err)
   237  	}
   238  
   239  	if _, err := srv.BroadcastTransaction(txn); err != nil {
   240  		return fmt.Errorf("VerifyAndBroadcastTransaction: Problem broadcasting txn: %v", err)
   241  	}
   242  
   243  	return nil
   244  }
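
        // A hypothetical usage sketch for the two helpers above. It assumes txn is
        // a fully-constructed, signed MsgDeSoTxn:
        //
        //	if err := srv.VerifyAndBroadcastTransaction(txn); err != nil {
        //		glog.Errorf("Problem broadcasting txn: %v", err)
        //	}
        //
        // BroadcastTransaction can be called directly when the caller has already
        // validated the transaction.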
   245  
   246  // NewServer initializes all of the internal data structures. Right now this basically
   247  // looks as follows:
   248  // - ConnectionManager starts and keeps track of peers.
   249  // - When messages are received from peers, they get forwarded on a channel to
   250  //   the Server to handle them. In that sense the ConnectionManager is basically
   251  //   just acting as a router.
   252  // - When the Server receives a message from a peer, it can do any of the following:
   253  //   * Take no action.
   254  //   * Use the Blockchain data structure to validate the transaction or update the
   255  //     Blockchain data structure.
   256  //   * Send a new message. This can be a message directed back to the peer that actually
   257  //     sent this message or it can be a message to another peer for whatever reason. When a message
   258  //     is sent in this way it can also have a deadline on it that the peer needs to
   259  //     respond by or else it will be disconnected.
   260  //   * Disconnect the peer. In this case the ConnectionManager gets notified about the
   261  //     disconnection and may opt to replace the now-disconnected peer with a new peer.
   262  //     This happens for example when an outbound peer is disconnected in order to
   263  //     maintain TargetOutboundPeers.
   264  // - The server could also receive a control message that a peer has been disconnected.
   265  //   This can be useful to the server if, for example, it was expecting a response from
   266  //   a particular peer, which could be the case in initial block download where a single
   267  //   sync peer is used.
   268  //
   269  // TODO: Refactor all these arguments into a config object or something.
   270  func NewServer(
   271  	_params *DeSoParams,
   272  	_listeners []net.Listener,
   273  	_desoAddrMgr *addrmgr.AddrManager,
   274  	_connectIps []string,
   275  	_db *badger.DB,
   276  	postgres *Postgres,
   277  	_targetOutboundPeers uint32,
   278  	_maxInboundPeers uint32,
   279  	_minerPublicKeys []string,
   280  	_numMiningThreads uint64,
   281  	_limitOneInboundConnectionPerIP bool,
   282  	_rateLimitFeerateNanosPerKB uint64,
   283  	_minFeeRateNanosPerKB uint64,
   284  	_stallTimeoutSeconds uint64,
   285  	_maxBlockTemplatesToCache uint64,
   286  	_minBlockUpdateIntervalSeconds uint64,
   287  	_blockCypherAPIKey string,
   288  	_runReadOnlyUtxoViewUpdater bool,
   289  	_dataDir string,
   290  	_mempoolDumpDir string,
   291  	_disableNetworking bool,
   292  	_readOnlyMode bool,
   293  	_ignoreInboundPeerInvMessages bool,
   294  	statsd *statsd.Client,
   295  	_blockProducerSeed string,
   296  	_trustedBlockProducerPublicKeys []string,
   297  	_trustedBlockProducerStartHeight uint64,
   298  	eventManager *EventManager,
   299  ) (*Server, error) {
   300  
   301  	// Create an empty Server object here so we can pass a reference to it to the
   302  	// ConnectionManager.
   303  	srv := &Server{
   304  		disableNetworking:            _disableNetworking,
   305  		readOnlyMode:                 _readOnlyMode,
   306  		ignoreInboundPeerInvMessages: _ignoreInboundPeerInvMessages,
   307  	}
   308  
   309  	// The same timesource is used in the chain data structure and in the connection
   310  	// manager. It just takes and keeps track of the median time among our peers so
   311  	// we can keep a consistent clock.
   312  	timesource := chainlib.NewMedianTime()
   313  
   314  	// Create a new connection manager but note that it won't be initialized until Start().
   315  	_incomingMessages := make(chan *ServerMessage, (_targetOutboundPeers+_maxInboundPeers)*3)
   316  	_cmgr := NewConnectionManager(
   317  		_params, _desoAddrMgr, _listeners, _connectIps, timesource,
   318  		_targetOutboundPeers, _maxInboundPeers, _limitOneInboundConnectionPerIP,
   319  		_stallTimeoutSeconds, _minFeeRateNanosPerKB,
   320  		_incomingMessages, srv)
   321  
   322  	// Set up the blockchain data structure. This is responsible for accepting new
   323  	// blocks, keeping track of the best chain, and keeping all of that state up
   324  	// to date on disk.
   325  	//
   326  	// If this is the first time this data structure is being initialized, it will
   327  	// contain only the genesis block. Otherwise it loads all of the block headers
   328  	// (actually BlockNode's) from the db into memory, which is a somewhat heavy-weight
   329  	// operation.
   330  	//
   331  	// TODO: Would be nice if this heavier-weight operation were moved to Start() to
   332  	// keep this constructor fast.
   333  	eventManager.OnBlockConnected(srv._handleBlockMainChainConnected)
   334  	eventManager.OnBlockAccepted(srv._handleBlockAccepted)
   335  	eventManager.OnBlockDisconnected(srv._handleBlockMainChainDisconnected)
   336  
   337  	_chain, err := NewBlockchain(
   338  		_trustedBlockProducerPublicKeys,
   339  		_trustedBlockProducerStartHeight,
   340  		_params, timesource, _db, postgres, eventManager)
   341  	if err != nil {
   342  		return nil, errors.Wrapf(err, "NewServer: Problem initializing blockchain")
   343  	}
   344  
   345  	glog.V(1).Infof("Initialized chain: Best Header Height: %d, Header Hash: %s, Header CumWork: %s, Best Block Height: %d, Block Hash: %s, Block CumWork: %s",
   346  		_chain.headerTip().Height,
   347  		hex.EncodeToString(_chain.headerTip().Hash[:]),
   348  		hex.EncodeToString(BigintToHash(_chain.headerTip().CumWork)[:]),
   349  		_chain.blockTip().Height,
   350  		hex.EncodeToString(_chain.blockTip().Hash[:]),
   351  		hex.EncodeToString(BigintToHash(_chain.blockTip().CumWork)[:]))
   352  
   353  	// Create a mempool to store transactions until they're ready to be mined into
   354  	// blocks.
   355  	_mempool := NewDeSoMempool(_chain, _rateLimitFeerateNanosPerKB,
   356  		_minFeeRateNanosPerKB, _blockCypherAPIKey, _runReadOnlyUtxoViewUpdater, _dataDir,
   357  		_mempoolDumpDir)
   358  
   359  	// Useful for debugging. Every second, it outputs the contents of the mempool
   360  	// and the contents of the addrmanager.
   361  	/*
   362  		go func() {
   363  			time.Sleep(3 * time.Second)
   364  			for {
   365  				glog.V(2).Infof("Current mempool txns: ")
   366  				counter := 0
   367  				for kk, mempoolTx := range _mempool.poolMap {
   368  					kkCopy := kk
   369  					glog.V(2).Infof("\t%d: < %v: %v >", counter, &kkCopy, mempoolTx)
   370  					counter++
   371  				}
   372  				glog.V(2).Infof("Current addrs: ")
   373  				for ii, na := range srv.cmgr.addrMgr.GetAllAddrs() {
   374  					glog.V(2).Infof("Addr %d: <%s:%d>", ii, na.IP.String(), na.Port)
   375  				}
   376  				time.Sleep(1 * time.Second)
   377  			}
   378  		}()
   379  	*/
   380  
   381  	// Initialize the BlockProducer
   382  	// TODO(miner): Should figure out a way to get this into main.
   383  	var _blockProducer *DeSoBlockProducer
   384  	if _maxBlockTemplatesToCache > 0 {
   385  		_blockProducer, err = NewDeSoBlockProducer(
   386  			_minBlockUpdateIntervalSeconds, _maxBlockTemplatesToCache,
   387  			_blockProducerSeed,
   388  			_mempool, _chain,
   389  			_params, postgres)
   390  		if err != nil {
   391  			panic(err)
   392  		}
   393  		go func() {
   394  			_blockProducer.Start()
   395  		}()
   396  	}
   397  
   398  	// TODO(miner): Make the miner its own binary and pull it out of here.
   399  	// Don't start the miner unless miner public keys are set.
   400  	if _numMiningThreads <= 0 {
   401  		_numMiningThreads = uint64(runtime.NumCPU())
   402  	}
   403  	_miner, err := NewDeSoMiner(_minerPublicKeys, uint32(_numMiningThreads), _blockProducer, _params)
   404  	if err != nil {
   405  		return nil, errors.Wrapf(err, "NewServer: ")
   406  	}
   407  
   408  	// Set all the fields on the Server object.
   409  	srv.cmgr = _cmgr
   410  	srv.blockchain = _chain
   411  	srv.mempool = _mempool
   412  	srv.miner = _miner
   413  	srv.blockProducer = _blockProducer
   414  	srv.incomingMessages = _incomingMessages
   415  	// Make this hold a multiple of what we hold for individual peers.
   416  	srv.inventoryBeingProcessed = lru.NewCache(maxKnownInventory)
   417  	srv.requestTimeoutSeconds = 10
   418  
   419  	srv.statsdClient = statsd
   420  
   421  	// TODO: Make this configurable
   422  	//srv.Notifier = NewNotifier(_chain, postgres)
   423  	//srv.Notifier.Start()
   424  
   425  	// Start statsd reporter
   426  	if srv.statsdClient != nil {
   427  		srv.StartStatsdReporter()
   428  	}
   429  
   430  	// Initialize the addrs to broadcast map.
   431  	srv.addrsToBroadcastt = make(map[string][]*SingleAddr)
   432  
   433  	// This will initialize the request queues.
   434  	srv.ResetRequestQueues()
   435  
   436  	return srv, nil
   437  }
   438  
   439  func (srv *Server) _handleGetHeaders(pp *Peer, msg *MsgDeSoGetHeaders) {
   440  	glog.V(1).Infof("Server._handleGetHeaders: called with locator: (%v), "+
   441  		"stopHash: (%v) from Peer %v", msg.BlockLocator, msg.StopHash, pp)
   442  
   443  	// Ignore GetHeaders requests while we're still syncing.
   444  	if srv.blockchain.isSyncing() {
   445  		chainState := srv.blockchain.chainState()
   446  		glog.V(1).Infof("Server._handleGetHeaders: Ignoring GetHeaders from Peer %v "+
   447  			"because node is syncing with ChainState (%v)", pp, chainState)
   448  		return
   449  	}
   450  
   451  	// Find the most recent known block in the best block chain based
   452  	// on the block locator and fetch all of the headers after it until either
   453  	// MaxHeadersPerMsg have been fetched or the provided stop
   454  	// hash is encountered. Note that the headers we return are based on
   455  	// our best *block* chain not our best *header* chain. The reason for
   456  	// this is that the peer will likely follow up this request by asking
   457  	// us for the blocks corresponding to the headers and we need to be
   458  	// able to deliver them in this case.
   459  	//
   460  	// Use the block after the genesis block if no other blocks in the
   461  	// provided locator are known. This does mean the client will start
   462  	// over with the genesis block if unknown block locators are provided.
   463  	headers := srv.blockchain.LocateBestBlockChainHeaders(msg.BlockLocator, msg.StopHash)
   464  
   465  	// Send found headers to the requesting peer.
   466  	blockTip := srv.blockchain.blockTip()
   467  	pp.AddDeSoMessage(&MsgDeSoHeaderBundle{
   468  		Headers:   headers,
   469  		TipHash:   blockTip.Hash,
   470  		TipHeight: blockTip.Height,
   471  	}, false)
   472  	glog.V(2).Infof("Server._handleGetHeaders: Replied to GetHeaders request "+
   473  		"with response headers: (%v), tip hash (%v), tip height (%d) from Peer %v",
   474  		headers, blockTip.Hash, blockTip.Height, pp)
   475  }
   476  
   477  // GetBlocks computes what blocks we need to fetch and asks for them from the
   478  // corresponding peer. It is typically called after we have exited
   479  // SyncStateSyncingHeaders.
   480  func (srv *Server) GetBlocks(pp *Peer, maxHeight int) {
   481  	// Fetch as many blocks as we can from this peer.
   482  	numBlocksToFetch := MaxBlocksInFlight - len(pp.requestedBlocks)
   483  	blockNodesToFetch := srv.blockchain.GetBlockNodesToFetch(
   484  		numBlocksToFetch, maxHeight, pp.requestedBlocks)
   485  	if len(blockNodesToFetch) == 0 {
   486  		// This can happen if, for example, we're already requesting the maximum
   487  		// number of blocks we can. Just return in this case.
   488  		return
   489  	}
   490  
   491  	// If we're here then we have some blocks to fetch so fetch them.
   492  	hashList := []*BlockHash{}
   493  	for _, node := range blockNodesToFetch {
   494  		hashList = append(hashList, node.Hash)
   495  
   496  		pp.requestedBlocks[*node.Hash] = true
   497  	}
   498  	pp.AddDeSoMessage(&MsgDeSoGetBlocks{
   499  		HashList: hashList,
   500  	}, false)
   501  
   502  	glog.V(1).Infof("GetBlocks: Downloading %d blocks from header %v to header %v from peer %v",
   503  		len(blockNodesToFetch),
   504  		blockNodesToFetch[0].Header,
   505  		blockNodesToFetch[len(blockNodesToFetch)-1].Header,
   506  		pp)
   507  }
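
        // For illustration: a hypothetical call with maxHeight set to -1, which
        // asks for as many blocks as the peer can serve. The sync paths later in
        // this file use exactly this convention:
        //
        //	srv.GetBlocks(pp, -1 /*maxHeight*/)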
   508  
   509  func (srv *Server) _handleHeaderBundle(pp *Peer, msg *MsgDeSoHeaderBundle) {
   510  	glog.Infof("Received header bundle with %v headers "+
   511  		"in state %s from peer %v. Downloaded ( %v / %v ) total headers",
   512  		len(msg.Headers), srv.blockchain.chainState(), pp,
   513  		srv.blockchain.headerTip().Header.Height, pp.StartingBlockHeight())
   514  
   515  	// Start by processing all of the headers given to us. They should start
   516  	// right after the tip of our header chain ideally. While going through them
   517  	// tally up the number that we actually process.
   518  	numNewHeaders := 0
   519  	for _, headerReceived := range msg.Headers {
   520  		// If we encounter a duplicate header while we're still syncing then
   521  		// the peer is misbehaving. Disconnect so we can find one that won't
   522  		// have this issue. Hitting duplicates after we're done syncing is
   523  		// fine and can happen in certain cases.
   524  		headerHash, _ := headerReceived.Hash()
   525  		if srv.blockchain.HasHeader(headerHash) {
   526  			if srv.blockchain.isSyncing() {
   527  
   528  				glog.Warningf("Server._handleHeaderBundle: Duplicate header %v received from peer %v "+
   529  					"in state %s. Local header tip height %d "+
   530  					"hash %s with duplicate %v",
   531  					headerHash,
   532  					pp, srv.blockchain.chainState(), srv.blockchain.headerTip().Height,
   533  					hex.EncodeToString(srv.blockchain.headerTip().Hash[:]), headerHash)
   534  
   535  				// TODO: This logic should really be commented back in, but there was a bug that
   536  				// arises when a program is killed forcefully whereby a partial write leads to this
   537  				// logic causing the sync to stall. As such, it's more trouble than it's worth
   538  				// at the moment but we should consider being more strict about it in the future.
   539  				/*
   540  					pp.Disconnect()
   541  					return
   542  				*/
   543  			}
   544  
   545  			// Don't process duplicate headers.
   546  			continue
   547  		}
   548  
   549  		// If we get here then we have a header we haven't seen before.
   550  		numNewHeaders++
   551  
   552  		// Process the header, as we haven't seen it before.
   553  		_, isOrphan, err := srv.blockchain.ProcessHeader(headerReceived, headerHash)
   554  
   555  	// If this header is an orphan or we encountered an error for any reason,
   556  		// disconnect from the peer. Because every header is sent in response to
   557  		// a GetHeaders request, the peer should know enough to never send us
   558  	// unconnected headers unless it's misbehaving.
   559  		if err != nil || isOrphan {
   560  			glog.Errorf("Server._handleHeaderBundle: Disconnecting from peer %v in state %s "+
   561  				"because an error occurred processing header: %v, isOrphan: %v",
   562  				pp, srv.blockchain.chainState(), err, isOrphan)
   563  
   564  			pp.Disconnect()
   565  			return
   566  		}
   567  	}
   568  
   569  	// After processing all the headers this will check to see if we are fully current
   570  	// and send a request to our Peer to start a Mempool sync if so.
   571  	//
   572  	// This statement makes it so that if we boot up our node such that
   573  	// its initial state is fully current we'll always bootstrap our mempools with a
   574  	// mempool request. The alternative is that our state is not fully current
   575  	// when we boot up, and we cover this second case in the _handleBlock function.
   576  	srv._maybeRequestSync(pp)
   577  
   578  	// At this point we should have processed all the headers. Now we will
   579  	// make a decision on whether to request more headers from this peer based
   580  	// on how many headers we received in this message. Since every HeaderBundle
   581  	// is a response to a GetHeaders request from us with a HeaderLocator embedded in it, receiving
   582  	// anything less than MaxHeadersPerMsg headers from a peer is sufficient to
   583  	// make us think that the peer doesn't have any more interesting headers for us.
   584  	// On the other hand, if the request contains MaxHeadersPerMsg, it is highly
   585  	// On the other hand, if the bundle contains MaxHeadersPerMsg, it is highly
   586  	// headers from the peer would likely be useful.
   587  	if uint32(len(msg.Headers)) < MaxHeadersPerMsg {
   588  		// If we have exhausted the peer's headers but our header chain still isn't
   589  		// current it means the peer we chose isn't current either. So disconnect
   590  		// from her and try to sync with someone else.
   591  		if srv.blockchain.chainState() == SyncStateSyncingHeaders {
   592  			glog.V(1).Infof("Server._handleHeaderBundle: Disconnecting from peer %v because "+
   593  				"we have exhausted their headers but our tip is still only "+
   594  				"at time=%v height=%d", pp,
   595  				time.Unix(int64(srv.blockchain.headerTip().Header.TstampSecs), 0),
   596  				srv.blockchain.headerTip().Header.Height)
   597  			pp.Disconnect()
   598  			return
   599  		}
   600  
   601  		// If we have exhausted the peer's headers but our blocks aren't current,
   602  		// send a GetBlocks message to the peer for as many blocks as we can get.
   603  		if srv.blockchain.chainState() == SyncStateSyncingBlocks {
   604  			// A maxHeight of -1 tells GetBlocks to fetch as many blocks as we can
   605  			// from this peer without worrying about how many blocks the peer actually
   606  			// has. We can do that in this case since this usually happens during sync
   607  			// before we've made any GetBlocks requests to the peer.
   608  			blockTip := srv.blockchain.blockTip()
   609  			glog.V(1).Infof("Server._handleHeaderBundle: *Syncing* blocks starting at "+
   610  				"height %d out of %d from peer %v",
   611  				blockTip.Header.Height+1, msg.TipHeight, pp)
   612  			maxHeight := -1
   613  			srv.GetBlocks(pp, maxHeight)
   614  			return
   615  		}
   616  
   617  		// If we have exhausted the peer's headers and our blocks are current but
   618  		// we still need a few more blocks to line our block chain up with
   619  		// our header chain, send the peer a GetBlocks message for blocks we're
   620  		// positive she has.
   621  		if srv.blockchain.chainState() == SyncStateNeedBlocksss ||
   622  			*(srv.blockchain.blockTip().Hash) != *(srv.blockchain.headerTip().Hash) {
   623  			// If the peer's tip is not in our blockchain then we don't request
   624  			// any blocks from them because they're on some kind of fork that
   625  			// we're either not aware of or that we don't think is the best chain.
   626  			// Doing things this way makes it so that when we request blocks we
   627  			// are 100% positive the peer has them.
   628  			if !srv.blockchain.HasHeader(msg.TipHash) {
   629  				glog.V(1).Infof("Server._handleHeaderBundle: Peer's tip is not in our "+
   630  					"blockchain so not requesting anything else from them. Our block "+
   631  					"tip %v, their tip %v:%d, peer: %v",
   632  					srv.blockchain.blockTip().Header, msg.TipHash, msg.TipHeight, pp)
   633  				return
   634  			}
   635  
   636  			// At this point, we have verified that the peer's tip is in our main
   637  			// header chain. This implies that any blocks we would request from
   638  			// them should be available as long as they don't exceed the peer's
   639  			// tip height.
   640  			blockTip := srv.blockchain.blockTip()
   641  			glog.V(1).Infof("Server._handleHeaderBundle: *Downloading* blocks starting at "+
   642  				"block tip %v out of %d from peer %v",
   643  				blockTip.Header, msg.TipHeight, pp)
   644  			srv.GetBlocks(pp, int(msg.TipHeight))
   645  			return
   646  		}
   647  
   648  		// If we get here it means we have all the headers and blocks we need
   649  		// so there's nothing more to do.
   650  		glog.V(1).Infof("Server._handleHeaderBundle: Tip is up-to-date so no "+
   651  			"need to send anything. Our block tip: %v, their tip: %v:%d, Peer: %v",
   652  			srv.blockchain.blockTip().Header, msg.TipHash, msg.TipHeight, pp)
   653  		return
   654  	}
   655  
   656  	// If we get here it means the peer sent us a full header bundle where at
   657  	// least one of the headers contained in the bundle was new to us. When
   658  	// this happens it means the peer likely has more headers for us to process
   659  	// so follow up with another GetHeaders request. Set the block locator for
   660  	// this request using the node corresponding to the last header in this
   661  	// message. Not doing this and using our header tip instead, for example,
   662  	// would result in us not being able to switch away from our current chain
   663  	// even if the peer has a long fork with more work than our current header
   664  	// chain.
   665  	lastHash, _ := msg.Headers[len(msg.Headers)-1].Hash()
   666  	locator, err := srv.blockchain.HeaderLocatorWithNodeHash(lastHash)
   667  	if err != nil {
   668  		glog.Warningf("Server._handleHeaderBundle: Disconnecting peer %v because "+
   669  			"she indicated that she has more headers but the last hash %v in "+
   670  			"the header bundle does not correspond to a block in our index.",
   671  			pp, lastHash)
   672  		pp.Disconnect()
   673  		return
   674  	}
   675  	pp.AddDeSoMessage(&MsgDeSoGetHeaders{
   676  		StopHash:     &BlockHash{},
   677  		BlockLocator: locator,
   678  	}, false)
   679  	headerTip := srv.blockchain.headerTip()
   680  	glog.V(1).Infof("Server._handleHeaderBundle: *Syncing* headers for blocks starting at "+
   681  		"header tip %v out of %d from peer %v",
   682  		headerTip.Header, msg.TipHeight, pp)
   683  }
   684  
   685  func (srv *Server) _handleGetBlocks(pp *Peer, msg *MsgDeSoGetBlocks) {
   686  	glog.V(1).Infof("Server._handleGetBlocks: Called with message %v from Peer %v", msg, pp)
   687  
   688  	// Let the peer handle this
   689  	pp.AddDeSoMessage(msg, true /*inbound*/)
   690  }
   691  
   692  func (srv *Server) _startSync() {
   693  	// Return now if we're already syncing.
   694  	if srv.SyncPeer != nil {
   695  		glog.V(2).Infof("Server._startSync: Not running because SyncPeer != nil")
   696  		return
   697  	}
   698  	glog.V(1).Infof("Server._startSync: Attempting to start sync")
   699  
   700  	// Set our tip to be the best header tip rather than the best block tip. Using
   701  	// the block tip instead might cause us to select a peer who is missing blocks
   702  	// for the headers we've downloaded.
   703  	bestHeight := srv.blockchain.headerTip().Height
   704  
   705  	// Find a peer with StartingHeight bigger than our best header tip.
   706  	var bestPeer *Peer
   707  	for _, peer := range srv.cmgr.GetAllPeers() {
   708  		if !peer.IsSyncCandidate() {
   709  			continue
   710  		}
   711  
   712  		// Choose the peer with the best height out of everyone who's a
   713  		// valid sync candidate.
   714  		if peer.StartingBlockHeight() < bestHeight {
   715  			continue
   716  		}
   717  
   718  		// TODO: Choose best peers based on ping time and/or the highest
   719  		// starting block height. For now, keeping it simple and just choosing
   720  		// the last one we iterate over with a block height larger than our best.
   721  		bestPeer = peer
   722  	}
   723  
   724  	if bestPeer == nil {
   725  		glog.V(1).Infof("Server._startSync: No sync peer candidates available")
   726  		return
   727  	}
   728  
   729  	// Note we don't need to reset requestedBlocks when the SyncPeer changes
   730  	// since we update requestedBlocks when a Peer disconnects to remove any
   731  	// blocks that are currently being requested. This means that either a
   732  	// still-connected Peer will eventually deliver the blocks OR we'll eventually
   733  	// disconnect from that Peer, removing the blocks we requested from her from
   734  	// requestedBlocks, which will cause us to re-download them again after.
   735  
   736  	// Regardless of what our SyncState is, always start by sending a GetHeaders
   737  	// message to our SyncPeer. This ensures that our header chains are in-sync
   738  	// before we start requesting blocks. If we were to go directly to fetching
   739  	// blocks from our SyncPeer without doing this first, we wouldn't be 100%
   740  	// sure that she has them.
   741  	glog.V(1).Infof("Server._startSync: Syncing headers to height %d from peer %v",
   742  		bestPeer.StartingBlockHeight(), bestPeer)
   743  
   744  	// Send a GetHeaders message to the Peer to start the headers sync.
   745  	// Note that we include an empty BlockHash as the stopHash to indicate we want as
   746  	// many headers as the Peer can give us.
   747  	locator := srv.blockchain.LatestHeaderLocator()
   748  	bestPeer.AddDeSoMessage(&MsgDeSoGetHeaders{
   749  		StopHash:     &BlockHash{},
   750  		BlockLocator: locator,
   751  	}, false)
   752  	glog.V(1).Infof("Server._startSync: Downloading headers for blocks starting at "+
   753  		"header tip height %v from peer %v", bestHeight, bestPeer)
   754  
   755  	srv.SyncPeer = bestPeer
   756  }
   757  
   758  func (srv *Server) _handleNewPeer(pp *Peer) {
   759  	isSyncCandidate := pp.IsSyncCandidate()
   760  	isSyncing := srv.blockchain.isSyncing()
   761  	chainState := srv.blockchain.chainState()
   762  	glog.V(1).Infof("Server._handleNewPeer: Processing NewPeer: (%v); IsSyncCandidate(%v), syncPeerIsNil=(%v), IsSyncing=(%v), ChainState=(%v)",
   763  		pp, isSyncCandidate, (srv.SyncPeer == nil), isSyncing, chainState)
   764  
   765  	// Request a sync if we're ready
   766  	srv._maybeRequestSync(pp)
   767  
   768  	// Start syncing by choosing the best candidate.
   769  	if isSyncCandidate && srv.SyncPeer == nil {
   770  		srv._startSync()
   771  	}
   772  }
   773  
   774  func (srv *Server) _cleanupDonePeerPeerState(pp *Peer) {
   775  	// Grab the dataLock since we'll be modifying requestedBlocks
   776  	srv.dataLock.Lock()
   777  	defer srv.dataLock.Unlock()
   778  
   779  	// Choose a new Peer to switch our queued and in-flight requests to. If no Peer is
   780  	// found, just remove any requests queued or in-flight for the disconnecting Peer
   781  	// and return.
   782  	//
   783  	// If we find a newPeer, reassign in-flight and queued requests to this Peer and
   784  	// re-request them if we have room in our in-flight list.
   785  
   786  	// If the newPeer exists but doesn't have these transactions, they will
   787  	// simply reply with an empty TransactionBundle
   788  	// for each GetTransactions we send them. This will result in the
   789  	// requests eventually expiring, which will cause us to remove them from
   790  	// inventoryProcessed and potentially get the data from another Peer in the future.
   791  	//
   792  	// TODO: Sending a sync/mempool message to a random Peer periodically seems like it would
   793  	// be a good way to fill any gaps.
   794  	newPeer := srv.cmgr.RandomPeer()
   795  	if newPeer == nil {
   796  		// If we don't have a new Peer, remove everything that was destined for
   797  		// this Peer. Note we don't need to copy the iterator because everything
   798  		// below doesn't take a reference to it.
   799  		for hash, requestInfo := range srv.requestedTransactionsMap {
   800  			if requestInfo.PeerWhoSentInv.ID == pp.ID {
   801  				srv._removeRequest(&hash)
   802  			}
   803  		}
   804  		return
   805  	}
   806  
   807  	// If we get here then we know we have a valid newPeer so re-assign all the
   808  	// queued requests to newPeer.
   809  
   810  	// Now deal with transactions. They don't have a queue and so all we need to do
   811  	// is reassign the requests that were in-flight to the old Peer and then make
   812  	// the requests to the newPeer.
   813  	txnHashesReassigned := []*BlockHash{}
   814  	for hashIter, requestInfo := range srv.requestedTransactionsMap {
   815  		// Don't do anything if the request was not meant for the Peer we're
   816  		// disconnecting; we only want to reassign that Peer's requests to the new Peer.
   817  		if requestInfo.PeerWhoSentInv.ID != pp.ID {
   818  			continue
   819  		}
   820  		// Make a copy of the hash so we can take a pointer to it.
   821  		hashCopy := &BlockHash{}
   822  		copy(hashCopy[:], hashIter[:])
   823  
   824  		// We will be sending this request to the new peer so update the info
   825  		// to reflect that.
   826  		requestInfo.PeerWhoSentInv = newPeer
   827  		requestInfo.TimeRequested = time.Now()
   828  		txnHashesReassigned = append(txnHashesReassigned, hashCopy)
   829  	}
   830  	// Request any hashes we might have reassigned in a goroutine to keep things
   831  	// moving.
   832  	newPeer.AddDeSoMessage(&MsgDeSoGetTransactions{
   833  		HashList: txnHashesReassigned,
   834  	}, false)
   835  }
   836  
   837  func (srv *Server) _handleBitcoinManagerUpdate(bmUpdate *MsgDeSoBitcoinManagerUpdate) {
   838  	glog.V(1).Infof("Server._handleBitcoinManagerUpdate: Being called")
   839  
   840  	// Regardless of whether the DeSo chain is in-sync, consider adding any BitcoinExchange
   841  	// transactions we've found to our mempool. We do this to minimize the chances that the
   842  	// network ever loses track of someone's BitcoinExchange.
   843  	if len(bmUpdate.TransactionsFound) > 0 {
   844  		go func() {
   845  			glog.V(2).Infof("Server._handleBitcoinManagerUpdate: BitcoinManager "+
   846  				"found %d BitcoinExchange transactions for us to consider",
   847  				len(bmUpdate.TransactionsFound))
   848  
   849  			// Put all the transactions through some validation to see if they're
   850  			// worth our time. This saves us from getting spammed by _addNewTxnAndRelay
   851  			// when processing stale blocks.
   852  			//
   853  			// Note that we pass a nil mempool in order to avoid considering transactions
   854  			// that are in the mempool but lacking a merkle proof. If transactions are
   855  			// invalid then a separate mempool check later will catch them.
   856  			validTransactions := []*MsgDeSoTxn{}
   857  			for _, burnTxn := range bmUpdate.TransactionsFound {
   858  				err := srv.blockchain.ValidateTransaction(
   859  					burnTxn, srv.blockchain.blockTip().Height+1, true, /*verifySignatures*/
   860  					nil /*mempool*/)
   861  				if err == nil {
   862  					validTransactions = append(validTransactions, burnTxn)
   863  				} else {
   864  					glog.V(1).Infof("Server._handleBitcoinManagerUpdate: Problem adding Bitcoin "+
   865  						"burn transaction: %v", err)
   866  				}
   867  			}
   868  
   869  			glog.V(2).Infof("Server._handleBitcoinManagerUpdate: Processing %d out of %d "+
   870  				"transactions that were actually valid", len(validTransactions),
   871  				len(bmUpdate.TransactionsFound))
   872  
   873  			totalAdded := 0
   874  			for _, validTx := range validTransactions {
   875  				// This shouldn't care about the min burn work because it tries to add to
   876  				// the mempool directly. We should never get an error here because we've already
   877  				// validated all of the transactions.
   878  				//
   879  				// Note we set rateLimit=false because we have a global minimum txn fee that should
   880  				// prevent spam on its own.
   881  				mempoolTxs, err := srv._addNewTxn(
   882  					nil, validTx, false /*rateLimit*/, true /*verifySignatures*/)
   883  				totalAdded += len(mempoolTxs)
   884  
   885  				if err != nil {
   886  					glog.V(1).Infof("Server._handleBitcoinManagerUpdate: Problem adding Bitcoin "+
   887  						"burn transaction during _addNewTxnAndRelay: %v", err)
   888  				}
   889  			}
   890  
   891  			// If we're fully current after accepting all the BitcoinExchange txns then let the
   892  			// peer start sending us INV messages
   893  			srv._maybeRequestSync(nil)
   894  
   895  			glog.V(2).Infof("Server._handleBitcoinManagerUpdate: Successfully added %d out of %d "+
   896  				"transactions", totalAdded, len(bmUpdate.TransactionsFound))
   897  		}()
   898  	}
   899  
   900  	// If we don't have a SyncPeer right now, kick off a sync if we can. No need to
   901  	// check if we're syncing or not since all this does is send a getheaders to a
   902  	// Peer who's available.
   903  	if srv.SyncPeer == nil {
   904  		glog.V(1).Infof("Server._handleBitcoinManagerUpdate: SyncPeer is nil; calling startSync")
   905  		srv._startSync()
   906  		return
   907  	}
   908  
   909  	if !srv.blockchain.isSyncing() {
   910  
   911  		//glog.V(1).Infof("Server._handleBitcoinManagerUpdate: SyncPeer is NOT nil and " +
   912  		//	"BitcoinManager is time-current; sending " +
   913  		//	"DeSo getheaders for good measure")
   914  		glog.V(1).Infof("Server._handleBitcoinManagerUpdate: SyncPeer is NOT nil; sending " +
   915  			"DeSo getheaders for good measure")
   916  		locator := srv.blockchain.LatestHeaderLocator()
   917  		srv.SyncPeer.AddDeSoMessage(&MsgDeSoGetHeaders{
   918  			StopHash:     &BlockHash{},
   919  			BlockLocator: locator,
   920  		}, false)
   921  	}
   922  
   923  	// Note there is an edge case where we may be stuck in state SyncingBlocks. Calling
   924  	// GetBlocks when we're in this state fixes the edge case and doesn't have any
   925  	// negative side-effects otherwise.
   926  	if srv.blockchain.chainState() == SyncStateSyncingBlocks ||
   927  		srv.blockchain.chainState() == SyncStateNeedBlocksss {
   928  
   929  		glog.V(1).Infof("Server._handleBitcoinManagerUpdate: SyncPeer is NOT nil and " +
   930  			"BitcoinManager is time-current; node is in SyncStateSyncingBlocks. Calling " +
   931  			"GetBlocks for good measure.")
   932  		// Setting maxHeight = -1 gets us as many blocks as we can get from our
   933  		// peer, which is OK because we can assume the peer has all of them when
   934  		// we're syncing.
   935  		maxHeight := -1
   936  		srv.GetBlocks(srv.SyncPeer, maxHeight)
   937  		return
   938  	}
   939  }
   940  
   941  func (srv *Server) _handleDonePeer(pp *Peer) {
   942  	glog.V(1).Infof("Server._handleDonePeer: Processing DonePeer: %v", pp)
   943  
   944  	srv._cleanupDonePeerPeerState(pp)
   945  
   946  	// Attempt to find a new peer to sync from if the quitting peer is the
   947  	// sync peer and if our blockchain isn't current.
   948  	if srv.SyncPeer == pp && srv.blockchain.isSyncing() {
   949  
   950  		srv.SyncPeer = nil
   951  		srv._startSync()
   952  	}
   953  }
   954  
   955  func (srv *Server) _relayTransactions() {
   956  	glog.V(1).Infof("Server._relayTransactions: Waiting for mempool readOnlyView to regenerate")
   957  	srv.mempool.BlockUntilReadOnlyViewRegenerated()
   958  	glog.V(1).Infof("Server._relayTransactions: Mempool view has regenerated")
   959  
   960  	// For each peer, compute the transactions they're missing from the mempool and
   961  	// send them an inv.
   962  	allPeers := srv.cmgr.GetAllPeers()
   963  	txnList := srv.mempool.readOnlyUniversalTransactionList
   964  	for _, pp := range allPeers {
   965  		if !pp.canReceiveInvMessagess {
   966  			glog.V(1).Infof("Skipping invs for peer %v because not ready "+
   967  				"yet: %v", pp, pp.canReceiveInvMessagess)
   968  			continue
   969  		}
   970  		// For each peer construct an inventory message that excludes transactions
   971  		// for which the minimum fee is below what the Peer will allow.
   972  		invMsg := &MsgDeSoInv{}
   973  		for _, newTxn := range txnList {
   974  			invVect := &InvVect{
   975  				Type: InvTypeTx,
   976  				Hash: *newTxn.Hash,
   977  			}
   978  
   979  			// If the peer has this txn already then skip it.
   980  			if pp.knownInventory.Contains(*invVect) {
   981  				continue
   982  			}
   983  
   984  			invMsg.InvList = append(invMsg.InvList, invVect)
   985  		}
   986  		if len(invMsg.InvList) > 0 {
   987  			pp.AddDeSoMessage(invMsg, false)
   988  		}
   989  	}
   990  
   991  	glog.V(1).Infof("Server._relayTransactions: Relay to all peers is complete!")
   992  }
   993  
   994  func (srv *Server) _addNewTxn(
   995  	pp *Peer, txn *MsgDeSoTxn, rateLimit bool, verifySignatures bool) ([]*MempoolTx, error) {
   996  
   997  	if srv.readOnlyMode {
   998  		err := fmt.Errorf("Server._addNewTxn: Not processing txn from peer %v "+
   999  			"because node is in read-only mode: %v", pp, srv.readOnlyMode)
  1000  		glog.V(1).Info(err.Error())
  1001  		return nil, err
  1002  	}
  1003  
  1004  	if srv.blockchain.chainState() != SyncStateFullyCurrent {
  1005  
  1006  		err := fmt.Errorf("Server._addNewTxn: Cannot process txn "+
  1007  			"from peer %v while syncing: %v %v", pp, srv.blockchain.chainState(), txn.Hash())
  1008  		glog.Error(err)
  1009  		return nil, err
  1010  	}
  1011  
  1012  	glog.V(1).Infof("Server._addNewTxn: txn: %v, peer: %v", txn, pp)
  1013  
  1014  	// Try and add the transaction to the mempool.
  1015  	peerID := uint64(0)
  1016  	if pp != nil {
  1017  		peerID = pp.ID
  1018  	}
  1019  
  1020  	srv.blockchain.ChainLock.RLock()
  1021  	newlyAcceptedTxns, err := srv.mempool.ProcessTransaction(
  1022  		txn, true /*allowUnconnectedTxn*/, rateLimit, peerID, verifySignatures)
  1023  	srv.blockchain.ChainLock.RUnlock()
  1024  	if err != nil {
  1025  		return nil, errors.Wrapf(err, "Server._addNewTxn: Problem adding transaction to mempool: ")
  1026  	}
  1027  
  1028  	glog.V(1).Infof("Server._addNewTxn: newlyAcceptedTxns: %v, Peer: %v", newlyAcceptedTxns, pp)
  1029  
  1030  	return newlyAcceptedTxns, nil
  1031  }
  1032  
  1033  // It's assumed that the caller will hold the ChainLock for reading so
  1034  // that the mempool transactions don't shift under our feet.
  1035  func (srv *Server) _handleBlockMainChainConnected(event *BlockEvent) {
  1036  	blk := event.Block
  1037  
  1038  	// Don't do anything mempool-related until our best block chain is done
  1039  	// syncing.
  1040  	//
  1041  	// We add a second check as an edge-case to protect against when
  1042  	// this function is called with an uninitialized blockchain object. This
  1043  	// can happen during initChain() for example.
  1044  	if srv.blockchain == nil || !srv.blockchain.isInitialized || srv.blockchain.isSyncing() {
  1045  		return
  1046  	}
  1047  
  1048  	// If we're current, update the mempool to remove the transactions
  1049  	// in this block from it. We can't do this in a goroutine because we
  1050  	// need each mempool update to happen in the same order as that in which
  1051  	// we connected the blocks and this wouldn't be guaranteed if we kicked
  1052  	// off a goroutine for each update.
  1053  	newlyAcceptedTxns := srv.mempool.UpdateAfterConnectBlock(blk)
  1054  	_ = newlyAcceptedTxns
  1055  
  1056  	blockHash, _ := blk.Header.Hash()
  1057  	glog.V(1).Infof("_handleBlockMainChainConnected: Block %s height %d connected to "+
  1058  		"main chain and chain is current.", hex.EncodeToString(blockHash[:]), blk.Header.Height)
  1059  }
  1060  
  1061  // It's assumed that the caller will hold the ChainLock for reading so
  1062  // that the mempool transactions don't shift under our feet.
  1063  func (srv *Server) _handleBlockMainChainDisconnected(event *BlockEvent) {
  1064  	blk := event.Block
  1065  
  1066  	// Don't do anything mempool-related until our best block chain is done
  1067  	// syncing.
  1068  	if srv.blockchain.isSyncing() {
  1069  		return
  1070  	}
  1071  
  1072  	// If we're current, update the mempool to add back the transactions
  1073  	// in this block. We can't do this in a goroutine because we
  1074  	// need each mempool update to happen in the same order as that in which
  1075  	// we connected the blocks and this wouldn't be guaranteed if we kicked
  1076  	// off a goroutine for each update.
  1077  	srv.mempool.UpdateAfterDisconnectBlock(blk)
  1078  
  1079  	blockHash, _ := blk.Header.Hash()
  1080  	glog.V(1).Infof("_handleBlockMainChainDisconnect: Block %s height %d disconnected from "+
  1081  		"main chain and chain is current.", hex.EncodeToString(blockHash[:]), blk.Header.Height)
  1082  }
  1083  
  1084  func (srv *Server) _maybeRequestSync(pp *Peer) {
  1085  	// Send the mempool message if DeSo and Bitcoin are fully current
  1086  	if srv.blockchain.chainState() == SyncStateFullyCurrent {
  1087  		if pp != nil {
  1088  			glog.V(1).Infof("Server._maybeRequestSync: Sending mempool message: %v", pp)
  1089  			pp.AddDeSoMessage(&MsgDeSoMempool{}, false)
  1090  		} else {
  1091  			glog.V(1).Infof("Server._maybeRequestSync: NOT sending mempool message because peer is nil: %v", pp)
  1092  		}
  1093  	} else {
  1094  		glog.V(1).Infof("Server._maybeRequestSync: NOT sending mempool message because not current: %v, %v",
  1095  			srv.blockchain.chainState(),
  1096  			pp)
  1097  	}
  1098  }
  1099  
  1100  func (srv *Server) _handleBlockAccepted(event *BlockEvent) {
  1101  	blk := event.Block
  1102  
  1103  	// Don't relay blocks until our best block chain is done syncing.
  1104  	if srv.blockchain.isSyncing() {
  1105  		return
  1106  	}
  1107  
  1108  	// If we're fully current after accepting all the blocks but we have not
  1109  	// yet requested all of the mempool transactions from one of our peers, do
  1110  	// that now. This covers the case where our node is behind when it boots
  1111  	// up, making it so that right at the end of the node's initial sync, after
  1112  	// everything has been connected, we then bootstrap our mempool.
  1113  	srv._maybeRequestSync(nil)
  1114  
  1115  	// Construct an inventory vector to relay to peers.
  1116  	blockHash, _ := blk.Header.Hash()
  1117  	invVect := &InvVect{
  1118  		Type: InvTypeBlock,
  1119  		Hash: *blockHash,
  1120  	}
  1121  
  1122  	// Iterate through all the peers and relay the InvVect to them. This will only
  1123  	// actually be relayed if it's not already in the peer's knownInventory.
  1124  	allPeers := srv.cmgr.GetAllPeers()
  1125  	for _, pp := range allPeers {
  1126  		pp.AddDeSoMessage(&MsgDeSoInv{
  1127  			InvList: []*InvVect{invVect},
  1128  		}, false)
  1129  	}
  1130  }
  1131  
  1132  func (srv *Server) _logAndDisconnectPeer(pp *Peer, blockMsg *MsgDeSoBlock, suffix string) {
  1133  	// Disconnect the Peer. Generally-speaking, disconnecting from the peer will cause its
  1134  	// requested blocks and txns to be removed from the global maps and cause it to be
  1135  	// replaced by another peer. Furthermore,
  1136  	// if we're in the process of syncing our node, the startSync process will also
  1137  	// be restarted as a result. If we're not syncing and have instead reached
  1138  	// the steady-state, then the next interesting inv message should cause us to
  1139  	// fetch headers, blocks, etc. So we'll be back.
  1140  	glog.Errorf("Server._handleBlock: Encountered an error processing "+
  1141  		"block %v. Disconnecting from peer %v: %s", blockMsg, pp, suffix)
  1142  	pp.Disconnect()
  1143  }
  1144  
  1145  func (srv *Server) _handleBlock(pp *Peer, blk *MsgDeSoBlock) {
  1146  	glog.Infof("Server._handleBlock: Received block ( %v / %v ) from Peer %v",
  1147  		blk.Header.Height, srv.blockchain.headerTip().Height, pp)
  1148  
  1149  	// Pull out the header for easy access.
  1150  	blockHeader := blk.Header
  1151  	if blockHeader == nil {
  1152  		// Should never happen but check it nevertheless.
  1153  		srv._logAndDisconnectPeer(pp, blk, "Header was nil")
  1154  		return
  1155  	}
  1156  	// Compute the hash of the block.
  1157  	blockHash, err := blk.Header.Hash()
  1158  	if err != nil {
  1159  		// This should never happen if we got this far but log the error, clear the
  1160  		// requestedBlocks, disconnect from the peer and return just in case.
  1161  		srv._logAndDisconnectPeer(
  1162  			pp, blk, "Problem computing block hash")
  1163  		return
  1164  	}
  1165  
  1166  	if pp != nil {
  1167  		delete(pp.requestedBlocks, *blockHash)
  1168  	}
  1169  
  1170  	// Check that the mempool has not received a transaction that would forbid this block's signature pubkey.
  1171  	// This is a minimal check, a more thorough check is made in the ProcessBlock function. This check is
  1172  	// necessary because the ProcessBlock function only has access to mined transactions. Therefore, if an
  1173  	// attacker were to prevent a "forbid X pubkey" transaction from mining, they could force nodes to continue
  1174  	// processing their blocks.
  1175  	if len(srv.blockchain.trustedBlockProducerPublicKeys) > 0 && blockHeader.Height >= srv.blockchain.trustedBlockProducerStartHeight {
  1176  		if blk.BlockProducerInfo != nil {
  1177  			_, entryExists := srv.mempool.readOnlyUtxoView.ForbiddenPubKeyToForbiddenPubKeyEntry[MakePkMapKey(
  1178  				blk.BlockProducerInfo.PublicKey)]
  1179  			if entryExists {
  1180  				srv._logAndDisconnectPeer(pp, blk, "Got forbidden block signature public key.")
  1181  				return
  1182  			}
  1183  		}
  1184  	}
  1185  
  1186  	// Only verify signatures for recent blocks.
  1187  	var isOrphan bool
  1188  	if srv.blockchain.isSyncing() {
  1189  		glog.V(1).Infof("Server._handleBlock: Processing block %v WITHOUT "+
  1190  			"signature checking because SyncState=%v for peer %v",
  1191  			blk, srv.blockchain.chainState(), pp)
  1192  		_, isOrphan, err = srv.blockchain.ProcessBlock(blk, false)
  1193  
  1194  	} else {
  1195  		// TODO: Signature checking slows things down because it acquires the ChainLock.
  1196  		// The optimal solution is to check signatures in a way that doesn't acquire the
  1197  		// ChainLock, which is what Bitcoin Core does.
  1198  		glog.V(1).Infof("Server._handleBlock: Processing block %v WITH "+
  1199  			"signature checking because SyncState=%v for peer %v",
  1200  			blk, srv.blockchain.chainState(), pp)
  1201  		_, isOrphan, err = srv.blockchain.ProcessBlock(blk, true)
  1202  	}
  1203  
  1204  	// If we hit an error then abort mission entirely. We should generally never
  1205  	// see an error with a block from a peer.
  1206  	if err != nil {
  1207  		if strings.Contains(err.Error(), "RuleErrorDuplicateBlock") {
  1208  			// Just warn on duplicate blocks but don't disconnect the peer.
  1209  			// TODO: This works around a bug similar to the one referenced in the duplicate
  1210  			// headers comment above, but in the future we should probably try and figure
  1211  			// out a way to be more strict about things.
  1212  			glog.Warningf("Got duplicate block %v from peer %v", blk, pp)
  1213  		} else {
  1214  			srv._logAndDisconnectPeer(
  1215  				pp, blk,
  1216  				errors.Wrap(err, "Error while processing block").Error())
  1217  			return
  1218  		}
  1219  	}
  1220  	if isOrphan {
  1221  		// We should generally never receive orphan blocks. It indicates something
  1222  		// went wrong in our headers syncing.
  1223  		glog.Errorf("ERROR: Received orphan block with hash %v height %v. "+
  1224  			"This should never happen", blockHash, blk.Header.Height)
  1225  		return
  1226  	}
  1227  
  1228  	// We shouldn't be receiving blocks while syncing headers.
  1229  	if srv.blockchain.chainState() == SyncStateSyncingHeaders {
  1230  		srv._logAndDisconnectPeer(
  1231  			pp, blk,
  1232  			"We should never get blocks when we're syncing headers")
  1233  		return
  1234  	}
  1235  
  1236  	// If we're syncing blocks, call GetBlocks and try to get as many blocks
  1237  	// from our peer as we can. This allows the initial block download to be
  1238  	// more incremental since every time we're able to accept a block (or
  1239  	// group of blocks) we indicate this to our peer so they can send us more.
  1240  	if srv.blockchain.chainState() == SyncStateSyncingBlocks {
  1241  		// Setting maxHeight = -1 gets us as many blocks as we can get from our
  1242  		// peer, which is OK because we can assume the peer has all of them when
  1243  		// we're syncing.
  1244  		maxHeight := -1
  1245  		srv.GetBlocks(pp, maxHeight)
  1246  		return
  1247  	}
  1248  
  1249  	if srv.blockchain.chainState() == SyncStateNeedBlocksss {
  1250  		// If we don't have any blocks to wait for anymore, hit the peer with
  1251  		// a GetHeaders request to see if there are any more headers we should
  1252  		// be aware of. This will generally happen in two cases:
  1253  		// - With our sync peer after we're almost at the end of syncing blocks.
  1254  		//   In this case, calling GetHeaders once requestedBlocks is almost
  1255  		//   empty will result in us getting all of the remaining blocks right up
  1256  		//   to the tip and then stopping, which is exactly what we want.
  1257  		// - With a peer that sent us an inv. In this case, the peer could have
  1258  		//   more blocks for us or it could not. Either way, it's good to check,
  1259  		//   and worst case the peer will return an empty header bundle that will
  1260  		//   result in us not sending anything back because there won't be any new
  1261  		//   blocks to request.
  1262  		locator := srv.blockchain.LatestHeaderLocator()
  1263  		pp.AddDeSoMessage(&MsgDeSoGetHeaders{
  1264  			StopHash:     &BlockHash{},
  1265  			BlockLocator: locator,
  1266  		}, false)
  1267  		return
  1268  	}
  1269  
  1270  	// If we get here, it means we're in SyncStateFullySynced, which is great.
  1271  	// In this case we shoot a MEMPOOL message over to the peer to bootstrap the mempool.
  1272  	srv._maybeRequestSync(pp)
  1273  }
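
// To summarize, _handleBlock above drives the sync state machine as follows:
//
//	SyncStateSyncingHeaders -> blocks are unexpected; log and disconnect.
//	SyncStateSyncingBlocks  -> keep pulling blocks (GetBlocks with maxHeight = -1).
//	SyncStateNeedBlocksss   -> probe for more headers (GetHeaders with our latest locator).
//	SyncStateFullySynced    -> request the peer's mempool via _maybeRequestSync.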
  1274  
  1275  func (srv *Server) _handleInv(peer *Peer, msg *MsgDeSoInv) {
  1276  	if !peer.isOutbound && srv.ignoreInboundPeerInvMessages {
  1277  		glog.Infof("_handleInv: Ignoring inv message from inbound peer because "+
  1278  			"ignore_inbound_peer_inv_messages=true: %v", peer)
  1279  		return
  1280  	}
  1281  	peer.AddDeSoMessage(msg, true /*inbound*/)
  1282  }
  1283  
  1284  func (srv *Server) _handleGetTransactions(pp *Peer, msg *MsgDeSoGetTransactions) {
  1285  	glog.V(1).Infof("Server._handleGetTransactions: Received GetTransactions "+
  1286  		"message %v from Peer %v", msg, pp)
  1287  
  1288  	pp.AddDeSoMessage(msg, true /*inbound*/)
  1289  }
  1290  
  1291  func (srv *Server) ProcessSingleTxnWithChainLock(
  1292  	pp *Peer, txn *MsgDeSoTxn) ([]*MempoolTx, error) {
  1293  	// Lock the chain for reading so that transactions don't shift under our feet
  1294  	// when processing this bundle. Not doing this could cause us to miss transactions
  1295  	// erroneously.
  1296  	//
  1297  	// TODO(performance): We should probably do this less frequently.
  1298  	srv.blockchain.ChainLock.RLock()
  1299  	defer func() {
  1300  		srv.blockchain.ChainLock.RUnlock()
  1301  	}()
  1302  	// Note we set rateLimit=false because we have a global minimum txn fee that should
  1303  	// prevent spam on its own.
  1304  	return srv.mempool.ProcessTransaction(
  1305  		txn, true /*allowUnconnectedTxn*/, false /*rateLimit*/,
  1306  		pp.ID, true /*verifySignatures*/)
  1307  }
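
// One way to act on the TODO(performance) notes above is to hold the ChainLock
// across a whole bundle rather than re-acquiring it per transaction. A minimal
// sketch of that idea (hypothetical helper, not part of this file; it reuses the
// same ProcessTransaction arguments as above):
//
//	func (srv *Server) processTxnBundleWithChainLock(
//		pp *Peer, txns []*MsgDeSoTxn) [][]*MempoolTx {
//
//		// Hypothetical batching helper; sketch only. Take the read lock once
//		// for the whole bundle instead of once per transaction.
//		srv.blockchain.ChainLock.RLock()
//		defer srv.blockchain.ChainLock.RUnlock()
//
//		results := make([][]*MempoolTx, 0, len(txns))
//		for _, txn := range txns {
//			accepted, err := srv.mempool.ProcessTransaction(
//				txn, true /*allowUnconnectedTxn*/, false /*rateLimit*/,
//				pp.ID, true /*verifySignatures*/)
//			if err != nil {
//				// Skip failures, as _processTransactions does after logging.
//				continue
//			}
//			results = append(results, accepted)
//		}
//		return results
//	}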
  1308  
  1309  func (srv *Server) _processTransactions(pp *Peer, msg *MsgDeSoTransactionBundle) []*MempoolTx {
  1310  	// Try and add all the transactions to our mempool in the order we received
  1311  	// them. If any fail to get added, just log an error.
  1312  	//
  1313  	// TODO: It would be nice if we did something fancy here like if we kept
  1314  	// track of rejected transactions and retried them every time we connected
  1315  	// a block. Doing something like this would make it so that if a transaction
  1316  	// was initially rejected due to us not having its dependencies, then we
  1317  	// will eventually add it as opposed to just forgetting about it.
  1318  	glog.V(2).Infof("Server._processTransactions: Processing message %v from "+
  1319  		"peer %v", msg, pp)
  1320  	transactionsToRelay := []*MempoolTx{}
  1321  	for _, txn := range msg.Transactions {
  1322  		// Process the transaction while allowing unconnected txns and verifying
  1323  		// signatures. Note that rateLimit=false (see ProcessSingleTxnWithChainLock).
  1324  		newlyAcceptedTxns, err := srv.ProcessSingleTxnWithChainLock(pp, txn)
  1325  		if err != nil {
  1326  			glog.Errorf("Server._processTransactions: Rejected "+
  1327  				"transaction %v from peer %v from mempool: %v", txn, pp, err)
  1328  			// A peer should know better than to send us a transaction that's below
  1329  			// our min feerate, which they see when we send them a version message.
  1330  			if err == TxErrorInsufficientFeeMinFee {
  1331  				glog.Errorf("Server._processTransactions: Disconnecting "+
  1332  					"Peer %v for sending us a transaction %v with fee below the minimum fee %d",
  1333  					pp, txn, srv.mempool.minFeeRateNanosPerKB)
  1334  				pp.Disconnect()
  1335  			}
  1336  
  1337  			// Don't do anything else if we got an error.
  1338  			continue
  1339  		}
  1340  		if len(newlyAcceptedTxns) == 0 {
  1341  			glog.Infof("Server._processTransactions: "+
  1342  				"Transaction %v from peer %v was added as an ORPHAN", spew.Sdump(txn), pp)
  1343  		}
  1344  
  1345  		// If we get here then the transaction was accepted into our mempool.
  1346  		// Queue the accepted transactions for relay to all of the peers
  1347  		// who don't yet have them.
  1348  		transactionsToRelay = append(transactionsToRelay, newlyAcceptedTxns...)
  1349  	}
  1350  
  1351  	return transactionsToRelay
  1352  }
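
// The transactions returned above are ultimately announced to other peers as
// inv messages, mirroring the block-inv broadcast near the top of this section.
// A minimal sketch of that announcement, assuming InvVect carries a Type
// (e.g. InvTypeTx) and a Hash, and that MempoolTx exposes its Hash (those field
// names are assumptions here, not confirmed by this file):
//
//	func (srv *Server) announceTxns(txns []*MempoolTx) {
//		// Field names Type/Hash and the InvTypeTx constant are assumptions.
//		invList := []*InvVect{}
//		for _, mempoolTx := range txns {
//			invList = append(invList, &InvVect{
//				Type: InvTypeTx,
//				Hash: *mempoolTx.Hash,
//			})
//		}
//		for _, pp := range srv.cmgr.GetAllPeers() {
//			pp.AddDeSoMessage(&MsgDeSoInv{InvList: invList}, false)
//		}
//	}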
  1353  
  1354  func (srv *Server) _handleTransactionBundle(pp *Peer, msg *MsgDeSoTransactionBundle) {
  1355  	glog.V(1).Infof("Server._handleTransactionBundle: Received TransactionBundle "+
  1356  		"message with %v transactions from Peer %v", len(msg.Transactions), pp)
  1357  
  1358  	pp.AddDeSoMessage(msg, true /*inbound*/)
  1359  }
  1360  
  1361  func (srv *Server) _handleMempool(pp *Peer, msg *MsgDeSoMempool) {
  1362  	glog.V(1).Infof("Server._handleMempool: Received Mempool message from Peer %v", pp)
  1363  
  1364  	pp.canReceiveInvMessagess = true
  1365  }
  1366  
  1367  func (srv *Server) StartStatsdReporter() {
  1368  	go func() {
  1369  	out:
  1370  		for {
  1371  			select {
  1372  			case <-time.After(5 * time.Second):
  1373  				tags := []string{}
  1374  
  1375  				// Report mempool size
  1376  				mempoolTotal := len(srv.mempool.readOnlyUniversalTransactionList)
  1377  				srv.statsdClient.Gauge("MEMPOOL.COUNT", float64(mempoolTotal), tags, 1)
  1378  
  1379  				// Report block + headers height
  1380  				blocksHeight := srv.blockchain.BlockTip().Height
  1381  				srv.statsdClient.Gauge("BLOCKS.HEIGHT", float64(blocksHeight), tags, 1)
  1382  
  1383  				headersHeight := srv.blockchain.HeaderTip().Height
  1384  				srv.statsdClient.Gauge("HEADERS.HEIGHT", float64(headersHeight), tags, 1)
  1385  
  1386  			case <-srv.mempool.quit:
  1387  				break out
  1388  			}
  1389  		}
  1390  	}()
  1391  }
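
// Note that time.After allocates a fresh timer on every loop iteration. Since
// the reporter above runs forever at a fixed five-second cadence, a time.Ticker
// is a behavioral equivalent that avoids the per-iteration allocation. A sketch
// of the same loop using a ticker (illustrative only, not part of this file):
//
//	go func() {
//		ticker := time.NewTicker(5 * time.Second)
//		defer ticker.Stop()
//		for {
//			select {
//			case <-ticker.C:
//				// Same gauge reporting as above.
//				mempoolTotal := len(srv.mempool.readOnlyUniversalTransactionList)
//				srv.statsdClient.Gauge("MEMPOOL.COUNT", float64(mempoolTotal), nil, 1)
//			case <-srv.mempool.quit:
//				return
//			}
//		}
//	}()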
  1392  
  1393  func (srv *Server) _handleAddrMessage(pp *Peer, msg *MsgDeSoAddr) {
  1394  	srv.addrsToBroadcastLock.Lock()
  1395  	defer srv.addrsToBroadcastLock.Unlock()
  1396  
  1397  	glog.V(1).Infof("Server._handleAddrMessage: Received Addr from peer %v with addrs %v", pp, spew.Sdump(msg.AddrList))
  1398  
  1399  	// If this addr message contains more than the maximum allowed number of addresses
  1400  	// then disconnect this peer.
  1401  	if len(msg.AddrList) > MaxAddrsPerAddrMsg {
  1402  		glog.Errorf("Server._handleAddrMessage: Disconnecting "+
  1403  			"Peer %v for sending us an addr message with %d addresses, which exceeds "+
  1404  			"the max allowed %d",
  1405  			pp, len(msg.AddrList), MaxAddrsPerAddrMsg)
  1406  		pp.Disconnect()
  1407  		return
  1408  	}
  1409  
  1410  	// Add all the addresses we received to the addrmgr.
  1411  	netAddrsReceived := []*wire.NetAddress{}
  1412  	for _, addr := range msg.AddrList {
  1413  		addrAsNetAddr := wire.NewNetAddressIPPort(addr.IP, addr.Port, (wire.ServiceFlag)(addr.Services))
  1414  		if !addrmgr.IsRoutable(addrAsNetAddr) {
  1415  			glog.V(1).Infof("Dropping address %v from peer %v because it is not routable", addr, pp)
  1416  			continue
  1417  		}
  1418  
  1419  		netAddrsReceived = append(
  1420  			netAddrsReceived, addrAsNetAddr)
  1421  	}
  1422  	srv.cmgr.addrMgr.AddAddresses(netAddrsReceived, pp.netAddr)
  1423  
  1424  	// If the message had <= 10 addrs in it, then queue all the addresses for relaying
  1425  	// on the next cycle.
  1426  	if len(msg.AddrList) <= 10 {
  1427  		glog.V(1).Infof("Server._handleAddrMessage: Queueing %d addrs for forwarding from "+
  1428  			"peer %v", len(msg.AddrList), pp)
  1429  		sourceAddr := &SingleAddr{
  1430  			Timestamp: time.Now(),
  1431  			IP:        pp.netAddr.IP,
  1432  			Port:      pp.netAddr.Port,
  1433  			Services:  pp.serviceFlags,
  1434  		}
  1435  		listToAddTo, hasSeenSource := srv.addrsToBroadcastt[sourceAddr.StringWithPort(false /*includePort*/)]
  1436  		if !hasSeenSource {
  1437  			listToAddTo = []*SingleAddr{}
  1438  		}
  1439  		// If this peer has been sending us a lot of little crap, evict a lot of their
  1440  		// stuff but don't disconnect.
  1441  		if len(listToAddTo) > MaxAddrsPerAddrMsg {
  1442  			listToAddTo = listToAddTo[:MaxAddrsPerAddrMsg/2]
  1443  		}
  1444  		listToAddTo = append(listToAddTo, msg.AddrList...)
  1445  		srv.addrsToBroadcastt[sourceAddr.StringWithPort(false /*includePort*/)] = listToAddTo
  1446  	}
  1447  }
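
// To recap the bookkeeping above: addrsToBroadcastt buckets pending addrs by the
// sending peer's IP (StringWithPort(false) drops the port), trims a bucket back
// to MaxAddrsPerAddrMsg/2 entries if it has grown past MaxAddrsPerAddrMsg, and
// _getAddrsToBroadcast below drains these buckets on each relay cycle.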
  1448  
  1449  func (srv *Server) _handleGetAddrMessage(pp *Peer, msg *MsgDeSoGetAddr) {
  1450  	glog.V(1).Infof("Server._handleGetAddrMessage: Received GetAddr from peer %v", pp)
  1451  	// When we get a GetAddr message, choose up to MaxAddrsPerAddrMsg addresses
  1452  	// from the AddrMgr and send them back to the peer.
  1453  	netAddrsFound := srv.cmgr.addrMgr.AddressCache()
  1454  	if len(netAddrsFound) > MaxAddrsPerAddrMsg {
  1455  		netAddrsFound = netAddrsFound[:MaxAddrsPerAddrMsg]
  1456  	}
  1457  
  1458  	// Convert the list to a SingleAddr list.
  1459  	res := &MsgDeSoAddr{}
  1460  	for _, netAddr := range netAddrsFound {
  1461  		singleAddr := &SingleAddr{
  1462  			Timestamp: time.Now(),
  1463  			IP:        netAddr.IP,
  1464  			Port:      netAddr.Port,
  1465  			Services:  (ServiceFlag)(netAddr.Services),
  1466  		}
  1467  		res.AddrList = append(res.AddrList, singleAddr)
  1468  	}
  1469  	pp.AddDeSoMessage(res, false)
  1470  }
  1471  
  1472  func (srv *Server) _handleControlMessages(serverMessage *ServerMessage) (_shouldQuit bool) {
  1473  	switch msg := serverMessage.Msg.(type) {
  1474  	// Control messages used internally to signal to the server.
  1475  	case *MsgDeSoNewPeer:
  1476  		srv._handleNewPeer(serverMessage.Peer)
  1477  	case *MsgDeSoDonePeer:
  1478  		srv._handleDonePeer(serverMessage.Peer)
  1479  	case *MsgDeSoBitcoinManagerUpdate:
  1480  		srv._handleBitcoinManagerUpdate(msg)
  1481  	case *MsgDeSoQuit:
  1482  		return true
  1483  	}
  1484  
  1485  	return false
  1486  }
  1487  
  1488  func (srv *Server) _handlePeerMessages(serverMessage *ServerMessage) {
  1489  	// Handle all non-control message types from our Peers.
  1490  	switch msg := serverMessage.Msg.(type) {
  1491  	// Messages sent among peers.
  1492  	case *MsgDeSoBlock:
  1493  		srv._handleBlock(serverMessage.Peer, msg)
  1494  	case *MsgDeSoGetHeaders:
  1495  		srv._handleGetHeaders(serverMessage.Peer, msg)
  1496  	case *MsgDeSoHeaderBundle:
  1497  		srv._handleHeaderBundle(serverMessage.Peer, msg)
  1498  	case *MsgDeSoGetBlocks:
  1499  		srv._handleGetBlocks(serverMessage.Peer, msg)
  1500  	case *MsgDeSoGetTransactions:
  1501  		srv._handleGetTransactions(serverMessage.Peer, msg)
  1502  	case *MsgDeSoTransactionBundle:
  1503  		srv._handleTransactionBundle(serverMessage.Peer, msg)
  1504  	case *MsgDeSoMempool:
  1505  		srv._handleMempool(serverMessage.Peer, msg)
  1506  	case *MsgDeSoInv:
  1507  		srv._handleInv(serverMessage.Peer, msg)
  1508  	}
  1509  }
  1510  
  1511  // Note that messageHandler is single-threaded and so all of the handle* functions
  1512  // it calls can assume they can access the Server's variables without concurrency
  1513  // issues.
  1514  func (srv *Server) messageHandler() {
  1515  	for {
  1516  		serverMessage := <-srv.incomingMessages
  1517  		glog.V(2).Infof("Server.messageHandler: Handling message of type %v from Peer %v",
  1518  			serverMessage.Msg.GetMsgType(), serverMessage.Peer)
  1519  
  1520  		// If the message is an addr message we handle it independent of whether or
  1521  		// not the BitcoinManager is synced.
  1522  		if serverMessage.Msg.GetMsgType() == MsgTypeAddr {
  1523  			srv._handleAddrMessage(serverMessage.Peer, serverMessage.Msg.(*MsgDeSoAddr))
  1524  			continue
  1525  		}
  1526  		// If the message is a GetAddr message we handle it independent of whether or
  1527  		// not the BitcoinManager is synced.
  1528  		if serverMessage.Msg.GetMsgType() == MsgTypeGetAddr {
  1529  			srv._handleGetAddrMessage(serverMessage.Peer, serverMessage.Msg.(*MsgDeSoGetAddr))
  1530  			continue
  1531  		}
  1532  
  1533  		srv._handlePeerMessages(serverMessage)
  1534  
  1535  		// Always check for and handle control messages regardless of whether the
  1536  		// BitcoinManager is synced. Note that we filter control messages out in a
  1537  		// Peer's inHandler so any control message we get at this point should be bona fide.
  1538  		shouldQuit := srv._handleControlMessages(serverMessage)
  1539  		if shouldQuit {
  1540  			break
  1541  		}
  1542  
  1543  		// Signal to whatever sent us this message that we're done processing
  1544  		// the block.
  1545  		if serverMessage.ReplyChan != nil {
  1546  			serverMessage.ReplyChan <- &ServerReply{}
  1547  		}
  1548  	}
  1549  
  1550  	// If we broke out of the loop above then it's time to allow things to
  1551  	// clean up.
  1552  	srv.waitGroup.Done()
  1553  	glog.V(2).Info("Server.Start: Server done")
  1554  }
  1555  
  1556  func (srv *Server) _getAddrsToBroadcast() []*SingleAddr {
  1557  	srv.addrsToBroadcastLock.Lock()
  1558  	defer srv.addrsToBroadcastLock.Unlock()
  1559  
  1560  	// If there's nothing in the map, return.
  1561  	if len(srv.addrsToBroadcastt) == 0 {
  1562  		return []*SingleAddr{}
  1563  	}
  1564  
  1565  	// If we get here then we have some addresses to broadcast.
  1566  	addrsToBroadcast := []*SingleAddr{}
  1567  	for len(addrsToBroadcast) < 10 && len(srv.addrsToBroadcastt) > 0 {
  1568  		// Choose a key at random. This works because map iteration order is randomized in Go.
  1569  		bucket := ""
  1570  		for kk := range srv.addrsToBroadcastt {
  1571  			bucket = kk
  1572  			break
  1573  		}
  1574  
  1575  		// Remove the last element from the slice for the given bucket.
  1576  		currentAddrList := srv.addrsToBroadcastt[bucket]
  1577  		if len(currentAddrList) > 0 {
  1578  			lastIndex := len(currentAddrList) - 1
  1579  			currentAddr := currentAddrList[lastIndex]
  1580  			currentAddrList = currentAddrList[:lastIndex]
  1581  			if len(currentAddrList) == 0 {
  1582  				delete(srv.addrsToBroadcastt, bucket)
  1583  			} else {
  1584  				srv.addrsToBroadcastt[bucket] = currentAddrList
  1585  			}
  1586  
  1587  			addrsToBroadcast = append(addrsToBroadcast, currentAddr)
  1588  		}
  1589  	}
  1590  
  1591  	return addrsToBroadcast
  1592  }
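
// The random-bucket drain above leans on the fact that Go's map iteration order
// is unspecified and randomized in practice: taking the first key of a range
// loop is a cheap way to pick a pseudo-random bucket without tracking extra
// state. A self-contained illustration of the same technique on a plain map
// (illustrative only, not part of this file):
//
//	func popRandomKey(m map[string][]string) (string, bool) {
//		for k := range m { // Iteration order is randomized per range loop.
//			delete(m, k)
//			return k, true
//		}
//		return "", false // Map was empty.
//	}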
  1593  
  1594  // Must be run inside a goroutine. Relays addresses to peers at regular intervals
  1595  // and relays our own address to peers once every 24 hours.
  1596  func (srv *Server) _startAddressRelayer() {
  1597  	for numMinutesPassed := 0; ; numMinutesPassed++ {
  1598  		// For the first ten minutes after the server starts, relay our address to all
  1599  		// peers. After the first ten minutes, do it once every 24 hours.
  1600  		if numMinutesPassed < 10 || numMinutesPassed%(RebroadcastNodeAddrIntervalMinutes) == 0 {
  1601  			glog.V(1).Infof("Server.Start._startAddressRelayer: Relaying our own addr to peers")
  1602  			for _, pp := range srv.cmgr.GetAllPeers() {
  1603  				bestAddress := srv.cmgr.addrMgr.GetBestLocalAddress(pp.netAddr)
  1604  				if bestAddress != nil {
  1605  					glog.V(2).Infof("Server.Start._startAddressRelayer: Relaying address %v to "+
  1606  						"peer %v", bestAddress.IP.String(), pp)
  1607  					pp.AddDeSoMessage(&MsgDeSoAddr{
  1608  						AddrList: []*SingleAddr{
  1609  							&SingleAddr{
  1610  								Timestamp: time.Now(),
  1611  								IP:        bestAddress.IP,
  1612  								Port:      bestAddress.Port,
  1613  								Services:  (ServiceFlag)(bestAddress.Services),
  1614  							},
  1615  						},
  1616  					}, false)
  1617  				}
  1618  			}
  1619  		}
  1620  
  1621  		glog.V(2).Infof("Server.Start._startAddressRelayer: Seeing if there are addrs to relay...")
  1622  		// Broadcast the addrs we have to all of our peers.
  1623  		addrsToBroadcast := srv._getAddrsToBroadcast()
  1624  		if len(addrsToBroadcast) == 0 {
  1625  			glog.V(2).Infof("Server.Start._startAddressRelayer: No addrs to relay.")
  1626  			time.Sleep(AddrRelayIntervalSeconds * time.Second)
  1627  			continue
  1628  		}
  1629  
  1630  		glog.V(2).Infof("Server.Start._startAddressRelayer: Found %d addrs to "+
  1631  			"relay: %v", len(addrsToBroadcast), spew.Sdump(addrsToBroadcast))
  1632  		// Iterate over all our peers and broadcast the addrs to all of them.
  1633  		for _, pp := range srv.cmgr.GetAllPeers() {
  1634  			pp.AddDeSoMessage(&MsgDeSoAddr{
  1635  				AddrList: addrsToBroadcast,
  1636  			}, false)
  1637  		}
  1638  		time.Sleep(AddrRelayIntervalSeconds * time.Second)
  1639  		continue
  1640  	}
  1641  }
  1642  
  1643  func (srv *Server) _startTransactionRelayer() {
  1644  	for {
  1645  		// Just continuously relay transactions to peers that don't have them.
  1646  		srv._relayTransactions()
  1647  	}
  1648  }
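
// Note the loop above only avoids spinning if _relayTransactions (defined
// elsewhere in this file) blocks or sleeps internally; if it returned
// immediately, this goroutine would busy-loop. A throttled variant, sketched
// under the assumption that no internal pacing exists, would look like:
//
//	for {
//		srv._relayTransactions()
//		time.Sleep(1 * time.Second) // Hypothetical pacing interval.
//	}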
  1649  
  1650  func (srv *Server) Stop() {
  1651  	glog.Info("Server.Stop: Gracefully shutting down Server")
  1652  
  1653  	// Iterate through all the peers and flush their logs before we quit.
  1654  	glog.Info("Server.Stop: Flushing logs for all peers")
  1655  
  1656  	// Stop the ConnectionManager
  1657  	srv.cmgr.Stop()
  1658  
  1659  	// Stop the miner if we have one running.
  1660  	if srv.miner != nil {
  1661  		srv.miner.Stop()
  1662  	}
  1663  
  1664  	if srv.mempool != nil {
  1665  		// Before the node shuts down, write all the mempool txns to disk
  1666  		// if the flag is set.
  1667  		if srv.mempool.mempoolDir != "" {
  1668  			glog.Info("Doing final mempool dump...")
  1669  			srv.mempool.DumpTxnsToDB()
  1670  			glog.Info("Final mempool dump complete!")
  1671  		}
  1672  
  1673  		srv.mempool.Stop()
  1674  	}
  1675  
  1676  	// Stop the block producer
  1677  	if srv.blockProducer != nil {
  1678  		srv.blockProducer.Stop()
  1679  	}
  1680  
  1681  	// This will signal any goroutines to quit. Note that enqueueing this after stopping
  1682  	// the ConnectionManager seems like it should cause the Server to process any remaining
  1683  	// messages before calling waitGroup.Done(), which seems like a good thing.
  1684  	go func() {
  1685  		srv.incomingMessages <- &ServerMessage{
  1686  			// Peer is ignored for MsgDeSoQuit.
  1687  			Peer: nil,
  1688  			Msg:  &MsgDeSoQuit{},
  1689  		}
  1690  	}()
  1691  
  1692  	// Wait for the server to fully shut down.
  1693  	srv.waitGroup.Wait()
  1694  	glog.Info("Server.Stop: Successfully shut down Server")
  1695  }
  1696  
  1697  func (srv *Server) GetStatsdClient() *statsd.Client {
  1698  	return srv.statsdClient
  1699  }
  1700  
  1701  // Start actually kicks off all of the management processes. Among other things, it causes
  1702  // the ConnectionManager to actually start connecting to peers and receiving messages. If
  1703  // requested, it also starts the miner.
  1704  func (srv *Server) Start() {
  1705  	// Start the Server so that it will be ready to process messages once the ConnectionManager
  1706  	// finds some Peers.
  1707  	glog.Info("Server.Start: Starting Server")
  1708  	srv.waitGroup.Add(1)
  1709  	go srv.messageHandler()
  1710  
  1711  	go srv._startAddressRelayer()
  1712  
  1713  	go srv._startTransactionRelayer()
  1714  
  1715  	// Once the ConnectionManager is started, peers will be found and connected to and
  1716  	// messages will begin to flow in to be processed.
  1717  	if !srv.disableNetworking {
  1718  		go srv.cmgr.Start()
  1719  	}
  1720  
  1721  	if srv.miner != nil && len(srv.miner.PublicKeys) > 0 {
  1722  		go srv.miner.Start()
  1723  	}
  1724  }