github.com/lbryio/lbcd@v0.22.119/server.go

     1  // Copyright (c) 2013-2017 The btcsuite developers
     2  // Copyright (c) 2015-2018 The Decred developers
     3  // Use of this source code is governed by an ISC
     4  // license that can be found in the LICENSE file.
     5  
     6  package main
     7  
     8  import (
     9  	"bytes"
    10  	"crypto/rand"
    11  	"crypto/tls"
    12  	"encoding/binary"
    13  	"errors"
    14  	"fmt"
    15  	"math"
    16  	"net"
    17  	"path"
    18  	"runtime"
    19  	"sort"
    20  	"strconv"
    21  	"strings"
    22  	"sync"
    23  	"sync/atomic"
    24  	"time"
    25  
    26  	"github.com/lbryio/lbcd/addrmgr"
    27  	"github.com/lbryio/lbcd/blockchain"
    28  	"github.com/lbryio/lbcd/blockchain/indexers"
    29  	"github.com/lbryio/lbcd/chaincfg"
    30  	"github.com/lbryio/lbcd/chaincfg/chainhash"
    31  	"github.com/lbryio/lbcd/claimtrie"
    32  	claimtrieconfig "github.com/lbryio/lbcd/claimtrie/config"
    33  	"github.com/lbryio/lbcd/connmgr"
    34  	"github.com/lbryio/lbcd/database"
    35  	"github.com/lbryio/lbcd/fees"
    36  	"github.com/lbryio/lbcd/mempool"
    37  	"github.com/lbryio/lbcd/mining"
    38  	"github.com/lbryio/lbcd/mining/cpuminer"
    39  	"github.com/lbryio/lbcd/netsync"
    40  	"github.com/lbryio/lbcd/peer"
    41  	"github.com/lbryio/lbcd/txscript"
    42  	"github.com/lbryio/lbcd/version"
    43  	"github.com/lbryio/lbcd/wire"
    44  	"github.com/lbryio/lbcutil"
    45  	btcutil "github.com/lbryio/lbcutil"
    46  	"github.com/lbryio/lbcutil/bloom"
    47  )
    48  
    49  const (
    50  	// defaultServices describes the default services that are supported by
    51  	// the server.
    52  	defaultServices = wire.SFNodeNetwork | wire.SFNodeBloom |
    53  		wire.SFNodeWitness | wire.SFNodeCF
    54  
    55  	// defaultRequiredServices describes the default services that are
    56  	// required to be supported by outbound peers.
    57  	defaultRequiredServices = wire.SFNodeNetwork
    58  
    59  	// defaultTargetOutbound is the default number of outbound peers to target.
    60  	defaultTargetOutbound = 8
    61  
    62  	// connectionRetryInterval is the base amount of time to wait in between
    63  	// retries when connecting to persistent peers.  It is adjusted by the
    64  	// number of retries such that there is a retry backoff.
    65  	connectionRetryInterval = time.Second * 5
    66  )
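        // A minimal sketch of the linear backoff the connectionRetryInterval
        // comment describes. The retryDelay helper and its cap are hypothetical;
        // the actual adjustment is performed by the connection handling code
        // elsewhere in this file.
        //
        //	func retryDelay(retries uint64) time.Duration {
        //		d := time.Duration(retries) * connectionRetryInterval
        //		if max := 5 * time.Minute; d > max { // illustrative cap
        //			d = max
        //		}
        //		return d
        //	}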
    67  
    68  var (
    69  	// userAgentName is the user agent name and is used to help identify
    70  	// ourselves to other bitcoin peers.
    71  	userAgentName = "LBRY.GO"
    72  
    73  	// userAgentVersion is the user agent version and is used to help
    74  	// identify ourselves to other bitcoin peers.
    75  	userAgentVersion = version.Full()
    76  )
    77  
    78  // zeroHash is the zero value hash (all zeros).  It is defined as a convenience.
    79  var zeroHash chainhash.Hash
    80  
    81  // onionAddr implements the net.Addr interface and represents a tor address.
    82  type onionAddr struct {
    83  	addr string
    84  }
    85  
    86  // String returns the onion address.
    87  //
    88  // This is part of the net.Addr interface.
    89  func (oa *onionAddr) String() string {
    90  	return oa.addr
    91  }
    92  
    93  // Network returns "onion".
    94  //
    95  // This is part of the net.Addr interface.
    96  func (oa *onionAddr) Network() string {
    97  	return "onion"
    98  }
    99  
   100  // Ensure onionAddr implements the net.Addr interface.
   101  var _ net.Addr = (*onionAddr)(nil)
   102  
   103  // simpleAddr implements the net.Addr interface with two struct fields.
   104  type simpleAddr struct {
   105  	net, addr string
   106  }
   107  
   108  // String returns the address.
   109  //
   110  // This is part of the net.Addr interface.
   111  func (a simpleAddr) String() string {
   112  	return a.addr
   113  }
   114  
   115  // Network returns the network.
   116  //
   117  // This is part of the net.Addr interface.
   118  func (a simpleAddr) Network() string {
   119  	return a.net
   120  }
   121  
   122  // Ensure simpleAddr implements the net.Addr interface.
   123  var _ net.Addr = simpleAddr{}
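        // An illustrative use (the endpoint below is hypothetical, not a value
        // taken from this file):
        //
        //	addr := simpleAddr{net: "tcp4", addr: "127.0.0.1:9246"}
        //	_ = addr.Network() // "tcp4"
        //	_ = addr.String()  // "127.0.0.1:9246"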
   124  
   125  // broadcastMsg provides the ability to house a bitcoin message to be broadcast
   126  // to all connected peers except specified excluded peers.
   127  type broadcastMsg struct {
   128  	message      wire.Message
   129  	excludePeers []*serverPeer
   130  }
   131  
   132  // broadcastInventoryAdd is a type used to declare that the InvVect it contains
   133  // needs to be added to the rebroadcast map.
   134  type broadcastInventoryAdd relayMsg
   135  
   136  // broadcastInventoryDel is a type used to declare that the InvVect it contains
   137  // needs to be removed from the rebroadcast map.
   138  type broadcastInventoryDel *wire.InvVect
   139  
   140  // relayMsg packages an inventory vector along with the newly discovered
   141  // inventory so the relay has access to that information.
   142  type relayMsg struct {
   143  	invVect *wire.InvVect
   144  	data    interface{}
   145  }
   146  
   147  // updatePeerHeightsMsg is a message sent from the blockmanager to the server
   148  // after a new block has been accepted. The purpose of the message is to update
   149  // the heights of peers that were known to announce the block before we
   150  // connected it to the main chain or recognized it as an orphan. With these
   151  // updates, peer heights will be kept up to date, allowing for fresh data when
   152  // selecting sync peer candidates.
   153  type updatePeerHeightsMsg struct {
   154  	newHash    *chainhash.Hash
   155  	newHeight  int32
   156  	originPeer *peer.Peer
   157  }
   158  
   159  type bannedPeriod struct {
   160  	since time.Time
   161  	until time.Time
   162  }
   163  
   164  // peerState maintains state of inbound, persistent, and outbound peers as well
   165  // as banned peers and outbound groups.
   166  type peerState struct {
   167  	inboundPeers    map[int32]*serverPeer
   168  	outboundPeers   map[int32]*serverPeer
   169  	persistentPeers map[int32]*serverPeer
   170  	banned          map[string]bannedPeriod
   171  	outboundGroups  map[string]int
   172  }
   173  
   174  // Count returns the count of all known peers.
   175  func (ps *peerState) Count() int {
   176  	return len(ps.inboundPeers) + len(ps.outboundPeers) +
   177  		len(ps.persistentPeers)
   178  }
   179  
   180  // forAllOutboundPeers is a helper function that runs closure on all outbound
   181  // peers known to peerState.
   182  func (ps *peerState) forAllOutboundPeers(closure func(sp *serverPeer)) {
   183  	for _, e := range ps.outboundPeers {
   184  		closure(e)
   185  	}
   186  	for _, e := range ps.persistentPeers {
   187  		closure(e)
   188  	}
   189  }
   190  
   191  // forAllPeers is a helper function that runs closure on all peers known to
   192  // peerState.
   193  func (ps *peerState) forAllPeers(closure func(sp *serverPeer)) {
   194  	for _, e := range ps.inboundPeers {
   195  		closure(e)
   196  	}
   197  	ps.forAllOutboundPeers(closure)
   198  }
   199  
   200  // cfHeaderKV is a tuple of a filter header and its associated block hash. The
   201  // struct is used to cache cfcheckpt responses.
   202  type cfHeaderKV struct {
   203  	blockHash    chainhash.Hash
   204  	filterHeader chainhash.Hash
   205  }
   206  
   207  // server provides a bitcoin server for handling communications to and from
   208  // bitcoin peers.
   209  type server struct {
   210  	// The following variables must only be used atomically.
   211  	// Putting the uint64s first makes them 64-bit aligned for 32-bit systems.
   212  	bytesReceived uint64 // Total bytes received from all peers since start.
   213  	bytesSent     uint64 // Total bytes sent by all peers since start.
   214  	started       int32
   215  	shutdown      int32
   216  	shutdownSched int32
   217  
   218  	chainParams          *chaincfg.Params
   219  	addrManager          *addrmgr.AddrManager
   220  	connManager          *connmgr.ConnManager
   221  	sigCache             *txscript.SigCache
   222  	hashCache            *txscript.HashCache
   223  	rpcServer            *rpcServer
   224  	syncManager          *netsync.SyncManager
   225  	chain                *blockchain.BlockChain
   226  	txMemPool            *mempool.TxPool
   227  	cpuMiner             *cpuminer.CPUMiner
   228  	modifyRebroadcastInv chan interface{}
   229  	newPeers             chan *serverPeer
   230  	donePeers            chan *serverPeer
   231  	banPeers             chan *serverPeer
   232  	query                chan interface{}
   233  	relayInv             chan relayMsg
   234  	broadcast            chan broadcastMsg
   235  	peerHeightsUpdate    chan updatePeerHeightsMsg
   236  	wg                   sync.WaitGroup
   237  	quit                 chan struct{}
   238  	nat                  NAT
   239  	db                   database.DB
   240  	timeSource           blockchain.MedianTimeSource
   241  	services             wire.ServiceFlag
   242  
   243  	// The following fields are used for optional indexes.  They will be nil
   244  	// if the associated index is not enabled.  These fields are set during
   245  	// initial creation of the server and never changed afterwards, so they
   246  	// do not need to be protected for concurrent access.
   247  	txIndex   *indexers.TxIndex
   248  	addrIndex *indexers.AddrIndex
   249  	cfIndex   *indexers.CfIndex
   250  
   251  	// The fee estimator keeps track of how long transactions are left in
   252  	// the mempool before they are mined into blocks.
   253  	feeEstimator *fees.Estimator
   254  
   255  	// cfCheckptCaches stores a cached slice of filter headers for cfcheckpt
   256  	// messages for each filter type.
   257  	cfCheckptCaches    map[wire.FilterType][]cfHeaderKV
   258  	cfCheckptCachesMtx sync.RWMutex
   259  
   260  	// agentBlacklist is a list of blacklisted substrings by which to filter
   261  	// user agents.
   262  	agentBlacklist []string
   263  
   264  	// agentWhitelist is a list of whitelisted user agent substrings; no
   265  	// whitelisting will be applied if the list is empty or nil.
   266  	agentWhitelist []string
   267  }
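        // A note on the atomic field ordering at the top of server: sync/atomic
        // requires 64-bit values to be 64-bit aligned on 32-bit platforms, and
        // only the first word of an allocated struct is guaranteed that
        // alignment. A minimal illustration with a hypothetical type:
        //
        //	type counters struct {
        //		n  uint64 // safe: the first field is always 64-bit aligned
        //		ok bool   // swapping ok and n could make bump panic on 32-bit platforms
        //	}
        //
        //	func bump(c *counters) { atomic.AddUint64(&c.n, 1) }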
   268  
   269  // serverPeer extends the peer to maintain state shared by the server and
   270  // the blockmanager.
   271  type serverPeer struct {
   272  	// The following variables must only be used atomically
   273  	feeFilter int64
   274  
   275  	*peer.Peer
   276  
   277  	connReq        *connmgr.ConnReq
   278  	server         *server
   279  	persistent     bool
   280  	continueHash   *chainhash.Hash
   281  	relayMtx       sync.Mutex
   282  	disableRelayTx bool
   283  	sentAddrs      bool
   284  	isWhitelisted  bool
   285  	filter         *bloom.Filter
   286  	addressesMtx   sync.RWMutex
   287  	knownAddresses map[string]struct{}
   288  	banScore       connmgr.DynamicBanScore
   289  	quit           chan struct{}
   290  	// The following chans are used to sync blockmanager and server.
   291  	txProcessed    chan struct{}
   292  	blockProcessed chan struct{}
   293  }
   294  
   295  // newServerPeer returns a new serverPeer instance. The peer needs to be set by
   296  // the caller.
   297  func newServerPeer(s *server, isPersistent bool) *serverPeer {
   298  	return &serverPeer{
   299  		server:         s,
   300  		persistent:     isPersistent,
   301  		filter:         bloom.LoadFilter(nil),
   302  		knownAddresses: make(map[string]struct{}),
   303  		quit:           make(chan struct{}),
   304  		txProcessed:    make(chan struct{}, 1),
   305  		blockProcessed: make(chan struct{}, 1),
   306  	}
   307  }
   308  
   309  // newestBlock returns the current best block hash and height using the format
   310  // required by the configuration for the peer package.
   311  func (sp *serverPeer) newestBlock() (*chainhash.Hash, int32, error) {
   312  	best := sp.server.chain.BestSnapshot()
   313  	return &best.Hash, best.Height, nil
   314  }
   315  
   316  // addKnownAddresses adds the given addresses to the set of addresses known
   317  // to the peer to prevent sending duplicate addresses.
   318  func (sp *serverPeer) addKnownAddresses(addresses []*wire.NetAddress) {
   319  	sp.addressesMtx.Lock()
   320  	for _, na := range addresses {
   321  		sp.knownAddresses[addrmgr.NetAddressKey(na)] = struct{}{}
   322  	}
   323  	sp.addressesMtx.Unlock()
   324  }
   325  
   326  // addressKnown returns true if the given address is already known to the peer.
   327  func (sp *serverPeer) addressKnown(na *wire.NetAddress) bool {
   328  	sp.addressesMtx.RLock()
   329  	_, exists := sp.knownAddresses[addrmgr.NetAddressKey(na)]
   330  	sp.addressesMtx.RUnlock()
   331  	return exists
   332  }
   333  
   334  // setDisableRelayTx toggles relaying of transactions for the given peer.
   335  // It is safe for concurrent access.
   336  func (sp *serverPeer) setDisableRelayTx(disable bool) {
   337  	sp.relayMtx.Lock()
   338  	sp.disableRelayTx = disable
   339  	sp.relayMtx.Unlock()
   340  }
   341  
   342  // relayTxDisabled returns whether or not relaying of transactions for the given
   343  // peer is disabled.
   344  // It is safe for concurrent access.
   345  func (sp *serverPeer) relayTxDisabled() bool {
   346  	sp.relayMtx.Lock()
   347  	isDisabled := sp.disableRelayTx
   348  	sp.relayMtx.Unlock()
   349  
   350  	return isDisabled
   351  }
   352  
   353  // pushAddrMsg sends an addr message to the connected peer using the provided
   354  // addresses.
   355  func (sp *serverPeer) pushAddrMsg(addresses []*wire.NetAddress) {
   356  	// Filter addresses already known to the peer.
   357  	addrs := make([]*wire.NetAddress, 0, len(addresses))
   358  	for _, addr := range addresses {
   359  		if !sp.addressKnown(addr) {
   360  			addrs = append(addrs, addr)
   361  		}
   362  	}
   363  	known, err := sp.PushAddrMsg(addrs)
   364  	if err != nil {
   365  		peerLog.Errorf("Can't push address message to %s: %v", sp.Peer, err)
   366  		sp.Disconnect()
   367  		return
   368  	}
   369  	sp.addKnownAddresses(known)
   370  }
   371  
   372  // addBanScore increases the persistent and decaying ban score fields by the
   373  // values passed as parameters. If the resulting score exceeds half of the ban
   374  // threshold, a warning is logged including the reason provided. Further, if
   375  // the score is above the ban threshold, the peer will be banned and
   376  // disconnected.
   377  func (sp *serverPeer) addBanScore(persistent, transient uint32, reason string) bool {
   378  	// No warning is logged and no score is calculated if banning is disabled.
   379  	if cfg.DisableBanning {
   380  		return false
   381  	}
   382  	if sp.isWhitelisted {
   383  		peerLog.Debugf("Misbehaving whitelisted peer %s: %s", sp, reason)
   384  		return false
   385  	}
   386  
   387  	warnThreshold := cfg.BanThreshold >> 1
   388  	if transient == 0 && persistent == 0 {
   389  		// The score is not being increased, but a warning message is still
   390  		// logged if the score is above the warn threshold.
   391  		score := sp.banScore.Int()
   392  		if score > warnThreshold {
   393  			peerLog.Warnf("Misbehaving peer %s: %s -- ban score is %d, "+
   394  				"it was not increased this time", sp, reason, score)
   395  		}
   396  		return false
   397  	}
   398  	score := sp.banScore.Increase(persistent, transient)
   399  	if score > warnThreshold {
   400  		peerLog.Warnf("Misbehaving peer %s: %s -- ban score increased to %d", sp, reason, score)
   401  		if score > cfg.BanThreshold {
   402  			if sp.server.ConnectedCount() <= 1 {
   403  				peerLog.Warnf("Refusing to ban peer %s as it is the only peer", sp)
   404  				return false
   405  			}
   406  			peerLog.Warnf("Misbehaving peer %s -- banning and disconnecting", sp)
   407  			sp.server.BanPeer(sp)
   408  			sp.Disconnect()
   409  			return true
   410  		}
   411  	}
   412  	return false
   413  }
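        // A worked example of the thresholds above, with an illustrative (not
        // necessarily default) BanThreshold of 100: warnThreshold is
        // 100 >> 1 = 50, so a call such as
        //
        //	score := sp.banScore.Increase(0, 60) // decaying part halves each minute
        //
        // logs a warning once the score exceeds 50, and a later increase that
        // pushes it past 100 bans and disconnects the peer, unless it is the
        // only connected peer.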
   414  
   415  // hasServices returns whether or not the provided advertised service flags have
   416  // all of the provided desired service flags set.
   417  func hasServices(advertised, desired wire.ServiceFlag) bool {
   418  	return advertised&desired == desired
   419  }
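        // For example, a peer advertising both network and witness support
        // satisfies a requirement for the network service alone:
        //
        //	hasServices(wire.SFNodeNetwork|wire.SFNodeWitness, wire.SFNodeNetwork) // true
        //	hasServices(wire.SFNodeWitness, wire.SFNodeNetwork)                    // false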
   420  
   421  // OnVersion is invoked when a peer receives a version bitcoin message
   422  // and is used to negotiate the protocol version details as well as kick start
   423  // the communications.
   424  func (sp *serverPeer) OnVersion(_ *peer.Peer, msg *wire.MsgVersion) *wire.MsgReject {
   425  	// Update the address manager with the advertised services for outbound
   426  	// connections in case they have changed.  This is not done for inbound
   427  	// connections to help prevent malicious behavior and is skipped when
   428  	// running on the simulation test network since it is only intended to
   429  	// connect to specified peers and actively avoids advertising and
   430  	// connecting to discovered peers.
   431  	//
   432  	// NOTE: This is done before rejecting peers that are too old to ensure
   433  	// it is updated regardless in the case a new minimum protocol version is
   434  	// enforced and the remote node has not upgraded yet.
   435  	isInbound := sp.Inbound()
   436  	remoteAddr := sp.NA()
   437  	addrManager := sp.server.addrManager
   438  	if !cfg.SimNet && !isInbound {
   439  		addrManager.SetServices(remoteAddr, msg.Services)
   440  	}
   441  
   442  	// Ignore peers that have a protocol version that is too old.  The peer
   443  	// negotiation logic will disconnect it after this callback returns.
   444  	if msg.ProtocolVersion < int32(peer.MinAcceptableProtocolVersion) {
   445  		return nil
   446  	}
   447  
   448  	// Reject outbound peers that are not full nodes.
   449  	wantServices := wire.SFNodeNetwork
   450  	if !isInbound && !hasServices(msg.Services, wantServices) {
   451  		missingServices := wantServices & ^msg.Services
   452  		srvrLog.Debugf("Rejecting peer %s with services %v due to not "+
   453  			"providing desired services %v", sp.Peer, msg.Services,
   454  			missingServices)
   455  		reason := fmt.Sprintf("required services %#x not offered",
   456  			uint64(missingServices))
   457  		return wire.NewMsgReject(msg.Command(), wire.RejectNonstandard, reason)
   458  	}
   459  
   460  	if !cfg.SimNet && !isInbound {
   461  		// After soft-fork activation, only make outbound
   462  		// connections to peers if they flag that they're segwit
   463  		// enabled.
   464  		chain := sp.server.chain
   465  		segwitActive, err := chain.IsDeploymentActive(chaincfg.DeploymentSegwit)
   466  		if err != nil {
   467  			peerLog.Errorf("Unable to query for segwit soft-fork state: %v",
   468  				err)
   469  			return nil
   470  		}
   471  
   472  		if segwitActive && !sp.IsWitnessEnabled() {
   473  			peerLog.Infof("Disconnecting non-segwit peer %v, isn't segwit "+
   474  				"enabled and we need more segwit enabled peers", sp)
   475  			sp.Disconnect()
   476  			return nil
   477  		}
   478  	}
   479  
   480  	// Add the remote peer time as a sample for creating an offset against
   481  	// the local clock to keep the network time in sync.
   482  	sp.server.timeSource.AddTimeSample(sp.Addr(), msg.Timestamp)
   483  
   484  	// Choose whether or not to relay transactions before a filter command
   485  	// is received.
   486  	sp.setDisableRelayTx(msg.DisableRelayTx)
   487  
   488  	return nil
   489  }
   490  
   491  // OnVerAck is invoked when a peer receives a verack bitcoin message and is used
   492  // to kick start communication with them.
   493  func (sp *serverPeer) OnVerAck(_ *peer.Peer, _ *wire.MsgVerAck) {
   494  	sp.server.AddPeer(sp)
   495  }
   496  
   497  // OnMemPool is invoked when a peer receives a mempool bitcoin message.
   498  // It creates and sends an inventory message with the contents of the memory
   499  // pool up to the maximum inventory allowed per message.  When the peer has a
   500  // bloom filter loaded, the contents are filtered accordingly.
   501  func (sp *serverPeer) OnMemPool(_ *peer.Peer, msg *wire.MsgMemPool) {
   502  	// Only allow mempool requests if the server has bloom filtering
   503  	// enabled.
   504  	if sp.server.services&wire.SFNodeBloom != wire.SFNodeBloom {
   505  		peerLog.Debugf("peer %v sent mempool request with bloom "+
   506  			"filtering disabled -- disconnecting", sp)
   507  		sp.Disconnect()
   508  		return
   509  	}
   510  
   511  	// A decaying ban score increase is applied to prevent flooding.
   512  	// The ban score accumulates and passes the ban threshold if a burst of
   513  	// mempool messages comes from a peer. The score decays each minute to
   514  	// half of its value.
   515  	if sp.addBanScore(0, 33, "mempool") {
   516  		return
   517  	}
   518  
   519  	// Generate inventory message with the available transactions in the
   520  	// transaction memory pool.  Limit it to the max allowed inventory
   521  	// per message.  The NewMsgInvSizeHint function automatically limits
   522  	// the passed hint to the maximum allowed, so it's safe to pass it
   523  	// without double checking it here.
   524  	txMemPool := sp.server.txMemPool
   525  	txDescs := txMemPool.TxDescs()
   526  	invMsg := wire.NewMsgInvSizeHint(uint(len(txDescs)))
   527  
   528  	for _, txDesc := range txDescs {
   529  		// Either add all transactions when there is no bloom filter,
   530  		// or only the transactions that match the filter when there is
   531  		// one.
   532  		if !sp.filter.IsLoaded() || sp.filter.MatchTxAndUpdate(txDesc.Tx) {
   533  			iv := wire.NewInvVect(wire.InvTypeTx, txDesc.Tx.Hash())
   534  			invMsg.AddInvVect(iv)
   535  			if len(invMsg.InvList)+1 > wire.MaxInvPerMsg {
   536  				break
   537  			}
   538  		}
   539  	}
   540  
   541  	// Send the inventory message if there is anything to send.
   542  	if len(invMsg.InvList) > 0 {
   543  		sp.QueueMessage(invMsg, nil)
   544  	}
   545  }
   546  
   547  // OnTx is invoked when a peer receives a tx bitcoin message.  It blocks
   548  // until the bitcoin transaction has been fully processed.  Unlike the block
   549  // handler, this does not serialize all transactions through a single thread
   550  // since transactions don't rely on the previous one in a linear fashion like blocks.
   551  func (sp *serverPeer) OnTx(_ *peer.Peer, msg *wire.MsgTx) {
   552  	if cfg.BlocksOnly {
   553  		peerLog.Tracef("Ignoring tx %v from %v - blocksonly enabled",
   554  			msg.TxHash(), sp)
   555  		return
   556  	}
   557  
   558  	// Add the transaction to the known inventory for the peer.
   559  	// Convert the raw MsgTx to a btcutil.Tx which provides some convenience
   560  	// methods and things such as hash caching.
   561  	tx := btcutil.NewTx(msg)
   562  	iv := wire.NewInvVect(wire.InvTypeTx, tx.Hash())
   563  	sp.AddKnownInventory(iv)
   564  
   565  	// Queue the transaction up to be handled by the sync manager and
   566  	// intentionally block further receives until the transaction is fully
   567  	// processed and known good or bad.  This helps prevent a malicious peer
   568  	// from queuing up a bunch of bad transactions before disconnecting (or
   569  	// being disconnected) and wasting memory.
   570  	sp.server.syncManager.QueueTx(tx, sp.Peer, sp.txProcessed)
   571  	<-sp.txProcessed
   572  }
   573  
   574  // OnBlock is invoked when a peer receives a block bitcoin message.  It
   575  // blocks until the bitcoin block has been fully processed.
   576  func (sp *serverPeer) OnBlock(_ *peer.Peer, msg *wire.MsgBlock, buf []byte) {
   577  	// Convert the raw MsgBlock to a btcutil.Block which provides some
   578  	// convenience methods and things such as hash caching.
   579  	block := btcutil.NewBlockFromBlockAndBytes(msg, buf)
   580  
   581  	// Add the block to the known inventory for the peer.
   582  	iv := wire.NewInvVect(wire.InvTypeBlock, block.Hash())
   583  	sp.AddKnownInventory(iv)
   584  
   585  	// Queue the block up to be handled by the block
   586  	// manager and intentionally block further receives
   587  	// until the bitcoin block is fully processed and known
   588  	// good or bad.  This helps prevent a malicious peer
   589  	// from queuing up a bunch of bad blocks before
   590  	// disconnecting (or being disconnected) and wasting
   591  	// memory.  Additionally, this behavior is depended on
   592  	// by at least the block acceptance test tool as the
   593  	// reference implementation processes blocks in the same
   594  	// thread and therefore blocks further messages until
   595  	// the bitcoin block has been fully processed.
   596  	sp.server.syncManager.QueueBlock(block, sp.Peer, sp.blockProcessed)
   597  	<-sp.blockProcessed
   598  }
   599  
   600  // OnInv is invoked when a peer receives an inv bitcoin message and is
   601  // used to examine the inventory being advertised by the remote peer and react
   602  // accordingly.  We pass the message down to blockmanager which will call
   603  // QueueMessage with any appropriate responses.
   604  func (sp *serverPeer) OnInv(_ *peer.Peer, msg *wire.MsgInv) {
   605  	if !cfg.BlocksOnly {
   606  		if len(msg.InvList) > 0 {
   607  			sp.server.syncManager.QueueInv(msg, sp.Peer)
   608  		}
   609  		return
   610  	}
   611  
   612  	newInv := wire.NewMsgInvSizeHint(uint(len(msg.InvList)))
   613  	for _, invVect := range msg.InvList {
   614  		if invVect.Type == wire.InvTypeTx {
   615  			peerLog.Tracef("Ignoring tx %v in inv from %v -- "+
   616  				"blocksonly enabled", invVect.Hash, sp)
   617  			if sp.ProtocolVersion() >= wire.BIP0037Version {
   618  				peerLog.Infof("Peer %v is announcing "+
   619  					"transactions -- disconnecting", sp)
   620  				sp.Disconnect()
   621  				return
   622  			}
   623  			continue
   624  		}
   625  		err := newInv.AddInvVect(invVect)
   626  		if err != nil {
   627  			peerLog.Errorf("Failed to add inventory vector: %v", err)
   628  			break
   629  		}
   630  	}
   631  
   632  	if len(newInv.InvList) > 0 {
   633  		sp.server.syncManager.QueueInv(newInv, sp.Peer)
   634  	}
   635  }
   636  
   637  // OnHeaders is invoked when a peer receives a headers bitcoin
   638  // message.  The message is passed down to the sync manager.
   639  func (sp *serverPeer) OnHeaders(_ *peer.Peer, msg *wire.MsgHeaders) {
   640  	sp.server.syncManager.QueueHeaders(msg, sp.Peer)
   641  }
   642  
   643  // OnGetData is invoked when a peer receives a getdata bitcoin message and
   644  // is used to deliver block and transaction information.
   645  func (sp *serverPeer) OnGetData(_ *peer.Peer, msg *wire.MsgGetData) {
   646  	numAdded := 0
   647  	notFound := wire.NewMsgNotFound()
   648  
   649  	length := len(msg.InvList)
   650  	// A decaying ban score increase is applied to prevent exhausting resources
   651  	// with unusually large inventory queries.
   652  	// Requesting more than the maximum inventory vector length within a short
   653  	// period of time yields a score above the default ban threshold. Sustained
   654  	// bursts of small requests are not penalized as that would potentially ban
   655  	// peers performing IBD.
   656  	// This incremental score decays each minute to half of its value.
   657  	if sp.addBanScore(0, uint32(length)*99/wire.MaxInvPerMsg, "getdata") {
   658  		return
   659  	}
   660  
   661  	// We wait on this wait channel periodically to prevent queuing
   662  	// far more data than we can send in a reasonable time, wasting memory.
   663  	// The waiting occurs after the database fetch for the next one to
   664  	// provide a little pipelining.
   665  	var waitChan chan struct{}
   666  	doneChan := make(chan struct{}, 1)
   667  
   668  	for i, iv := range msg.InvList {
   669  		var c chan struct{}
   670  		// If this will be the last message we send.
   671  		if i == length-1 && len(notFound.InvList) == 0 {
   672  			c = doneChan
   673  		} else if (i+1)%3 == 0 {
   674  			// Buffered so as to not make the send goroutine block.
   675  			c = make(chan struct{}, 1)
   676  		}
   677  		var err error
   678  		switch iv.Type {
   679  		case wire.InvTypeWitnessTx:
   680  			err = sp.server.pushTxMsg(sp, &iv.Hash, c, waitChan, wire.WitnessEncoding)
   681  		case wire.InvTypeTx:
   682  			err = sp.server.pushTxMsg(sp, &iv.Hash, c, waitChan, wire.BaseEncoding)
   683  		case wire.InvTypeWitnessBlock:
   684  			err = sp.server.pushBlockMsg(sp, &iv.Hash, c, waitChan, wire.WitnessEncoding)
   685  		case wire.InvTypeBlock:
   686  			err = sp.server.pushBlockMsg(sp, &iv.Hash, c, waitChan, wire.BaseEncoding)
   687  		case wire.InvTypeFilteredWitnessBlock:
   688  			err = sp.server.pushMerkleBlockMsg(sp, &iv.Hash, c, waitChan, wire.WitnessEncoding)
   689  		case wire.InvTypeFilteredBlock:
   690  			err = sp.server.pushMerkleBlockMsg(sp, &iv.Hash, c, waitChan, wire.BaseEncoding)
   691  		default:
   692  			peerLog.Warnf("Unknown type in inventory request %d",
   693  				iv.Type)
   694  			continue
   695  		}
   696  		if err != nil {
   697  			notFound.AddInvVect(iv)
   698  
   699  			// When there is a failure fetching the final entry
   700  			// and the done channel was sent in due to there
   701  			// being no outstanding not found inventory, consume
   702  			// it here because there is now not found inventory
   703  			// that will use the channel momentarily.
   704  			if i == len(msg.InvList)-1 && c != nil {
   705  				<-c
   706  			}
   707  		} else if iv.Type == wire.InvTypeWitnessTx || iv.Type == wire.InvTypeTx {
   708  			// We interpret fulfilling a GETDATA for a transaction as a
   709  			// successful initial broadcast and remove it from our
   710  			// unbroadcast set.
   711  			sp.server.txMemPool.RemoveUnbroadcastTx(&iv.Hash)
   712  		}
   713  		numAdded++
   714  		waitChan = c
   715  	}
   716  	if len(notFound.InvList) != 0 {
   717  		sp.QueueMessage(notFound, doneChan)
   718  	}
   719  
   720  	// Wait for messages to be sent. We can send quite a lot of data at this
   721  	// point and this will keep the peer busy for a decent amount of time.
   722  	// We don't process anything else from them during this time so that we
   723  	// have an idea of when we should hear back from them - else the idle
   724  	// timeout could fire when we were only half done sending the blocks.
   725  	if numAdded > 0 {
   726  		<-doneChan
   727  	}
   728  }
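        // The waitChan/doneChan handoff above forms a one-deep pipeline: each
        // inventory item is fetched from the database right away, but its send
        // waits on the previous item's completion channel, so at most one
        // fetched payload sits queued ahead of the wire. A simplified,
        // standalone sketch of the pattern (fetch and send are hypothetical
        // helpers, and the real loop above only allocates a channel for every
        // third message):
        //
        //	var wait chan struct{}
        //	for _, item := range items {
        //		done := make(chan struct{}, 1) // buffered: sender never blocks
        //		data := fetch(item)            // overlaps the previous send
        //		if wait != nil {
        //			<-wait // limit how far fetching runs ahead of sending
        //		}
        //		send(data, done) // signals done once the message is written
        //		wait = done
        //	}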
   729  
   730  // OnGetBlocks is invoked when a peer receives a getblocks bitcoin
   731  // message.
   732  func (sp *serverPeer) OnGetBlocks(_ *peer.Peer, msg *wire.MsgGetBlocks) {
   733  	// Find the most recent known block in the best chain based on the block
   734  	// locator and fetch all of the block hashes after it until either
   735  	// wire.MaxBlocksPerMsg have been fetched or the provided stop hash is
   736  	// encountered.
   737  	//
   738  	// Use the block after the genesis block if no other blocks in the
   739  	// provided locator are known.  This does mean the client will start
   740  	// over with the genesis block if unknown block locators are provided.
   741  	//
   742  	// This mirrors the behavior in the reference implementation.
   743  	chain := sp.server.chain
   744  	hashList := chain.LocateBlocks(msg.BlockLocatorHashes, &msg.HashStop,
   745  		wire.MaxBlocksPerMsg)
   746  
   747  	// Generate inventory message.
   748  	invMsg := wire.NewMsgInv()
   749  	for i := range hashList {
   750  		iv := wire.NewInvVect(wire.InvTypeBlock, &hashList[i])
   751  		invMsg.AddInvVect(iv)
   752  	}
   753  
   754  	// Send the inventory message if there is anything to send.
   755  	if len(invMsg.InvList) > 0 {
   756  		invListLen := len(invMsg.InvList)
   757  		if invListLen == wire.MaxBlocksPerMsg {
   758  			// Intentionally use a copy of the final hash so there
   759  			// is not a reference into the inventory slice which
   760  			// would prevent the entire slice from being eligible
   761  			// for GC as soon as it's sent.
   762  			continueHash := invMsg.InvList[invListLen-1].Hash
   763  			sp.continueHash = &continueHash
   764  		}
   765  		sp.QueueMessage(invMsg, nil)
   766  	}
   767  }
   768  
   769  // OnGetHeaders is invoked when a peer receives a getheaders bitcoin
   770  // message.
   771  func (sp *serverPeer) OnGetHeaders(_ *peer.Peer, msg *wire.MsgGetHeaders) {
   772  	// Ignore getheaders requests if not in sync.
   773  	if !sp.server.syncManager.IsCurrent() {
   774  		return
   775  	}
   776  
   777  	// Find the most recent known block in the best chain based on the block
   778  	// locator and fetch all of the headers after it until either
   779  	// wire.MaxBlockHeadersPerMsg have been fetched or the provided stop
   780  	// hash is encountered.
   781  	//
   782  	// Use the block after the genesis block if no other blocks in the
   783  	// provided locator are known.  This does mean the client will start
   784  	// over with the genesis block if unknown block locators are provided.
   785  	//
   786  	// This mirrors the behavior in the reference implementation.
   787  	chain := sp.server.chain
   788  	headers := chain.LocateHeaders(msg.BlockLocatorHashes, &msg.HashStop)
   789  
   790  	// Send found headers to the requesting peer.
   791  	blockHeaders := make([]*wire.BlockHeader, len(headers))
   792  	for i := range headers {
   793  		blockHeaders[i] = &headers[i]
   794  	}
   795  	sp.QueueMessage(&wire.MsgHeaders{Headers: blockHeaders}, nil)
   796  }
   797  
   798  // OnGetCFilters is invoked when a peer receives a getcfilters bitcoin message.
   799  func (sp *serverPeer) OnGetCFilters(_ *peer.Peer, msg *wire.MsgGetCFilters) {
   800  	// Ignore getcfilters requests if not in sync.
   801  	if !sp.server.syncManager.IsCurrent() {
   802  		return
   803  	}
   804  
   805  	// We'll also ensure that the remote party is requesting a set of
   806  	// filters that we actually currently maintain.
   807  	switch msg.FilterType {
   808  	case wire.GCSFilterRegular:
   809  		break
   810  
   811  	default:
   812  		peerLog.Debugf("Filter request for unknown filter: %v",
   813  			msg.FilterType)
   814  		return
   815  	}
   816  
   817  	hashes, err := sp.server.chain.HeightToHashRange(
   818  		int32(msg.StartHeight), &msg.StopHash, wire.MaxGetCFiltersReqRange,
   819  	)
   820  	if err != nil {
   821  		peerLog.Debugf("Invalid getcfilters request: %v", err)
   822  		return
   823  	}
   824  
   825  	// Create []*chainhash.Hash from []chainhash.Hash to pass to
   826  	// FiltersByBlockHashes.
   827  	hashPtrs := make([]*chainhash.Hash, len(hashes))
   828  	for i := range hashes {
   829  		hashPtrs[i] = &hashes[i]
   830  	}
   831  
   832  	filters, err := sp.server.cfIndex.FiltersByBlockHashes(
   833  		hashPtrs, msg.FilterType,
   834  	)
   835  	if err != nil {
   836  		peerLog.Errorf("Error retrieving cfilters: %v", err)
   837  		return
   838  	}
   839  
   840  	for i, filterBytes := range filters {
   841  		if len(filterBytes) == 0 {
   842  			peerLog.Warnf("Could not obtain cfilter for %v",
   843  				hashes[i])
   844  			return
   845  		}
   846  
   847  		filterMsg := wire.NewMsgCFilter(
   848  			msg.FilterType, &hashes[i], filterBytes,
   849  		)
   850  		sp.QueueMessage(filterMsg, nil)
   851  	}
   852  }
   853  
   854  // OnGetCFHeaders is invoked when a peer receives a getcfheaders bitcoin message.
   855  func (sp *serverPeer) OnGetCFHeaders(_ *peer.Peer, msg *wire.MsgGetCFHeaders) {
   856  	// Ignore getcfheaders requests if not in sync.
   857  	if !sp.server.syncManager.IsCurrent() {
   858  		return
   859  	}
   860  
   861  	// We'll also ensure that the remote party is requesting a set of
   862  	// headers for filters that we actually currently maintain.
   863  	switch msg.FilterType {
   864  	case wire.GCSFilterRegular:
   865  		break
   866  
   867  	default:
   868  		peerLog.Debugf("Filter request for unknown headers for "+
   869  			"filter: %v", msg.FilterType)
   870  		return
   871  	}
   872  
   873  	startHeight := int32(msg.StartHeight)
   874  	maxResults := wire.MaxCFHeadersPerMsg
   875  
   876  	// If StartHeight is positive, fetch the predecessor block hash so we
   877  	// can populate the PrevFilterHeader field.
   878  	if msg.StartHeight > 0 {
   879  		startHeight--
   880  		maxResults++
   881  	}
   882  
   883  	// Fetch the hashes from the block index.
   884  	hashList, err := sp.server.chain.HeightToHashRange(
   885  		startHeight, &msg.StopHash, maxResults,
   886  	)
   887  	if err != nil {
   888  		peerLog.Debugf("Invalid getcfheaders request: %v", err)
   889  	}
   890  
   891  	// This is possible if StartHeight is one greater than the height of
   892  	// StopHash, and we pull a valid range of hashes including the previous
   893  	// filter header.
   894  	if len(hashList) == 0 || (msg.StartHeight > 0 && len(hashList) == 1) {
   895  		peerLog.Debug("No results for getcfheaders request")
   896  		return
   897  	}
   898  
   899  	// Create []*chainhash.Hash from []chainhash.Hash to pass to
   900  	// FilterHeadersByBlockHashes.
   901  	hashPtrs := make([]*chainhash.Hash, len(hashList))
   902  	for i := range hashList {
   903  		hashPtrs[i] = &hashList[i]
   904  	}
   905  
   906  	// Fetch the raw filter hash bytes from the database for all blocks.
   907  	filterHashes, err := sp.server.cfIndex.FilterHashesByBlockHashes(
   908  		hashPtrs, msg.FilterType,
   909  	)
   910  	if err != nil {
   911  		peerLog.Errorf("Error retrieving cfilter hashes: %v", err)
   912  		return
   913  	}
   914  
   915  	// Generate cfheaders message and send it.
   916  	headersMsg := wire.NewMsgCFHeaders()
   917  
   918  	// Populate the PrevFilterHeader field.
   919  	if msg.StartHeight > 0 {
   920  		prevBlockHash := &hashList[0]
   921  
   922  		// Fetch the raw committed filter header bytes from the
   923  		// database.
   924  		headerBytes, err := sp.server.cfIndex.FilterHeaderByBlockHash(
   925  			prevBlockHash, msg.FilterType)
   926  		if err != nil {
   927  			peerLog.Errorf("Error retrieving CF header: %v", err)
   928  			return
   929  		}
   930  		if len(headerBytes) == 0 {
   931  			peerLog.Warnf("Could not obtain CF header for %v", prevBlockHash)
   932  			return
   933  		}
   934  
   935  		// Deserialize the hash into PrevFilterHeader.
   936  		err = headersMsg.PrevFilterHeader.SetBytes(headerBytes)
   937  		if err != nil {
   938  			peerLog.Warnf("Committed filter header deserialize "+
   939  				"failed: %v", err)
   940  			return
   941  		}
   942  
   943  		hashList = hashList[1:]
   944  		filterHashes = filterHashes[1:]
   945  	}
   946  
   947  	// Populate HeaderHashes.
   948  	for i, hashBytes := range filterHashes {
   949  		if len(hashBytes) == 0 {
   950  			peerLog.Warnf("Could not obtain CF hash for %v", hashList[i])
   951  			return
   952  		}
   953  
   954  		// Deserialize the hash.
   955  		filterHash, err := chainhash.NewHash(hashBytes)
   956  		if err != nil {
   957  			peerLog.Warnf("Committed filter hash deserialize "+
   958  				"failed: %v", err)
   959  			return
   960  		}
   961  
   962  		headersMsg.AddCFHash(filterHash)
   963  	}
   964  
   965  	headersMsg.FilterType = msg.FilterType
   966  	headersMsg.StopHash = msg.StopHash
   967  
   968  	sp.QueueMessage(headersMsg, nil)
   969  }
   970  
   971  // OnGetCFCheckpt is invoked when a peer receives a getcfcheckpt bitcoin message.
   972  func (sp *serverPeer) OnGetCFCheckpt(_ *peer.Peer, msg *wire.MsgGetCFCheckpt) {
   973  	// Ignore getcfcheckpt requests if not in sync.
   974  	if !sp.server.syncManager.IsCurrent() {
   975  		return
   976  	}
   977  
   978  	// We'll also ensure that the remote party is requesting a set of
   979  	// checkpoints for filters that we actually currently maintain.
   980  	switch msg.FilterType {
   981  	case wire.GCSFilterRegular:
   982  		break
   983  
   984  	default:
   985  		peerLog.Debugf("Filter request for unknown checkpoints for "+
   986  			"filter: %v", msg.FilterType)
   987  		return
   988  	}
   989  
   990  	// Now that we know the client is fetching a filter that we know of,
   991  	// we'll fetch the block hashes at each checkpoint interval so we can
   992  	// compare against our cache, and create new checkpoints if necessary.
   993  	blockHashes, err := sp.server.chain.IntervalBlockHashes(
   994  		&msg.StopHash, wire.CFCheckptInterval,
   995  	)
   996  	if err != nil {
   997  		peerLog.Debugf("Invalid getcfcheckpt request: %v", err)
   998  		return
   999  	}
  1000  
  1001  	checkptMsg := wire.NewMsgCFCheckpt(
  1002  		msg.FilterType, &msg.StopHash, len(blockHashes),
  1003  	)
  1004  
  1005  	// Fetch the current existing cache so we can decide if we need to
  1006  	// extend it or if it's adequate as is.
  1007  	sp.server.cfCheckptCachesMtx.RLock()
  1008  	checkptCache := sp.server.cfCheckptCaches[msg.FilterType]
  1009  
  1010  	// If the set of block hashes is beyond the current size of the cache,
  1011  	// then we'll expand the size of the cache and also retain the write
  1012  	// lock.
  1013  	var updateCache bool
  1014  	if len(blockHashes) > len(checkptCache) {
  1015  		// Now that we know we'll need to modify the size of the cache,
  1016  		// we'll release the read lock and grab the write lock to
  1017  		// possibly expand the cache size.
  1018  		sp.server.cfCheckptCachesMtx.RUnlock()
  1019  
  1020  		sp.server.cfCheckptCachesMtx.Lock()
  1021  		defer sp.server.cfCheckptCachesMtx.Unlock()
  1022  
  1023  		// Now that we have the write lock, we'll check again as it's
  1024  		// possible that the cache has already been expanded.
  1025  		checkptCache = sp.server.cfCheckptCaches[msg.FilterType]
  1026  
  1027  		// If we still need to expand the cache, then we'll mark that
  1028  		// we need to update the cache below and also expand the
  1029  		// size of the cache in place.
  1030  		if len(blockHashes) > len(checkptCache) {
  1031  			updateCache = true
  1032  
  1033  			additionalLength := len(blockHashes) - len(checkptCache)
  1034  			newEntries := make([]cfHeaderKV, additionalLength)
  1035  
  1036  			peerLog.Infof("Growing size of checkpoint cache from %v to %v "+
  1037  				"block hashes", len(checkptCache), len(blockHashes))
  1038  
  1039  			checkptCache = append(
  1040  				sp.server.cfCheckptCaches[msg.FilterType],
  1041  				newEntries...,
  1042  			)
  1043  		}
  1044  	} else {
  1045  		// Otherwise, we'll hold onto the read lock for the remainder
  1046  		// of this method.
  1047  		defer sp.server.cfCheckptCachesMtx.RUnlock()
  1048  
  1049  		peerLog.Tracef("Serving stale cache of size %v",
  1050  			len(checkptCache))
  1051  	}
  1052  
  1053  	// Now that we know the cache is of an appropriate size, we'll iterate
  1054  	// backwards until we find the block hash. We do this as it's possible
  1055  	// a re-org has occurred so items in the db are now in the main chain
  1056  	// while the cache has been partially invalidated.
  1057  	var forkIdx int
  1058  	for forkIdx = len(blockHashes); forkIdx > 0; forkIdx-- {
  1059  		if checkptCache[forkIdx-1].blockHash == blockHashes[forkIdx-1] {
  1060  			break
  1061  		}
  1062  	}
  1063  
  1064  	// Now that we know how much of the cache is relevant for this
  1065  	// query, we'll populate our check point message with the cache as is.
  1066  	// Shortly below, we'll populate the new elements of the cache.
  1067  	for i := 0; i < forkIdx; i++ {
  1068  		checkptMsg.AddCFHeader(&checkptCache[i].filterHeader)
  1069  	}
  1070  
  1071  	// We'll now collect the set of hashes that are beyond our cache so we
  1072  	// can look up the filter headers to populate the final cache.
  1073  	blockHashPtrs := make([]*chainhash.Hash, 0, len(blockHashes)-forkIdx)
  1074  	for i := forkIdx; i < len(blockHashes); i++ {
  1075  		blockHashPtrs = append(blockHashPtrs, &blockHashes[i])
  1076  	}
  1077  	filterHeaders, err := sp.server.cfIndex.FilterHeadersByBlockHashes(
  1078  		blockHashPtrs, msg.FilterType,
  1079  	)
  1080  	if err != nil {
  1081  		peerLog.Errorf("Error retrieving cfilter headers: %v", err)
  1082  		return
  1083  	}
  1084  
  1085  	// Now that we have the full set of filter headers, we'll add them to
  1086  	// the checkpoint message, and also update our cache in line.
  1087  	for i, filterHeaderBytes := range filterHeaders {
  1088  		if len(filterHeaderBytes) == 0 {
  1089  			peerLog.Warnf("Could not obtain CF header for %v",
  1090  				blockHashPtrs[i])
  1091  			return
  1092  		}
  1093  
  1094  		filterHeader, err := chainhash.NewHash(filterHeaderBytes)
  1095  		if err != nil {
  1096  			peerLog.Warnf("Committed filter header deserialize "+
  1097  				"failed: %v", err)
  1098  			return
  1099  		}
  1100  
  1101  		checkptMsg.AddCFHeader(filterHeader)
  1102  
  1103  		// If the new main chain is longer than what's in the cache,
  1104  		// then we'll override it beyond the fork point.
  1105  		if updateCache {
  1106  			checkptCache[forkIdx+i] = cfHeaderKV{
  1107  				blockHash:    blockHashes[forkIdx+i],
  1108  				filterHeader: *filterHeader,
  1109  			}
  1110  		}
  1111  	}
  1112  
  1113  	// Finally, we'll update the cache if we need to, and send the final
  1114  	// message back to the requesting peer.
  1115  	if updateCache {
  1116  		sp.server.cfCheckptCaches[msg.FilterType] = checkptCache
  1117  	}
  1118  
  1119  	sp.QueueMessage(checkptMsg, nil)
  1120  }
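        // Checkpoints are spaced wire.CFCheckptInterval blocks apart, so a stop
        // hash at height h yields h / wire.CFCheckptInterval checkpoint
        // headers. A small example, assuming an interval of 1,000 blocks:
        //
        //	const h = 2500                               // stop hash height
        //	numCheckpoints := h / wire.CFCheckptInterval // 2: heights 1000 and 2000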
  1121  
  1122  // enforceNodeBloomFlag disconnects the peer if the server is not configured to
  1123  // allow bloom filters.  Additionally, if the peer has negotiated to a protocol
  1124  // version that is high enough to observe the bloom filter service support bit,
  1125  // it will be banned since it is intentionally violating the protocol.
  1126  func (sp *serverPeer) enforceNodeBloomFlag(cmd string) bool {
  1127  	if sp.server.services&wire.SFNodeBloom != wire.SFNodeBloom {
  1128  		// Ban the peer if the protocol version is high enough that the
  1129  		// peer is knowingly violating the protocol and banning is
  1130  		// enabled.
  1131  		//
  1132  		// NOTE: Even though the addBanScore function already examines
  1133  		// whether or not banning is enabled, it is checked here as well
  1134  		// to ensure the violation is logged and the peer is
  1135  		// disconnected regardless.
  1136  		if sp.ProtocolVersion() >= wire.BIP0111Version &&
  1137  			!cfg.DisableBanning {
  1138  
  1139  			// Disconnect the peer regardless of whether it was
  1140  			// banned.
  1141  			sp.addBanScore(100, 0, cmd)
  1142  			sp.Disconnect()
  1143  			return false
  1144  		}
  1145  
  1146  		// Disconnect the peer regardless of protocol version or banning
  1147  		// state.
  1148  		peerLog.Debugf("%s sent an unsupported %s request -- "+
  1149  			"disconnecting", sp, cmd)
  1150  		sp.Disconnect()
  1151  		return false
  1152  	}
  1153  
  1154  	return true
  1155  }
  1156  
  1157  // OnFeeFilter is invoked when a peer receives a feefilter bitcoin message and
  1158  // is used by remote peers to request that no transactions which have a fee rate
  1159  // lower than the provided value are inventoried to them.  The peer will be
  1160  // disconnected if an invalid fee filter value is provided.
  1161  func (sp *serverPeer) OnFeeFilter(_ *peer.Peer, msg *wire.MsgFeeFilter) {
  1162  	// Check that the passed minimum fee is a valid amount.
  1163  	if msg.MinFee < 0 || msg.MinFee > btcutil.MaxSatoshi {
  1164  		peerLog.Debugf("Peer %v sent an invalid feefilter '%v' -- "+
  1165  			"disconnecting", sp, btcutil.Amount(msg.MinFee))
  1166  		sp.Disconnect()
  1167  		return
  1168  	}
  1169  
  1170  	atomic.StoreInt64(&sp.feeFilter, msg.MinFee)
  1171  }
  1172  
  1173  // OnFilterAdd is invoked when a peer receives a filteradd bitcoin
  1174  // message and is used by remote peers to add data to an already loaded bloom
  1175  // filter.  The peer will be disconnected if a filter is not loaded when this
  1176  // message is received or the server is not configured to allow bloom filters.
  1177  func (sp *serverPeer) OnFilterAdd(_ *peer.Peer, msg *wire.MsgFilterAdd) {
  1178  	// Disconnect and/or ban depending on the node bloom services flag and
  1179  	// negotiated protocol version.
  1180  	if !sp.enforceNodeBloomFlag(msg.Command()) {
  1181  		return
  1182  	}
  1183  
  1184  	if !sp.filter.IsLoaded() {
  1185  		peerLog.Debugf("%s sent a filteradd request with no filter "+
  1186  			"loaded -- disconnecting", sp)
  1187  		sp.Disconnect()
  1188  		return
  1189  	}
  1190  
  1191  	sp.filter.Add(msg.Data)
  1192  }
  1193  
  1194  // OnFilterClear is invoked when a peer receives a filterclear bitcoin
  1195  // message and is used by remote peers to clear an already loaded bloom filter.
  1196  // The peer will be disconnected if a filter is not loaded when this message is
  1197  // received or the server is not configured to allow bloom filters.
  1198  func (sp *serverPeer) OnFilterClear(_ *peer.Peer, msg *wire.MsgFilterClear) {
  1199  	// Disconnect and/or ban depending on the node bloom services flag and
  1200  	// negotiated protocol version.
  1201  	if !sp.enforceNodeBloomFlag(msg.Command()) {
  1202  		return
  1203  	}
  1204  
  1205  	if !sp.filter.IsLoaded() {
  1206  		peerLog.Debugf("%s sent a filterclear request with no "+
  1207  			"filter loaded -- disconnecting", sp)
  1208  		sp.Disconnect()
  1209  		return
  1210  	}
  1211  
  1212  	sp.filter.Unload()
  1213  }
  1214  
  1215  // OnFilterLoad is invoked when a peer receives a filterload bitcoin
  1216  // message and is used to load a bloom filter that should be used for
  1217  // delivering merkle blocks and associated transactions that match the filter.
  1218  // The peer will be disconnected if the server is not configured to allow bloom
  1219  // filters.
  1220  func (sp *serverPeer) OnFilterLoad(_ *peer.Peer, msg *wire.MsgFilterLoad) {
  1221  	// Disconnect and/or ban depending on the node bloom services flag and
  1222  	// negotiated protocol version.
  1223  	if !sp.enforceNodeBloomFlag(msg.Command()) {
  1224  		return
  1225  	}
  1226  
  1227  	sp.setDisableRelayTx(false)
  1228  
  1229  	sp.filter.Reload(msg)
  1230  }
  1231  
  1232  // OnGetAddr is invoked when a peer receives a getaddr bitcoin message
  1233  // and is used to provide the peer with known addresses from the address
  1234  // manager.
  1235  func (sp *serverPeer) OnGetAddr(_ *peer.Peer, msg *wire.MsgGetAddr) {
  1236  	// Don't return any addresses when running on the simulation test
  1237  	// network.  This helps prevent the network from becoming another
  1238  	// public test network since it will not be able to learn about other
  1239  	// peers that have not specifically been provided.
  1240  	if cfg.SimNet {
  1241  		return
  1242  	}
  1243  
  1244  	// Do not accept getaddr requests from outbound peers.  This reduces
  1245  	// fingerprinting attacks.
  1246  	if !sp.Inbound() {
  1247  		peerLog.Debugf("Ignoring getaddr request from outbound peer "+
  1248  			"%v", sp)
  1249  		return
  1250  	}
  1251  
  1252  	// Only allow one getaddr request per connection to discourage
  1253  	// address stamping of inv announcements.
  1254  	if sp.sentAddrs {
  1255  		peerLog.Debugf("Ignoring repeated getaddr request from peer "+
  1256  			"%v", sp)
  1257  		return
  1258  	}
  1259  	sp.sentAddrs = true
  1260  
  1261  	// Get the current known addresses from the address manager.
  1262  	addrCache := sp.server.addrManager.AddressCache()
  1263  
  1264  	// Push the addresses.
  1265  	sp.pushAddrMsg(addrCache)
  1266  }
  1267  
  1268  // OnAddr is invoked when a peer receives an addr bitcoin message and is
  1269  // used to notify the server about advertised addresses.
  1270  func (sp *serverPeer) OnAddr(_ *peer.Peer, msg *wire.MsgAddr) {
  1271  	// Ignore addresses when running on the simulation test network.  This
  1272  	// helps prevent the network from becoming another public test network
  1273  	// since it will not be able to learn about other peers that have not
  1274  	// specifically been provided.
  1275  	if cfg.SimNet {
  1276  		return
  1277  	}
  1278  
  1279  	// Ignore old style addresses which don't include a timestamp.
  1280  	if sp.ProtocolVersion() < wire.NetAddressTimeVersion {
  1281  		return
  1282  	}
  1283  
  1284  	// A message that has no addresses is invalid.
  1285  	if len(msg.AddrList) == 0 {
  1286  		peerLog.Errorf("Command [%s] from %s does not contain any addresses",
  1287  			msg.Command(), sp.Peer)
  1288  		sp.Disconnect()
  1289  		return
  1290  	}
  1291  
  1292  	for _, na := range msg.AddrList {
  1293  		// Don't add more addresses if we're disconnecting.
  1294  		if !sp.Connected() {
  1295  			return
  1296  		}
  1297  
  1298  		// Set the timestamp to 5 days ago if it's more than 10
  1299  		// minutes in the future so this address is one of the first
  1300  		// to be removed when space is needed.
  1301  		now := time.Now()
  1302  		if na.Timestamp.After(now.Add(time.Minute * 10)) {
  1303  			na.Timestamp = now.Add(-1 * time.Hour * 24 * 5)
  1304  		}
  1305  
  1306  		// Add address to known addresses for this peer.
  1307  		sp.addKnownAddresses([]*wire.NetAddress{na})
  1308  	}
  1309  
  1310  	// Add addresses to server address manager.  The address manager handles
  1311  	// the details of things such as preventing duplicate addresses, max
  1312  	// addresses, and last seen updates.
  1313  	// XXX bitcoind gives a 2 hour time penalty here, do we want to do the
  1314  	// same?
  1315  	sp.server.addrManager.AddAddresses(msg.AddrList, sp.NA())
  1316  }
  1317  
  1318  // OnRead is invoked when a peer receives a message and it is used to update
  1319  // the bytes received by the server.
  1320  func (sp *serverPeer) OnRead(_ *peer.Peer, bytesRead int, msg wire.Message, err error) {
  1321  	sp.server.AddBytesReceived(uint64(bytesRead))
  1322  }
  1323  
  1324  // OnWrite is invoked when a peer sends a message and it is used to update
  1325  // the bytes sent by the server.
  1326  func (sp *serverPeer) OnWrite(_ *peer.Peer, bytesWritten int, msg wire.Message, err error) {
  1327  	sp.server.AddBytesSent(uint64(bytesWritten))
  1328  }
  1329  
  1330  // OnNotFound is invoked when a peer sends a notfound message.
  1331  func (sp *serverPeer) OnNotFound(p *peer.Peer, msg *wire.MsgNotFound) {
  1332  	if !sp.Connected() {
  1333  		return
  1334  	}
  1335  
  1336  	var numBlocks, numTxns uint32
  1337  	for _, inv := range msg.InvList {
  1338  		switch inv.Type {
  1339  		case wire.InvTypeBlock:
  1340  			numBlocks++
  1341  		case wire.InvTypeWitnessBlock:
  1342  			numBlocks++
  1343  		case wire.InvTypeTx:
  1344  			numTxns++
  1345  		case wire.InvTypeWitnessTx:
  1346  			numTxns++
  1347  		default:
  1348  			peerLog.Infof("Invalid inv type '%d' in NotFound message from %s. Disconnecting...", inv.Type, sp)
  1349  			sp.Disconnect()
  1350  			return
  1351  		}
  1352  	}
  1353  	if numBlocks > 0 {
  1354  		blockStr := pickNoun(uint64(numBlocks), "block", "blocks")
  1355  		reason := fmt.Sprintf("%d %v not found on %s", numBlocks, blockStr, sp)
  1356  		if sp.addBanScore(20, 0, reason) {
  1357  			return // once they fail to return us five block requests they're gone for good
  1358  		}
  1359  	}
  1360  	if numTxns > 0 {
  1361  		// This is an expected situation if transactions in the mempool make it into a block before
  1362  		// this node knows about said block. We don't want to ban them for that alone.
  1363  		peerLog.Debugf("%d transactions not found on %s", numTxns, sp)
  1364  		if numBlocks+numTxns < wire.MaxInvPerMsg { // if our message is full then it is likely followed by another one that isn't
  1365  			txStr := pickNoun(uint64(numTxns), "transaction", "transactions")
  1366  			reason := fmt.Sprintf("%d %v not found on %s", numTxns, txStr, sp)
  1367  			if sp.addBanScore(0, 20, reason) {
  1368  				return // if they fail us five times in one minute, they're gone -- hitting them at new-block should be rare
  1369  			}
  1370  		}
  1371  	}
  1372  
  1373  	sp.server.syncManager.QueueNotFound(msg, p)
  1374  }
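
         // A note on the ban scores above: addBanScore adds the persistent and
         // transient penalties respectively, so assuming the default ban
         // threshold of 100, five 20-point penalties are what ban the peer --
         // that is the "five" the inline comments refer to.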
  1375  
   1376  // randomUint16Number returns a random uint16 in the half-open range
   1377  // [0, max).  Note that the range is zero based; if you pass it 1800, you
   1378  // will get values from 0 to 1799 inclusive.
  1379  func randomUint16Number(max uint16) uint16 {
  1380  	// In order to avoid modulo bias and ensure every possible outcome in
  1381  	// [0, max) has equal probability, the random number must be sampled
  1382  	// from a random source that has a range limited to a multiple of the
  1383  	// modulus.
  1384  	var randomNumber uint16
  1385  	var limitRange = (math.MaxUint16 / max) * max
  1386  	for {
  1387  		binary.Read(rand.Reader, binary.LittleEndian, &randomNumber)
  1388  		if randomNumber < limitRange {
  1389  			return (randomNumber % max)
  1390  		}
  1391  	}
  1392  }
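
         // As a worked illustration of the rejection sampling above (not extra
         // behavior): with max = 1800 the limit is (65535/1800)*1800 = 64800,
         // so any sample in [64800, 65535] is rejected and redrawn, and the
         // survivors map uniformly onto [0, 1800) via the modulus.  Note that
         // max must be nonzero or the limit computation divides by zero.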
  1393  
  1394  // AddRebroadcastInventory adds 'iv' to the list of inventories to be
   1395  // rebroadcast at random intervals until they show up in a block.
  1396  func (s *server) AddRebroadcastInventory(iv *wire.InvVect, data interface{}) {
  1397  	// Ignore if shutting down.
  1398  	if atomic.LoadInt32(&s.shutdown) != 0 {
  1399  		return
  1400  	}
  1401  
  1402  	s.modifyRebroadcastInv <- broadcastInventoryAdd{invVect: iv, data: data}
  1403  }
  1404  
  1405  // RemoveRebroadcastInventory removes 'iv' from the list of items to be
   1406  // rebroadcast if present.
  1407  func (s *server) RemoveRebroadcastInventory(iv *wire.InvVect) {
  1408  	// Ignore if shutting down.
  1409  	if atomic.LoadInt32(&s.shutdown) != 0 {
  1410  		return
  1411  	}
  1412  
  1413  	s.modifyRebroadcastInv <- broadcastInventoryDel(iv)
  1414  }
  1415  
  1416  // relayTransactions generates and relays inventory vectors for all of the
  1417  // passed transactions to all connected peers.
  1418  func (s *server) relayTransactions(txns []*mempool.TxDesc) {
  1419  	for _, txD := range txns {
  1420  		iv := wire.NewInvVect(wire.InvTypeTx, txD.Tx.Hash())
  1421  		s.RelayInventory(iv, txD)
  1422  	}
  1423  }
  1424  
  1425  // AnnounceNewTransactions generates and relays inventory vectors and notifies
  1426  // both websocket and getblocktemplate long poll clients of the passed
  1427  // transactions.  This function should be called whenever new transactions
  1428  // are added to the mempool.
  1429  func (s *server) AnnounceNewTransactions(txns []*mempool.TxDesc) {
  1430  	// Generate and relay inventory vectors for all newly accepted
  1431  	// transactions.
  1432  	s.relayTransactions(txns)
  1433  
  1434  	// Notify both websocket and getblocktemplate long poll clients of all
  1435  	// newly accepted transactions.
  1436  	if s.rpcServer != nil {
  1437  		s.rpcServer.NotifyNewTransactions(txns)
  1438  	}
  1439  }
  1440  
   1441  // TransactionConfirmed is called when a transaction has one confirmation on
   1442  // the main chain and marks it as no longer needing to be rebroadcast.
  1443  func (s *server) TransactionConfirmed(tx *btcutil.Tx) {
  1444  	// Rebroadcasting is only necessary when the RPC server is active.
  1445  	if s.rpcServer == nil {
  1446  		return
  1447  	}
  1448  
  1449  	iv := wire.NewInvVect(wire.InvTypeTx, tx.Hash())
  1450  	s.RemoveRebroadcastInventory(iv)
  1451  }
  1452  
  1453  // pushTxMsg sends a tx message for the provided transaction hash to the
  1454  // connected peer.  An error is returned if the transaction hash is not known.
  1455  func (s *server) pushTxMsg(sp *serverPeer, hash *chainhash.Hash, doneChan chan<- struct{},
  1456  	waitChan <-chan struct{}, encoding wire.MessageEncoding) error {
  1457  
  1458  	// Attempt to fetch the requested transaction from the pool.  A
  1459  	// call could be made to check for existence first, but simply trying
  1460  	// to fetch a missing transaction results in the same behavior.
  1461  	tx, err := s.txMemPool.FetchTransaction(hash)
  1462  	if err != nil {
  1463  		peerLog.Tracef("Unable to fetch tx %v from transaction "+
  1464  			"pool: %v", hash, err)
  1465  
  1466  		if doneChan != nil {
  1467  			doneChan <- struct{}{}
  1468  		}
  1469  		return err
  1470  	}
  1471  
   1472  	// Once we have fetched the data, wait for any previous operation to finish.
  1473  	if waitChan != nil {
  1474  		<-waitChan
  1475  	}
  1476  
  1477  	sp.QueueMessageWithEncoding(tx.MsgTx(), doneChan, encoding)
  1478  
  1479  	return nil
  1480  }
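
         // The doneChan/waitChan pairing used by the push*Msg helpers serializes
         // the items of a getdata reply: each item waits on the previous item's
         // wait channel before queueing its own message, and doneChan (when
         // non-nil) is signalled once the message has been sent or an error
         // occurs, so responses go out in the order they were requested.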
  1481  
  1482  // pushBlockMsg sends a block message for the provided block hash to the
  1483  // connected peer.  An error is returned if the block hash is not known.
  1484  func (s *server) pushBlockMsg(sp *serverPeer, hash *chainhash.Hash, doneChan chan<- struct{},
  1485  	waitChan <-chan struct{}, encoding wire.MessageEncoding) error {
  1486  
  1487  	// Fetch the raw block bytes from the database.
  1488  	var blockBytes []byte
  1489  	err := sp.server.db.View(func(dbTx database.Tx) error {
  1490  		var err error
  1491  		blockBytes, err = dbTx.FetchBlock(hash)
  1492  		return err
  1493  	})
  1494  	if err != nil {
  1495  		peerLog.Tracef("Unable to fetch requested block hash %v: %v",
  1496  			hash, err)
  1497  
  1498  		if doneChan != nil {
  1499  			doneChan <- struct{}{}
  1500  		}
  1501  		return err
  1502  	}
  1503  
  1504  	// Deserialize the block.
  1505  	var msgBlock wire.MsgBlock
  1506  	err = msgBlock.Deserialize(bytes.NewReader(blockBytes))
  1507  	if err != nil {
  1508  		peerLog.Tracef("Unable to deserialize requested block hash "+
  1509  			"%v: %v", hash, err)
  1510  
  1511  		if doneChan != nil {
  1512  			doneChan <- struct{}{}
  1513  		}
  1514  		return err
  1515  	}
  1516  
   1517  	// Once we have fetched the data, wait for any previous operation to finish.
  1518  	if waitChan != nil {
  1519  		<-waitChan
  1520  	}
  1521  
  1522  	// We only send the channel for this message if we aren't sending
  1523  	// an inv straight after.
  1524  	var dc chan<- struct{}
  1525  	continueHash := sp.continueHash
  1526  	sendInv := continueHash != nil && continueHash.IsEqual(hash)
  1527  	if !sendInv {
  1528  		dc = doneChan
  1529  	}
  1530  	sp.QueueMessageWithEncoding(&msgBlock, dc, encoding)
  1531  
  1532  	// When the peer requests the final block that was advertised in
  1533  	// response to a getblocks message which requested more blocks than
  1534  	// would fit into a single message, send it a new inventory message
  1535  	// to trigger it to issue another getblocks message for the next
  1536  	// batch of inventory.
  1537  	if sendInv {
  1538  		best := sp.server.chain.BestSnapshot()
  1539  		invMsg := wire.NewMsgInvSizeHint(1)
  1540  		iv := wire.NewInvVect(wire.InvTypeBlock, &best.Hash)
  1541  		invMsg.AddInvVect(iv)
  1542  		sp.QueueMessage(invMsg, doneChan)
  1543  		sp.continueHash = nil
  1544  	}
  1545  	return nil
  1546  }
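
         // The effect is a simple continuation protocol: the peer receives the
         // inv for our current tip, issues another getblocks for the next batch,
         // and the cycle repeats until it has caught up to our best chain.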
  1547  
  1548  // pushMerkleBlockMsg sends a merkleblock message for the provided block hash to
  1549  // the connected peer.  Since a merkle block requires the peer to have a filter
  1550  // loaded, this call will simply be ignored if there is no filter loaded.  An
  1551  // error is returned if the block hash is not known.
  1552  func (s *server) pushMerkleBlockMsg(sp *serverPeer, hash *chainhash.Hash,
  1553  	doneChan chan<- struct{}, waitChan <-chan struct{}, encoding wire.MessageEncoding) error {
  1554  
  1555  	// Do not send a response if the peer doesn't have a filter loaded.
  1556  	if !sp.filter.IsLoaded() {
  1557  		if doneChan != nil {
  1558  			doneChan <- struct{}{}
  1559  		}
  1560  		return nil
  1561  	}
  1562  
   1563  	// Fetch the requested block from the chain.
  1564  	blk, err := sp.server.chain.BlockByHash(hash)
  1565  	if err != nil {
  1566  		peerLog.Tracef("Unable to fetch requested block hash %v: %v",
  1567  			hash, err)
  1568  
  1569  		if doneChan != nil {
  1570  			doneChan <- struct{}{}
  1571  		}
  1572  		return err
  1573  	}
  1574  
  1575  	// Generate a merkle block by filtering the requested block according
  1576  	// to the filter for the peer.
  1577  	merkle, matchedTxIndices := bloom.NewMerkleBlock(blk, sp.filter)
  1578  
   1579  	// Once we have fetched the data, wait for any previous operation to finish.
  1580  	if waitChan != nil {
  1581  		<-waitChan
  1582  	}
  1583  
  1584  	// Send the merkleblock.  Only send the done channel with this message
  1585  	// if no transactions will be sent afterwards.
  1586  	var dc chan<- struct{}
  1587  	if len(matchedTxIndices) == 0 {
  1588  		dc = doneChan
  1589  	}
  1590  	sp.QueueMessage(merkle, dc)
  1591  
  1592  	// Finally, send any matched transactions.
  1593  	blkTransactions := blk.MsgBlock().Transactions
  1594  	for i, txIndex := range matchedTxIndices {
  1595  		// Only send the done channel on the final transaction.
  1596  		var dc chan<- struct{}
  1597  		if i == len(matchedTxIndices)-1 {
  1598  			dc = doneChan
  1599  		}
  1600  		if txIndex < uint32(len(blkTransactions)) {
  1601  			sp.QueueMessageWithEncoding(blkTransactions[txIndex], dc,
  1602  				encoding)
  1603  		}
  1604  	}
  1605  
  1606  	return nil
  1607  }
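
         // Per BIP 37, the transactions matching the peer's bloom filter are
         // sent individually right after the merkleblock message itself, which
         // is why the done channel above is attached to the final matched
         // transaction rather than to the merkleblock.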
  1608  
   1609  // handleUpdatePeerHeights updates the heights of all peers who were known to
  1610  // announce a block we recently accepted.
  1611  func (s *server) handleUpdatePeerHeights(state *peerState, umsg updatePeerHeightsMsg) {
  1612  	state.forAllPeers(func(sp *serverPeer) {
  1613  		// The origin peer should already have the updated height.
  1614  		if sp.Peer == umsg.originPeer {
  1615  			return
  1616  		}
  1617  
  1618  		// This is a pointer to the underlying memory which doesn't
  1619  		// change.
  1620  		latestBlkHash := sp.LastAnnouncedBlock()
  1621  
  1622  		// Skip this peer if it hasn't recently announced any new blocks.
  1623  		if latestBlkHash == nil {
  1624  			return
  1625  		}
  1626  
  1627  		// If the peer has recently announced a block, and this block
  1628  		// matches our newly accepted block, then update their block
  1629  		// height.
  1630  		if *latestBlkHash == *umsg.newHash {
  1631  			sp.UpdateLastBlockHeight(umsg.newHeight)
  1632  			sp.UpdateLastAnnouncedBlock(nil)
  1633  		}
  1634  	})
  1635  }
  1636  
  1637  // handleAddPeerMsg deals with adding new peers.  It is invoked from the
  1638  // peerHandler goroutine.
  1639  func (s *server) handleAddPeerMsg(state *peerState, sp *serverPeer) bool {
  1640  	if sp == nil || !sp.Connected() {
  1641  		return false
  1642  	}
  1643  
  1644  	// Disconnect peers with unwanted user agents.
  1645  	if sp.HasUndesiredUserAgent(s.agentBlacklist, s.agentWhitelist) {
  1646  		sp.Disconnect()
  1647  		return false
  1648  	}
  1649  
  1650  	// Ignore new peers if we're shutting down.
  1651  	if atomic.LoadInt32(&s.shutdown) != 0 {
  1652  		srvrLog.Infof("New peer %s ignored - server is shutting down", sp)
  1653  		sp.Disconnect()
  1654  		return false
  1655  	}
  1656  
  1657  	// Disconnect banned peers.
  1658  	host, _, err := net.SplitHostPort(sp.Addr())
  1659  	if err != nil {
  1660  		srvrLog.Debugf("can't split hostport %v", err)
  1661  		sp.Disconnect()
  1662  		return false
  1663  	}
  1664  	if ban, ok := state.banned[host]; ok {
  1665  		if time.Now().Before(ban.until) {
  1666  			srvrLog.Infof("Peer %s is banned for another %v - disconnecting",
  1667  				host, time.Until(ban.until))
  1668  			sp.Disconnect()
  1669  			return false
  1670  		}
  1671  
  1672  		srvrLog.Infof("Peer %s is no longer banned", host)
  1673  		delete(state.banned, host)
  1674  	}
  1675  
  1676  	// TODO: Check for max peers from a single IP.
  1677  
  1678  	// Limit max number of total peers.
  1679  	if state.Count() >= cfg.MaxPeers {
  1680  		srvrLog.Infof("Max peers reached [%d] - disconnecting peer %s",
  1681  			cfg.MaxPeers, sp)
  1682  		sp.Disconnect()
  1683  		// TODO: how to handle permanent peers here?
  1684  		// they should be rescheduled.
  1685  		return false
  1686  	}
  1687  
  1688  	// Add the new peer and start it.
  1689  	srvrLog.Debugf("New peer %s", sp)
  1690  	if sp.Inbound() {
  1691  		state.inboundPeers[sp.ID()] = sp
  1692  	} else {
  1693  		state.outboundGroups[addrmgr.GroupKey(sp.NA())]++
  1694  		if sp.persistent {
  1695  			state.persistentPeers[sp.ID()] = sp
  1696  		} else {
  1697  			state.outboundPeers[sp.ID()] = sp
  1698  		}
  1699  	}
  1700  
  1701  	// Update the address' last seen time if the peer has acknowledged
  1702  	// our version and has sent us its version as well.
  1703  	if sp.VerAckReceived() && sp.VersionKnown() && sp.NA() != nil {
  1704  		s.addrManager.Connected(sp.NA())
  1705  	}
  1706  
  1707  	// Signal the sync manager this peer is a new sync candidate.
  1708  	s.syncManager.NewPeer(sp.Peer)
  1709  
  1710  	// Update the address manager and request known addresses from the
  1711  	// remote peer for outbound connections. This is skipped when running on
  1712  	// the simulation test network since it is only intended to connect to
  1713  	// specified peers and actively avoids advertising and connecting to
  1714  	// discovered peers.
  1715  	if !cfg.SimNet && !sp.Inbound() {
  1716  		// Advertise the local address when the server accepts incoming
  1717  		// connections and it believes itself to be close to the best
  1718  		// known tip.
  1719  		if !cfg.DisableListen && s.syncManager.IsCurrent() {
  1720  			// Get address that best matches.
  1721  			lna := s.addrManager.GetBestLocalAddress(sp.NA())
  1722  			if addrmgr.IsRoutable(lna) {
  1723  				// Filter addresses the peer already knows about.
  1724  				addresses := []*wire.NetAddress{lna}
  1725  				sp.pushAddrMsg(addresses)
  1726  			}
  1727  		}
  1728  
  1729  		// Request known addresses if the server address manager needs
  1730  		// more and the peer has a protocol version new enough to
  1731  		// include a timestamp with addresses.
  1732  		hasTimestamp := sp.ProtocolVersion() >= wire.NetAddressTimeVersion
  1733  		if s.addrManager.NeedMoreAddresses() && hasTimestamp {
  1734  			sp.QueueMessage(wire.NewMsgGetAddr(), nil)
  1735  		}
  1736  
  1737  		// Mark the address as a known good address.
  1738  		s.addrManager.Good(sp.NA())
  1739  	}
  1740  
  1741  	return true
  1742  }
  1743  
  1744  // handleDonePeerMsg deals with peers that have signalled they are done.  It is
  1745  // invoked from the peerHandler goroutine.
  1746  func (s *server) handleDonePeerMsg(state *peerState, sp *serverPeer) {
  1747  	var list map[int32]*serverPeer
  1748  	if sp.persistent {
  1749  		list = state.persistentPeers
  1750  	} else if sp.Inbound() {
  1751  		list = state.inboundPeers
  1752  	} else {
  1753  		list = state.outboundPeers
  1754  	}
  1755  
  1756  	// Regardless of whether the peer was found in our list, we'll inform
  1757  	// our connection manager about the disconnection. This can happen if we
  1758  	// process a peer's `done` message before its `add`.
  1759  	if !sp.Inbound() {
  1760  		if sp.persistent {
  1761  			s.connManager.Disconnect(sp.connReq.ID())
  1762  		} else {
  1763  			s.connManager.Remove(sp.connReq.ID())
  1764  			go s.connManager.NewConnReq()
  1765  		}
  1766  	}
  1767  
  1768  	if _, ok := list[sp.ID()]; ok {
  1769  		if !sp.Inbound() && sp.VersionKnown() {
  1770  			state.outboundGroups[addrmgr.GroupKey(sp.NA())]--
  1771  		}
  1772  		delete(list, sp.ID())
  1773  		srvrLog.Debugf("Removed peer %s", sp)
  1774  		return
  1775  	}
  1776  }
  1777  
  1778  // handleBanPeerMsg deals with banning peers.  It is invoked from the
  1779  // peerHandler goroutine.
  1780  func (s *server) handleBanPeerMsg(state *peerState, sp *serverPeer) {
  1781  	host, _, err := net.SplitHostPort(sp.Addr())
  1782  	if err != nil {
  1783  		srvrLog.Debugf("can't split ban peer %s %v", sp.Addr(), err)
  1784  		return
  1785  	}
  1786  	direction := directionString(sp.Inbound())
  1787  	srvrLog.Infof("Banned peer %s (%s) for %v", host, direction,
  1788  		cfg.BanDuration)
  1789  
  1790  	since := time.Now()
  1791  	state.banned[host] = bannedPeriod{
  1792  		since: since,
  1793  		until: since.Add(cfg.BanDuration),
  1794  	}
  1795  }
  1796  
  1797  // handleRelayInvMsg deals with relaying inventory to peers that are not already
  1798  // known to have it.  It is invoked from the peerHandler goroutine.
  1799  func (s *server) handleRelayInvMsg(state *peerState, msg relayMsg) {
  1800  	state.forAllPeers(func(sp *serverPeer) {
  1801  		if !sp.Connected() {
  1802  			return
  1803  		}
  1804  
  1805  		// If the inventory is a block and the peer prefers headers,
  1806  		// generate and send a headers message instead of an inventory
  1807  		// message.
  1808  		if msg.invVect.Type == wire.InvTypeBlock && sp.WantsHeaders() {
  1809  			blockHeader, ok := msg.data.(wire.BlockHeader)
  1810  			if !ok {
  1811  				peerLog.Warnf("Underlying data for headers" +
  1812  					" is not a block header")
  1813  				return
  1814  			}
  1815  			msgHeaders := wire.NewMsgHeaders()
  1816  			if err := msgHeaders.AddBlockHeader(&blockHeader); err != nil {
  1817  				peerLog.Errorf("Failed to add block"+
  1818  					" header: %v", err)
  1819  				return
  1820  			}
  1821  			sp.QueueMessage(msgHeaders, nil)
  1822  			return
  1823  		}
  1824  
  1825  		if msg.invVect.Type == wire.InvTypeTx {
  1826  			// Don't relay the transaction to the peer when it has
  1827  			// transaction relaying disabled.
  1828  			if sp.relayTxDisabled() {
  1829  				return
  1830  			}
  1831  
  1832  			txD, ok := msg.data.(*mempool.TxDesc)
  1833  			if !ok {
  1834  				peerLog.Warnf("Underlying data for tx inv "+
  1835  					"relay is not a *mempool.TxDesc: %T",
  1836  					msg.data)
  1837  				return
  1838  			}
  1839  
  1840  			// Don't relay the transaction if the transaction fee-per-kb
  1841  			// is less than the peer's feefilter.
  1842  			feeFilter := atomic.LoadInt64(&sp.feeFilter)
  1843  			if feeFilter > 0 && txD.FeePerKB < feeFilter {
  1844  				return
  1845  			}
  1846  
  1847  			// Don't relay the transaction if there is a bloom
  1848  			// filter loaded and the transaction doesn't match it.
  1849  			if sp.filter.IsLoaded() {
  1850  				if !sp.filter.MatchTxAndUpdate(txD.Tx) {
  1851  					return
  1852  				}
  1853  			}
  1854  		}
  1855  
  1856  		// Queue the inventory to be relayed with the next batch.
  1857  		// It will be ignored if the peer is already known to
  1858  		// have the inventory.
  1859  		sp.QueueInventory(msg.invVect)
  1860  	})
  1861  }
  1862  
  1863  // handleBroadcastMsg deals with broadcasting messages to peers.  It is invoked
  1864  // from the peerHandler goroutine.
  1865  func (s *server) handleBroadcastMsg(state *peerState, bmsg *broadcastMsg) {
  1866  	state.forAllPeers(func(sp *serverPeer) {
  1867  		if !sp.Connected() {
  1868  			return
  1869  		}
  1870  
  1871  		for _, ep := range bmsg.excludePeers {
  1872  			if sp == ep {
  1873  				return
  1874  			}
  1875  		}
  1876  
  1877  		sp.QueueMessage(bmsg.message, nil)
  1878  	})
  1879  }
  1880  
  1881  type getConnCountMsg struct {
  1882  	reply chan int32
  1883  }
  1884  
  1885  type getPeersMsg struct {
  1886  	reply chan []*serverPeer
  1887  }
  1888  
  1889  type listBannedPeersMsg struct {
  1890  	reply chan map[string]bannedPeriod
  1891  }
  1892  
  1893  type setBanMsg struct {
  1894  	addr  string
  1895  	since time.Time
  1896  	until time.Time
  1897  	reply chan error
  1898  }
  1899  
  1900  type removeBanMsg struct {
  1901  	addr  string
  1902  	reply chan error
  1903  }
  1904  
  1905  type clearBannedMsg struct {
  1906  	reply chan error
  1907  }
  1908  type getOutboundGroup struct {
  1909  	key   string
  1910  	reply chan int
  1911  }
  1912  
  1913  type getAddedNodesMsg struct {
  1914  	reply chan []*serverPeer
  1915  }
  1916  
  1917  type disconnectNodeMsg struct {
  1918  	cmp   func(*serverPeer) bool
  1919  	reply chan error
  1920  }
  1921  
  1922  type connectNodeMsg struct {
  1923  	addr      string
  1924  	permanent bool
  1925  	reply     chan error
  1926  }
  1927  
  1928  type removeNodeMsg struct {
  1929  	cmp   func(*serverPeer) bool
  1930  	reply chan error
  1931  }
  1932  
  1933  // handleQuery is the central handler for all queries and commands from other
  1934  // goroutines related to peer state.
  1935  func (s *server) handleQuery(state *peerState, querymsg interface{}) {
  1936  	switch msg := querymsg.(type) {
  1937  	case getConnCountMsg:
  1938  		nconnected := int32(0)
  1939  		state.forAllPeers(func(sp *serverPeer) {
  1940  			if sp.Connected() {
  1941  				nconnected++
  1942  			}
  1943  		})
  1944  		msg.reply <- nconnected
  1945  
  1946  	case getPeersMsg:
  1947  		peers := make([]*serverPeer, 0, state.Count())
  1948  		state.forAllPeers(func(sp *serverPeer) {
  1949  			if !sp.Connected() {
  1950  				return
  1951  			}
  1952  			peers = append(peers, sp)
  1953  		})
  1954  		msg.reply <- peers
  1955  
  1956  	case listBannedPeersMsg:
  1957  		banned := map[string]bannedPeriod{}
  1958  		for host, ban := range state.banned {
  1959  			banned[host] = ban
  1960  		}
  1961  		msg.reply <- banned
  1962  
  1963  	case setBanMsg:
  1964  		ban := bannedPeriod{
  1965  			since: msg.since,
  1966  			until: msg.until,
  1967  		}
  1968  		state.banned[msg.addr] = ban
  1969  		msg.reply <- nil
  1970  
  1971  	case removeBanMsg:
  1972  		delete(state.banned, msg.addr)
  1973  		msg.reply <- nil
  1974  
  1975  	case clearBannedMsg:
  1976  		state.banned = map[string]bannedPeriod{}
  1977  		msg.reply <- nil
  1978  
  1979  	case connectNodeMsg:
  1980  		// TODO: duplicate oneshots?
  1981  		// Limit max number of total peers.
  1982  		if state.Count() >= cfg.MaxPeers {
  1983  			msg.reply <- errors.New("max peers reached")
  1984  			return
  1985  		}
  1986  		for _, peer := range state.persistentPeers {
  1987  			if peer.Addr() == msg.addr {
  1988  				if msg.permanent {
  1989  					msg.reply <- errors.New("peer already connected")
  1990  				} else {
  1991  					msg.reply <- errors.New("peer exists as a permanent peer")
  1992  				}
  1993  				return
  1994  			}
  1995  		}
  1996  
  1997  		netAddr, err := addrStringToNetAddr(msg.addr)
  1998  		if err != nil {
  1999  			msg.reply <- err
  2000  			return
  2001  		}
  2002  
  2003  		// TODO: if too many, nuke a non-perm peer.
  2004  		go s.connManager.Connect(&connmgr.ConnReq{
  2005  			Addr:      netAddr,
  2006  			Permanent: msg.permanent,
  2007  		})
  2008  		msg.reply <- nil
  2009  	case removeNodeMsg:
  2010  		found := disconnectPeer(state.persistentPeers, msg.cmp, func(sp *serverPeer) {
   2011  			// Keep the group counts accurate since we're
   2012  			// removing the peer from the list now.
  2013  			state.outboundGroups[addrmgr.GroupKey(sp.NA())]--
  2014  		})
  2015  
  2016  		if found {
  2017  			msg.reply <- nil
  2018  		} else {
  2019  			msg.reply <- errors.New("peer not found")
  2020  		}
  2021  	case getOutboundGroup:
  2022  		count, ok := state.outboundGroups[msg.key]
  2023  		if ok {
  2024  			msg.reply <- count
  2025  		} else {
  2026  			msg.reply <- 0
  2027  		}
  2028  	// Request a list of the persistent (added) peers.
  2029  	case getAddedNodesMsg:
  2030  		// Respond with a slice of the relevant peers.
  2031  		peers := make([]*serverPeer, 0, len(state.persistentPeers))
  2032  		for _, sp := range state.persistentPeers {
  2033  			peers = append(peers, sp)
  2034  		}
  2035  		msg.reply <- peers
  2036  	case disconnectNodeMsg:
  2037  		// Check inbound peers. We pass a nil callback since we don't
  2038  		// require any additional actions on disconnect for inbound peers.
  2039  		found := disconnectPeer(state.inboundPeers, msg.cmp, nil)
  2040  		if found {
  2041  			msg.reply <- nil
  2042  			return
  2043  		}
  2044  
  2045  		// Check outbound peers.
  2046  		found = disconnectPeer(state.outboundPeers, msg.cmp, func(sp *serverPeer) {
   2047  			// Keep the group counts accurate since we're
   2048  			// removing the peer from the list now.
  2049  			state.outboundGroups[addrmgr.GroupKey(sp.NA())]--
  2050  		})
  2051  		if found {
  2052  			// If there are multiple outbound connections to the same
  2053  			// ip:port, continue disconnecting them all until no such
  2054  			// peers are found.
  2055  			for found {
  2056  				found = disconnectPeer(state.outboundPeers, msg.cmp, func(sp *serverPeer) {
  2057  					state.outboundGroups[addrmgr.GroupKey(sp.NA())]--
  2058  				})
  2059  			}
  2060  			msg.reply <- nil
  2061  			return
  2062  		}
  2063  
  2064  		msg.reply <- errors.New("peer not found")
  2065  	}
  2066  }
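
         // Each of the query messages above follows the same request/reply
         // pattern: the caller allocates a reply channel, sends the message on
         // s.query, and blocks on the reply while the peerHandler goroutine
         // services it.  A minimal sketch, mirroring ConnectedCount below:
         //
         //	reply := make(chan int32)
         //	s.query <- getConnCountMsg{reply: reply}
         //	count := <-reply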
  2067  
  2068  // disconnectPeer attempts to drop the connection of a targeted peer in the
  2069  // passed peer list. Targets are identified via usage of the passed
  2070  // `compareFunc`, which should return `true` if the passed peer is the target
   2071  // peer.  This function returns true on success and false if the peer could
   2072  // not be located.  If the peer is found and the passed callback `whenFound`
   2073  // isn't nil, it is called with the peer as its argument before the peer is
   2074  // removed from the peer list and disconnected from the server.
  2075  func disconnectPeer(peerList map[int32]*serverPeer, compareFunc func(*serverPeer) bool, whenFound func(*serverPeer)) bool {
  2076  	for addr, peer := range peerList {
  2077  		if compareFunc(peer) {
  2078  			if whenFound != nil {
  2079  				whenFound(peer)
  2080  			}
  2081  
  2082  			// This is ok because we are not continuing
   2083  			// to iterate, so we won't corrupt the loop.
  2084  			delete(peerList, addr)
  2085  			peer.Disconnect()
  2086  			return true
  2087  		}
  2088  	}
  2089  	return false
  2090  }
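
         // A typical comparator simply matches on an attribute of the peer; as
         // an illustrative sketch only (targetAddr is a hypothetical variable):
         //
         //	cmp := func(sp *serverPeer) bool { return sp.Addr() == targetAddr }
         //	found := disconnectPeer(state.outboundPeers, cmp, nil)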
  2091  
  2092  // newPeerConfig returns the configuration for the given serverPeer.
  2093  func newPeerConfig(sp *serverPeer) *peer.Config {
  2094  	return &peer.Config{
  2095  		Listeners: peer.MessageListeners{
  2096  			OnVersion:      sp.OnVersion,
  2097  			OnVerAck:       sp.OnVerAck,
  2098  			OnMemPool:      sp.OnMemPool,
  2099  			OnTx:           sp.OnTx,
  2100  			OnBlock:        sp.OnBlock,
  2101  			OnInv:          sp.OnInv,
  2102  			OnHeaders:      sp.OnHeaders,
  2103  			OnGetData:      sp.OnGetData,
  2104  			OnGetBlocks:    sp.OnGetBlocks,
  2105  			OnGetHeaders:   sp.OnGetHeaders,
  2106  			OnGetCFilters:  sp.OnGetCFilters,
  2107  			OnGetCFHeaders: sp.OnGetCFHeaders,
  2108  			OnGetCFCheckpt: sp.OnGetCFCheckpt,
  2109  			OnFeeFilter:    sp.OnFeeFilter,
  2110  			OnFilterAdd:    sp.OnFilterAdd,
  2111  			OnFilterClear:  sp.OnFilterClear,
  2112  			OnFilterLoad:   sp.OnFilterLoad,
  2113  			OnGetAddr:      sp.OnGetAddr,
  2114  			OnAddr:         sp.OnAddr,
  2115  			OnRead:         sp.OnRead,
  2116  			OnWrite:        sp.OnWrite,
  2117  			OnNotFound:     sp.OnNotFound,
  2118  
  2119  			// Note: The reference client currently bans peers that send alerts
  2120  			// not signed with its key.  We could verify against their key, but
  2121  			// since the reference client is currently unwilling to support
  2122  			// other implementations' alert messages, we will not relay theirs.
  2123  			OnAlert: nil,
  2124  		},
  2125  		NewestBlock:         sp.newestBlock,
  2126  		HostToNetAddress:    sp.server.addrManager.HostToNetAddress,
  2127  		Proxy:               cfg.Proxy,
  2128  		UserAgentName:       userAgentName,
  2129  		UserAgentVersion:    userAgentVersion,
  2130  		UserAgentComments:   cfg.UserAgentComments,
  2131  		ChainParams:         sp.server.chainParams,
  2132  		Services:            sp.server.services,
  2133  		DisableRelayTx:      cfg.BlocksOnly,
  2134  		ProtocolVersion:     peer.MaxProtocolVersion,
  2135  		TrickleInterval:     cfg.TrickleInterval,
  2136  		DisableStallHandler: cfg.DisableStallHandler,
  2137  	}
  2138  }
  2139  
  2140  // inboundPeerConnected is invoked by the connection manager when a new inbound
  2141  // connection is established.  It initializes a new inbound server peer
  2142  // instance, associates it with the connection, and starts a goroutine to wait
  2143  // for disconnection.
  2144  func (s *server) inboundPeerConnected(conn net.Conn) {
  2145  	sp := newServerPeer(s, false)
  2146  	sp.isWhitelisted = isWhitelisted(conn.RemoteAddr())
  2147  	sp.Peer = peer.NewInboundPeer(newPeerConfig(sp))
  2148  	sp.AssociateConnection(conn)
  2149  	go s.peerDoneHandler(sp)
  2150  }
  2151  
  2152  // outboundPeerConnected is invoked by the connection manager when a new
  2153  // outbound connection is established.  It initializes a new outbound server
  2154  // peer instance, associates it with the relevant state such as the connection
  2155  // request instance and the connection itself, and finally notifies the address
  2156  // manager of the attempt.
  2157  func (s *server) outboundPeerConnected(c *connmgr.ConnReq, conn net.Conn) {
  2158  	sp := newServerPeer(s, c.Permanent)
  2159  	p, err := peer.NewOutboundPeer(newPeerConfig(sp), c.Addr.String())
  2160  	if err != nil {
  2161  		srvrLog.Debugf("Cannot create outbound peer %s: %v", c.Addr, err)
  2162  		if c.Permanent {
  2163  			s.connManager.Disconnect(c.ID())
  2164  		} else {
  2165  			s.connManager.Remove(c.ID())
  2166  			go s.connManager.NewConnReq()
  2167  		}
  2168  		return
  2169  	}
  2170  	sp.Peer = p
  2171  	sp.connReq = c
  2172  	sp.isWhitelisted = isWhitelisted(conn.RemoteAddr())
  2173  	sp.AssociateConnection(conn)
  2174  	go s.peerDoneHandler(sp)
  2175  }
  2176  
   2177  // peerDoneHandler handles peer disconnects by notifying the server that it's
   2178  // done along with performing other desirable cleanup.
  2179  func (s *server) peerDoneHandler(sp *serverPeer) {
  2180  	sp.WaitForDisconnect()
  2181  	s.donePeers <- sp
  2182  
  2183  	// Only tell sync manager we are gone if we ever told it we existed.
  2184  	if sp.VerAckReceived() {
  2185  		s.syncManager.DonePeer(sp.Peer)
  2186  
  2187  		// Evict any remaining orphans that were sent by the peer.
  2188  		numEvicted := s.txMemPool.RemoveOrphansByTag(mempool.Tag(sp.ID()))
  2189  		if numEvicted > 0 {
  2190  			txmpLog.Debugf("Evicted %d %s from peer %v (id %d)",
  2191  				numEvicted, pickNoun(numEvicted, "orphan",
  2192  					"orphans"), sp, sp.ID())
  2193  		}
  2194  	}
  2195  	close(sp.quit)
  2196  }
  2197  
  2198  // peerHandler is used to handle peer operations such as adding and removing
  2199  // peers to and from the server, banning peers, and broadcasting messages to
  2200  // peers.  It must be run in a goroutine.
  2201  func (s *server) peerHandler() {
  2202  	// Start the address manager and sync manager, both of which are needed
  2203  	// by peers.  This is done here since their lifecycle is closely tied
   2204  	// to this handler and rather than adding more channels to synchronize
  2205  	// things, it's easier and slightly faster to simply start and stop them
  2206  	// in this handler.
  2207  	s.addrManager.Start()
  2208  	s.syncManager.Start()
  2209  
  2210  	srvrLog.Tracef("Starting peer handler")
  2211  
  2212  	state := &peerState{
  2213  		inboundPeers:    make(map[int32]*serverPeer),
  2214  		persistentPeers: make(map[int32]*serverPeer),
  2215  		outboundPeers:   make(map[int32]*serverPeer),
  2216  		banned:          make(map[string]bannedPeriod),
  2217  		outboundGroups:  make(map[string]int),
  2218  	}
  2219  
  2220  	if !cfg.DisableDNSSeed {
  2221  		// Add peers discovered through DNS to the address manager.
  2222  		connmgr.SeedFromDNS(activeNetParams.Params, defaultRequiredServices,
  2223  			btcdLookup, func(addrs []*wire.NetAddress) {
   2224  			// Bitcoind uses a lookup of the DNS seeder here.  This
   2225  			// is rather strange since the values returned by the
   2226  			// DNS seed lookups will vary quite a lot.  To replicate
   2227  			// this behaviour we record all of the addresses as
   2228  			// having come from the first one.
  2229  				s.addrManager.AddAddresses(addrs, addrs[0])
  2230  			})
  2231  	}
  2232  	go s.connManager.Start()
  2233  
  2234  out:
  2235  	for {
  2236  		select {
  2237  		// New peers connected to the server.
  2238  		case p := <-s.newPeers:
  2239  			s.handleAddPeerMsg(state, p)
  2240  
  2241  		// Disconnected peers.
  2242  		case p := <-s.donePeers:
  2243  			s.handleDonePeerMsg(state, p)
  2244  
  2245  		// Block accepted in mainchain or orphan, update peer height.
  2246  		case umsg := <-s.peerHeightsUpdate:
  2247  			s.handleUpdatePeerHeights(state, umsg)
  2248  
  2249  		// Peer to ban.
  2250  		case p := <-s.banPeers:
  2251  			s.handleBanPeerMsg(state, p)
  2252  
  2253  		// New inventory to potentially be relayed to other peers.
  2254  		case invMsg := <-s.relayInv:
  2255  			s.handleRelayInvMsg(state, invMsg)
  2256  
  2257  		// Message to broadcast to all connected peers except those
  2258  		// which are excluded by the message.
  2259  		case bmsg := <-s.broadcast:
  2260  			s.handleBroadcastMsg(state, &bmsg)
  2261  
  2262  		case qmsg := <-s.query:
  2263  			s.handleQuery(state, qmsg)
  2264  
  2265  		case <-s.quit:
  2266  			// Disconnect all peers on server shutdown.
  2267  			state.forAllPeers(func(sp *serverPeer) {
  2268  				srvrLog.Tracef("Shutdown peer %s", sp)
  2269  				sp.Disconnect()
  2270  			})
  2271  			break out
  2272  		}
  2273  	}
  2274  
  2275  	s.connManager.Stop()
  2276  	s.syncManager.Stop()
  2277  	s.addrManager.Stop()
  2278  
  2279  	// Drain channels before exiting so nothing is left waiting around
  2280  	// to send.
  2281  cleanup:
  2282  	for {
  2283  		select {
  2284  		case <-s.newPeers:
  2285  		case <-s.donePeers:
  2286  		case <-s.peerHeightsUpdate:
  2287  		case <-s.relayInv:
  2288  		case <-s.broadcast:
  2289  		case <-s.query:
  2290  		default:
  2291  			break cleanup
  2292  		}
  2293  	}
  2294  	s.wg.Done()
  2295  	srvrLog.Tracef("Peer handler done")
  2296  }
  2297  
  2298  // AddPeer adds a new peer that has already been connected to the server.
  2299  func (s *server) AddPeer(sp *serverPeer) {
  2300  	s.newPeers <- sp
  2301  }
  2302  
  2303  // BanPeer bans a peer that has already been connected to the server by ip.
   2304  // BanPeer bans a peer (by IP address) that has already been connected to the server.
  2305  	s.banPeers <- sp
  2306  }
  2307  
  2308  // RelayInventory relays the passed inventory vector to all connected peers
  2309  // that are not already known to have it.
  2310  func (s *server) RelayInventory(invVect *wire.InvVect, data interface{}) {
  2311  	s.relayInv <- relayMsg{invVect: invVect, data: data}
  2312  }
  2313  
  2314  // BroadcastMessage sends msg to all peers currently connected to the server
  2315  // except those in the passed peers to exclude.
  2316  func (s *server) BroadcastMessage(msg wire.Message, exclPeers ...*serverPeer) {
  2317  	// XXX: Need to determine if this is an alert that has already been
  2318  	// broadcast and refrain from broadcasting again.
  2319  	bmsg := broadcastMsg{message: msg, excludePeers: exclPeers}
  2320  	s.broadcast <- bmsg
  2321  }
  2322  
  2323  // ConnectedCount returns the number of currently connected peers.
  2324  func (s *server) ConnectedCount() int32 {
  2325  	replyChan := make(chan int32)
  2326  
  2327  	s.query <- getConnCountMsg{reply: replyChan}
  2328  
  2329  	return <-replyChan
  2330  }
  2331  
  2332  // OutboundGroupCount returns the number of peers connected to the given
  2333  // outbound group key.
  2334  func (s *server) OutboundGroupCount(key string) int {
  2335  	replyChan := make(chan int)
  2336  	s.query <- getOutboundGroup{key: key, reply: replyChan}
  2337  	return <-replyChan
  2338  }
  2339  
  2340  // AddBytesSent adds the passed number of bytes to the total bytes sent counter
  2341  // for the server.  It is safe for concurrent access.
  2342  func (s *server) AddBytesSent(bytesSent uint64) {
  2343  	atomic.AddUint64(&s.bytesSent, bytesSent)
  2344  }
  2345  
  2346  // AddBytesReceived adds the passed number of bytes to the total bytes received
  2347  // counter for the server.  It is safe for concurrent access.
  2348  func (s *server) AddBytesReceived(bytesReceived uint64) {
  2349  	atomic.AddUint64(&s.bytesReceived, bytesReceived)
  2350  }
  2351  
  2352  // NetTotals returns the sum of all bytes received and sent across the network
  2353  // for all peers.  It is safe for concurrent access.
  2354  func (s *server) NetTotals() (uint64, uint64) {
  2355  	return atomic.LoadUint64(&s.bytesReceived),
  2356  		atomic.LoadUint64(&s.bytesSent)
  2357  }
  2358  
   2359  // UpdatePeerHeights updates the heights of all peers who have announced
  2360  // the latest connected main chain block, or a recognized orphan. These height
  2361  // updates allow us to dynamically refresh peer heights, ensuring sync peer
  2362  // selection has access to the latest block heights for each peer.
  2363  func (s *server) UpdatePeerHeights(latestBlkHash *chainhash.Hash, latestHeight int32, updateSource *peer.Peer) {
  2364  	s.peerHeightsUpdate <- updatePeerHeightsMsg{
  2365  		newHash:    latestBlkHash,
  2366  		newHeight:  latestHeight,
  2367  		originPeer: updateSource,
  2368  	}
  2369  }
  2370  
  2371  // rebroadcastHandler keeps track of user submitted inventories that we have
  2372  // sent out but have not yet made it into a block. We periodically rebroadcast
  2373  // them in case our peers restarted or otherwise lost track of them.
  2374  func (s *server) rebroadcastHandler() {
  2375  	// Wait 5 min before first tx rebroadcast.
  2376  	timer := time.NewTimer(5 * time.Minute)
  2377  	pendingInvs := make(map[wire.InvVect]interface{})
  2378  
  2379  out:
  2380  	for {
  2381  		select {
  2382  		case riv := <-s.modifyRebroadcastInv:
  2383  			switch msg := riv.(type) {
  2384  			// Incoming InvVects are added to our map of RPC txs.
  2385  			case broadcastInventoryAdd:
  2386  				pendingInvs[*msg.invVect] = msg.data
  2387  
  2388  			// When an InvVect has been added to a block, we can
  2389  			// now remove it, if it was present.
  2390  			case broadcastInventoryDel:
  2391  				delete(pendingInvs, *msg)
  2392  			}
  2393  
  2394  		case <-timer.C:
   2395  			// Any inventory still pending has not made it into a
   2396  			// block yet, so we periodically resubmit it until it has.
  2397  			for iv, data := range pendingInvs {
  2398  				ivCopy := iv
  2399  				s.RelayInventory(&ivCopy, data)
  2400  			}
  2401  
   2402  			// Process again at a random time up to 30 minutes
   2403  			// (in seconds) in the future.
  2404  			timer.Reset(time.Second *
  2405  				time.Duration(randomUint16Number(1800)))
  2406  
  2407  		case <-s.quit:
  2408  			break out
  2409  		}
  2410  	}
  2411  
  2412  	timer.Stop()
  2413  
  2414  	// Drain channels before exiting so nothing is left waiting around
  2415  	// to send.
  2416  cleanup:
  2417  	for {
  2418  		select {
  2419  		case <-s.modifyRebroadcastInv:
  2420  		default:
  2421  			break cleanup
  2422  		}
  2423  	}
  2424  	s.wg.Done()
  2425  }
  2426  
  2427  // Start begins accepting connections from peers.
  2428  func (s *server) Start() {
  2429  	// Already started?
  2430  	if atomic.AddInt32(&s.started, 1) != 1 {
  2431  		return
  2432  	}
  2433  
  2434  	srvrLog.Trace("Starting server")
  2435  
  2436  	// Start the peer handler which in turn starts the address and block
  2437  	// managers.
  2438  	s.wg.Add(1)
  2439  	go s.peerHandler()
  2440  
  2441  	if s.nat != nil {
  2442  		s.wg.Add(1)
  2443  		go s.upnpUpdateThread()
  2444  	}
  2445  
  2446  	if !cfg.DisableRPC {
  2447  		s.wg.Add(1)
  2448  
   2449  		// Start the rebroadcastHandler, which ensures user transactions
   2450  		// received via RPC are rebroadcast until included in a block.
  2451  		go s.rebroadcastHandler()
  2452  
  2453  		s.rpcServer.Start()
  2454  	}
  2455  
  2456  	// Start the CPU miner if generation is enabled.
  2457  	if cfg.Generate {
  2458  		s.cpuMiner.Start()
  2459  	}
  2460  }
  2461  
  2462  // Stop gracefully shuts down the server by stopping and disconnecting all
  2463  // peers and the main listener.
  2464  func (s *server) Stop() error {
  2465  	// Make sure this only happens once.
  2466  	if atomic.AddInt32(&s.shutdown, 1) != 1 {
  2467  		srvrLog.Infof("Server is already in the process of shutting down")
  2468  		return nil
  2469  	}
  2470  
  2471  	srvrLog.Warnf("Server shutting down")
  2472  
   2473  	// Stop the CPU miner if needed.
  2474  	s.cpuMiner.Stop()
  2475  
  2476  	// Shutdown the RPC server if it's not disabled.
  2477  	if !cfg.DisableRPC {
  2478  		s.rpcServer.Stop()
  2479  	}
  2480  
  2481  	s.feeEstimator.Close()
  2482  
  2483  	// Signal the remaining goroutines to quit.
  2484  	close(s.quit)
  2485  	return nil
  2486  }
  2487  
  2488  // WaitForShutdown blocks until the main listener and peer handlers are stopped.
  2489  func (s *server) WaitForShutdown() {
  2490  	s.wg.Wait()
  2491  }
  2492  
  2493  // ScheduleShutdown schedules a server shutdown after the specified duration.
  2494  // It also dynamically adjusts how often to warn the server is going down based
  2495  // on remaining duration.
  2496  func (s *server) ScheduleShutdown(duration time.Duration) {
  2497  	// Don't schedule shutdown more than once.
  2498  	if atomic.AddInt32(&s.shutdownSched, 1) != 1 {
  2499  		return
  2500  	}
  2501  	srvrLog.Warnf("Server shutdown in %v", duration)
  2502  	go func() {
  2503  		remaining := duration
  2504  		tickDuration := dynamicTickDuration(remaining)
  2505  		done := time.After(remaining)
  2506  		ticker := time.NewTicker(tickDuration)
  2507  	out:
  2508  		for {
  2509  			select {
  2510  			case <-done:
  2511  				ticker.Stop()
  2512  				s.Stop()
  2513  				break out
  2514  			case <-ticker.C:
  2515  				remaining = remaining - tickDuration
  2516  				if remaining < time.Second {
  2517  					continue
  2518  				}
  2519  
  2520  				// Change tick duration dynamically based on remaining time.
  2521  				newDuration := dynamicTickDuration(remaining)
  2522  				if tickDuration != newDuration {
  2523  					tickDuration = newDuration
  2524  					ticker.Stop()
  2525  					ticker = time.NewTicker(tickDuration)
  2526  				}
  2527  				srvrLog.Warnf("Server shutdown in %v", remaining)
  2528  			}
  2529  		}
  2530  	}()
  2531  }
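
         // The warning cadence above comes from dynamicTickDuration (defined
         // elsewhere in this file), which returns coarser ticks while plenty of
         // time remains and finer ticks as the deadline approaches, so the
         // "Server shutdown in ..." warnings become more frequent near the end.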
  2532  
   2533  // parseListeners determines whether each listen address is IPv4 or IPv6 and
  2534  // returns a slice of appropriate net.Addrs to listen on with TCP. It also
  2535  // properly detects addresses which apply to "all interfaces" and adds the
  2536  // address as both IPv4 and IPv6.
  2537  func parseListeners(addrs []string) ([]net.Addr, error) {
  2538  	netAddrs := make([]net.Addr, 0, len(addrs)*2)
  2539  	for _, addr := range addrs {
  2540  		host, _, err := net.SplitHostPort(addr)
  2541  		if err != nil {
  2542  			// Shouldn't happen due to already being normalized.
  2543  			return nil, err
  2544  		}
  2545  
  2546  		// Empty host or host of * on plan9 is both IPv4 and IPv6.
  2547  		if host == "" || (host == "*" && runtime.GOOS == "plan9") {
  2548  			netAddrs = append(netAddrs, simpleAddr{net: "tcp4", addr: addr})
  2549  			netAddrs = append(netAddrs, simpleAddr{net: "tcp6", addr: addr})
  2550  			continue
  2551  		}
  2552  
  2553  		// Strip IPv6 zone id if present since net.ParseIP does not
  2554  		// handle it.
  2555  		zoneIndex := strings.LastIndex(host, "%")
  2556  		if zoneIndex > 0 {
  2557  			host = host[:zoneIndex]
  2558  		}
  2559  
  2560  		// Parse the IP.
  2561  		ip := net.ParseIP(host)
  2562  		if ip == nil {
  2563  			return nil, fmt.Errorf("'%s' is not a valid IP address", host)
  2564  		}
  2565  
  2566  		// To4 returns nil when the IP is not an IPv4 address, so use
   2567  		// this to determine the address type.
  2568  		if ip.To4() == nil {
  2569  			netAddrs = append(netAddrs, simpleAddr{net: "tcp6", addr: addr})
  2570  		} else {
  2571  			netAddrs = append(netAddrs, simpleAddr{net: "tcp4", addr: addr})
  2572  		}
  2573  	}
  2574  	return netAddrs, nil
  2575  }
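
         // For example (illustrative): an address with an empty host such as
         // ":9246" expands to both a "tcp4" and a "tcp6" simpleAddr, while
         // "127.0.0.1:9246" yields a single "tcp4" entry and "[::1]:9246" a
         // single "tcp6" entry.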
  2576  
  2577  func (s *server) upnpUpdateThread() {
   2578  	// Go off immediately to prevent code duplication; thereafter we renew
   2579  	// the lease every 15 minutes.
  2580  	timer := time.NewTimer(0 * time.Second)
  2581  	lport, _ := strconv.ParseInt(activeNetParams.DefaultPort, 10, 16)
  2582  	first := true
  2583  out:
  2584  	for {
  2585  		select {
  2586  		case <-timer.C:
   2587  			// TODO: pick external port more cleverly.
  2588  			// TODO: know which ports we are listening to on an external net.
  2589  			// TODO: if specific listen port doesn't work then ask for wildcard
  2590  			// listen port?
  2591  			// XXX this assumes timeout is in seconds.
  2592  			listenPort, err := s.nat.AddPortMapping("tcp", int(lport), int(lport),
  2593  				"lbcd listen port", 20*60)
  2594  			if err != nil {
  2595  				srvrLog.Warnf("can't add UPnP port mapping: %v", err)
  2596  			}
  2597  			if first && err == nil {
  2598  				// TODO: look this up periodically to see if upnp domain changed
  2599  				// and so did ip.
  2600  				externalip, err := s.nat.GetExternalAddress()
  2601  				if err != nil {
  2602  					srvrLog.Warnf("UPnP can't get external address: %v", err)
  2603  					continue out
  2604  				}
  2605  				na := wire.NewNetAddressIPPort(externalip, uint16(listenPort),
  2606  					s.services)
  2607  				err = s.addrManager.AddLocalAddress(na, addrmgr.UpnpPrio)
  2608  				if err != nil {
  2609  					// XXX DeletePortMapping?
  2610  				}
  2611  				srvrLog.Warnf("Successfully bound via UPnP to %s", addrmgr.NetAddressKey(na))
  2612  				first = false
  2613  			}
  2614  			timer.Reset(time.Minute * 15)
  2615  		case <-s.quit:
  2616  			break out
  2617  		}
  2618  	}
  2619  
  2620  	timer.Stop()
  2621  
  2622  	if err := s.nat.DeletePortMapping("tcp", int(lport), int(lport)); err != nil {
  2623  		srvrLog.Warnf("unable to remove UPnP port mapping: %v", err)
  2624  	} else {
  2625  		srvrLog.Debugf("successfully disestablished UPnP port mapping")
  2626  	}
  2627  
  2628  	s.wg.Done()
  2629  }
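
         // Note the lease/renewal relationship above: the port mapping is
         // requested for 20 minutes (20*60 seconds) but renewed every 15, so
         // there is a five minute margin before the mapping would lapse between
         // renewals.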
  2630  
  2631  // setupRPCListeners returns a slice of listeners that are configured for use
  2632  // with the RPC server depending on the configuration settings for listen
  2633  // addresses and TLS.
  2634  func setupRPCListeners() ([]net.Listener, error) {
  2635  	// Setup TLS if not disabled.
  2636  	listenFunc := net.Listen
  2637  	if !cfg.DisableTLS {
  2638  		// Generate the TLS cert and key file if both don't already
  2639  		// exist.
  2640  		if !fileExists(cfg.RPCKey) && !fileExists(cfg.RPCCert) {
  2641  			err := genCertPair(cfg.RPCCert, cfg.RPCKey)
  2642  			if err != nil {
  2643  				return nil, err
  2644  			}
  2645  		}
  2646  		keypair, err := tls.LoadX509KeyPair(cfg.RPCCert, cfg.RPCKey)
  2647  		if err != nil {
  2648  			return nil, err
  2649  		}
  2650  
  2651  		tlsConfig := tls.Config{
  2652  			Certificates: []tls.Certificate{keypair},
  2653  			MinVersion:   tls.VersionTLS12,
  2654  		}
  2655  
  2656  		// Change the standard net.Listen function to the tls one.
  2657  		listenFunc = func(net string, laddr string) (net.Listener, error) {
  2658  			return tls.Listen(net, laddr, &tlsConfig)
  2659  		}
  2660  	}
  2661  
  2662  	netAddrs, err := parseListeners(cfg.RPCListeners)
  2663  	if err != nil {
  2664  		return nil, err
  2665  	}
  2666  
  2667  	listeners := make([]net.Listener, 0, len(netAddrs))
  2668  	for _, addr := range netAddrs {
  2669  		listener, err := listenFunc(addr.Network(), addr.String())
  2670  		if err != nil {
  2671  			rpcsLog.Warnf("Can't listen on %s: %v", addr, err)
  2672  			continue
  2673  		}
  2674  		listeners = append(listeners, listener)
  2675  	}
  2676  
  2677  	return listeners, nil
  2678  }
  2679  
   2680  // newServer returns a new lbcd server configured to listen on addr for the
   2681  // bitcoin network type specified by chainParams.  Use Start to begin accepting
  2682  // connections from peers.
  2683  func newServer(listenAddrs, agentBlacklist, agentWhitelist []string,
  2684  	db database.DB, chainParams *chaincfg.Params,
  2685  	interrupt <-chan struct{}) (*server, error) {
  2686  
  2687  	startupTime := time.Now()
  2688  
  2689  	services := defaultServices
  2690  	if cfg.NoPeerBloomFilters {
  2691  		services &^= wire.SFNodeBloom
  2692  	}
  2693  	if cfg.NoCFilters {
  2694  		services &^= wire.SFNodeCF
  2695  	}
  2696  
  2697  	amgr := addrmgr.New(cfg.DataDir, btcdLookup)
  2698  
  2699  	var listeners []net.Listener
  2700  	var nat NAT
  2701  	if !cfg.DisableListen {
  2702  		var err error
  2703  		listeners, nat, err = initListeners(amgr, listenAddrs, services)
  2704  		if err != nil {
  2705  			return nil, err
  2706  		}
  2707  		if len(listeners) == 0 {
  2708  			return nil, errors.New("no valid listen address")
  2709  		}
  2710  	}
  2711  
  2712  	if len(agentBlacklist) > 0 {
  2713  		srvrLog.Infof("User-agent blacklist %s", agentBlacklist)
  2714  	}
  2715  	if len(agentWhitelist) > 0 {
  2716  		srvrLog.Infof("User-agent whitelist %s", agentWhitelist)
  2717  	}
  2718  
  2719  	s := server{
  2720  		chainParams:          chainParams,
  2721  		addrManager:          amgr,
  2722  		newPeers:             make(chan *serverPeer, cfg.MaxPeers),
  2723  		donePeers:            make(chan *serverPeer, cfg.MaxPeers),
  2724  		banPeers:             make(chan *serverPeer, cfg.MaxPeers),
  2725  		query:                make(chan interface{}),
  2726  		relayInv:             make(chan relayMsg, cfg.MaxPeers),
  2727  		broadcast:            make(chan broadcastMsg, cfg.MaxPeers),
  2728  		quit:                 make(chan struct{}),
  2729  		modifyRebroadcastInv: make(chan interface{}),
  2730  		peerHeightsUpdate:    make(chan updatePeerHeightsMsg),
  2731  		nat:                  nat,
  2732  		db:                   db,
  2733  		timeSource:           blockchain.NewMedianTime(),
  2734  		services:             services,
  2735  		sigCache:             txscript.NewSigCache(cfg.SigCacheMaxSize),
  2736  		hashCache:            txscript.NewHashCache(cfg.SigCacheMaxSize),
  2737  		cfCheckptCaches:      make(map[wire.FilterType][]cfHeaderKV),
  2738  		agentBlacklist:       agentBlacklist,
  2739  		agentWhitelist:       agentWhitelist,
  2740  	}
  2741  
  2742  	// Create the transaction and address indexes if needed.
  2743  	//
  2744  	// CAUTION: the txindex needs to be first in the indexes array because
  2745  	// the addrindex uses data from the txindex during catchup.  If the
  2746  	// addrindex is run first, it may not have the transactions from the
  2747  	// current block indexed.
  2748  	var indexes []indexers.Indexer
  2749  	if cfg.TxIndex || cfg.AddrIndex {
  2750  		// Enable transaction index if address index is enabled since it
  2751  		// requires it.
  2752  		if !cfg.TxIndex {
  2753  			indxLog.Infof("Transaction index enabled because it " +
  2754  				"is required by the address index")
  2755  			cfg.TxIndex = true
  2756  		} else {
  2757  			indxLog.Info("Transaction index is enabled")
  2758  		}
  2759  
  2760  		s.txIndex = indexers.NewTxIndex(db)
  2761  		indexes = append(indexes, s.txIndex)
  2762  	}
  2763  	if cfg.AddrIndex {
  2764  		indxLog.Info("Address index is enabled")
  2765  		s.addrIndex = indexers.NewAddrIndex(db, chainParams)
  2766  		indexes = append(indexes, s.addrIndex)
  2767  	}
  2768  	if !cfg.NoCFilters {
  2769  		indxLog.Info("Committed filter index is enabled")
  2770  		s.cfIndex = indexers.NewCfIndex(db, chainParams)
  2771  		indexes = append(indexes, s.cfIndex)
  2772  	}
  2773  
  2774  	// Create an index manager if any of the optional indexes are enabled.
  2775  	var indexManager blockchain.IndexManager
  2776  	if len(indexes) > 0 {
  2777  		indexManager = indexers.NewManager(db, indexes)
  2778  	}
  2779  
  2780  	// Merge given checkpoints with the default ones unless they are disabled.
  2781  	var checkpoints []chaincfg.Checkpoint
  2782  	if !cfg.DisableCheckpoints {
  2783  		checkpoints = mergeCheckpoints(s.chainParams.Checkpoints, cfg.addCheckpoints)
  2784  	}
  2785  
  2786  	var err error
  2787  
  2788  	claimTrieCfg := claimtrieconfig.DefaultConfig
  2789  	claimTrieCfg.DataDir = cfg.DataDir
  2790  	claimTrieCfg.Interrupt = interrupt
  2791  
  2792  	ct, err := claimtrie.New(claimTrieCfg)
  2793  	if err != nil {
  2794  		return nil, err
  2795  	}
  2796  
  2797  	// Create a new block chain instance with the appropriate configuration.
  2798  	s.chain, err = blockchain.New(&blockchain.Config{
  2799  		DB:           s.db,
  2800  		Interrupt:    interrupt,
  2801  		ChainParams:  s.chainParams,
  2802  		Checkpoints:  checkpoints,
  2803  		TimeSource:   s.timeSource,
  2804  		SigCache:     s.sigCache,
  2805  		IndexManager: indexManager,
  2806  		HashCache:    s.hashCache,
  2807  		ClaimTrie:    ct,
  2808  	})
  2809  	if err != nil {
  2810  		return nil, err
  2811  	}
  2812  
  2813  	feC := fees.EstimatorConfig{
  2814  		MinBucketFee: cfg.minRelayTxFee,
  2815  		MaxBucketFee: lbcutil.Amount(fees.DefaultMaxBucketFeeMultiplier) * cfg.minRelayTxFee,
  2816  		MaxConfirms:  fees.DefaultMaxConfirmations,
  2817  		FeeRateStep:  fees.DefaultFeeRateStep,
  2818  		DatabaseFile: path.Join(cfg.DataDir, "feesdb"),
  2819  
  2820  		// 1e5 is the previous (up to 1.1.0) mempool.DefaultMinRelayTxFee that
  2821  		// un-upgraded wallets will be using, so track this particular rate
  2822  		// explicitly. Note that bumping this value will cause the existing fees
  2823  		// database to become invalid and will force nodes to explicitly delete
  2824  		// it.
  2825  		ExtraBucketFee: 1e5,
  2826  	}
  2827  	fe, err := fees.NewEstimator(&feC)
  2828  	if err != nil {
  2829  		return nil, err
  2830  	}
  2831  	s.feeEstimator = fe
  2832  
  2833  	txC := mempool.Config{
  2834  		Policy: mempool.Policy{
  2835  			DisableRelayPriority: cfg.NoRelayPriority,
  2836  			AcceptNonStd:         cfg.RelayNonStd,
  2837  			FreeTxRelayLimit:     cfg.FreeTxRelayLimit,
  2838  			MaxOrphanTxs:         cfg.MaxOrphanTxs,
  2839  			MaxOrphanTxSize:      defaultMaxOrphanTxSize,
  2840  			MaxSigOpCostPerTx:    blockchain.MaxBlockSigOpsCost / 4,
  2841  			MinRelayTxFee:        cfg.minRelayTxFee,
  2842  			MaxTxVersion:         2,
  2843  			RejectReplacement:    cfg.RejectReplacement,
  2844  		},
  2845  		ChainParams:    chainParams,
  2846  		FetchUtxoView:  s.chain.FetchUtxoView,
  2847  		BestHeight:     func() int32 { return s.chain.BestSnapshot().Height },
  2848  		MedianTimePast: func() time.Time { return s.chain.BestSnapshot().MedianTime },
  2849  		CalcSequenceLock: func(tx *btcutil.Tx, view *blockchain.UtxoViewpoint) (*blockchain.SequenceLock, error) {
  2850  			return s.chain.CalcSequenceLock(tx, view, true)
  2851  		},
  2852  		IsDeploymentActive:        s.chain.IsDeploymentActive,
  2853  		SigCache:                  s.sigCache,
  2854  		HashCache:                 s.hashCache,
  2855  		AddrIndex:                 s.addrIndex,
  2856  		AddTxToFeeEstimation:      s.feeEstimator.AddMemPoolTransaction,
  2857  		RemoveTxFromFeeEstimation: s.feeEstimator.RemoveMemPoolTransaction,
  2858  	}
  2859  	s.txMemPool = mempool.New(&txC)
  2860  
  2861  	s.syncManager, err = netsync.New(&netsync.Config{
  2862  		PeerNotifier:       &s,
  2863  		Chain:              s.chain,
  2864  		TxMemPool:          s.txMemPool,
  2865  		ChainParams:        s.chainParams,
  2866  		DisableCheckpoints: cfg.DisableCheckpoints,
  2867  		MaxPeers:           cfg.MaxPeers,
  2868  		FeeEstimator:       s.feeEstimator,
  2869  	})
  2870  	if err != nil {
  2871  		return nil, err
  2872  	}
  2873  
  2874  	// Create the mining policy and block template generator based on the
  2875  	// configuration options.
  2876  	//
  2877  	// NOTE: The CPU miner relies on the mempool, so the mempool has to be
  2878  	// created before calling the function to create the CPU miner.
  2879  	policy := mining.Policy{
  2880  		BlockMinWeight:    cfg.BlockMinWeight,
  2881  		BlockMaxWeight:    cfg.BlockMaxWeight,
  2882  		BlockMinSize:      cfg.BlockMinSize,
  2883  		BlockMaxSize:      cfg.BlockMaxSize,
  2884  		BlockPrioritySize: cfg.BlockPrioritySize,
  2885  		TxMinFreeFee:      cfg.minRelayTxFee,
  2886  	}
  2887  	blockTemplateGenerator := mining.NewBlkTmplGenerator(&policy,
  2888  		s.chainParams, s.txMemPool, s.chain, s.timeSource,
  2889  		s.sigCache, s.hashCache)
  2890  	s.cpuMiner = cpuminer.New(&cpuminer.Config{
  2891  		ChainParams:            chainParams,
  2892  		BlockTemplateGenerator: blockTemplateGenerator,
  2893  		MiningAddrs:            cfg.miningAddrs,
  2894  		ProcessBlock:           s.syncManager.ProcessBlock,
  2895  		ConnectedCount:         s.ConnectedCount,
  2896  		IsCurrent:              s.syncManager.IsCurrent,
  2897  	})
  2898  
  2899  	// Only set up a function to return new addresses to connect to when
  2900  	// not running in connect-only mode.  The simulation network is always
  2901  	// in connect-only mode since it is only intended to connect to
  2902  	// specified peers and actively avoid advertising and connecting to
  2903  	// discovered peers in order to prevent it from becoming a public test
  2904  	// network.
  2905  	var newAddressFunc func() (net.Addr, error)
  2906  	if !cfg.SimNet && len(cfg.ConnectPeers) == 0 {
  2907  		newAddressFunc = func() (net.Addr, error) {
  2908  			for tries := 0; tries < 100; tries++ {
  2909  				addr := s.addrManager.GetAddress()
  2910  				if addr == nil {
  2911  					break
  2912  				}
  2913  
  2914  				// Address will not be invalid, local or unroutable
  2915  				// because addrmanager rejects those on addition.
  2916  				// Just check that we don't already have an address
  2917  				// in the same group so that we are not connecting
  2918  				// to the same network segment at the expense of
  2919  				// others.
  2920  				key := addrmgr.GroupKey(addr.NetAddress())
  2921  				if s.OutboundGroupCount(key) != 0 {
  2922  					continue
  2923  				}
  2924  
  2925  				// Only allow recently-attempted nodes (within
  2926  				// the last 10 minutes) after 30 failed tries.
  2927  				if tries < 30 && time.Since(addr.LastAttempt()) < 10*time.Minute {
  2928  					continue
  2929  				}
  2930  
  2931  				// Only allow non-default ports after 50 failed tries.
  2932  				if tries < 50 && fmt.Sprintf("%d", addr.NetAddress().Port) !=
  2933  					activeNetParams.DefaultPort {
  2934  					continue
  2935  				}
  2936  
  2937  				// Mark an attempt for the valid address.
  2938  				s.addrManager.Attempt(addr.NetAddress())
  2939  
  2940  				addrString := addrmgr.NetAddressKey(addr.NetAddress())
  2941  				return addrStringToNetAddr(addrString)
  2942  			}
  2943  
  2944  			return nil, errors.New("no valid connect address")
  2945  		}
  2946  	}
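
	// Illustrative summary (not part of the original source) of the
	// escalating selection policy in newAddressFunc above:
	//
	//   tries  0-29: skip recently-attempted addrs, default port only
	//   tries 30-49: recently-attempted allowed, default port only
	//   tries 50-99: any routable address, any port
	//
	// At every tier a candidate is also rejected if an outbound peer
	// in the same network group already exists.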
  2947  
  2948  	// Create a connection manager.
  2949  	targetOutbound := defaultTargetOutbound
  2950  	if cfg.MaxPeers < targetOutbound {
  2951  		targetOutbound = cfg.MaxPeers
  2952  	}
  2953  	cmgr, err := connmgr.New(&connmgr.Config{
  2954  		Listeners:      listeners,
  2955  		OnAccept:       s.inboundPeerConnected,
  2956  		RetryDuration:  connectionRetryInterval,
  2957  		TargetOutbound: uint32(targetOutbound),
  2958  		Dial:           btcdDial,
  2959  		OnConnection:   s.outboundPeerConnected,
  2960  		GetNewAddress:  newAddressFunc,
  2961  	})
  2962  	if err != nil {
  2963  		return nil, err
  2964  	}
  2965  	s.connManager = cmgr
  2966  
  2967  	// Start up persistent peers.
  2968  	permanentPeers := cfg.ConnectPeers
  2969  	if len(permanentPeers) == 0 {
  2970  		permanentPeers = cfg.AddPeers
  2971  	}
  2972  	for _, addr := range permanentPeers {
  2973  		netAddr, err := addrStringToNetAddr(addr)
  2974  		if err != nil {
  2975  			return nil, err
  2976  		}
  2977  
  2978  		go s.connManager.Connect(&connmgr.ConnReq{
  2979  			Addr:      netAddr,
  2980  			Permanent: true,
  2981  		})
  2982  	}
  2983  
  2984  	if !cfg.DisableRPC {
  2985  		// Set up listeners for the configured RPC listen addresses and
  2986  		// TLS settings.
  2987  		rpcListeners, err := setupRPCListeners()
  2988  		if err != nil {
  2989  			return nil, err
  2990  		}
  2991  		if len(rpcListeners) == 0 {
  2992  			return nil, errors.New("RPCS: No valid listen address")
  2993  		}
  2994  
  2995  		s.rpcServer, err = newRPCServer(&rpcserverConfig{
  2996  			Listeners:    rpcListeners,
  2997  			StartupTime:  startupTime.Unix(),
  2998  			ConnMgr:      &rpcConnManager{&s},
  2999  			AddrMgr:      amgr,
  3000  			SyncMgr:      &rpcSyncMgr{&s, s.syncManager},
  3001  			TimeSource:   s.timeSource,
  3002  			Chain:        s.chain,
  3003  			ChainParams:  chainParams,
  3004  			DB:           db,
  3005  			TxMemPool:    s.txMemPool,
  3006  			Generator:    blockTemplateGenerator,
  3007  			CPUMiner:     s.cpuMiner,
  3008  			TxIndex:      s.txIndex,
  3009  			AddrIndex:    s.addrIndex,
  3010  			CfIndex:      s.cfIndex,
  3011  			FeeEstimator: s.feeEstimator,
  3012  			Services:     s.services,
  3013  		})
  3014  		if err != nil {
  3015  			return nil, err
  3016  		}
  3017  
  3018  		// Signal process shutdown when the RPC server requests it.
  3019  		go func() {
  3020  			<-s.rpcServer.RequestedProcessShutdown()
  3021  			shutdownRequestChannel <- struct{}{}
  3022  		}()
  3023  	}
  3024  
  3025  	return &s, nil
  3026  }
  3027  
  3028  // initListeners initializes the configured net listeners and adds any bound
  3029  // addresses to the address manager. Returns the listeners and a NAT interface,
  3030  // which is non-nil if UPnP is in use.
  3031  func initListeners(amgr *addrmgr.AddrManager, listenAddrs []string, services wire.ServiceFlag) ([]net.Listener, NAT, error) {
  3032  	// Listen for TCP connections at the configured addresses.
  3033  	netAddrs, err := parseListeners(listenAddrs)
  3034  	if err != nil {
  3035  		return nil, nil, err
  3036  	}
  3037  
  3038  	listeners := make([]net.Listener, 0, len(netAddrs))
  3039  	for _, addr := range netAddrs {
  3040  		listener, err := net.Listen(addr.Network(), addr.String())
  3041  		if err != nil {
  3042  			srvrLog.Warnf("Can't listen on %s: %v", addr, err)
  3043  			continue
  3044  		}
  3045  		listeners = append(listeners, listener)
  3046  	}
  3047  
  3048  	var nat NAT
  3049  	if len(cfg.ExternalIPs) != 0 {
  3050  		defaultPort, err := strconv.ParseUint(activeNetParams.DefaultPort, 10, 16)
  3051  		if err != nil {
  3052  			srvrLog.Errorf("Cannot parse default port %s for active chain: %v",
  3053  				activeNetParams.DefaultPort, err)
  3054  			return nil, nil, err
  3055  		}
  3056  
  3057  		for _, sip := range cfg.ExternalIPs {
  3058  			eport := uint16(defaultPort)
  3059  			host, portstr, err := net.SplitHostPort(sip)
  3060  			if err != nil {
  3061  				// No port specified, so use the default.
  3062  				host = sip
  3063  			} else {
  3064  				port, err := strconv.ParseUint(portstr, 10, 16)
  3065  				if err != nil {
  3066  					srvrLog.Warnf("Cannot parse port from %s for "+
  3067  						"externalip: %v", sip, err)
  3068  					continue
  3069  				}
  3070  				eport = uint16(port)
  3071  			}
  3072  			na, err := amgr.HostToNetAddress(host, eport, services)
  3073  			if err != nil {
  3074  				srvrLog.Warnf("Not adding %s as externalip: %v", sip, err)
  3075  				continue
  3076  			}
  3077  
  3078  			err = amgr.AddLocalAddress(na, addrmgr.ManualPrio)
  3079  			if err != nil {
  3080  				amgrLog.Warnf("Skipping specified external IP: %v", err)
  3081  			}
  3082  		}
  3083  	} else {
  3084  		if cfg.Upnp && !cfg.RegressionTest && !cfg.SimNet {
  3085  			var err error
  3086  			nat, err = Discover()
  3087  			if err != nil {
  3088  				srvrLog.Infof("Can't discover UPnP-enabled device: %v", err)
  3089  			} else {
  3090  				address, err := nat.GetExternalAddress()
  3091  				if err == nil && address != nil {
  3092  					srvrLog.Infof("UPnP successfully registered on %s", address.String())
  3093  				}
  3094  			}
  3095  			// A nil nat here is fine; it just means there is no UPnP on the network.
  3096  		}
  3097  
  3098  		// Add bound addresses to the address manager to be advertised to peers.
  3099  		for _, listener := range listeners {
  3100  			addr := listener.Addr().String()
  3101  			err := addLocalAddress(amgr, addr, services)
  3102  			if err != nil {
  3103  				amgrLog.Warnf("Skipping bound address %s: %v", addr, err)
  3104  			}
  3105  		}
  3106  	}
  3107  
  3108  	return listeners, nat, nil
  3109  }
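
// exampleBindListeners is an illustrative sketch (not part of the original
// source) of the bind-what-you-can strategy used by initListeners above:
// failure to bind one address is reported and skipped rather than treated
// as fatal, so the node still serves the addresses that did bind.
func exampleBindListeners(addrs []string) []net.Listener {
	listeners := make([]net.Listener, 0, len(addrs))
	for _, addr := range addrs {
		listener, err := net.Listen("tcp", addr)
		if err != nil {
			fmt.Printf("can't listen on %s: %v\n", addr, err)
			continue
		}
		listeners = append(listeners, listener)
	}
	return listeners
}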
  3110  
  3111  // addrStringToNetAddr takes an address in the form of 'host:port' and returns
  3112  // a net.Addr which maps to the original address with any host names resolved
  3113  // to IP addresses.  It also handles Tor addresses properly by returning a
  3114  // net.Addr that encapsulates the address.
  3115  func addrStringToNetAddr(addr string) (net.Addr, error) {
  3116  	host, strPort, err := net.SplitHostPort(addr)
  3117  	if err != nil {
  3118  		return nil, err
  3119  	}
  3120  
  3121  	port, err := strconv.Atoi(strPort)
  3122  	if err != nil {
  3123  		return nil, err
  3124  	}
  3125  
  3126  	// Return directly if the host is already an IP address; no lookup needed.
  3127  	if ip := net.ParseIP(host); ip != nil {
  3128  		return &net.TCPAddr{
  3129  			IP:   ip,
  3130  			Port: port,
  3131  		}, nil
  3132  	}
  3133  
  3134  	// Tor addresses cannot be resolved to an IP, so just return an onion
  3135  	// address instead.
  3136  	if strings.HasSuffix(host, ".onion") {
  3137  		if cfg.NoOnion {
  3138  			return nil, errors.New("tor has been disabled")
  3139  		}
  3140  
  3141  		return &onionAddr{addr: addr}, nil
  3142  	}
  3143  
  3144  	// Attempt to look up an IP address associated with the parsed host.
  3145  	ips, err := btcdLookup(host)
  3146  	if err != nil {
  3147  		return nil, err
  3148  	}
  3149  	if len(ips) == 0 {
  3150  		return nil, fmt.Errorf("no addresses found for %s", host)
  3151  	}
  3152  
  3153  	return &net.TCPAddr{
  3154  		IP:   ips[0],
  3155  		Port: port,
  3156  	}, nil
  3157  }
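
// exampleAddrResolution is an illustrative usage sketch (not part of the
// original source) for addrStringToNetAddr. The three inputs below are
// example values only, chosen to exercise each branch: an IP literal, a Tor
// onion address, and a hostname that requires a DNS lookup.
func exampleAddrResolution() {
	for _, addr := range []string{
		"127.0.0.1:9246",            // IP literal: returned as *net.TCPAddr directly.
		"examplehost777.onion:9246", // Tor: wrapped in *onionAddr, never resolved.
		"example.com:9246",          // Hostname: resolved via btcdLookup.
	} {
		netAddr, err := addrStringToNetAddr(addr)
		if err != nil {
			fmt.Printf("%s -> error: %v\n", addr, err)
			continue
		}
		fmt.Printf("%s -> %s (%s network)\n", addr, netAddr, netAddr.Network())
	}
}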
  3158  
  3159  // addLocalAddress adds an address that this node is listening on to the
  3160  // address manager so that it may be relayed to peers.
  3161  func addLocalAddress(addrMgr *addrmgr.AddrManager, addr string, services wire.ServiceFlag) error {
  3162  	host, portStr, err := net.SplitHostPort(addr)
  3163  	if err != nil {
  3164  		return err
  3165  	}
  3166  	port, err := strconv.ParseUint(portStr, 10, 16)
  3167  	if err != nil {
  3168  		return err
  3169  	}
  3170  
  3171  	if ip := net.ParseIP(host); ip != nil && ip.IsUnspecified() {
  3172  		// If bound to an unspecified address, advertise all local interfaces.
  3173  		addrs, err := net.InterfaceAddrs()
  3174  		if err != nil {
  3175  			return err
  3176  		}
  3177  
  3178  		for _, addr := range addrs {
  3179  			ifaceIP, _, err := net.ParseCIDR(addr.String())
  3180  			if err != nil {
  3181  				continue
  3182  			}
  3183  
  3184  			// If bound to 0.0.0.0, do not add IPv6 interfaces, and if
  3185  			// bound to ::, do not add IPv4 interfaces.
  3186  			if (ip.To4() == nil) != (ifaceIP.To4() == nil) {
  3187  				continue
  3188  			}
  3189  
  3190  			netAddr := wire.NewNetAddressIPPort(ifaceIP, uint16(port), services)
  3191  			addrMgr.AddLocalAddress(netAddr, addrmgr.BoundPrio)
  3192  		}
  3193  	} else {
  3194  		netAddr, err := addrMgr.HostToNetAddress(host, uint16(port), services)
  3195  		if err != nil {
  3196  			return err
  3197  		}
  3198  
  3199  		addrMgr.AddLocalAddress(netAddr, addrmgr.BoundPrio)
  3200  	}
  3201  
  3202  	return nil
  3203  }
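
// sameIPFamily is an illustrative sketch (not part of the original source)
// isolating the family check used by addLocalAddress above: when bound to a
// wildcard address, an interface IP is advertised only if it matches the
// family of the bind address (0.0.0.0 selects IPv4, :: selects IPv6).
func sameIPFamily(bound, ifaceIP net.IP) bool {
	return (bound.To4() == nil) == (ifaceIP.To4() == nil)
}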
  3204  
  3205  // dynamicTickDuration is a convenience function used to dynamically choose a
  3206  // tick duration based on remaining time.  It is primarily used during
  3207  // server shutdown to make shutdown warnings more frequent as the shutdown time
  3208  // approaches.
  3209  func dynamicTickDuration(remaining time.Duration) time.Duration {
  3210  	switch {
  3211  	case remaining <= time.Second*5:
  3212  		return time.Second
  3213  	case remaining <= time.Second*15:
  3214  		return time.Second * 5
  3215  	case remaining <= time.Minute:
  3216  		return time.Second * 15
  3217  	case remaining <= time.Minute*5:
  3218  		return time.Minute
  3219  	case remaining <= time.Minute*15:
  3220  		return time.Minute * 5
  3221  	case remaining <= time.Hour:
  3222  		return time.Minute * 15
  3223  	}
  3224  	return time.Hour
  3225  }
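
// exampleShutdownCountdown is an illustrative usage sketch (not part of the
// original source) showing the intended use of dynamicTickDuration: a
// countdown loop whose warnings become more frequent as the deadline nears.
func exampleShutdownCountdown(remaining time.Duration) {
	for remaining > 0 {
		tick := dynamicTickDuration(remaining)
		if tick > remaining {
			tick = remaining
		}
		time.Sleep(tick)
		remaining -= tick
		srvrLog.Warnf("Server shutting down in %v", remaining)
	}
}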
  3226  
  3227  // isWhitelisted returns whether the IP address is included in the whitelisted
  3228  // networks and IPs.
  3229  func isWhitelisted(addr net.Addr) bool {
  3230  	if len(cfg.whitelists) == 0 {
  3231  		return false
  3232  	}
  3233  
  3234  	host, _, err := net.SplitHostPort(addr.String())
  3235  	if err != nil {
  3236  		srvrLog.Warnf("Unable to SplitHostPort on '%s': %v", addr, err)
  3237  		return false
  3238  	}
  3239  	ip := net.ParseIP(host)
  3240  	if ip == nil {
  3241  		srvrLog.Warnf("Unable to parse IP '%s'", addr)
  3242  		return false
  3243  	}
  3244  
  3245  	for _, ipnet := range cfg.whitelists {
  3246  		if ipnet.Contains(ip) {
  3247  			return true
  3248  		}
  3249  	}
  3250  	return false
  3251  }
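
// exampleWhitelistMatch is an illustrative sketch (not part of the original
// source) of the CIDR containment test isWhitelisted relies on. The network
// below is an example value, not a suggested default.
func exampleWhitelistMatch() bool {
	_, ipnet, err := net.ParseCIDR("192.168.0.0/16")
	if err != nil {
		return false
	}
	// 192.168.1.7 falls inside 192.168.0.0/16, so this returns true.
	return ipnet.Contains(net.ParseIP("192.168.1.7"))
}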
  3252  
  3253  // checkpointSorter implements sort.Interface to allow a slice of checkpoints to
  3254  // be sorted.
  3255  type checkpointSorter []chaincfg.Checkpoint
  3256  
  3257  // Len returns the number of checkpoints in the slice.  It is part of the
  3258  // sort.Interface implementation.
  3259  func (s checkpointSorter) Len() int {
  3260  	return len(s)
  3261  }
  3262  
  3263  // Swap swaps the checkpoints at the passed indices.  It is part of the
  3264  // sort.Interface implementation.
  3265  func (s checkpointSorter) Swap(i, j int) {
  3266  	s[i], s[j] = s[j], s[i]
  3267  }
  3268  
  3269  // Less returns whether the checkpoint with index i should sort before the
  3270  // checkpoint with index j.  It is part of the sort.Interface implementation.
  3271  func (s checkpointSorter) Less(i, j int) bool {
  3272  	return s[i].Height < s[j].Height
  3273  }
  3274  
  3275  // mergeCheckpoints returns two slices of checkpoints merged into one slice
  3276  // such that the checkpoints are sorted by height.  When the additional
  3277  // checkpoints contain a checkpoint with the same height as a checkpoint in the
  3278  // default checkpoints, the additional checkpoint will take precedence and
  3279  // overwrite the default one.
  3280  func mergeCheckpoints(defaultCheckpoints, additional []chaincfg.Checkpoint) []chaincfg.Checkpoint {
  3281  	// Create a map of the additional checkpoints to remove duplicates while
  3282  	// leaving the most recently-specified checkpoint.
  3283  	extra := make(map[int32]chaincfg.Checkpoint)
  3284  	for _, checkpoint := range additional {
  3285  		extra[checkpoint.Height] = checkpoint
  3286  	}
  3287  
  3288  	// Add all default checkpoints that do not have an override in the
  3289  	// additional checkpoints.
  3290  	numDefault := len(defaultCheckpoints)
  3291  	checkpoints := make([]chaincfg.Checkpoint, 0, numDefault+len(extra))
  3292  	for _, checkpoint := range defaultCheckpoints {
  3293  		if _, exists := extra[checkpoint.Height]; !exists {
  3294  			checkpoints = append(checkpoints, checkpoint)
  3295  		}
  3296  	}
  3297  
  3298  	// Append the additional checkpoints and return the sorted results.
  3299  	for _, checkpoint := range extra {
  3300  		checkpoints = append(checkpoints, checkpoint)
  3301  	}
  3302  	sort.Sort(checkpointSorter(checkpoints))
  3303  	return checkpoints
  3304  }
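
// exampleMergeCheckpoints is an illustrative sketch (not part of the
// original source) of the override behavior documented above: an additional
// checkpoint at an existing height replaces the default entry, and the
// merged result comes back sorted by height. Hashes are elided for brevity.
func exampleMergeCheckpoints() []chaincfg.Checkpoint {
	defaults := []chaincfg.Checkpoint{{Height: 100}, {Height: 200}}
	additional := []chaincfg.Checkpoint{{Height: 200}, {Height: 150}}

	// Resulting heights: 100, 150, 200, with the additional entry
	// winning at height 200.
	return mergeCheckpoints(defaults, additional)
}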
  3305  
  3306  // HasUndesiredUserAgent determines whether the server should continue to pursue
  3307  // a connection with this peer based on its advertised user agent. It performs
  3308  // the following steps:
  3309  // 1) Reject the peer if its user agent contains a blacklisted substring.
  3310  // 2) If no whitelist is provided, accept all user agents.
  3311  // 3) Accept the peer if its user agent contains a whitelisted substring.
  3312  // 4) Reject all other peers.
  3313  func (sp *serverPeer) HasUndesiredUserAgent(blacklistedAgents,
  3314  	whitelistedAgents []string) bool {
  3315  
  3316  	agent := sp.UserAgent()
  3317  
  3318  	// First, if the peer's user agent contains any blacklisted substring,
  3319  	// we will ignore the connection request.
  3320  	for _, blacklistedAgent := range blacklistedAgents {
  3321  		if strings.Contains(agent, blacklistedAgent) {
  3322  			srvrLog.Debugf("Ignoring peer %s, user agent %s "+
  3323  				"contains a blacklisted substring", sp,
  3324  				agent)
  3325  			return true
  3326  		}
  3327  	}
  3328  
  3329  	// If no whitelist is provided, we will accept all user agents.
  3330  	if len(whitelistedAgents) == 0 {
  3331  		return false
  3332  	}
  3333  
  3334  	// The peer's user agent passed the blacklist. Now check whether it
  3335  	// contains one of our whitelisted user agents; if so, accept it.
  3336  	for _, whitelistedAgent := range whitelistedAgents {
  3337  		if strings.Contains(agent, whitelistedAgent) {
  3338  			return false
  3339  		}
  3340  	}
  3341  
  3342  	// Otherwise, the peer's user agent was not included in our whitelist.
  3343  	// Ignore it, just in case it could stall the initial block download.
  3344  	srvrLog.Debugf("Ignoring peer %s, user agent %s not found in "+
  3345  		"whitelist", sp, agent)
  3346  
  3347  	return true
  3348  }
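
// exampleUndesiredAgent is an illustrative sketch (not part of the original
// source) expressing the same four-step decision as HasUndesiredUserAgent
// over a plain user-agent string, so the policy is easy to trace in isolation.
func exampleUndesiredAgent(agent string, blacklist, whitelist []string) bool {
	for _, b := range blacklist {
		if strings.Contains(agent, b) {
			return true // Step 1: blacklisted substring found; reject.
		}
	}
	if len(whitelist) == 0 {
		return false // Step 2: no whitelist; accept everyone else.
	}
	for _, w := range whitelist {
		if strings.Contains(agent, w) {
			return false // Step 3: whitelisted substring found; accept.
		}
	}
	return true // Step 4: not in whitelist; reject.
}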