github.com/palcoin-project/palcd@v1.0.0/server.go

     1  // Copyright (c) 2013-2017 The btcsuite developers
     2  // Copyright (c) 2015-2018 The Decred developers
     3  // Use of this source code is governed by an ISC
     4  // license that can be found in the LICENSE file.
     5  
     6  package main
     7  
     8  import (
     9  	"bytes"
    10  	"crypto/rand"
    11  	"crypto/tls"
    12  	"encoding/binary"
    13  	"errors"
    14  	"fmt"
    15  	"math"
    16  	"net"
    17  	"runtime"
    18  	"sort"
    19  	"strconv"
    20  	"strings"
    21  	"sync"
    22  	"sync/atomic"
    23  	"time"
    24  
    25  	"github.com/palcoin-project/palcd/addrmgr"
    26  	"github.com/palcoin-project/palcd/blockchain"
    27  	"github.com/palcoin-project/palcd/blockchain/indexers"
    28  	"github.com/palcoin-project/palcd/chaincfg"
    29  	"github.com/palcoin-project/palcd/chaincfg/chainhash"
    30  	"github.com/palcoin-project/palcd/connmgr"
    31  	"github.com/palcoin-project/palcd/database"
    32  	"github.com/palcoin-project/palcd/mempool"
    33  	"github.com/palcoin-project/palcd/mining"
    34  	"github.com/palcoin-project/palcd/mining/cpuminer"
    35  	"github.com/palcoin-project/palcd/netsync"
    36  	"github.com/palcoin-project/palcd/peer"
    37  	"github.com/palcoin-project/palcd/txscript"
    38  	"github.com/palcoin-project/palcd/wire"
    39  	"github.com/palcoin-project/palcutil"
    40  	"github.com/palcoin-project/palcutil/bloom"
    41  )
    42  
    43  const (
    44  	// defaultServices describes the default services that are supported by
    45  	// the server.
    46  	defaultServices = wire.SFNodeNetwork | wire.SFNodeBloom |
    47  		wire.SFNodeWitness | wire.SFNodeCF
    48  
    49  	// defaultRequiredServices describes the default services that are
    50  	// required to be supported by outbound peers.
    51  	defaultRequiredServices = wire.SFNodeNetwork
    52  
    53  	// defaultTargetOutbound is the default number of outbound peers to target.
    54  	defaultTargetOutbound = 8
    55  
    56  	// connectionRetryInterval is the base amount of time to wait in between
    57  	// retries when connecting to persistent peers.  It is adjusted by the
    58  	// number of retries such that there is a retry backoff.
    59  	connectionRetryInterval = time.Second * 5
    60  )
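
// The linear backoff described above is applied by the connection manager
// rather than in this file.  A minimal sketch of the idea, assuming a
// hypothetical retryDelay helper and an illustrative five-minute cap (the
// real scaling and cap live in connmgr):
//
//	func retryDelay(retries int) time.Duration {
//		delay := time.Duration(retries) * connectionRetryInterval
//		if max := 5 * time.Minute; delay > max {
//			delay = max // assumed cap, for illustration only
//		}
//		return delay
//	}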
    61  
    62  var (
    63  	// userAgentName is the user agent name and is used to help identify
    64  	// ourselves to other bitcoin peers.
    65  	userAgentName = "palcd"
    66  
    67  	// userAgentVersion is the user agent version and is used to help
    68  	// identify ourselves to other bitcoin peers.
    69  	userAgentVersion = fmt.Sprintf("%d.%d.%d", appMajor, appMinor, appPatch)
    70  )
    71  
    72  // zeroHash is the zero value hash (all zeros).  It is defined as a convenience.
    73  var zeroHash chainhash.Hash
    74  
    75  // onionAddr implements the net.Addr interface and represents a tor address.
    76  type onionAddr struct {
    77  	addr string
    78  }
    79  
    80  // String returns the onion address.
    81  //
    82  // This is part of the net.Addr interface.
    83  func (oa *onionAddr) String() string {
    84  	return oa.addr
    85  }
    86  
    87  // Network returns "onion".
    88  //
    89  // This is part of the net.Addr interface.
    90  func (oa *onionAddr) Network() string {
    91  	return "onion"
    92  }
    93  
    94  // Ensure onionAddr implements the net.Addr interface.
    95  var _ net.Addr = (*onionAddr)(nil)
    96  
    97  // simpleAddr implements the net.Addr interface with two struct fields.
    98  type simpleAddr struct {
    99  	net, addr string
   100  }
   101  
   102  // String returns the address.
   103  //
   104  // This is part of the net.Addr interface.
   105  func (a simpleAddr) String() string {
   106  	return a.addr
   107  }
   108  
   109  // Network returns the network.
   110  //
   111  // This is part of the net.Addr interface.
   112  func (a simpleAddr) Network() string {
   113  	return a.net
   114  }
   115  
   116  // Ensure simpleAddr implements the net.Addr interface.
   117  var _ net.Addr = simpleAddr{}
   118  
   119  // broadcastMsg provides the ability to house a bitcoin message to be broadcast
   120  // to all connected peers except specified excluded peers.
   121  type broadcastMsg struct {
   122  	message      wire.Message
   123  	excludePeers []*serverPeer
   124  }
   125  
   126  // broadcastInventoryAdd is a type used to declare that the InvVect it contains
   127  // needs to be added to the rebroadcast map.
   128  type broadcastInventoryAdd relayMsg
   129  
   130  // broadcastInventoryDel is a type used to declare that the InvVect it contains
   131  // needs to be removed from the rebroadcast map.
   132  type broadcastInventoryDel *wire.InvVect
   133  
   134  // relayMsg packages an inventory vector along with the newly discovered
   135  // inventory so the relay has access to that information.
   136  type relayMsg struct {
   137  	invVect *wire.InvVect
   138  	data    interface{}
   139  }
   140  
   141  // updatePeerHeightsMsg is a message sent from the blockmanager to the server
   142  // after a new block has been accepted. The purpose of the message is to update
   143  // the heights of peers that were known to announce the block before we
   144  // connected it to the main chain or recognized it as an orphan. With these
   145  // updates, peer heights will be kept up to date, allowing for fresh data when
   146  // selecting sync peer candidates.
   147  type updatePeerHeightsMsg struct {
   148  	newHash    *chainhash.Hash
   149  	newHeight  int32
   150  	originPeer *peer.Peer
   151  }
   152  
   153  // peerState maintains state of inbound, persistent, outbound peers as well
   154  // as banned peers and outbound groups.
   155  type peerState struct {
   156  	inboundPeers    map[int32]*serverPeer
   157  	outboundPeers   map[int32]*serverPeer
   158  	persistentPeers map[int32]*serverPeer
   159  	banned          map[string]time.Time
   160  	outboundGroups  map[string]int
   161  }
   162  
   163  // Count returns the count of all known peers.
   164  func (ps *peerState) Count() int {
   165  	return len(ps.inboundPeers) + len(ps.outboundPeers) +
   166  		len(ps.persistentPeers)
   167  }
   168  
   169  // forAllOutboundPeers is a helper function that runs closure on all outbound
   170  // peers known to peerState.
   171  func (ps *peerState) forAllOutboundPeers(closure func(sp *serverPeer)) {
   172  	for _, e := range ps.outboundPeers {
   173  		closure(e)
   174  	}
   175  	for _, e := range ps.persistentPeers {
   176  		closure(e)
   177  	}
   178  }
   179  
   180  // forAllPeers is a helper function that runs closure on all peers known to
   181  // peerState.
   182  func (ps *peerState) forAllPeers(closure func(sp *serverPeer)) {
   183  	for _, e := range ps.inboundPeers {
   184  		closure(e)
   185  	}
   186  	ps.forAllOutboundPeers(closure)
   187  }
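
// These helpers let the server aggregate state across every known peer
// without exposing the underlying maps.  A minimal sketch, using a
// hypothetical connectedCount helper:
//
//	func (ps *peerState) connectedCount() int32 {
//		var count int32
//		ps.forAllPeers(func(sp *serverPeer) {
//			if sp.Connected() {
//				count++
//			}
//		})
//		return count
//	}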
   188  
   189  // cfHeaderKV is a tuple of a filter header and its associated block hash. The
   190  // struct is used to cache cfcheckpt responses.
   191  type cfHeaderKV struct {
   192  	blockHash    chainhash.Hash
   193  	filterHeader chainhash.Hash
   194  }
   195  
   196  // server provides a bitcoin server for handling communications to and from
   197  // bitcoin peers.
   198  type server struct {
   199  	// The following variables must only be used atomically.
   200  	// Putting the uint64s first makes them 64-bit aligned for 32-bit systems.
   201  	bytesReceived uint64 // Total bytes received from all peers since start.
   202  	bytesSent     uint64 // Total bytes sent by all peers since start.
   203  	started       int32
   204  	shutdown      int32
   205  	shutdownSched int32
   206  	startupTime   int64
   207  
   208  	chainParams          *chaincfg.Params
   209  	addrManager          *addrmgr.AddrManager
   210  	connManager          *connmgr.ConnManager
   211  	sigCache             *txscript.SigCache
   212  	hashCache            *txscript.HashCache
   213  	rpcServer            *rpcServer
   214  	syncManager          *netsync.SyncManager
   215  	chain                *blockchain.BlockChain
   216  	txMemPool            *mempool.TxPool
   217  	cpuMiner             *cpuminer.CPUMiner
   218  	modifyRebroadcastInv chan interface{}
   219  	newPeers             chan *serverPeer
   220  	donePeers            chan *serverPeer
   221  	banPeers             chan *serverPeer
   222  	query                chan interface{}
   223  	relayInv             chan relayMsg
   224  	broadcast            chan broadcastMsg
   225  	peerHeightsUpdate    chan updatePeerHeightsMsg
   226  	wg                   sync.WaitGroup
   227  	quit                 chan struct{}
   228  	nat                  NAT
   229  	db                   database.DB
   230  	timeSource           blockchain.MedianTimeSource
   231  	services             wire.ServiceFlag
   232  
   233  	// The following fields are used for optional indexes.  They will be nil
   234  	// if the associated index is not enabled.  These fields are set during
   235  	// initial creation of the server and never changed afterwards, so they
   236  	// do not need to be protected for concurrent access.
   237  	txIndex   *indexers.TxIndex
   238  	addrIndex *indexers.AddrIndex
   239  	cfIndex   *indexers.CfIndex
   240  
   241  	// The fee estimator keeps track of how long transactions are left in
   242  	// the mempool before they are mined into blocks.
   243  	feeEstimator *mempool.FeeEstimator
   244  
   245  	// cfCheckptCaches stores a cached slice of filter headers for cfcheckpt
   246  	// messages for each filter type.
   247  	cfCheckptCaches    map[wire.FilterType][]cfHeaderKV
   248  	cfCheckptCachesMtx sync.RWMutex
   249  
   250  	// agentBlacklist is a list of blacklisted substrings by which to filter
   251  	// user agents.
   252  	agentBlacklist []string
   253  
   254  	// agentWhitelist is a list of whitelisted user agent substrings, no
   255  	// whitelisting will be applied if the list is empty or nil.
   256  	agentWhitelist []string
   257  }
   258  
   259  // serverPeer extends the peer to maintain state shared by the server and
   260  // the blockmanager.
   261  type serverPeer struct {
   262  	// The following variables must only be used atomically
   263  	feeFilter int64
   264  
   265  	*peer.Peer
   266  
   267  	connReq        *connmgr.ConnReq
   268  	server         *server
   269  	persistent     bool
   270  	continueHash   *chainhash.Hash
   271  	relayMtx       sync.Mutex
   272  	disableRelayTx bool
   273  	sentAddrs      bool
   274  	isWhitelisted  bool
   275  	filter         *bloom.Filter
   276  	addressesMtx   sync.RWMutex
   277  	knownAddresses map[string]struct{}
   278  	banScore       connmgr.DynamicBanScore
   279  	quit           chan struct{}
   280  	// The following chans are used to sync blockmanager and server.
   281  	txProcessed    chan struct{}
   282  	blockProcessed chan struct{}
   283  }
   284  
   285  // newServerPeer returns a new serverPeer instance. The peer needs to be set by
   286  // the caller.
   287  func newServerPeer(s *server, isPersistent bool) *serverPeer {
   288  	return &serverPeer{
   289  		server:         s,
   290  		persistent:     isPersistent,
   291  		filter:         bloom.LoadFilter(nil),
   292  		knownAddresses: make(map[string]struct{}),
   293  		quit:           make(chan struct{}),
   294  		txProcessed:    make(chan struct{}, 1),
   295  		blockProcessed: make(chan struct{}, 1),
   296  	}
   297  }
   298  
   299  // newestBlock returns the current best block hash and height using the format
   300  // required by the configuration for the peer package.
   301  func (sp *serverPeer) newestBlock() (*chainhash.Hash, int32, error) {
   302  	best := sp.server.chain.BestSnapshot()
   303  	return &best.Hash, best.Height, nil
   304  }
   305  
   306  // addKnownAddresses adds the given addresses to the set of known addresses
   307  // for the peer to prevent sending duplicate addresses.
   308  func (sp *serverPeer) addKnownAddresses(addresses []*wire.NetAddress) {
   309  	sp.addressesMtx.Lock()
   310  	for _, na := range addresses {
   311  		sp.knownAddresses[addrmgr.NetAddressKey(na)] = struct{}{}
   312  	}
   313  	sp.addressesMtx.Unlock()
   314  }
   315  
   316  // addressKnown returns true if the given address is already known to the peer.
   317  func (sp *serverPeer) addressKnown(na *wire.NetAddress) bool {
   318  	sp.addressesMtx.RLock()
   319  	_, exists := sp.knownAddresses[addrmgr.NetAddressKey(na)]
   320  	sp.addressesMtx.RUnlock()
   321  	return exists
   322  }
   323  
   324  // setDisableRelayTx toggles relaying of transactions for the given peer.
   325  // It is safe for concurrent access.
   326  func (sp *serverPeer) setDisableRelayTx(disable bool) {
   327  	sp.relayMtx.Lock()
   328  	sp.disableRelayTx = disable
   329  	sp.relayMtx.Unlock()
   330  }
   331  
   332  // relayTxDisabled returns whether or not relaying of transactions for the given
   333  // peer is disabled.
   334  // It is safe for concurrent access.
   335  func (sp *serverPeer) relayTxDisabled() bool {
   336  	sp.relayMtx.Lock()
   337  	isDisabled := sp.disableRelayTx
   338  	sp.relayMtx.Unlock()
   339  
   340  	return isDisabled
   341  }
   342  
   343  // pushAddrMsg sends an addr message to the connected peer using the provided
   344  // addresses.
   345  func (sp *serverPeer) pushAddrMsg(addresses []*wire.NetAddress) {
   346  	// Filter addresses already known to the peer.
   347  	addrs := make([]*wire.NetAddress, 0, len(addresses))
   348  	for _, addr := range addresses {
   349  		if !sp.addressKnown(addr) {
   350  			addrs = append(addrs, addr)
   351  		}
   352  	}
   353  	known, err := sp.PushAddrMsg(addrs)
   354  	if err != nil {
   355  		peerLog.Errorf("Can't push address message to %s: %v", sp.Peer, err)
   356  		sp.Disconnect()
   357  		return
   358  	}
   359  	sp.addKnownAddresses(known)
   360  }
   361  
   362  // addBanScore increases the persistent and decaying ban score fields by the
   363  // values passed as parameters. If the resulting score exceeds half of the ban
   364  // threshold, a warning is logged including the reason provided. Further, if
   365  // the score is above the ban threshold, the peer will be banned and
   366  // disconnected.
   367  func (sp *serverPeer) addBanScore(persistent, transient uint32, reason string) bool {
   368  	// No warning is logged and no score is calculated if banning is disabled.
   369  	if cfg.DisableBanning {
   370  		return false
   371  	}
   372  	if sp.isWhitelisted {
   373  		peerLog.Debugf("Misbehaving whitelisted peer %s: %s", sp, reason)
   374  		return false
   375  	}
   376  
   377  	warnThreshold := cfg.BanThreshold >> 1
   378  	if transient == 0 && persistent == 0 {
   379  		// The score is not being increased, but a warning message is still
   380  		// logged if the score is above the warn threshold.
   381  		score := sp.banScore.Int()
   382  		if score > warnThreshold {
   383  			peerLog.Warnf("Misbehaving peer %s: %s -- ban score is %d, "+
   384  				"it was not increased this time", sp, reason, score)
   385  		}
   386  		return false
   387  	}
   388  	score := sp.banScore.Increase(persistent, transient)
   389  	if score > warnThreshold {
   390  		peerLog.Warnf("Misbehaving peer %s: %s -- ban score increased to %d",
   391  			sp, reason, score)
   392  		if score > cfg.BanThreshold {
   393  			peerLog.Warnf("Misbehaving peer %s -- banning and disconnecting",
   394  				sp)
   395  			sp.server.BanPeer(sp)
   396  			sp.Disconnect()
   397  			return true
   398  		}
   399  	}
   400  	return false
   401  }
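
// As a worked example of the thresholds above: btcd-derived nodes typically
// default BanThreshold to 100 (an assumption here; the configured
// cfg.BanThreshold is what actually governs), so the warn threshold is
//
//	warnThreshold := cfg.BanThreshold >> 1 // e.g. 100 >> 1 == 50
//
// and a score of 51-100 only warns, while anything above 100 bans and
// disconnects.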
   402  
   403  // hasServices returns whether or not the provided advertised service flags have
   404  // all of the provided desired service flags set.
   405  func hasServices(advertised, desired wire.ServiceFlag) bool {
   406  	return advertised&desired == desired
   407  }
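
// For example, a peer advertising network and witness support satisfies a
// requirement for either flag alone or for both together, but not for a
// flag it does not advertise:
//
//	advertised := wire.SFNodeNetwork | wire.SFNodeWitness
//	hasServices(advertised, wire.SFNodeNetwork)                    // true
//	hasServices(advertised, wire.SFNodeNetwork|wire.SFNodeWitness) // true
//	hasServices(advertised, wire.SFNodeCF)                         // false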
   408  
   409  // OnVersion is invoked when a peer receives a version bitcoin message
   410  // and is used to negotiate the protocol version details as well as kick start
   411  // the communications.
   412  func (sp *serverPeer) OnVersion(_ *peer.Peer, msg *wire.MsgVersion) *wire.MsgReject {
   413  	// Update the address manager with the advertised services for outbound
   414  	// connections in case they have changed.  This is not done for inbound
   415  	// connections to help prevent malicious behavior and is skipped when
   416  	// running on the simulation test network since it is only intended to
   417  	// connect to specified peers and actively avoids advertising and
   418  	// connecting to discovered peers.
   419  	//
   420  	// NOTE: This is done before rejecting peers that are too old to ensure
   421  	// it is updated regardless in the case a new minimum protocol version is
   422  	// enforced and the remote node has not upgraded yet.
   423  	isInbound := sp.Inbound()
   424  	remoteAddr := sp.NA()
   425  	addrManager := sp.server.addrManager
   426  	if !cfg.SimNet && !isInbound {
   427  		addrManager.SetServices(remoteAddr, msg.Services)
   428  	}
   429  
   430  	// Ignore peers that have a protocol version that is too old.  The peer
   431  	// negotiation logic will disconnect it after this callback returns.
   432  	if msg.ProtocolVersion < int32(peer.MinAcceptableProtocolVersion) {
   433  		return nil
   434  	}
   435  
   436  	// Reject outbound peers that are not full nodes.
   437  	wantServices := wire.SFNodeNetwork
   438  	if !isInbound && !hasServices(msg.Services, wantServices) {
   439  		missingServices := wantServices & ^msg.Services
   440  		srvrLog.Debugf("Rejecting peer %s with services %v due to not "+
   441  			"providing desired services %v", sp.Peer, msg.Services,
   442  			missingServices)
   443  		reason := fmt.Sprintf("required services %#x not offered",
   444  			uint64(missingServices))
   445  		return wire.NewMsgReject(msg.Command(), wire.RejectNonstandard, reason)
   446  	}
   447  
   448  	if !cfg.SimNet && !isInbound {
   449  		// After soft-fork activation, only make outbound
   450  		// connection to peers if they flag that they're segwit
   451  		// enabled.
   452  		chain := sp.server.chain
   453  		segwitActive, err := chain.IsDeploymentActive(chaincfg.DeploymentSegwit)
   454  		if err != nil {
   455  			peerLog.Errorf("Unable to query for segwit soft-fork state: %v",
   456  				err)
   457  			return nil
   458  		}
   459  
   460  		if segwitActive && !sp.IsWitnessEnabled() {
   461  			peerLog.Infof("Disconnecting non-segwit peer %v, isn't segwit "+
   462  				"enabled and we need more segwit enabled peers", sp)
   463  			sp.Disconnect()
   464  			return nil
   465  		}
   466  	}
   467  
   468  	// Add the remote peer time as a sample for creating an offset against
   469  	// the local clock to keep the network time in sync.
   470  	sp.server.timeSource.AddTimeSample(sp.Addr(), msg.Timestamp)
   471  
   472  	// Choose whether or not to relay transactions before a filter command
   473  	// is received.
   474  	sp.setDisableRelayTx(msg.DisableRelayTx)
   475  
   476  	return nil
   477  }
   478  
   479  // OnVerAck is invoked when a peer receives a verack bitcoin message and is used
   480  // to kick start communication with them.
   481  func (sp *serverPeer) OnVerAck(_ *peer.Peer, _ *wire.MsgVerAck) {
   482  	sp.server.AddPeer(sp)
   483  }
   484  
   485  // OnMemPool is invoked when a peer receives a mempool bitcoin message.
   486  // It creates and sends an inventory message with the contents of the memory
   487  // pool up to the maximum inventory allowed per message.  When the peer has a
   488  // bloom filter loaded, the contents are filtered accordingly.
   489  func (sp *serverPeer) OnMemPool(_ *peer.Peer, msg *wire.MsgMemPool) {
   490  	// Only allow mempool requests if the server has bloom filtering
   491  	// enabled.
   492  	if sp.server.services&wire.SFNodeBloom != wire.SFNodeBloom {
   493  		peerLog.Debugf("peer %v sent mempool request with bloom "+
   494  			"filtering disabled -- disconnecting", sp)
   495  		sp.Disconnect()
   496  		return
   497  	}
   498  
   499  	// A decaying ban score increase is applied to prevent flooding.
   500  	// The ban score accumulates and passes the ban threshold if a burst of
   501  	// mempool messages comes from a peer. The score decays each minute to
   502  	// half of its value.
   503  	if sp.addBanScore(0, 33, "mempool") {
   504  		return
   505  	}
   506  
   507  	// Generate inventory message with the available transactions in the
   508  	// transaction memory pool.  Limit it to the max allowed inventory
   509  	// per message.  The NewMsgInvSizeHint function automatically limits
   510  	// the passed hint to the maximum allowed, so it's safe to pass it
   511  	// without double checking it here.
   512  	txMemPool := sp.server.txMemPool
   513  	txDescs := txMemPool.TxDescs()
   514  	invMsg := wire.NewMsgInvSizeHint(uint(len(txDescs)))
   515  
   516  	for _, txDesc := range txDescs {
   517  		// Either add all transactions when there is no bloom filter,
   518  		// or only the transactions that match the filter when there is
   519  		// one.
   520  		if !sp.filter.IsLoaded() || sp.filter.MatchTxAndUpdate(txDesc.Tx) {
   521  			iv := wire.NewInvVect(wire.InvTypeTx, txDesc.Tx.Hash())
   522  			invMsg.AddInvVect(iv)
   523  			if len(invMsg.InvList)+1 > wire.MaxInvPerMsg {
   524  				break
   525  			}
   526  		}
   527  	}
   528  
   529  	// Send the inventory message if there is anything to send.
   530  	if len(invMsg.InvList) > 0 {
   531  		sp.QueueMessage(invMsg, nil)
   532  	}
   533  }
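
// As a worked example of the decay behavior above: at 33 transient points
// per mempool request, three rapid requests score 99 while a fourth reaches
// 132 and crosses a threshold of 100 (100 is an assumed default here;
// cfg.BanThreshold governs).  The per-minute halving means well-spaced
// requests never accumulate:
//
//	// rapid:  33 -> 66 -> 99 -> 132 (banned on the fourth request)
//	// spaced: 33 -> ~16 after a minute -> ~49 after another request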
   534  
   535  // OnTx is invoked when a peer receives a tx bitcoin message.  It blocks
   536  // until the bitcoin transaction has been fully processed.  Unlike the block
   537  // handler, this does not serialize all transactions through a single thread;
   538  // transactions don't rely on the previous one in a linear fashion like blocks.
   539  func (sp *serverPeer) OnTx(_ *peer.Peer, msg *wire.MsgTx) {
   540  	if cfg.BlocksOnly {
   541  		peerLog.Tracef("Ignoring tx %v from %v - blocksonly enabled",
   542  			msg.TxHash(), sp)
   543  		return
   544  	}
   545  
   546  	// Add the transaction to the known inventory for the peer.
   547  	// Convert the raw MsgTx to a palcutil.Tx which provides some convenience
   548  	// methods and things such as hash caching.
   549  	tx := palcutil.NewTx(msg)
   550  	iv := wire.NewInvVect(wire.InvTypeTx, tx.Hash())
   551  	sp.AddKnownInventory(iv)
   552  
   553  	// Queue the transaction up to be handled by the sync manager and
   554  	// intentionally block further receives until the transaction is fully
   555  	// processed and known good or bad.  This helps prevent a malicious peer
   556  	// from queuing up a bunch of bad transactions before disconnecting (or
   557  	// being disconnected) and wasting memory.
   558  	sp.server.syncManager.QueueTx(tx, sp.Peer, sp.txProcessed)
   559  	<-sp.txProcessed
   560  }
   561  
   562  // OnBlock is invoked when a peer receives a block bitcoin message.  It
   563  // blocks until the bitcoin block has been fully processed.
   564  func (sp *serverPeer) OnBlock(_ *peer.Peer, msg *wire.MsgBlock, buf []byte) {
   565  	// Convert the raw MsgBlock to a palcutil.Block which provides some
   566  	// convenience methods and things such as hash caching.
   567  	block := palcutil.NewBlockFromBlockAndBytes(msg, buf)
   568  
   569  	// Add the block to the known inventory for the peer.
   570  	iv := wire.NewInvVect(wire.InvTypeBlock, block.Hash())
   571  	sp.AddKnownInventory(iv)
   572  
   573  	// Queue the block up to be handled by the block
   574  	// manager and intentionally block further receives
   575  	// until the bitcoin block is fully processed and known
   576  	// good or bad.  This helps prevent a malicious peer
   577  	// from queuing up a bunch of bad blocks before
   578  	// disconnecting (or being disconnected) and wasting
   579  	// memory.  Additionally, this behavior is depended on
   580  	// by at least the block acceptance test tool as the
   581  	// reference implementation processes blocks in the same
   582  	// thread and therefore blocks further messages until
   583  	// the bitcoin block has been fully processed.
   584  	sp.server.syncManager.QueueBlock(block, sp.Peer, sp.blockProcessed)
   585  	<-sp.blockProcessed
   586  }
   587  
   588  // OnInv is invoked when a peer receives an inv bitcoin message and is
   589  // used to examine the inventory being advertised by the remote peer and react
   590  // accordingly.  We pass the message down to blockmanager which will call
   591  // QueueMessage with any appropriate responses.
   592  func (sp *serverPeer) OnInv(_ *peer.Peer, msg *wire.MsgInv) {
   593  	if !cfg.BlocksOnly {
   594  		if len(msg.InvList) > 0 {
   595  			sp.server.syncManager.QueueInv(msg, sp.Peer)
   596  		}
   597  		return
   598  	}
   599  
   600  	newInv := wire.NewMsgInvSizeHint(uint(len(msg.InvList)))
   601  	for _, invVect := range msg.InvList {
   602  		if invVect.Type == wire.InvTypeTx {
   603  			peerLog.Tracef("Ignoring tx %v in inv from %v -- "+
   604  				"blocksonly enabled", invVect.Hash, sp)
   605  			if sp.ProtocolVersion() >= wire.BIP0037Version {
   606  				peerLog.Infof("Peer %v is announcing "+
   607  					"transactions -- disconnecting", sp)
   608  				sp.Disconnect()
   609  				return
   610  			}
   611  			continue
   612  		}
   613  		err := newInv.AddInvVect(invVect)
   614  		if err != nil {
   615  			peerLog.Errorf("Failed to add inventory vector: %v", err)
   616  			break
   617  		}
   618  	}
   619  
   620  	if len(newInv.InvList) > 0 {
   621  		sp.server.syncManager.QueueInv(newInv, sp.Peer)
   622  	}
   623  }
   624  
   625  // OnHeaders is invoked when a peer receives a headers bitcoin
   626  // message.  The message is passed down to the sync manager.
   627  func (sp *serverPeer) OnHeaders(_ *peer.Peer, msg *wire.MsgHeaders) {
   628  	sp.server.syncManager.QueueHeaders(msg, sp.Peer)
   629  }
   630  
   631  // OnGetData is invoked when a peer receives a getdata bitcoin message and
   632  // is used to deliver block and transaction information.
   633  func (sp *serverPeer) OnGetData(_ *peer.Peer, msg *wire.MsgGetData) {
   634  	numAdded := 0
   635  	notFound := wire.NewMsgNotFound()
   636  
   637  	length := len(msg.InvList)
   638  	// A decaying ban score increase is applied to prevent exhausting resources
   639  	// with unusually large inventory queries.
   640  	// Requesting more than the maximum inventory vector length within a short
   641  	// period of time yields a score above the default ban threshold. Sustained
   642  	// bursts of small requests are not penalized as that would potentially ban
   643  	// peers performing IBD.
   644  	// This incremental score decays each minute to half of its value.
   645  	if sp.addBanScore(0, uint32(length)*99/wire.MaxInvPerMsg, "getdata") {
   646  		return
   647  	}
   648  
   649  	// We wait on this wait channel periodically to prevent queuing
   650  	// far more data than we can send in a reasonable time, wasting memory.
   651  	// The waiting occurs after the database fetch for the next one to
   652  	// provide a little pipelining.
   653  	var waitChan chan struct{}
   654  	doneChan := make(chan struct{}, 1)
   655  
   656  	for i, iv := range msg.InvList {
   657  		var c chan struct{}
   658  		// If this will be the last message we send.
   659  		if i == length-1 && len(notFound.InvList) == 0 {
   660  			c = doneChan
   661  		} else if (i+1)%3 == 0 {
   662  			// Buffered so as to not make the send goroutine block.
   663  			c = make(chan struct{}, 1)
   664  		}
   665  		var err error
   666  		switch iv.Type {
   667  		case wire.InvTypeWitnessTx:
   668  			err = sp.server.pushTxMsg(sp, &iv.Hash, c, waitChan, wire.WitnessEncoding)
   669  		case wire.InvTypeTx:
   670  			err = sp.server.pushTxMsg(sp, &iv.Hash, c, waitChan, wire.BaseEncoding)
   671  		case wire.InvTypeWitnessBlock:
   672  			err = sp.server.pushBlockMsg(sp, &iv.Hash, c, waitChan, wire.WitnessEncoding)
   673  		case wire.InvTypeBlock:
   674  			err = sp.server.pushBlockMsg(sp, &iv.Hash, c, waitChan, wire.BaseEncoding)
   675  		case wire.InvTypeFilteredWitnessBlock:
   676  			err = sp.server.pushMerkleBlockMsg(sp, &iv.Hash, c, waitChan, wire.WitnessEncoding)
   677  		case wire.InvTypeFilteredBlock:
   678  			err = sp.server.pushMerkleBlockMsg(sp, &iv.Hash, c, waitChan, wire.BaseEncoding)
   679  		default:
   680  			peerLog.Warnf("Unknown type in inventory request %d",
   681  				iv.Type)
   682  			continue
   683  		}
   684  		if err != nil {
   685  			notFound.AddInvVect(iv)
   686  
   687  			// When there is a failure fetching the final entry
   688  			// and the done channel was sent in due to there
   689  			// being no outstanding not found inventory, consume
   690  			// it here because there is now not found inventory
   691  			// that will use the channel momentarily.
   692  			if i == len(msg.InvList)-1 && c != nil {
   693  				<-c
   694  			}
   695  		}
   696  		numAdded++
   697  		waitChan = c
   698  	}
   699  	if len(notFound.InvList) != 0 {
   700  		sp.QueueMessage(notFound, doneChan)
   701  	}
   702  
   703  	// Wait for messages to be sent. We can send quite a lot of data at this
   704  	// point and this will keep the peer busy for a decent amount of time.
   705  	// We don't process anything else from them during this time so that we
   706  	// have an idea of when we should hear back from them - else the idle
   707  	// timeout could fire when we were only half done sending the blocks.
   708  	if numAdded > 0 {
   709  		<-doneChan
   710  	}
   711  }
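
// The waitChan/doneChan handling above pipelines the sends: each iteration
// starts its database fetch immediately, but the push waits on the previous
// iteration's channel before queueing the message, so one fetch can run
// ahead of the network writes without unbounded queueing.  A stripped-down
// sketch of the chaining (fetchAndQueue is a hypothetical stand-in for the
// push helpers; the real loop only allocates a channel for every third item
// and for the final one):
//
//	var waitChan chan struct{}
//	for i := range items {
//		c := make(chan struct{}, 1) // buffered so the sender never blocks
//		fetchAndQueue(items[i], c, waitChan)
//		waitChan = c // the next push waits for this send to complete
//	}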
   712  
   713  // OnGetBlocks is invoked when a peer receives a getblocks bitcoin
   714  // message.
   715  func (sp *serverPeer) OnGetBlocks(_ *peer.Peer, msg *wire.MsgGetBlocks) {
   716  	// Find the most recent known block in the best chain based on the block
   717  	// locator and fetch all of the block hashes after it until either
   718  	// wire.MaxBlocksPerMsg have been fetched or the provided stop hash is
   719  	// encountered.
   720  	//
   721  	// Use the block after the genesis block if no other blocks in the
   722  	// provided locator are known.  This does mean the client will start
   723  	// over with the genesis block if unknown block locators are provided.
   724  	//
   725  	// This mirrors the behavior in the reference implementation.
   726  	chain := sp.server.chain
   727  	hashList := chain.LocateBlocks(msg.BlockLocatorHashes, &msg.HashStop,
   728  		wire.MaxBlocksPerMsg)
   729  
   730  	// Generate inventory message.
   731  	invMsg := wire.NewMsgInv()
   732  	for i := range hashList {
   733  		iv := wire.NewInvVect(wire.InvTypeBlock, &hashList[i])
   734  		invMsg.AddInvVect(iv)
   735  	}
   736  
   737  	// Send the inventory message if there is anything to send.
   738  	if len(invMsg.InvList) > 0 {
   739  		invListLen := len(invMsg.InvList)
   740  		if invListLen == wire.MaxBlocksPerMsg {
   741  			// Intentionally use a copy of the final hash so there
   742  			// is not a reference into the inventory slice which
   743  			// would prevent the entire slice from being eligible
   744  			// for GC as soon as it's sent.
   745  			continueHash := invMsg.InvList[invListLen-1].Hash
   746  			sp.continueHash = &continueHash
   747  		}
   748  		sp.QueueMessage(invMsg, nil)
   749  	}
   750  }
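
// The value copy of the final hash above matters for garbage collection:
// taking the address of an element inside invMsg.InvList would keep the
// entire backing array reachable for as long as sp.continueHash lives.
// The contrast, as a sketch:
//
//	sp.continueHash = &invMsg.InvList[invListLen-1].Hash // pins the slice
//
//	continueHash := invMsg.InvList[invListLen-1].Hash // copies the value
//	sp.continueHash = &continueHash                   // slice can be GC'd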
   751  
   752  // OnGetHeaders is invoked when a peer receives a getheaders bitcoin
   753  // message.
   754  func (sp *serverPeer) OnGetHeaders(_ *peer.Peer, msg *wire.MsgGetHeaders) {
   755  	// Ignore getheaders requests if not in sync.
   756  	if !sp.server.syncManager.IsCurrent() {
   757  		return
   758  	}
   759  
   760  	// Find the most recent known block in the best chain based on the block
   761  	// locator and fetch all of the headers after it until either
   762  	// wire.MaxBlockHeadersPerMsg have been fetched or the provided stop
   763  	// hash is encountered.
   764  	//
   765  	// Use the block after the genesis block if no other blocks in the
   766  	// provided locator are known.  This does mean the client will start
   767  	// over with the genesis block if unknown block locators are provided.
   768  	//
   769  	// This mirrors the behavior in the reference implementation.
   770  	chain := sp.server.chain
   771  	headers := chain.LocateHeaders(msg.BlockLocatorHashes, &msg.HashStop)
   772  
   773  	// Send found headers to the requesting peer.
   774  	blockHeaders := make([]*wire.BlockHeader, len(headers))
   775  	for i := range headers {
   776  		blockHeaders[i] = &headers[i]
   777  	}
   778  	sp.QueueMessage(&wire.MsgHeaders{Headers: blockHeaders}, nil)
   779  }
   780  
   781  // OnGetCFilters is invoked when a peer receives a getcfilters bitcoin message.
   782  func (sp *serverPeer) OnGetCFilters(_ *peer.Peer, msg *wire.MsgGetCFilters) {
   783  	// Ignore getcfilters requests if not in sync.
   784  	if !sp.server.syncManager.IsCurrent() {
   785  		return
   786  	}
   787  
   788  	// We'll also ensure that the remote party is requesting a set of
   789  	// filters that we actually currently maintain.
   790  	switch msg.FilterType {
   791  	case wire.GCSFilterRegular:
   792  		break
   793  
   794  	default:
   795  		peerLog.Debugf("Filter request for unknown filter: %v",
   796  			msg.FilterType)
   797  		return
   798  	}
   799  
   800  	hashes, err := sp.server.chain.HeightToHashRange(
   801  		int32(msg.StartHeight), &msg.StopHash, wire.MaxGetCFiltersReqRange,
   802  	)
   803  	if err != nil {
   804  		peerLog.Debugf("Invalid getcfilters request: %v", err)
   805  		return
   806  	}
   807  
   808  	// Create []*chainhash.Hash from []chainhash.Hash to pass to
   809  	// FiltersByBlockHashes.
   810  	hashPtrs := make([]*chainhash.Hash, len(hashes))
   811  	for i := range hashes {
   812  		hashPtrs[i] = &hashes[i]
   813  	}
   814  
   815  	filters, err := sp.server.cfIndex.FiltersByBlockHashes(
   816  		hashPtrs, msg.FilterType,
   817  	)
   818  	if err != nil {
   819  		peerLog.Errorf("Error retrieving cfilters: %v", err)
   820  		return
   821  	}
   822  
   823  	for i, filterBytes := range filters {
   824  		if len(filterBytes) == 0 {
   825  			peerLog.Warnf("Could not obtain cfilter for %v",
   826  				hashes[i])
   827  			return
   828  		}
   829  
   830  		filterMsg := wire.NewMsgCFilter(
   831  			msg.FilterType, &hashes[i], filterBytes,
   832  		)
   833  		sp.QueueMessage(filterMsg, nil)
   834  	}
   835  }
   836  
   837  // OnGetCFHeaders is invoked when a peer receives a getcfheaders bitcoin message.
   838  func (sp *serverPeer) OnGetCFHeaders(_ *peer.Peer, msg *wire.MsgGetCFHeaders) {
   839  	// Ignore getcfheaders requests if not in sync.
   840  	if !sp.server.syncManager.IsCurrent() {
   841  		return
   842  	}
   843  
   844  	// We'll also ensure that the remote party is requesting a set of
   845  	// headers for filters that we actually currently maintain.
   846  	switch msg.FilterType {
   847  	case wire.GCSFilterRegular:
   848  		break
   849  
   850  	default:
   851  		peerLog.Debugf("Filter request for unknown headers for "+
   852  			"filter: %v", msg.FilterType)
   853  		return
   854  	}
   855  
   856  	startHeight := int32(msg.StartHeight)
   857  	maxResults := wire.MaxCFHeadersPerMsg
   858  
   859  	// If StartHeight is positive, fetch the predecessor block hash so we
   860  	// can populate the PrevFilterHeader field.
   861  	if msg.StartHeight > 0 {
   862  		startHeight--
   863  		maxResults++
   864  	}
   865  
   866  	// Fetch the hashes from the block index.
   867  	hashList, err := sp.server.chain.HeightToHashRange(
   868  		startHeight, &msg.StopHash, maxResults,
   869  	)
   870  	if err != nil {
   871  		peerLog.Debugf("Invalid getcfheaders request: %v", err)
   872  	}
   873  
   874  	// This is possible if StartHeight is one greater than the height of
   875  	// StopHash, and we pull a valid range of hashes including the previous
   876  	// filter header.
   877  	if len(hashList) == 0 || (msg.StartHeight > 0 && len(hashList) == 1) {
   878  		peerLog.Debug("No results for getcfheaders request")
   879  		return
   880  	}
   881  
   882  	// Create []*chainhash.Hash from []chainhash.Hash to pass to
   883  	// FilterHeadersByBlockHashes.
   884  	hashPtrs := make([]*chainhash.Hash, len(hashList))
   885  	for i := range hashList {
   886  		hashPtrs[i] = &hashList[i]
   887  	}
   888  
   889  	// Fetch the raw filter hash bytes from the database for all blocks.
   890  	filterHashes, err := sp.server.cfIndex.FilterHashesByBlockHashes(
   891  		hashPtrs, msg.FilterType,
   892  	)
   893  	if err != nil {
   894  		peerLog.Errorf("Error retrieving cfilter hashes: %v", err)
   895  		return
   896  	}
   897  
   898  	// Generate cfheaders message and send it.
   899  	headersMsg := wire.NewMsgCFHeaders()
   900  
   901  	// Populate the PrevFilterHeader field.
   902  	if msg.StartHeight > 0 {
   903  		prevBlockHash := &hashList[0]
   904  
   905  		// Fetch the raw committed filter header bytes from the
   906  		// database.
   907  		headerBytes, err := sp.server.cfIndex.FilterHeaderByBlockHash(
   908  			prevBlockHash, msg.FilterType)
   909  		if err != nil {
   910  			peerLog.Errorf("Error retrieving CF header: %v", err)
   911  			return
   912  		}
   913  		if len(headerBytes) == 0 {
   914  			peerLog.Warnf("Could not obtain CF header for %v", prevBlockHash)
   915  			return
   916  		}
   917  
   918  		// Deserialize the hash into PrevFilterHeader.
   919  		err = headersMsg.PrevFilterHeader.SetBytes(headerBytes)
   920  		if err != nil {
   921  			peerLog.Warnf("Committed filter header deserialize "+
   922  				"failed: %v", err)
   923  			return
   924  		}
   925  
   926  		hashList = hashList[1:]
   927  		filterHashes = filterHashes[1:]
   928  	}
   929  
   930  	// Populate HeaderHashes.
   931  	for i, hashBytes := range filterHashes {
   932  		if len(hashBytes) == 0 {
   933  			peerLog.Warnf("Could not obtain CF hash for %v", hashList[i])
   934  			return
   935  		}
   936  
   937  		// Deserialize the hash.
   938  		filterHash, err := chainhash.NewHash(hashBytes)
   939  		if err != nil {
   940  			peerLog.Warnf("Committed filter hash deserialize "+
   941  				"failed: %v", err)
   942  			return
   943  		}
   944  
   945  		headersMsg.AddCFHash(filterHash)
   946  	}
   947  
   948  	headersMsg.FilterType = msg.FilterType
   949  	headersMsg.StopHash = msg.StopHash
   950  
   951  	sp.QueueMessage(headersMsg, nil)
   952  }
   953  
   954  // OnGetCFCheckpt is invoked when a peer receives a getcfcheckpt bitcoin message.
   955  func (sp *serverPeer) OnGetCFCheckpt(_ *peer.Peer, msg *wire.MsgGetCFCheckpt) {
   956  	// Ignore getcfcheckpt requests if not in sync.
   957  	if !sp.server.syncManager.IsCurrent() {
   958  		return
   959  	}
   960  
   961  	// We'll also ensure that the remote party is requesting a set of
   962  	// checkpoints for filters that we actually currently maintain.
   963  	switch msg.FilterType {
   964  	case wire.GCSFilterRegular:
   965  		break
   966  
   967  	default:
   968  		peerLog.Debugf("Filter request for unknown checkpoints for "+
   969  			"filter: %v", msg.FilterType)
   970  		return
   971  	}
   972  
   973  	// Now that we know the client is fetching a filter that we know of,
   974  	// we'll fetch the block hashes at each checkpoint interval so we can
   975  	// compare against our cache, and create new check points if necessary.
   976  	blockHashes, err := sp.server.chain.IntervalBlockHashes(
   977  		&msg.StopHash, wire.CFCheckptInterval,
   978  	)
   979  	if err != nil {
   980  		peerLog.Debugf("Invalid getcfcheckpt request: %v", err)
   981  		return
   982  	}
   983  
   984  	checkptMsg := wire.NewMsgCFCheckpt(
   985  		msg.FilterType, &msg.StopHash, len(blockHashes),
   986  	)
   987  
   988  	// Fetch the current existing cache so we can decide if we need to
   989  	// extend it or if it's adequate as is.
   990  	sp.server.cfCheckptCachesMtx.RLock()
   991  	checkptCache := sp.server.cfCheckptCaches[msg.FilterType]
   992  
   993  	// If the set of block hashes is beyond the current size of the cache,
   994  	// then we'll expand the size of the cache and also retain the write
   995  	// lock.
   996  	var updateCache bool
   997  	if len(blockHashes) > len(checkptCache) {
   998  		// Now that we know we'll need to modify the size of the cache,
   999  		// we'll release the read lock and grab the write lock to
  1000  		// possibly expand the cache size.
  1001  		sp.server.cfCheckptCachesMtx.RUnlock()
  1002  
  1003  		sp.server.cfCheckptCachesMtx.Lock()
  1004  		defer sp.server.cfCheckptCachesMtx.Unlock()
  1005  
  1006  		// Now that we have the write lock, we'll check again as it's
  1007  		// possible that the cache has already been expanded.
  1008  		checkptCache = sp.server.cfCheckptCaches[msg.FilterType]
  1009  
  1010  		// If we still need to expand the cache, then we'll mark that
  1011  		// we need to update the cache for below and also expand the
  1012  		// size of the cache in place.
  1013  		if len(blockHashes) > len(checkptCache) {
  1014  			updateCache = true
  1015  
  1016  			additionalLength := len(blockHashes) - len(checkptCache)
  1017  			newEntries := make([]cfHeaderKV, additionalLength)
  1018  
  1019  			peerLog.Infof("Growing size of checkpoint cache from %v to %v "+
  1020  				"block hashes", len(checkptCache), len(blockHashes))
  1021  
  1022  			checkptCache = append(
  1023  				sp.server.cfCheckptCaches[msg.FilterType],
  1024  				newEntries...,
  1025  			)
  1026  		}
  1027  	} else {
  1028  		// Otherwise, we'll hold onto the read lock for the remainder
  1029  		// of this method.
  1030  		defer sp.server.cfCheckptCachesMtx.RUnlock()
  1031  
  1032  		peerLog.Tracef("Serving stale cache of size %v",
  1033  			len(checkptCache))
  1034  	}
  1035  
  1036  	// Now that we know the cache is of an appropriate size, we'll iterate
  1037  	// backwards until we find the block hash. We do this as it's possible
  1038  	// a re-org has occurred so items in the db are now in the main chain
  1039  	// while the cache has been partially invalidated.
  1040  	var forkIdx int
  1041  	for forkIdx = len(blockHashes); forkIdx > 0; forkIdx-- {
  1042  		if checkptCache[forkIdx-1].blockHash == blockHashes[forkIdx-1] {
  1043  			break
  1044  		}
  1045  	}
  1046  
  1047  	// Now that we know how much of the cache is relevant for this
  1048  	// query, we'll populate our check point message with the cache as is.
  1049  	// Shortly below, we'll populate the new elements of the cache.
  1050  	for i := 0; i < forkIdx; i++ {
  1051  		checkptMsg.AddCFHeader(&checkptCache[i].filterHeader)
  1052  	}
  1053  
  1054  	// We'll now collect the set of hashes that are beyond our cache so we
  1055  	// can look up the filter headers to populate the final cache.
  1056  	blockHashPtrs := make([]*chainhash.Hash, 0, len(blockHashes)-forkIdx)
  1057  	for i := forkIdx; i < len(blockHashes); i++ {
  1058  		blockHashPtrs = append(blockHashPtrs, &blockHashes[i])
  1059  	}
  1060  	filterHeaders, err := sp.server.cfIndex.FilterHeadersByBlockHashes(
  1061  		blockHashPtrs, msg.FilterType,
  1062  	)
  1063  	if err != nil {
  1064  		peerLog.Errorf("Error retrieving cfilter headers: %v", err)
  1065  		return
  1066  	}
  1067  
  1068  	// Now that we have the full set of filter headers, we'll add them to
  1069  	// the checkpoint message, and also update our cache in line.
  1070  	for i, filterHeaderBytes := range filterHeaders {
  1071  		if len(filterHeaderBytes) == 0 {
  1072  			peerLog.Warnf("Could not obtain CF header for %v",
  1073  				blockHashPtrs[i])
  1074  			return
  1075  		}
  1076  
  1077  		filterHeader, err := chainhash.NewHash(filterHeaderBytes)
  1078  		if err != nil {
  1079  			peerLog.Warnf("Committed filter header deserialize "+
  1080  				"failed: %v", err)
  1081  			return
  1082  		}
  1083  
  1084  		checkptMsg.AddCFHeader(filterHeader)
  1085  
  1086  		// If the new main chain is longer than what's in the cache,
  1087  		// then we'll override it beyond the fork point.
  1088  		if updateCache {
  1089  			checkptCache[forkIdx+i] = cfHeaderKV{
  1090  				blockHash:    blockHashes[forkIdx+i],
  1091  				filterHeader: *filterHeader,
  1092  			}
  1093  		}
  1094  	}
  1095  
  1096  	// Finally, we'll update the cache if we need to, and send the final
  1097  	// message back to the requesting peer.
  1098  	if updateCache {
  1099  		sp.server.cfCheckptCaches[msg.FilterType] = checkptCache
  1100  	}
  1101  
  1102  	sp.QueueMessage(checkptMsg, nil)
  1103  }
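
// The cache handling in OnGetCFCheckpt is a double-checked locking pattern
// around a sync.RWMutex: check under the read lock, and only when growth is
// needed, drop it, take the write lock, and check again, since another
// goroutine may have grown the cache in the window between the two locks.
// The bare skeleton of the pattern (mtx, cache, and needGrow are
// illustrative names):
//
//	mtx.RLock()
//	cur := cache[key]
//	if needGrow(cur) {
//		mtx.RUnlock()
//		mtx.Lock()
//		defer mtx.Unlock()
//		if cur = cache[key]; needGrow(cur) {
//			// expand cur and write it back to cache[key]
//		}
//	} else {
//		defer mtx.RUnlock()
//	}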
  1104  
  1105  // enforceNodeBloomFlag disconnects the peer if the server is not configured to
  1106  // allow bloom filters.  Additionally, if the peer has negotiated to a protocol
  1107  // version that is high enough to observe the bloom filter service support bit,
  1108  // it will be banned since it is intentionally violating the protocol.
  1109  func (sp *serverPeer) enforceNodeBloomFlag(cmd string) bool {
  1110  	if sp.server.services&wire.SFNodeBloom != wire.SFNodeBloom {
  1111  		// Ban the peer if the protocol version is high enough that the
  1112  		// peer is knowingly violating the protocol and banning is
  1113  		// enabled.
  1114  		//
  1115  		// NOTE: Even though the addBanScore function already examines
  1116  		// whether or not banning is enabled, it is checked here as well
  1117  		// to ensure the violation is logged and the peer is
  1118  		// disconnected regardless.
  1119  		if sp.ProtocolVersion() >= wire.BIP0111Version &&
  1120  			!cfg.DisableBanning {
  1121  
  1122  			// Disconnect the peer regardless of whether it was
  1123  			// banned.
  1124  			sp.addBanScore(100, 0, cmd)
  1125  			sp.Disconnect()
  1126  			return false
  1127  		}
  1128  
  1129  		// Disconnect the peer regardless of protocol version or banning
  1130  		// state.
  1131  		peerLog.Debugf("%s sent an unsupported %s request -- "+
  1132  			"disconnecting", sp, cmd)
  1133  		sp.Disconnect()
  1134  		return false
  1135  	}
  1136  
  1137  	return true
  1138  }
  1139  
  1140  // OnFeeFilter is invoked when a peer receives a feefilter bitcoin message and
  1141  // is used by remote peers to request that no transactions which have a fee rate
  1142  // lower than the provided value are inventoried to them.  The peer will be
  1143  // disconnected if an invalid fee filter value is provided.
  1144  func (sp *serverPeer) OnFeeFilter(_ *peer.Peer, msg *wire.MsgFeeFilter) {
  1145  	// Check that the passed minimum fee is a valid amount.
  1146  	if msg.MinFee < 0 || msg.MinFee > palcutil.MaxSatoshi {
  1147  		peerLog.Debugf("Peer %v sent an invalid feefilter '%v' -- "+
  1148  			"disconnecting", sp, palcutil.Amount(msg.MinFee))
  1149  		sp.Disconnect()
  1150  		return
  1151  	}
  1152  
  1153  	atomic.StoreInt64(&sp.feeFilter, msg.MinFee)
  1154  }
  1155  
  1156  // OnFilterAdd is invoked when a peer receives a filteradd bitcoin
  1157  // message and is used by remote peers to add data to an already loaded bloom
  1158  // filter.  The peer will be disconnected if a filter is not loaded when this
  1159  // message is received or the server is not configured to allow bloom filters.
  1160  func (sp *serverPeer) OnFilterAdd(_ *peer.Peer, msg *wire.MsgFilterAdd) {
  1161  	// Disconnect and/or ban depending on the node bloom services flag and
  1162  	// negotiated protocol version.
  1163  	if !sp.enforceNodeBloomFlag(msg.Command()) {
  1164  		return
  1165  	}
  1166  
  1167  	if !sp.filter.IsLoaded() {
  1168  		peerLog.Debugf("%s sent a filteradd request with no filter "+
  1169  			"loaded -- disconnecting", sp)
  1170  		sp.Disconnect()
  1171  		return
  1172  	}
  1173  
  1174  	sp.filter.Add(msg.Data)
  1175  }
  1176  
  1177  // OnFilterClear is invoked when a peer receives a filterclear bitcoin
  1178  // message and is used by remote peers to clear an already loaded bloom filter.
  1179  // The peer will be disconnected if a filter is not loaded when this message is
  1180  // received or the server is not configured to allow bloom filters.
  1181  func (sp *serverPeer) OnFilterClear(_ *peer.Peer, msg *wire.MsgFilterClear) {
  1182  	// Disconnect and/or ban depending on the node bloom services flag and
  1183  	// negotiated protocol version.
  1184  	if !sp.enforceNodeBloomFlag(msg.Command()) {
  1185  		return
  1186  	}
  1187  
  1188  	if !sp.filter.IsLoaded() {
  1189  		peerLog.Debugf("%s sent a filterclear request with no "+
  1190  			"filter loaded -- disconnecting", sp)
  1191  		sp.Disconnect()
  1192  		return
  1193  	}
  1194  
  1195  	sp.filter.Unload()
  1196  }
  1197  
  1198  // OnFilterLoad is invoked when a peer receives a filterload bitcoin
  1199  // message and is used to load a bloom filter that should be used for
  1200  // delivering merkle blocks and associated transactions that match the filter.
  1201  // The peer will be disconnected if the server is not configured to allow bloom
  1202  // filters.
  1203  func (sp *serverPeer) OnFilterLoad(_ *peer.Peer, msg *wire.MsgFilterLoad) {
  1204  	// Disconnect and/or ban depending on the node bloom services flag and
  1205  	// negotiated protocol version.
  1206  	if !sp.enforceNodeBloomFlag(msg.Command()) {
  1207  		return
  1208  	}
  1209  
  1210  	sp.setDisableRelayTx(false)
  1211  
  1212  	sp.filter.Reload(msg)
  1213  }
  1214  
  1215  // OnGetAddr is invoked when a peer receives a getaddr bitcoin message
  1216  // and is used to provide the peer with known addresses from the address
  1217  // manager.
  1218  func (sp *serverPeer) OnGetAddr(_ *peer.Peer, msg *wire.MsgGetAddr) {
  1219  	// Don't return any addresses when running on the simulation test
  1220  	// network.  This helps prevent the network from becoming another
  1221  	// public test network since it will not be able to learn about other
  1222  	// peers that have not specifically been provided.
  1223  	if cfg.SimNet {
  1224  		return
  1225  	}
  1226  
  1227  	// Do not accept getaddr requests from outbound peers.  This reduces
  1228  	// fingerprinting attacks.
  1229  	if !sp.Inbound() {
  1230  		peerLog.Debugf("Ignoring getaddr request from outbound peer "+
  1231  			"%v", sp)
  1232  		return
  1233  	}
  1234  
  1235  	// Only allow one getaddr request per connection to discourage
  1236  	// address stamping of inv announcements.
  1237  	if sp.sentAddrs {
  1238  		peerLog.Debugf("Ignoring repeated getaddr request from peer "+
  1239  			"%v", sp)
  1240  		return
  1241  	}
  1242  	sp.sentAddrs = true
  1243  
  1244  	// Get the current known addresses from the address manager.
  1245  	addrCache := sp.server.addrManager.AddressCache()
  1246  
  1247  	// Push the addresses.
  1248  	sp.pushAddrMsg(addrCache)
  1249  }
  1250  
  1251  // OnAddr is invoked when a peer receives an addr bitcoin message and is
  1252  // used to notify the server about advertised addresses.
  1253  func (sp *serverPeer) OnAddr(_ *peer.Peer, msg *wire.MsgAddr) {
  1254  	// Ignore addresses when running on the simulation test network.  This
  1255  	// helps prevent the network from becoming another public test network
  1256  	// since it will not be able to learn about other peers that have not
  1257  	// specifically been provided.
  1258  	if cfg.SimNet {
  1259  		return
  1260  	}
  1261  
  1262  	// Ignore old style addresses which don't include a timestamp.
  1263  	if sp.ProtocolVersion() < wire.NetAddressTimeVersion {
  1264  		return
  1265  	}
  1266  
  1267  	// A message that has no addresses is invalid.
  1268  	if len(msg.AddrList) == 0 {
  1269  		peerLog.Errorf("Command [%s] from %s does not contain any addresses",
  1270  			msg.Command(), sp.Peer)
  1271  		sp.Disconnect()
  1272  		return
  1273  	}
  1274  
  1275  	for _, na := range msg.AddrList {
  1276  		// Don't add more addresses if we're disconnecting.
  1277  		if !sp.Connected() {
  1278  			return
  1279  		}
  1280  
  1281  		// Set the timestamp to 5 days ago if it's more than 10 minutes
  1282  		// in the future so this address is one of the first to be
  1283  		// removed when space is needed.
  1284  		now := time.Now()
  1285  		if na.Timestamp.After(now.Add(time.Minute * 10)) {
  1286  			na.Timestamp = now.Add(-1 * time.Hour * 24 * 5)
  1287  		}
  1288  
  1289  		// Add address to known addresses for this peer.
  1290  		sp.addKnownAddresses([]*wire.NetAddress{na})
  1291  	}
  1292  
  1293  	// Add addresses to server address manager.  The address manager handles
  1294  	// the details of things such as preventing duplicate addresses, max
  1295  	// addresses, and last seen updates.
  1296  	// XXX bitcoind gives a 2 hour time penalty here, do we want to do the
  1297  	// same?
  1298  	sp.server.addrManager.AddAddresses(msg.AddrList, sp.NA())
  1299  }
  1300  
  1301  // OnRead is invoked when a peer receives a message and it is used to update
  1302  // the bytes received by the server.
  1303  func (sp *serverPeer) OnRead(_ *peer.Peer, bytesRead int, msg wire.Message, err error) {
  1304  	sp.server.AddBytesReceived(uint64(bytesRead))
  1305  }
  1306  
  1307  // OnWrite is invoked when a peer sends a message and it is used to update
  1308  // the bytes sent by the server.
  1309  func (sp *serverPeer) OnWrite(_ *peer.Peer, bytesWritten int, msg wire.Message, err error) {
  1310  	sp.server.AddBytesSent(uint64(bytesWritten))
  1311  }
  1312  
  1313  // OnNotFound is invoked when a peer sends a notfound message.
  1314  func (sp *serverPeer) OnNotFound(p *peer.Peer, msg *wire.MsgNotFound) {
  1315  	if !sp.Connected() {
  1316  		return
  1317  	}
  1318  
  1319  	var numBlocks, numTxns uint32
  1320  	for _, inv := range msg.InvList {
  1321  		switch inv.Type {
  1322  		case wire.InvTypeBlock:
  1323  			numBlocks++
  1324  		case wire.InvTypeWitnessBlock:
  1325  			numBlocks++
  1326  		case wire.InvTypeTx:
  1327  			numTxns++
  1328  		case wire.InvTypeWitnessTx:
  1329  			numTxns++
  1330  		default:
  1331  			peerLog.Debugf("Invalid inv type '%d' in notfound message from %s",
  1332  				inv.Type, sp)
  1333  			sp.Disconnect()
  1334  			return
  1335  		}
  1336  	}
  1337  	if numBlocks > 0 {
  1338  		blockStr := pickNoun(uint64(numBlocks), "block", "blocks")
  1339  		reason := fmt.Sprintf("%d %v not found", numBlocks, blockStr)
  1340  		if sp.addBanScore(20*numBlocks, 0, reason) {
  1341  			return
  1342  		}
  1343  	}
  1344  	if numTxns > 0 {
  1345  		txStr := pickNoun(uint64(numTxns), "transaction", "transactions")
  1346  		reason := fmt.Sprintf("%d %v not found", numTxns, txStr)
  1347  		if sp.addBanScore(0, 10*numTxns, reason) {
  1348  			return
  1349  		}
  1350  	}
  1351  
  1352  	sp.server.syncManager.QueueNotFound(msg, p)
  1353  }
  1354  
  1355  // randomUint16Number returns a random uint16 in a specified input range.  Note
  1356  // that the range is zero-based; if you pass it 1800, you will get
  1357  // values from 0 to 1799 (that is, in [0, max)).
  1358  func randomUint16Number(max uint16) uint16 {
  1359  	// In order to avoid modulo bias and ensure every possible outcome in
  1360  	// [0, max) has equal probability, the random number must be sampled
  1361  	// from a random source that has a range limited to a multiple of the
  1362  	// modulus.
  1363  	var randomNumber uint16
  1364  	var limitRange = (math.MaxUint16 / max) * max
  1365  	for {
  1366  		binary.Read(rand.Reader, binary.LittleEndian, &randomNumber)
  1367  		if randomNumber < limitRange {
  1368  			return (randomNumber % max)
  1369  		}
  1370  	}
  1371  }
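
// Concretely, for max = 1800 the limit computed above is
// (65535/1800)*1800 = 36*1800 = 64800 using integer division, so draws of
// 64800 and above are rejected and retried.  Every residue class modulo
// 1800 then has exactly 36 acceptable values, which is what eliminates the
// modulo bias:
//
//	limitRange := (uint16(math.MaxUint16) / 1800) * 1800 // 64800
//	// a draw of 64799 is kept (64799 % 1800 == 1799);
//	// a draw of 64800 or above is discarded and redrawn.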
  1372  
  1373  // AddRebroadcastInventory adds 'iv' to the list of inventories to be
  1374  // rebroadcasted at random intervals until they show up in a block.
  1375  func (s *server) AddRebroadcastInventory(iv *wire.InvVect, data interface{}) {
  1376  	// Ignore if shutting down.
  1377  	if atomic.LoadInt32(&s.shutdown) != 0 {
  1378  		return
  1379  	}
  1380  
  1381  	s.modifyRebroadcastInv <- broadcastInventoryAdd{invVect: iv, data: data}
  1382  }
  1383  
  1384  // RemoveRebroadcastInventory removes 'iv' from the list of items to be
  1385  // rebroadcasted if present.
  1386  func (s *server) RemoveRebroadcastInventory(iv *wire.InvVect) {
  1387  	// Ignore if shutting down.
  1388  	if atomic.LoadInt32(&s.shutdown) != 0 {
  1389  		return
  1390  	}
  1391  
  1392  	s.modifyRebroadcastInv <- broadcastInventoryDel(iv)
  1393  }
  1394  
  1395  // relayTransactions generates and relays inventory vectors for all of the
  1396  // passed transactions to all connected peers.
  1397  func (s *server) relayTransactions(txns []*mempool.TxDesc) {
  1398  	for _, txD := range txns {
  1399  		iv := wire.NewInvVect(wire.InvTypeTx, txD.Tx.Hash())
  1400  		s.RelayInventory(iv, txD)
  1401  	}
  1402  }
  1403  
  1404  // AnnounceNewTransactions generates and relays inventory vectors and notifies
  1405  // both websocket and getblocktemplate long poll clients of the passed
  1406  // transactions.  This function should be called whenever new transactions
  1407  // are added to the mempool.
  1408  func (s *server) AnnounceNewTransactions(txns []*mempool.TxDesc) {
  1409  	// Generate and relay inventory vectors for all newly accepted
  1410  	// transactions.
  1411  	s.relayTransactions(txns)
  1412  
  1413  	// Notify both websocket and getblocktemplate long poll clients of all
  1414  	// newly accepted transactions.
  1415  	if s.rpcServer != nil {
  1416  		s.rpcServer.NotifyNewTransactions(txns)
  1417  	}
  1418  }
  1419  
  1420  // TransactionConfirmed marks the passed transaction, which has gained one
  1421  // confirmation on the main chain, as no longer needing rebroadcasting.
  1422  func (s *server) TransactionConfirmed(tx *palcutil.Tx) {
  1423  	// Rebroadcasting is only necessary when the RPC server is active.
  1424  	if s.rpcServer == nil {
  1425  		return
  1426  	}
  1427  
  1428  	iv := wire.NewInvVect(wire.InvTypeTx, tx.Hash())
  1429  	s.RemoveRebroadcastInventory(iv)
  1430  }
  1431  
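// A hedged sketch of how the rebroadcast entry points above fit
// together; the submission step is an assumption about the RPC caller
// (e.g. sendrawtransaction) rather than code in this file, and tx/txD
// are placeholders:
//
//	iv := wire.NewInvVect(wire.InvTypeTx, tx.Hash())
//	s.AddRebroadcastInventory(iv, txD) // submitted via RPC
//	// ... rebroadcastHandler periodically re-relays the inventory ...
//	s.TransactionConfirmed(tx) // first confirmation: stop rebroadcasting
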
  1432  // pushTxMsg sends a tx message for the provided transaction hash to the
  1433  // connected peer.  An error is returned if the transaction hash is not known.
  1434  func (s *server) pushTxMsg(sp *serverPeer, hash *chainhash.Hash, doneChan chan<- struct{},
  1435  	waitChan <-chan struct{}, encoding wire.MessageEncoding) error {
  1436  
  1437  	// Attempt to fetch the requested transaction from the pool.  A
  1438  	// call could be made to check for existence first, but simply trying
  1439  	// to fetch a missing transaction results in the same behavior.
  1440  	tx, err := s.txMemPool.FetchTransaction(hash)
  1441  	if err != nil {
  1442  		peerLog.Tracef("Unable to fetch tx %v from transaction "+
  1443  			"pool: %v", hash, err)
  1444  
  1445  		if doneChan != nil {
  1446  			doneChan <- struct{}{}
  1447  		}
  1448  		return err
  1449  	}
  1450  
  1451  	// Once we have fetched data wait for any previous operation to finish.
  1452  	if waitChan != nil {
  1453  		<-waitChan
  1454  	}
  1455  
  1456  	sp.QueueMessageWithEncoding(tx.MsgTx(), doneChan, encoding)
  1457  
  1458  	return nil
  1459  }
  1460  
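// The doneChan/waitChan pair used by pushTxMsg above (and by the block
// pushers below) forms a simple ordering protocol: each push receives
// on waitChan -- the previous push's doneChan -- before queueing its
// message, and its own doneChan is signalled once the message has been
// handled.  A hedged sketch of a hypothetical caller chaining two
// pushes so replies go out in request order; txHash, blkHash and
// encoding are placeholders:
//
//	first := make(chan struct{}, 1)
//	second := make(chan struct{}, 1)
//	s.pushTxMsg(sp, txHash, first, nil, encoding)
//	s.pushBlockMsg(sp, blkHash, second, first, encoding)
//	<-second // both messages have been processed, in order
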
  1461  // pushBlockMsg sends a block message for the provided block hash to the
  1462  // connected peer.  An error is returned if the block hash is not known.
  1463  func (s *server) pushBlockMsg(sp *serverPeer, hash *chainhash.Hash, doneChan chan<- struct{},
  1464  	waitChan <-chan struct{}, encoding wire.MessageEncoding) error {
  1465  
  1466  	// Fetch the raw block bytes from the database.
  1467  	var blockBytes []byte
  1468  	err := sp.server.db.View(func(dbTx database.Tx) error {
  1469  		var err error
  1470  		blockBytes, err = dbTx.FetchBlock(hash)
  1471  		return err
  1472  	})
  1473  	if err != nil {
  1474  		peerLog.Tracef("Unable to fetch requested block hash %v: %v",
  1475  			hash, err)
  1476  
  1477  		if doneChan != nil {
  1478  			doneChan <- struct{}{}
  1479  		}
  1480  		return err
  1481  	}
  1482  
  1483  	// Deserialize the block.
  1484  	var msgBlock wire.MsgBlock
  1485  	err = msgBlock.Deserialize(bytes.NewReader(blockBytes))
  1486  	if err != nil {
  1487  		peerLog.Tracef("Unable to deserialize requested block hash "+
  1488  			"%v: %v", hash, err)
  1489  
  1490  		if doneChan != nil {
  1491  			doneChan <- struct{}{}
  1492  		}
  1493  		return err
  1494  	}
  1495  
  1496  	// Once we have fetched data wait for any previous operation to finish.
  1497  	if waitChan != nil {
  1498  		<-waitChan
  1499  	}
  1500  
  1501  	// We only send the channel for this message if we aren't sending
  1502  	// an inv straight after.
  1503  	var dc chan<- struct{}
  1504  	continueHash := sp.continueHash
  1505  	sendInv := continueHash != nil && continueHash.IsEqual(hash)
  1506  	if !sendInv {
  1507  		dc = doneChan
  1508  	}
  1509  	sp.QueueMessageWithEncoding(&msgBlock, dc, encoding)
  1510  
  1511  	// When the peer requests the final block that was advertised in
  1512  	// response to a getblocks message which requested more blocks than
  1513  	// would fit into a single message, send it a new inventory message
  1514  	// to trigger it to issue another getblocks message for the next
  1515  	// batch of inventory.
  1516  	if sendInv {
  1517  		best := sp.server.chain.BestSnapshot()
  1518  		invMsg := wire.NewMsgInvSizeHint(1)
  1519  		iv := wire.NewInvVect(wire.InvTypeBlock, &best.Hash)
  1520  		invMsg.AddInvVect(iv)
  1521  		sp.QueueMessage(invMsg, doneChan)
  1522  		sp.continueHash = nil
  1523  	}
  1524  	return nil
  1525  }
  1526  
  1527  // pushMerkleBlockMsg sends a merkleblock message for the provided block hash to
  1528  // the connected peer.  Since a merkle block requires the peer to have a filter
  1529  // loaded, this call will simply be ignored if there is no filter loaded.  An
  1530  // error is returned if the block hash is not known.
  1531  func (s *server) pushMerkleBlockMsg(sp *serverPeer, hash *chainhash.Hash,
  1532  	doneChan chan<- struct{}, waitChan <-chan struct{}, encoding wire.MessageEncoding) error {
  1533  
  1534  	// Do not send a response if the peer doesn't have a filter loaded.
  1535  	if !sp.filter.IsLoaded() {
  1536  		if doneChan != nil {
  1537  			doneChan <- struct{}{}
  1538  		}
  1539  		return nil
  1540  	}
  1541  
  1542  	// Fetch the requested block from the chain.
  1543  	blk, err := sp.server.chain.BlockByHash(hash)
  1544  	if err != nil {
  1545  		peerLog.Tracef("Unable to fetch requested block hash %v: %v",
  1546  			hash, err)
  1547  
  1548  		if doneChan != nil {
  1549  			doneChan <- struct{}{}
  1550  		}
  1551  		return err
  1552  	}
  1553  
  1554  	// Generate a merkle block by filtering the requested block according
  1555  	// to the filter for the peer.
  1556  	merkle, matchedTxIndices := bloom.NewMerkleBlock(blk, sp.filter)
  1557  
  1558  	// Once we have fetched data wait for any previous operation to finish.
  1559  	if waitChan != nil {
  1560  		<-waitChan
  1561  	}
  1562  
  1563  	// Send the merkleblock.  Only send the done channel with this message
  1564  	// if no transactions will be sent afterwards.
  1565  	var dc chan<- struct{}
  1566  	if len(matchedTxIndices) == 0 {
  1567  		dc = doneChan
  1568  	}
  1569  	sp.QueueMessage(merkle, dc)
  1570  
  1571  	// Finally, send any matched transactions.
  1572  	blkTransactions := blk.MsgBlock().Transactions
  1573  	for i, txIndex := range matchedTxIndices {
  1574  		// Only send the done channel on the final transaction.
  1575  		var dc chan<- struct{}
  1576  		if i == len(matchedTxIndices)-1 {
  1577  			dc = doneChan
  1578  		}
  1579  		if txIndex < uint32(len(blkTransactions)) {
  1580  			sp.QueueMessageWithEncoding(blkTransactions[txIndex], dc,
  1581  				encoding)
  1582  		}
  1583  	}
  1584  
  1585  	return nil
  1586  }
  1587  
  1588  // handleUpdatePeerHeights updates the heights of all peers who were known to
  1589  // announce a block we recently accepted.
  1590  func (s *server) handleUpdatePeerHeights(state *peerState, umsg updatePeerHeightsMsg) {
  1591  	state.forAllPeers(func(sp *serverPeer) {
  1592  		// The origin peer should already have the updated height.
  1593  		if sp.Peer == umsg.originPeer {
  1594  			return
  1595  		}
  1596  
  1597  		// This is a pointer to the underlying memory which doesn't
  1598  		// change.
  1599  		latestBlkHash := sp.LastAnnouncedBlock()
  1600  
  1601  		// Skip this peer if it hasn't recently announced any new blocks.
  1602  		if latestBlkHash == nil {
  1603  			return
  1604  		}
  1605  
  1606  		// If the peer has recently announced a block, and this block
  1607  		// matches our newly accepted block, then update their block
  1608  		// height.
  1609  		if *latestBlkHash == *umsg.newHash {
  1610  			sp.UpdateLastBlockHeight(umsg.newHeight)
  1611  			sp.UpdateLastAnnouncedBlock(nil)
  1612  		}
  1613  	})
  1614  }
  1615  
  1616  // handleAddPeerMsg deals with adding new peers.  It is invoked from the
  1617  // peerHandler goroutine.
  1618  func (s *server) handleAddPeerMsg(state *peerState, sp *serverPeer) bool {
  1619  	if sp == nil || !sp.Connected() {
  1620  		return false
  1621  	}
  1622  
  1623  	// Disconnect peers with unwanted user agents.
  1624  	if sp.HasUndesiredUserAgent(s.agentBlacklist, s.agentWhitelist) {
  1625  		sp.Disconnect()
  1626  		return false
  1627  	}
  1628  
  1629  	// Ignore new peers if we're shutting down.
  1630  	if atomic.LoadInt32(&s.shutdown) != 0 {
  1631  		srvrLog.Infof("New peer %s ignored - server is shutting down", sp)
  1632  		sp.Disconnect()
  1633  		return false
  1634  	}
  1635  
  1636  	// Disconnect banned peers.
  1637  	host, _, err := net.SplitHostPort(sp.Addr())
  1638  	if err != nil {
  1639  		srvrLog.Debugf("can't split hostport %v", err)
  1640  		sp.Disconnect()
  1641  		return false
  1642  	}
  1643  	if banEnd, ok := state.banned[host]; ok {
  1644  		if time.Now().Before(banEnd) {
  1645  			srvrLog.Debugf("Peer %s is banned for another %v - disconnecting",
  1646  				host, time.Until(banEnd))
  1647  			sp.Disconnect()
  1648  			return false
  1649  		}
  1650  
  1651  		srvrLog.Infof("Peer %s is no longer banned", host)
  1652  		delete(state.banned, host)
  1653  	}
  1654  
  1655  	// TODO: Check for max peers from a single IP.
  1656  
  1657  	// Limit max number of total peers.
  1658  	if state.Count() >= cfg.MaxPeers {
  1659  		srvrLog.Infof("Max peers reached [%d] - disconnecting peer %s",
  1660  			cfg.MaxPeers, sp)
  1661  		sp.Disconnect()
  1662  		// TODO: how to handle permanent peers here?
  1663  		// they should be rescheduled.
  1664  		return false
  1665  	}
  1666  
  1667  	// Add the new peer and start it.
  1668  	srvrLog.Debugf("New peer %s", sp)
  1669  	if sp.Inbound() {
  1670  		state.inboundPeers[sp.ID()] = sp
  1671  	} else {
  1672  		state.outboundGroups[addrmgr.GroupKey(sp.NA())]++
  1673  		if sp.persistent {
  1674  			state.persistentPeers[sp.ID()] = sp
  1675  		} else {
  1676  			state.outboundPeers[sp.ID()] = sp
  1677  		}
  1678  	}
  1679  
  1680  	// Update the address' last seen time if the peer has acknowledged
  1681  	// our version and has sent us its version as well.
  1682  	if sp.VerAckReceived() && sp.VersionKnown() && sp.NA() != nil {
  1683  		s.addrManager.Connected(sp.NA())
  1684  	}
  1685  
  1686  	// Signal the sync manager this peer is a new sync candidate.
  1687  	s.syncManager.NewPeer(sp.Peer)
  1688  
  1689  	// Update the address manager and request known addresses from the
  1690  	// remote peer for outbound connections. This is skipped when running on
  1691  	// the simulation test network since it is only intended to connect to
  1692  	// specified peers and actively avoids advertising and connecting to
  1693  	// discovered peers.
  1694  	if !cfg.SimNet && !sp.Inbound() {
  1695  		// Advertise the local address when the server accepts incoming
  1696  		// connections and it believes itself to be close to the best
  1697  		// known tip.
  1698  		if !cfg.DisableListen && s.syncManager.IsCurrent() {
  1699  			// Get address that best matches.
  1700  			lna := s.addrManager.GetBestLocalAddress(sp.NA())
  1701  			if addrmgr.IsRoutable(lna) {
  1702  				// Filter addresses the peer already knows about.
  1703  				addresses := []*wire.NetAddress{lna}
  1704  				sp.pushAddrMsg(addresses)
  1705  			}
  1706  		}
  1707  
  1708  		// Request known addresses if the server address manager needs
  1709  		// more and the peer has a protocol version new enough to
  1710  		// include a timestamp with addresses.
  1711  		hasTimestamp := sp.ProtocolVersion() >= wire.NetAddressTimeVersion
  1712  		if s.addrManager.NeedMoreAddresses() && hasTimestamp {
  1713  			sp.QueueMessage(wire.NewMsgGetAddr(), nil)
  1714  		}
  1715  
  1716  		// Mark the address as a known good address.
  1717  		s.addrManager.Good(sp.NA())
  1718  	}
  1719  
  1720  	return true
  1721  }
  1722  
  1723  // handleDonePeerMsg deals with peers that have signalled they are done.  It is
  1724  // invoked from the peerHandler goroutine.
  1725  func (s *server) handleDonePeerMsg(state *peerState, sp *serverPeer) {
  1726  	var list map[int32]*serverPeer
  1727  	if sp.persistent {
  1728  		list = state.persistentPeers
  1729  	} else if sp.Inbound() {
  1730  		list = state.inboundPeers
  1731  	} else {
  1732  		list = state.outboundPeers
  1733  	}
  1734  
  1735  	// Regardless of whether the peer was found in our list, we'll inform
  1736  	// our connection manager about the disconnection. This can happen if we
  1737  	// process a peer's `done` message before its `add`.
  1738  	if !sp.Inbound() {
  1739  		if sp.persistent {
  1740  			s.connManager.Disconnect(sp.connReq.ID())
  1741  		} else {
  1742  			s.connManager.Remove(sp.connReq.ID())
  1743  			go s.connManager.NewConnReq()
  1744  		}
  1745  	}
  1746  
  1747  	if _, ok := list[sp.ID()]; ok {
  1748  		if !sp.Inbound() && sp.VersionKnown() {
  1749  			state.outboundGroups[addrmgr.GroupKey(sp.NA())]--
  1750  		}
  1751  		delete(list, sp.ID())
  1752  		srvrLog.Debugf("Removed peer %s", sp)
  1753  		return
  1754  	}
  1755  }
  1756  
  1757  // handleBanPeerMsg deals with banning peers.  It is invoked from the
  1758  // peerHandler goroutine.
  1759  func (s *server) handleBanPeerMsg(state *peerState, sp *serverPeer) {
  1760  	host, _, err := net.SplitHostPort(sp.Addr())
  1761  	if err != nil {
  1762  		srvrLog.Debugf("can't split ban peer %s %v", sp.Addr(), err)
  1763  		return
  1764  	}
  1765  	direction := directionString(sp.Inbound())
  1766  	srvrLog.Infof("Banned peer %s (%s) for %v", host, direction,
  1767  		cfg.BanDuration)
  1768  	state.banned[host] = time.Now().Add(cfg.BanDuration)
  1769  }
  1770  
  1771  // handleRelayInvMsg deals with relaying inventory to peers that are not already
  1772  // known to have it.  It is invoked from the peerHandler goroutine.
  1773  func (s *server) handleRelayInvMsg(state *peerState, msg relayMsg) {
  1774  	state.forAllPeers(func(sp *serverPeer) {
  1775  		if !sp.Connected() {
  1776  			return
  1777  		}
  1778  
  1779  		// If the inventory is a block and the peer prefers headers,
  1780  		// generate and send a headers message instead of an inventory
  1781  		// message.
  1782  		if msg.invVect.Type == wire.InvTypeBlock && sp.WantsHeaders() {
  1783  			blockHeader, ok := msg.data.(wire.BlockHeader)
  1784  			if !ok {
  1785  				peerLog.Warnf("Underlying data for headers" +
  1786  					" is not a block header")
  1787  				return
  1788  			}
  1789  			msgHeaders := wire.NewMsgHeaders()
  1790  			if err := msgHeaders.AddBlockHeader(&blockHeader); err != nil {
  1791  				peerLog.Errorf("Failed to add block"+
  1792  					" header: %v", err)
  1793  				return
  1794  			}
  1795  			sp.QueueMessage(msgHeaders, nil)
  1796  			return
  1797  		}
  1798  
  1799  		if msg.invVect.Type == wire.InvTypeTx {
  1800  			// Don't relay the transaction to the peer when it has
  1801  			// transaction relaying disabled.
  1802  			if sp.relayTxDisabled() {
  1803  				return
  1804  			}
  1805  
  1806  			txD, ok := msg.data.(*mempool.TxDesc)
  1807  			if !ok {
  1808  				peerLog.Warnf("Underlying data for tx inv "+
  1809  					"relay is not a *mempool.TxDesc: %T",
  1810  					msg.data)
  1811  				return
  1812  			}
  1813  
  1814  			// Don't relay the transaction if the transaction fee-per-kb
  1815  			// is less than the peer's feefilter.
  1816  			feeFilter := atomic.LoadInt64(&sp.feeFilter)
  1817  			if feeFilter > 0 && txD.FeePerKB < feeFilter {
  1818  				return
  1819  			}
  1820  
  1821  			// Don't relay the transaction if there is a bloom
  1822  			// filter loaded and the transaction doesn't match it.
  1823  			if sp.filter.IsLoaded() {
  1824  				if !sp.filter.MatchTxAndUpdate(txD.Tx) {
  1825  					return
  1826  				}
  1827  			}
  1828  		}
  1829  
  1830  		// Queue the inventory to be relayed with the next batch.
  1831  		// It will be ignored if the peer is already known to
  1832  		// have the inventory.
  1833  		sp.QueueInventory(msg.invVect)
  1834  	})
  1835  }
  1836  
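// A worked example of the feefilter check in handleRelayInvMsg above
// (values are illustrative): if a peer has advertised a feefilter of
// 1000 satoshis/kB and a transaction enters the mempool with
// FeePerKB = 900, the inventory is silently skipped for that peer,
// while peers with no filter set (feeFilter == 0) still receive it.
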
  1837  // handleBroadcastMsg deals with broadcasting messages to peers.  It is invoked
  1838  // from the peerHandler goroutine.
  1839  func (s *server) handleBroadcastMsg(state *peerState, bmsg *broadcastMsg) {
  1840  	state.forAllPeers(func(sp *serverPeer) {
  1841  		if !sp.Connected() {
  1842  			return
  1843  		}
  1844  
  1845  		for _, ep := range bmsg.excludePeers {
  1846  			if sp == ep {
  1847  				return
  1848  			}
  1849  		}
  1850  
  1851  		sp.QueueMessage(bmsg.message, nil)
  1852  	})
  1853  }
  1854  
  1855  type getConnCountMsg struct {
  1856  	reply chan int32
  1857  }
  1858  
  1859  type getPeersMsg struct {
  1860  	reply chan []*serverPeer
  1861  }
  1862  
  1863  type getOutboundGroup struct {
  1864  	key   string
  1865  	reply chan int
  1866  }
  1867  
  1868  type getAddedNodesMsg struct {
  1869  	reply chan []*serverPeer
  1870  }
  1871  
  1872  type disconnectNodeMsg struct {
  1873  	cmp   func(*serverPeer) bool
  1874  	reply chan error
  1875  }
  1876  
  1877  type connectNodeMsg struct {
  1878  	addr      string
  1879  	permanent bool
  1880  	reply     chan error
  1881  }
  1882  
  1883  type removeNodeMsg struct {
  1884  	cmp   func(*serverPeer) bool
  1885  	reply chan error
  1886  }
  1887  
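// Each message type above pairs a request with a reply channel, giving
// callers synchronous access to peerHandler-owned state without locks.
// The calling pattern is sketched below; ConnectedCount later in this
// file is a concrete instance:
//
//	reply := make(chan int32)
//	s.query <- getConnCountMsg{reply: reply}
//	count := <-reply
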
  1888  // handleQuery is the central handler for all queries and commands from other
  1889  // goroutines related to peer state.
  1890  func (s *server) handleQuery(state *peerState, querymsg interface{}) {
  1891  	switch msg := querymsg.(type) {
  1892  	case getConnCountMsg:
  1893  		nconnected := int32(0)
  1894  		state.forAllPeers(func(sp *serverPeer) {
  1895  			if sp.Connected() {
  1896  				nconnected++
  1897  			}
  1898  		})
  1899  		msg.reply <- nconnected
  1900  
  1901  	case getPeersMsg:
  1902  		peers := make([]*serverPeer, 0, state.Count())
  1903  		state.forAllPeers(func(sp *serverPeer) {
  1904  			if !sp.Connected() {
  1905  				return
  1906  			}
  1907  			peers = append(peers, sp)
  1908  		})
  1909  		msg.reply <- peers
  1910  
  1911  	case connectNodeMsg:
  1912  		// TODO: duplicate oneshots?
  1913  		// Limit max number of total peers.
  1914  		if state.Count() >= cfg.MaxPeers {
  1915  			msg.reply <- errors.New("max peers reached")
  1916  			return
  1917  		}
  1918  		for _, peer := range state.persistentPeers {
  1919  			if peer.Addr() == msg.addr {
  1920  				if msg.permanent {
  1921  					msg.reply <- errors.New("peer already connected")
  1922  				} else {
  1923  					msg.reply <- errors.New("peer exists as a permanent peer")
  1924  				}
  1925  				return
  1926  			}
  1927  		}
  1928  
  1929  		netAddr, err := addrStringToNetAddr(msg.addr)
  1930  		if err != nil {
  1931  			msg.reply <- err
  1932  			return
  1933  		}
  1934  
  1935  		// TODO: if too many, nuke a non-perm peer.
  1936  		go s.connManager.Connect(&connmgr.ConnReq{
  1937  			Addr:      netAddr,
  1938  			Permanent: msg.permanent,
  1939  		})
  1940  		msg.reply <- nil
  1941  	case removeNodeMsg:
  1942  		found := disconnectPeer(state.persistentPeers, msg.cmp, func(sp *serverPeer) {
  1943  			// Keep group counts ok since we remove from
  1944  			// the list now.
  1945  			state.outboundGroups[addrmgr.GroupKey(sp.NA())]--
  1946  		})
  1947  
  1948  		if found {
  1949  			msg.reply <- nil
  1950  		} else {
  1951  			msg.reply <- errors.New("peer not found")
  1952  		}
  1953  	case getOutboundGroup:
  1954  		count, ok := state.outboundGroups[msg.key]
  1955  		if ok {
  1956  			msg.reply <- count
  1957  		} else {
  1958  			msg.reply <- 0
  1959  		}
  1960  	// Request a list of the persistent (added) peers.
  1961  	case getAddedNodesMsg:
  1962  		// Respond with a slice of the relevant peers.
  1963  		peers := make([]*serverPeer, 0, len(state.persistentPeers))
  1964  		for _, sp := range state.persistentPeers {
  1965  			peers = append(peers, sp)
  1966  		}
  1967  		msg.reply <- peers
  1968  	case disconnectNodeMsg:
  1969  		// Check inbound peers. We pass a nil callback since we don't
  1970  		// require any additional actions on disconnect for inbound peers.
  1971  		found := disconnectPeer(state.inboundPeers, msg.cmp, nil)
  1972  		if found {
  1973  			msg.reply <- nil
  1974  			return
  1975  		}
  1976  
  1977  		// Check outbound peers.
  1978  		found = disconnectPeer(state.outboundPeers, msg.cmp, func(sp *serverPeer) {
  1979  			// Keep group counts ok since we remove from
  1980  			// the list now.
  1981  			state.outboundGroups[addrmgr.GroupKey(sp.NA())]--
  1982  		})
  1983  		if found {
  1984  			// If there are multiple outbound connections to the same
  1985  			// ip:port, continue disconnecting them all until no such
  1986  			// peers are found.
  1987  			for found {
  1988  				found = disconnectPeer(state.outboundPeers, msg.cmp, func(sp *serverPeer) {
  1989  					state.outboundGroups[addrmgr.GroupKey(sp.NA())]--
  1990  				})
  1991  			}
  1992  			msg.reply <- nil
  1993  			return
  1994  		}
  1995  
  1996  		msg.reply <- errors.New("peer not found")
  1997  	}
  1998  }
  1999  
  2000  // disconnectPeer attempts to drop the connection of a targeted peer in the
  2001  // passed peer list.  Targets are identified via the passed `compareFunc`,
  2002  // which should return `true` if the passed peer is the target peer.  This
  2003  // function returns true on success and false if the peer cannot be located.
  2004  // If the peer is found and the passed callback `whenFound` isn't nil, it is
  2005  // called with the peer as the argument before the peer is removed from the
  2006  // peerList and disconnected from the server.
  2007  func disconnectPeer(peerList map[int32]*serverPeer, compareFunc func(*serverPeer) bool, whenFound func(*serverPeer)) bool {
  2008  	for addr, peer := range peerList {
  2009  		if compareFunc(peer) {
  2010  			if whenFound != nil {
  2011  				whenFound(peer)
  2012  			}
  2013  
  2014  			// This is ok because we are not continuing
  2015  			// to iterate, so the delete won't corrupt the loop.
  2016  			delete(peerList, addr)
  2017  			peer.Disconnect()
  2018  			return true
  2019  		}
  2020  	}
  2021  	return false
  2022  }
  2023  
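// A hedged usage sketch for disconnectPeer; targetAddr is a
// hypothetical value supplied by the caller:
//
//	found := disconnectPeer(state.inboundPeers, func(sp *serverPeer) bool {
//		return sp.Addr() == targetAddr
//	}, nil)
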
  2024  // newPeerConfig returns the configuration for the given serverPeer.
  2025  func newPeerConfig(sp *serverPeer) *peer.Config {
  2026  	return &peer.Config{
  2027  		Listeners: peer.MessageListeners{
  2028  			OnVersion:      sp.OnVersion,
  2029  			OnVerAck:       sp.OnVerAck,
  2030  			OnMemPool:      sp.OnMemPool,
  2031  			OnTx:           sp.OnTx,
  2032  			OnBlock:        sp.OnBlock,
  2033  			OnInv:          sp.OnInv,
  2034  			OnHeaders:      sp.OnHeaders,
  2035  			OnGetData:      sp.OnGetData,
  2036  			OnGetBlocks:    sp.OnGetBlocks,
  2037  			OnGetHeaders:   sp.OnGetHeaders,
  2038  			OnGetCFilters:  sp.OnGetCFilters,
  2039  			OnGetCFHeaders: sp.OnGetCFHeaders,
  2040  			OnGetCFCheckpt: sp.OnGetCFCheckpt,
  2041  			OnFeeFilter:    sp.OnFeeFilter,
  2042  			OnFilterAdd:    sp.OnFilterAdd,
  2043  			OnFilterClear:  sp.OnFilterClear,
  2044  			OnFilterLoad:   sp.OnFilterLoad,
  2045  			OnGetAddr:      sp.OnGetAddr,
  2046  			OnAddr:         sp.OnAddr,
  2047  			OnRead:         sp.OnRead,
  2048  			OnWrite:        sp.OnWrite,
  2049  			OnNotFound:     sp.OnNotFound,
  2050  
  2051  			// Note: The reference client currently bans peers that send alerts
  2052  			// not signed with its key.  We could verify against their key, but
  2053  			// since the reference client is currently unwilling to support
  2054  			// other implementations' alert messages, we will not relay theirs.
  2055  			OnAlert: nil,
  2056  		},
  2057  		NewestBlock:       sp.newestBlock,
  2058  		HostToNetAddress:  sp.server.addrManager.HostToNetAddress,
  2059  		Proxy:             cfg.Proxy,
  2060  		UserAgentName:     userAgentName,
  2061  		UserAgentVersion:  userAgentVersion,
  2062  		UserAgentComments: cfg.UserAgentComments,
  2063  		ChainParams:       sp.server.chainParams,
  2064  		Services:          sp.server.services,
  2065  		DisableRelayTx:    cfg.BlocksOnly,
  2066  		ProtocolVersion:   peer.MaxProtocolVersion,
  2067  		TrickleInterval:   cfg.TrickleInterval,
  2068  	}
  2069  }
  2070  
  2071  // inboundPeerConnected is invoked by the connection manager when a new inbound
  2072  // connection is established.  It initializes a new inbound server peer
  2073  // instance, associates it with the connection, and starts a goroutine to wait
  2074  // for disconnection.
  2075  func (s *server) inboundPeerConnected(conn net.Conn) {
  2076  	sp := newServerPeer(s, false)
  2077  	sp.isWhitelisted = isWhitelisted(conn.RemoteAddr())
  2078  	sp.Peer = peer.NewInboundPeer(newPeerConfig(sp))
  2079  	sp.AssociateConnection(conn)
  2080  	go s.peerDoneHandler(sp)
  2081  }
  2082  
  2083  // outboundPeerConnected is invoked by the connection manager when a new
  2084  // outbound connection is established.  It initializes a new outbound server
  2085  // peer instance, associates it with the relevant state such as the connection
  2086  // request instance and the connection itself, and finally notifies the address
  2087  // manager of the attempt.
  2088  func (s *server) outboundPeerConnected(c *connmgr.ConnReq, conn net.Conn) {
  2089  	sp := newServerPeer(s, c.Permanent)
  2090  	p, err := peer.NewOutboundPeer(newPeerConfig(sp), c.Addr.String())
  2091  	if err != nil {
  2092  		srvrLog.Debugf("Cannot create outbound peer %s: %v", c.Addr, err)
  2093  		if c.Permanent {
  2094  			s.connManager.Disconnect(c.ID())
  2095  		} else {
  2096  			s.connManager.Remove(c.ID())
  2097  			go s.connManager.NewConnReq()
  2098  		}
  2099  		return
  2100  	}
  2101  	sp.Peer = p
  2102  	sp.connReq = c
  2103  	sp.isWhitelisted = isWhitelisted(conn.RemoteAddr())
  2104  	sp.AssociateConnection(conn)
  2105  	go s.peerDoneHandler(sp)
  2106  }
  2107  
  2108  // peerDoneHandler handles peer disconnects by notifying the server that it's
  2109  // done along with performing other desirable cleanup.
  2110  func (s *server) peerDoneHandler(sp *serverPeer) {
  2111  	sp.WaitForDisconnect()
  2112  	s.donePeers <- sp
  2113  
  2114  	// Only tell sync manager we are gone if we ever told it we existed.
  2115  	if sp.VerAckReceived() {
  2116  		s.syncManager.DonePeer(sp.Peer)
  2117  
  2118  		// Evict any remaining orphans that were sent by the peer.
  2119  		numEvicted := s.txMemPool.RemoveOrphansByTag(mempool.Tag(sp.ID()))
  2120  		if numEvicted > 0 {
  2121  			txmpLog.Debugf("Evicted %d %s from peer %v (id %d)",
  2122  				numEvicted, pickNoun(numEvicted, "orphan",
  2123  					"orphans"), sp, sp.ID())
  2124  		}
  2125  	}
  2126  	close(sp.quit)
  2127  }
  2128  
  2129  // peerHandler is used to handle peer operations such as adding and removing
  2130  // peers to and from the server, banning peers, and broadcasting messages to
  2131  // peers.  It must be run in a goroutine.
  2132  func (s *server) peerHandler() {
  2133  	// Start the address manager and sync manager, both of which are needed
  2134  	// by peers.  This is done here since their lifecycle is closely tied
  2135  	// to this handler and rather than adding more channels to synchronize
  2136  	// things, it's easier and slightly faster to simply start and stop them
  2137  	// in this handler.
  2138  	s.addrManager.Start()
  2139  	s.syncManager.Start()
  2140  
  2141  	srvrLog.Tracef("Starting peer handler")
  2142  
  2143  	state := &peerState{
  2144  		inboundPeers:    make(map[int32]*serverPeer),
  2145  		persistentPeers: make(map[int32]*serverPeer),
  2146  		outboundPeers:   make(map[int32]*serverPeer),
  2147  		banned:          make(map[string]time.Time),
  2148  		outboundGroups:  make(map[string]int),
  2149  	}
  2150  
  2151  	if !cfg.DisableDNSSeed {
  2152  		// Add peers discovered through DNS to the address manager.
  2153  		connmgr.SeedFromDNS(activeNetParams.Params, defaultRequiredServices,
  2154  			btcdLookup, func(addrs []*wire.NetAddress) {
  2155  			// Bitcoind uses a lookup of the dns seeder here. This
  2156  			// is rather strange since the addresses returned by the
  2157  			// DNS seed lookups will vary quite a lot.  To replicate
  2158  			// this behaviour, we treat all addresses as having come
  2159  			// from the first one.
  2160  				s.addrManager.AddAddresses(addrs, addrs[0])
  2161  			})
  2162  	}
  2163  	go s.connManager.Start()
  2164  
  2165  out:
  2166  	for {
  2167  		select {
  2168  		// New peers connected to the server.
  2169  		case p := <-s.newPeers:
  2170  			s.handleAddPeerMsg(state, p)
  2171  
  2172  		// Disconnected peers.
  2173  		case p := <-s.donePeers:
  2174  			s.handleDonePeerMsg(state, p)
  2175  
  2176  		// Block accepted in mainchain or orphan, update peer height.
  2177  		case umsg := <-s.peerHeightsUpdate:
  2178  			s.handleUpdatePeerHeights(state, umsg)
  2179  
  2180  		// Peer to ban.
  2181  		case p := <-s.banPeers:
  2182  			s.handleBanPeerMsg(state, p)
  2183  
  2184  		// New inventory to potentially be relayed to other peers.
  2185  		case invMsg := <-s.relayInv:
  2186  			s.handleRelayInvMsg(state, invMsg)
  2187  
  2188  		// Message to broadcast to all connected peers except those
  2189  		// which are excluded by the message.
  2190  		case bmsg := <-s.broadcast:
  2191  			s.handleBroadcastMsg(state, &bmsg)
  2192  
  2193  		case qmsg := <-s.query:
  2194  			s.handleQuery(state, qmsg)
  2195  
  2196  		case <-s.quit:
  2197  			// Disconnect all peers on server shutdown.
  2198  			state.forAllPeers(func(sp *serverPeer) {
  2199  				srvrLog.Tracef("Shutdown peer %s", sp)
  2200  				sp.Disconnect()
  2201  			})
  2202  			break out
  2203  		}
  2204  	}
  2205  
  2206  	s.connManager.Stop()
  2207  	s.syncManager.Stop()
  2208  	s.addrManager.Stop()
  2209  
  2210  	// Drain channels before exiting so nothing is left waiting around
  2211  	// to send.
  2212  cleanup:
  2213  	for {
  2214  		select {
  2215  		case <-s.newPeers:
  2216  		case <-s.donePeers:
  2217  		case <-s.peerHeightsUpdate:
  2218  		case <-s.relayInv:
  2219  		case <-s.broadcast:
  2220  		case <-s.query:
  2221  		default:
  2222  			break cleanup
  2223  		}
  2224  	}
  2225  	s.wg.Done()
  2226  	srvrLog.Tracef("Peer handler done")
  2227  }
  2228  
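// A note on the drain loop above: other goroutines send on these
// channels (AddPeer, BanPeer, RelayInventory, and the query helpers
// below).  A sender blocked on the unbuffered query channel, e.g.
//
//	s.query <- getConnCountMsg{reply: replyChan}
//
// would otherwise be left waiting once the handler stops selecting, so
// the channels are drained until empty before the final wg.Done.
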
  2229  // AddPeer adds a new peer that has already been connected to the server.
  2230  func (s *server) AddPeer(sp *serverPeer) {
  2231  	s.newPeers <- sp
  2232  }
  2233  
  2234  // BanPeer bans a peer that has already been connected to the server by ip.
  2235  func (s *server) BanPeer(sp *serverPeer) {
  2236  	s.banPeers <- sp
  2237  }
  2238  
  2239  // RelayInventory relays the passed inventory vector to all connected peers
  2240  // that are not already known to have it.
  2241  func (s *server) RelayInventory(invVect *wire.InvVect, data interface{}) {
  2242  	s.relayInv <- relayMsg{invVect: invVect, data: data}
  2243  }
  2244  
  2245  // BroadcastMessage sends msg to all peers currently connected to the server
  2246  // except those in the passed peers to exclude.
  2247  func (s *server) BroadcastMessage(msg wire.Message, exclPeers ...*serverPeer) {
  2248  	// XXX: Need to determine if this is an alert that has already been
  2249  	// broadcast and refrain from broadcasting again.
  2250  	bmsg := broadcastMsg{message: msg, excludePeers: exclPeers}
  2251  	s.broadcast <- bmsg
  2252  }
  2253  
  2254  // ConnectedCount returns the number of currently connected peers.
  2255  func (s *server) ConnectedCount() int32 {
  2256  	replyChan := make(chan int32)
  2257  
  2258  	s.query <- getConnCountMsg{reply: replyChan}
  2259  
  2260  	return <-replyChan
  2261  }
  2262  
  2263  // OutboundGroupCount returns the number of peers connected to the given
  2264  // outbound group key.
  2265  func (s *server) OutboundGroupCount(key string) int {
  2266  	replyChan := make(chan int)
  2267  	s.query <- getOutboundGroup{key: key, reply: replyChan}
  2268  	return <-replyChan
  2269  }
  2270  
  2271  // AddBytesSent adds the passed number of bytes to the total bytes sent counter
  2272  // for the server.  It is safe for concurrent access.
  2273  func (s *server) AddBytesSent(bytesSent uint64) {
  2274  	atomic.AddUint64(&s.bytesSent, bytesSent)
  2275  }
  2276  
  2277  // AddBytesReceived adds the passed number of bytes to the total bytes received
  2278  // counter for the server.  It is safe for concurrent access.
  2279  func (s *server) AddBytesReceived(bytesReceived uint64) {
  2280  	atomic.AddUint64(&s.bytesReceived, bytesReceived)
  2281  }
  2282  
  2283  // NetTotals returns the sum of all bytes received and sent across the network
  2284  // for all peers.  It is safe for concurrent access.
  2285  func (s *server) NetTotals() (uint64, uint64) {
  2286  	return atomic.LoadUint64(&s.bytesReceived),
  2287  		atomic.LoadUint64(&s.bytesSent)
  2288  }
  2289  
  2290  // UpdatePeerHeights updates the heights of all peers who have announced
  2291  // the latest connected main chain block, or a recognized orphan. These height
  2292  // updates allow us to dynamically refresh peer heights, ensuring sync peer
  2293  // selection has access to the latest block heights for each peer.
  2294  func (s *server) UpdatePeerHeights(latestBlkHash *chainhash.Hash, latestHeight int32, updateSource *peer.Peer) {
  2295  	s.peerHeightsUpdate <- updatePeerHeightsMsg{
  2296  		newHash:    latestBlkHash,
  2297  		newHeight:  latestHeight,
  2298  		originPeer: updateSource,
  2299  	}
  2300  }
  2301  
  2302  // rebroadcastHandler keeps track of user-submitted inventories that we have
  2303  // sent out but have not yet made it into a block. We periodically rebroadcast
  2304  // them in case our peers restarted or otherwise lost track of them.
  2305  func (s *server) rebroadcastHandler() {
  2306  	// Wait 5 min before first tx rebroadcast.
  2307  	timer := time.NewTimer(5 * time.Minute)
  2308  	pendingInvs := make(map[wire.InvVect]interface{})
  2309  
  2310  out:
  2311  	for {
  2312  		select {
  2313  		case riv := <-s.modifyRebroadcastInv:
  2314  			switch msg := riv.(type) {
  2315  			// Incoming InvVects are added to our map of RPC txs.
  2316  			case broadcastInventoryAdd:
  2317  				pendingInvs[*msg.invVect] = msg.data
  2318  
  2319  			// When an InvVect has been added to a block, we can
  2320  			// now remove it, if it was present.
  2321  			case broadcastInventoryDel:
  2322  				delete(pendingInvs, *msg)
  2323  			}
  2324  
  2325  		case <-timer.C:
  2326  			// Any inventory we have has not made it into a block
  2327  			// yet. We periodically resubmit them until they have.
  2328  			for iv, data := range pendingInvs {
  2329  				ivCopy := iv
  2330  				s.RelayInventory(&ivCopy, data)
  2331  			}
  2332  
  2333  			// Process at a random time up to 30mins (in seconds)
  2334  			// in the future.
  2335  			timer.Reset(time.Second *
  2336  				time.Duration(randomUint16Number(1800)))
  2337  
  2338  		case <-s.quit:
  2339  			break out
  2340  		}
  2341  	}
  2342  
  2343  	timer.Stop()
  2344  
  2345  	// Drain channels before exiting so nothing is left waiting around
  2346  	// to send.
  2347  cleanup:
  2348  	for {
  2349  		select {
  2350  		case <-s.modifyRebroadcastInv:
  2351  		default:
  2352  			break cleanup
  2353  		}
  2354  	}
  2355  	s.wg.Done()
  2356  }
  2357  
  2358  // Start begins accepting connections from peers.
  2359  func (s *server) Start() {
  2360  	// Already started?
  2361  	if atomic.AddInt32(&s.started, 1) != 1 {
  2362  		return
  2363  	}
  2364  
  2365  	srvrLog.Trace("Starting server")
  2366  
  2367  	// Record the server startup time for use by the uptime RPC command.
  2368  	s.startupTime = time.Now().Unix()
  2369  
  2370  	// Start the peer handler which in turn starts the address and block
  2371  	// managers.
  2372  	s.wg.Add(1)
  2373  	go s.peerHandler()
  2374  
  2375  	if s.nat != nil {
  2376  		s.wg.Add(1)
  2377  		go s.upnpUpdateThread()
  2378  	}
  2379  
  2380  	if !cfg.DisableRPC {
  2381  		s.wg.Add(1)
  2382  
  2383  		// Start the rebroadcastHandler, which ensures transactions received
  2384  		// by the RPC server are rebroadcast until they are included in a block.
  2385  		go s.rebroadcastHandler()
  2386  
  2387  		s.rpcServer.Start()
  2388  	}
  2389  
  2390  	// Start the CPU miner if generation is enabled.
  2391  	if cfg.Generate {
  2392  		s.cpuMiner.Start()
  2393  	}
  2394  }
  2395  
  2396  // Stop gracefully shuts down the server by stopping and disconnecting all
  2397  // peers and the main listener.
  2398  func (s *server) Stop() error {
  2399  	// Make sure this only happens once.
  2400  	if atomic.AddInt32(&s.shutdown, 1) != 1 {
  2401  		srvrLog.Infof("Server is already in the process of shutting down")
  2402  		return nil
  2403  	}
  2404  
  2405  	srvrLog.Warnf("Server shutting down")
  2406  
  2407  	// Stop the CPU miner if needed
  2408  	s.cpuMiner.Stop()
  2409  
  2410  	// Shutdown the RPC server if it's not disabled.
  2411  	if !cfg.DisableRPC {
  2412  		s.rpcServer.Stop()
  2413  	}
  2414  
  2415  	// Save fee estimator state in the database.
  2416  	s.db.Update(func(tx database.Tx) error {
  2417  		metadata := tx.Metadata()
  2418  		metadata.Put(mempool.EstimateFeeDatabaseKey, s.feeEstimator.Save())
  2419  
  2420  		return nil
  2421  	})
  2422  
  2423  	// Signal the remaining goroutines to quit.
  2424  	close(s.quit)
  2425  	return nil
  2426  }
  2427  
  2428  // WaitForShutdown blocks until the main listener and peer handlers are stopped.
  2429  func (s *server) WaitForShutdown() {
  2430  	s.wg.Wait()
  2431  }
  2432  
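// A minimal sketch of how a caller (such as this package's main) might
// sequence the lifecycle calls above; setup and error handling are
// elided:
//
//	s.Start()
//	defer func() {
//		s.Stop()
//		s.WaitForShutdown()
//	}()
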
  2433  // ScheduleShutdown schedules a server shutdown after the specified duration.
  2434  // It also dynamically adjusts how often to warn that the server is going down
  2435  // based on the remaining duration.
  2436  func (s *server) ScheduleShutdown(duration time.Duration) {
  2437  	// Don't schedule shutdown more than once.
  2438  	if atomic.AddInt32(&s.shutdownSched, 1) != 1 {
  2439  		return
  2440  	}
  2441  	srvrLog.Warnf("Server shutdown in %v", duration)
  2442  	go func() {
  2443  		remaining := duration
  2444  		tickDuration := dynamicTickDuration(remaining)
  2445  		done := time.After(remaining)
  2446  		ticker := time.NewTicker(tickDuration)
  2447  	out:
  2448  		for {
  2449  			select {
  2450  			case <-done:
  2451  				ticker.Stop()
  2452  				s.Stop()
  2453  				break out
  2454  			case <-ticker.C:
  2455  				remaining = remaining - tickDuration
  2456  				if remaining < time.Second {
  2457  					continue
  2458  				}
  2459  
  2460  				// Change tick duration dynamically based on remaining time.
  2461  				newDuration := dynamicTickDuration(remaining)
  2462  				if tickDuration != newDuration {
  2463  					tickDuration = newDuration
  2464  					ticker.Stop()
  2465  					ticker = time.NewTicker(tickDuration)
  2466  				}
  2467  				srvrLog.Warnf("Server shutdown in %v", remaining)
  2468  			}
  2469  		}
  2470  	}()
  2471  }
  2472  
  2473  // parseListeners determines whether each listen address is IPv4 or IPv6 and
  2474  // returns a slice of appropriate net.Addrs to listen on with TCP. It also
  2475  // properly detects addresses which apply to "all interfaces" and adds the
  2476  // address as both IPv4 and IPv6.
  2477  func parseListeners(addrs []string) ([]net.Addr, error) {
  2478  	netAddrs := make([]net.Addr, 0, len(addrs)*2)
  2479  	for _, addr := range addrs {
  2480  		host, _, err := net.SplitHostPort(addr)
  2481  		if err != nil {
  2482  			// Shouldn't happen due to already being normalized.
  2483  			return nil, err
  2484  		}
  2485  
  2486  		// Empty host or host of * on plan9 is both IPv4 and IPv6.
  2487  		if host == "" || (host == "*" && runtime.GOOS == "plan9") {
  2488  			netAddrs = append(netAddrs, simpleAddr{net: "tcp4", addr: addr})
  2489  			netAddrs = append(netAddrs, simpleAddr{net: "tcp6", addr: addr})
  2490  			continue
  2491  		}
  2492  
  2493  		// Strip IPv6 zone id if present since net.ParseIP does not
  2494  		// handle it.
  2495  		zoneIndex := strings.LastIndex(host, "%")
  2496  		if zoneIndex > 0 {
  2497  			host = host[:zoneIndex]
  2498  		}
  2499  
  2500  		// Parse the IP.
  2501  		ip := net.ParseIP(host)
  2502  		if ip == nil {
  2503  			return nil, fmt.Errorf("'%s' is not a valid IP address", host)
  2504  		}
  2505  
  2506  		// To4 returns nil when the IP is not an IPv4 address, so use
  2507  		// this to determine the address type.
  2508  		if ip.To4() == nil {
  2509  			netAddrs = append(netAddrs, simpleAddr{net: "tcp6", addr: addr})
  2510  		} else {
  2511  			netAddrs = append(netAddrs, simpleAddr{net: "tcp4", addr: addr})
  2512  		}
  2513  	}
  2514  	return netAddrs, nil
  2515  }
  2516  
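// A worked example of the rules above (addresses are illustrative):
//
//	parseListeners([]string{":8333", "127.0.0.1:8334", "[::1]:8335"})
//
// returns a tcp4 and a tcp6 entry for the wildcard ":8333", a single
// tcp4 entry for "127.0.0.1:8334", and a single tcp6 entry for
// "[::1]:8335".
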
  2517  func (s *server) upnpUpdateThread() {
  2518  	// Go off immediately to prevent code duplication; thereafter, we renew
  2519  	// the lease every 15 minutes.
  2520  	timer := time.NewTimer(0 * time.Second)
  2521  	lport, _ := strconv.ParseInt(activeNetParams.DefaultPort, 10, 16)
  2522  	first := true
  2523  out:
  2524  	for {
  2525  		select {
  2526  		case <-timer.C:
  2527  			// TODO: pick external port more cleverly
  2528  			// TODO: know which ports we are listening to on an external net.
  2529  			// TODO: if specific listen port doesn't work then ask for wildcard
  2530  			// listen port?
  2531  			// XXX this assumes timeout is in seconds.
  2532  			listenPort, err := s.nat.AddPortMapping("tcp", int(lport), int(lport),
  2533  				"btcd listen port", 20*60)
  2534  			if err != nil {
  2535  				srvrLog.Warnf("can't add UPnP port mapping: %v", err)
  2536  			}
  2537  			if first && err == nil {
  2538  				// TODO: look this up periodically to see if upnp domain changed
  2539  				// and so did ip.
  2540  				externalip, err := s.nat.GetExternalAddress()
  2541  				if err != nil {
  2542  					srvrLog.Warnf("UPnP can't get external address: %v", err)
  2543  					continue out
  2544  				}
  2545  				na := wire.NewNetAddressIPPort(externalip, uint16(listenPort),
  2546  					s.services)
  2547  				err = s.addrManager.AddLocalAddress(na, addrmgr.UpnpPrio)
  2548  				if err != nil {
  2549  					// XXX DeletePortMapping?
  2550  				}
  2551  				srvrLog.Warnf("Successfully bound via UPnP to %s", addrmgr.NetAddressKey(na))
  2552  				first = false
  2553  			}
  2554  			timer.Reset(time.Minute * 15)
  2555  		case <-s.quit:
  2556  			break out
  2557  		}
  2558  	}
  2559  
  2560  	timer.Stop()
  2561  
  2562  	if err := s.nat.DeletePortMapping("tcp", int(lport), int(lport)); err != nil {
  2563  		srvrLog.Warnf("unable to remove UPnP port mapping: %v", err)
  2564  	} else {
  2565  		srvrLog.Debugf("successfully disestablished UPnP port mapping")
  2566  	}
  2567  
  2568  	s.wg.Done()
  2569  }
  2570  
  2571  // setupRPCListeners returns a slice of listeners that are configured for use
  2572  // with the RPC server depending on the configuration settings for listen
  2573  // addresses and TLS.
  2574  func setupRPCListeners() ([]net.Listener, error) {
  2575  	// Setup TLS if not disabled.
  2576  	listenFunc := net.Listen
  2577  	if !cfg.DisableTLS {
  2578  		// Generate the TLS cert and key file if both don't already
  2579  		// exist.
  2580  		if !fileExists(cfg.RPCKey) && !fileExists(cfg.RPCCert) {
  2581  			err := genCertPair(cfg.RPCCert, cfg.RPCKey)
  2582  			if err != nil {
  2583  				return nil, err
  2584  			}
  2585  		}
  2586  		keypair, err := tls.LoadX509KeyPair(cfg.RPCCert, cfg.RPCKey)
  2587  		if err != nil {
  2588  			return nil, err
  2589  		}
  2590  
  2591  		tlsConfig := tls.Config{
  2592  			Certificates: []tls.Certificate{keypair},
  2593  			MinVersion:   tls.VersionTLS12,
  2594  		}
  2595  
  2596  		// Change the standard net.Listen function to the tls one.
  2597  		listenFunc = func(net string, laddr string) (net.Listener, error) {
  2598  			return tls.Listen(net, laddr, &tlsConfig)
  2599  		}
  2600  	}
  2601  
  2602  	netAddrs, err := parseListeners(cfg.RPCListeners)
  2603  	if err != nil {
  2604  		return nil, err
  2605  	}
  2606  
  2607  	listeners := make([]net.Listener, 0, len(netAddrs))
  2608  	for _, addr := range netAddrs {
  2609  		listener, err := listenFunc(addr.Network(), addr.String())
  2610  		if err != nil {
  2611  			rpcsLog.Warnf("Can't listen on %s: %v", addr, err)
  2612  			continue
  2613  		}
  2614  		listeners = append(listeners, listener)
  2615  	}
  2616  
  2617  	return listeners, nil
  2618  }
  2619  
  2620  // newServer returns a new palcd server configured to listen on addr for the
  2621  // bitcoin network type specified by chainParams.  Use Start to begin accepting
  2622  // connections from peers.
  2623  func newServer(listenAddrs, agentBlacklist, agentWhitelist []string,
  2624  	db database.DB, chainParams *chaincfg.Params,
  2625  	interrupt <-chan struct{}) (*server, error) {
  2626  
  2627  	services := defaultServices
  2628  	if cfg.NoPeerBloomFilters {
  2629  		services &^= wire.SFNodeBloom
  2630  	}
  2631  	if cfg.NoCFilters {
  2632  		services &^= wire.SFNodeCF
  2633  	}
  2634  
  2635  	amgr := addrmgr.New(cfg.DataDir, btcdLookup)
  2636  
  2637  	var listeners []net.Listener
  2638  	var nat NAT
  2639  	if !cfg.DisableListen {
  2640  		var err error
  2641  		listeners, nat, err = initListeners(amgr, listenAddrs, services)
  2642  		if err != nil {
  2643  			return nil, err
  2644  		}
  2645  		if len(listeners) == 0 {
  2646  			return nil, errors.New("no valid listen address")
  2647  		}
  2648  	}
  2649  
  2650  	if len(agentBlacklist) > 0 {
  2651  		srvrLog.Infof("User-agent blacklist %s", agentBlacklist)
  2652  	}
  2653  	if len(agentWhitelist) > 0 {
  2654  		srvrLog.Infof("User-agent whitelist %s", agentWhitelist)
  2655  	}
  2656  
  2657  	s := server{
  2658  		chainParams:          chainParams,
  2659  		addrManager:          amgr,
  2660  		newPeers:             make(chan *serverPeer, cfg.MaxPeers),
  2661  		donePeers:            make(chan *serverPeer, cfg.MaxPeers),
  2662  		banPeers:             make(chan *serverPeer, cfg.MaxPeers),
  2663  		query:                make(chan interface{}),
  2664  		relayInv:             make(chan relayMsg, cfg.MaxPeers),
  2665  		broadcast:            make(chan broadcastMsg, cfg.MaxPeers),
  2666  		quit:                 make(chan struct{}),
  2667  		modifyRebroadcastInv: make(chan interface{}),
  2668  		peerHeightsUpdate:    make(chan updatePeerHeightsMsg),
  2669  		nat:                  nat,
  2670  		db:                   db,
  2671  		timeSource:           blockchain.NewMedianTime(),
  2672  		services:             services,
  2673  		sigCache:             txscript.NewSigCache(cfg.SigCacheMaxSize),
  2674  		hashCache:            txscript.NewHashCache(cfg.SigCacheMaxSize),
  2675  		cfCheckptCaches:      make(map[wire.FilterType][]cfHeaderKV),
  2676  		agentBlacklist:       agentBlacklist,
  2677  		agentWhitelist:       agentWhitelist,
  2678  	}
  2679  
  2680  	// Create the transaction and address indexes if needed.
  2681  	//
  2682  	// CAUTION: the txindex needs to be first in the indexes array because
  2683  	// the addrindex uses data from the txindex during catchup.  If the
  2684  	// addrindex is run first, it may not have the transactions from the
  2685  	// current block indexed.
  2686  	var indexes []indexers.Indexer
  2687  	if cfg.TxIndex || cfg.AddrIndex {
  2688  		// Enable transaction index if address index is enabled since it
  2689  		// requires it.
  2690  		if !cfg.TxIndex {
  2691  			indxLog.Infof("Transaction index enabled because it " +
  2692  				"is required by the address index")
  2693  			cfg.TxIndex = true
  2694  		} else {
  2695  			indxLog.Info("Transaction index is enabled")
  2696  		}
  2697  
  2698  		s.txIndex = indexers.NewTxIndex(db)
  2699  		indexes = append(indexes, s.txIndex)
  2700  	}
  2701  	if cfg.AddrIndex {
  2702  		indxLog.Info("Address index is enabled")
  2703  		s.addrIndex = indexers.NewAddrIndex(db, chainParams)
  2704  		indexes = append(indexes, s.addrIndex)
  2705  	}
  2706  	if !cfg.NoCFilters {
  2707  		indxLog.Info("Committed filter index is enabled")
  2708  		s.cfIndex = indexers.NewCfIndex(db, chainParams)
  2709  		indexes = append(indexes, s.cfIndex)
  2710  	}
  2711  
  2712  	// Create an index manager if any of the optional indexes are enabled.
  2713  	var indexManager blockchain.IndexManager
  2714  	if len(indexes) > 0 {
  2715  		indexManager = indexers.NewManager(db, indexes)
  2716  	}
  2717  
  2718  	// Merge given checkpoints with the default ones unless they are disabled.
  2719  	var checkpoints []chaincfg.Checkpoint
  2720  	if !cfg.DisableCheckpoints {
  2721  		checkpoints = mergeCheckpoints(s.chainParams.Checkpoints, cfg.addCheckpoints)
  2722  	}
  2723  
  2724  	// Create a new block chain instance with the appropriate configuration.
  2725  	var err error
  2726  	s.chain, err = blockchain.New(&blockchain.Config{
  2727  		DB:           s.db,
  2728  		Interrupt:    interrupt,
  2729  		ChainParams:  s.chainParams,
  2730  		Checkpoints:  checkpoints,
  2731  		TimeSource:   s.timeSource,
  2732  		SigCache:     s.sigCache,
  2733  		IndexManager: indexManager,
  2734  		HashCache:    s.hashCache,
  2735  	})
  2736  	if err != nil {
  2737  		return nil, err
  2738  	}
  2739  
  2740  	// Search for a FeeEstimator state in the database. If none can be found
  2741  	// or if it cannot be loaded, create a new one.
  2742  	db.Update(func(tx database.Tx) error {
  2743  		metadata := tx.Metadata()
  2744  		feeEstimationData := metadata.Get(mempool.EstimateFeeDatabaseKey)
  2745  		if feeEstimationData != nil {
  2746  			// Delete it from the database so that we don't try to restore the
  2747  			// same thing again somehow.
  2748  			metadata.Delete(mempool.EstimateFeeDatabaseKey)
  2749  
  2750  			// If there is an error, log it and make a new fee estimator.
  2751  			var err error
  2752  			s.feeEstimator, err = mempool.RestoreFeeEstimator(feeEstimationData)
  2753  
  2754  			if err != nil {
  2755  				peerLog.Errorf("Failed to restore fee estimator %v", err)
  2756  			}
  2757  		}
  2758  
  2759  		return nil
  2760  	})
  2761  
  2762  	// If no feeEstimator has been found, or if the one that has been found
  2763  	// is behind somehow, create a new one and start over.
  2764  	if s.feeEstimator == nil || s.feeEstimator.LastKnownHeight() != s.chain.BestSnapshot().Height {
  2765  		s.feeEstimator = mempool.NewFeeEstimator(
  2766  			mempool.DefaultEstimateFeeMaxRollback,
  2767  			mempool.DefaultEstimateFeeMinRegisteredBlocks)
  2768  	}
  2769  
  2770  	txC := mempool.Config{
  2771  		Policy: mempool.Policy{
  2772  			DisableRelayPriority: cfg.NoRelayPriority,
  2773  			AcceptNonStd:         cfg.RelayNonStd,
  2774  			FreeTxRelayLimit:     cfg.FreeTxRelayLimit,
  2775  			MaxOrphanTxs:         cfg.MaxOrphanTxs,
  2776  			MaxOrphanTxSize:      defaultMaxOrphanTxSize,
  2777  			MaxSigOpCostPerTx:    blockchain.MaxBlockSigOpsCost / 4,
  2778  			MinRelayTxFee:        cfg.minRelayTxFee,
  2779  			MaxTxVersion:         2,
  2780  			RejectReplacement:    cfg.RejectReplacement,
  2781  		},
  2782  		ChainParams:    chainParams,
  2783  		FetchUtxoView:  s.chain.FetchUtxoView,
  2784  		BestHeight:     func() int32 { return s.chain.BestSnapshot().Height },
  2785  		MedianTimePast: func() time.Time { return s.chain.BestSnapshot().MedianTime },
  2786  		CalcSequenceLock: func(tx *palcutil.Tx, view *blockchain.UtxoViewpoint) (*blockchain.SequenceLock, error) {
  2787  			return s.chain.CalcSequenceLock(tx, view, true)
  2788  		},
  2789  		IsDeploymentActive: s.chain.IsDeploymentActive,
  2790  		SigCache:           s.sigCache,
  2791  		HashCache:          s.hashCache,
  2792  		AddrIndex:          s.addrIndex,
  2793  		FeeEstimator:       s.feeEstimator,
  2794  	}
  2795  	s.txMemPool = mempool.New(&txC)
  2796  
  2797  	s.syncManager, err = netsync.New(&netsync.Config{
  2798  		PeerNotifier:       &s,
  2799  		Chain:              s.chain,
  2800  		TxMemPool:          s.txMemPool,
  2801  		ChainParams:        s.chainParams,
  2802  		DisableCheckpoints: cfg.DisableCheckpoints,
  2803  		MaxPeers:           cfg.MaxPeers,
  2804  		FeeEstimator:       s.feeEstimator,
  2805  	})
  2806  	if err != nil {
  2807  		return nil, err
  2808  	}
  2809  
  2810  	// Create the mining policy and block template generator based on the
  2811  	// configuration options.
  2812  	//
  2813  	// NOTE: The CPU miner relies on the mempool, so the mempool has to be
  2814  	// created before calling the function to create the CPU miner.
  2815  	policy := mining.Policy{
  2816  		BlockMinWeight:    cfg.BlockMinWeight,
  2817  		BlockMaxWeight:    cfg.BlockMaxWeight,
  2818  		BlockMinSize:      cfg.BlockMinSize,
  2819  		BlockMaxSize:      cfg.BlockMaxSize,
  2820  		BlockPrioritySize: cfg.BlockPrioritySize,
  2821  		TxMinFreeFee:      cfg.minRelayTxFee,
  2822  	}
  2823  	blockTemplateGenerator := mining.NewBlkTmplGenerator(&policy,
  2824  		s.chainParams, s.txMemPool, s.chain, s.timeSource,
  2825  		s.sigCache, s.hashCache)
  2826  	s.cpuMiner = cpuminer.New(&cpuminer.Config{
  2827  		ChainParams:            chainParams,
  2828  		BlockTemplateGenerator: blockTemplateGenerator,
  2829  		MiningAddrs:            cfg.miningAddrs,
  2830  		ProcessBlock:           s.syncManager.ProcessBlock,
  2831  		ConnectedCount:         s.ConnectedCount,
  2832  		IsCurrent:              s.syncManager.IsCurrent,
  2833  	})
  2834  
  2835  	// Only setup a function to return new addresses to connect to when
  2836  	// not running in connect-only mode.  The simulation network is always
  2837  	// in connect-only mode since it is only intended to connect to
  2838  	// specified peers and actively avoid advertising and connecting to
  2839  	// discovered peers in order to prevent it from becoming a public test
  2840  	// network.
  2841  	var newAddressFunc func() (net.Addr, error)
  2842  	if !cfg.SimNet && len(cfg.ConnectPeers) == 0 {
  2843  		newAddressFunc = func() (net.Addr, error) {
  2844  			for tries := 0; tries < 100; tries++ {
  2845  				addr := s.addrManager.GetAddress()
  2846  				if addr == nil {
  2847  					break
  2848  				}
  2849  
  2850  				// Address will not be invalid, local or unroutable
  2851  				// because addrmanager rejects those on addition.
  2852  				// Just check that we don't already have an address
  2853  				// in the same group so that we are not connecting
  2854  				// to the same network segment at the expense of
  2855  				// others.
  2856  				key := addrmgr.GroupKey(addr.NetAddress())
  2857  				if s.OutboundGroupCount(key) != 0 {
  2858  					continue
  2859  				}
  2860  
  2861  				// Only allow recently-attempted nodes (within the
  2862  				// last 10 minutes) after we have failed 30 times.
  2863  				if tries < 30 && time.Since(addr.LastAttempt()) < 10*time.Minute {
  2864  					continue
  2865  				}
  2866  
  2867  				// Skip non-default ports until 50 tries have failed.
  2868  				if tries < 50 && fmt.Sprintf("%d", addr.NetAddress().Port) !=
  2869  					activeNetParams.DefaultPort {
  2870  					continue
  2871  				}
  2872  
  2873  				// Mark an attempt for the valid address.
  2874  				s.addrManager.Attempt(addr.NetAddress())
  2875  
  2876  				addrString := addrmgr.NetAddressKey(addr.NetAddress())
  2877  				return addrStringToNetAddr(addrString)
  2878  			}
  2879  
  2880  			return nil, errors.New("no valid connect address")
  2881  		}
  2882  	}
  2883  
  2884  	// Create a connection manager.
  2885  	targetOutbound := defaultTargetOutbound
  2886  	if cfg.MaxPeers < targetOutbound {
  2887  		targetOutbound = cfg.MaxPeers
  2888  	}
  2889  	cmgr, err := connmgr.New(&connmgr.Config{
  2890  		Listeners:      listeners,
  2891  		OnAccept:       s.inboundPeerConnected,
  2892  		RetryDuration:  connectionRetryInterval,
  2893  		TargetOutbound: uint32(targetOutbound),
  2894  		Dial:           btcdDial,
  2895  		OnConnection:   s.outboundPeerConnected,
  2896  		GetNewAddress:  newAddressFunc,
  2897  	})
  2898  	if err != nil {
  2899  		return nil, err
  2900  	}
  2901  	s.connManager = cmgr
  2902  
  2903  	// Start up persistent peers.
  2904  	permanentPeers := cfg.ConnectPeers
  2905  	if len(permanentPeers) == 0 {
  2906  		permanentPeers = cfg.AddPeers
  2907  	}
  2908  	for _, addr := range permanentPeers {
  2909  		netAddr, err := addrStringToNetAddr(addr)
  2910  		if err != nil {
  2911  			return nil, err
  2912  		}
  2913  
  2914  		go s.connManager.Connect(&connmgr.ConnReq{
  2915  			Addr:      netAddr,
  2916  			Permanent: true,
  2917  		})
  2918  	}
  2919  
  2920  	if !cfg.DisableRPC {
  2921  		// Set up listeners for the configured RPC listen addresses
  2922  		// and TLS settings.
  2923  		rpcListeners, err := setupRPCListeners()
  2924  		if err != nil {
  2925  			return nil, err
  2926  		}
  2927  		if len(rpcListeners) == 0 {
  2928  			return nil, errors.New("RPCS: No valid listen address")
  2929  		}
  2930  
  2931  		s.rpcServer, err = newRPCServer(&rpcserverConfig{
  2932  			Listeners:    rpcListeners,
  2933  			StartupTime:  s.startupTime,
  2934  			ConnMgr:      &rpcConnManager{&s},
  2935  			SyncMgr:      &rpcSyncMgr{&s, s.syncManager},
  2936  			TimeSource:   s.timeSource,
  2937  			Chain:        s.chain,
  2938  			ChainParams:  chainParams,
  2939  			DB:           db,
  2940  			TxMemPool:    s.txMemPool,
  2941  			Generator:    blockTemplateGenerator,
  2942  			CPUMiner:     s.cpuMiner,
  2943  			TxIndex:      s.txIndex,
  2944  			AddrIndex:    s.addrIndex,
  2945  			CfIndex:      s.cfIndex,
  2946  			FeeEstimator: s.feeEstimator,
  2947  		})
  2948  		if err != nil {
  2949  			return nil, err
  2950  		}
  2951  
  2952  		// Signal process shutdown when the RPC server requests it.
  2953  		go func() {
  2954  			<-s.rpcServer.RequestedProcessShutdown()
  2955  			shutdownRequestChannel <- struct{}{}
  2956  		}()
  2957  	}
  2958  
  2959  	return &s, nil
  2960  }
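
// skipCandidateAddr is an illustrative sketch, not part of the original
// control flow: it restates the tiered relaxation rules applied by the
// newAddressFunc closure in newServer above as a pure function.  The name
// and signature are hypothetical.  A candidate is skipped when it shares an
// outbound group with an existing peer, when it was attempted within the
// last 10 minutes and fewer than 30 tries have failed, or when it uses a
// non-default port and fewer than 50 tries have failed.
func skipCandidateAddr(tries, outboundGroupCount int,
	sinceLastAttempt time.Duration, port, defaultPort string) bool {

	// Never connect into a network group we already have a peer in.
	if outboundGroupCount != 0 {
		return true
	}

	// Only allow recently-attempted addresses after 30 failed tries.
	if tries < 30 && sinceLastAttempt < 10*time.Minute {
		return true
	}

	// Only allow non-default ports after 50 failed tries.
	if tries < 50 && port != defaultPort {
		return true
	}

	return false
}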
  2961  
  2962  // initListeners initializes the configured net listeners and adds any bound
  2963  // addresses to the address manager. Returns the listeners and a NAT interface,
  2964  // which is non-nil if UPnP is in use.
  2965  func initListeners(amgr *addrmgr.AddrManager, listenAddrs []string, services wire.ServiceFlag) ([]net.Listener, NAT, error) {
  2966  	// Listen for TCP connections at the configured addresses.
  2967  	netAddrs, err := parseListeners(listenAddrs)
  2968  	if err != nil {
  2969  		return nil, nil, err
  2970  	}
  2971  
  2972  	listeners := make([]net.Listener, 0, len(netAddrs))
  2973  	for _, addr := range netAddrs {
  2974  		listener, err := net.Listen(addr.Network(), addr.String())
  2975  		if err != nil {
  2976  			srvrLog.Warnf("Can't listen on %s: %v", addr, err)
  2977  			continue
  2978  		}
  2979  		listeners = append(listeners, listener)
  2980  	}
  2981  
  2982  	var nat NAT
  2983  	if len(cfg.ExternalIPs) != 0 {
  2984  		defaultPort, err := strconv.ParseUint(activeNetParams.DefaultPort, 10, 16)
  2985  		if err != nil {
  2986  			srvrLog.Errorf("Cannot parse default port %s for active chain: %v",
  2987  				activeNetParams.DefaultPort, err)
  2988  			return nil, nil, err
  2989  		}
  2990  
  2991  		for _, sip := range cfg.ExternalIPs {
  2992  			eport := uint16(defaultPort)
  2993  			host, portstr, err := net.SplitHostPort(sip)
  2994  			if err != nil {
  2995  				// No port specified, so use the default.
  2996  				host = sip
  2997  			} else {
  2998  				port, err := strconv.ParseUint(portstr, 10, 16)
  2999  				if err != nil {
  3000  					srvrLog.Warnf("Cannot parse port from %s for "+
  3001  						"externalip: %v", sip, err)
  3002  					continue
  3003  				}
  3004  				eport = uint16(port)
  3005  			}
  3006  			na, err := amgr.HostToNetAddress(host, eport, services)
  3007  			if err != nil {
  3008  				srvrLog.Warnf("Not adding %s as externalip: %v", sip, err)
  3009  				continue
  3010  			}
  3011  
  3012  			err = amgr.AddLocalAddress(na, addrmgr.ManualPrio)
  3013  			if err != nil {
  3014  				amgrLog.Warnf("Skipping specified external IP: %v", err)
  3015  			}
  3016  		}
  3017  	} else {
  3018  		if cfg.Upnp {
  3019  			var err error
  3020  			nat, err = Discover()
  3021  			if err != nil {
  3022  				srvrLog.Warnf("Can't discover upnp: %v", err)
  3023  			}
  3024  			// A nil NAT here is fine; it just means no UPnP on the network.
  3025  		}
  3026  
  3027  		// Add bound addresses to address manager to be advertised to peers.
  3028  		for _, listener := range listeners {
  3029  			addr := listener.Addr().String()
  3030  			err := addLocalAddress(amgr, addr, services)
  3031  			if err != nil {
  3032  				amgrLog.Warnf("Skipping bound address %s: %v", addr, err)
  3033  			}
  3034  		}
  3035  	}
  3036  
  3037  	return listeners, nat, nil
  3038  }
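
// splitHostOptionalPort is a hypothetical helper (not used elsewhere in this
// package) sketching the "host with optional port" parsing performed for
// each --externalip entry in initListeners above: entries without a port
// fall back to the network's default port, mirroring how any
// net.SplitHostPort error is treated as a missing port.
func splitHostOptionalPort(entry string, defaultPort uint16) (string, uint16, error) {
	host, portStr, err := net.SplitHostPort(entry)
	if err != nil {
		// No port present (or unparseable entry); use the default.
		return entry, defaultPort, nil
	}
	port, err := strconv.ParseUint(portStr, 10, 16)
	if err != nil {
		return "", 0, err
	}
	return host, uint16(port), nil
}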
  3039  
  3040  // addrStringToNetAddr takes an address in the form of 'host:port' and returns
  3041  // a net.Addr which maps to the original address with any host names resolved
  3042  // to IP addresses.  It also handles tor addresses properly by returning a
  3043  // net.Addr that encapsulates the address.
  3044  func addrStringToNetAddr(addr string) (net.Addr, error) {
  3045  	host, strPort, err := net.SplitHostPort(addr)
  3046  	if err != nil {
  3047  		return nil, err
  3048  	}
  3049  
  3050  	port, err := strconv.Atoi(strPort)
  3051  	if err != nil {
  3052  		return nil, err
  3053  	}
  3054  
  3055  	// Skip resolution if the host is already an IP address.
  3056  	if ip := net.ParseIP(host); ip != nil {
  3057  		return &net.TCPAddr{
  3058  			IP:   ip,
  3059  			Port: port,
  3060  		}, nil
  3061  	}
  3062  
  3063  	// Tor addresses cannot be resolved to an IP, so just return an onion
  3064  	// address instead.
  3065  	if strings.HasSuffix(host, ".onion") {
  3066  		if cfg.NoOnion {
  3067  			return nil, errors.New("tor has been disabled")
  3068  		}
  3069  
  3070  		return &onionAddr{addr: addr}, nil
  3071  	}
  3072  
  3073  	// Attempt to look up an IP address associated with the parsed host.
  3074  	ips, err := btcdLookup(host)
  3075  	if err != nil {
  3076  		return nil, err
  3077  	}
  3078  	if len(ips) == 0 {
  3079  		return nil, fmt.Errorf("no addresses found for %s", host)
  3080  	}
  3081  
  3082  	return &net.TCPAddr{
  3083  		IP:   ips[0],
  3084  		Port: port,
  3085  	}, nil
  3086  }
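
// describeNetAddr is a hypothetical helper illustrating the concrete types
// addrStringToNetAddr can return: a *net.TCPAddr for IP literals and
// resolved host names, and an *onionAddr for tor addresses.
func describeNetAddr(addr net.Addr) string {
	switch a := addr.(type) {
	case *net.TCPAddr:
		return fmt.Sprintf("tcp address %s port %d", a.IP, a.Port)
	case *onionAddr:
		return fmt.Sprintf("onion address %s", a.String())
	default:
		return fmt.Sprintf("%s address %s", addr.Network(), addr.String())
	}
}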
  3087  
  3088  // addLocalAddress adds an address that this node is listening on to the
  3089  // address manager so that it may be relayed to peers.
  3090  func addLocalAddress(addrMgr *addrmgr.AddrManager, addr string, services wire.ServiceFlag) error {
  3091  	host, portStr, err := net.SplitHostPort(addr)
  3092  	if err != nil {
  3093  		return err
  3094  	}
  3095  	port, err := strconv.ParseUint(portStr, 10, 16)
  3096  	if err != nil {
  3097  		return err
  3098  	}
  3099  
  3100  	if ip := net.ParseIP(host); ip != nil && ip.IsUnspecified() {
  3101  		// If bound to an unspecified address, advertise all local interfaces.
  3102  		addrs, err := net.InterfaceAddrs()
  3103  		if err != nil {
  3104  			return err
  3105  		}
  3106  
  3107  		for _, addr := range addrs {
  3108  			ifaceIP, _, err := net.ParseCIDR(addr.String())
  3109  			if err != nil {
  3110  				continue
  3111  			}
  3112  
  3113  			// If bound to 0.0.0.0, do not add IPv6 interfaces, and if
  3114  			// bound to ::, do not add IPv4 interfaces.
  3115  			if (ip.To4() == nil) != (ifaceIP.To4() == nil) {
  3116  				continue
  3117  			}
  3118  
  3119  			netAddr := wire.NewNetAddressIPPort(ifaceIP, uint16(port), services)
  3120  			addrMgr.AddLocalAddress(netAddr, addrmgr.BoundPrio)
  3121  		}
  3122  	} else {
  3123  		netAddr, err := addrMgr.HostToNetAddress(host, uint16(port), services)
  3124  		if err != nil {
  3125  			return err
  3126  		}
  3127  
  3128  		addrMgr.AddLocalAddress(netAddr, addrmgr.BoundPrio)
  3129  	}
  3130  
  3131  	return nil
  3132  }
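
// sameIPFamily is a hypothetical helper that restates the interface filter
// used in addLocalAddress above: when bound to 0.0.0.0 only IPv4 interfaces
// are advertised, and when bound to :: only IPv6 interfaces are.  To4
// returns nil exactly for non-IPv4 addresses, so comparing the two nil
// checks tests whether both addresses belong to the same family.
func sameIPFamily(a, b net.IP) bool {
	return (a.To4() == nil) == (b.To4() == nil)
}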
  3133  
  3134  // dynamicTickDuration is a convenience function used to dynamically choose a
  3135  // tick duration based on remaining time.  It is primarily used during
  3136  // server shutdown to make shutdown warnings more frequent as the shutdown time
  3137  // approaches.
  3138  func dynamicTickDuration(remaining time.Duration) time.Duration {
  3139  	switch {
  3140  	case remaining <= time.Second*5:
  3141  		return time.Second
  3142  	case remaining <= time.Second*15:
  3143  		return time.Second * 5
  3144  	case remaining <= time.Minute:
  3145  		return time.Second * 15
  3146  	case remaining <= time.Minute*5:
  3147  		return time.Minute
  3148  	case remaining <= time.Minute*15:
  3149  		return time.Minute * 5
  3150  	case remaining <= time.Hour:
  3151  		return time.Minute * 15
  3152  	}
  3153  	return time.Hour
  3154  }
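
// shutdownWarningLoop is a sketch (hypothetical; the real shutdown warnings
// are emitted elsewhere) of how dynamicTickDuration is intended to be used:
// re-arm a timer with progressively shorter ticks so that warnings become
// more frequent as the deadline approaches.
func shutdownWarningLoop(deadline time.Time, done <-chan struct{}) {
	for {
		remaining := time.Until(deadline)
		if remaining <= 0 {
			return
		}
		select {
		case <-done:
			return
		case <-time.After(dynamicTickDuration(remaining)):
			srvrLog.Warnf("Server shutting down in %v",
				time.Until(deadline))
		}
	}
}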
  3155  
  3156  // isWhitelisted returns whether the IP address is included in the whitelisted
  3157  // networks and IPs.
  3158  func isWhitelisted(addr net.Addr) bool {
  3159  	if len(cfg.whitelists) == 0 {
  3160  		return false
  3161  	}
  3162  
  3163  	host, _, err := net.SplitHostPort(addr.String())
  3164  	if err != nil {
  3165  		srvrLog.Warnf("Unable to SplitHostPort on '%s': %v", addr, err)
  3166  		return false
  3167  	}
  3168  	ip := net.ParseIP(host)
  3169  	if ip == nil {
  3170  		srvrLog.Warnf("Unable to parse IP '%s'", addr)
  3171  		return false
  3172  	}
  3173  
  3174  	for _, ipnet := range cfg.whitelists {
  3175  		if ipnet.Contains(ip) {
  3176  			return true
  3177  		}
  3178  	}
  3179  	return false
  3180  }
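
// parseWhitelists is an illustrative sketch (the actual parsing happens at
// config-load time, not in this file) of how whitelist entries such as
// "192.168.0.0/16", or bare IPs like "10.0.0.1", could be turned into the
// []*net.IPNet slice consulted by isWhitelisted.
func parseWhitelists(entries []string) ([]*net.IPNet, error) {
	nets := make([]*net.IPNet, 0, len(entries))
	for _, entry := range entries {
		_, ipnet, err := net.ParseCIDR(entry)
		if err != nil {
			// Not CIDR notation; treat it as a single host.
			ip := net.ParseIP(entry)
			if ip == nil {
				return nil, fmt.Errorf("invalid whitelist entry %q", entry)
			}
			bits := 128
			if ip4 := ip.To4(); ip4 != nil {
				ip = ip4
				bits = 32
			}
			ipnet = &net.IPNet{IP: ip, Mask: net.CIDRMask(bits, bits)}
		}
		nets = append(nets, ipnet)
	}
	return nets, nil
}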
  3181  
  3182  // checkpointSorter implements sort.Interface to allow a slice of checkpoints to
  3183  // be sorted.
  3184  type checkpointSorter []chaincfg.Checkpoint
  3185  
  3186  // Len returns the number of checkpoints in the slice.  It is part of the
  3187  // sort.Interface implementation.
  3188  func (s checkpointSorter) Len() int {
  3189  	return len(s)
  3190  }
  3191  
  3192  // Swap swaps the checkpoints at the passed indices.  It is part of the
  3193  // sort.Interface implementation.
  3194  func (s checkpointSorter) Swap(i, j int) {
  3195  	s[i], s[j] = s[j], s[i]
  3196  }
  3197  
  3198  // Less returns whether the checkpoint with index i should sort before the
  3199  // checkpoint with index j.  It is part of the sort.Interface implementation.
  3200  func (s checkpointSorter) Less(i, j int) bool {
  3201  	return s[i].Height < s[j].Height
  3202  }
  3203  
  3204  // mergeCheckpoints merges two slices of checkpoints into a single slice
  3205  // sorted by height.  When the additional checkpoints contain a checkpoint
  3206  // with the same height as a checkpoint in the default checkpoints, the
  3207  // additional checkpoint takes precedence and overwrites the default one.
  3208  // (A usage sketch follows the function.)
  3209  func mergeCheckpoints(defaultCheckpoints, additional []chaincfg.Checkpoint) []chaincfg.Checkpoint {
  3210  	// Create a map of the additional checkpoints to remove duplicates while
  3211  	// leaving the most recently-specified checkpoint.
  3212  	extra := make(map[int32]chaincfg.Checkpoint)
  3213  	for _, checkpoint := range additional {
  3214  		extra[checkpoint.Height] = checkpoint
  3215  	}
  3216  
  3217  	// Add all default checkpoints that do not have an override in the
  3218  	// additional checkpoints.
  3219  	numDefault := len(defaultCheckpoints)
  3220  	checkpoints := make([]chaincfg.Checkpoint, 0, numDefault+len(extra))
  3221  	for _, checkpoint := range defaultCheckpoints {
  3222  		if _, exists := extra[checkpoint.Height]; !exists {
  3223  			checkpoints = append(checkpoints, checkpoint)
  3224  		}
  3225  	}
  3226  
  3227  	// Append the additional checkpoints and return the sorted results.
  3228  	for _, checkpoint := range extra {
  3229  		checkpoints = append(checkpoints, checkpoint)
  3230  	}
  3231  	sort.Sort(checkpointSorter(checkpoints))
  3232  	return checkpoints
  3233  }
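
// exampleMergeCheckpoints is a hypothetical demonstration of the override
// behavior documented above: an additional checkpoint at an existing height
// replaces the default entry, and the merged result is sorted by height.
func exampleMergeCheckpoints() []chaincfg.Checkpoint {
	defaults := []chaincfg.Checkpoint{{Height: 100}, {Height: 200}}
	additional := []chaincfg.Checkpoint{{Height: 200}, {Height: 150}}

	// Resulting heights: 100, 150, 200, where the entry at height 200
	// comes from the additional slice.
	return mergeCheckpoints(defaults, additional)
}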
  3234  
  3235  // HasUndesiredUserAgent determines whether the server should continue to pursue
  3236  // a connection with this peer based on its advertised user agent. It performs
  3237  // the following steps:
  3238  // 1) Reject the peer if it contains a blacklisted agent.
  3239  // 2) If no whitelist is provided, accept all user agents.
  3240  // 3) Accept the peer if it contains a whitelisted agent.
  3241  // 4) Reject all other peers.
  3242  func (sp *serverPeer) HasUndesiredUserAgent(blacklistedAgents,
  3243  	whitelistedAgents []string) bool {
  3244  
  3245  	agent := sp.UserAgent()
  3246  
  3247  	// First, if the peer's user agent contains any blacklisted substring,
  3248  	// ignore the connection request.
  3249  	for _, blacklistedAgent := range blacklistedAgents {
  3250  		if strings.Contains(agent, blacklistedAgent) {
  3251  			srvrLog.Debugf("Ignoring peer %s, user agent "+
  3252  				"contains blacklisted user agent: %s", sp,
  3253  				agent)
  3254  			return true
  3255  		}
  3256  	}
  3257  
  3258  	// If no whitelist is provided, we will accept all user agents.
  3259  	if len(whitelistedAgents) == 0 {
  3260  		return false
  3261  	}
  3262  
  3263  	// The peer's user agent passed the blacklist.  Now check whether it
  3264  	// contains one of our whitelisted user agents; if so, accept it.
  3265  	for _, whitelistedAgent := range whitelistedAgents {
  3266  		if strings.Contains(agent, whitelistedAgent) {
  3267  			return false
  3268  		}
  3269  	}
  3270  
  3271  	// Otherwise, the peer's user agent was not included in our whitelist.
  3272  	// Ignore it in case it could stall the initial block download.
  3273  	srvrLog.Debugf("Ignoring peer %s, user agent: %s not found in "+
  3274  		"whitelist", sp, agent)
  3275  
  3276  	return true
  3277  }
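
// isUndesiredAgent is a hypothetical standalone restatement of the four
// precedence rules implemented by HasUndesiredUserAgent above, usable
// without a serverPeer.
func isUndesiredAgent(agent string, blacklisted, whitelisted []string) bool {
	// 1) Blacklisted substrings always reject.
	for _, b := range blacklisted {
		if strings.Contains(agent, b) {
			return true
		}
	}

	// 2) Without a whitelist, everything else is accepted.
	if len(whitelisted) == 0 {
		return false
	}

	// 3) Whitelisted substrings accept.
	for _, w := range whitelisted {
		if strings.Contains(agent, w) {
			return false
		}
	}

	// 4) Reject all other peers.
	return true
}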