github.com/BlockABC/godash@v0.0.0-20191112120524-f4aa3a32c566/server.go

     1  // Copyright (c) 2013-2016 The btcsuite developers
     2  // Copyright (c) 2016 The Dash developers
     3  // Use of this source code is governed by an ISC
     4  // license that can be found in the LICENSE file.
     5  
     6  package main
     7  
     8  import (
     9  	"bytes"
    10  	"crypto/rand"
    11  	"encoding/binary"
    12  	"errors"
    13  	"fmt"
    14  	"math"
    15  	mrand "math/rand"
    16  	"net"
    17  	"runtime"
    18  	"strconv"
    19  	"strings"
    20  	"sync"
    21  	"sync/atomic"
    22  	"time"
    23  
    24  	"github.com/BlockABC/godash/addrmgr"
    25  	"github.com/BlockABC/godash/blockchain"
    26  	"github.com/BlockABC/godash/blockchain/indexers"
    27  	"github.com/BlockABC/godash/chaincfg"
    28  	"github.com/BlockABC/godash/database"
    29  	"github.com/BlockABC/godash/mining"
    30  	"github.com/BlockABC/godash/peer"
    31  	"github.com/BlockABC/godash/txscript"
    32  	"github.com/BlockABC/godash/wire"
    33  	"github.com/BlockABC/godashutil"
    34  	"github.com/BlockABC/godashutil/bloom"
    35  )
    36  
    37  const (
    38  	// These constants are used by the DNS seed code to pick a random last
    39  	// seen time.
    40  	secondsIn3Days int32 = 24 * 60 * 60 * 3
    41  	secondsIn4Days int32 = 24 * 60 * 60 * 4
    42  )
    43  
    44  const (
    45  	// defaultServices describes the default services that are supported by
    46  	// the server.
    47  	defaultServices = wire.SFNodeNetwork | wire.SFNodeBloom
    48  
    49  	// defaultMaxOutbound is the default number of max outbound peers.
    50  	defaultMaxOutbound = 8
    51  
    52  	// connectionRetryInterval is the base amount of time to wait in between
    53  	// retries when connecting to persistent peers.  It is adjusted by the
    54  	// number of retries such that there is a retry backoff.
    55  	connectionRetryInterval = time.Second * 5
    56  
    57  	// maxConnectionRetryInterval is the max amount of time retrying of a
    58  	// persistent peer is allowed to grow to.  This is necessary since the
    59  	// retry logic uses a backoff mechanism which increases the interval
    60  	// based on the number of retries that have been done.
    61  	maxConnectionRetryInterval = time.Minute * 5
    62  )
    63  
    64  var (
    65  	// userAgentName is the user agent name and is used to help identify
    66  	// ourselves to other bitcoin peers.
    67  	userAgentName = "btcd"
    68  
    69  	// userAgentVersion is the user agent version and is used to help
    70  	// identify ourselves to other bitcoin peers.
    71  	userAgentVersion = fmt.Sprintf("%d.%d.%d", appMajor, appMinor, appPatch)
    72  )
    73  
    74  // broadcastMsg provides the ability to house a bitcoin message to be broadcast
    75  // to all connected peers except specified excluded peers.
    76  type broadcastMsg struct {
    77  	message      wire.Message
    78  	excludePeers []*serverPeer
    79  }
    80  
    81  // broadcastInventoryAdd is a type used to declare that the InvVect it contains
    82  // needs to be added to the rebroadcast map.
    83  type broadcastInventoryAdd relayMsg
    84  
    85  // broadcastInventoryDel is a type used to declare that the InvVect it contains
    86  // needs to be removed from the rebroadcast map.
    87  type broadcastInventoryDel *wire.InvVect
    88  
    89  // relayMsg packages an inventory vector along with the newly discovered
    90  // inventory so the relay has access to that information.
    91  type relayMsg struct {
    92  	invVect *wire.InvVect
    93  	data    interface{}
    94  }
    95  
    96  // updatePeerHeightsMsg is a message sent from the blockmanager to the server
    97  // after a new block has been accepted. The purpose of the message is to update
    98  // the heights of peers that were known to announce the block before we
    99  // connected it to the main chain or recognized it as an orphan. With these
   100  // updates, peer heights will be kept up to date, allowing for fresh data when
   101  // selecting sync peer candidates.
   102  type updatePeerHeightsMsg struct {
   103  	newSha     *wire.ShaHash
   104  	newHeight  int32
   105  	originPeer *serverPeer
   106  }
   107  
   108  // peerState maintains the state of inbound, persistent, and outbound peers,
   109  // as well as banned peers and outbound groups.
   110  type peerState struct {
   111  	pendingPeers     map[string]*serverPeer
   112  	inboundPeers     map[int32]*serverPeer
   113  	outboundPeers    map[int32]*serverPeer
   114  	persistentPeers  map[int32]*serverPeer
   115  	banned           map[string]time.Time
   116  	outboundGroups   map[string]int
   117  	maxOutboundPeers int
   118  }
   119  
   120  // Count returns the count of all known peers.
   121  func (ps *peerState) Count() int {
   122  	return len(ps.inboundPeers) + len(ps.outboundPeers) +
   123  		len(ps.persistentPeers)
   124  }
   125  
   126  // OutboundCount returns the count of known outbound peers.
   127  func (ps *peerState) OutboundCount() int {
   128  	return len(ps.outboundPeers) + len(ps.persistentPeers)
   129  }
   130  
   131  // NeedMoreOutbound returns true if more outbound peers are required.
   132  func (ps *peerState) NeedMoreOutbound() bool {
   133  	return ps.OutboundCount() < ps.maxOutboundPeers &&
   134  		ps.Count() < cfg.MaxPeers
   135  }
   136  
   137  // NeedMoreTries returns true if more outbound peer attempts can be tried.
   138  func (ps *peerState) NeedMoreTries() bool {
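        	// Allow up to twice as many pending connection attempts as there are
        	// unfilled outbound slots; e.g. with 8 max outbound peers and 5 already
        	// connected, up to 6 attempts may be pending at once.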
   139  	return len(ps.pendingPeers) < 2*(ps.maxOutboundPeers-ps.OutboundCount())
   140  }
   141  
   142  // forAllOutboundPeers is a helper function that runs closure on all outbound
   143  // peers known to peerState.
   144  func (ps *peerState) forAllOutboundPeers(closure func(sp *serverPeer)) {
   145  	for _, e := range ps.outboundPeers {
   146  		closure(e)
   147  	}
   148  	for _, e := range ps.persistentPeers {
   149  		closure(e)
   150  	}
   151  }
   152  
   153  // forPendingPeers is a helper function that runs closure on all pending peers
   154  // known to peerState.
   155  func (ps *peerState) forPendingPeers(closure func(sp *serverPeer)) {
   156  	for _, e := range ps.pendingPeers {
   157  		closure(e)
   158  	}
   159  }
   160  
   161  // forAllPeers is a helper function that runs closure on all peers known to
   162  // peerState.
   163  func (ps *peerState) forAllPeers(closure func(sp *serverPeer)) {
   164  	for _, e := range ps.inboundPeers {
   165  		closure(e)
   166  	}
   167  	ps.forAllOutboundPeers(closure)
   168  }
   169  
   170  // server provides a bitcoin server for handling communications to and from
   171  // bitcoin peers.
   172  type server struct {
   173  	// The following variables must only be used atomically.
   174  	// Putting the uint64s first makes them 64-bit aligned for 32-bit systems.
   175  	bytesReceived uint64 // Total bytes received from all peers since start.
   176  	bytesSent     uint64 // Total bytes sent by all peers since start.
   177  	started       int32
   178  	shutdown      int32
   179  	shutdownSched int32
   180  
   181  	listeners            []net.Listener
   182  	chainParams          *chaincfg.Params
   183  	addrManager          *addrmgr.AddrManager
   184  	sigCache             *txscript.SigCache
   185  	rpcServer            *rpcServer
   186  	blockManager         *blockManager
   187  	txMemPool            *txMemPool
   188  	cpuMiner             *CPUMiner
   189  	modifyRebroadcastInv chan interface{}
   190  	pendingPeers         chan *serverPeer
   191  	newPeers             chan *serverPeer
   192  	donePeers            chan *serverPeer
   193  	banPeers             chan *serverPeer
   194  	retryPeers           chan *serverPeer
   195  	wakeup               chan struct{}
   196  	query                chan interface{}
   197  	relayInv             chan relayMsg
   198  	broadcast            chan broadcastMsg
   199  	peerHeightsUpdate    chan updatePeerHeightsMsg
   200  	wg                   sync.WaitGroup
   201  	quit                 chan struct{}
   202  	nat                  NAT
   203  	db                   database.DB
   204  	timeSource           blockchain.MedianTimeSource
   205  	services             wire.ServiceFlag
   206  
   207  	// The following fields are used for optional indexes.  They will be nil
   208  	// if the associated index is not enabled.  These fields are set during
   209  	// initial creation of the server and never changed afterwards, so they
   210  	// do not need to be protected for concurrent access.
   211  	txIndex   *indexers.TxIndex
   212  	addrIndex *indexers.AddrIndex
   213  }
   214  
   215  // serverPeer extends the peer to maintain state shared by the server and
   216  // the blockmanager.
   217  type serverPeer struct {
   218  	*peer.Peer
   219  
   220  	server          *server
   221  	persistent      bool
   222  	continueHash    *wire.ShaHash
   223  	relayMtx        sync.Mutex
   224  	disableRelayTx  bool
   225  	requestQueue    []*wire.InvVect
   226  	requestedTxns   map[wire.ShaHash]struct{}
   227  	requestedBlocks map[wire.ShaHash]struct{}
   228  	filter          *bloom.Filter
   229  	knownAddresses  map[string]struct{}
   230  	banScore        dynamicBanScore
   231  	quit            chan struct{}
   232  	// The following chans are used to sync blockmanager and server.
   233  	txProcessed    chan struct{}
   234  	blockProcessed chan struct{}
   235  }
   236  
   237  // newServerPeer returns a new serverPeer instance. The peer needs to be set by
   238  // the caller.
   239  func newServerPeer(s *server, isPersistent bool) *serverPeer {
   240  	return &serverPeer{
   241  		server:          s,
   242  		persistent:      isPersistent,
   243  		requestedTxns:   make(map[wire.ShaHash]struct{}),
   244  		requestedBlocks: make(map[wire.ShaHash]struct{}),
   245  		filter:          bloom.LoadFilter(nil),
   246  		knownAddresses:  make(map[string]struct{}),
   247  		quit:            make(chan struct{}),
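        		// Buffered so the block manager can signal that a transaction
        		// or block has been processed without blocking.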
   248  		txProcessed:     make(chan struct{}, 1),
   249  		blockProcessed:  make(chan struct{}, 1),
   250  	}
   251  }
   252  
   253  // newestBlock returns the current best block hash and height using the format
   254  // required by the configuration for the peer package.
   255  func (sp *serverPeer) newestBlock() (*wire.ShaHash, int32, error) {
   256  	best := sp.server.blockManager.chain.BestSnapshot()
   257  	return best.Hash, best.Height, nil
   258  }
   259  
   260  // addKnownAddresses adds the given addresses to the set of known addresses
   261  // for the peer to prevent sending duplicate addresses.
   262  func (sp *serverPeer) addKnownAddresses(addresses []*wire.NetAddress) {
   263  	for _, na := range addresses {
   264  		sp.knownAddresses[addrmgr.NetAddressKey(na)] = struct{}{}
   265  	}
   266  }
   267  
   268  // addressKnown returns true if the given address is already known to the peer.
   269  func (sp *serverPeer) addressKnown(na *wire.NetAddress) bool {
   270  	_, exists := sp.knownAddresses[addrmgr.NetAddressKey(na)]
   271  	return exists
   272  }
   273  
   274  // setDisableRelayTx toggles relaying of transactions for the given peer.
   275  // It is safe for concurrent access.
   276  func (sp *serverPeer) setDisableRelayTx(disable bool) {
   277  	sp.relayMtx.Lock()
   278  	sp.disableRelayTx = disable
   279  	sp.relayMtx.Unlock()
   280  }
   281  
   282  // relayTxDisabled returns whether or not relaying of transactions for the given
   283  // peer is disabled.
   284  // It is safe for concurrent access.
   285  func (sp *serverPeer) relayTxDisabled() bool {
   286  	sp.relayMtx.Lock()
   287  	defer sp.relayMtx.Unlock()
   288  
   289  	return sp.disableRelayTx
   290  }
   291  
   292  // pushAddrMsg sends an addr message to the connected peer using the provided
   293  // addresses.
   294  func (sp *serverPeer) pushAddrMsg(addresses []*wire.NetAddress) {
   295  	// Filter addresses already known to the peer.
   296  	addrs := make([]*wire.NetAddress, 0, len(addresses))
   297  	for _, addr := range addresses {
   298  		if !sp.addressKnown(addr) {
   299  			addrs = append(addrs, addr)
   300  		}
   301  	}
   302  	known, err := sp.PushAddrMsg(addrs)
   303  	if err != nil {
   304  		peerLog.Errorf("Can't push address message to %s: %v", sp.Peer, err)
   305  		sp.Disconnect()
   306  		return
   307  	}
   308  	sp.addKnownAddresses(known)
   309  }
   310  
   311  // addBanScore increases the persistent and decaying ban score fields by the
   312  // values passed as parameters. If the resulting score exceeds half of the ban
   313  // threshold, a warning is logged including the reason provided. Further, if
   314  // the score is above the ban threshold, the peer will be banned and
   315  // disconnected.
   316  func (sp *serverPeer) addBanScore(persistent, transient uint32, reason string) {
   317  	// No warning is logged and no score is calculated if banning is disabled.
   318  	if cfg.DisableBanning {
   319  		return
   320  	}
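        	// Warnings are logged once the score exceeds half of the ban threshold.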
   321  	warnThreshold := cfg.BanThreshold >> 1
   322  	if transient == 0 && persistent == 0 {
   323  		// The score is not being increased, but a warning message is still
   324  		// logged if the score is above the warn threshold.
   325  		score := sp.banScore.Int()
   326  		if score > warnThreshold {
   327  			peerLog.Warnf("Misbehaving peer %s: %s -- ban score is %d, "+
   328  				"it was not increased this time", sp, reason, score)
   329  		}
   330  		return
   331  	}
   332  	score := sp.banScore.Increase(persistent, transient)
   333  	if score > warnThreshold {
   334  		peerLog.Warnf("Misbehaving peer %s: %s -- ban score increased to %d",
   335  			sp, reason, score)
   336  		if score > cfg.BanThreshold {
   337  			peerLog.Warnf("Misbehaving peer %s -- banning and disconnecting",
   338  				sp)
   339  			sp.server.BanPeer(sp)
   340  			sp.Disconnect()
   341  		}
   342  	}
   343  }
   344  
   345  // OnVersion is invoked when a peer receives a version bitcoin message
   346  // and is used to negotiate the protocol version details as well as kick start
   347  // the communications.
   348  func (sp *serverPeer) OnVersion(p *peer.Peer, msg *wire.MsgVersion) {
   349  	// Add the remote peer time as a sample for creating an offset against
   350  	// the local clock to keep the network time in sync.
   351  	sp.server.timeSource.AddTimeSample(p.Addr(), msg.Timestamp)
   352  
   353  	// Signal the block manager this peer is a new sync candidate.
   354  	sp.server.blockManager.NewPeer(sp)
   355  
   356  	// Choose whether or not to relay transactions before a filter command
   357  	// is received.
   358  	sp.setDisableRelayTx(msg.DisableRelayTx)
   359  
   360  	// Update the address manager and request known addresses from the
   361  	// remote peer for outbound connections.  This is skipped when running
   362  	// on the simulation test network since it is only intended to connect
   363  	// to specified peers and actively avoids advertising and connecting to
   364  	// discovered peers.
   365  	if !cfg.SimNet {
   366  		addrManager := sp.server.addrManager
   367  		// Outbound connections.
   368  		if !p.Inbound() {
   369  			// TODO(davec): Only do this if not doing the initial block
   370  			// download and the local address is routable.
   371  			if !cfg.DisableListen /* && isCurrent? */ {
   372  				// Get address that best matches.
   373  				lna := addrManager.GetBestLocalAddress(p.NA())
   374  				if addrmgr.IsRoutable(lna) {
   375  					// Filter addresses the peer already knows about.
   376  					addresses := []*wire.NetAddress{lna}
   377  					sp.pushAddrMsg(addresses)
   378  				}
   379  			}
   380  
   381  			// Request known addresses if the server address manager needs
   382  			// more and the peer has a protocol version new enough to
   383  			// include a timestamp with addresses.
   384  			hasTimestamp := p.ProtocolVersion() >=
   385  				wire.NetAddressTimeVersion
   386  			if addrManager.NeedMoreAddresses() && hasTimestamp {
   387  				p.QueueMessage(wire.NewMsgGetAddr(), nil)
   388  			}
   389  
   390  			// Mark the address as a known good address.
   391  			addrManager.Good(p.NA())
   392  		} else {
   393  			// A peer might not be advertising the same address that it
   394  			// actually connected from.  One example of why this can happen
   395  			// is with NAT.  Only add the address to the address manager if
   396  			// the addresses agree.
   397  			if addrmgr.NetAddressKey(&msg.AddrMe) == addrmgr.NetAddressKey(p.NA()) {
   398  				addrManager.AddAddress(p.NA(), p.NA())
   399  				addrManager.Good(p.NA())
   400  			}
   401  		}
   402  	}
   403  
   404  	// Add valid peer to the server.
   405  	sp.server.AddPeer(sp)
   406  }
   407  
   408  // OnMemPool is invoked when a peer receives a mempool bitcoin message.
   409  // It creates and sends an inventory message with the contents of the memory
   410  // pool up to the maximum inventory allowed per message.  When the peer has a
   411  // bloom filter loaded, the contents are filtered accordingly.
   412  func (sp *serverPeer) OnMemPool(p *peer.Peer, msg *wire.MsgMemPool) {
   413  	// A decaying ban score increase is applied to prevent flooding.
   414  	// The ban score accumulates and passes the ban threshold if a burst of
   415  	// mempool messages comes from a peer. The score decays each minute to
   416  	// half of its value.
   417  	sp.addBanScore(0, 33, "mempool")
   418  
   419  	// Generate inventory message with the available transactions in the
   420  	// transaction memory pool.  Limit it to the max allowed inventory
   421  	// per message.  The NewMsgInvSizeHint function automatically limits
   422  	// the passed hint to the maximum allowed, so it's safe to pass it
   423  	// without double checking it here.
   424  	txMemPool := sp.server.txMemPool
   425  	txDescs := txMemPool.TxDescs()
   426  	invMsg := wire.NewMsgInvSizeHint(uint(len(txDescs)))
   427  
   428  	for i, txDesc := range txDescs {
   429  		// Another thread might have removed the transaction from the
   430  		// pool since the initial query.
   431  		hash := txDesc.Tx.Sha()
   432  		if !txMemPool.IsTransactionInPool(hash) {
   433  			continue
   434  		}
   435  
   436  		// Either add all transactions when there is no bloom filter,
   437  		// or only the transactions that match the filter when there is
   438  		// one.
   439  		if !sp.filter.IsLoaded() || sp.filter.MatchTxAndUpdate(txDesc.Tx) {
   440  			iv := wire.NewInvVect(wire.InvTypeTx, hash)
   441  			invMsg.AddInvVect(iv)
   442  			if i+1 >= wire.MaxInvPerMsg {
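        				// The inventory message is full; stop adding entries.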
   443  				break
   444  			}
   445  		}
   446  	}
   447  
   448  	// Send the inventory message if there is anything to send.
   449  	if len(invMsg.InvList) > 0 {
   450  		p.QueueMessage(invMsg, nil)
   451  	}
   452  }
   453  
   454  // OnTx is invoked when a peer receives a tx bitcoin message.  It blocks
   455  // until the bitcoin transaction has been fully processed.  Unlike the block
   456  // handler, this does not serialize all transactions through a single thread;
   457  // transactions don't rely on the previous one in a linear fashion like blocks.
   458  func (sp *serverPeer) OnTx(p *peer.Peer, msg *wire.MsgTx) {
   459  	if cfg.BlocksOnly {
   460  		peerLog.Tracef("Ignoring tx %v from %v - blocksonly enabled",
   461  			msg.TxSha(), p)
   462  		return
   463  	}
   464  
   465  	// Add the transaction to the known inventory for the peer.
   466  	// Convert the raw MsgTx to a godashutil.Tx which provides some convenience
   467  	// methods and things such as hash caching.
   468  	tx := godashutil.NewTx(msg)
   469  	iv := wire.NewInvVect(wire.InvTypeTx, tx.Sha())
   470  	p.AddKnownInventory(iv)
   471  
   472  	// Queue the transaction up to be handled by the block manager and
   473  	// intentionally block further receives until the transaction is fully
   474  	// processed and known good or bad.  This helps prevent a malicious peer
   475  	// from queuing up a bunch of bad transactions before disconnecting (or
   476  	// being disconnected) and wasting memory.
   477  	sp.server.blockManager.QueueTx(tx, sp)
   478  	<-sp.txProcessed
   479  }
   480  
   481  // OnBlock is invoked when a peer receives a block bitcoin message.  It
   482  // blocks until the bitcoin block has been fully processed.
   483  func (sp *serverPeer) OnBlock(p *peer.Peer, msg *wire.MsgBlock, buf []byte) {
   484  	// Convert the raw MsgBlock to a godashutil.Block which provides some
   485  	// convenience methods and things such as hash caching.
   486  	block := godashutil.NewBlockFromBlockAndBytes(msg, buf)
   487  
   488  	// Add the block to the known inventory for the peer.
   489  	iv := wire.NewInvVect(wire.InvTypeBlock, block.Sha())
   490  	p.AddKnownInventory(iv)
   491  
   492  	// Queue the block up to be handled by the block
   493  	// manager and intentionally block further receives
   494  	// until the bitcoin block is fully processed and known
   495  	// good or bad.  This helps prevent a malicious peer
   496  	// from queuing up a bunch of bad blocks before
   497  	// disconnecting (or being disconnected) and wasting
   498  	// memory.  Additionally, this behavior is depended on
   499  	// by at least the block acceptance test tool as the
   500  	// reference implementation processes blocks in the same
   501  	// thread and therefore blocks further messages until
   502  	// the bitcoin block has been fully processed.
   503  	sp.server.blockManager.QueueBlock(block, sp)
   504  	<-sp.blockProcessed
   505  }
   506  
   507  // OnInv is invoked when a peer receives an inv bitcoin message and is
   508  // used to examine the inventory being advertised by the remote peer and react
   509  // accordingly.  We pass the message down to blockmanager which will call
   510  // QueueMessage with any appropriate responses.
   511  func (sp *serverPeer) OnInv(p *peer.Peer, msg *wire.MsgInv) {
   512  	if !cfg.BlocksOnly {
   513  		if len(msg.InvList) > 0 {
   514  			sp.server.blockManager.QueueInv(msg, sp)
   515  		}
   516  		return
   517  	}
   518  
   519  	newInv := wire.NewMsgInvSizeHint(uint(len(msg.InvList)))
   520  	for _, invVect := range msg.InvList {
   521  		if invVect.Type == wire.InvTypeTx {
   522  			peerLog.Tracef("Ignoring tx %v in inv from %v -- "+
   523  				"blocksonly enabled", invVect.Hash, p)
   524  			if p.ProtocolVersion() >= wire.BIP0037Version {
   525  				peerLog.Infof("Peer %v is announcing "+
   526  					"transactions -- disconnecting", p)
   527  				p.Disconnect()
   528  				return
   529  			}
   530  			continue
   531  		}
   532  		err := newInv.AddInvVect(invVect)
   533  		if err != nil {
   534  			peerLog.Errorf("Failed to add inventory vector: %v", err)
   535  			break
   536  		}
   537  	}
   538  
   539  	if len(newInv.InvList) > 0 {
   540  		sp.server.blockManager.QueueInv(newInv, sp)
   541  	}
   542  }
   543  
   544  // OnHeaders is invoked when a peer receives a headers bitcoin
   545  // message.  The message is passed down to the block manager.
   546  func (sp *serverPeer) OnHeaders(p *peer.Peer, msg *wire.MsgHeaders) {
   547  	sp.server.blockManager.QueueHeaders(msg, sp)
   548  }
   549  
   550  // OnGetData is invoked when a peer receives a getdata bitcoin message and
   551  // is used to deliver block and transaction information.
   552  func (sp *serverPeer) OnGetData(p *peer.Peer, msg *wire.MsgGetData) {
   553  	numAdded := 0
   554  	notFound := wire.NewMsgNotFound()
   555  
   556  	length := len(msg.InvList)
   557  	// A decaying ban score increase is applied to prevent exhausting resources
   558  	// with unusually large inventory queries.
   559  	// Requesting more than the maximum inventory vector length within a short
   560  	// period of time yields a score above the default ban threshold. Sustained
   561  	// bursts of small requests are not penalized as that would potentially ban
   562  	// peers performing IBD.
   563  	// This incremental score decays each minute to half of its value.
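        	// For example, a single request carrying the maximum number of
        	// inventory vectors scores 99, just under the default ban threshold of
        	// 100, so two maximal requests in quick succession trigger a ban.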
   564  	sp.addBanScore(0, uint32(length)*99/wire.MaxInvPerMsg, "getdata")
   565  
   566  	// We wait on this wait channel periodically to prevent queuing
   567  	// far more data than we can send in a reasonable time, wasting memory.
   568  	// The waiting occurs after the database fetch for the next one to
   569  	// provide a little pipelining.
   570  	var waitChan chan struct{}
   571  	doneChan := make(chan struct{}, 1)
   572  
   573  	for i, iv := range msg.InvList {
   574  		var c chan struct{}
   575  		// If this will be the last message we send.
   576  		if i == length-1 && len(notFound.InvList) == 0 {
   577  			c = doneChan
   578  		} else if (i+1)%3 == 0 {
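        			// Signal after every third message so queued data stays
        			// bounded while the next fetch overlaps with sending.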
   579  			// Buffered so as to not make the send goroutine block.
   580  			c = make(chan struct{}, 1)
   581  		}
   582  		var err error
   583  		switch iv.Type {
   584  		case wire.InvTypeTx:
   585  			err = sp.server.pushTxMsg(sp, &iv.Hash, c, waitChan)
   586  		case wire.InvTypeBlock:
   587  			err = sp.server.pushBlockMsg(sp, &iv.Hash, c, waitChan)
   588  		case wire.InvTypeFilteredBlock:
   589  			err = sp.server.pushMerkleBlockMsg(sp, &iv.Hash, c, waitChan)
   590  		default:
   591  			peerLog.Warnf("Unknown type in inventory request %d",
   592  				iv.Type)
   593  			continue
   594  		}
   595  		if err != nil {
   596  			notFound.AddInvVect(iv)
   597  
   598  			// When there is a failure fetching the final entry
   599  			// and the done channel was sent in due to there
   600  			// being no outstanding not found inventory, consume
   601  			// it here because there is now not found inventory
   602  			// that will use the channel momentarily.
   603  			if i == len(msg.InvList)-1 && c != nil {
   604  				<-c
   605  			}
   606  		}
   607  		numAdded++
   608  		waitChan = c
   609  	}
   610  	if len(notFound.InvList) != 0 {
   611  		p.QueueMessage(notFound, doneChan)
   612  	}
   613  
   614  	// Wait for messages to be sent. We can send quite a lot of data at this
   615  	// point and this will keep the peer busy for a decent amount of time.
   616  	// We don't process anything else from them during this time so that we
   617  	// have an idea of when we should hear back from them - else the idle
   618  	// timeout could fire when we were only half done sending the blocks.
   619  	if numAdded > 0 {
   620  		<-doneChan
   621  	}
   622  }
   623  
   624  // OnGetBlocks is invoked when a peer receives a getblocks bitcoin
   625  // message.
   626  func (sp *serverPeer) OnGetBlocks(p *peer.Peer, msg *wire.MsgGetBlocks) {
   627  	// Return all block hashes to the latest one (up to max per message) if
   628  	// no stop hash was specified.
   629  	// Attempt to find the ending index of the stop hash if specified.
   630  	chain := sp.server.blockManager.chain
   631  	endIdx := int32(math.MaxInt32)
   632  	if !msg.HashStop.IsEqual(&zeroHash) {
   633  		height, err := chain.BlockHeightByHash(&msg.HashStop)
   634  		if err == nil {
   635  			endIdx = height + 1
   636  		}
   637  	}
   638  
   639  	// Find the most recent known block based on the block locator.
   640  	// Use the block after the genesis block if no other blocks in the
   641  	// provided locator are known.  This does mean the client will start
   642  	// over with the genesis block if unknown block locators are provided.
   643  	// This mirrors the behavior in the reference implementation.
   644  	startIdx := int32(1)
   645  	for _, hash := range msg.BlockLocatorHashes {
   646  		height, err := chain.BlockHeightByHash(hash)
   647  		if err == nil {
   648  			// Start with the next hash since we know this one.
   649  			startIdx = height + 1
   650  			break
   651  		}
   652  	}
   653  
   654  	// Don't attempt to fetch more than we can put into a single message.
   655  	autoContinue := false
   656  	if endIdx-startIdx > wire.MaxBlocksPerMsg {
   657  		endIdx = startIdx + wire.MaxBlocksPerMsg
   658  		autoContinue = true
   659  	}
   660  
   661  	// Fetch the inventory from the block database.
   662  	hashList, err := chain.HeightRange(startIdx, endIdx)
   663  	if err != nil {
   664  		peerLog.Warnf("Block lookup failed: %v", err)
   665  		return
   666  	}
   667  
   668  	// Generate inventory message.
   669  	invMsg := wire.NewMsgInv()
   670  	for i := range hashList {
   671  		iv := wire.NewInvVect(wire.InvTypeBlock, &hashList[i])
   672  		invMsg.AddInvVect(iv)
   673  	}
   674  
   675  	// Send the inventory message if there is anything to send.
   676  	if len(invMsg.InvList) > 0 {
   677  		invListLen := len(invMsg.InvList)
   678  		if autoContinue && invListLen == wire.MaxBlocksPerMsg {
   679  			// Intentionally use a copy of the final hash so there
   680  			// is not a reference into the inventory slice which
   681  			// would prevent the entire slice from being eligible
   682  			// for GC as soon as it's sent.
   683  			continueHash := invMsg.InvList[invListLen-1].Hash
   684  			sp.continueHash = &continueHash
   685  		}
   686  		p.QueueMessage(invMsg, nil)
   687  	}
   688  }
   689  
   690  // OnGetHeaders is invoked when a peer receives a getheaders bitcoin
   691  // message.
   692  func (sp *serverPeer) OnGetHeaders(p *peer.Peer, msg *wire.MsgGetHeaders) {
   693  	// Ignore getheaders requests if not in sync.
   694  	if !sp.server.blockManager.IsCurrent() {
   695  		return
   696  	}
   697  
   698  	// Attempt to look up the height of the provided stop hash.
   699  	chain := sp.server.blockManager.chain
   700  	endIdx := int32(math.MaxInt32)
   701  	height, err := chain.BlockHeightByHash(&msg.HashStop)
   702  	if err == nil {
   703  		endIdx = height + 1
   704  	}
   705  
   706  	// There are no block locators so a specific header is being requested
   707  	// as identified by the stop hash.
   708  	if len(msg.BlockLocatorHashes) == 0 {
   709  		// No blocks with the stop hash were found so there is nothing
   710  		// to do.  Just return.  This behavior mirrors the reference
   711  		// implementation.
   712  		if endIdx == math.MaxInt32 {
   713  			return
   714  		}
   715  
   716  		// Fetch the raw block header bytes from the database.
   717  		var headerBytes []byte
   718  		err := sp.server.db.View(func(dbTx database.Tx) error {
   719  			var err error
   720  			headerBytes, err = dbTx.FetchBlockHeader(&msg.HashStop)
   721  			return err
   722  		})
   723  		if err != nil {
   724  			peerLog.Warnf("Lookup of known block hash failed: %v",
   725  				err)
   726  			return
   727  		}
   728  
   729  		// Deserialize the block header.
   730  		var header wire.BlockHeader
   731  		err = header.Deserialize(bytes.NewReader(headerBytes))
   732  		if err != nil {
   733  			peerLog.Warnf("Block header deserialize failed: %v",
   734  				err)
   735  			return
   736  		}
   737  
   738  		headersMsg := wire.NewMsgHeaders()
   739  		headersMsg.AddBlockHeader(&header)
   740  		p.QueueMessage(headersMsg, nil)
   741  		return
   742  	}
   743  
   744  	// Find the most recent known block based on the block locator.
   745  	// Use the block after the genesis block if no other blocks in the
   746  	// provided locator are known.  This does mean the client will start
   747  	// over with the genesis block if unknown block locators are provided.
   748  	// This mirrors the behavior in the reference implementation.
   749  	startIdx := int32(1)
   750  	for _, hash := range msg.BlockLocatorHashes {
   751  		height, err := chain.BlockHeightByHash(hash)
   752  		if err == nil {
   753  			// Start with the next hash since we know this one.
   754  			startIdx = height + 1
   755  			break
   756  		}
   757  	}
   758  
   759  	// Don't attempt to fetch more than we can put into a single message.
   760  	if endIdx-startIdx > wire.MaxBlockHeadersPerMsg {
   761  		endIdx = startIdx + wire.MaxBlockHeadersPerMsg
   762  	}
   763  
   764  	// Fetch the inventory from the block database.
   765  	hashList, err := chain.HeightRange(startIdx, endIdx)
   766  	if err != nil {
   767  		peerLog.Warnf("Header lookup failed: %v", err)
   768  		return
   769  	}
   770  
   771  	// Generate headers message and send it.
   772  	headersMsg := wire.NewMsgHeaders()
   773  	err = sp.server.db.View(func(dbTx database.Tx) error {
   774  		for i := range hashList {
   775  			headerBytes, err := dbTx.FetchBlockHeader(&hashList[i])
   776  			if err != nil {
   777  				return err
   778  			}
   779  
   780  			var header wire.BlockHeader
   781  			err = header.Deserialize(bytes.NewReader(headerBytes))
   782  			if err != nil {
   783  				return err
   784  			}
   785  			headersMsg.AddBlockHeader(&header)
   786  		}
   787  
   788  		return nil
   789  	})
   790  	if err != nil {
   791  		peerLog.Warnf("Failed to build headers: %v", err)
   792  		return
   793  	}
   794  
   795  	p.QueueMessage(headersMsg, nil)
   796  }
   797  
   798  // OnFilterAdd is invoked when a peer receives a filteradd bitcoin
   799  // message and is used by remote peers to add data to an already loaded bloom
   800  // filter.  The peer will be disconnected if a filter is not loaded when this
   801  // message is received.
   802  func (sp *serverPeer) OnFilterAdd(p *peer.Peer, msg *wire.MsgFilterAdd) {
   803  	if !sp.filter.IsLoaded() {
   804  		peerLog.Debugf("%s sent a filteradd request with no filter "+
   805  			"loaded -- disconnecting", p)
   806  		p.Disconnect()
   807  		return
   808  	}
   809  
   810  	sp.filter.Add(msg.Data)
   811  }
   812  
   813  // OnFilterClear is invoked when a peer receives a filterclear bitcoin
   814  // message and is used by remote peers to clear an already loaded bloom filter.
   815  // The peer will be disconnected if a filter is not loaded when this message is
   816  // received.
   817  func (sp *serverPeer) OnFilterClear(p *peer.Peer, msg *wire.MsgFilterClear) {
   818  	if !sp.filter.IsLoaded() {
   819  		peerLog.Debugf("%s sent a filterclear request with no "+
   820  			"filter loaded -- disconnecting", p)
   821  		p.Disconnect()
   822  		return
   823  	}
   824  
   825  	sp.filter.Unload()
   826  }
   827  
   828  // OnFilterLoad is invoked when a peer receives a filterload bitcoin
   829  // message and is used to load a bloom filter that should be used for
   830  // delivering merkle blocks and associated transactions that match the filter.
   831  func (sp *serverPeer) OnFilterLoad(p *peer.Peer, msg *wire.MsgFilterLoad) {
   832  	sp.setDisableRelayTx(false)
   833  
   834  	sp.filter.Reload(msg)
   835  }
   836  
   837  // OnGetAddr is invoked when a peer receives a getaddr bitcoin message
   838  // and is used to provide the peer with known addresses from the address
   839  // manager.
   840  func (sp *serverPeer) OnGetAddr(p *peer.Peer, msg *wire.MsgGetAddr) {
   841  	// Don't return any addresses when running on the simulation test
   842  	// network.  This helps prevent the network from becoming another
   843  	// public test network since it will not be able to learn about other
   844  	// peers that have not specifically been provided.
   845  	if cfg.SimNet {
   846  		return
   847  	}
   848  
   849  	// Do not accept getaddr requests from outbound peers.  This reduces
   850  	// fingerprinting attacks.
   851  	if !p.Inbound() {
   852  		return
   853  	}
   854  
   855  	// Get the current known addresses from the address manager.
   856  	addrCache := sp.server.addrManager.AddressCache()
   857  
   858  	// Push the addresses.
   859  	sp.pushAddrMsg(addrCache)
   860  }
   861  
   862  // OnAddr is invoked when a peer receives an addr bitcoin message and is
   863  // used to notify the server about advertised addresses.
   864  func (sp *serverPeer) OnAddr(p *peer.Peer, msg *wire.MsgAddr) {
   865  	// Ignore addresses when running on the simulation test network.  This
   866  	// helps prevent the network from becoming another public test network
   867  	// since it will not be able to learn about other peers that have not
   868  	// specifically been provided.
   869  	if cfg.SimNet {
   870  		return
   871  	}
   872  
   873  	// Ignore old style addresses which don't include a timestamp.
   874  	if p.ProtocolVersion() < wire.NetAddressTimeVersion {
   875  		return
   876  	}
   877  
   878  	// A message that has no addresses is invalid.
   879  	if len(msg.AddrList) == 0 {
   880  		peerLog.Errorf("Command [%s] from %s does not contain any addresses",
   881  			msg.Command(), p)
   882  		p.Disconnect()
   883  		return
   884  	}
   885  
   886  	for _, na := range msg.AddrList {
   887  		// Don't add more addresses if we're disconnecting.
   888  		if !p.Connected() {
   889  			return
   890  		}
   891  
   892  		// Set the timestamp to 5 days ago if it's more than 10 minutes
   893  		// in the future so this address is one of the first to be
   894  		// removed when space is needed.
   895  		now := time.Now()
   896  		if na.Timestamp.After(now.Add(time.Minute * 10)) {
   897  			na.Timestamp = now.Add(-1 * time.Hour * 24 * 5)
   898  		}
   899  
   900  		// Add address to known addresses for this peer.
   901  		sp.addKnownAddresses([]*wire.NetAddress{na})
   902  	}
   903  
   904  	// Add addresses to server address manager.  The address manager handles
   905  	// the details of things such as preventing duplicate addresses, max
   906  	// addresses, and last seen updates.
   907  	// XXX bitcoind gives a 2 hour time penalty here, do we want to do the
   908  	// same?
   909  	sp.server.addrManager.AddAddresses(msg.AddrList, p.NA())
   910  }
   911  
   912  // OnRead is invoked when a peer receives a message and it is used to update
   913  // the bytes received by the server.
   914  func (sp *serverPeer) OnRead(p *peer.Peer, bytesRead int, msg wire.Message, err error) {
   915  	sp.server.AddBytesReceived(uint64(bytesRead))
   916  }
   917  
   918  // OnWrite is invoked when a peer sends a message and it is used to update
   919  // the bytes sent by the server.
   920  func (sp *serverPeer) OnWrite(p *peer.Peer, bytesWritten int, msg wire.Message, err error) {
   921  	sp.server.AddBytesSent(uint64(bytesWritten))
   922  }
   923  
   924  // randomUint16Number returns a random uint16 in a specified input range.  Note
   925  // that the range is zero-based and excludes max itself; if you pass it 1800,
   926  // you will get values from 0 to 1799.
   927  func randomUint16Number(max uint16) uint16 {
   928  	// In order to avoid modulo bias and ensure every possible outcome in
   929  	// [0, max) has equal probability, the random number must be sampled
   930  	// from a random source that has a range limited to a multiple of the
   931  	// modulus.
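        	// For example, with max = 1800: limitRange = (65535/1800)*1800 = 64800,
        	// so any sample in [64800, 65536) is rejected and redrawn.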
   932  	var randomNumber uint16
   933  	var limitRange = (math.MaxUint16 / max) * max
   934  	for {
   935  		binary.Read(rand.Reader, binary.LittleEndian, &randomNumber)
   936  		if randomNumber < limitRange {
   937  			return (randomNumber % max)
   938  		}
   939  	}
   940  }
   941  
   942  // AddRebroadcastInventory adds 'iv' to the list of inventories to be
   943  // rebroadcast at random intervals until they show up in a block.
   944  func (s *server) AddRebroadcastInventory(iv *wire.InvVect, data interface{}) {
   945  	// Ignore if shutting down.
   946  	if atomic.LoadInt32(&s.shutdown) != 0 {
   947  		return
   948  	}
   949  
   950  	s.modifyRebroadcastInv <- broadcastInventoryAdd{invVect: iv, data: data}
   951  }
   952  
   953  // RemoveRebroadcastInventory removes 'iv' from the list of items to be
   954  // rebroadcast if present.
   955  func (s *server) RemoveRebroadcastInventory(iv *wire.InvVect) {
   956  	// Ignore if shutting down.
   957  	if atomic.LoadInt32(&s.shutdown) != 0 {
   958  		return
   959  	}
   960  
   961  	s.modifyRebroadcastInv <- broadcastInventoryDel(iv)
   962  }
   963  
   964  // AnnounceNewTransactions generates and relays inventory vectors and notifies
   965  // both websocket and getblocktemplate long poll clients of the passed
   966  // transactions.  This function should be called whenever new transactions
   967  // are added to the mempool.
   968  func (s *server) AnnounceNewTransactions(newTxs []*godashutil.Tx) {
   969  	// Generate and relay inventory vectors for all newly accepted
   970  	// transactions into the memory pool due to the original being
   971  	// accepted.
   972  	for _, tx := range newTxs {
   973  		// Generate the inventory vector and relay it.
   974  		iv := wire.NewInvVect(wire.InvTypeTx, tx.Sha())
   975  		s.RelayInventory(iv, tx)
   976  
   977  		if s.rpcServer != nil {
   978  			// Notify websocket clients about mempool transactions.
   979  			s.rpcServer.ntfnMgr.NotifyMempoolTx(tx, true)
   980  
   981  			// Potentially notify any getblocktemplate long poll clients
   982  			// about stale block templates due to the new transaction.
   983  			s.rpcServer.gbtWorkState.NotifyMempoolTx(
   984  				s.txMemPool.LastUpdated())
   985  		}
   986  	}
   987  }
   988  
   989  // pushTxMsg sends a tx message for the provided transaction hash to the
   990  // connected peer.  An error is returned if the transaction hash is not known.
   991  func (s *server) pushTxMsg(sp *serverPeer, sha *wire.ShaHash, doneChan chan<- struct{}, waitChan <-chan struct{}) error {
   992  	// Attempt to fetch the requested transaction from the pool.  A
   993  	// call could be made to check for existence first, but simply trying
   994  	// to fetch a missing transaction results in the same behavior.
   995  	tx, err := s.txMemPool.FetchTransaction(sha)
   996  	if err != nil {
   997  		peerLog.Tracef("Unable to fetch tx %v from transaction "+
   998  			"pool: %v", sha, err)
   999  
  1000  		if doneChan != nil {
  1001  			doneChan <- struct{}{}
  1002  		}
  1003  		return err
  1004  	}
  1005  
  1006  	// Once we have fetched the data, wait for any previous operation to finish.
  1007  	if waitChan != nil {
  1008  		<-waitChan
  1009  	}
  1010  
  1011  	sp.QueueMessage(tx.MsgTx(), doneChan)
  1012  
  1013  	return nil
  1014  }
  1015  
  1016  // pushBlockMsg sends a block message for the provided block hash to the
  1017  // connected peer.  An error is returned if the block hash is not known.
  1018  func (s *server) pushBlockMsg(sp *serverPeer, hash *wire.ShaHash, doneChan chan<- struct{}, waitChan <-chan struct{}) error {
  1019  	// Fetch the raw block bytes from the database.
  1020  	var blockBytes []byte
  1021  	err := sp.server.db.View(func(dbTx database.Tx) error {
  1022  		var err error
  1023  		blockBytes, err = dbTx.FetchBlock(hash)
  1024  		return err
  1025  	})
  1026  	if err != nil {
  1027  		peerLog.Tracef("Unable to fetch requested block hash %v: %v",
  1028  			hash, err)
  1029  
  1030  		if doneChan != nil {
  1031  			doneChan <- struct{}{}
  1032  		}
  1033  		return err
  1034  	}
  1035  
  1036  	// Deserialize the block.
  1037  	var msgBlock wire.MsgBlock
  1038  	err = msgBlock.Deserialize(bytes.NewReader(blockBytes))
  1039  	if err != nil {
  1040  		peerLog.Tracef("Unable to deserialize requested block hash "+
  1041  			"%v: %v", hash, err)
  1042  
  1043  		if doneChan != nil {
  1044  			doneChan <- struct{}{}
  1045  		}
  1046  		return err
  1047  	}
  1048  
  1049  	// Once we have fetched the data, wait for any previous operation to finish.
  1050  	if waitChan != nil {
  1051  		<-waitChan
  1052  	}
  1053  
  1054  	// We only send the channel for this message if we aren't sending
  1055  	// an inv straight after.
  1056  	var dc chan<- struct{}
  1057  	continueHash := sp.continueHash
  1058  	sendInv := continueHash != nil && continueHash.IsEqual(hash)
  1059  	if !sendInv {
  1060  		dc = doneChan
  1061  	}
  1062  	sp.QueueMessage(&msgBlock, dc)
  1063  
  1064  	// When the peer requests the final block that was advertised in
  1065  	// response to a getblocks message which requested more blocks than
  1066  	// would fit into a single message, send it a new inventory message
  1067  	// to trigger it to issue another getblocks message for the next
  1068  	// batch of inventory.
  1069  	if sendInv {
  1070  		best := sp.server.blockManager.chain.BestSnapshot()
  1071  		invMsg := wire.NewMsgInvSizeHint(1)
  1072  		iv := wire.NewInvVect(wire.InvTypeBlock, best.Hash)
  1073  		invMsg.AddInvVect(iv)
  1074  		sp.QueueMessage(invMsg, doneChan)
  1075  		sp.continueHash = nil
  1076  	}
  1077  	return nil
  1078  }
  1079  
  1080  // pushMerkleBlockMsg sends a merkleblock message for the provided block hash to
  1081  // the connected peer.  Since a merkle block requires the peer to have a filter
  1082  // loaded, this call will simply be ignored if there is no filter loaded.  An
  1083  // error is returned if the block hash is not known.
  1084  func (s *server) pushMerkleBlockMsg(sp *serverPeer, hash *wire.ShaHash, doneChan chan<- struct{}, waitChan <-chan struct{}) error {
  1085  	// Do not send a response if the peer doesn't have a filter loaded.
  1086  	if !sp.filter.IsLoaded() {
  1087  		if doneChan != nil {
  1088  			doneChan <- struct{}{}
  1089  		}
  1090  		return nil
  1091  	}
  1092  
  1093  	// Fetch the raw block bytes from the database.
  1094  	blk, err := sp.server.blockManager.chain.BlockByHash(hash)
  1095  	if err != nil {
  1096  		peerLog.Tracef("Unable to fetch requested block hash %v: %v",
  1097  			hash, err)
  1098  
  1099  		if doneChan != nil {
  1100  			doneChan <- struct{}{}
  1101  		}
  1102  		return err
  1103  	}
  1104  
  1105  	// Generate a merkle block by filtering the requested block according
  1106  	// to the filter for the peer.
  1107  	merkle, matchedTxIndices := bloom.NewMerkleBlock(blk, sp.filter)
  1108  
  1109  	// Once we have fetched the data, wait for any previous operation to finish.
  1110  	if waitChan != nil {
  1111  		<-waitChan
  1112  	}
  1113  
  1114  	// Send the merkleblock.  Only send the done channel with this message
  1115  	// if no transactions will be sent afterwards.
  1116  	var dc chan<- struct{}
  1117  	if len(matchedTxIndices) == 0 {
  1118  		dc = doneChan
  1119  	}
  1120  	sp.QueueMessage(merkle, dc)
  1121  
  1122  	// Finally, send any matched transactions.
  1123  	blkTransactions := blk.MsgBlock().Transactions
  1124  	for i, txIndex := range matchedTxIndices {
  1125  		// Only send the done channel on the final transaction.
  1126  		var dc chan<- struct{}
  1127  		if i == len(matchedTxIndices)-1 {
  1128  			dc = doneChan
  1129  		}
  1130  		if txIndex < uint32(len(blkTransactions)) {
  1131  			sp.QueueMessage(blkTransactions[txIndex], dc)
  1132  		}
  1133  	}
  1134  
  1135  	return nil
  1136  }
  1137  
  1138  // handleUpdatePeerHeights updates the heights of all peers who were known to
  1139  // announce a block we recently accepted.
  1140  func (s *server) handleUpdatePeerHeights(state *peerState, umsg updatePeerHeightsMsg) {
  1141  	state.forAllPeers(func(sp *serverPeer) {
  1142  		// The origin peer should already have the updated height.
  1143  		if sp == umsg.originPeer {
  1144  			return
  1145  		}
  1146  
  1147  		// This is a pointer to the underlying memory which doesn't
  1148  		// change.
  1149  		latestBlkSha := sp.LastAnnouncedBlock()
  1150  
  1151  		// Skip this peer if it hasn't recently announced any new blocks.
  1152  		if latestBlkSha == nil {
  1153  			return
  1154  		}
  1155  
  1156  		// If the peer has recently announced a block, and this block
  1157  		// matches our newly accepted block, then update their block
  1158  		// height.
  1159  		if *latestBlkSha == *umsg.newSha {
  1160  			sp.UpdateLastBlockHeight(umsg.newHeight)
  1161  			sp.UpdateLastAnnouncedBlock(nil)
  1162  		}
  1163  	})
  1164  }
  1165  
  1166  // handleAddPeerMsg deals with adding new peers.  It is invoked from the
  1167  // peerHandler goroutine.
  1168  func (s *server) handleAddPeerMsg(state *peerState, sp *serverPeer) bool {
  1169  	if sp == nil {
  1170  		return false
  1171  	}
  1172  
  1173  	// Ignore new peers if we're shutting down.
  1174  	if atomic.LoadInt32(&s.shutdown) != 0 {
  1175  		srvrLog.Infof("New peer %s ignored - server is shutting down", sp)
  1176  		sp.Disconnect()
  1177  		return false
  1178  	}
  1179  
  1180  	// Disconnect banned peers.
  1181  	host, _, err := net.SplitHostPort(sp.Addr())
  1182  	if err != nil {
  1183  		srvrLog.Debugf("can't split hostport %v", err)
  1184  		sp.Disconnect()
  1185  		return false
  1186  	}
  1187  	if banEnd, ok := state.banned[host]; ok {
  1188  		if time.Now().Before(banEnd) {
  1189  			srvrLog.Debugf("Peer %s is banned for another %v - disconnecting",
  1190  				host, banEnd.Sub(time.Now()))
  1191  			sp.Disconnect()
  1192  			return false
  1193  		}
  1194  
  1195  		srvrLog.Infof("Peer %s is no longer banned", host)
  1196  		delete(state.banned, host)
  1197  	}
  1198  
  1199  	// TODO: Check for max peers from a single IP.
  1200  
  1201  	// Limit max outbound peers.
  1202  	if _, ok := state.pendingPeers[sp.Addr()]; ok {
  1203  		if state.OutboundCount() >= state.maxOutboundPeers {
  1204  			srvrLog.Infof("Max outbound peers reached [%d] - disconnecting "+
  1205  				"peer %s", state.maxOutboundPeers, sp)
  1206  			sp.Disconnect()
  1207  			return false
  1208  		}
  1209  	}
  1210  
  1211  	// Limit max number of total peers.
  1212  	if state.Count() >= cfg.MaxPeers {
  1213  		srvrLog.Infof("Max peers reached [%d] - disconnecting peer %s",
  1214  			cfg.MaxPeers, sp)
  1215  		sp.Disconnect()
  1216  		// TODO(oga) how to handle permanent peers here?
  1217  		// they should be rescheduled.
  1218  		return false
  1219  	}
  1220  
  1221  	// Add the new peer and start it.
  1222  	srvrLog.Debugf("New peer %s", sp)
  1223  	if sp.Inbound() {
  1224  		state.inboundPeers[sp.ID()] = sp
  1225  	} else {
  1226  		state.outboundGroups[addrmgr.GroupKey(sp.NA())]++
  1227  		if sp.persistent {
  1228  			state.persistentPeers[sp.ID()] = sp
  1229  		} else {
  1230  			state.outboundPeers[sp.ID()] = sp
  1231  		}
  1232  		// Remove from pending peers.
  1233  		delete(state.pendingPeers, sp.Addr())
  1234  	}
  1235  
  1236  	return true
  1237  }
  1238  
  1239  // handleDonePeerMsg deals with peers that have signalled they are done.  It is
  1240  // invoked from the peerHandler goroutine.
  1241  func (s *server) handleDonePeerMsg(state *peerState, sp *serverPeer) {
  1242  	if _, ok := state.pendingPeers[sp.Addr()]; ok {
  1243  		delete(state.pendingPeers, sp.Addr())
  1244  		srvrLog.Debugf("Removed pending peer %s", sp)
  1245  		return
  1246  	}
  1247  
  1248  	var list map[int32]*serverPeer
  1249  	if sp.persistent {
  1250  		list = state.persistentPeers
  1251  	} else if sp.Inbound() {
  1252  		list = state.inboundPeers
  1253  	} else {
  1254  		list = state.outboundPeers
  1255  	}
  1256  	if _, ok := list[sp.ID()]; ok {
  1257  		// Issue an asynchronous reconnect if the peer was a
  1258  		// persistent outbound connection.
  1259  		if !sp.Inbound() && sp.persistent && atomic.LoadInt32(&s.shutdown) == 0 {
  1260  			// Retry peer
  1261  			sp2 := s.newOutboundPeer(sp.Addr(), sp.persistent)
  1262  			if sp2 != nil {
  1263  				go s.retryConn(sp2, false)
  1264  			}
  1265  		}
  1266  		if !sp.Inbound() && sp.VersionKnown() {
  1267  			state.outboundGroups[addrmgr.GroupKey(sp.NA())]--
  1268  		}
  1269  		delete(list, sp.ID())
  1270  		srvrLog.Debugf("Removed peer %s", sp)
  1271  		return
  1272  	}
  1273  
  1274  	// Update the address' last seen time if the peer has acknowledged
  1275  	// our version and has sent us its version as well.
  1276  	if sp.VerAckReceived() && sp.VersionKnown() && sp.NA() != nil {
  1277  		s.addrManager.Connected(sp.NA())
  1278  	}
  1279  
  1280  	// If we get here it means that either we didn't know about the peer
  1281  	// or we purposefully deleted it.
  1282  }
  1283  
  1284  // handleBanPeerMsg deals with banning peers.  It is invoked from the
  1285  // peerHandler goroutine.
  1286  func (s *server) handleBanPeerMsg(state *peerState, sp *serverPeer) {
  1287  	host, _, err := net.SplitHostPort(sp.Addr())
  1288  	if err != nil {
  1289  		srvrLog.Debugf("can't split ban peer %s %v", sp.Addr(), err)
  1290  		return
  1291  	}
  1292  	direction := directionString(sp.Inbound())
  1293  	srvrLog.Infof("Banned peer %s (%s) for %v", host, direction,
  1294  		cfg.BanDuration)
  1295  	state.banned[host] = time.Now().Add(cfg.BanDuration)
  1296  }
  1297  
  1298  // handleRelayInvMsg deals with relaying inventory to peers that are not already
  1299  // known to have it.  It is invoked from the peerHandler goroutine.
  1300  func (s *server) handleRelayInvMsg(state *peerState, msg relayMsg) {
  1301  	state.forAllPeers(func(sp *serverPeer) {
  1302  		if !sp.Connected() {
  1303  			return
  1304  		}
  1305  
  1306  		// If the inventory is a block and the peer prefers headers,
  1307  		// generate and send a headers message instead of an inventory
  1308  		// message.
  1309  		if msg.invVect.Type == wire.InvTypeBlock && sp.WantsHeaders() {
  1310  			blockHeader, ok := msg.data.(wire.BlockHeader)
  1311  			if !ok {
  1312  				peerLog.Warnf("Underlying data for headers" +
  1313  					" is not a block header")
  1314  				return
  1315  			}
  1316  			msgHeaders := wire.NewMsgHeaders()
  1317  			if err := msgHeaders.AddBlockHeader(&blockHeader); err != nil {
  1318  				peerLog.Errorf("Failed to add block"+
  1319  					" header: %v", err)
  1320  				return
  1321  			}
  1322  			sp.QueueMessage(msgHeaders, nil)
  1323  			return
  1324  		}
  1325  
  1326  		if msg.invVect.Type == wire.InvTypeTx {
  1327  			// Don't relay the transaction to the peer when it has
  1328  			// transaction relaying disabled.
  1329  			if sp.relayTxDisabled() {
  1330  				return
  1331  			}
  1332  			// Don't relay the transaction if there is a bloom
  1333  			// filter loaded and the transaction doesn't match it.
  1334  			if sp.filter.IsLoaded() {
  1335  				tx, ok := msg.data.(*godashutil.Tx)
  1336  				if !ok {
  1337  					peerLog.Warnf("Underlying data for tx" +
  1338  						" inv relay is not a transaction")
  1339  					return
  1340  				}
  1341  
  1342  				if !sp.filter.MatchTxAndUpdate(tx) {
  1343  					return
  1344  				}
  1345  			}
  1346  		}
  1347  
  1348  		// Queue the inventory to be relayed with the next batch.
  1349  		// It will be ignored if the peer is already known to
  1350  		// have the inventory.
  1351  		sp.QueueInventory(msg.invVect)
  1352  	})
  1353  }
  1354  
  1355  // handleBroadcastMsg deals with broadcasting messages to peers.  It is invoked
  1356  // from the peerHandler goroutine.
  1357  func (s *server) handleBroadcastMsg(state *peerState, bmsg *broadcastMsg) {
  1358  	state.forAllPeers(func(sp *serverPeer) {
  1359  		if !sp.Connected() {
  1360  			return
  1361  		}
  1362  
  1363  		for _, ep := range bmsg.excludePeers {
  1364  			if sp == ep {
  1365  				return
  1366  			}
  1367  		}
  1368  
  1369  		sp.QueueMessage(bmsg.message, nil)
  1370  	})
  1371  }
  1372  
  1373  type getConnCountMsg struct {
  1374  	reply chan int32
  1375  }
  1376  
  1377  type getPeersMsg struct {
  1378  	reply chan []*serverPeer
  1379  }
  1380  
  1381  type getAddedNodesMsg struct {
  1382  	reply chan []*serverPeer
  1383  }
  1384  
  1385  type disconnectNodeMsg struct {
  1386  	cmp   func(*serverPeer) bool
  1387  	reply chan error
  1388  }
  1389  
  1390  type connectNodeMsg struct {
  1391  	addr      string
  1392  	permanent bool
  1393  	reply     chan error
  1394  }
  1395  
  1396  type removeNodeMsg struct {
  1397  	cmp   func(*serverPeer) bool
  1398  	reply chan error
  1399  }
  1400  
  1401  // handleQuery is the central handler for all queries and commands from other
  1402  // goroutines related to peer state.
  1403  func (s *server) handleQuery(state *peerState, querymsg interface{}) {
  1404  	switch msg := querymsg.(type) {
  1405  	case getConnCountMsg:
  1406  		nconnected := int32(0)
  1407  		state.forAllPeers(func(sp *serverPeer) {
  1408  			if sp.Connected() {
  1409  				nconnected++
  1410  			}
  1411  		})
  1412  		msg.reply <- nconnected
  1413  
  1414  	case getPeersMsg:
  1415  		peers := make([]*serverPeer, 0, state.Count())
  1416  		state.forAllPeers(func(sp *serverPeer) {
  1417  			if !sp.Connected() {
  1418  				return
  1419  			}
  1420  			peers = append(peers, sp)
  1421  		})
  1422  		msg.reply <- peers
  1423  
  1424  	case connectNodeMsg:
  1425  		// XXX(oga) duplicate oneshots?
  1426  		for _, peer := range state.persistentPeers {
  1427  			if peer.Addr() == msg.addr {
  1428  				if msg.permanent {
  1429  					msg.reply <- errors.New("peer already connected")
  1430  				} else {
  1431  					msg.reply <- errors.New("peer exists as a permanent peer")
  1432  				}
  1433  				return
  1434  			}
  1435  		}
  1436  
  1437  		// TODO(oga) if too many, nuke a non-perm peer.
  1438  		sp := s.newOutboundPeer(msg.addr, msg.permanent)
  1439  		if sp != nil {
  1440  			go s.peerConnHandler(sp)
  1441  			msg.reply <- nil
  1442  		} else {
  1443  			msg.reply <- errors.New("failed to add peer")
  1444  		}
  1445  	case removeNodeMsg:
  1446  		found := disconnectPeer(state.persistentPeers, msg.cmp, func(sp *serverPeer) {
1447  			// Keep the group counts in sync since the peer
1448  			// is being removed from the list now.
  1449  			state.outboundGroups[addrmgr.GroupKey(sp.NA())]--
  1450  		})
  1451  
  1452  		if found {
  1453  			msg.reply <- nil
  1454  		} else {
  1455  			msg.reply <- errors.New("peer not found")
  1456  		}
  1457  	// Request a list of the persistent (added) peers.
  1458  	case getAddedNodesMsg:
1459  		// Respond with a slice of the relevant peers.
  1460  		peers := make([]*serverPeer, 0, len(state.persistentPeers))
  1461  		for _, sp := range state.persistentPeers {
  1462  			peers = append(peers, sp)
  1463  		}
  1464  		msg.reply <- peers
  1465  	case disconnectNodeMsg:
  1466  		// Check inbound peers. We pass a nil callback since we don't
  1467  		// require any additional actions on disconnect for inbound peers.
  1468  		found := disconnectPeer(state.inboundPeers, msg.cmp, nil)
  1469  		if found {
  1470  			msg.reply <- nil
  1471  			return
  1472  		}
  1473  
  1474  		// Check outbound peers.
  1475  		found = disconnectPeer(state.outboundPeers, msg.cmp, func(sp *serverPeer) {
1476  			// Keep the group counts in sync since the peer
1477  			// is being removed from the list now.
  1478  			state.outboundGroups[addrmgr.GroupKey(sp.NA())]--
  1479  		})
  1480  		if found {
  1481  			// If there are multiple outbound connections to the same
  1482  			// ip:port, continue disconnecting them all until no such
  1483  			// peers are found.
  1484  			for found {
  1485  				found = disconnectPeer(state.outboundPeers, msg.cmp, func(sp *serverPeer) {
  1486  					state.outboundGroups[addrmgr.GroupKey(sp.NA())]--
  1487  				})
  1488  			}
  1489  			msg.reply <- nil
  1490  			return
  1491  		}
  1492  
  1493  		msg.reply <- errors.New("peer not found")
  1494  	}
  1495  }
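
// The query channel follows a simple request/reply pattern: callers allocate
// a reply channel, send the message, and block on the answer. A minimal
// sketch, mirroring ConnectedCount and the other wrappers below:
//
//	replyChan := make(chan int32)
//	s.query <- getConnCountMsg{reply: replyChan}
//	count := <-replyChan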
  1496  
1497  // disconnectPeer attempts to drop the connection of a targeted peer in the
1498  // passed peer list. Targets are identified via usage of the passed
1499  // `compareFunc`, which should return `true` if the passed peer is the target
1500  // peer. This function returns true on success and false if the peer is unable
1501  // to be located. If the peer is found and the passed callback `whenFound`
1502  // isn't nil, it is called with the peer as the argument before the peer is
1503  // removed from the peerList and disconnected from the server.
  1504  func disconnectPeer(peerList map[int32]*serverPeer, compareFunc func(*serverPeer) bool, whenFound func(*serverPeer)) bool {
  1505  	for addr, peer := range peerList {
  1506  		if compareFunc(peer) {
  1507  			if whenFound != nil {
  1508  				whenFound(peer)
  1509  			}
  1510  
1511  			// Deleting here is ok because we are not continuing
1512  			// to iterate, so it won't corrupt the loop.
  1513  			delete(peerList, addr)
  1514  			peer.Disconnect()
  1515  			return true
  1516  		}
  1517  	}
  1518  	return false
  1519  }
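
// A usage sketch (assumed, mirroring the handleQuery cases above): dropping
// an outbound peer by a hypothetical id while keeping the group counts in
// sync.
//
//	found := disconnectPeer(state.outboundPeers,
//		func(sp *serverPeer) bool { return sp.ID() == id },
//		func(sp *serverPeer) {
//			state.outboundGroups[addrmgr.GroupKey(sp.NA())]--
//		})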
  1520  
  1521  // newPeerConfig returns the configuration for the given serverPeer.
  1522  func newPeerConfig(sp *serverPeer) *peer.Config {
  1523  	return &peer.Config{
  1524  		Listeners: peer.MessageListeners{
  1525  			OnVersion:     sp.OnVersion,
  1526  			OnMemPool:     sp.OnMemPool,
  1527  			OnTx:          sp.OnTx,
  1528  			OnBlock:       sp.OnBlock,
  1529  			OnInv:         sp.OnInv,
  1530  			OnHeaders:     sp.OnHeaders,
  1531  			OnGetData:     sp.OnGetData,
  1532  			OnGetBlocks:   sp.OnGetBlocks,
  1533  			OnGetHeaders:  sp.OnGetHeaders,
  1534  			OnFilterAdd:   sp.OnFilterAdd,
  1535  			OnFilterClear: sp.OnFilterClear,
  1536  			OnFilterLoad:  sp.OnFilterLoad,
  1537  			OnGetAddr:     sp.OnGetAddr,
  1538  			OnAddr:        sp.OnAddr,
  1539  			OnRead:        sp.OnRead,
  1540  			OnWrite:       sp.OnWrite,
  1541  
  1542  			// Note: The reference client currently bans peers that send alerts
  1543  			// not signed with its key.  We could verify against their key, but
  1544  			// since the reference client is currently unwilling to support
  1545  			// other implementations' alert messages, we will not relay theirs.
  1546  			OnAlert: nil,
  1547  		},
  1548  		NewestBlock:      sp.newestBlock,
  1549  		BestLocalAddress: sp.server.addrManager.GetBestLocalAddress,
  1550  		HostToNetAddress: sp.server.addrManager.HostToNetAddress,
  1551  		Proxy:            cfg.Proxy,
  1552  		UserAgentName:    userAgentName,
  1553  		UserAgentVersion: userAgentVersion,
  1554  		ChainParams:      sp.server.chainParams,
  1555  		Services:         sp.server.services,
  1556  		DisableRelayTx:   cfg.BlocksOnly,
  1557  		ProtocolVersion:  wire.SendHeadersVersion,
  1558  	}
  1559  }
  1560  
  1561  // listenHandler is the main listener which accepts incoming connections for the
  1562  // server.  It must be run as a goroutine.
  1563  func (s *server) listenHandler(listener net.Listener) {
  1564  	srvrLog.Infof("Server listening on %s", listener.Addr())
  1565  	for atomic.LoadInt32(&s.shutdown) == 0 {
  1566  		conn, err := listener.Accept()
  1567  		if err != nil {
  1568  			// Only log the error if we're not forcibly shutting down.
  1569  			if atomic.LoadInt32(&s.shutdown) == 0 {
  1570  				srvrLog.Errorf("Can't accept connection: %v", err)
  1571  			}
  1572  			continue
  1573  		}
  1574  		sp := newServerPeer(s, false)
  1575  		sp.Peer = peer.NewInboundPeer(newPeerConfig(sp))
  1576  		go s.peerDoneHandler(sp)
  1577  		sp.Connect(conn)
  1578  	}
  1579  	s.wg.Done()
  1580  	srvrLog.Tracef("Listener handler done for %s", listener.Addr())
  1581  }
  1582  
  1583  // seedFromDNS uses DNS seeding to populate the address manager with peers.
  1584  func (s *server) seedFromDNS() {
  1585  	// Nothing to do if DNS seeding is disabled.
  1586  	if cfg.DisableDNSSeed {
  1587  		return
  1588  	}
  1589  
  1590  	for _, seeder := range activeNetParams.DNSSeeds {
  1591  		go func(seeder string) {
  1592  			randSource := mrand.New(mrand.NewSource(time.Now().UnixNano()))
  1593  
  1594  			seedpeers, err := dnsDiscover(seeder)
  1595  			if err != nil {
  1596  				discLog.Infof("DNS discovery failed on seed %s: %v", seeder, err)
  1597  				return
  1598  			}
  1599  			numPeers := len(seedpeers)
  1600  
  1601  			discLog.Infof("%d addresses found from DNS seed %s", numPeers, seeder)
  1602  
  1603  			if numPeers == 0 {
  1604  				return
  1605  			}
  1606  			addresses := make([]*wire.NetAddress, len(seedpeers))
  1607  			// if this errors then we have *real* problems
  1608  			intPort, _ := strconv.Atoi(activeNetParams.DefaultPort)
  1609  			for i, peer := range seedpeers {
  1610  				addresses[i] = new(wire.NetAddress)
  1611  				addresses[i].SetAddress(peer, uint16(intPort))
  1612  				// bitcoind seeds with addresses from
  1613  				// a time randomly selected between 3
  1614  				// and 7 days ago.
  1615  				addresses[i].Timestamp = time.Now().Add(-1 *
  1616  					time.Second * time.Duration(secondsIn3Days+
  1617  					randSource.Int31n(secondsIn4Days)))
  1618  			}
  1619  
1620  			// Bitcoind uses a lookup of the DNS seeder here. This
1621  			// is rather strange since the values looked up by the
1622  			// DNS seed lookups will vary quite a lot.
1623  			// To replicate this behaviour we put all addresses as
1624  			// having come from the first one.
  1625  			s.addrManager.AddAddresses(addresses, addresses[0])
  1626  		}(seeder)
  1627  	}
  1628  }
  1629  
1630  // newOutboundPeer initializes a new outbound peer and sets up the message
  1631  // listeners.
  1632  func (s *server) newOutboundPeer(addr string, persistent bool) *serverPeer {
  1633  	sp := newServerPeer(s, persistent)
  1634  	p, err := peer.NewOutboundPeer(newPeerConfig(sp), addr)
  1635  	if err != nil {
  1636  		srvrLog.Errorf("Cannot create outbound peer %s: %v", addr, err)
  1637  		return nil
  1638  	}
  1639  	sp.Peer = p
  1640  	go s.peerDoneHandler(sp)
  1641  	return sp
  1642  }
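
// Usage sketch (assumed): connecting the returned peer is the caller's
// responsibility, typically via peerConnHandler for one-shot peers or
// retryConn for persistent ones, with the address here hypothetical:
//
//	if sp := s.newOutboundPeer("127.0.0.1:8333", false); sp != nil {
//		go s.peerConnHandler(sp)
//	}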
  1643  
  1644  // peerConnHandler handles peer connections. It must be run in a goroutine.
  1645  func (s *server) peerConnHandler(sp *serverPeer) {
  1646  	err := s.establishConn(sp)
  1647  	if err != nil {
  1648  		srvrLog.Debugf("Failed to connect to %s: %v", sp.Addr(), err)
  1649  		sp.Disconnect()
  1650  	}
  1651  }
  1652  
1653  // peerDoneHandler handles peer disconnects by notifying the server that it's
  1654  // done.
  1655  func (s *server) peerDoneHandler(sp *serverPeer) {
  1656  	sp.WaitForDisconnect()
  1657  	s.donePeers <- sp
  1658  
  1659  	// Only tell block manager we are gone if we ever told it we existed.
  1660  	if sp.VersionKnown() {
  1661  		s.blockManager.DonePeer(sp)
  1662  	}
  1663  	close(sp.quit)
  1664  }
  1665  
  1666  // establishConn establishes a connection to the peer.
  1667  func (s *server) establishConn(sp *serverPeer) error {
  1668  	srvrLog.Debugf("Attempting to connect to %s", sp.Addr())
  1669  	conn, err := btcdDial("tcp", sp.Addr())
  1670  	if err != nil {
  1671  		return err
  1672  	}
  1673  	sp.Connect(conn)
  1674  	s.addrManager.Attempt(sp.NA())
  1675  	return nil
  1676  }
  1677  
  1678  // retryConn retries connection to the peer after the given duration.  It must
  1679  // be run as a goroutine.
  1680  func (s *server) retryConn(sp *serverPeer, initialAttempt bool) {
  1681  	retryDuration := connectionRetryInterval
  1682  	for {
  1683  		if initialAttempt {
  1684  			retryDuration = 0
  1685  			initialAttempt = false
  1686  		} else {
  1687  			srvrLog.Debugf("Retrying connection to %s in %s", sp.Addr(),
  1688  				retryDuration)
  1689  		}
  1690  		select {
  1691  		case <-time.After(retryDuration):
  1692  			err := s.establishConn(sp)
  1693  			if err != nil {
  1694  				retryDuration += connectionRetryInterval
  1695  				if retryDuration > maxConnectionRetryInterval {
  1696  					retryDuration = maxConnectionRetryInterval
  1697  				}
  1698  				continue
  1699  			}
  1700  			return
  1701  
  1702  		case <-sp.quit:
  1703  			return
  1704  
  1705  		case <-s.quit:
  1706  			return
  1707  		}
  1708  	}
  1709  }
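
// With the constants above this yields a linear backoff: the first retry
// waits 5 seconds, the second 10 seconds, the third 15 seconds, and so on,
// growing by connectionRetryInterval per failure until it is capped at
// maxConnectionRetryInterval (5 minutes).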
  1710  
  1711  // peerHandler is used to handle peer operations such as adding and removing
  1712  // peers to and from the server, banning peers, and broadcasting messages to
  1713  // peers.  It must be run in a goroutine.
  1714  func (s *server) peerHandler() {
  1715  	// Start the address manager and block manager, both of which are needed
  1716  	// by peers.  This is done here since their lifecycle is closely tied
1717  	// to this handler and rather than adding more channels to synchronize
  1718  	// things, it's easier and slightly faster to simply start and stop them
  1719  	// in this handler.
  1720  	s.addrManager.Start()
  1721  	s.blockManager.Start()
  1722  
  1723  	srvrLog.Tracef("Starting peer handler")
  1724  
  1725  	state := &peerState{
  1726  		pendingPeers:     make(map[string]*serverPeer),
  1727  		inboundPeers:     make(map[int32]*serverPeer),
  1728  		persistentPeers:  make(map[int32]*serverPeer),
  1729  		outboundPeers:    make(map[int32]*serverPeer),
  1730  		banned:           make(map[string]time.Time),
  1731  		maxOutboundPeers: defaultMaxOutbound,
  1732  		outboundGroups:   make(map[string]int),
  1733  	}
  1734  	if cfg.MaxPeers < state.maxOutboundPeers {
  1735  		state.maxOutboundPeers = cfg.MaxPeers
  1736  	}
  1737  	// Add peers discovered through DNS to the address manager.
  1738  	s.seedFromDNS()
  1739  
  1740  	// Start up persistent peers.
  1741  	permanentPeers := cfg.ConnectPeers
  1742  	if len(permanentPeers) == 0 {
  1743  		permanentPeers = cfg.AddPeers
  1744  	}
  1745  	for _, addr := range permanentPeers {
  1746  		sp := s.newOutboundPeer(addr, true)
  1747  		if sp != nil {
  1748  			go s.retryConn(sp, true)
  1749  		}
  1750  	}
  1751  
  1752  	// if nothing else happens, wake us up soon.
  1753  	time.AfterFunc(10*time.Second, func() { s.wakeup <- struct{}{} })
  1754  
  1755  out:
  1756  	for {
  1757  		select {
  1758  		// New peers connected to the server.
  1759  		case p := <-s.newPeers:
  1760  			s.handleAddPeerMsg(state, p)
  1761  
  1762  		// Disconnected peers.
  1763  		case p := <-s.donePeers:
  1764  			s.handleDonePeerMsg(state, p)
  1765  
  1766  		// Block accepted in mainchain or orphan, update peer height.
  1767  		case umsg := <-s.peerHeightsUpdate:
  1768  			s.handleUpdatePeerHeights(state, umsg)
  1769  
  1770  		// Peer to ban.
  1771  		case p := <-s.banPeers:
  1772  			s.handleBanPeerMsg(state, p)
  1773  
  1774  		// New inventory to potentially be relayed to other peers.
  1775  		case invMsg := <-s.relayInv:
  1776  			s.handleRelayInvMsg(state, invMsg)
  1777  
  1778  		// Message to broadcast to all connected peers except those
  1779  		// which are excluded by the message.
  1780  		case bmsg := <-s.broadcast:
  1781  			s.handleBroadcastMsg(state, &bmsg)
  1782  
  1783  		// Used by timers below to wake us back up.
  1784  		case <-s.wakeup:
  1785  			// this page left intentionally blank
  1786  
  1787  		case qmsg := <-s.query:
  1788  			s.handleQuery(state, qmsg)
  1789  
  1790  		case <-s.quit:
  1791  			// Disconnect all peers on server shutdown.
  1792  			state.forAllPeers(func(sp *serverPeer) {
  1793  				srvrLog.Tracef("Shutdown peer %s", sp)
  1794  				sp.Disconnect()
  1795  			})
  1796  			break out
  1797  		}
  1798  
  1799  		// Don't try to connect to more peers when running on the
  1800  		// simulation test network.  The simulation network is only
  1801  		// intended to connect to specified peers and actively avoid
  1802  		// advertising and connecting to discovered peers.
  1803  		if cfg.SimNet {
  1804  			continue
  1805  		}
  1806  
1807  		// Only try to connect to more peers if we actually need more.
  1808  		if !state.NeedMoreOutbound() || len(cfg.ConnectPeers) > 0 ||
  1809  			atomic.LoadInt32(&s.shutdown) != 0 {
  1810  			state.forPendingPeers(func(sp *serverPeer) {
  1811  				srvrLog.Tracef("Shutdown peer %s", sp)
  1812  				sp.Disconnect()
  1813  			})
  1814  			continue
  1815  		}
  1816  		tries := 0
  1817  		for state.NeedMoreOutbound() &&
  1818  			state.NeedMoreTries() &&
  1819  			atomic.LoadInt32(&s.shutdown) == 0 {
  1820  			addr := s.addrManager.GetAddress("any")
  1821  			if addr == nil {
  1822  				break
  1823  			}
  1824  			key := addrmgr.GroupKey(addr.NetAddress())
  1825  			// Address will not be invalid, local or unroutable
  1826  			// because addrmanager rejects those on addition.
  1827  			// Just check that we don't already have an address
  1828  			// in the same group so that we are not connecting
  1829  			// to the same network segment at the expense of
  1830  			// others.
  1831  			if state.outboundGroups[key] != 0 {
  1832  				break
  1833  			}
  1834  
  1835  			// Check that we don't have a pending connection to this addr.
  1836  			addrStr := addrmgr.NetAddressKey(addr.NetAddress())
  1837  			if _, ok := state.pendingPeers[addrStr]; ok {
  1838  				continue
  1839  			}
  1840  
  1841  			tries++
  1842  			// After 100 bad tries exit the loop and we'll try again
  1843  			// later.
  1844  			if tries > 100 {
  1845  				break
  1846  			}
  1847  
  1848  			// XXX if we have limited that address skip
  1849  
1850  			// Only allow recent nodes (10 mins) after we have
1851  			// failed 30 times.
  1852  			if tries < 30 && time.Now().Sub(addr.LastAttempt()) < 10*time.Minute {
  1853  				continue
  1854  			}
  1855  
1856  			// Allow non-default ports after 50 failed tries.
  1857  			if fmt.Sprintf("%d", addr.NetAddress().Port) !=
  1858  				activeNetParams.DefaultPort && tries < 50 {
  1859  				continue
  1860  			}
  1861  
  1862  			tries = 0
  1863  			sp := s.newOutboundPeer(addrStr, false)
  1864  			if sp != nil {
  1865  				go s.peerConnHandler(sp)
  1866  				state.pendingPeers[sp.Addr()] = sp
  1867  			}
  1868  		}
  1869  
  1870  		// We need more peers, wake up in ten seconds and try again.
  1871  		if state.NeedMoreOutbound() {
  1872  			time.AfterFunc(10*time.Second, func() {
  1873  				s.wakeup <- struct{}{}
  1874  			})
  1875  		}
  1876  	}
  1877  
  1878  	s.blockManager.Stop()
  1879  	s.addrManager.Stop()
  1880  
  1881  	// Drain channels before exiting so nothing is left waiting around
  1882  	// to send.
  1883  cleanup:
  1884  	for {
  1885  		select {
  1886  		case <-s.newPeers:
  1887  		case <-s.donePeers:
  1888  		case <-s.peerHeightsUpdate:
  1889  		case <-s.relayInv:
  1890  		case <-s.broadcast:
  1891  		case <-s.wakeup:
  1892  		case <-s.query:
  1893  		default:
  1894  			break cleanup
  1895  		}
  1896  	}
  1897  	s.wg.Done()
  1898  	srvrLog.Tracef("Peer handler done")
  1899  }
  1900  
  1901  // AddPeer adds a new peer that has already been connected to the server.
  1902  func (s *server) AddPeer(sp *serverPeer) {
  1903  	s.newPeers <- sp
  1904  }
  1905  
  1906  // BanPeer bans a peer that has already been connected to the server by ip.
  1907  func (s *server) BanPeer(sp *serverPeer) {
  1908  	s.banPeers <- sp
  1909  }
  1910  
  1911  // RelayInventory relays the passed inventory vector to all connected peers
  1912  // that are not already known to have it.
  1913  func (s *server) RelayInventory(invVect *wire.InvVect, data interface{}) {
  1914  	s.relayInv <- relayMsg{invVect: invVect, data: data}
  1915  }
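
// Usage sketch (assumed): when relaying a transaction, the data must be the
// *godashutil.Tx itself so handleRelayInvMsg can match it against loaded
// bloom filters; tx is a hypothetical *godashutil.Tx here:
//
//	iv := wire.NewInvVect(wire.InvTypeTx, tx.Sha())
//	s.RelayInventory(iv, tx)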
  1916  
  1917  // BroadcastMessage sends msg to all peers currently connected to the server
  1918  // except those in the passed peers to exclude.
  1919  func (s *server) BroadcastMessage(msg wire.Message, exclPeers ...*serverPeer) {
  1920  	// XXX: Need to determine if this is an alert that has already been
  1921  	// broadcast and refrain from broadcasting again.
  1922  	bmsg := broadcastMsg{message: msg, excludePeers: exclPeers}
  1923  	s.broadcast <- bmsg
  1924  }
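
// Usage sketch (assumed): announce a message to everyone except the peer it
// came from, where msgAddr (*wire.MsgAddr) and sourcePeer (*serverPeer) are
// hypothetical names:
//
//	s.BroadcastMessage(msgAddr, sourcePeer)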
  1925  
  1926  // ConnectedCount returns the number of currently connected peers.
  1927  func (s *server) ConnectedCount() int32 {
  1928  	replyChan := make(chan int32)
  1929  
  1930  	s.query <- getConnCountMsg{reply: replyChan}
  1931  
  1932  	return <-replyChan
  1933  }
  1934  
1935  // AddedNodeInfo returns a slice of serverPeer structures describing the
1936  // persistent (added) peers.
  1937  func (s *server) AddedNodeInfo() []*serverPeer {
  1938  	replyChan := make(chan []*serverPeer)
  1939  	s.query <- getAddedNodesMsg{reply: replyChan}
  1940  	return <-replyChan
  1941  }
  1942  
1943  // Peers returns a slice of all connected peers.
  1944  func (s *server) Peers() []*serverPeer {
  1945  	replyChan := make(chan []*serverPeer)
  1946  
  1947  	s.query <- getPeersMsg{reply: replyChan}
  1948  
  1949  	return <-replyChan
  1950  }
  1951  
  1952  // DisconnectNodeByAddr disconnects a peer by target address. Both outbound and
  1953  // inbound nodes will be searched for the target node. An error message will
  1954  // be returned if the peer was not found.
  1955  func (s *server) DisconnectNodeByAddr(addr string) error {
  1956  	replyChan := make(chan error)
  1957  
  1958  	s.query <- disconnectNodeMsg{
  1959  		cmp:   func(sp *serverPeer) bool { return sp.Addr() == addr },
  1960  		reply: replyChan,
  1961  	}
  1962  
  1963  	return <-replyChan
  1964  }
  1965  
  1966  // DisconnectNodeByID disconnects a peer by target node id. Both outbound and
  1967  // inbound nodes will be searched for the target node. An error message will be
  1968  // returned if the peer was not found.
  1969  func (s *server) DisconnectNodeByID(id int32) error {
  1970  	replyChan := make(chan error)
  1971  
  1972  	s.query <- disconnectNodeMsg{
  1973  		cmp:   func(sp *serverPeer) bool { return sp.ID() == id },
  1974  		reply: replyChan,
  1975  	}
  1976  
  1977  	return <-replyChan
  1978  }
  1979  
  1980  // RemoveNodeByAddr removes a peer from the list of persistent peers if
  1981  // present. An error will be returned if the peer was not found.
  1982  func (s *server) RemoveNodeByAddr(addr string) error {
  1983  	replyChan := make(chan error)
  1984  
  1985  	s.query <- removeNodeMsg{
  1986  		cmp:   func(sp *serverPeer) bool { return sp.Addr() == addr },
  1987  		reply: replyChan,
  1988  	}
  1989  
  1990  	return <-replyChan
  1991  }
  1992  
  1993  // RemoveNodeByID removes a peer by node ID from the list of persistent peers
  1994  // if present. An error will be returned if the peer was not found.
  1995  func (s *server) RemoveNodeByID(id int32) error {
  1996  	replyChan := make(chan error)
  1997  
  1998  	s.query <- removeNodeMsg{
  1999  		cmp:   func(sp *serverPeer) bool { return sp.ID() == id },
  2000  		reply: replyChan,
  2001  	}
  2002  
  2003  	return <-replyChan
  2004  }
  2005  
  2006  // ConnectNode adds `addr' as a new outbound peer. If permanent is true then the
  2007  // peer will be persistent and reconnect if the connection is lost.
  2008  // It is an error to call this with an already existing peer.
  2009  func (s *server) ConnectNode(addr string, permanent bool) error {
  2010  	replyChan := make(chan error)
  2011  
  2012  	s.query <- connectNodeMsg{addr: addr, permanent: permanent, reply: replyChan}
  2013  
  2014  	return <-replyChan
  2015  }
  2016  
  2017  // AddBytesSent adds the passed number of bytes to the total bytes sent counter
  2018  // for the server.  It is safe for concurrent access.
  2019  func (s *server) AddBytesSent(bytesSent uint64) {
  2020  	atomic.AddUint64(&s.bytesSent, bytesSent)
  2021  }
  2022  
  2023  // AddBytesReceived adds the passed number of bytes to the total bytes received
  2024  // counter for the server.  It is safe for concurrent access.
  2025  func (s *server) AddBytesReceived(bytesReceived uint64) {
  2026  	atomic.AddUint64(&s.bytesReceived, bytesReceived)
  2027  }
  2028  
  2029  // NetTotals returns the sum of all bytes received and sent across the network
  2030  // for all peers.  It is safe for concurrent access.
  2031  func (s *server) NetTotals() (uint64, uint64) {
  2032  	return atomic.LoadUint64(&s.bytesReceived),
  2033  		atomic.LoadUint64(&s.bytesSent)
  2034  }
  2035  
2036  // UpdatePeerHeights updates the heights of all peers who have announced
  2037  // the latest connected main chain block, or a recognized orphan. These height
  2038  // updates allow us to dynamically refresh peer heights, ensuring sync peer
  2039  // selection has access to the latest block heights for each peer.
  2040  func (s *server) UpdatePeerHeights(latestBlkSha *wire.ShaHash, latestHeight int32, updateSource *serverPeer) {
  2041  	s.peerHeightsUpdate <- updatePeerHeightsMsg{
  2042  		newSha:     latestBlkSha,
  2043  		newHeight:  latestHeight,
  2044  		originPeer: updateSource,
  2045  	}
  2046  }
  2047  
  2048  // rebroadcastHandler keeps track of user submitted inventories that we have
  2049  // sent out but have not yet made it into a block. We periodically rebroadcast
  2050  // them in case our peers restarted or otherwise lost track of them.
  2051  func (s *server) rebroadcastHandler() {
  2052  	// Wait 5 min before first tx rebroadcast.
  2053  	timer := time.NewTimer(5 * time.Minute)
  2054  	pendingInvs := make(map[wire.InvVect]interface{})
  2055  
  2056  out:
  2057  	for {
  2058  		select {
  2059  		case riv := <-s.modifyRebroadcastInv:
  2060  			switch msg := riv.(type) {
  2061  			// Incoming InvVects are added to our map of RPC txs.
  2062  			case broadcastInventoryAdd:
  2063  				pendingInvs[*msg.invVect] = msg.data
  2064  
  2065  			// When an InvVect has been added to a block, we can
  2066  			// now remove it, if it was present.
  2067  			case broadcastInventoryDel:
  2068  				if _, ok := pendingInvs[*msg]; ok {
  2069  					delete(pendingInvs, *msg)
  2070  				}
  2071  			}
  2072  
  2073  		case <-timer.C:
  2074  			// Any inventory we have has not made it into a block
  2075  			// yet. We periodically resubmit them until they have.
  2076  			for iv, data := range pendingInvs {
  2077  				ivCopy := iv
  2078  				s.RelayInventory(&ivCopy, data)
  2079  			}
  2080  
2081  			// Process again at a random time up to 30 minutes
2082  			// (1800 seconds) in the future.
  2083  			timer.Reset(time.Second *
  2084  				time.Duration(randomUint16Number(1800)))
  2085  
  2086  		case <-s.quit:
  2087  			break out
  2088  		}
  2089  	}
  2090  
  2091  	timer.Stop()
  2092  
  2093  	// Drain channels before exiting so nothing is left waiting around
  2094  	// to send.
  2095  cleanup:
  2096  	for {
  2097  		select {
  2098  		case <-s.modifyRebroadcastInv:
  2099  		default:
  2100  			break cleanup
  2101  		}
  2102  	}
  2103  	s.wg.Done()
  2104  }
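
// Sketch (assumed): other goroutines feed this handler through
// s.modifyRebroadcastInv using the broadcastInventoryAdd/Del wrappers, e.g.
// to track a user-submitted transaction (iv, tx hypothetical) until it
// confirms:
//
//	s.modifyRebroadcastInv <- broadcastInventoryAdd(relayMsg{invVect: iv, data: tx})
//
// and to drop it once it has made it into a block:
//
//	s.modifyRebroadcastInv <- broadcastInventoryDel(iv)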
  2105  
  2106  // Start begins accepting connections from peers.
  2107  func (s *server) Start() {
  2108  	// Already started?
  2109  	if atomic.AddInt32(&s.started, 1) != 1 {
  2110  		return
  2111  	}
  2112  
  2113  	srvrLog.Trace("Starting server")
  2114  
  2115  	// Start all the listeners.  There will not be any if listening is
  2116  	// disabled.
  2117  	for _, listener := range s.listeners {
  2118  		s.wg.Add(1)
  2119  		go s.listenHandler(listener)
  2120  	}
  2121  
  2122  	// Start the peer handler which in turn starts the address and block
  2123  	// managers.
  2124  	s.wg.Add(1)
  2125  	go s.peerHandler()
  2126  
  2127  	if s.nat != nil {
  2128  		s.wg.Add(1)
  2129  		go s.upnpUpdateThread()
  2130  	}
  2131  
  2132  	if !cfg.DisableRPC {
  2133  		s.wg.Add(1)
  2134  
2135  		// Start the rebroadcastHandler, which ensures user txs received
2136  		// by the RPC server are rebroadcast until they are included in a block.
  2137  		go s.rebroadcastHandler()
  2138  
  2139  		s.rpcServer.Start()
  2140  	}
  2141  
  2142  	// Start the CPU miner if generation is enabled.
  2143  	if cfg.Generate {
  2144  		s.cpuMiner.Start()
  2145  	}
  2146  }
  2147  
  2148  // Stop gracefully shuts down the server by stopping and disconnecting all
  2149  // peers and the main listener.
  2150  func (s *server) Stop() error {
  2151  	// Make sure this only happens once.
  2152  	if atomic.AddInt32(&s.shutdown, 1) != 1 {
  2153  		srvrLog.Infof("Server is already in the process of shutting down")
  2154  		return nil
  2155  	}
  2156  
  2157  	srvrLog.Warnf("Server shutting down")
  2158  
  2159  	// Stop all the listeners.  There will not be any listeners if
  2160  	// listening is disabled.
  2161  	for _, listener := range s.listeners {
  2162  		err := listener.Close()
  2163  		if err != nil {
  2164  			return err
  2165  		}
  2166  	}
  2167  
  2168  	// Stop the CPU miner if needed
  2169  	s.cpuMiner.Stop()
  2170  
  2171  	// Shutdown the RPC server if it's not disabled.
  2172  	if !cfg.DisableRPC {
  2173  		s.rpcServer.Stop()
  2174  	}
  2175  
  2176  	// Signal the remaining goroutines to quit.
  2177  	close(s.quit)
  2178  	return nil
  2179  }
  2180  
  2181  // WaitForShutdown blocks until the main listener and peer handlers are stopped.
  2182  func (s *server) WaitForShutdown() {
  2183  	s.wg.Wait()
  2184  }
  2185  
  2186  // ScheduleShutdown schedules a server shutdown after the specified duration.
  2187  // It also dynamically adjusts how often to warn the server is going down based
  2188  // on remaining duration.
  2189  func (s *server) ScheduleShutdown(duration time.Duration) {
  2190  	// Don't schedule shutdown more than once.
  2191  	if atomic.AddInt32(&s.shutdownSched, 1) != 1 {
  2192  		return
  2193  	}
  2194  	srvrLog.Warnf("Server shutdown in %v", duration)
  2195  	go func() {
  2196  		remaining := duration
  2197  		tickDuration := dynamicTickDuration(remaining)
  2198  		done := time.After(remaining)
  2199  		ticker := time.NewTicker(tickDuration)
  2200  	out:
  2201  		for {
  2202  			select {
  2203  			case <-done:
  2204  				ticker.Stop()
  2205  				s.Stop()
  2206  				break out
  2207  			case <-ticker.C:
  2208  				remaining = remaining - tickDuration
  2209  				if remaining < time.Second {
  2210  					continue
  2211  				}
  2212  
  2213  				// Change tick duration dynamically based on remaining time.
  2214  				newDuration := dynamicTickDuration(remaining)
  2215  				if tickDuration != newDuration {
  2216  					tickDuration = newDuration
  2217  					ticker.Stop()
  2218  					ticker = time.NewTicker(tickDuration)
  2219  				}
  2220  				srvrLog.Warnf("Server shutdown in %v", remaining)
  2221  			}
  2222  		}
  2223  	}()
  2224  }
  2225  
  2226  // parseListeners splits the list of listen addresses passed in addrs into
  2227  // IPv4 and IPv6 slices and returns them.  This allows easy creation of the
  2228  // listeners on the correct interface "tcp4" and "tcp6".  It also properly
  2229  // detects addresses which apply to "all interfaces" and adds the address to
  2230  // both slices.
  2231  func parseListeners(addrs []string) ([]string, []string, bool, error) {
  2232  	ipv4ListenAddrs := make([]string, 0, len(addrs)*2)
  2233  	ipv6ListenAddrs := make([]string, 0, len(addrs)*2)
  2234  	haveWildcard := false
  2235  
  2236  	for _, addr := range addrs {
  2237  		host, _, err := net.SplitHostPort(addr)
  2238  		if err != nil {
  2239  			// Shouldn't happen due to already being normalized.
  2240  			return nil, nil, false, err
  2241  		}
  2242  
  2243  		// Empty host or host of * on plan9 is both IPv4 and IPv6.
  2244  		if host == "" || (host == "*" && runtime.GOOS == "plan9") {
  2245  			ipv4ListenAddrs = append(ipv4ListenAddrs, addr)
  2246  			ipv6ListenAddrs = append(ipv6ListenAddrs, addr)
  2247  			haveWildcard = true
  2248  			continue
  2249  		}
  2250  
  2251  		// Strip IPv6 zone id if present since net.ParseIP does not
  2252  		// handle it.
  2253  		zoneIndex := strings.LastIndex(host, "%")
  2254  		if zoneIndex > 0 {
  2255  			host = host[:zoneIndex]
  2256  		}
  2257  
  2258  		// Parse the IP.
  2259  		ip := net.ParseIP(host)
  2260  		if ip == nil {
  2261  			return nil, nil, false, fmt.Errorf("'%s' is not a "+
  2262  				"valid IP address", host)
  2263  		}
  2264  
2265  		// To4 returns nil when the IP is not an IPv4 address, so use
2266  		// this to determine the address type.
  2267  		if ip.To4() == nil {
  2268  			ipv6ListenAddrs = append(ipv6ListenAddrs, addr)
  2269  		} else {
  2270  			ipv4ListenAddrs = append(ipv4ListenAddrs, addr)
  2271  		}
  2272  	}
  2273  	return ipv4ListenAddrs, ipv6ListenAddrs, haveWildcard, nil
  2274  }
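
// Worked example (assumed inputs): the empty-host entry lands in both slices
// and sets the wildcard flag, while the others are split by address family:
//
//	ipv4, ipv6, wildcard, err := parseListeners(
//		[]string{":8333", "127.0.0.1:18333", "[::1]:8333"})
//	// ipv4 = [":8333", "127.0.0.1:18333"]
//	// ipv6 = [":8333", "[::1]:8333"]
//	// wildcard = true, err = nil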
  2275  
  2276  func (s *server) upnpUpdateThread() {
2277  	// Go off immediately to prevent code duplication; thereafter we renew
2278  	// the lease every 15 minutes.
  2279  	timer := time.NewTimer(0 * time.Second)
  2280  	lport, _ := strconv.ParseInt(activeNetParams.DefaultPort, 10, 16)
  2281  	first := true
  2282  out:
  2283  	for {
  2284  		select {
  2285  		case <-timer.C:
2286  			// TODO(oga) pick external port more cleverly
  2287  			// TODO(oga) know which ports we are listening to on an external net.
  2288  			// TODO(oga) if specific listen port doesn't work then ask for wildcard
  2289  			// listen port?
  2290  			// XXX this assumes timeout is in seconds.
  2291  			listenPort, err := s.nat.AddPortMapping("tcp", int(lport), int(lport),
  2292  				"btcd listen port", 20*60)
  2293  			if err != nil {
  2294  				srvrLog.Warnf("can't add UPnP port mapping: %v", err)
  2295  			}
  2296  			if first && err == nil {
  2297  				// TODO(oga): look this up periodically to see if upnp domain changed
  2298  				// and so did ip.
  2299  				externalip, err := s.nat.GetExternalAddress()
  2300  				if err != nil {
  2301  					srvrLog.Warnf("UPnP can't get external address: %v", err)
  2302  					continue out
  2303  				}
  2304  				na := wire.NewNetAddressIPPort(externalip, uint16(listenPort),
  2305  					s.services)
  2306  				err = s.addrManager.AddLocalAddress(na, addrmgr.UpnpPrio)
  2307  				if err != nil {
2308  					srvrLog.Warnf("can't add UPnP local address: %v", err) // XXX DeletePortMapping?
  2309  				}
  2310  				srvrLog.Warnf("Successfully bound via UPnP to %s", addrmgr.NetAddressKey(na))
  2311  				first = false
  2312  			}
  2313  			timer.Reset(time.Minute * 15)
  2314  		case <-s.quit:
  2315  			break out
  2316  		}
  2317  	}
  2318  
  2319  	timer.Stop()
  2320  
  2321  	if err := s.nat.DeletePortMapping("tcp", int(lport), int(lport)); err != nil {
  2322  		srvrLog.Warnf("unable to remove UPnP port mapping: %v", err)
  2323  	} else {
2324  		srvrLog.Debugf("successfully disestablished UPnP port mapping")
  2325  	}
  2326  
  2327  	s.wg.Done()
  2328  }
  2329  
  2330  // newServer returns a new btcd server configured to listen on addr for the
  2331  // bitcoin network type specified by chainParams.  Use start to begin accepting
  2332  // connections from peers.
  2333  func newServer(listenAddrs []string, db database.DB, chainParams *chaincfg.Params) (*server, error) {
  2334  	services := defaultServices
  2335  	if cfg.NoPeerBloomFilters {
  2336  		services &^= wire.SFNodeBloom
  2337  	}
  2338  
  2339  	amgr := addrmgr.New(cfg.DataDir, btcdLookup)
  2340  
  2341  	var listeners []net.Listener
  2342  	var nat NAT
  2343  	if !cfg.DisableListen {
  2344  		ipv4Addrs, ipv6Addrs, wildcard, err :=
  2345  			parseListeners(listenAddrs)
  2346  		if err != nil {
  2347  			return nil, err
  2348  		}
  2349  		listeners = make([]net.Listener, 0, len(ipv4Addrs)+len(ipv6Addrs))
  2350  		discover := true
  2351  		if len(cfg.ExternalIPs) != 0 {
  2352  			discover = false
  2353  			// if this fails we have real issues.
  2354  			port, _ := strconv.ParseUint(
  2355  				activeNetParams.DefaultPort, 10, 16)
  2356  
  2357  			for _, sip := range cfg.ExternalIPs {
  2358  				eport := uint16(port)
  2359  				host, portstr, err := net.SplitHostPort(sip)
  2360  				if err != nil {
  2361  					// no port, use default.
  2362  					host = sip
  2363  				} else {
  2364  					port, err := strconv.ParseUint(
  2365  						portstr, 10, 16)
  2366  					if err != nil {
2367  					srvrLog.Warnf("Cannot parse "+
  2368  							"port from %s for "+
  2369  							"externalip: %v", sip,
  2370  							err)
  2371  						continue
  2372  					}
  2373  					eport = uint16(port)
  2374  				}
  2375  				na, err := amgr.HostToNetAddress(host, eport,
  2376  					services)
  2377  				if err != nil {
  2378  					srvrLog.Warnf("Not adding %s as "+
  2379  						"externalip: %v", sip, err)
  2380  					continue
  2381  				}
  2382  
  2383  				err = amgr.AddLocalAddress(na, addrmgr.ManualPrio)
  2384  				if err != nil {
  2385  					amgrLog.Warnf("Skipping specified external IP: %v", err)
  2386  				}
  2387  			}
  2388  		} else if discover && cfg.Upnp {
  2389  			nat, err = Discover()
  2390  			if err != nil {
  2391  				srvrLog.Warnf("Can't discover upnp: %v", err)
  2392  			}
  2393  			// nil nat here is fine, just means no upnp on network.
  2394  		}
  2395  
  2396  		// TODO(oga) nonstandard port...
  2397  		if wildcard {
  2398  			port, err :=
  2399  				strconv.ParseUint(activeNetParams.DefaultPort,
  2400  					10, 16)
  2401  			if err != nil {
  2402  				// I can't think of a cleaner way to do this...
  2403  				goto nowc
  2404  			}
2405  			addrs, err := net.InterfaceAddrs()
2406  			if err != nil {
2407  				goto nowc
2408  			}
  2406  			for _, a := range addrs {
  2407  				ip, _, err := net.ParseCIDR(a.String())
  2408  				if err != nil {
  2409  					continue
  2410  				}
  2411  				na := wire.NewNetAddressIPPort(ip,
  2412  					uint16(port), services)
  2413  				if discover {
  2414  					err = amgr.AddLocalAddress(na, addrmgr.InterfacePrio)
  2415  					if err != nil {
  2416  						amgrLog.Debugf("Skipping local address: %v", err)
  2417  					}
  2418  				}
  2419  			}
  2420  		}
  2421  	nowc:
  2422  
  2423  		for _, addr := range ipv4Addrs {
  2424  			listener, err := net.Listen("tcp4", addr)
  2425  			if err != nil {
  2426  				srvrLog.Warnf("Can't listen on %s: %v", addr,
  2427  					err)
  2428  				continue
  2429  			}
  2430  			listeners = append(listeners, listener)
  2431  
  2432  			if discover {
  2433  				if na, err := amgr.DeserializeNetAddress(addr); err == nil {
  2434  					err = amgr.AddLocalAddress(na, addrmgr.BoundPrio)
  2435  					if err != nil {
  2436  						amgrLog.Warnf("Skipping bound address: %v", err)
  2437  					}
  2438  				}
  2439  			}
  2440  		}
  2441  
  2442  		for _, addr := range ipv6Addrs {
  2443  			listener, err := net.Listen("tcp6", addr)
  2444  			if err != nil {
  2445  				srvrLog.Warnf("Can't listen on %s: %v", addr,
  2446  					err)
  2447  				continue
  2448  			}
  2449  			listeners = append(listeners, listener)
  2450  			if discover {
  2451  				if na, err := amgr.DeserializeNetAddress(addr); err == nil {
  2452  					err = amgr.AddLocalAddress(na, addrmgr.BoundPrio)
  2453  					if err != nil {
  2454  						amgrLog.Debugf("Skipping bound address: %v", err)
  2455  					}
  2456  				}
  2457  			}
  2458  		}
  2459  
  2460  		if len(listeners) == 0 {
  2461  			return nil, errors.New("no valid listen address")
  2462  		}
  2463  	}
  2464  
  2465  	s := server{
  2466  		listeners:            listeners,
  2467  		chainParams:          chainParams,
  2468  		addrManager:          amgr,
  2469  		newPeers:             make(chan *serverPeer, cfg.MaxPeers),
  2470  		donePeers:            make(chan *serverPeer, cfg.MaxPeers),
  2471  		banPeers:             make(chan *serverPeer, cfg.MaxPeers),
  2472  		retryPeers:           make(chan *serverPeer, cfg.MaxPeers),
  2473  		wakeup:               make(chan struct{}),
  2474  		query:                make(chan interface{}),
  2475  		relayInv:             make(chan relayMsg, cfg.MaxPeers),
  2476  		broadcast:            make(chan broadcastMsg, cfg.MaxPeers),
  2477  		quit:                 make(chan struct{}),
  2478  		modifyRebroadcastInv: make(chan interface{}),
  2479  		peerHeightsUpdate:    make(chan updatePeerHeightsMsg),
  2480  		nat:                  nat,
  2481  		db:                   db,
  2482  		timeSource:           blockchain.NewMedianTime(),
  2483  		services:             services,
  2484  		sigCache:             txscript.NewSigCache(cfg.SigCacheMaxSize),
  2485  	}
  2486  
  2487  	// Create the transaction and address indexes if needed.
  2488  	//
  2489  	// CAUTION: the txindex needs to be first in the indexes array because
  2490  	// the addrindex uses data from the txindex during catchup.  If the
  2491  	// addrindex is run first, it may not have the transactions from the
  2492  	// current block indexed.
  2493  	var indexes []indexers.Indexer
  2494  	if cfg.TxIndex || cfg.AddrIndex {
  2495  		// Enable transaction index if address index is enabled since it
  2496  		// requires it.
  2497  		if !cfg.TxIndex {
  2498  			indxLog.Infof("Transaction index enabled because it " +
  2499  				"is required by the address index")
  2500  			cfg.TxIndex = true
  2501  		} else {
  2502  			indxLog.Info("Transaction index is enabled")
  2503  		}
  2504  
  2505  		s.txIndex = indexers.NewTxIndex(db)
  2506  		indexes = append(indexes, s.txIndex)
  2507  	}
  2508  	if cfg.AddrIndex {
  2509  		indxLog.Info("Address index is enabled")
  2510  		s.addrIndex = indexers.NewAddrIndex(db, chainParams)
  2511  		indexes = append(indexes, s.addrIndex)
  2512  	}
  2513  
  2514  	// Create an index manager if any of the optional indexes are enabled.
  2515  	var indexManager blockchain.IndexManager
  2516  	if len(indexes) > 0 {
  2517  		indexManager = indexers.NewManager(db, indexes)
  2518  	}
  2519  	bm, err := newBlockManager(&s, indexManager)
  2520  	if err != nil {
  2521  		return nil, err
  2522  	}
  2523  	s.blockManager = bm
  2524  
  2525  	txC := mempoolConfig{
  2526  		Policy: mempoolPolicy{
  2527  			DisableRelayPriority: cfg.NoRelayPriority,
  2528  			FreeTxRelayLimit:     cfg.FreeTxRelayLimit,
  2529  			MaxOrphanTxs:         cfg.MaxOrphanTxs,
  2530  			MaxOrphanTxSize:      defaultMaxOrphanTxSize,
  2531  			MaxSigOpsPerTx:       blockchain.MaxSigOpsPerBlock / 5,
  2532  			MinRelayTxFee:        cfg.minRelayTxFee,
  2533  		},
  2534  		FetchUtxoView: s.blockManager.chain.FetchUtxoView,
  2535  		Chain:         s.blockManager.chain,
  2536  		SigCache:      s.sigCache,
  2537  		TimeSource:    s.timeSource,
  2538  		AddrIndex:     s.addrIndex,
  2539  	}
  2540  	s.txMemPool = newTxMemPool(&txC)
  2541  
  2542  	// Create the mining policy based on the configuration options.
  2543  	// NOTE: The CPU miner relies on the mempool, so the mempool has to be
  2544  	// created before calling the function to create the CPU miner.
  2545  	policy := mining.Policy{
  2546  		BlockMinSize:      cfg.BlockMinSize,
  2547  		BlockMaxSize:      cfg.BlockMaxSize,
  2548  		BlockPrioritySize: cfg.BlockPrioritySize,
  2549  		TxMinFreeFee:      cfg.minRelayTxFee,
  2550  	}
  2551  	s.cpuMiner = newCPUMiner(&policy, &s)
  2552  
  2553  	if !cfg.DisableRPC {
  2554  		s.rpcServer, err = newRPCServer(cfg.RPCListeners, &policy, &s)
  2555  		if err != nil {
  2556  			return nil, err
  2557  		}
  2558  	}
  2559  
  2560  	return &s, nil
  2561  }
  2562  
  2563  // dynamicTickDuration is a convenience function used to dynamically choose a
  2564  // tick duration based on remaining time.  It is primarily used during
  2565  // server shutdown to make shutdown warnings more frequent as the shutdown time
  2566  // approaches.
  2567  func dynamicTickDuration(remaining time.Duration) time.Duration {
  2568  	switch {
  2569  	case remaining <= time.Second*5:
  2570  		return time.Second
  2571  	case remaining <= time.Second*15:
  2572  		return time.Second * 5
  2573  	case remaining <= time.Minute:
  2574  		return time.Second * 15
  2575  	case remaining <= time.Minute*5:
  2576  		return time.Minute
  2577  	case remaining <= time.Minute*15:
  2578  		return time.Minute * 5
  2579  	case remaining <= time.Hour:
  2580  		return time.Minute * 15
  2581  	}
  2582  	return time.Hour
  2583  }
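
// Worked example: ScheduleShutdown(2*time.Minute) starts with one-minute
// ticks (remaining <= 5m), switches to 15-second ticks once a minute or less
// remains, to 5-second ticks at 15 seconds or less, and to 1-second ticks
// for the final 5 seconds.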