github.com/deso-protocol/core@v1.2.9/lib/peer.go

     1  package lib
     2  
     3  import (
     4  	"fmt"
     5  	"github.com/decred/dcrd/lru"
     6  	"math"
     7  	"net"
     8  	"sort"
     9  	"sync/atomic"
    10  	"time"
    11  
    12  	"github.com/btcsuite/btcd/wire"
    13  	"github.com/deso-protocol/go-deadlock"
    14  	"github.com/golang/glog"
    15  	"github.com/pkg/errors"
    16  )
    17  
    18  // peer.go defines an interface for connecting to and managing a DeSo
    19  // peer. Each peer a node is connected to is represented by a Peer object,
    20  // which is used to send and receive messages to/from that peer. A good
    21  // place to start is inHandler and outHandler in this file.
    22  
    23  // ExpectedResponse is a struct used to enforce timeouts on peers. For example,
    24  // if we send a GetBlocks message, we would expect a response within a given
    25  // window and disconnect from the Peer if we don't get that response.
    26  type ExpectedResponse struct {
    27  	TimeExpected time.Time
    28  	MessageType  MsgType
    29  }
    30  
    31  type DeSoMessageMeta struct {
    32  	DeSoMessage DeSoMessage
    33  	Inbound     bool
    34  }
    35  
    36  // Peer is an object that holds all of the state for a connection to another node.
    37  // Any communication with other nodes happens via this object, which maintains a
    38  // queue of messages to send to the other node.
    39  type Peer struct {
    40  	// These stats should be accessed atomically.
    41  	bytesReceived uint64
    42  	bytesSent     uint64
    43  	totalMessages uint64
    44  	lastRecv      int64
    45  	lastSend      int64
    46  
    47  	// Stats that should be accessed using the mutex below.
    48  	StatsMtx       deadlock.RWMutex
    49  	timeOffsetSecs int64
    50  	timeConnected  time.Time
    51  	startingHeight uint32
    52  	ID             uint64
    53  	// Ping-related fields.
    54  	LastPingNonce  uint64
    55  	LastPingTime   time.Time
    56  	LastPingMicros int64
    57  
    58  	// Connection info.
    59  	cmgr                *ConnectionManager
    60  	conn                net.Conn
    61  	isOutbound          bool
    62  	isPersistent        bool
    63  	stallTimeoutSeconds uint64
    64  	Params              *DeSoParams
    65  	MessageChan         chan *ServerMessage
    66  	// A hack that allows an API endpoint to manually
    67  	// delete a peer.
    68  	PeerManuallyRemovedFromConnectionManager bool
    69  
    70  	// In order to complete a version negotiation successfully, the peer must
    71  	// reply to the initial version message we send them with a verack message
    72  	// containing the nonce from that initial version message. This ensures that
    73  	// the peer's IP isn't being spoofed since the only way to actually produce
    74  	// a verack with the appropriate nonce is to actually own the IP that
    75  	// the peer claims it has. As such, we maintain the version nonce we sent
    76  	// the peer and the version nonce they sent us here.
    77  	//
    78  	// TODO: The way we synchronize the version nonce is currently a bit
    79  	// messy; ideally we could do it without keeping global state.
    80  	versionNonceSent     uint64
    81  	versionNonceReceived uint64
    82  
    83  	// A pointer to the Server
    84  	srv *Server
    85  
    86  	// Basic state.
    87  	PeerInfoMtx               deadlock.Mutex
    88  	serviceFlags              ServiceFlag
    89  	addrStr                   string
    90  	netAddr                   *wire.NetAddress
    91  	userAgent                 string
    92  	advertisedProtocolVersion uint64
    93  	negotiatedProtocolVersion uint64
    94  	versionNegotiated         bool
    95  	minTxFeeRateNanosPerKB    uint64
    96  	// Messages for which we are expecting a reply within a fixed
    97  	// amount of time. This list is always sorted by TimeExpected,
    98  	// with the item having the earliest time at the front.
    99  	expectedResponses []*ExpectedResponse
   100  
   101  	// The addresses this peer is aware of.
   102  	knownAddressMapLock deadlock.RWMutex
   103  	knownAddressesmap   map[string]bool
   104  
   105  	// Output queue for messages that need to be sent to the peer.
   106  	outputQueueChan chan DeSoMessage
   107  
   108  	// Zero until Disconnect has been called on the Peer. Used to make it
   109  	// so that the logic in Disconnect will only be executed once.
   110  	disconnected int32
   111  	// Signals that the peer is now in the stopped state.
   112  	quit chan interface{}
   113  
   114  	// Each Peer is only allowed to have a certain number of blocks being sent
   115  	// to them at any given time. We use
   116  	// this value to enforce that constraint. The reason we need this is that,
   117  	// without it, one peer could theoretically clog our Server by issuing many GetBlocks
   118  	// requests that ultimately don't get delivered. This way the number of blocks
   119  	// being sent is limited to a multiple of the number of Peers we have.
   120  	blocksToSendMtx deadlock.Mutex
   121  	blocksToSend    map[BlockHash]bool
   122  
   123  	// Inventory stuff.
   124  	// The inventory that we know the peer already has.
   125  	knownInventory lru.Cache
   126  
   127  	// Whether the peer is ready to receive INV messages. For a peer that
   128  	// still needs a mempool download, this is false.
   129  	canReceiveInvMessagess bool
   130  
   131  	// We process GetTransaction requests in a separate loop. This allows us
   132  	// to ensure that the responses are ordered.
   133  	mtxMessageQueue deadlock.RWMutex
   134  	messagQueue     []*DeSoMessageMeta
   135  
   136  	requestedBlocks map[BlockHash]bool
   137  }
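The stats fields at the top of the struct (bytesReceived, bytesSent, totalMessages, lastRecv, lastSend) are written with sync/atomic in WriteDeSoMessage and ReadDeSoMessage below, so any reader must use atomic loads as well. A minimal hedged sketch of what safe accessors could look like; these helper names are illustrative and not part of this file:

```go
// Illustrative sketch only: these helpers are not part of peer.go,
// but show how the atomic stats fields are meant to be read.
func (pp *Peer) BytesSentSnapshot() uint64 {
	return atomic.LoadUint64(&pp.bytesSent)
}

func (pp *Peer) LastRecvTime() time.Time {
	// lastRecv stores a Unix timestamp in seconds.
	return time.Unix(atomic.LoadInt64(&pp.lastRecv), 0)
}
```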
   138  
   139  func (pp *Peer) AddDeSoMessage(desoMessage DeSoMessage, inbound bool) {
   140  	// Don't add any more messages if the peer is disconnected
   141  	if atomic.LoadInt32(&pp.disconnected) != 0 {
   142  		glog.Errorf("AddDeSoMessage: Not enqueueing message %v because peer is disconnecting", desoMessage.GetMsgType())
   143  		return
   144  	}
   145  
   146  	pp.mtxMessageQueue.Lock()
   147  	defer pp.mtxMessageQueue.Unlock()
   148  
   149  	pp.messagQueue = append(pp.messagQueue, &DeSoMessageMeta{
   150  		DeSoMessage: desoMessage,
   151  		Inbound:     inbound,
   152  	})
   153  }
   154  
   155  func (pp *Peer) MaybeDequeueDeSoMessage() *DeSoMessageMeta {
   156  	pp.mtxMessageQueue.Lock()
   157  	defer pp.mtxMessageQueue.Unlock()
   158  
   159  	// If we don't have any messages to process, just return.
   160  	if len(pp.messagQueue) == 0 {
   161  		return nil
   162  	}
   163  	// If we get here then we know we have messages to process.
   164  
   165  	messageToReturn := pp.messagQueue[0]
   166  	pp.messagQueue = pp.messagQueue[1:]
   167  
   168  	return messageToReturn
   169  }
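AddDeSoMessage and MaybeDequeueDeSoMessage together form a mutex-guarded FIFO that StartDeSoMessageProcessor (below) drains by polling. A standalone hedged sketch of the same pattern, with strings standing in for DeSoMessage values:

```go
package main

import (
	"fmt"
	"sync"
)

// messageQueue is a stand-in for Peer's mutex-guarded message slice.
type messageQueue struct {
	mtx   sync.Mutex
	items []string
}

// enqueue mirrors AddDeSoMessage: append under the lock.
func (q *messageQueue) enqueue(item string) {
	q.mtx.Lock()
	defer q.mtx.Unlock()
	q.items = append(q.items, item)
}

// maybeDequeue mirrors MaybeDequeueDeSoMessage: pop the head or
// report that the queue is empty.
func (q *messageQueue) maybeDequeue() (string, bool) {
	q.mtx.Lock()
	defer q.mtx.Unlock()
	if len(q.items) == 0 {
		return "", false
	}
	item := q.items[0]
	q.items = q.items[1:]
	return item, true
}

func main() {
	q := &messageQueue{}
	q.enqueue("GET_TRANSACTIONS")
	q.enqueue("INV")
	for {
		item, ok := q.maybeDequeue()
		if !ok {
			break
		}
		fmt.Println("processing", item)
	}
}
```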
   170  
   171  // This call blocks on the Peer's queue.
   172  func (pp *Peer) HandleGetTransactionsMsg(getTxnMsg *MsgDeSoGetTransactions) {
   173  	// Get all the transactions we have from the mempool.
   174  	glog.V(1).Infof("Peer._handleGetTransactions: Processing "+
   175  		"MsgDeSoGetTransactions message with %v txns from peer %v",
   176  		len(getTxnMsg.HashList), pp)
   177  
   178  	mempoolTxs := []*MempoolTx{}
   179  	txnMap := pp.srv.mempool.readOnlyUniversalTransactionMap
   180  	for _, txHash := range getTxnMsg.HashList {
   181  		mempoolTx, exists := txnMap[*txHash]
   182  		// If the transaction isn't in the pool, just continue without adding
   183  		// it. It is generally OK to respond with only a subset of the transactions
   184  		// that were requested.
   185  		if !exists {
   186  			continue
   187  		}
   188  
   189  		mempoolTxs = append(mempoolTxs, mempoolTx)
   190  	}
   191  
   192  	// Sort the transactions in the order in which they were added to the mempool.
   193  	// Doing this helps the Peer when they go to add the transactions, reducing
   194  	// unconnectedTxns and rejections due to missing dependencies.
   195  	sort.Slice(mempoolTxs, func(ii, jj int) bool {
   196  		return mempoolTxs[ii].Added.Before(mempoolTxs[jj].Added)
   197  	})
   198  
   199  	// Add all of the fetched transactions to a response.
   200  	res := &MsgDeSoTransactionBundle{}
   201  	for _, mempoolTx := range mempoolTxs {
   202  		res.Transactions = append(res.Transactions, mempoolTx.Tx)
   203  	}
   204  
   205  	// At this point the response should have all of the transactions that
   206  	// we had available from the request. It should also be below the limit
   207  	// for number of transactions since the request itself was below the
   208  	// limit. So push the bundle to the Peer.
   209  	glog.V(1).Infof("Peer._handleGetTransactions: Sending txn bundle with size %v to peer %v",
   210  		len(res.Transactions), pp)
   211  	pp.QueueMessage(res)
   212  }
   213  
   214  func (pp *Peer) HandleTransactionBundleMessage(msg *MsgDeSoTransactionBundle) {
   215  	// TODO: I think making it so that we can't process more than one TransactionBundle at
   216  	// a time would reduce transaction reorderings. Right now, if you get multiple bundles
   217  	// from multiple peers they'll be processed all at once, potentially interleaving with
   218  	// one another.
   219  
   220  	glog.V(1).Infof("Received TransactionBundle "+
   221  		"message of size %v from Peer %v", len(msg.Transactions), pp)
   222  
   223  	transactionsToRelay := pp.srv._processTransactions(pp, msg)
   224  	glog.V(1).Infof("Server._handleTransactionBundle: Accepted %v txns from Peer %v",
   225  		len(transactionsToRelay), pp)
   226  
   227  	_ = transactionsToRelay
   228  
   229  	// Remove all the transactions we received from requestedTransactions now
   230  	// that we've processed them. Don't remove them from inventoryBeingProcessed,
   231  	// since that will guard against reprocessing transactions that had errors while
   232  	// processing.
   233  	pp.srv.dataLock.Lock()
   234  	for _, txn := range msg.Transactions {
   235  		txHash := txn.Hash()
   236  		delete(pp.srv.requestedTransactionsMap, *txHash)
   237  	}
   238  	pp.srv.dataLock.Unlock()
   239  
   240  	// At this point we should have attempted to add all the transactions to our
   241  	// mempool.
   242  	pp.srv.hasProcessedFirstTransactionBundle = true
   243  }
   244  
   245  func (pp *Peer) HelpHandleInv(msg *MsgDeSoInv) {
   246  	// Get the requestedTransactions lock and release it at the end of the function.
   247  	pp.srv.dataLock.Lock()
   248  	defer pp.srv.dataLock.Unlock()
   249  
   250  	// Iterate through the message. Gather the transactions and the
   251  	// blocks we don't already have into separate inventory lists.
   252  	glog.V(1).Infof("Server._handleInv: Processing INV message of size %v from peer %v", len(msg.InvList), pp)
   253  	txHashList := []*BlockHash{}
   254  	blockHashList := []*BlockHash{}
   255  
   256  	for _, invVect := range msg.InvList {
   257  		// No matter what, add the inv to the peer's known inventory.
   258  		pp.knownInventory.Add(*invVect)
   259  
   260  		// If this is a hash we are currently processing, no need to do anything.
   261  		// This check serves to fill the gap between the time when we've decided
   262  		// to ask for the data corresponding to an inv and when we actually receive
   263  		// that data. Without this check, the following would happen:
   264  		// - Receive inv from peer1
   265  		// - Get data for inv from peer1
   266  		// - Receive same inv from peer2
   267  		// - Get same data for same inv from peer2 before we've received
   268  		//   a response from peer1
   269  		// Because of this check, the following happens instead:
   270  		// - Receive inv from peer1
   271  		// - Get data for inv from peer1 *and* add it to inventoryBeingProcessed.
   272  		// - Receive same inv from peer2
   273  		// - Notice second inv is already in inventoryBeingProcessed so don't
   274  		//   request data for it.
   275  		if pp.srv.inventoryBeingProcessed.Contains(*invVect) {
   276  			continue
   277  		}
   278  
   279  		// Extract a copy of the block hash to avoid the iterator changing the
   280  		// value underneath us.
   281  		currentHash := BlockHash{}
   282  		copy(currentHash[:], invVect.Hash[:])
   283  
   284  		if invVect.Type == InvTypeTx {
   285  			// For transactions, check that the transaction isn't in the
   286  			// mempool and that it isn't currently being requested.
   287  			_, requestIsInFlight := pp.srv.requestedTransactionsMap[currentHash]
   288  			if requestIsInFlight || pp.srv.mempool.IsTransactionInPool(&currentHash) {
   289  				continue
   290  			}
   291  
   292  			txHashList = append(txHashList, &currentHash)
   293  		} else if invVect.Type == InvTypeBlock {
   294  			// For blocks, we check that the hash isn't known to us either in our
   295  			// main header chain or in side chains.
   296  			if pp.srv.blockchain.HasHeader(&currentHash) {
   297  				continue
   298  			}
   299  
   300  			blockHashList = append(blockHashList, &currentHash)
   301  		}
   302  
   303  		// If we made it here, it means the inventory was added to one of the
   304  		// lists so mark it as processed on the Server.
   305  		pp.srv.inventoryBeingProcessed.Add(*invVect)
   306  	}
   307  
   308  	// If there were any transactions we don't yet have, request them using
   309  	// a GetTransactions message.
   310  	if len(txHashList) > 0 {
   311  		// Add all the transactions we think we need to the list of transactions
   312  		// requested (i.e. in-flight) since we're about to request them.
   313  		for _, txHash := range txHashList {
   314  			pp.srv.requestedTransactionsMap[*txHash] = &GetDataRequestInfo{
   315  				PeerWhoSentInv: pp,
   316  				TimeRequested:  time.Now(),
   317  			}
   318  		}
   319  
   320  		pp.AddDeSoMessage(&MsgDeSoGetTransactions{
   321  			HashList: txHashList,
   322  		}, false /*inbound*/)
   323  	} else {
   324  		glog.V(1).Infof("Server._handleInv: Not sending GET_TRANSACTIONS because no new hashes")
   325  	}
   326  
   327  	// If the peer has sent us any block hashes that are new to us then send
   328  	// a GetHeaders message to her to get back in sync with her. The flow
   329  	// for this is generally:
   330  	// - Receive an inv message from a peer for a block we don't have.
   331  	// - Send them a GetHeaders message with our most up-to-date block locator.
   332  	// - Receive back from them all the headers they're aware of that can be
   333  	//   accepted into our chain.
   334  	// - We will then request from them all of the block data for the new headers
   335  	//   we have if they affect our main chain.
   336  	// - When the blocks come in, we process them by adding them to the chain
   337  	//   one-by-one.
   338  	if len(blockHashList) > 0 {
   339  		locator := pp.srv.blockchain.LatestHeaderLocator()
   340  		pp.AddDeSoMessage(&MsgDeSoGetHeaders{
   341  			StopHash:     &BlockHash{},
   342  			BlockLocator: locator,
   343  		}, false /*inbound*/)
   344  	}
   345  }
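The inventoryBeingProcessed check above implements a "mark before you request" rule that closes the window between deciding to fetch an inv and actually receiving the data. A standalone hedged sketch of the idea, using a plain map where the Server uses an LRU cache:

```go
package main

import "fmt"

// inFlight records inventory we've already decided to request, so a
// second peer advertising the same item doesn't trigger a duplicate
// request before the first response arrives.
var inFlight = map[string]bool{}

func maybeRequest(invHash, fromPeer string) {
	if inFlight[invHash] {
		fmt.Printf("skipping %s from %s: already in flight\n", invHash, fromPeer)
		return
	}
	inFlight[invHash] = true
	fmt.Printf("requesting %s from %s\n", invHash, fromPeer)
}

func main() {
	maybeRequest("deadbeef", "peer1")
	maybeRequest("deadbeef", "peer2") // deduplicated
}
```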
   346  
   347  func (pp *Peer) HandleInv(msg *MsgDeSoInv) {
   348  	// Ignore invs while we're still syncing and before we've requested
   349  	// all mempool transactions from one of our peers to bootstrap.
   350  	if pp.srv.blockchain.isSyncing() {
   351  		glog.Infof("Server._handleInv: Ignoring INV while syncing from Peer %v", pp)
   352  		return
   353  	}
   354  
   355  	// Expire any transactions that we've been waiting too long on.
   356  	// Also remove them from inventoryProcessed in case another Peer wants to send
   357  	// them to us in the future.
   358  	pp.srv.ExpireRequests()
   359  
   360  	pp.HelpHandleInv(msg)
   361  }
   362  
   363  func (pp *Peer) HandleGetBlocks(msg *MsgDeSoGetBlocks) {
   364  	// Nothing to do if the request is empty.
   365  	if len(msg.HashList) == 0 {
   366  		glog.V(1).Infof("Server._handleGetBlocks: Received empty GetBlocks "+
   367  			"request. No response needed for Peer %v", pp)
   368  		return
   369  	}
   370  
   371  	// For each block the Peer has requested, fetch it and queue it to
   372  	// be sent. It takes some time to fetch the blocks which is why we
   373  	// do it in a goroutine. This might also block if the Peer's send
   374  	// queue is full.
   375  	//
   376  	// Note that the requester should generally ask for the blocks in the
   377  	// order they'd like to receive them as we will typically honor this
   378  	// ordering.
   379  	for _, hashToSend := range msg.HashList {
   380  		blockToSend := pp.srv.blockchain.GetBlock(hashToSend)
   381  		if blockToSend == nil {
   382  			// Don't ask us for blocks before verifying that we have them with a
   383  			// GetHeaders request.
   384  			glog.Errorf("Server._handleGetBlocks: Disconnecting peer %v because "+
   385  				"she asked for a block with hash %v that we don't have", pp, msg.HashList[0])
   386  			pp.Disconnect()
   387  			return
   388  		}
   389  		pp.AddDeSoMessage(blockToSend, false)
   390  	}
   391  }
   392  
   393  func (pp *Peer) cleanupMessageProcessor() {
   394  	pp.mtxMessageQueue.Lock()
   395  	defer pp.mtxMessageQueue.Unlock()
   396  
   397  	// We assume that no more elements will be added to the message queue once this function
   398  	// is called.
   399  	glog.Infof("StartDeSoMessageProcessor: Cleaning up message queue for peer: %v", pp)
   400  	pp.messagQueue = nil
   401  	// Set a few more things to nil just to make sure the garbage collector doesn't
   402  	// get confused when freeing up this Peer's memory. This is to fix a bug where
   403  	// inbound peers disconnecting was causing an OOM.
   404  	pp.cmgr = nil
   405  	pp.srv = nil
   406  	pp.MessageChan = nil
   407  	//pp.conn = nil
   408  }
   409  
   410  func (pp *Peer) StartDeSoMessageProcessor() {
   411  	glog.Infof("StartDeSoMessageProcessor: Starting for peer %v", pp)
   412  	for {
   413  		if atomic.LoadInt32(&pp.disconnected) != 0 {
   414  			pp.cleanupMessageProcessor()
   415  			glog.Infof("StartDeSoMessageProcessor: Stopping because peer disconnected: %v", pp)
   416  			return
   417  		}
   418  		msgToProcess := pp.MaybeDequeueDeSoMessage()
   419  		if msgToProcess == nil {
   420  			time.Sleep(50 * time.Millisecond)
   421  			continue
   422  		}
   423  		// If we get here we know we have a message to process.
   424  
   425  		if msgToProcess.Inbound {
   426  			if msgToProcess.DeSoMessage.GetMsgType() == MsgTypeGetTransactions {
   427  				glog.V(1).Infof("StartDeSoMessageProcessor: RECEIVED message of "+
   428  					"type %v with num hashes %v from peer %v", msgToProcess.DeSoMessage.GetMsgType(),
   429  					len(msgToProcess.DeSoMessage.(*MsgDeSoGetTransactions).HashList), pp)
   430  				pp.HandleGetTransactionsMsg(msgToProcess.DeSoMessage.(*MsgDeSoGetTransactions))
   431  
   432  			} else if msgToProcess.DeSoMessage.GetMsgType() == MsgTypeTransactionBundle {
   433  				glog.V(1).Infof("StartDeSoMessageProcessor: RECEIVED message of "+
   434  					"type %v with num txns %v from peer %v", msgToProcess.DeSoMessage.GetMsgType(),
   435  					len(msgToProcess.DeSoMessage.(*MsgDeSoTransactionBundle).Transactions), pp)
   436  				pp.HandleTransactionBundleMessage(msgToProcess.DeSoMessage.(*MsgDeSoTransactionBundle))
   437  
   438  			} else if msgToProcess.DeSoMessage.GetMsgType() == MsgTypeInv {
   439  				glog.V(1).Infof("StartDeSoMessageProcessor: RECEIVED message of "+
   440  					"type %v with num hashes %v from peer %v", msgToProcess.DeSoMessage.GetMsgType(),
   441  					len(msgToProcess.DeSoMessage.(*MsgDeSoInv).InvList), pp)
   442  				pp.HandleInv(msgToProcess.DeSoMessage.(*MsgDeSoInv))
   443  
   444  			} else if msgToProcess.DeSoMessage.GetMsgType() == MsgTypeGetBlocks {
   445  				glog.V(1).Infof("StartDeSoMessageProcessor: RECEIVED message of "+
   446  					"type %v with num hashes %v from peer %v", msgToProcess.DeSoMessage.GetMsgType(),
   447  					len(msgToProcess.DeSoMessage.(*MsgDeSoGetBlocks).HashList), pp)
   448  				pp.HandleGetBlocks(msgToProcess.DeSoMessage.(*MsgDeSoGetBlocks))
   449  
   450  			} else {
   451  				glog.Errorf("StartDeSoMessageProcessor: ERROR RECEIVED message of "+
   452  					"type %v from peer %v", msgToProcess.DeSoMessage.GetMsgType(), pp)
   453  			}
   454  		} else {
   455  			glog.V(1).Infof("StartDeSoMessageProcessor: SENDING message of "+
   456  				"type %v to peer %v", msgToProcess.DeSoMessage.GetMsgType(), pp)
   457  			pp.QueueMessage(msgToProcess.DeSoMessage)
   458  		}
   459  	}
   460  }
   461  
   462  // NewPeer creates a new Peer object.
   463  func NewPeer(_conn net.Conn, _isOutbound bool, _netAddr *wire.NetAddress,
   464  	_isPersistent bool, _stallTimeoutSeconds uint64,
   465  	_minFeeRateNanosPerKB uint64,
   466  	params *DeSoParams,
   467  	messageChan chan *ServerMessage,
   468  	_cmgr *ConnectionManager, _srv *Server) *Peer {
   469  
   470  	pp := Peer{
   471  		cmgr:                   _cmgr,
   472  		srv:                    _srv,
   473  		conn:                   _conn,
   474  		addrStr:                _conn.RemoteAddr().String(),
   475  		netAddr:                _netAddr,
   476  		isOutbound:             _isOutbound,
   477  		isPersistent:           _isPersistent,
   478  		outputQueueChan:        make(chan DeSoMessage),
   479  		quit:                   make(chan interface{}),
   480  		knownInventory:         lru.NewCache(maxKnownInventory),
   481  		blocksToSend:           make(map[BlockHash]bool),
   482  		stallTimeoutSeconds:    _stallTimeoutSeconds,
   483  		minTxFeeRateNanosPerKB: _minFeeRateNanosPerKB,
   484  		knownAddressesmap:      make(map[string]bool),
   485  		Params:                 params,
   486  		MessageChan:            messageChan,
   487  		requestedBlocks:        make(map[BlockHash]bool),
   488  	}
   489  	if _cmgr != nil {
   490  		pp.ID = atomic.AddUint64(&_cmgr.peerIndex, 1)
   491  	}
   492  
   493  	// TODO: Before, we would give each Peer its own Logger object. Now we
   494  	// have a much better way of debugging which is that we include a nonce
   495  	// in all messages related to a Peer (i.e. PeerID=%d) that allows us to
   496  	// pipe the output to a file and inspect it (and if we choose to filter on
   497  	// a PeerID= then we can see exclusively that Peer's related messages).
   498  	// Still, we're going to leave this logic here for a little while longer in
   499  	// case a situation arises where commenting it in seems like it would be
   500  	// useful.
   501  	//
   502  	// Each peer gets its own log directory. Name the directory with
   503  	// IP:PORT_ID to ensure it's identifiable but also unique. The higher
   504  	// the ID the more recently the peer connection was established.
   505  	/*
   506  		logDir := fmt.Sprintf("%s.%05d_%d.log", addrmgr.NetAddressKey(_netAddr), pp.ID, time.Now().UnixNano())
   507  		resetLogDir := false
   508  		pp.Logger = glog.NewLogger(logDir, resetLogDir)
   509  		// Don't log peer information to stderr.
   510  		pp.Logger.AlsoToStderr = false
   511  	*/
   512  	return &pp
   513  }
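A hedged test-style sketch of constructing a Peer, assuming an in-memory net.Pipe connection, a nil ConnectionManager and Server (both of which NewPeer tolerates), and DeSoMainnetParams for the params. Since netAddr is nil here, IP() and Port() must not be called on the result:

```go
// Hedged sketch (not part of this file): constructing a Peer for a test.
clientConn, serverConn := net.Pipe()
_ = serverConn // the other end would be driven by the test

messageChan := make(chan *ServerMessage, 100)
pp := NewPeer(
	clientConn,
	true,  // isOutbound
	nil,   // netAddr: nil, so IP()/Port() are off-limits
	false, // isPersistent
	10,    // stallTimeoutSeconds
	0,     // minFeeRateNanosPerKB
	&DeSoMainnetParams,
	messageChan,
	nil, // cmgr
	nil, // srv
)
fmt.Printf("created %v\n", pp)
```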
   514  
   515  // MinFeeRateNanosPerKB returns the minimum fee rate this peer requires in order to
   516  // accept transactions into its mempool. We should generally not send a peer a
   517  // transaction below this fee rate.
   518  func (pp *Peer) MinFeeRateNanosPerKB() uint64 {
   519  	pp.StatsMtx.RLock()
   520  	defer pp.StatsMtx.RUnlock()
   521  
   522  	return pp.minTxFeeRateNanosPerKB
   523  }
   524  
   525  // StartingBlockHeight is the height of the peer's blockchain tip.
   526  func (pp *Peer) StartingBlockHeight() uint32 {
   527  	pp.StatsMtx.RLock()
   528  	defer pp.StatsMtx.RUnlock()
   529  	return pp.startingHeight
   530  }
   531  
   532  // NumBlocksToSend is the number of blocks the Peer has requested from
   533  // us that we have yet to send them.
   534  func (pp *Peer) NumBlocksToSend() uint32 {
   535  	pp.blocksToSendMtx.Lock()
   536  	defer pp.blocksToSendMtx.Unlock()
   537  
   538  	return uint32(len(pp.blocksToSend))
   539  }
   540  
   541  const (
   542  	// maxKnownInventory is the maximum number of items to keep in the known
   543  	// inventory cache.
   544  	maxKnownInventory = 1000000
   545  
   546  	// pingInterval is the interval of time to wait in between sending ping
   547  	// messages.
   548  	pingInterval = 2 * time.Minute
   549  
   550  	// idleTimeout is the duration of inactivity before we time out a peer.
   551  	idleTimeout = 5 * time.Minute
   552  )
   553  
   554  // handlePingMsg is invoked when a peer receives a ping message. It replies with a pong
   555  // message.
   556  func (pp *Peer) handlePingMsg(msg *MsgDeSoPing) {
   557  	// Include nonce from ping so pong can be identified.
   558  	glog.V(2).Infof("Peer.handlePingMsg: Received ping from peer %v: %v", pp, msg)
   559  	// Queue up a pong message.
   560  	pp.QueueMessage(&MsgDeSoPong{Nonce: msg.Nonce})
   561  }
   562  
   563  // handlePongMsg is invoked when a peer receives a pong message. It
   564  // updates the ping statistics.
   565  func (pp *Peer) handlePongMsg(msg *MsgDeSoPong) {
   566  	// Arguably we could use a buffered channel here, sending data
   567  	// in a FIFO manner whenever we send a ping, or a list keeping track of
   568  	// the times of each ping. For now we just make a best effort and
   569  	// only record stats if the pong was for the last ping sent. Any preceding
   570  	// and overlapping pings are ignored. Overlap is unlikely in practice
   571  	// because we ping infrequently enough that an overlapping ping would
   572  	// mean we had already timed out the peer.
   573  	glog.V(2).Infof("Peer.handlePongMsg: Received pong from peer %v: %v", msg, pp)
   574  	pp.StatsMtx.Lock()
   575  	defer pp.StatsMtx.Unlock()
   576  	if pp.LastPingNonce != 0 && msg.Nonce == pp.LastPingNonce {
   577  		pp.LastPingMicros = time.Since(pp.LastPingTime).Nanoseconds()
   578  		pp.LastPingMicros /= 1000 // convert to usec.
   579  		pp.LastPingNonce = 0
   580  		glog.V(2).Infof("Peer.handlePongMsg: LastPingMicros(%d) from Peer %v", pp.LastPingMicros, pp)
   581  	}
   582  }
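The pong handler only credits a round-trip time when the echoed nonce matches the most recent ping; anything else is dropped. A standalone hedged sketch of that bookkeeping:

```go
package main

import (
	"fmt"
	"time"
)

// pingStats is a stand-in for the LastPing* fields on Peer.
type pingStats struct {
	lastPingNonce  uint64
	lastPingTime   time.Time
	lastPingMicros int64
}

func (s *pingStats) sentPing(nonce uint64) {
	s.lastPingNonce = nonce
	s.lastPingTime = time.Now()
}

func (s *pingStats) gotPong(nonce uint64) {
	// Ignore pongs for anything other than the most recent ping.
	if s.lastPingNonce == 0 || nonce != s.lastPingNonce {
		return
	}
	s.lastPingMicros = time.Since(s.lastPingTime).Microseconds()
	s.lastPingNonce = 0
}

func main() {
	s := &pingStats{}
	s.sentPing(42)
	time.Sleep(5 * time.Millisecond)
	s.gotPong(99) // stale/unknown nonce: ignored
	s.gotPong(42) // matches: records the RTT
	fmt.Printf("RTT: %d microseconds\n", s.lastPingMicros)
}
```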
   583  
   584  func (pp *Peer) pingHandler() {
   585  	glog.V(1).Infof("Peer.pingHandler: Starting ping handler for Peer %v", pp)
   586  	pingTicker := time.NewTicker(pingInterval)
   587  	defer pingTicker.Stop()
   588  
   589  out:
   590  	for {
   591  		select {
   592  		case <-pingTicker.C:
   593  			glog.V(2).Infof("Peer.pingHandler: Initiating ping for Peer %v", pp)
   594  			nonce, err := wire.RandomUint64()
   595  			if err != nil {
   596  				glog.Errorf("Not sending ping to Peer %v: %v", pp, err)
   597  				continue
   598  			}
   599  			// Update the ping stats when we initiate a ping.
   600  			//
   601  			// TODO: Setting LastPingTime here means that we're technically measuring the time
   602  			// between *queueing* the ping and when we receive a pong vs the time between when
   603  			// a ping is actually sent and when the pong is received. To fix it we'd have to
   604  			// detect a ping message in the outHandler and set the stats there instead.
   605  			pp.StatsMtx.Lock()
   606  			pp.LastPingNonce = nonce
   607  			pp.LastPingTime = time.Now()
   608  			pp.StatsMtx.Unlock()
   609  			// Queue the ping message to be sent.
   610  			pp.QueueMessage(&MsgDeSoPing{Nonce: nonce})
   611  
   612  		case <-pp.quit:
   613  			break out
   614  		}
   615  	}
   616  }
   617  
   618  func (pp *Peer) String() string {
   619  	isDisconnected := ""
   620  	if atomic.LoadInt32(&pp.disconnected) != 0 {
   621  		isDisconnected = ", DISCONNECTED"
   622  	}
   623  	return fmt.Sprintf("[ Remote Address: %v%s PeerID=%d ]", pp.addrStr, isDisconnected, pp.ID)
   624  }
   625  
   626  func (pp *Peer) Connected() bool {
   627  	return atomic.LoadInt32(&pp.disconnected) == 0
   628  }
   629  
   630  func (pp *Peer) Address() string {
   631  	return pp.addrStr
   632  }
   633  
   634  func (pp *Peer) IP() string {
   635  	return pp.netAddr.IP.String()
   636  }
   637  
   638  func (pp *Peer) Port() uint16 {
   639  	return pp.netAddr.Port
   640  }
   641  
   642  func (pp *Peer) IsOutbound() bool {
   643  	return pp.isOutbound
   644  }
   645  
   646  func (pp *Peer) QueueMessage(desoMessage DeSoMessage) {
   647  	// If the peer is disconnected, don't queue anything.
   648  	if !pp.Connected() {
   649  		return
   650  	}
   651  
   652  	pp.outputQueueChan <- desoMessage
   653  }
   654  
   655  func (pp *Peer) _handleOutExpectedResponse(msg DeSoMessage) {
   656  	pp.PeerInfoMtx.Lock()
   657  	defer pp.PeerInfoMtx.Unlock()
   658  
   659  	// If we're sending the peer a GetBlocks message, we expect each requested
   660  	// block to arrive within a few seconds of the previous one.
   661  	stallTimeout := time.Duration(int64(pp.stallTimeoutSeconds) * int64(time.Second))
   662  	if msg.GetMsgType() == MsgTypeGetBlocks {
   663  		getBlocks := msg.(*MsgDeSoGetBlocks)
   664  		// We expect one block for each entry in the message.
   665  		for ii := range getBlocks.HashList {
   666  			pp._addExpectedResponse(&ExpectedResponse{
   667  				TimeExpected: time.Now().Add(
   668  					stallTimeout + time.Duration(int64(ii)*int64(stallTimeout))),
   669  				MessageType: MsgTypeBlock,
   670  			})
   671  		}
   672  	}
   673  
   674  	// If we're sending a GetHeaders message, the Peer should respond within
   675  	// a few seconds with a HeaderBundle.
   676  	if msg.GetMsgType() == MsgTypeGetHeaders {
   677  		pp._addExpectedResponse(&ExpectedResponse{
   678  			TimeExpected: time.Now().Add(stallTimeout),
   679  			MessageType:  MsgTypeHeaderBundle,
   680  		})
   681  	}
   682  
   683  	// If we're sending a GetTransactions message, the Peer should respond within
   684  	// a few seconds with a TransactionBundle. Every GetTransactions message should
   685  	// receive a TransactionBundle in response. The
   686  	// Server handles situations in which we request certain hashes but only get
   687  	// back a subset of them in the response (i.e. a case in which we received a
   688  	// timely reply but the reply was incomplete).
   689  	if msg.GetMsgType() == MsgTypeGetTransactions {
   690  		pp._addExpectedResponse(&ExpectedResponse{
   691  			TimeExpected: time.Now().Add(stallTimeout),
   692  			MessageType:  MsgTypeTransactionBundle,
   693  			// The Server handles situations in which the Peer doesn't send us all of
   694  			// the hashes we were expecting using timeouts on requested hashes.
   695  		})
   696  	}
   697  }
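For a GetBlocks request with N hashes, the loop above gives block ii a deadline of (ii+1) * stallTimeout from now, so each successive block gets one more stall window. A quick standalone illustration, assuming stallTimeoutSeconds = 10:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	stallTimeout := 10 * time.Second
	numBlocks := 3
	for ii := 0; ii < numBlocks; ii++ {
		// Mirrors: stallTimeout + time.Duration(int64(ii)*int64(stallTimeout))
		deadline := stallTimeout + time.Duration(ii)*stallTimeout
		fmt.Printf("block %d must arrive within %v\n", ii, deadline)
	}
	// Output:
	// block 0 must arrive within 10s
	// block 1 must arrive within 20s
	// block 2 must arrive within 30s
}
```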
   698  
   699  func (pp *Peer) _filterAddrMsg(addrMsg *MsgDeSoAddr) *MsgDeSoAddr {
   700  	pp.knownAddressMapLock.Lock()
   701  	defer pp.knownAddressMapLock.Unlock()
   702  
   703  	filteredAddrMsg := &MsgDeSoAddr{}
   704  	for _, addr := range addrMsg.AddrList {
   705  		if _, hasAddr := pp.knownAddressesmap[addr.StringWithPort(false /*includePort*/)]; hasAddr {
   706  			continue
   707  		}
   708  
   709  		// If we get here this is an address the peer hasn't seen before so
   710  		// don't filter it out. Also add it to the known address map.
   711  		filteredAddrMsg.AddrList = append(filteredAddrMsg.AddrList, addr)
   712  		pp.knownAddressesmap[addr.StringWithPort(false /*includePort*/)] = true
   713  	}
   714  
   715  	return filteredAddrMsg
   716  }
   717  
   718  func (pp *Peer) _setKnownAddressesMap(key string, val bool) {
   719  	pp.knownAddressMapLock.Lock()
   720  	defer pp.knownAddressMapLock.Unlock()
   721  
   722  	pp.knownAddressesmap[key] = val
   723  }
   724  
   725  func (pp *Peer) outHandler() {
   726  	glog.V(1).Infof("Peer.outHandler: Starting outHandler for Peer %v", pp)
   727  	stallTicker := time.NewTicker(time.Second)
   728  out:
   729  	for {
   730  		select {
   731  		case msg := <-pp.outputQueueChan:
   732  			// Wire up the responses we expect from the Peer depending on what
   733  			// type of message it is.
   734  			pp._handleOutExpectedResponse(msg)
   735  
   736  			if msg.GetMsgType() == MsgTypeInv {
   737  				invMsg := msg.(*MsgDeSoInv)
   738  
   739  				if len(invMsg.InvList) == 0 {
   740  					// Don't send anything if the inv list is empty.
   741  					continue
   742  				}
   743  
   744  				// Add the new inventory to the peer's knownInventory.
   745  				for _, invVect := range invMsg.InvList {
   746  					pp.knownInventory.Add(*invVect)
   747  				}
   748  			}
   749  
   750  			// If we're sending a block, remove it from our blocksToSend map to allow
   751  			// the peer to request more blocks after receiving this one.
   752  			if msg.GetMsgType() == MsgTypeBlock {
   753  				pp.blocksToSendMtx.Lock()
   754  				hash, _ := msg.(*MsgDeSoBlock).Hash()
   755  				delete(pp.blocksToSend, *hash)
   756  				pp.blocksToSendMtx.Unlock()
   757  			}
   758  
   759  			// Before we send an addr message to the peer, filter out the addresses
   760  			// the peer is already aware of.
   761  			if msg.GetMsgType() == MsgTypeAddr {
   762  				msg = pp._filterAddrMsg(msg.(*MsgDeSoAddr))
   763  
   764  				// Don't send anything if we managed to filter out all the addresses.
   765  				if len(msg.(*MsgDeSoAddr).AddrList) == 0 {
   766  					continue
   767  				}
   768  			}
   769  
   770  			// If we have a problem sending a message to a peer then disconnect them.
   771  			if err := pp.WriteDeSoMessage(msg); err != nil {
   772  				glog.Errorf("Peer.outHandler: Problem sending message to peer: %v: %v", pp, err)
   773  				pp.Disconnect()
   774  			}
   775  		case <-stallTicker.C:
   776  			// Every second, check whether there's something the peer should
   777  			// have responded to by now. If there is, log an error and
   778  			// disconnect the Peer.
   779  			if len(pp.expectedResponses) == 0 {
   780  				// If there are no expected responses, nothing to do.
   781  				continue
   782  			}
   783  			// The expected responses are sorted by deadline, with the earliest
   784  			// first. As such, if the first entry is not past its deadline then nothing is.
   785  			firstEntry := pp.expectedResponses[0]
   786  			nowTime := time.Now()
   787  			if nowTime.After(firstEntry.TimeExpected) {
   788  				glog.Errorf("Peer.outHandler: Peer %v took too long to response to "+
   789  					"reqest. Expected MsgType=%v at time %v but it is now time %v",
   790  					pp, firstEntry.MessageType, firstEntry.TimeExpected, nowTime)
   791  				pp.Disconnect()
   792  			}
   793  
   794  		case <-pp.quit:
   795  			break out
   796  		}
   797  	}
   798  
   799  	glog.V(1).Infof("Peer.outHandler: Quitting outHandler for Peer %v", pp)
   800  }
   801  
   802  func (pp *Peer) _maybeAddBlocksToSend(msg DeSoMessage) error {
   803  	// If the input is not a GetBlocks message, don't do anything.
   804  	if msg.GetMsgType() != MsgTypeGetBlocks {
   805  		return nil
   806  	}
   807  
   808  	// At this point, we're sure this is a GetBlocks message. Acquire the
   809  	// blocksToSend mutex and cast the message.
   810  	pp.blocksToSendMtx.Lock()
   811  	defer pp.blocksToSendMtx.Unlock()
   812  	getBlocks := msg.(*MsgDeSoGetBlocks)
   813  
   814  	// When blocks have been requested, add them to the list of blocks we're
   815  	// in the process of sending to the Peer.
   816  	for _, hash := range getBlocks.HashList {
   817  		pp.blocksToSend[*hash] = true
   818  	}
   819  
   820  	// If the peer has exceeded the number of blocks she is allowed to request
   821  	// then disconnect her.
   822  	if len(pp.blocksToSend) > MaxBlocksInFlight {
   823  		pp.Disconnect()
   824  		return fmt.Errorf("_maybeAddBlocksToSend: Disconnecting peer %v because she requested %d "+
   825  			"blocks, which is more than the %d blocks allowed "+
   826  			"in flight", pp, len(pp.blocksToSend), MaxBlocksInFlight)
   827  	}
   828  
   829  	return nil
   830  }
   831  
   832  func (pp *Peer) _removeEarliestExpectedResponse(msgType MsgType) *ExpectedResponse {
   833  	pp.PeerInfoMtx.Lock()
   834  	defer pp.PeerInfoMtx.Unlock()
   835  
   836  	// Just remove the first instance we find of the passed-in message
   837  	// type and return.
   838  	for ii, res := range pp.expectedResponses {
   839  		if res.MessageType == msgType {
   840  			// We found the first occurrence of the message type so remove
   841  			// that message since we're no longer waiting on it.
   842  			left := append([]*ExpectedResponse{}, pp.expectedResponses[:ii]...)
   843  			pp.expectedResponses = append(left, pp.expectedResponses[ii+1:]...)
   844  
   845  			// Return so we stop processing.
   846  			return res
   847  		}
   848  	}
   849  
   850  	return nil
   851  }
   852  
   853  func (pp *Peer) _addExpectedResponse(item *ExpectedResponse) {
   854  	if len(pp.expectedResponses) == 0 {
   855  		pp.expectedResponses = []*ExpectedResponse{item}
   856  		return
   857  	}
   858  
   859  	// Usually the item will need to be added at the end so start
   860  	// from there.
   861  	index := len(pp.expectedResponses)
   862  	for index > 0 &&
   863  		pp.expectedResponses[index-1].TimeExpected.After(item.TimeExpected) {
   864  
   865  		index--
   866  	}
   867  
   868  	left := append([]*ExpectedResponse{}, pp.expectedResponses[:index]...)
   869  	right := pp.expectedResponses[index:]
   870  	pp.expectedResponses = append(append(left, item), right...)
   871  }
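Because _addExpectedResponse scans backward from the end, the common case of appending the latest deadline is O(1), and the slice stays sorted ascending by TimeExpected. A hedged test-style sketch of that invariant; this would live in a _test.go file with "testing" imported and is not part of this file:

```go
// Hedged test-style sketch: inserting out-of-order items should still
// yield ascending TimeExpected.
func TestAddExpectedResponseKeepsOrder(t *testing.T) {
	pp := &Peer{}
	now := time.Now()
	for _, offset := range []time.Duration{30, 10, 20} {
		pp._addExpectedResponse(&ExpectedResponse{
			TimeExpected: now.Add(offset * time.Second),
			MessageType:  MsgTypeBlock,
		})
	}
	for ii := 1; ii < len(pp.expectedResponses); ii++ {
		prev := pp.expectedResponses[ii-1].TimeExpected
		cur := pp.expectedResponses[ii].TimeExpected
		if prev.After(cur) {
			t.Fatalf("expectedResponses out of order at index %d", ii)
		}
	}
}
```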
   872  
   873  func (pp *Peer) _handleInExpectedResponse(rmsg DeSoMessage) error {
   874  	// Let the Peer off the hook if the response is one we were waiting for.
   875  	// Do this in a separate check to keep things clean.
   876  	msgType := rmsg.GetMsgType()
   877  	if msgType == MsgTypeBlock ||
   878  		msgType == MsgTypeHeaderBundle ||
   879  		msgType == MsgTypeTransactionBundle {
   880  
   881  		expectedResponse := pp._removeEarliestExpectedResponse(msgType)
   882  		if expectedResponse == nil {
   883  			// We should never get one of these types of messages unless we've previously
   884  			// requested it so disconnect the Peer in this case.
   885  			errRet := fmt.Errorf("_handleInExpectedResponse: Received unsolicited message "+
   886  				"of type %v %v from peer %v -- disconnecting", msgType, rmsg, pp)
   887  			glog.V(1).Info(errRet)
   888  			// TODO: This check is disabled for now so that we can inject transactions into the node.
   889  			//return errRet
   890  		}
   891  
   892  		// If we get here then we managed to dequeue a message we were
   893  		// expecting, which is good.
   894  	}
   895  
   896  	return nil
   897  }
   898  
   899  // inHandler handles all incoming messages for the peer. It must be run as a
   900  // goroutine.
   901  func (pp *Peer) inHandler() {
   902  	glog.V(1).Infof("Peer.inHandler: Starting inHandler for Peer %v", pp)
   903  
   904  	// The timer is stopped when a new message is received and reset after it
   905  	// is processed.
   906  	idleTimer := time.AfterFunc(idleTimeout, func() {
   907  		glog.V(1).Infof("Peer.inHandler: Peer %v no answer for %v -- disconnecting", pp, idleTimeout)
   908  		pp.Disconnect()
   909  	})
   910  
   911  out:
   912  	for {
   913  		// Read a message and stop the idle timer as soon as the read
   914  		// is done. The timer is reset below for the next iteration if
   915  		// needed.
   916  		rmsg, err := pp.ReadDeSoMessage()
   917  		idleTimer.Stop()
   918  		if err != nil {
   919  			glog.Errorf("Peer.inHandler: Can't read message from peer %v: %v", pp, err)
   920  
   921  			break out
   922  		}
   923  
   924  		// Adjust what we expect our Peer to send us based on what we're now
   925  		// receiving with this message.
   926  		if err := pp._handleInExpectedResponse(rmsg); err != nil {
   927  			break out
   928  		}
   929  
   930  		// If we get an addr message, add all of the addresses to the known addresses
   931  		// for the peer.
   932  		if rmsg.GetMsgType() == MsgTypeAddr {
   933  			addrMsg := rmsg.(*MsgDeSoAddr)
   934  			for _, addr := range addrMsg.AddrList {
   935  				pp._setKnownAddressesMap(addr.StringWithPort(false /*includePort*/), true)
   936  			}
   937  		}
   938  
   939  		// If we receive a control message from a Peer then that Peer is misbehaving
   940  		// and we should disconnect. Control messages should never originate from Peers.
   941  		if IsControlMessage(rmsg.GetMsgType()) {
   942  			glog.Errorf("Peer.inHandler: Received control message of type %v from "+
   943  				"Peer %v; this should never happen. Disconnecting the Peer", rmsg.GetMsgType(), pp)
   944  			break out
   945  		}
   946  
   947  		// Potentially adjust blocksToSend to account for blocks the Peer is
   948  		// currently requesting from us. Disconnect the Peer if she's requesting too many
   949  		// blocks now.
   950  		if err := pp._maybeAddBlocksToSend(rmsg); err != nil {
   951  			glog.Error(err)
   952  			break out
   953  		}
   954  
   955  		// This switch actually processes the message. For most messages, we just
   956  		// pass them onto the Server.
   957  		switch msg := rmsg.(type) {
   958  		case *MsgDeSoVersion:
   959  			// We always receive the VERSION from the Peer before entering this
   960  			// message loop, so getting one here is an error.
   961  
   962  			glog.Errorf("Peer.inHandler: Already received 'version' from peer %v -- disconnecting", pp)
   963  			break out
   964  
   965  		case *MsgDeSoVerack:
   966  			// We always receive the VERACK from the Peer before entering this
   967  			// message loop, so getting one here is an error.
   968  
   969  			glog.Errorf("Peer.inHandler: Already received 'verack' from peer %v -- disconnecting", pp)
   970  			break out
   971  
   972  		case *MsgDeSoPing:
   973  			// Respond to a ping with a pong.
   974  			pp.handlePingMsg(msg)
   975  
   976  		case *MsgDeSoPong:
   977  			// Measure the ping time when we receive a pong.
   978  			pp.handlePongMsg(msg)
   979  
   980  		case *MsgDeSoNewPeer, *MsgDeSoDonePeer,
   981  			*MsgDeSoBitcoinManagerUpdate, *MsgDeSoQuit:
   982  
   983  			// We should never receive control messages from a Peer. Disconnect if we do.
   984  			glog.Errorf("Peer.inHandler: Received control message of type %v from "+
   985  				"Peer %v which should never happen -- disconnecting", msg.GetMsgType(), pp)
   986  			break out
   987  
   988  		default:
   989  			// All other messages just forward back to the Server to handle them.
   990  			//glog.V(2).Infof("Peer.inHandler: Received message of type %v from %v", rmsg.GetMsgType(), pp)
   991  			pp.MessageChan <- &ServerMessage{
   992  				Peer: pp,
   993  				Msg:  msg,
   994  			}
   995  		}
   996  
   997  		// A message was received so reset the idle timer.
   998  		idleTimer.Reset(idleTimeout)
   999  	}
  1000  
  1001  	// Ensure the idle timer is stopped to avoid leaking the resource.
  1002  	idleTimer.Stop()
  1003  
  1004  	// Disconnect the Peer if it isn't already.
  1005  	pp.Disconnect()
  1006  
  1007  	glog.V(1).Infof("Peer.inHandler: done for peer: %v", pp)
  1008  }
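The idle timer above follows the time.AfterFunc / Stop / Reset pattern: arm the timer, disarm it the moment a blocking read returns, and re-arm it once the message has been processed. A standalone hedged sketch of the same pattern:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	idleTimeout := 100 * time.Millisecond
	done := make(chan struct{})
	idleTimer := time.AfterFunc(idleTimeout, func() {
		fmt.Println("idle timeout: would disconnect the peer here")
		close(done)
	})

	// Simulate two messages arriving before the deadline.
	for i := 0; i < 2; i++ {
		time.Sleep(50 * time.Millisecond) // stand-in for a blocking read
		idleTimer.Stop()                  // disarm while processing
		fmt.Println("processed message", i)
		idleTimer.Reset(idleTimeout) // re-arm for the next read
	}

	<-done // a third message never comes, so the timer fires
	idleTimer.Stop()
}
```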
  1009  
  1010  func (pp *Peer) Start() {
  1011  	glog.Infof("Peer.Start: Starting peer %v", pp)
  1012  	// The protocol has been negotiated successfully so start processing input
  1013  	// and output messages.
  1014  	go pp.pingHandler()
  1015  	go pp.outHandler()
  1016  	go pp.inHandler()
  1017  	go pp.StartDeSoMessageProcessor()
  1018  
  1019  	// If the address manager needs more addresses, then send a GetAddr message
  1020  	// to the peer. This is best-effort.
  1021  	if pp.cmgr != nil {
  1022  		if pp.cmgr.addrMgr.NeedMoreAddresses() {
  1023  			go func() {
  1024  				pp.QueueMessage(&MsgDeSoGetAddr{})
  1025  			}()
  1026  		}
  1027  	}
  1028  
  1029  	// Note: the version/verack exchange already happened in NegotiateVersion before Start was called.
  1030  }
  1031  
  1032  func (pp *Peer) IsSyncCandidate() bool {
  1033  	flagsAreCorrect := (pp.serviceFlags & SFFullNode) != 0
  1034  	return flagsAreCorrect && pp.isOutbound
  1035  }
  1036  
  1037  func (pp *Peer) WriteDeSoMessage(msg DeSoMessage) error {
  1038  	payload, err := WriteMessage(pp.conn, msg, pp.Params.NetworkType)
  1039  	if err != nil {
  1040  		return errors.Wrapf(err, "WriteDeSoMessage: ")
  1041  	}
  1042  
  1043  	// Only count the payload bytes sent in the statistics we track.
  1044  	atomic.AddUint64(&pp.bytesSent, uint64(len(payload)))
  1045  	atomic.StoreInt64(&pp.lastSend, time.Now().Unix())
  1046  
  1047  	// Useful for debugging.
  1048  	// TODO: This may be too verbose
  1049  	messageSeq := atomic.AddUint64(&pp.totalMessages, 1)
  1050  	glog.V(1).Infof("SENDING( seq=%d ) message of type: %v to peer %v: %v",
  1051  		messageSeq, msg.GetMsgType(), pp, msg)
  1052  
  1053  	return nil
  1054  }
  1055  
  1056  func (pp *Peer) ReadDeSoMessage() (DeSoMessage, error) {
  1057  	msg, payload, err := ReadMessage(pp.conn, pp.Params.NetworkType)
  1058  	if err != nil {
  1059  		err := errors.Wrapf(err, "ReadDeSoMessage: ")
  1060  		glog.Error(err)
  1061  		return nil, err
  1062  	}
  1063  
  1064  	// Only count the payload bytes received in the statistics we track.
  1065  	msgLen := uint64(len(payload))
  1066  	atomic.AddUint64(&pp.bytesReceived, msgLen)
  1067  	atomic.StoreInt64(&pp.lastRecv, time.Now().Unix())
  1068  
  1069  	// Useful for debugging.
  1070  	messageSeq := atomic.AddUint64(&pp.totalMessages, 1)
  1071  	glog.V(1).Infof("RECEIVED( seq=%d ) message of type: %v from peer %v: %v",
  1072  		messageSeq, msg.GetMsgType(), pp, msg)
  1073  
  1074  	return msg, nil
  1075  }
  1076  
  1077  func (pp *Peer) NewVersionMessage(params *DeSoParams) *MsgDeSoVersion {
  1078  	ver := NewMessage(MsgTypeVersion).(*MsgDeSoVersion)
  1079  
  1080  	ver.Version = params.ProtocolVersion
  1081  	ver.TstampSecs = time.Now().Unix()
  1082  	// We use an int64 instead of a uint64 for convenience but
  1083  	// this should be fine since we're just looking to generate a
  1084  	// unique value.
  1085  	ver.Nonce = uint64(RandInt64(math.MaxInt64))
  1086  	ver.UserAgent = params.UserAgent
  1087  	// TODO: Right now all peers are full nodes. Later on we'll want to change this,
  1088  	// at which point we'll need to do a little refactoring.
  1089  	ver.Services = SFFullNode
  1090  
  1091  	// When a node asks you for what height you have, you should reply with
  1092  	// the height of the latest actual block you have. This makes it so that
  1093  	// peers who have up-to-date headers but missing blocks won't be considered
  1094  	// for initial block download.
  1095  	//
  1096  	// TODO: This is ugly. It would be nice if the Peer required zero knowledge of the
  1097  	// Server and the Blockchain.
  1098  	if pp.srv != nil {
  1099  		ver.StartBlockHeight = uint32(pp.srv.blockchain.blockTip().Header.Height)
  1100  	} else {
  1101  		ver.StartBlockHeight = uint32(0)
  1102  	}
  1103  
  1104  	// Set the minimum fee rate the peer will accept.
  1105  	ver.MinFeeRateNanosPerKB = pp.minTxFeeRateNanosPerKB
  1106  
  1107  	return ver
  1108  }
  1109  
  1110  func (pp *Peer) sendVerack() error {
  1111  	verackMsg := NewMessage(MsgTypeVerack)
  1112  	// Include the nonce we received in the peer's version message so
  1113  	// we can validate that we actually control our IP address.
  1114  	verackMsg.(*MsgDeSoVerack).Nonce = pp.versionNonceReceived
  1115  	if err := pp.WriteDeSoMessage(verackMsg); err != nil {
  1116  		return errors.Wrap(err, "sendVerack: ")
  1117  	}
  1118  
  1119  	return nil
  1120  }
  1121  
  1122  func (pp *Peer) readVerack() error {
  1123  	msg, err := pp.ReadDeSoMessage()
  1124  	if err != nil {
  1125  		return errors.Wrap(err, "readVerack: ")
  1126  	}
  1127  	if msg.GetMsgType() != MsgTypeVerack {
  1128  		return fmt.Errorf(
  1129  			"readVerack: Received message with type %s but expected type VERACK. ",
  1130  			msg.GetMsgType().String())
  1131  	}
  1132  	verackMsg := msg.(*MsgDeSoVerack)
  1133  	if verackMsg.Nonce != pp.versionNonceSent {
  1134  		return fmt.Errorf(
  1135  			"readVerack: Received VERACK message with nonce %d but expected nonce %d",
  1136  			verackMsg.Nonce, pp.versionNonceSent)
  1137  	}
  1138  
  1139  	return nil
  1140  }
  1141  
  1142  func (pp *Peer) sendVersion() error {
  1143  	// For an outbound peer, we send a version message and then wait to
  1144  	// hear one back.
  1145  	verMsg := pp.NewVersionMessage(pp.Params)
  1146  
  1147  	// Record the nonce of this version message before we send it so we can
  1148  	// detect self connections and so we can validate that the peer actually
  1149  	// controls the IP she's supposedly communicating to us from.
  1150  	pp.versionNonceSent = verMsg.Nonce
  1151  	if pp.cmgr != nil {
  1152  		pp.cmgr.sentNonces.Add(pp.versionNonceSent)
  1153  	}
  1154  
  1155  	if err := pp.WriteDeSoMessage(verMsg); err != nil {
  1156  		return errors.Wrap(err, "sendVersion: ")
  1157  	}
  1158  
  1159  	return nil
  1160  }
  1161  
  1162  func (pp *Peer) readVersion() error {
  1163  	msg, err := pp.ReadDeSoMessage()
  1164  	if err != nil {
  1165  		return errors.Wrap(err, "readVersion: ")
  1166  	}
  1167  
  1168  	verMsg, ok := msg.(*MsgDeSoVersion)
  1169  	if !ok {
  1170  		return fmt.Errorf(
  1171  			"readVersion: Received message with type %s but expected type VERSION. "+
  1172  				"The VERSION message must preceed all others", msg.GetMsgType().String())
  1173  	}
  1174  	if verMsg.Version < pp.Params.MinProtocolVersion {
  1175  		return fmt.Errorf("readVersion: Peer's protocol version too low: %d (min: %v)",
  1176  			verMsg.Version, pp.Params.MinProtocolVersion)
  1177  	}
  1178  
  1179  	// If we've sent this nonce before then return an error since this is
  1180  	// a connection from ourselves.
  1181  	msgNonce := verMsg.Nonce
  1182  	if pp.cmgr != nil {
  1183  		if pp.cmgr.sentNonces.Contains(msgNonce) {
  1184  			pp.cmgr.sentNonces.Delete(msgNonce)
  1185  			return fmt.Errorf("readVersion: Rejecting connection to self")
  1186  		}
  1187  	}
  1188  	// Save the version nonce so we can include it in our verack message.
  1189  	pp.versionNonceReceived = msgNonce
  1190  
  1191  	// Set the peer info-related fields.
  1192  	pp.PeerInfoMtx.Lock()
  1193  	pp.userAgent = verMsg.UserAgent
  1194  	pp.serviceFlags = verMsg.Services
  1195  	pp.advertisedProtocolVersion = verMsg.Version
  1196  	negotiatedVersion := pp.Params.ProtocolVersion
  1197  	if pp.advertisedProtocolVersion < pp.Params.ProtocolVersion {
  1198  		negotiatedVersion = pp.advertisedProtocolVersion
  1199  	}
  1200  	pp.negotiatedProtocolVersion = negotiatedVersion
  1201  	pp.PeerInfoMtx.Unlock()
  1202  
  1203  	// Set the stats-related fields.
  1204  	pp.StatsMtx.Lock()
  1205  	pp.startingHeight = verMsg.StartBlockHeight
  1206  	pp.minTxFeeRateNanosPerKB = verMsg.MinFeeRateNanosPerKB
  1207  	pp.timeConnected = time.Unix(verMsg.TstampSecs, 0)
  1208  	pp.timeOffsetSecs = verMsg.TstampSecs - time.Now().Unix()
  1209  	pp.StatsMtx.Unlock()
  1210  
  1211  	// Update the timeSource now that we've gotten a version message from the
  1212  	// peer.
  1213  	if pp.cmgr != nil {
  1214  		pp.cmgr.timeSource.AddTimeSample(pp.addrStr, pp.timeConnected)
  1215  	}
  1216  
  1217  	return nil
  1218  }
  1219  
  1220  func (pp *Peer) readWithTimeout(readFunc func() error, readTimeout time.Duration) error {
  1221  	errChan := make(chan error, 1) // Buffered so the read goroutine can exit even if we time out first.
  1222  	go func() {
  1223  		errChan <- readFunc()
  1224  	}()
  1225  	select {
  1226  	case err := <-errChan:
  1227  		{
  1228  			return err
  1229  		}
  1230  	case <-time.After(readTimeout):
  1231  		{
  1232  			return fmt.Errorf("readWithTimeout: Timed out reading message from peer: (%v)", pp)
  1233  		}
  1234  	}
  1235  }
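readWithTimeout races the blocking read against time.After in a select. The error channel is buffered by one (see above) so that a read finishing after the timeout can still send its result and let the goroutine exit rather than leak. A standalone hedged sketch of the same pattern:

```go
package main

import (
	"fmt"
	"time"
)

func withTimeout(work func() error, timeout time.Duration) error {
	// Buffer of 1 so the worker goroutine can always send its result
	// and exit, even if the timeout already fired and nobody receives.
	errChan := make(chan error, 1)
	go func() {
		errChan <- work()
	}()
	select {
	case err := <-errChan:
		return err
	case <-time.After(timeout):
		return fmt.Errorf("timed out")
	}
}

func main() {
	slow := func() error {
		time.Sleep(200 * time.Millisecond)
		return nil
	}
	fmt.Println(withTimeout(slow, 50*time.Millisecond)) // timed out
	fmt.Println(withTimeout(slow, time.Second))         // <nil>
}
```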
  1236  
  1237  func (pp *Peer) NegotiateVersion(versionNegotiationTimeout time.Duration) error {
  1238  	if pp.isOutbound {
  1239  		// Write a version message.
  1240  		if err := pp.sendVersion(); err != nil {
  1241  			return errors.Wrapf(err, "negotiateVersion: Problem sending version to Peer %v", pp)
  1242  		}
  1243  		// Read the peer's version.
  1244  		if err := pp.readWithTimeout(
  1245  			pp.readVersion,
  1246  			versionNegotiationTimeout); err != nil {
  1247  
  1248  			return errors.Wrapf(err, "negotiateVersion: Problem reading OUTBOUND peer version for Peer %v", pp)
  1249  		}
  1250  	} else {
  1251  		// Read the version first since this is an inbound peer.
  1252  		if err := pp.readWithTimeout(
  1253  			pp.readVersion,
  1254  			versionNegotiationTimeout); err != nil {
  1255  
  1256  			return errors.Wrapf(err, "negotiateVersion: Problem reading INBOUND peer version for Peer %v", pp)
  1257  		}
  1258  		if err := pp.sendVersion(); err != nil {
  1259  			return errors.Wrapf(err, "negotiateVersion: Problem sending version to Peer %v", pp)
  1260  		}
  1261  	}
  1262  
  1263  	// After sending and receiving a compatible version, complete the
  1264  	// negotiation by sending and receiving a verack message.
  1265  	if err := pp.sendVerack(); err != nil {
  1266  		return errors.Wrapf(err, "negotiateVersion: Problem sending verack to Peer %v", pp)
  1267  	}
  1268  	if err := pp.readWithTimeout(
  1269  		pp.readVerack,
  1270  		versionNegotiationTimeout); err != nil {
  1271  
  1272  		return errors.Wrapf(err, "negotiateVersion: Problem reading VERACK message from Peer %v", pp)
  1273  	}
  1274  	pp.versionNegotiated = true
  1275  
  1276  	// At this point we have sent a version and validated our peer's
  1277  	// version. So the negotiation should be complete.
  1278  	return nil
  1279  }
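A hedged sketch of exercising the handshake in a test, assuming two Peers wired together over loopback TCP and DeSoMainnetParams for the params. The outbound side writes VERSION first while the inbound side reads first, so both can negotiate concurrently:

```go
// Hedged test-style sketch (not part of this file). A synchronous
// net.Pipe would deadlock when both sides write their verack at the
// same time, so we use real sockets, whose buffers absorb the small
// handshake messages.
ln, _ := net.Listen("tcp", "127.0.0.1:0")
defer ln.Close()

connChan := make(chan net.Conn, 1)
go func() {
	conn, _ := ln.Accept()
	connChan <- conn
}()
outConn, _ := net.Dial("tcp", ln.Addr().String())
inConn := <-connChan

outbound := NewPeer(outConn, true, nil, false, 10, 0, &DeSoMainnetParams, nil, nil, nil)
inbound := NewPeer(inConn, false, nil, false, 10, 0, &DeSoMainnetParams, nil, nil, nil)

errChan := make(chan error, 2)
go func() { errChan <- outbound.NegotiateVersion(5 * time.Second) }()
go func() { errChan <- inbound.NegotiateVersion(5 * time.Second) }()
for ii := 0; ii < 2; ii++ {
	if err := <-errChan; err != nil {
		// handle negotiation failure
	}
}
```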
  1280  
  1281  // Disconnect closes a peer's network connection.
  1282  func (pp *Peer) Disconnect() {
  1283  	// Only run the logic the first time Disconnect is called.
  1284  	if atomic.AddInt32(&pp.disconnected, 1) != 1 {
  1285  		glog.V(1).Infof("Peer.Disconnect: Disconnect call ignored since it was already called before for Peer %v", pp)
  1286  		return
  1287  	}
  1288  
  1289  	glog.V(1).Infof("Peer.Disconnect: Running Disconnect for the first time for Peer %v", pp)
  1290  
  1291  	// Close the connection object.
  1292  	pp.conn.Close()
  1293  
  1294  	// Signaling the quit channel allows all the other goroutines to stop running.
  1295  	close(pp.quit)
  1296  
  1297  	// Add the Peer to donePeers so that the ConnectionManager and Server can do any
  1298  	// cleanup they need to do.
  1299  	if pp.cmgr != nil {
  1300  		pp.cmgr.donePeerChan <- pp
  1301  	}
  1302  }
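The atomic.AddInt32 guard at the top of Disconnect makes it idempotent without a mutex: only the caller that moves the counter from zero to one runs the teardown. A standalone hedged sketch of the idiom:

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

type closer struct {
	closed int32
}

func (c *closer) close() {
	// Only the first caller observes the counter transitioning to 1.
	if atomic.AddInt32(&c.closed, 1) != 1 {
		return
	}
	fmt.Println("running teardown exactly once")
}

func main() {
	c := &closer{}
	var wg sync.WaitGroup
	for i := 0; i < 5; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			c.close()
		}()
	}
	wg.Wait()
}
```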
  1303  
  1304  func (pp *Peer) _logVersionSuccess() {
  1305  	inboundStr := "INBOUND"
  1306  	if pp.isOutbound {
  1307  		inboundStr = "OUTBOUND"
  1308  	}
  1309  	persistentStr := "PERSISTENT"
  1310  	if !pp.isPersistent {
  1311  		persistentStr = "NON-PERSISTENT"
  1312  	}
  1313  	logStr := fmt.Sprintf("SUCCESS version negotiation for (%s) (%s) peer (%v).", inboundStr, persistentStr, pp)
  1314  	glog.V(1).Info(logStr)
  1315  }
  1316  
  1317  func (pp *Peer) _logAddPeer() {
  1318  	inboundStr := "INBOUND"
  1319  	if pp.isOutbound {
  1320  		inboundStr = "OUTBOUND"
  1321  	}
  1322  	persistentStr := "PERSISTENT"
  1323  	if !pp.isPersistent {
  1324  		persistentStr = "NON-PERSISTENT"
  1325  	}
  1326  	logStr := fmt.Sprintf("ADDING (%s) (%s) peer (%v)", inboundStr, persistentStr, pp)
  1327  	glog.V(1).Info(logStr)
  1328  }