decred.org/dcrwallet/v3@v3.1.0/p2p/peering.go (about)

     1  // Copyright (c) 2018-2019 The Decred developers
     2  // Use of this source code is governed by an ISC
     3  // license that can be found in the LICENSE file.
     4  
     5  package p2p
     6  
     7  import (
     8  	"context"
     9  	"encoding/json"
    10  	"fmt"
    11  	"io"
    12  	"net"
    13  	"net/http"
    14  	"net/url"
    15  	"strconv"
    16  	"sync"
    17  	"sync/atomic"
    18  	"time"
    19  
    20  	"decred.org/dcrwallet/v3/errors"
    21  	"decred.org/dcrwallet/v3/lru"
    22  	"decred.org/dcrwallet/v3/version"
    23  	"github.com/decred/dcrd/addrmgr/v2"
    24  	"github.com/decred/dcrd/chaincfg/chainhash"
    25  	"github.com/decred/dcrd/chaincfg/v3"
    26  	"github.com/decred/dcrd/connmgr/v3"
    27  	"github.com/decred/dcrd/gcs/v4"
    28  	blockcf "github.com/decred/dcrd/gcs/v4/blockcf2"
    29  	"github.com/decred/dcrd/wire"
    30  	"github.com/decred/go-socks/socks"
    31  	"golang.org/x/sync/errgroup"
    32  )
    33  
// uaName is the LocalPeer useragent name.
const uaName = "dcrwallet"

// uaVersion is the LocalPeer useragent version.
var uaVersion = version.String()

// minPver is the minimum protocol version we require remote peers to
// implement.
const minPver = wire.CFilterV2Version

// Pver is the maximum protocol version implemented by the LocalPeer.
const Pver = wire.InitStateVersion

// connectTimeout is the amount of time allowed before connecting, peering
// handshake, and protocol negotiation is aborted.
const connectTimeout = 30 * time.Second

// stallTimeout is the amount of time allowed before a request to receive data
// that is known to exist at the RemotePeer times out with no matching reply.
const stallTimeout = 30 * time.Second

// banThreshold is the ban score limit applied to a misbehaving remote peer.
// NOTE(review): enforcement is not visible in this file — confirm at the
// banScore call sites.
const banThreshold = 100

// invLRUSize is the capacity of each per-peer LRU cache of inventory and
// header hashes.
const invLRUSize = 5000
    58  
// msgAck pairs an outgoing wire message with an optional channel that is
// signaled once the message has been handed to the connection writer.
type msgAck struct {
	msg wire.Message
	ack chan<- struct{}
}

// blockRequest tracks a single in-flight block fetch.  The ready channel is
// closed once a resolution (block or err) has been recorded; both fields
// must only be accessed while holding RemotePeer.requestedBlocksMu.
type blockRequest struct {
	hash  *chainhash.Hash
	ready chan struct{}
	block *wire.MsgBlock
	err   error
}
    70  
// RemotePeer represents a remote peer that can send and receive wire protocol
// messages with the local peer.  RemotePeers must be created by dialing the
// peer's address with a LocalPeer.
type RemotePeer struct {
	// atomics
	atomicClosed uint64 // set to 1 by the first Disconnect call

	// Identity and handshake results; not modified after the handshake.
	id         uint64
	lp         *LocalPeer
	ua         string
	services   wire.ServiceFlag
	pver       uint32
	initHeight int32
	raddr      net.Addr
	na         *addrmgr.NetAddress

	// io
	c       net.Conn
	mr      msgReader
	out     chan *msgAck // regular outgoing queue
	outPrio chan *msgAck // priority queue, drained before out by writeMessages
	pongs   chan *wire.MsgPong

	// requestedBlocksMu controls access to both the requestedBlocks map
	// *and* its contents. Changes to individual *blockRequests MUST only
	// be made under a locked requestedBlocksMu.
	requestedBlocksMu sync.Mutex
	requestedBlocks   map[chainhash.Hash]*blockRequest

	requestedCFiltersV2 sync.Map // k=chainhash.Hash v=chan<- *wire.MsgCFilterV2
	requestedTxs        map[chainhash.Hash]chan<- *wire.MsgTx
	requestedTxsMu      sync.Mutex

	// headers message management.  Headers can either be fetched synchronously
	// or used to push block notifications with sendheaders.
	requestedHeaders   chan<- *wire.MsgHeaders // non-nil result chan when synchronous getheaders in process
	sendheaders        bool                    // whether a sendheaders message was sent
	requestedHeadersMu sync.Mutex

	// init state message management.
	requestedInitState   chan<- *wire.MsgInitState // non-nil result chan when synchronous getinitstate in process
	requestedInitStateMu sync.Mutex

	invsSent     lru.Cache // Hashes from sent inventory messages
	invsRecv     lru.Cache // Hashes of received inventory messages
	knownHeaders lru.Cache // Hashes of received headers
	banScore     connmgr.DynamicBanScore

	err  error         // Final error of disconnected peer
	errc chan struct{} // Closed after err is set
}
   122  
// LocalPeer represents the local peer that can send and receive wire protocol
// messages with remote peers on the network.
type LocalPeer struct {
	// atomics
	atomicMask          uint64 // bitmask of MessageMask types handled by consumers
	atomicPeerIDCounter uint64 // source of unique RemotePeer IDs
	atomicRequireHeight int32  // minimum advertised height required of new peers

	dial DialFunc

	// Channels delivering received messages to package consumers via the
	// Receive* methods.
	receivedGetData   chan *inMsg
	receivedHeaders   chan *inMsg
	receivedInv       chan *inMsg
	announcedHeaders  chan *inMsg
	receivedInitState chan *inMsg

	extaddr     net.Addr
	amgr        *addrmgr.AddrManager
	chainParams *chaincfg.Params

	// rpByID tracks connected remote peers; guarded by rpMu.
	rpByID map[uint64]*RemotePeer
	rpMu   sync.Mutex
}
   146  
   147  // NewLocalPeer creates a LocalPeer that is externally reachable to remote peers
   148  // through extaddr.
   149  func NewLocalPeer(params *chaincfg.Params, extaddr *net.TCPAddr, amgr *addrmgr.AddrManager) *LocalPeer {
   150  	var dialer net.Dialer
   151  	lp := &LocalPeer{
   152  		dial:              dialer.DialContext,
   153  		receivedGetData:   make(chan *inMsg),
   154  		receivedHeaders:   make(chan *inMsg),
   155  		receivedInv:       make(chan *inMsg),
   156  		announcedHeaders:  make(chan *inMsg),
   157  		receivedInitState: make(chan *inMsg),
   158  		extaddr:           extaddr,
   159  		amgr:              amgr,
   160  		chainParams:       params,
   161  		rpByID:            make(map[uint64]*RemotePeer),
   162  	}
   163  	return lp
   164  }
   165  
// DialFunc provides a method to dial a network connection.
type DialFunc func(ctx context.Context, net, addr string) (net.Conn, error)

// SetDialFunc sets the function used to dial peer and seeder connections.
// It must be called before any connections are made; access is not
// synchronized.
func (lp *LocalPeer) SetDialFunc(dial DialFunc) {
	lp.dial = dial
}
   173  
   174  func isCGNAT(ip net.IP) bool {
   175  	if ip4 := ip.To4(); ip4 != nil {
   176  		return ip4[0] == 100 && ip4[1]&0xc0 == 64 // 100.64.0.0/10
   177  	}
   178  	return false
   179  }
   180  
// newNetAddress converts a TCP or SOCKS-proxied net.Addr into a wire
// NetAddress carrying the given service flags.  Addresses that are not
// globally routable (loopback, private, CGNAT, or other non-global-unicast)
// are zeroed out rather than advertised.
func newNetAddress(addr net.Addr, services wire.ServiceFlag) (*wire.NetAddress, error) {
	var ip net.IP
	var port uint16
	switch a := addr.(type) {
	case *net.TCPAddr:
		ip = a.IP
		port = uint16(a.Port)
	case *socks.ProxiedAddr:
		ip = net.ParseIP(a.Host)
		port = uint16(a.Port)
	default:
		return nil, fmt.Errorf("newNetAddress: unsupported address "+
			"type %T", addr)
	}
	// Suppress non-routable addresses (note: a nil ip from a failed
	// ParseIP above also lands here because IsGlobalUnicast is false).
	switch {
	case ip.IsLoopback(), ip.IsPrivate(), !ip.IsGlobalUnicast(), isCGNAT(ip):
		ip = nil
		port = 0
	}
	return wire.NewNetAddressIPPort(ip, port, services), nil
}
   202  
// newMsgVersion builds the local version message sent during the handshake
// over connection c.  The local address is deliberately the zero value, and
// a random nonce identifies this connection attempt.
func (lp *LocalPeer) newMsgVersion(pver uint32, c net.Conn) (*wire.MsgVersion, error) {
	la := new(wire.NetAddress)
	ra, err := newNetAddress(c.RemoteAddr(), 0)
	if err != nil {
		return nil, err
	}
	nonce, err := wire.RandomUint64()
	if err != nil {
		return nil, err
	}
	v := wire.NewMsgVersion(la, ra, nonce, 0)
	v.AddUserAgent(uaName, uaVersion)
	v.ProtocolVersion = int32(pver)
	return v, nil
}
   218  
// RequirePeerHeight sets the minimum height a peer must advertise during its
// handshake.  Peers advertising below this height will error during the
// handshake, and will not be marked as good peers in the address manager.
// Safe for concurrent use.
func (lp *LocalPeer) RequirePeerHeight(requiredHeight int32) {
	atomic.StoreInt32(&lp.atomicRequireHeight, requiredHeight)
}
   225  
// ConnectOutbound establishes a connection to a remote peer by their remote TCP
// address.  The peer is serviced in the background until the context is
// cancelled, the RemotePeer disconnects, times out, misbehaves, or the
// LocalPeer disconnects all peers.
func (lp *LocalPeer) ConnectOutbound(ctx context.Context, addr string, reqSvcs wire.ServiceFlag) (*RemotePeer, error) {
	const opf = "localpeer.ConnectOutbound(%v)"

	log.Debugf("Attempting connection to peer %v", addr)

	// Bound dial + handshake by connectTimeout; once connected, the peer is
	// serviced with the caller's longer-lived ctx.
	connectCtx, cancel := context.WithTimeout(ctx, connectTimeout)
	defer cancel()

	// Generate a unique ID for this peer and add the initial connection state.
	id := atomic.AddUint64(&lp.atomicPeerIDCounter, 1)

	tcpAddr, err := net.ResolveTCPAddr("tcp", addr)
	if err != nil {
		return nil, err
	}

	// Create a net address with assumed services.
	na := addrmgr.NewNetAddressIPPort(tcpAddr.IP, uint16(tcpAddr.Port), wire.SFNodeNetwork)
	na.Timestamp = time.Now()

	rp, err := lp.connectOutbound(connectCtx, id, addr, na)
	if err != nil {
		op := errors.Opf(opf, addr)
		return nil, errors.E(op, err)
	}

	go lp.serveUntilError(ctx, rp)

	// If the address manager is running low, ask this peer for more
	// addresses, and delay any rejection below by up to stallTimeout so the
	// address response has a chance to arrive before disconnecting.
	var waitForAddrs <-chan time.Time
	if lp.amgr.NeedMoreAddresses() {
		waitForAddrs = time.After(stallTimeout)
		err = rp.Addrs(ctx)
		if err != nil {
			op := errors.Opf(opf, rp.raddr)
			return nil, errors.E(op, err)
		}
	}

	// Disconnect from the peer if it does not specify all required services.
	if rp.services&reqSvcs != reqSvcs {
		op := errors.Opf(opf, rp.raddr)
		reason := errors.Errorf("missing required service flags %v", reqSvcs&^rp.services)
		err := errors.E(op, reason)
		// Reject and disconnect in the background, after any pending
		// address collection completes.
		go func() {
			if waitForAddrs != nil {
				<-waitForAddrs
			}
			reject := wire.NewMsgReject(wire.CmdVersion, wire.RejectNonstandard, reason.Error())
			rp.sendMessageAck(ctx, reject)
			rp.Disconnect(err)
		}()
		return nil, err
	}

	// Disconnect from the peer if its advertised last height is below our
	// required minimum.
	reqHeight := atomic.LoadInt32(&lp.atomicRequireHeight)
	if rp.initHeight < reqHeight {
		op := errors.Opf(opf, rp.raddr)
		err := errors.E(op, "peer is not synced")
		go func() {
			if waitForAddrs != nil {
				<-waitForAddrs
			}
			rp.Disconnect(err)
		}()
		return nil, err
	}

	// Mark this as a good address.
	lp.amgr.Good(na)

	return rp, nil
}
   304  
// AddrManager returns the local peer's address manager.
func (lp *LocalPeer) AddrManager() *addrmgr.AddrManager { return lp.amgr }

// NA returns the remote peer's net address.
func (rp *RemotePeer) NA() *addrmgr.NetAddress { return rp.na }

// UA returns the remote peer's user agent.
func (rp *RemotePeer) UA() string { return rp.ua }

// ID returns the remote ID.
func (rp *RemotePeer) ID() uint64 { return rp.id }

// InitialHeight returns the current height the peer advertised in its version
// message.
func (rp *RemotePeer) InitialHeight() int32 { return rp.initHeight }

// Services returns the remote peer's advertised service flags.
func (rp *RemotePeer) Services() wire.ServiceFlag { return rp.services }

// InvsSent returns an LRU cache of inventory hashes sent to the remote peer.
func (rp *RemotePeer) InvsSent() *lru.Cache { return &rp.invsSent }

// InvsRecv returns an LRU cache of inventory hashes received by the remote
// peer.
func (rp *RemotePeer) InvsRecv() *lru.Cache { return &rp.invsRecv }

// KnownHeaders returns an LRU cache of block hashes from received headers messages.
func (rp *RemotePeer) KnownHeaders() *lru.Cache { return &rp.knownHeaders }
   333  
   334  // SeedPeers seeds the local peer with remote addresses matching the
   335  // services.
   336  func (lp *LocalPeer) SeedPeers(ctx context.Context, services wire.ServiceFlag) {
   337  	seeders := lp.chainParams.Seeders()
   338  	url := &url.URL{
   339  		Scheme:   "https",
   340  		Path:     "/api/addrs",
   341  		RawQuery: fmt.Sprintf("services=%d", services),
   342  	}
   343  	resps := make(chan *http.Response)
   344  	client := http.Client{
   345  		Transport: &http.Transport{
   346  			DialContext: lp.dial,
   347  		},
   348  	}
   349  	cancels := make([]func(), 0, len(seeders))
   350  	defer func() {
   351  		for _, cancel := range cancels {
   352  			cancel()
   353  		}
   354  	}()
   355  	for _, host := range seeders {
   356  		host := host
   357  		url.Host = host
   358  		ctx, cancel := context.WithTimeout(ctx, time.Minute)
   359  		cancels = append(cancels, cancel)
   360  		req, err := http.NewRequestWithContext(ctx, http.MethodGet, url.String(), nil)
   361  		if err != nil {
   362  			log.Errorf("Bad seeder request: %v", err)
   363  			continue
   364  		}
   365  		go func() {
   366  			resp, err := client.Do(req)
   367  			if err != nil {
   368  				log.Warnf("Failed to seed addresses from %s: %v", host, err)
   369  				resp = nil
   370  			}
   371  			resps <- resp
   372  		}()
   373  	}
   374  	var na []*addrmgr.NetAddress
   375  	for range seeders {
   376  		resp := <-resps
   377  		if resp == nil {
   378  			continue
   379  		}
   380  		seeder := resp.Request.Host
   381  		var apiResponse struct {
   382  			Host     string `json:"host"`
   383  			Services uint64 `json:"services"`
   384  		}
   385  		dec := json.NewDecoder(io.LimitReader(resp.Body, 4096))
   386  		na = na[:0]
   387  		// Read at most 16 entries from each seeder, discard rest
   388  		for i := 0; i < 16; i++ {
   389  			err := dec.Decode(&apiResponse)
   390  			if errors.Is(err, io.EOF) {
   391  				break
   392  			}
   393  			if err != nil {
   394  				log.Warnf("Invalid seeder %v API response: %v", seeder, err)
   395  				break
   396  			}
   397  			host, port, err := net.SplitHostPort(apiResponse.Host)
   398  			if err != nil {
   399  				log.Warnf("Invalid host in seeder %v API: %v", seeder, err)
   400  				continue
   401  			}
   402  			ip := net.ParseIP(host)
   403  			if ip == nil {
   404  				log.Warnf("Invalid IP address %q in seeder %v API host field",
   405  					host, seeder)
   406  				continue
   407  			}
   408  			portNum, err := strconv.ParseUint(port, 10, 16)
   409  			if err != nil {
   410  				log.Warnf("Invalid port %q in seeder %v API host field", port,
   411  					seeder)
   412  				continue
   413  			}
   414  			log.Debugf("Discovered peer %v from seeder %v", apiResponse.Host,
   415  				seeder)
   416  			na = append(na, &addrmgr.NetAddress{
   417  				IP:        ip,
   418  				Port:      uint16(portNum),
   419  				Timestamp: time.Now(),
   420  				Services:  wire.ServiceFlag(apiResponse.Services),
   421  			})
   422  		}
   423  		resp.Body.Close()
   424  		if len(na) > 0 {
   425  			lp.amgr.AddAddresses(na, na[0])
   426  		}
   427  	}
   428  }
   429  
// msgReader iterates over wire messages read from an underlying reader.
// After next returns false, err holds the read failure.
type msgReader struct {
	r      io.Reader
	net    wire.CurrencyNet
	msg    wire.Message
	rawMsg []byte
	err    error
}

// next reads the next message using protocol version pver, recording the
// message (or error) on the receiver, and reports whether a message was read.
func (mr *msgReader) next(pver uint32) bool {
	mr.msg, mr.rawMsg, mr.err = wire.ReadMessage(mr.r, pver, mr.net)
	return mr.err == nil
}
   442  
// writeMessages drains the peer's outgoing queues and writes each message to
// the connection, preferring the priority queue over the regular one.  It
// returns when the context is canceled or a write fails.
func (rp *RemotePeer) writeMessages(ctx context.Context) error {
	e := make(chan error, 1)
	go func() {
		c := rp.c
		pver := rp.pver
		cnet := rp.lp.chainParams.Net
		for {
			var m *msgAck
			// Take a priority message if one is immediately available;
			// otherwise block on whichever queue becomes ready first.
			select {
			case m = <-rp.outPrio:
			default:
				select {
				case m = <-rp.outPrio:
				case m = <-rp.out:
				}
			}
			log.Debugf("%v -> %v", m.msg.Command(), rp.raddr)
			err := wire.WriteMessage(c, m.msg, pver, cnet)
			// Signal the sender even on write error; the error is
			// reported separately below.
			if m.ack != nil {
				m.ack <- struct{}{}
			}
			if err != nil {
				e <- err
				return
			}
		}
	}()
	select {
	case <-ctx.Done():
		// NOTE(review): on cancellation the writer goroutine keeps running
		// until its next write fails; it appears to rely on the connection
		// being closed elsewhere (see serveUntilError) — confirm.
		return ctx.Err()
	case err := <-e:
		return err
	}
}
   477  
// msgWriter writes wire messages for a particular currency network.
type msgWriter struct {
	w   io.Writer
	net wire.CurrencyNet
}

// write sends msg using protocol version pver, giving up when ctx is done.
// The write runs in its own goroutine; if ctx ends first, that goroutine
// completes (or fails) in the background and its result is discarded via
// the buffered channel.
func (mw *msgWriter) write(ctx context.Context, msg wire.Message, pver uint32) error {
	e := make(chan error, 1)
	go func() {
		e <- wire.WriteMessage(mw.w, msg, pver, mw.net)
	}()
	select {
	case <-ctx.Done():
		return ctx.Err()
	case err := <-e:
		return err
	}
}
   495  
// handshake performs the peering handshake over a freshly dialed connection
// c: it sends the local version message, requires the remote's first message
// to be its version message, negotiates a mutually supported protocol
// version, and sends a verack.  On success it returns a RemotePeer whose
// outgoing queues are initialized but not yet serviced.
func handshake(ctx context.Context, lp *LocalPeer, id uint64, na *addrmgr.NetAddress, c net.Conn) (*RemotePeer, error) {
	const op errors.Op = "p2p.handshake"

	rp := &RemotePeer{
		id:              id,
		lp:              lp,
		ua:              "",
		services:        0,
		pver:            Pver,
		raddr:           c.RemoteAddr(),
		na:              na,
		c:               c,
		mr:              msgReader{r: c, net: lp.chainParams.Net},
		out:             nil,
		outPrio:         nil,
		pongs:           make(chan *wire.MsgPong, 1),
		requestedBlocks: make(map[chainhash.Hash]*blockRequest),
		requestedTxs:    make(map[chainhash.Hash]chan<- *wire.MsgTx),
		invsSent:        lru.NewCache(invLRUSize),
		invsRecv:        lru.NewCache(invLRUSize),
		knownHeaders:    lru.NewCache(invLRUSize),
		errc:            make(chan struct{}),
	}

	mw := msgWriter{c, lp.chainParams.Net}

	// The first message sent must be the version message.
	lversion, err := lp.newMsgVersion(rp.pver, c)
	if err != nil {
		return nil, errors.E(op, err)
	}
	err = mw.write(ctx, lversion, rp.pver)
	if err != nil {
		return nil, errors.E(op, errors.IO, err)
	}

	// The first message received must also be a version message.
	// A short read deadline bounds how long we wait for it.
	err = c.SetReadDeadline(time.Now().Add(3 * time.Second))
	if err != nil {
		return nil, errors.E(op, errors.IO, err)
	}
	msg, _, err := wire.ReadMessage(c, Pver, lp.chainParams.Net)
	if err != nil {
		return nil, errors.E(op, errors.IO, err)
	}
	rversion, ok := msg.(*wire.MsgVersion)
	if !ok {
		return nil, errors.E(op, errors.Protocol, "first received message was not the version message")
	}
	rp.initHeight = rversion.LastBlock
	rp.services = rversion.Services
	rp.ua = rversion.UserAgent
	// Clear the deadline for all subsequent reads.  The error is ignored
	// here; any real connection fault surfaces on the next read.
	c.SetReadDeadline(time.Time{})

	// Negotiate protocol down to compatible version
	if uint32(rversion.ProtocolVersion) < minPver {
		return nil, errors.E(op, errors.Protocol, "remote peer has pver lower than minimum required")
	}
	if uint32(rversion.ProtocolVersion) < rp.pver {
		rp.pver = uint32(rversion.ProtocolVersion)
	}

	// Send the verack.  The received verack is ignored.
	err = mw.write(ctx, wire.NewMsgVerAck(), rp.pver)
	if err != nil {
		return nil, errors.E(op, errors.IO, err)
	}

	// Unbuffered queues are created only after a successful handshake so
	// that no messages can be queued to a half-constructed peer.
	rp.out = make(chan *msgAck)
	rp.outPrio = make(chan *msgAck)

	return rp, nil
}
   569  
   570  func (lp *LocalPeer) connectOutbound(ctx context.Context, id uint64, addr string,
   571  	na *addrmgr.NetAddress) (*RemotePeer, error) {
   572  
   573  	var c net.Conn
   574  	var retryDuration = 5 * time.Second
   575  	timer := time.NewTimer(retryDuration)
   576  	var err error
   577  	for {
   578  		// Mark the connection attempt.
   579  		lp.amgr.Attempt(na)
   580  
   581  		// Dial with a timeout of 10 seconds.
   582  		dialCtx, cancel := context.WithTimeout(ctx, 10*time.Second)
   583  		c, err = lp.dial(dialCtx, "tcp", addr)
   584  		cancel()
   585  		if err == nil {
   586  			break
   587  		}
   588  		var netErr net.Error
   589  		if errors.As(err, &netErr) && !netErr.Temporary() {
   590  			return nil, err
   591  		}
   592  		if ctx.Err() != nil {
   593  			return nil, ctx.Err()
   594  		}
   595  
   596  		select {
   597  		case <-ctx.Done():
   598  			timer.Stop()
   599  			return nil, ctx.Err()
   600  		case <-timer.C:
   601  			if retryDuration < 200*time.Second {
   602  				retryDuration += 5 * time.Second
   603  				timer.Reset(retryDuration)
   604  			}
   605  		}
   606  	}
   607  	lp.amgr.Connected(na)
   608  
   609  	rp, err := handshake(ctx, lp, id, na, c)
   610  	if err != nil {
   611  		return nil, err
   612  	}
   613  
   614  	// Associate connected rp with local peer.
   615  	lp.rpMu.Lock()
   616  	lp.rpByID[rp.id] = rp
   617  	lp.rpMu.Unlock()
   618  
   619  	// The real services of the net address are now known.
   620  	na.Services = rp.services
   621  
   622  	return rp, nil
   623  }
   624  
// serveUntilError runs the peer's read, write, and keepalive loops until any
// of them fails or ctx is canceled, then tears the connection down and
// removes the peer from the LocalPeer's peer table.
func (lp *LocalPeer) serveUntilError(ctx context.Context, rp *RemotePeer) {
	defer func() {
		// Remove from local peer
		log.Debugf("Disconnected from outbound peer %v", rp.raddr)
		lp.rpMu.Lock()
		delete(lp.rpByID, rp.id)
		lp.rpMu.Unlock()
	}()

	g, gctx := errgroup.WithContext(ctx)
	// Teardown goroutine: when any sibling fails (or ctx ends), gctx is
	// done and the connection is closed, which unblocks the read and write
	// loops below.
	g.Go(func() error {
		<-gctx.Done()
		rp.Disconnect(gctx.Err())
		rp.c.Close()
		return nil
	})
	g.Go(func() (err error) {
		defer func() {
			if err != nil && gctx.Err() == nil {
				log.Debugf("remotepeer(%v).readMessages: %v", rp.raddr, err)
			}
		}()
		return rp.readMessages(gctx)
	})
	g.Go(func() (err error) {
		defer func() {
			if err != nil && gctx.Err() == nil {
				log.Debugf("syncWriter(%v).write: %v", rp.raddr, err)
			}
		}()
		return rp.writeMessages(gctx)
	})
	// Keepalive: ping every two minutes of inactivity, allowing 15 seconds
	// for the matching pong before pingPong disconnects the peer.
	g.Go(func() error {
		for {
			select {
			case <-gctx.Done():
				return gctx.Err()
			case <-time.After(2 * time.Minute):
				ctx, cancel := context.WithDeadline(gctx, time.Now().Add(15*time.Second))
				rp.pingPong(ctx)
				cancel()
			}
		}
	})
	err := g.Wait()
	if err != nil {
		rp.Disconnect(err)
	}
}
   674  
// ErrDisconnected describes the error of a remote peer being disconnected by
// the local peer.  While the disconnection may be clean, other methods
// currently being called on the peer must return this as a non-nil error.
var ErrDisconnected = errors.New("peer has been disconnected")

// Disconnect closes the underlying TCP connection to a RemotePeer.  A nil
// reason is replaced with ErrDisconnected.  Only the first call has any
// effect; the final reason is published to Err callers by closing errc.
func (rp *RemotePeer) Disconnect(reason error) {
	if !atomic.CompareAndSwapUint64(&rp.atomicClosed, 0, 1) {
		// Already disconnected
		return
	}
	log.Debugf("Disconnecting %v", rp.raddr)
	rp.c.Close()
	if reason == nil {
		reason = ErrDisconnected
	}
	// err must be assigned before errc is closed so that Err observes it.
	rp.err = reason
	close(rp.errc)
}

// Err blocks until the RemotePeer disconnects, returning the reason for
// disconnection.
func (rp *RemotePeer) Err() error {
	<-rp.errc
	return rp.err
}
   702  
// RemoteAddr returns the remote address of the peer's TCP connection.
func (rp *RemotePeer) RemoteAddr() net.Addr {
	return rp.c.RemoteAddr()
}

// LocalAddr returns the local address of the peer's TCP connection.
func (rp *RemotePeer) LocalAddr() net.Addr {
	return rp.c.LocalAddr()
}

// BanScore returns the current dynamic ban score of the peer.
func (rp *RemotePeer) BanScore() uint32 {
	return rp.banScore.Int()
}

// Pver returns the negotiated protocol version.
func (rp *RemotePeer) Pver() uint32 { return rp.pver }

// String returns the string form of the peer's remote address.
func (rp *RemotePeer) String() string {
	return rp.raddr.String()
}
   724  
// inMsg pairs a received wire message with the remote peer that sent it.
type inMsg struct {
	rp  *RemotePeer
	msg wire.Message
}

// inMsgPool recycles inMsg values to reduce allocations on the hot receive
// path.
var inMsgPool = sync.Pool{
	New: func() interface{} { return new(inMsg) },
}

// newInMsg returns a pooled inMsg populated with rp and msg.  Callers must
// return it with recycleInMsg once the fields have been copied out.
func newInMsg(rp *RemotePeer, msg wire.Message) *inMsg {
	m := inMsgPool.Get().(*inMsg)
	m.rp = rp
	m.msg = msg
	return m
}

// recycleInMsg zeroes m (dropping its references for GC) and returns it to
// the pool.
func recycleInMsg(m *inMsg) {
	*m = inMsg{}
	inMsgPool.Put(m)
}
   745  
// readMessages reads wire messages from the remote peer until a read error
// occurs, dispatching each message to its handler in a separate goroutine so
// a slow handler cannot stall the read loop.  The final read error is
// returned.
func (rp *RemotePeer) readMessages(ctx context.Context) error {
	for rp.mr.next(rp.pver) {
		msg := rp.mr.msg
		log.Debugf("%v <- %v", msg.Command(), rp.raddr)
		// A version message after the handshake is a protocol violation.
		if _, ok := msg.(*wire.MsgVersion); ok {
			// TODO: reject duplicate version message
			return errors.E(errors.Protocol, "received unexpected version message")
		}
		// msg is declared inside the loop, so each goroutine captures its
		// own iteration's message.
		go func() {
			switch m := msg.(type) {
			case *wire.MsgAddr:
				rp.receivedAddr(ctx, m)
			case *wire.MsgBlock:
				rp.receivedBlock(ctx, m)
			case *wire.MsgCFilterV2:
				rp.receivedCFilterV2(ctx, m)
			case *wire.MsgNotFound:
				rp.receivedNotFound(ctx, m)
			case *wire.MsgTx:
				rp.receivedTx(ctx, m)
			case *wire.MsgGetData:
				rp.receivedGetData(ctx, m)
			case *wire.MsgHeaders:
				rp.receivedHeaders(ctx, m)
			case *wire.MsgInv:
				// Inventory is only forwarded when a consumer has opted
				// in via AddHandledMessages(MaskInv).
				if rp.lp.messageIsMasked(MaskInv) {
					rp.lp.receivedInv <- newInMsg(rp, msg)
				}
			case *wire.MsgReject:
				log.Warnf("%v reject(%v, %v, %v): %v", rp.raddr, m.Cmd, m.Code, &m.Hash, m.Reason)
			case *wire.MsgGetMiningState:
				rp.receivedGetMiningState(ctx)
			case *wire.MsgGetInitState:
				rp.receivedGetInitState(ctx)
			case *wire.MsgInitState:
				rp.receivedInitState(ctx, m)
			case *wire.MsgPing:
				pong(ctx, m, rp)
			case *wire.MsgPong:
				rp.receivedPong(ctx, m)
			}
		}()
	}
	return rp.mr.err
}
   791  
   792  func pong(ctx context.Context, ping *wire.MsgPing, rp *RemotePeer) {
   793  	ctx, cancel := context.WithTimeout(ctx, 5*time.Minute)
   794  	defer cancel()
   795  	select {
   796  	case <-ctx.Done():
   797  	case rp.outPrio <- &msgAck{wire.NewMsgPong(ping.Nonce), nil}:
   798  	}
   799  }
   800  
// MessageMask is a bitmask of message types that can be received and handled by
// consumers of this package by calling various Receive* methods on a LocalPeer.
// Received messages not in the mask are ignored and not receiving messages in
// the mask will leak goroutines.  Handled messages can be added and removed by
// using the AddHandledMessages and RemoveHandledMessages methods of a
// LocalPeer.
type MessageMask uint64

// Message mask constants
const (
	MaskGetData MessageMask = 1 << iota
	MaskInv
)
   814  
   815  // AddHandledMessages adds all messages defined by the bitmask.  This operation
   816  // is concurrent-safe.
   817  func (lp *LocalPeer) AddHandledMessages(mask MessageMask) {
   818  	for {
   819  		p := atomic.LoadUint64(&lp.atomicMask)
   820  		n := p | uint64(mask)
   821  		if atomic.CompareAndSwapUint64(&lp.atomicMask, p, n) {
   822  			return
   823  		}
   824  	}
   825  }
   826  
   827  // RemoveHandledMessages removes all messages defined by the bitmask.  This
   828  // operation is concurrent safe.
   829  func (lp *LocalPeer) RemoveHandledMessages(mask MessageMask) {
   830  	for {
   831  		p := atomic.LoadUint64(&lp.atomicMask)
   832  		n := p &^ uint64(mask)
   833  		if atomic.CompareAndSwapUint64(&lp.atomicMask, p, n) {
   834  			return
   835  		}
   836  	}
   837  }
   838  
// messageIsMasked reports whether any of the message types in m are currently
// handled by a consumer.  Concurrent-safe.
func (lp *LocalPeer) messageIsMasked(m MessageMask) bool {
	return atomic.LoadUint64(&lp.atomicMask)&uint64(m) != 0
}
   842  
// ReceiveGetData waits for a getdata message from a remote peer, returning the
// peer that sent the message, and the message itself.
func (lp *LocalPeer) ReceiveGetData(ctx context.Context) (*RemotePeer, *wire.MsgGetData, error) {
	select {
	case <-ctx.Done():
		return nil, nil, ctx.Err()
	case r := <-lp.receivedGetData:
		// Copy fields out before recycling the pooled carrier.
		rp, msg := r.rp, r.msg.(*wire.MsgGetData)
		recycleInMsg(r)
		return rp, msg, nil
	}
}

// ReceiveInv waits for an inventory message from a remote peer, returning the
// peer that sent the message, and the message itself.
func (lp *LocalPeer) ReceiveInv(ctx context.Context) (*RemotePeer, *wire.MsgInv, error) {
	select {
	case <-ctx.Done():
		return nil, nil, ctx.Err()
	case r := <-lp.receivedInv:
		rp, msg := r.rp, r.msg.(*wire.MsgInv)
		recycleInMsg(r)
		return rp, msg, nil
	}
}

// ReceiveHeadersAnnouncement returns any unrequested headers that were
// announced without an inventory message due to a previous sendheaders request.
func (lp *LocalPeer) ReceiveHeadersAnnouncement(ctx context.Context) (*RemotePeer, []*wire.BlockHeader, error) {
	select {
	case <-ctx.Done():
		return nil, nil, ctx.Err()
	case r := <-lp.announcedHeaders:
		rp, msg := r.rp, r.msg.(*wire.MsgHeaders)
		recycleInMsg(r)
		return rp, msg.Headers, nil
	}
}
   881  
// pingPong sends a ping with a random nonce and waits for the matching pong.
// The peer is disconnected if the deadline on ctx expires before the pong
// arrives, or if the pong carries the wrong nonce.
func (rp *RemotePeer) pingPong(ctx context.Context) {
	nonce, err := wire.RandomUint64()
	if err != nil {
		log.Errorf("Failed to generate random ping nonce: %v", err)
		return
	}
	// Queue the ping on the priority channel.
	select {
	case <-ctx.Done():
		return
	case rp.outPrio <- &msgAck{wire.NewMsgPing(nonce), nil}:
	}
	// Wait for the pong (delivered by receivedPong) or the deadline.
	select {
	case <-ctx.Done():
		// Only a deadline counts as a ping timeout; plain cancellation
		// is not a peer fault.
		if ctx.Err() == context.DeadlineExceeded {
			err := errors.E(errors.IO, "ping timeout")
			rp.Disconnect(err)
		}
	case pong := <-rp.pongs:
		if pong.Nonce != nonce {
			err := errors.E(errors.Protocol, "pong contains nonmatching nonce")
			rp.Disconnect(err)
		}
	}
}

// receivedPong forwards a received pong message to any pingPong waiter.
func (rp *RemotePeer) receivedPong(ctx context.Context, msg *wire.MsgPong) {
	select {
	case <-ctx.Done():
	case rp.pongs <- msg:
	}
}
   913  
   914  func (rp *RemotePeer) receivedAddr(ctx context.Context, msg *wire.MsgAddr) {
   915  	addrs := make([]*addrmgr.NetAddress, len(msg.AddrList))
   916  	for i, a := range msg.AddrList {
   917  		addrs[i] = &addrmgr.NetAddress{
   918  			IP:        a.IP,
   919  			Port:      a.Port,
   920  			Services:  a.Services,
   921  			Timestamp: a.Timestamp,
   922  		}
   923  	}
   924  	rp.lp.amgr.AddAddresses(addrs, rp.na)
   925  }
   926  
// receivedBlock handles an incoming block message by resolving the matching
// outstanding blockRequest created by Blocks/requestBlocks.  Receiving a
// block that was never requested is a protocol violation and disconnects the
// peer.
func (rp *RemotePeer) receivedBlock(ctx context.Context, msg *wire.MsgBlock) {
	const opf = "remotepeer(%v).receivedBlock(%v)"
	blockHash := msg.Header.BlockHash()

	// Acquire the lock so we can work with the relevant blockRequest.
	rp.requestedBlocksMu.Lock()
	req := rp.requestedBlocks[blockHash]
	if req == nil {
		rp.requestedBlocksMu.Unlock()
		op := errors.Opf(opf, rp.raddr, &blockHash)
		err := errors.E(op, errors.Protocol, "received unrequested block")
		rp.Disconnect(err)
		return
	}
	select {
	case <-req.ready:
		// Already have a resolution for this block.
	default:
		// First resolution: record the block and signal all waiters
		// by closing the ready channel.
		req.block = msg
		close(req.ready)
	}
	rp.requestedBlocksMu.Unlock()
}
   950  
   951  func (rp *RemotePeer) addRequestedCFilterV2(hash *chainhash.Hash, c chan<- *wire.MsgCFilterV2) (newRequest bool) {
   952  	_, loaded := rp.requestedCFiltersV2.LoadOrStore(*hash, c)
   953  	return !loaded
   954  }
   955  
// deleteRequestedCFilterV2 removes the outstanding cfilter request for the
// given block hash, if any.
func (rp *RemotePeer) deleteRequestedCFilterV2(hash *chainhash.Hash) {
	rp.requestedCFiltersV2.Delete(*hash)
}
   959  
   960  func (rp *RemotePeer) receivedCFilterV2(ctx context.Context, msg *wire.MsgCFilterV2) {
   961  	const opf = "remotepeer(%v).receivedCFilterV2(%v)"
   962  	var k interface{} = msg.BlockHash
   963  	v, ok := rp.requestedCFiltersV2.Load(k)
   964  	if !ok {
   965  		op := errors.Opf(opf, rp.raddr, &msg.BlockHash)
   966  		err := errors.E(op, errors.Protocol, "received unrequested cfilter")
   967  		rp.Disconnect(err)
   968  		return
   969  	}
   970  
   971  	rp.requestedCFiltersV2.Delete(k)
   972  	c := v.(chan<- *wire.MsgCFilterV2)
   973  	select {
   974  	case <-ctx.Done():
   975  	case c <- msg:
   976  	}
   977  }
   978  
   979  func (rp *RemotePeer) addRequestedHeaders(c chan<- *wire.MsgHeaders) (sendheaders, newRequest bool) {
   980  	rp.requestedHeadersMu.Lock()
   981  	if rp.sendheaders {
   982  		rp.requestedHeadersMu.Unlock()
   983  		return true, false
   984  	}
   985  	if rp.requestedHeaders != nil {
   986  		rp.requestedHeadersMu.Unlock()
   987  		return false, false
   988  	}
   989  	rp.requestedHeaders = c
   990  	rp.requestedHeadersMu.Unlock()
   991  	return false, true
   992  }
   993  
   994  func (rp *RemotePeer) deleteRequestedHeaders() {
   995  	rp.requestedHeadersMu.Lock()
   996  	rp.requestedHeaders = nil
   997  	rp.requestedHeadersMu.Unlock()
   998  }
   999  
// receivedHeaders handles an incoming headers message.  Depending on peer
// state it is either a block announcement (after a sendheaders request) or
// the reply to a synchronous getheaders request.  Out-of-sequence or
// unrequested headers are protocol violations and disconnect the peer.
func (rp *RemotePeer) receivedHeaders(ctx context.Context, msg *wire.MsgHeaders) {
	const opf = "remotepeer(%v).receivedHeaders"
	rp.requestedHeadersMu.Lock()
	var prevHash chainhash.Hash
	var prevHeight uint32
	for i, h := range msg.Headers {
		hash := h.BlockHash() // Must be type chainhash.Hash
		rp.knownHeaders.Add(hash)

		// Sanity check the headers connect to each other in sequence.
		if i > 0 && (!prevHash.IsEqual(&h.PrevBlock) || h.Height != prevHeight+1) {
			op := errors.Opf(opf, rp.raddr)
			err := errors.E(op, errors.Protocol, "received out-of-sequence headers")
			rp.Disconnect(err)
			rp.requestedHeadersMu.Unlock()
			return
		}

		prevHash = hash
		prevHeight = h.Height
	}

	if rp.sendheaders {
		// Headers are unsolicited announcements; forward them to the
		// local peer's announcement channel outside the mutex.
		rp.requestedHeadersMu.Unlock()
		select {
		case <-ctx.Done():
		case rp.lp.announcedHeaders <- newInMsg(rp, msg):
		}
		return
	}
	if rp.requestedHeaders == nil {
		op := errors.Opf(opf, rp.raddr)
		err := errors.E(op, errors.Protocol, "received unrequested headers")
		rp.Disconnect(err)
		rp.requestedHeadersMu.Unlock()
		return
	}
	// Claim the reply channel before unlocking so a concurrently received
	// headers message cannot deliver to the same request, then send
	// outside the mutex.
	c := rp.requestedHeaders
	rp.requestedHeaders = nil
	rp.requestedHeadersMu.Unlock()
	select {
	case <-ctx.Done():
	case c <- msg:
	}
}
  1045  
  1046  func (rp *RemotePeer) receivedNotFound(ctx context.Context, msg *wire.MsgNotFound) {
  1047  	const opf = "remotepeer(%v).receivedNotFound(%v)"
  1048  	var err error
  1049  	for _, inv := range msg.InvList {
  1050  		rp.requestedTxsMu.Lock()
  1051  		c, ok := rp.requestedTxs[inv.Hash]
  1052  		delete(rp.requestedTxs, inv.Hash)
  1053  		rp.requestedTxsMu.Unlock()
  1054  		if ok {
  1055  			close(c)
  1056  			continue
  1057  		}
  1058  
  1059  		// Blocks that were requested but that the remote peer does not
  1060  		// have end up also falling through to this conditional.
  1061  		if err == nil {
  1062  			op := errors.Errorf(opf, rp.raddr, &inv.Hash)
  1063  			err = errors.E(op, errors.Protocol, "received notfound for unrequested hash")
  1064  		}
  1065  	}
  1066  	if err != nil {
  1067  		rp.Disconnect(err)
  1068  	}
  1069  }
  1070  
  1071  func (rp *RemotePeer) addRequestedTx(hash *chainhash.Hash, c chan<- *wire.MsgTx) (newRequest bool) {
  1072  	rp.requestedTxsMu.Lock()
  1073  	_, ok := rp.requestedTxs[*hash]
  1074  	if !ok {
  1075  		rp.requestedTxs[*hash] = c
  1076  	}
  1077  	rp.requestedTxsMu.Unlock()
  1078  	return !ok
  1079  }
  1080  
  1081  func (rp *RemotePeer) deleteRequestedTx(hash *chainhash.Hash) {
  1082  	rp.requestedTxsMu.Lock()
  1083  	delete(rp.requestedTxs, *hash)
  1084  	rp.requestedTxsMu.Unlock()
  1085  }
  1086  
  1087  func (rp *RemotePeer) receivedTx(ctx context.Context, msg *wire.MsgTx) {
  1088  	const opf = "remotepeer(%v).receivedTx(%v)"
  1089  	txHash := msg.TxHash()
  1090  	rp.requestedTxsMu.Lock()
  1091  	c, ok := rp.requestedTxs[txHash]
  1092  	delete(rp.requestedTxs, txHash)
  1093  	rp.requestedTxsMu.Unlock()
  1094  	if !ok {
  1095  		op := errors.Opf(opf, rp.raddr, &txHash)
  1096  		err := errors.E(op, errors.Protocol, "received unrequested tx")
  1097  		rp.Disconnect(err)
  1098  		return
  1099  	}
  1100  	select {
  1101  	case <-ctx.Done():
  1102  	case c <- msg:
  1103  	}
  1104  }
  1105  
  1106  func (rp *RemotePeer) receivedGetData(ctx context.Context, msg *wire.MsgGetData) {
  1107  	if rp.banScore.Increase(0, uint32(len(msg.InvList))*banThreshold/wire.MaxInvPerMsg) > banThreshold {
  1108  		log.Warnf("%v: ban score reached threshold", rp.RemoteAddr())
  1109  		rp.Disconnect(errors.E(errors.Protocol, "ban score reached"))
  1110  		return
  1111  	}
  1112  
  1113  	if rp.lp.messageIsMasked(MaskGetData) {
  1114  		rp.lp.receivedGetData <- newInMsg(rp, msg)
  1115  	}
  1116  }
  1117  
  1118  func (rp *RemotePeer) addRequestedInitState(c chan<- *wire.MsgInitState) (newRequest bool) {
  1119  	rp.requestedInitStateMu.Lock()
  1120  	if rp.requestedInitState != nil {
  1121  		rp.requestedInitStateMu.Unlock()
  1122  		return false
  1123  	}
  1124  	rp.requestedInitState = c
  1125  	rp.requestedInitStateMu.Unlock()
  1126  	return true
  1127  }
  1128  
  1129  func (rp *RemotePeer) deleteRequestedInitState() {
  1130  	rp.requestedInitStateMu.Lock()
  1131  	rp.requestedInitState = nil
  1132  	rp.requestedInitStateMu.Unlock()
  1133  }
  1134  
  1135  func (rp *RemotePeer) receivedInitState(ctx context.Context, msg *wire.MsgInitState) {
  1136  	const opf = "remotepeer(%v).receivedInitState"
  1137  	rp.requestedInitStateMu.Lock()
  1138  	c := rp.requestedInitState
  1139  	rp.requestedInitState = nil
  1140  	rp.requestedInitStateMu.Unlock()
  1141  
  1142  	if c == nil {
  1143  		op := errors.Opf(opf, rp.raddr)
  1144  		err := errors.E(op, errors.Protocol, "received unrequested init state")
  1145  		rp.Disconnect(err)
  1146  		return
  1147  	}
  1148  
  1149  	select {
  1150  	case <-ctx.Done():
  1151  	case c <- msg:
  1152  	}
  1153  }
  1154  
  1155  func (rp *RemotePeer) receivedGetMiningState(ctx context.Context) {
  1156  	// Send an empty miningstate reply.
  1157  	m := wire.NewMsgMiningState()
  1158  	select {
  1159  	case <-ctx.Done():
  1160  	case <-rp.errc:
  1161  	case rp.out <- &msgAck{m, nil}:
  1162  	}
  1163  }
  1164  
  1165  func (rp *RemotePeer) receivedGetInitState(ctx context.Context) {
  1166  	// Send an empty initstate reply.
  1167  	m := wire.NewMsgInitState()
  1168  	select {
  1169  	case <-ctx.Done():
  1170  	case <-rp.errc:
  1171  	case rp.out <- &msgAck{m, nil}:
  1172  	}
  1173  }
  1174  
  1175  // Addrs requests a list of known active peers from a RemotePeer using getaddr.
  1176  // As many addr responses may be received for a single getaddr request, received
  1177  // address messages are handled asynchronously by the local peer and at least
  1178  // the stall timeout should be waited before disconnecting a remote peer while
  1179  // waiting for addr messages.
  1180  func (rp *RemotePeer) Addrs(ctx context.Context) error {
  1181  	const opf = "remotepeer(%v).Addrs"
  1182  	ctx, cancel := context.WithTimeout(ctx, stallTimeout)
  1183  	defer cancel()
  1184  
  1185  	m := wire.NewMsgGetAddr()
  1186  	select {
  1187  	case <-ctx.Done():
  1188  		if ctx.Err() == context.DeadlineExceeded {
  1189  			op := errors.Opf(opf, rp.raddr)
  1190  			err := errors.E(op, errors.IO, "peer appears stalled")
  1191  			rp.Disconnect(err)
  1192  			return err
  1193  		}
  1194  		return ctx.Err()
  1195  	case <-rp.errc:
  1196  		return rp.err
  1197  	case rp.out <- &msgAck{m, nil}:
  1198  		return nil
  1199  	}
  1200  }
  1201  
  1202  // Block requests a block from a RemotePeer using getdata.
  1203  func (rp *RemotePeer) Block(ctx context.Context, blockHash *chainhash.Hash) (*wire.MsgBlock, error) {
  1204  	const opf = "remotepeer(%v).Block(%v)"
  1205  
  1206  	blocks, err := rp.Blocks(ctx, []*chainhash.Hash{blockHash})
  1207  	if err != nil {
  1208  		op := errors.Opf(opf, rp.raddr, blockHash)
  1209  		return nil, errors.E(op, err)
  1210  	}
  1211  
  1212  	return blocks[0], nil
  1213  }
  1214  
// requestBlocks sends a getdata wire message and waits for all the specified
// blocks to be received. This blocks so it should be called from a goroutine.
func (rp *RemotePeer) requestBlocks(reqs []*blockRequest) {
	// Aux func to fulfill requests. It signals any outstanding requests of
	// the given error and removes all from the requestedBlocks map.
	fulfill := func(err error) {
		rp.requestedBlocksMu.Lock()
		for _, req := range reqs {
			select {
			case <-req.ready:
				// Already fulfilled.
			default:
				req.err = err
				close(req.ready)
			}
			delete(rp.requestedBlocks, *req.hash)
		}
		rp.requestedBlocksMu.Unlock()
	}

	// Build the message.
	//
	// TODO: split into batches when len(blockHashes) > wire.MaxInvPerMsg
	// so AddInvVect() can't error.
	m := wire.NewMsgGetDataSizeHint(uint(len(reqs)))
	for _, req := range reqs {
		err := m.AddInvVect(wire.NewInvVect(wire.InvTypeBlock, req.hash))
		if err != nil {
			fulfill(err)
			return
		}
	}

	// Send the message.  The stall timer bounds how long the send may
	// block on the peer's output queue.
	stalled := time.NewTimer(stallTimeout)
	select {
	case <-stalled.C:
		err := errors.E(errors.IO, "peer appears stalled")
		rp.Disconnect(err)
		fulfill(err)
		return

	case <-rp.errc:
		// Stop returns false when the timer already fired, in which
		// case its channel must be drained before discarding it.
		if !stalled.Stop() {
			<-stalled.C
		}
		fulfill(rp.err)
		return

	case rp.out <- &msgAck{m, nil}:
	}

	// Receive responses.  The timer is reset after each received block,
	// so each individual block must arrive within stallTimeout.
	for i := 0; i < len(reqs); i++ {
		select {
		case <-stalled.C:
			err := errors.E(errors.IO, "peer appears stalled")
			rp.Disconnect(err)
			fulfill(err)
			return

		case <-rp.errc:
			if !stalled.Stop() {
				<-stalled.C
			}
			fulfill(rp.err)
			return

		case <-reqs[i].ready:
			// Stop and drain before Reset, as required when the
			// expiry may have raced with the ready signal.
			if !stalled.Stop() {
				<-stalled.C
			}
			stalled.Reset(stallTimeout)
		}
	}

	// Remove all requests that were just completed from the
	// `requestedBlocks` map.
	fulfill(nil)
}
  1295  
// Blocks requests multiple blocks at a time from a RemotePeer using a single
// getdata message.  It returns when all of the blocks have been received.
func (rp *RemotePeer) Blocks(ctx context.Context, blockHashes []*chainhash.Hash) ([]*wire.MsgBlock, error) {
	const opf = "remotepeer(%v).Blocks"

	// Determine which blocks don't have an in-flight request yet so we can
	// dispatch a new one for them.
	reqs := make([]*blockRequest, len(blockHashes))
	newReqs := make([]*blockRequest, 0, len(blockHashes))
	rp.requestedBlocksMu.Lock()
	for i, h := range blockHashes {
		if req, ok := rp.requestedBlocks[*h]; ok {
			// Already requesting this block.  Share the existing
			// request and wait on its ready channel below.
			reqs[i] = req
			continue
		}

		// Not requesting this block yet.
		req := &blockRequest{
			ready: make(chan struct{}),
			hash:  h,
		}
		reqs[i] = req
		rp.requestedBlocks[*h] = req
		newReqs = append(newReqs, req)
	}
	rp.requestedBlocksMu.Unlock()

	// Request any blocks which have not yet been requested.  The buffer of
	// one guarantees the goroutine's send always completes, even when this
	// function returns early on error or cancellation.
	var doneRequests chan struct{}
	if len(newReqs) > 0 {
		doneRequests = make(chan struct{}, 1)
		go func() {
			rp.requestBlocks(newReqs)
			doneRequests <- struct{}{}
		}()
	}

	// Wait for all blocks to be received or to error out.
	blocks := make([]*wire.MsgBlock, len(blockHashes))
	for i, req := range reqs {
		select {
		case <-req.ready:
			if req.err != nil {
				op := errors.Opf(opf, rp.raddr)
				return nil, errors.E(op, req.err)
			}
			blocks[i] = req.block

		case <-ctx.Done():
			op := errors.Opf(opf, rp.raddr)
			return nil, errors.E(op, ctx.Err())
		}
	}

	// Wait for the requesting goroutine to finish before returning.
	if doneRequests != nil {
		<-doneRequests
	}
	return blocks, nil
}
  1356  
// ErrNotFound describes one or more transactions not being returned by a remote
// peer, indicated with notfound.  Transactions returns it alongside the
// partially-filled result slice, in which missing entries are nil.
var ErrNotFound = errors.E(errors.NotExist, "transaction not found")
  1360  
// Transactions requests multiple transactions at a time from a RemotePeer
// using a single getdata message.  It returns when all of the transactions
// and/or notfound messages have been received.  The same transaction may not be
// requested multiple times concurrently from the same peer.  Returns
// ErrNotFound with a slice of one or more nil transactions if any notfound
// messages are received for requested transactions.
func (rp *RemotePeer) Transactions(ctx context.Context, hashes []*chainhash.Hash) ([]*wire.MsgTx, error) {
	const opf = "remotepeer(%v).Transactions"

	// Register a buffered reply channel per hash before sending getdata.
	m := wire.NewMsgGetDataSizeHint(uint(len(hashes)))
	cs := make([]chan *wire.MsgTx, len(hashes))
	for i, h := range hashes {
		err := m.AddInvVect(wire.NewInvVect(wire.InvTypeTx, h))
		if err != nil {
			op := errors.Opf(opf, rp.raddr)
			return nil, errors.E(op, err)
		}
		cs[i] = make(chan *wire.MsgTx, 1)
		if !rp.addRequestedTx(h, cs[i]) {
			// Roll back the registrations made so far.
			for _, h := range hashes[:i] {
				rp.deleteRequestedTx(h)
			}
			op := errors.Opf(opf, rp.raddr)
			return nil, errors.E(op, errors.Errorf("tx %v is already being requested from this peer", h))
		}
	}
	select {
	case <-ctx.Done():
		for _, h := range hashes {
			rp.deleteRequestedTx(h)
		}
		return nil, ctx.Err()
	case rp.out <- &msgAck{m, nil}:
	}
	// Receive each reply in order.  A channel closed by receivedNotFound
	// yields ok == false and a nil tx.
	//
	// NOTE(review): the stall timer is not reset between replies (unlike
	// requestBlocks), so the entire batch shares a single stallTimeout —
	// confirm this is intended.
	txs := make([]*wire.MsgTx, len(hashes))
	var notfound bool
	stalled := time.NewTimer(stallTimeout)
	for i := 0; i < len(hashes); i++ {
		select {
		case <-ctx.Done():
			// Delay removal of the remaining requests until the
			// stall timer fires so a late reply is not treated as
			// unrequested.  Capturing i is safe: the function
			// returns immediately, so i is no longer mutated.
			go func() {
				<-stalled.C
				for _, h := range hashes[i:] {
					rp.deleteRequestedTx(h)
				}
			}()
			return nil, ctx.Err()
		case <-stalled.C:
			for _, h := range hashes[i:] {
				rp.deleteRequestedTx(h)
			}
			op := errors.Opf(opf, rp.raddr)
			err := errors.E(op, errors.IO, "peer appears stalled")
			rp.Disconnect(err)
			return nil, err
		case <-rp.errc:
			stalled.Stop()
			return nil, rp.err
		case m, ok := <-cs[i]:
			txs[i] = m
			notfound = notfound || !ok
		}
	}
	stalled.Stop()
	if notfound {
		return txs, ErrNotFound
	}
	return txs, nil
}
  1430  
// CFilterV2 requests a version 2 regular compact filter from a RemotePeer
// using getcfilterv2.  The same block can not be requested concurrently from
// the same peer.
//
// The inclusion proof data that ensures the cfilter is committed to in the
// header is returned as well.
func (rp *RemotePeer) CFilterV2(ctx context.Context, blockHash *chainhash.Hash) (*gcs.FilterV2, uint32, []chainhash.Hash, error) {
	const opf = "remotepeer(%v).CFilterV2(%v)"

	// Version 2 cfilters require this protocol version or later.
	if rp.pver < wire.CFilterV2Version {
		op := errors.Opf(opf, rp.raddr, blockHash)
		err := errors.Errorf("protocol version %v is too low to fetch cfiltersv2 from this peer", rp.pver)
		return nil, 0, nil, errors.E(op, errors.Protocol, err)
	}

	m := wire.NewMsgGetCFilterV2(blockHash)
	c := make(chan *wire.MsgCFilterV2, 1)
	if !rp.addRequestedCFilterV2(blockHash, c) {
		op := errors.Opf(opf, rp.raddr, blockHash)
		return nil, 0, nil, errors.E(op, errors.Invalid, "cfilterv2 is already being requested from this peer for this block")
	}
	stalled := time.NewTimer(stallTimeout)
	// out is nilled after a successful send so the request is queued at
	// most once; later iterations only wait for the reply.
	out := rp.out
	for {
		select {
		case <-ctx.Done():
			// Delay removal of the in-flight request until the
			// stall timer fires so a late reply is not treated as
			// unrequested (which would disconnect the peer).
			go func() {
				<-stalled.C
				rp.deleteRequestedCFilterV2(blockHash)
			}()
			return nil, 0, nil, ctx.Err()
		case <-stalled.C:
			rp.deleteRequestedCFilterV2(blockHash)
			op := errors.Opf(opf, rp.raddr, blockHash)
			err := errors.E(op, errors.IO, "peer appears stalled")
			rp.Disconnect(err)
			return nil, 0, nil, err
		case <-rp.errc:
			stalled.Stop()
			return nil, 0, nil, rp.err
		case out <- &msgAck{m, nil}:
			out = nil
		case m := <-c:
			stalled.Stop()
			// Parse the raw filter bytes with the fixed blockcf2
			// parameters before returning the commitment proof.
			var f *gcs.FilterV2
			var err error
			f, err = gcs.FromBytesV2(blockcf.B, blockcf.M, m.Data)
			if err != nil {
				op := errors.Opf(opf, rp.raddr, blockHash)
				return nil, 0, nil, errors.E(op, err)
			}
			return f, m.ProofIndex, m.ProofHashes, nil
		}
	}
}
  1486  
// filterProof is an alias to the same anonymous struct as wallet package's
// FilterProof struct.  A type alias (rather than a defined type) keeps the
// two assignable without importing the wallet package here.
type filterProof = struct {
	Filter     *gcs.FilterV2
	ProofIndex uint32
	Proof      []chainhash.Hash
}
  1494  
  1495  // CFiltersV2 requests version 2 cfilters for all blocks described by
  1496  // blockHashes.  This is currently implemented by making many separate
  1497  // getcfilter requests concurrently and waiting on every result.
  1498  //
  1499  // Note: returning a []func() is an ugly hack to prevent a cyclical dependency
  1500  // between the rpc package and the wallet package.
  1501  func (rp *RemotePeer) CFiltersV2(ctx context.Context, blockHashes []*chainhash.Hash) ([]filterProof, error) {
  1502  	// TODO: this is spammy and would be better implemented with a single
  1503  	// request/response.
  1504  	filters := make([]filterProof, len(blockHashes))
  1505  	g, ctx := errgroup.WithContext(ctx)
  1506  	for i := range blockHashes {
  1507  		i := i
  1508  		g.Go(func() error {
  1509  			f, pi, prf, err := rp.CFilterV2(ctx, blockHashes[i])
  1510  			filters[i] = filterProof{
  1511  				Filter:     f,
  1512  				ProofIndex: pi,
  1513  				Proof:      prf,
  1514  			}
  1515  			return err
  1516  		})
  1517  	}
  1518  	err := g.Wait()
  1519  	if err != nil {
  1520  		return nil, err
  1521  	}
  1522  	return filters, nil
  1523  }
  1524  
// SendHeaders sends the remote peer a sendheaders message.  This informs the
// peer to announce new blocks by immediately sending them in a headers message
// rather than sending an inv message containing the block hash.
//
// Once this is called, it is no longer permitted to use the synchronous
// GetHeaders method, as there is no guarantee that the next received headers
// message corresponds with any getheaders request.
func (rp *RemotePeer) SendHeaders(ctx context.Context) error {
	const opf = "remotepeer(%v).SendHeaders"

	// If negotiated protocol version allows it, and the option is set, request
	// blocks to be announced by pushing headers messages.
	if rp.pver < wire.SendHeadersVersion {
		op := errors.Opf(opf, rp.raddr)
		err := errors.Errorf("protocol version %v is too low to receive block header announcements", rp.pver)
		return errors.E(op, errors.Protocol, err)
	}

	// Set the flag optimistically before queuing the message; once true,
	// receivedHeaders treats all headers messages as announcements.  If it
	// was already set, the message was sent previously and nothing more is
	// needed.
	rp.requestedHeadersMu.Lock()
	old := rp.sendheaders
	rp.sendheaders = true
	rp.requestedHeadersMu.Unlock()
	if old {
		return nil
	}

	stalled := time.NewTimer(stallTimeout)
	defer stalled.Stop()
	select {
	case <-ctx.Done():
		// Roll the flag back since the message was never queued.
		rp.requestedHeadersMu.Lock()
		rp.sendheaders = false
		rp.requestedHeadersMu.Unlock()
		return ctx.Err()
	case <-stalled.C:
		op := errors.Opf(opf, rp.raddr)
		err := errors.E(op, errors.IO, "peer appears stalled")
		rp.Disconnect(err)
		return err
	case <-rp.errc:
		return rp.err
	case rp.out <- &msgAck{wire.NewMsgSendHeaders(), nil}:
		return nil
	}
}
  1570  
  1571  // Headers requests block headers from the RemotePeer with getheaders.  Block
  1572  // headers can not be requested concurrently from the same peer.  Sending a
  1573  // getheaders message and synchronously waiting for the result is not possible
  1574  // if a sendheaders message has been sent to the remote peer.
  1575  func (rp *RemotePeer) Headers(ctx context.Context, blockLocators []*chainhash.Hash, hashStop *chainhash.Hash) ([]*wire.BlockHeader, error) {
  1576  	const opf = "remotepeer(%v).Headers"
  1577  
  1578  	m := &wire.MsgGetHeaders{
  1579  		ProtocolVersion:    rp.pver,
  1580  		BlockLocatorHashes: blockLocators,
  1581  		HashStop:           *hashStop,
  1582  	}
  1583  	c := make(chan *wire.MsgHeaders, 1)
  1584  	sendheaders, newRequest := rp.addRequestedHeaders(c)
  1585  	if sendheaders {
  1586  		op := errors.Opf(opf, rp.raddr)
  1587  		return nil, errors.E(op, errors.Invalid, "synchronous getheaders after sendheaders is unsupported")
  1588  	}
  1589  	if !newRequest {
  1590  		op := errors.Opf(opf, rp.raddr)
  1591  		return nil, errors.E(op, errors.Invalid, "headers are already being requested from this peer")
  1592  	}
  1593  	stalled := time.NewTimer(stallTimeout)
  1594  	out := rp.out
  1595  	for {
  1596  		select {
  1597  		case <-ctx.Done():
  1598  			go func() {
  1599  				<-stalled.C
  1600  				rp.deleteRequestedHeaders()
  1601  			}()
  1602  			return nil, ctx.Err()
  1603  		case <-stalled.C:
  1604  			op := errors.Opf(opf, rp.raddr)
  1605  			err := errors.E(op, errors.IO, "peer appears stalled")
  1606  			rp.Disconnect(err)
  1607  			return nil, err
  1608  		case <-rp.errc:
  1609  			stalled.Stop()
  1610  			return nil, rp.err
  1611  		case out <- &msgAck{m, nil}:
  1612  			out = nil
  1613  		case m := <-c:
  1614  			stalled.Stop()
  1615  			return m.Headers, nil
  1616  		}
  1617  	}
  1618  }
  1619  
  1620  // HeadersAsync requests block headers from the RemotePeer with getheaders.
  1621  // Block headers can not be requested concurrently from the same peer.  This
  1622  // can only be used _after_ the sendheaders msg was sent and does _not_ wait
  1623  // for a reply from the remote peer. Headers will be delivered via the
  1624  // ReceiveHeaderAnnouncements call.
  1625  func (rp *RemotePeer) HeadersAsync(ctx context.Context, blockLocators []*chainhash.Hash, hashStop *chainhash.Hash) error {
  1626  	const opf = "remotepeer(%v).Headers"
  1627  
  1628  	rp.requestedHeadersMu.Lock()
  1629  	sendheaders := rp.sendheaders
  1630  	rp.requestedHeadersMu.Unlock()
  1631  	if !sendheaders {
  1632  		op := errors.Opf(opf, rp.raddr)
  1633  		return errors.E(op, errors.Invalid, "asynchronous getheaders before sendheaders is unsupported")
  1634  	}
  1635  
  1636  	m := &wire.MsgGetHeaders{
  1637  		ProtocolVersion:    rp.pver,
  1638  		BlockLocatorHashes: blockLocators,
  1639  		HashStop:           *hashStop,
  1640  	}
  1641  	stalled := time.NewTimer(stallTimeout)
  1642  	out := rp.out
  1643  	for {
  1644  		select {
  1645  		case <-ctx.Done():
  1646  			return ctx.Err()
  1647  		case <-stalled.C:
  1648  			op := errors.Opf(opf, rp.raddr)
  1649  			err := errors.E(op, errors.IO, "peer appears stalled")
  1650  			rp.Disconnect(err)
  1651  			return err
  1652  		case <-rp.errc:
  1653  			stalled.Stop()
  1654  			return rp.err
  1655  		case out <- &msgAck{m, nil}:
  1656  			return nil
  1657  		}
  1658  	}
  1659  }
  1660  
  1661  // PublishTransactions pushes an inventory message advertising transaction
  1662  // hashes of txs.
  1663  func (rp *RemotePeer) PublishTransactions(ctx context.Context, txs ...*wire.MsgTx) error {
  1664  	const opf = "remotepeer(%v).PublishTransactions"
  1665  	msg := wire.NewMsgInvSizeHint(uint(len(txs)))
  1666  	for i := range txs {
  1667  		txHash := txs[i].TxHash() // Must be type chainhash.Hash
  1668  		rp.invsSent.Add(txHash)
  1669  		err := msg.AddInvVect(wire.NewInvVect(wire.InvTypeTx, &txHash))
  1670  		if err != nil {
  1671  			op := errors.Opf(opf, rp.raddr)
  1672  			return errors.E(op, errors.Protocol, err)
  1673  		}
  1674  	}
  1675  	err := rp.SendMessage(ctx, msg)
  1676  	if err != nil {
  1677  		op := errors.Opf(opf, rp.raddr)
  1678  		return errors.E(op, err)
  1679  	}
  1680  	return nil
  1681  }
  1682  
  1683  // SendMessage sends an message to the remote peer.  Use this method carefully,
  1684  // as calling this with an unexpected message that changes the protocol state
  1685  // may cause problems with the convenience methods implemented by this package.
  1686  func (rp *RemotePeer) SendMessage(ctx context.Context, msg wire.Message) error {
  1687  	ctx, cancel := context.WithTimeout(ctx, stallTimeout)
  1688  	defer cancel()
  1689  	select {
  1690  	case <-ctx.Done():
  1691  		return ctx.Err()
  1692  	case rp.out <- &msgAck{msg, nil}:
  1693  		return nil
  1694  	}
  1695  }
  1696  
  1697  // sendMessageAck sends a message to a remote peer, waiting until the write
  1698  // finishes before returning.
  1699  func (rp *RemotePeer) sendMessageAck(ctx context.Context, msg wire.Message) error {
  1700  	ctx, cancel := context.WithTimeout(ctx, stallTimeout)
  1701  	defer cancel()
  1702  	ack := make(chan struct{}, 1)
  1703  	select {
  1704  	case <-ctx.Done():
  1705  		return ctx.Err()
  1706  	case rp.out <- &msgAck{msg, ack}:
  1707  		<-ack
  1708  		return nil
  1709  	}
  1710  }
  1711  
  1712  // ReceivedOrphanHeader increases the banscore for a peer due to them sending
  1713  // an orphan header. Returns an error if the banscore has been breached.
  1714  func (rp *RemotePeer) ReceivedOrphanHeader() error {
  1715  	// Allow up to 10 orphan header chain announcements.
  1716  	delta := uint32(banThreshold / 10)
  1717  	bs := rp.banScore.Increase(0, delta)
  1718  	if bs > banThreshold {
  1719  		return errors.E(errors.Protocol, "ban score reached due to orphan header")
  1720  	}
  1721  	return nil
  1722  }
  1723  
  1724  // SendHeadersSent returns whether this peer was already instructed to send new
  1725  // headers via the sendheaders message.
  1726  func (rp *RemotePeer) SendHeadersSent() bool {
  1727  	rp.requestedHeadersMu.Lock()
  1728  	sent := rp.sendheaders
  1729  	rp.requestedHeadersMu.Unlock()
  1730  	return sent
  1731  }
  1732  
  1733  // GetInitState attempts to get initial state by sending a GetInitState message.
  1734  func (rp *RemotePeer) GetInitState(ctx context.Context, msg *wire.MsgGetInitState) (*wire.MsgInitState, error) {
  1735  	const opf = "remotepeer(%v).GetInitState"
  1736  
  1737  	c := make(chan *wire.MsgInitState, 1)
  1738  	newRequest := rp.addRequestedInitState(c)
  1739  	if !newRequest {
  1740  		op := errors.Opf(opf, rp.raddr)
  1741  		return nil, errors.E(op, errors.Invalid, "init state is already being requested from this peer")
  1742  	}
  1743  
  1744  	stalled := time.NewTimer(stallTimeout)
  1745  	out := rp.out
  1746  	for {
  1747  		select {
  1748  		case <-ctx.Done():
  1749  			rp.deleteRequestedInitState()
  1750  			return nil, ctx.Err()
  1751  		case <-stalled.C:
  1752  			op := errors.Opf(opf, rp.raddr)
  1753  			err := errors.E(op, errors.IO, "peer appears stalled")
  1754  			rp.Disconnect(err)
  1755  			return nil, err
  1756  		case <-rp.errc:
  1757  			if !stalled.Stop() {
  1758  				<-stalled.C
  1759  			}
  1760  			return nil, rp.err
  1761  		case out <- &msgAck{msg, nil}:
  1762  			out = nil
  1763  		case msg := <-c:
  1764  			if !stalled.Stop() {
  1765  				<-stalled.C
  1766  			}
  1767  			return msg, nil
  1768  		}
  1769  	}
  1770  }