github.com/nspcc-dev/neo-go@v0.105.2-0.20240517133400-6be757af3eba/pkg/network/server.go

     1  package network
     2  
     3  import (
     4  	"context"
     5  	"crypto/rand"
     6  	"encoding/binary"
     7  	"errors"
     8  	"fmt"
     9  	"math/big"
    10  	mrand "math/rand"
    11  	"net"
    12  	"runtime"
    13  	"sort"
    14  	"strconv"
    15  	"sync"
    16  	"sync/atomic"
    17  	"time"
    18  
    19  	"github.com/nspcc-dev/neo-go/pkg/config"
    20  	"github.com/nspcc-dev/neo-go/pkg/core/block"
    21  	"github.com/nspcc-dev/neo-go/pkg/core/mempool"
    22  	"github.com/nspcc-dev/neo-go/pkg/core/mempoolevent"
    23  	"github.com/nspcc-dev/neo-go/pkg/core/mpt"
    24  	"github.com/nspcc-dev/neo-go/pkg/core/transaction"
    25  	"github.com/nspcc-dev/neo-go/pkg/encoding/address"
    26  	"github.com/nspcc-dev/neo-go/pkg/io"
    27  	"github.com/nspcc-dev/neo-go/pkg/network/bqueue"
    28  	"github.com/nspcc-dev/neo-go/pkg/network/capability"
    29  	"github.com/nspcc-dev/neo-go/pkg/network/extpool"
    30  	"github.com/nspcc-dev/neo-go/pkg/network/payload"
    31  	"github.com/nspcc-dev/neo-go/pkg/util"
    32  	"go.uber.org/zap"
    33  )
    34  
    35  const (
    36  	// peer numbers are arbitrary at the moment.
    37  	defaultMinPeers           = 5
    38  	defaultAttemptConnPeers   = 20
    39  	defaultMaxPeers           = 100
    40  	defaultExtensiblePoolSize = 20
    41  	defaultBroadcastFactor    = 0
    42  	maxBlockBatch             = 200
    43  	peerTimeFactor            = 1000
    44  )
    45  
    46  var (
    47  	errAlreadyConnected = errors.New("already connected")
    48  	errIdenticalID      = errors.New("identical node id")
    49  	errInvalidNetwork   = errors.New("invalid network")
    50  	errMaxPeers         = errors.New("max peers reached")
    51  	errServerShutdown   = errors.New("server shutdown")
    52  	errInvalidInvType   = errors.New("invalid inventory type")
    53  )
    54  
    55  type (
    56  	// Ledger is everything Server needs from the blockchain.
    57  	Ledger interface {
    58  		extpool.Ledger
    59  		mempool.Feer
    60  		bqueue.Blockqueuer
    61  		GetBlock(hash util.Uint256) (*block.Block, error)
    62  		GetConfig() config.Blockchain
    63  		GetHeader(hash util.Uint256) (*block.Header, error)
    64  		GetHeaderHash(uint32) util.Uint256
    65  		GetMaxVerificationGAS() int64
    66  		GetMemPool() *mempool.Pool
    67  		GetNotaryBalance(acc util.Uint160) *big.Int
    68  		GetNotaryContractScriptHash() util.Uint160
    69  		GetNotaryDepositExpiration(acc util.Uint160) uint32
    70  		GetTransaction(util.Uint256) (*transaction.Transaction, uint32, error)
    71  		HasBlock(util.Uint256) bool
    72  		HeaderHeight() uint32
    73  		P2PSigExtensionsEnabled() bool
    74  		PoolTx(t *transaction.Transaction, pools ...*mempool.Pool) error
    75  		PoolTxWithData(t *transaction.Transaction, data any, mp *mempool.Pool, feer mempool.Feer, verificationFunction func(t *transaction.Transaction, data any) error) error
    76  		RegisterPostBlock(f func(func(*transaction.Transaction, *mempool.Pool, bool) bool, *mempool.Pool, *block.Block))
    77  		SubscribeForBlocks(ch chan *block.Block)
    78  		UnsubscribeFromBlocks(ch chan *block.Block)
    79  	}
    80  
    81  	// Service is a service abstraction (oracle, state root, consensus, etc).
    82  	Service interface {
    83  		Name() string
    84  		Start()
    85  		Shutdown()
    86  	}
    87  
    88  	// Server represents the local Node in the network. Its transport could
    89  	// be of any kind.
    90  	Server struct {
    91  		// ServerConfig holds the Server configuration.
    92  		ServerConfig
    93  
    94  		// id is also known as the nonce of the server.
    95  		id uint32
    96  
    97  		// A copy of the Ledger's config.
    98  		config config.ProtocolConfiguration
    99  
   100  		transports        []Transporter
   101  		discovery         Discoverer
   102  		chain             Ledger
   103  		bQueue            *bqueue.Queue
   104  		bSyncQueue        *bqueue.Queue
   105  		mempool           *mempool.Pool
   106  		notaryRequestPool *mempool.Pool
   107  		extensiblePool    *extpool.Pool
   108  		notaryFeer        NotaryFeer
   109  
   110  		serviceLock    sync.RWMutex
   111  		services       map[string]Service
   112  		extensHandlers map[string]func(*payload.Extensible) error
   113  		txCallback     func(*transaction.Transaction)
   114  		txCbList       atomic.Value
   115  
   116  		txInLock sync.RWMutex
   117  		txin     chan *transaction.Transaction
   118  		txInMap  map[util.Uint256]struct{}
   119  
   120  		lock  sync.RWMutex
   121  		peers map[Peer]bool
   122  
   123  		// lastRequestedBlock contains the height of the last requested block.
   124  		lastRequestedBlock atomic.Uint32
   125  		// lastRequestedHeader contains the height of the last requested header.
   126  		lastRequestedHeader atomic.Uint32
   127  		register            chan Peer
   128  		unregister          chan peerDrop
   129  		handshake           chan Peer
   130  		quit                chan struct{}
   131  		relayFin            chan struct{}
   132  		runFin              chan struct{}
   133  		broadcastTxFin      chan struct{}
   134  		runProtoFin         chan struct{}
   135  
   136  		transactions chan *transaction.Transaction
   137  
   138  		syncReached atomic.Bool
   139  
   140  		stateSync StateSync
   141  
   142  		log *zap.Logger
   143  
   144  		// started is used to ensure the server is started and shut down only once.
   145  		started atomic.Bool
   146  
   147  		txHandlerLoopWG sync.WaitGroup
   148  	}
   149  
   150  	peerDrop struct {
   151  		peer   Peer
   152  		reason error
   153  	}
   154  )
   155  
   156  func randomID() uint32 {
   157  	buf := make([]byte, 4)
   158  	_, _ = rand.Read(buf)
   159  	return binary.BigEndian.Uint32(buf)
   160  }
   161  
   162  // NewServer returns a new Server, initialized with the given configuration.
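        //
        // A minimal usage sketch (assuming ready ServerConfig, Ledger and StateSync
        // values plus a zap logger; all names here are illustrative):
        //
        //	srv, err := network.NewServer(cfg, chain, stSync, logger)
        //	if err != nil {
        //		return err
        //	}
        //	srv.Start()
        //	defer srv.Shutdown()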
   163  func NewServer(config ServerConfig, chain Ledger, stSync StateSync, log *zap.Logger) (*Server, error) {
   164  	return newServerFromConstructors(config, chain, stSync, log, func(s *Server, addr string) Transporter {
   165  		return NewTCPTransport(s, addr, s.log)
   166  	}, newDefaultDiscovery)
   167  }
   168  
   169  func newServerFromConstructors(config ServerConfig, chain Ledger, stSync StateSync, log *zap.Logger,
   170  	newTransport func(*Server, string) Transporter,
   171  	newDiscovery func([]string, time.Duration, Transporter) Discoverer,
   172  ) (*Server, error) {
   173  	if log == nil {
   174  		return nil, errors.New("logger is a required parameter")
   175  	}
   176  
   177  	if config.ExtensiblePoolSize <= 0 {
   178  		config.ExtensiblePoolSize = defaultExtensiblePoolSize
   179  		log.Info("ExtensiblePoolSize is not set or wrong, using default value",
   180  			zap.Int("ExtensiblePoolSize", config.ExtensiblePoolSize))
   181  	}
   182  
   183  	s := &Server{
   184  		ServerConfig:   config,
   185  		chain:          chain,
   186  		id:             randomID(),
   187  		config:         chain.GetConfig().ProtocolConfiguration,
   188  		quit:           make(chan struct{}),
   189  		relayFin:       make(chan struct{}),
   190  		runFin:         make(chan struct{}),
   191  		broadcastTxFin: make(chan struct{}),
   192  		runProtoFin:    make(chan struct{}),
   193  		register:       make(chan Peer),
   194  		unregister:     make(chan peerDrop),
   195  		handshake:      make(chan Peer),
   196  		txInMap:        make(map[util.Uint256]struct{}),
   197  		peers:          make(map[Peer]bool),
   198  		mempool:        chain.GetMemPool(),
   199  		extensiblePool: extpool.New(chain, config.ExtensiblePoolSize),
   200  		log:            log,
   201  		txin:           make(chan *transaction.Transaction, 64),
   202  		transactions:   make(chan *transaction.Transaction, 64),
   203  		services:       make(map[string]Service),
   204  		extensHandlers: make(map[string]func(*payload.Extensible) error),
   205  		stateSync:      stSync,
   206  	}
   207  	if chain.P2PSigExtensionsEnabled() {
   208  		s.notaryFeer = NewNotaryFeer(chain)
   209  		s.notaryRequestPool = mempool.New(s.config.P2PNotaryRequestPayloadPoolSize, 1, true, updateNotarypoolMetrics)
   210  		chain.RegisterPostBlock(func(isRelevant func(*transaction.Transaction, *mempool.Pool, bool) bool, txpool *mempool.Pool, _ *block.Block) {
   211  			s.notaryRequestPool.RemoveStale(func(t *transaction.Transaction) bool {
   212  				return isRelevant(t, txpool, true)
   213  			}, s.notaryFeer)
   214  		})
   215  	}
   216  	s.bQueue = bqueue.New(chain, log, func(b *block.Block) {
   217  		s.tryStartServices()
   218  	}, updateBlockQueueLenMetric)
   219  
   220  	s.bSyncQueue = bqueue.New(s.stateSync, log, nil, updateBlockQueueLenMetric)
   221  
   222  	if s.MinPeers < 0 {
   223  		s.log.Info("bad MinPeers configured, using the default value",
   224  			zap.Int("configured", s.MinPeers),
   225  			zap.Int("actual", defaultMinPeers))
   226  		s.MinPeers = defaultMinPeers
   227  	}
   228  
   229  	if s.MaxPeers <= 0 {
   230  		s.log.Info("bad MaxPeers configured, using the default value",
   231  			zap.Int("configured", s.MaxPeers),
   232  			zap.Int("actual", defaultMaxPeers))
   233  		s.MaxPeers = defaultMaxPeers
   234  	}
   235  
   236  	if s.AttemptConnPeers <= 0 {
   237  		s.log.Info("bad AttemptConnPeers configured, using the default value",
   238  			zap.Int("configured", s.AttemptConnPeers),
   239  			zap.Int("actual", defaultAttemptConnPeers))
   240  		s.AttemptConnPeers = defaultAttemptConnPeers
   241  	}
   242  
   243  	if s.BroadcastFactor < 0 || s.BroadcastFactor > 100 {
   244  		s.log.Info("bad BroadcastFactor configured, using the default value",
   245  			zap.Int("configured", s.BroadcastFactor),
   246  			zap.Int("actual", defaultBroadcastFactor))
   247  		s.BroadcastFactor = defaultBroadcastFactor
   248  	}
   249  
   250  	if len(s.ServerConfig.Addresses) == 0 {
   251  		return nil, errors.New("no bind addresses configured")
   252  	}
   253  	transports := make([]Transporter, len(s.ServerConfig.Addresses))
   254  	for i, addr := range s.ServerConfig.Addresses {
   255  		transports[i] = newTransport(s, addr.Address)
   256  	}
   257  	s.transports = transports
   258  	s.discovery = newDiscovery(
   259  		s.Seeds,
   260  		s.DialTimeout,
   261  		// Here we need to pick up a single transporter, it will be used to
   262  		// dial, and it doesn't matter which one.
   263  		s.transports[0],
   264  	)
   265  
   266  	return s, nil
   267  }
   268  
   269  // ID returns the server's ID.
   270  func (s *Server) ID() uint32 {
   271  	return s.id
   272  }
   273  
   274  // Start will start the server and its underlying transport. Calling it twice
   275  // is a no-op. Caller should wait for Start to finish for normal server operation.
   276  func (s *Server) Start() {
   277  	if !s.started.CompareAndSwap(false, true) {
   278  		s.log.Info("node server already started")
   279  		return
   280  	}
   281  	s.log.Info("node started",
   282  		zap.Uint32("blockHeight", s.chain.BlockHeight()),
   283  		zap.Uint32("headerHeight", s.chain.HeaderHeight()))
   284  
   285  	s.tryStartServices()
   286  	s.initStaleMemPools()
   287  
   288  	var txThreads = optimalNumOfThreads()
   289  	s.txHandlerLoopWG.Add(txThreads)
   290  	for i := 0; i < txThreads; i++ {
   291  		go s.txHandlerLoop()
   292  	}
   293  	go s.broadcastTxLoop()
   294  	go s.relayBlocksLoop()
   295  	go s.bQueue.Run()
   296  	go s.bSyncQueue.Run()
   297  	for _, tr := range s.transports {
   298  		go tr.Accept()
   299  	}
   300  	setServerAndNodeVersions(s.UserAgent, strconv.FormatUint(uint64(s.id), 10))
   301  	setNeoGoVersion(config.Version)
   302  	setSeverID(strconv.FormatUint(uint64(s.id), 10))
   303  	go s.run()
   304  }
   305  
   306  // Shutdown disconnects all peers and stops listening. Calling it twice is a no-op;
   307  // once stopped, the same instance of the Server can't be started again by calling Start.
   308  func (s *Server) Shutdown() {
   309  	if !s.started.CompareAndSwap(true, false) {
   310  		return
   311  	}
   312  	s.log.Info("shutting down server", zap.Int("peers", s.PeerCount()))
   313  	for _, tr := range s.transports {
   314  		tr.Close()
   315  	}
   316  	for _, p := range s.getPeers(nil) {
   317  		p.Disconnect(errServerShutdown)
   318  	}
   319  	s.bQueue.Discard()
   320  	s.bSyncQueue.Discard()
   321  	s.serviceLock.RLock()
   322  	for _, svc := range s.services {
   323  		svc.Shutdown()
   324  	}
   325  	s.serviceLock.RUnlock()
   326  	if s.chain.P2PSigExtensionsEnabled() {
   327  		s.notaryRequestPool.StopSubscriptions()
   328  	}
   329  	close(s.quit)
   330  	<-s.broadcastTxFin
   331  	<-s.runProtoFin
   332  	<-s.relayFin
   333  	<-s.runFin
   334  	s.txHandlerLoopWG.Wait()
   335  
   336  	_ = s.log.Sync()
   337  }
   338  
   339  // AddService allows adding a service to be started/stopped by Server.
   340  func (s *Server) AddService(svc Service) {
   341  	s.serviceLock.Lock()
   342  	defer s.serviceLock.Unlock()
   343  	s.addService(svc)
   344  }
   345  
   346  // addService is an unlocked version of AddService.
   347  func (s *Server) addService(svc Service) {
   348  	s.services[svc.Name()] = svc
   349  }
   350  
   351  // GetBlockQueue returns the block queue instance managed by Server.
   352  func (s *Server) GetBlockQueue() *bqueue.Queue {
   353  	return s.bQueue
   354  }
   355  
   356  // AddExtensibleService registers a service that handles an extensible payload of some kind.
   357  func (s *Server) AddExtensibleService(svc Service, category string, handler func(*payload.Extensible) error) {
   358  	s.serviceLock.Lock()
   359  	defer s.serviceLock.Unlock()
   360  	s.addExtensibleService(svc, category, handler)
   361  }
   362  
   363  // addExtensibleService is an unlocked version of AddExtensibleService.
   364  func (s *Server) addExtensibleService(svc Service, category string, handler func(*payload.Extensible) error) {
   365  	s.extensHandlers[category] = handler
   366  	s.addService(svc)
   367  }
   368  
   369  // AddConsensusService registers a consensus service that handles transactions and dBFT extensible payloads.
   370  func (s *Server) AddConsensusService(svc Service, handler func(*payload.Extensible) error, txCallback func(*transaction.Transaction)) {
   371  	s.serviceLock.Lock()
   372  	defer s.serviceLock.Unlock()
   373  	s.txCallback = txCallback
   374  	s.addExtensibleService(svc, payload.ConsensusCategory, handler)
   375  }
   376  
   377  // DelService drops a service from the list; use it when the service is stopped
   378  // outside of the Server.
   379  func (s *Server) DelService(svc Service) {
   380  	s.serviceLock.Lock()
   381  	defer s.serviceLock.Unlock()
   382  	s.delService(svc)
   383  }
   384  
   385  // delService is an unlocked version of DelService.
   386  func (s *Server) delService(svc Service) {
   387  	delete(s.services, svc.Name())
   388  }
   389  
   390  // DelExtensibleService drops a service that handles extensible payloads from the
   391  // list; use it when the service is stopped outside of the Server.
   392  func (s *Server) DelExtensibleService(svc Service, category string) {
   393  	s.serviceLock.Lock()
   394  	defer s.serviceLock.Unlock()
   395  	s.delExtensibleService(svc, category)
   396  }
   397  
   398  // delExtensibleService is an unlocked version of DelExtensibleService.
   399  func (s *Server) delExtensibleService(svc Service, category string) {
   400  	delete(s.extensHandlers, category)
   401  	s.delService(svc)
   402  }
   403  
   404  // DelConsensusService unregisters the consensus service that handles transactions and dBFT extensible payloads.
   405  func (s *Server) DelConsensusService(svc Service) {
   406  	s.serviceLock.Lock()
   407  	defer s.serviceLock.Unlock()
   408  	s.txCallback = nil
   409  	s.delExtensibleService(svc, payload.ConsensusCategory)
   410  }
   411  
   412  // GetNotaryPool allows retrieving the notary pool if it's configured.
   413  func (s *Server) GetNotaryPool() *mempool.Pool {
   414  	return s.notaryRequestPool
   415  }
   416  
   417  // UnconnectedPeers returns a list of peers that are in the discovery peer list
   418  // but are not connected to the server.
   419  func (s *Server) UnconnectedPeers() []string {
   420  	return s.discovery.UnconnectedPeers()
   421  }
   422  
   423  // BadPeers returns a list of peers that are flagged as "bad" peers.
   424  func (s *Server) BadPeers() []string {
   425  	return s.discovery.BadPeers()
   426  }
   427  
   428  // ConnectedPeers returns a list of currently connected peers.
   429  func (s *Server) ConnectedPeers() []string {
   430  	s.lock.RLock()
   431  	defer s.lock.RUnlock()
   432  
   433  	peers := make([]string, 0, len(s.peers))
   434  	for k := range s.peers {
   435  		peers = append(peers, k.PeerAddr().String())
   436  	}
   437  
   438  	return peers
   439  }
   440  
   441  // run is a goroutine that starts another goroutine to manage protocol specifics
   442  // while itself dealing with peer management (handling connects/disconnects).
   443  func (s *Server) run() {
   444  	var (
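        		// peerCheckTime is deliberately long; the short ProtoTickInterval is
        		// used instead while we're below MinPeers (see peerT below).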
   445  		peerCheckTime    = s.TimePerBlock * peerTimeFactor
   446  		addrCheckTimeout bool
   447  		addrTimer        = time.NewTimer(peerCheckTime)
   448  		peerTimer        = time.NewTimer(s.ProtoTickInterval)
   449  	)
   450  	defer close(s.runFin)
   451  	defer addrTimer.Stop()
   452  	defer peerTimer.Stop()
   453  	go s.runProto()
   454  	for loopCnt := 0; ; loopCnt++ {
   455  		var (
   456  			netSize = s.discovery.NetworkSize()
   457  			// "Optimal" number of peers.
   458  			optimalN = s.discovery.GetFanOut() * 2
   459  			// Real number of peers.
   460  			peerN = s.HandshakedPeersCount()
   461  			// Timeout value for the next peerTimer, long one by default.
   462  			peerT = peerCheckTime
   463  		)
   464  
   465  		if peerN < s.MinPeers {
   466  			// Starting up or going below the minimum -> quickly get many new peers.
   467  			s.discovery.RequestRemote(s.AttemptConnPeers)
   468  			// Check/retry new connections soon.
   469  			peerT = s.ProtoTickInterval
   470  		} else if s.MinPeers > 0 && loopCnt%s.MinPeers == 0 && optimalN > peerN && optimalN < s.MaxPeers && optimalN < netSize {
   471  			// We have some peers, but can probably get more since the network is big.
   472  			// It also allows picking up new peers proactively, before we suddenly have fewer than s.MinPeers of them.
   473  			var connN = s.AttemptConnPeers
   474  			if connN > optimalN-peerN {
   475  				connN = optimalN - peerN
   476  			}
   477  			s.discovery.RequestRemote(connN)
   478  		}
   479  
   480  		if addrCheckTimeout || s.discovery.PoolCount() < s.AttemptConnPeers {
   481  			s.broadcastHPMessage(NewMessage(CMDGetAddr, payload.NewNullPayload()))
   482  			addrCheckTimeout = false
   483  		}
   484  		select {
   485  		case <-s.quit:
   486  			return
   487  		case <-addrTimer.C:
   488  			addrCheckTimeout = true
   489  			addrTimer.Reset(peerCheckTime)
   490  		case <-peerTimer.C:
   491  			peerTimer.Reset(peerT)
   492  		case p := <-s.register:
   493  			s.lock.Lock()
   494  			s.peers[p] = true
   495  			s.lock.Unlock()
   496  			peerCount := s.PeerCount()
   497  			s.log.Info("new peer connected", zap.Stringer("addr", p.RemoteAddr()), zap.Int("peerCount", peerCount))
   498  			if peerCount > s.MaxPeers {
   499  				s.lock.RLock()
   500  				// Pick a random peer and drop connection to it.
   501  				for peer := range s.peers {
   502  				// It will send us an unregister signal.
   503  					go peer.Disconnect(errMaxPeers)
   504  					break
   505  				}
   506  				s.lock.RUnlock()
   507  			}
   508  			updatePeersConnectedMetric(s.PeerCount())
   509  
   510  		case drop := <-s.unregister:
   511  			s.lock.Lock()
   512  			if s.peers[drop.peer] {
   513  				delete(s.peers, drop.peer)
   514  				s.lock.Unlock()
   515  				s.log.Warn("peer disconnected",
   516  					zap.Stringer("addr", drop.peer.RemoteAddr()),
   517  					zap.Error(drop.reason),
   518  					zap.Int("peerCount", s.PeerCount()))
   519  				if errors.Is(drop.reason, errIdenticalID) {
   520  					s.discovery.RegisterSelf(drop.peer)
   521  				} else {
   522  					s.discovery.UnregisterConnected(drop.peer, errors.Is(drop.reason, errAlreadyConnected))
   523  				}
   524  				updatePeersConnectedMetric(s.PeerCount())
   525  			} else {
   526  				// else the peer is already gone, which can happen
   527  				// because we have two goroutines sending signals here
   528  				s.lock.Unlock()
   529  			}
   530  
   531  		case p := <-s.handshake:
   532  			ver := p.Version()
   533  			s.log.Info("started protocol",
   534  				zap.Stringer("addr", p.RemoteAddr()),
   535  				zap.ByteString("userAgent", ver.UserAgent),
   536  				zap.Uint32("startHeight", p.LastBlockIndex()),
   537  				zap.Uint32("id", ver.Nonce))
   538  
   539  			s.discovery.RegisterGood(p)
   540  
   541  			s.tryInitStateSync()
   542  			s.tryStartServices()
   543  		}
   544  	}
   545  }
   546  
   547  // runProto is a goroutine that manages server-wide protocol events.
   548  func (s *Server) runProto() {
   549  	defer close(s.runProtoFin)
   550  	pingTimer := time.NewTimer(s.PingInterval)
   551  	for {
   552  		prevHeight := s.chain.BlockHeight()
   553  		select {
   554  		case <-s.quit:
   555  			return
   556  		case <-pingTimer.C:
   557  			if s.chain.BlockHeight() == prevHeight {
   558  				s.broadcastMessage(NewMessage(CMDPing, payload.NewPing(s.chain.BlockHeight(), s.id)))
   559  			}
   560  			pingTimer.Reset(s.PingInterval)
   561  		}
   562  	}
   563  }
   564  
   565  func (s *Server) tryStartServices() {
   566  	if s.syncReached.Load() {
   567  		return
   568  	}
   569  
   570  	if s.IsInSync() && s.syncReached.CompareAndSwap(false, true) {
   571  		s.log.Info("node reached synchronized state, starting services")
   572  		if s.chain.P2PSigExtensionsEnabled() {
   573  			s.notaryRequestPool.RunSubscriptions() // WSClient is also a subscriber.
   574  		}
   575  		s.serviceLock.RLock()
   576  		for _, svc := range s.services {
   577  			svc.Start()
   578  		}
   579  		s.serviceLock.RUnlock()
   580  	}
   581  }
   582  
   583  // SubscribeForNotaryRequests adds the given channel to notary request event
   584  // broadcasting, so when a new P2PNotaryRequest is received or an existing
   585  // P2PNotaryRequest is removed from the pool you'll receive it via this channel.
   586  // Make sure it's read from regularly as not reading these events might affect
   587  // other Server functions. Make sure you're not changing the received mempool
   588  // events, as it may affect the functionality of Blockchain and other subscribers.
   589  // Ensure that P2PSigExtensions are enabled before calling this method.
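        //
        // A minimal subscription sketch (channel buffer size is illustrative):
        //
        //	ch := make(chan mempoolevent.Event, 16)
        //	srv.SubscribeForNotaryRequests(ch)
        //	defer srv.UnsubscribeFromNotaryRequests(ch)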
   590  func (s *Server) SubscribeForNotaryRequests(ch chan<- mempoolevent.Event) {
   591  	if !s.chain.P2PSigExtensionsEnabled() {
   592  		panic("P2PSigExtensions are disabled")
   593  	}
   594  	s.notaryRequestPool.SubscribeForTransactions(ch)
   595  }
   596  
   597  // UnsubscribeFromNotaryRequests unsubscribes the given channel from notary request
   598  // notifications, you can close it afterwards. Passing non-subscribed channel
   599  // is a no-op.
   600  // Ensure that P2PSigExtensions are enabled before calling this method.
   601  func (s *Server) UnsubscribeFromNotaryRequests(ch chan<- mempoolevent.Event) {
   602  	if !s.chain.P2PSigExtensionsEnabled() {
   603  		panic("P2PSigExtensions are disabled")
   604  	}
   605  	s.notaryRequestPool.UnsubscribeFromTransactions(ch)
   606  }
   607  
   608  // getPeers returns the current list of the peers connected to the server filtered by
   609  // the isOK function if it's given.
   610  func (s *Server) getPeers(isOK func(Peer) bool) []Peer {
   611  	s.lock.RLock()
   612  	defer s.lock.RUnlock()
   613  
   614  	peers := make([]Peer, 0, len(s.peers))
   615  	for k := range s.peers {
   616  		if isOK != nil && !isOK(k) {
   617  			continue
   618  		}
   619  		peers = append(peers, k)
   620  	}
   621  
   622  	return peers
   623  }
   624  
   625  // PeerCount returns the number of the currently connected peers.
   626  func (s *Server) PeerCount() int {
   627  	s.lock.RLock()
   628  	defer s.lock.RUnlock()
   629  	return len(s.peers)
   630  }
   631  
   632  // HandshakedPeersCount returns the number of the connected peers
   633  // which have already performed handshake.
   634  func (s *Server) HandshakedPeersCount() int {
   635  	s.lock.RLock()
   636  	defer s.lock.RUnlock()
   637  
   638  	var count int
   639  
   640  	for p := range s.peers {
   641  		if p.Handshaked() {
   642  			count++
   643  		}
   644  	}
   645  
   646  	return count
   647  }
   648  
   649  // getVersionMsg returns the current version message generated for the specified
   650  // connection.
   651  func (s *Server) getVersionMsg(localAddr net.Addr) (*Message, error) {
   652  	port, err := s.Port(localAddr)
   653  	if err != nil {
   654  		return nil, fmt.Errorf("failed to fetch server port: %w", err)
   655  	}
   656  
   657  	capabilities := []capability.Capability{
   658  		{
   659  			Type: capability.TCPServer,
   660  			Data: &capability.Server{
   661  				Port: port,
   662  			},
   663  		},
   664  	}
   665  	if s.Relay {
   666  		capabilities = append(capabilities, capability.Capability{
   667  			Type: capability.FullNode,
   668  			Data: &capability.Node{
   669  				StartHeight: s.chain.BlockHeight(),
   670  			},
   671  		})
   672  	}
   673  	payload := payload.NewVersion(
   674  		s.Net,
   675  		s.id,
   676  		s.UserAgent,
   677  		capabilities,
   678  	)
   679  	return NewMessage(CMDVersion, payload), nil
   680  }
   681  
   682  // IsInSync answers the question of whether the server is in sync with the
   683  // network or not (at least as the server itself sees it). The server operates
   684  // with the data that it has: the number of peers (which has to be above the
   685  // configured minimum) and the heights of these peers (our chain has to be no
   686  // lower than that of at least 2/3 of our peers). Ideally, we would check
   687  // against the highest of the peers, but the problem is that they can lie to us
   688  // and send whatever height they want to. Once sync is reached, IsInSync will
   689  // always return `true`, even if the server is temporarily out of sync after that.
   690  func (s *Server) IsInSync() bool {
   691  	if s.syncReached.Load() {
   692  		return true
   693  	}
   694  	var peersNumber int
   695  	var notHigher int
   696  
   697  	if s.stateSync.IsActive() {
   698  		return false
   699  	}
   700  
   701  	if s.MinPeers == 0 {
   702  		return true
   703  	}
   704  
   705  	ourLastBlock := s.chain.BlockHeight()
   706  
   707  	s.lock.RLock()
   708  	for p := range s.peers {
   709  		if p.Handshaked() {
   710  			peersNumber++
   711  			if ourLastBlock >= p.LastBlockIndex() {
   712  				notHigher++
   713  			}
   714  		}
   715  	}
   716  	s.lock.RUnlock()
   717  
   718  	// Checking bQueue would also be nice, but it can be filled with garbage
   719  	// easily at the moment.
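        	// The 2/3 rule below is notHigher/peersNumber > 2/3 rewritten without division.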
   720  	return peersNumber >= s.MinPeers && (3*notHigher > 2*peersNumber) // && s.bQueue.length() == 0
   721  }
   722  
   723  // When a peer sends out its version, we reply with verack after validating
   724  // the version.
   725  func (s *Server) handleVersionCmd(p Peer, version *payload.Version) error {
   726  	err := p.HandleVersion(version)
   727  	if err != nil {
   728  		return err
   729  	}
   730  	if s.id == version.Nonce {
   731  		return errIdenticalID
   732  	}
   733  	// Make sure both the server and the peer are operating on
   734  	// the same network.
   735  	if s.Net != version.Magic {
   736  		return errInvalidNetwork
   737  	}
   738  	peerAddr := p.PeerAddr().String()
   739  	s.lock.RLock()
   740  	for peer := range s.peers {
   741  		if p == peer {
   742  			continue
   743  		}
   744  		ver := peer.Version()
   745  		// Already connected, drop this connection.
   746  		if ver != nil && ver.Nonce == version.Nonce && peer.PeerAddr().String() == peerAddr {
   747  			s.lock.RUnlock()
   748  			return errAlreadyConnected
   749  		}
   750  	}
   751  	s.lock.RUnlock()
   752  	return p.SendVersionAck(NewMessage(CMDVerack, payload.NewNullPayload()))
   753  }
   754  
   755  // handleBlockCmd processes the block received from its peer.
   756  func (s *Server) handleBlockCmd(p Peer, block *block.Block) error {
   757  	if s.stateSync.IsActive() {
   758  		return s.bSyncQueue.PutBlock(block)
   759  	}
   760  	return s.bQueue.PutBlock(block)
   761  }
   762  
   763  // handlePing processes a ping request.
   764  func (s *Server) handlePing(p Peer, ping *payload.Ping) error {
   765  	err := p.HandlePing(ping)
   766  	if err != nil {
   767  		return err
   768  	}
   769  	err = s.requestBlocksOrHeaders(p)
   770  	if err != nil {
   771  		return err
   772  	}
   773  	return p.EnqueueP2PMessage(NewMessage(CMDPong, payload.NewPing(s.chain.BlockHeight(), s.id)))
   774  }
   775  
   776  func (s *Server) requestBlocksOrHeaders(p Peer) error {
   777  	if s.stateSync.NeedHeaders() {
   778  		if s.chain.HeaderHeight() < p.LastBlockIndex() {
   779  			return s.requestHeaders(p)
   780  		}
   781  		return nil
   782  	}
   783  	var (
   784  		bq              bqueue.Blockqueuer = s.chain
   785  		requestMPTNodes bool
   786  	)
   787  	if s.stateSync.IsActive() {
   788  		bq = s.stateSync
   789  		requestMPTNodes = s.stateSync.NeedMPTNodes()
   790  	}
   791  	if bq.BlockHeight() >= p.LastBlockIndex() {
   792  		return nil
   793  	}
   794  	err := s.requestBlocks(bq, p)
   795  	if err != nil {
   796  		return err
   797  	}
   798  	if requestMPTNodes {
   799  		return s.requestMPTNodes(p, s.stateSync.GetUnknownMPTNodesBatch(payload.MaxMPTHashesCount))
   800  	}
   801  	return nil
   802  }
   803  
   804  // requestHeaders sends a CMDGetHeaders message to the peer to sync up in headers.
   805  func (s *Server) requestHeaders(p Peer) error {
   806  	pl := getRequestBlocksPayload(p, s.chain.HeaderHeight(), &s.lastRequestedHeader)
   807  	return p.EnqueueP2PMessage(NewMessage(CMDGetHeaders, pl))
   808  }
   809  
   810  // handlePong processes a pong request.
   811  func (s *Server) handlePong(p Peer, pong *payload.Ping) error {
   812  	err := p.HandlePong(pong)
   813  	if err != nil {
   814  		return err
   815  	}
   816  	return s.requestBlocksOrHeaders(p)
   817  }
   818  
   819  // handleInvCmd processes the received inventory.
   820  func (s *Server) handleInvCmd(p Peer, inv *payload.Inventory) error {
   821  	var reqHashes = inv.Hashes[:0]
   822  	var typExists = map[payload.InventoryType]func(util.Uint256) bool{
   823  		payload.TXType: func(h util.Uint256) bool {
   824  			s.txInLock.RLock()
   825  			_, ok := s.txInMap[h]
   826  			s.txInLock.RUnlock()
   827  			return ok || s.mempool.ContainsKey(h)
   828  		},
   829  		payload.BlockType: s.chain.HasBlock,
   830  		payload.ExtensibleType: func(h util.Uint256) bool {
   831  			cp := s.extensiblePool.Get(h)
   832  			return cp != nil
   833  		},
   834  		payload.P2PNotaryRequestType: func(h util.Uint256) bool {
   835  			return s.notaryRequestPool.ContainsKey(h)
   836  		},
   837  	}
   838  	if exists := typExists[inv.Type]; exists != nil {
   839  		for _, hash := range inv.Hashes {
   840  			if !exists(hash) {
   841  				reqHashes = append(reqHashes, hash)
   842  			}
   843  		}
   844  	}
   845  	if len(reqHashes) > 0 {
   846  		msg := NewMessage(CMDGetData, payload.NewInventory(inv.Type, reqHashes))
   847  		if inv.Type == payload.ExtensibleType {
   848  			return p.EnqueueHPMessage(msg)
   849  		}
   850  		return p.EnqueueP2PMessage(msg)
   851  	}
   852  	return nil
   853  }
   854  
   855  // handleMempoolCmd handles the getmempool command.
   856  func (s *Server) handleMempoolCmd(p Peer) error {
   857  	txs := s.mempool.GetVerifiedTransactions()
   858  	hs := make([]util.Uint256, 0, payload.MaxHashesCount)
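        	// Send inventories in batches of at most payload.MaxHashesCount hashes each.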
   859  	for i := range txs {
   860  		hs = append(hs, txs[i].Hash())
   861  		if len(hs) < payload.MaxHashesCount && i != len(txs)-1 {
   862  			continue
   863  		}
   864  		msg := NewMessage(CMDInv, payload.NewInventory(payload.TXType, hs))
   865  		err := p.EnqueueP2PMessage(msg)
   866  		if err != nil {
   867  			return err
   868  		}
   869  		hs = hs[:0]
   870  	}
   871  	return nil
   872  }
   873  
   874  // handleGetDataCmd processes the received get data request.
   875  func (s *Server) handleGetDataCmd(p Peer, inv *payload.Inventory) error {
   876  	var (
   877  		err      error
   878  		notFound []util.Uint256
   879  		reply    = io.NewBufBinWriter()
   880  		send     = p.EnqueueP2PPacket
   881  	)
   882  	if inv.Type == payload.ExtensibleType {
   883  		send = p.EnqueueHPPacket
   884  	}
   885  	for _, hash := range inv.Hashes {
   886  		var msg *Message
   887  
   888  		switch inv.Type {
   889  		case payload.TXType:
   890  			tx, _, err := s.chain.GetTransaction(hash)
   891  			if err == nil {
   892  				msg = NewMessage(CMDTX, tx)
   893  			} else {
   894  				notFound = append(notFound, hash)
   895  			}
   896  		case payload.BlockType:
   897  			b, err := s.chain.GetBlock(hash)
   898  			if err == nil {
   899  				msg = NewMessage(CMDBlock, b)
   900  			} else {
   901  				notFound = append(notFound, hash)
   902  			}
   903  		case payload.ExtensibleType:
   904  			if cp := s.extensiblePool.Get(hash); cp != nil {
   905  				msg = NewMessage(CMDExtensible, cp)
   906  			}
   907  		case payload.P2PNotaryRequestType:
   908  			if nrp, ok := s.notaryRequestPool.TryGetData(hash); ok { // already have checked P2PSigExtEnabled
   909  				msg = NewMessage(CMDP2PNotaryRequest, nrp.(*payload.P2PNotaryRequest))
   910  			} else {
   911  				notFound = append(notFound, hash)
   912  			}
   913  		}
   914  		if msg != nil {
   915  			err = addMessageToPacket(reply, msg, send)
   916  			if err != nil {
   917  				return err
   918  			}
   919  		}
   920  	}
   921  	if len(notFound) != 0 {
   922  		err = addMessageToPacket(reply, NewMessage(CMDNotFound, payload.NewInventory(inv.Type, notFound)), send)
   923  		if err != nil {
   924  			return err
   925  		}
   926  	}
   927  	if reply.Len() == 0 {
   928  		return nil
   929  	}
   930  	return send(reply.Bytes())
   931  }
   932  
   933  // addMessageToPacket serializes the given message into the given buffer and sends
   934  // the whole batch if it exceeds the MaxSize/2 memory limit (to prevent DoS).
   935  func addMessageToPacket(batch *io.BufBinWriter, msg *Message, send func([]byte) error) error {
   936  	err := msg.Encode(batch.BinWriter)
   937  	if err != nil {
   938  		return err
   939  	}
   940  	if batch.Len() > payload.MaxSize/2 {
   941  		err = send(batch.Bytes())
   942  		if err != nil {
   943  			return err
   944  		}
   945  		batch.Reset()
   946  	}
   947  	return nil
   948  }
   949  
   950  // handleGetMPTDataCmd processes the received MPT inventory.
   951  func (s *Server) handleGetMPTDataCmd(p Peer, inv *payload.MPTInventory) error {
   952  	if !s.config.P2PStateExchangeExtensions {
   953  		return errors.New("GetMPTDataCMD was received, but P2PStateExchangeExtensions are disabled")
   954  	}
   955  	// Even if s.config.KeepOnlyLatestState is enabled, we'll keep the latest P1 and P2 MPT states.
   956  	resp := payload.MPTData{}
   957  	capLeft := payload.MaxSize - 8 // max(io.GetVarSize(len(resp.Nodes)))
   958  	added := make(map[util.Uint256]struct{})
   959  	for _, h := range inv.Hashes {
   960  		if capLeft <= 2 { // at least 1 byte for len(nodeBytes) and 1 byte for node type
   961  			break
   962  		}
   963  		err := s.stateSync.Traverse(h,
   964  			func(n mpt.Node, node []byte) bool {
   965  				if _, ok := added[n.Hash()]; ok {
   966  					return false
   967  				}
   968  				l := len(node)
   969  				size := l + io.GetVarSize(l)
   970  				if size > capLeft {
   971  					return true
   972  				}
   973  				resp.Nodes = append(resp.Nodes, node)
   974  				added[n.Hash()] = struct{}{}
   975  				capLeft -= size
   976  				return false
   977  			})
   978  		if err != nil {
   979  			return fmt.Errorf("failed to traverse MPT starting from %s: %w", h.StringBE(), err)
   980  		}
   981  	}
   982  	if len(resp.Nodes) > 0 {
   983  		msg := NewMessage(CMDMPTData, &resp)
   984  		return p.EnqueueP2PMessage(msg)
   985  	}
   986  	return nil
   987  }
   988  
   989  func (s *Server) handleMPTDataCmd(p Peer, data *payload.MPTData) error {
   990  	if !s.config.P2PStateExchangeExtensions {
   991  		return errors.New("MPTDataCMD was received, but P2PStateExchangeExtensions are disabled")
   992  	}
   993  	return s.stateSync.AddMPTNodes(data.Nodes)
   994  }
   995  
   996  // requestMPTNodes requests the specified MPT nodes from the given peer,
   997  // trimming the request to at most payload.MaxMPTHashesCount items.
   998  func (s *Server) requestMPTNodes(p Peer, itms []util.Uint256) error {
   999  	if len(itms) == 0 {
  1000  		return nil
  1001  	}
  1002  	if len(itms) > payload.MaxMPTHashesCount {
  1003  		itms = itms[:payload.MaxMPTHashesCount]
  1004  	}
  1005  	pl := payload.NewMPTInventory(itms)
  1006  	msg := NewMessage(CMDGetMPTData, pl)
  1007  	return p.EnqueueP2PMessage(msg)
  1008  }
  1009  
  1010  // handleGetBlocksCmd processes the getblocks request.
  1011  func (s *Server) handleGetBlocksCmd(p Peer, gb *payload.GetBlocks) error {
  1012  	count := gb.Count
  1013  	if gb.Count < 0 || gb.Count > payload.MaxHashesCount {
  1014  		count = payload.MaxHashesCount
  1015  	}
  1016  	start, err := s.chain.GetHeader(gb.HashStart)
  1017  	if err != nil {
  1018  		return err
  1019  	}
  1020  	blockHashes := make([]util.Uint256, 0)
  1021  	for i := start.Index + 1; i <= start.Index+uint32(count); i++ {
  1022  		hash := s.chain.GetHeaderHash(i)
  1023  		if hash.Equals(util.Uint256{}) {
  1024  			break
  1025  		}
  1026  		blockHashes = append(blockHashes, hash)
  1027  	}
  1028  
  1029  	if len(blockHashes) == 0 {
  1030  		return nil
  1031  	}
  1032  	payload := payload.NewInventory(payload.BlockType, blockHashes)
  1033  	msg := NewMessage(CMDInv, payload)
  1034  	return p.EnqueueP2PMessage(msg)
  1035  }
  1036  
  1037  // handleGetBlockByIndexCmd processes the getblockbyindex request.
  1038  func (s *Server) handleGetBlockByIndexCmd(p Peer, gbd *payload.GetBlockByIndex) error {
  1039  	var reply = io.NewBufBinWriter()
  1040  	count := gbd.Count
  1041  	if gbd.Count < 0 || gbd.Count > payload.MaxHashesCount {
  1042  		count = payload.MaxHashesCount
  1043  	}
  1044  	for i := gbd.IndexStart; i < gbd.IndexStart+uint32(count); i++ {
  1045  		hash := s.chain.GetHeaderHash(i)
  1046  		if hash.Equals(util.Uint256{}) {
  1047  			break
  1048  		}
  1049  		b, err := s.chain.GetBlock(hash)
  1050  		if err != nil {
  1051  			break
  1052  		}
  1053  		err = addMessageToPacket(reply, NewMessage(CMDBlock, b), p.EnqueueP2PPacket)
  1054  		if err != nil {
  1055  			return err
  1056  		}
  1057  	}
  1058  	if reply.Len() == 0 {
  1059  		return nil
  1060  	}
  1061  	return p.EnqueueP2PPacket(reply.Bytes())
  1062  }
  1063  
  1064  // handleGetHeadersCmd processes the getheaders request.
  1065  func (s *Server) handleGetHeadersCmd(p Peer, gh *payload.GetBlockByIndex) error {
  1066  	if gh.IndexStart > s.chain.HeaderHeight() {
  1067  		return nil
  1068  	}
  1069  	count := gh.Count
  1070  	if gh.Count < 0 || gh.Count > payload.MaxHeadersAllowed {
  1071  		count = payload.MaxHeadersAllowed
  1072  	}
  1073  	resp := payload.Headers{}
  1074  	resp.Hdrs = make([]*block.Header, 0, count)
  1075  	for i := gh.IndexStart; i < gh.IndexStart+uint32(count); i++ {
  1076  		hash := s.chain.GetHeaderHash(i)
  1077  		if hash.Equals(util.Uint256{}) {
  1078  			break
  1079  		}
  1080  		header, err := s.chain.GetHeader(hash)
  1081  		if err != nil {
  1082  			break
  1083  		}
  1084  		resp.Hdrs = append(resp.Hdrs, header)
  1085  	}
  1086  	if len(resp.Hdrs) == 0 {
  1087  		return nil
  1088  	}
  1089  	msg := NewMessage(CMDHeaders, &resp)
  1090  	return p.EnqueueP2PMessage(msg)
  1091  }
  1092  
  1093  // handleHeadersCmd processes headers payload.
  1094  func (s *Server) handleHeadersCmd(p Peer, h *payload.Headers) error {
  1095  	return s.stateSync.AddHeaders(h.Hdrs...)
  1096  }
  1097  
  1098  // handleExtensibleCmd processes the received extensible payload.
  1099  func (s *Server) handleExtensibleCmd(e *payload.Extensible) error {
  1100  	if !s.syncReached.Load() {
  1101  		return nil
  1102  	}
  1103  	ok, err := s.extensiblePool.Add(e)
  1104  	if err != nil {
  1105  		return err
  1106  	}
  1107  	if !ok { // payload is already in cache
  1108  		return nil
  1109  	}
  1110  	s.serviceLock.RLock()
  1111  	handler := s.extensHandlers[e.Category]
  1112  	s.serviceLock.RUnlock()
  1113  	if handler != nil {
  1114  		err = handler(e)
  1115  		if err != nil {
  1116  			return err
  1117  		}
  1118  	}
  1119  	s.advertiseExtensible(e)
  1120  	return nil
  1121  }
  1122  
  1123  func (s *Server) advertiseExtensible(e *payload.Extensible) {
  1124  	msg := NewMessage(CMDInv, payload.NewInventory(payload.ExtensibleType, []util.Uint256{e.Hash()}))
  1125  	if e.Category == payload.ConsensusCategory {
  1126  		// It's high priority because it directly affects consensus process,
  1127  		// even though it's just an inv.
  1128  		s.broadcastHPMessage(msg)
  1129  	} else {
  1130  		s.broadcastMessage(msg)
  1131  	}
  1132  }
  1133  
  1134  // handleTxCmd processes the received transaction.
  1135  // It never returns an error.
  1136  func (s *Server) handleTxCmd(tx *transaction.Transaction) error {
  1137  	// It's OK for it to fail for various reasons like tx already existing
  1138  	// in the pool.
  1139  	s.txInLock.Lock()
  1140  	_, ok := s.txInMap[tx.Hash()]
  1141  	if ok || s.mempool.ContainsKey(tx.Hash()) {
  1142  		s.txInLock.Unlock()
  1143  		return nil
  1144  	}
  1145  	s.txInMap[tx.Hash()] = struct{}{}
  1146  	s.txInLock.Unlock()
  1147  	s.txin <- tx
  1148  	return nil
  1149  }
  1150  
  1151  func (s *Server) txHandlerLoop() {
  1152  	defer s.txHandlerLoopWG.Done()
  1153  txloop:
  1154  	for {
  1155  		select {
  1156  		case tx := <-s.txin:
  1157  			s.serviceLock.RLock()
  1158  			txCallback := s.txCallback
  1159  			s.serviceLock.RUnlock()
  1160  			if txCallback != nil {
  1161  				var cbList = s.txCbList.Load()
  1162  				if cbList != nil {
  1163  					var list = cbList.([]util.Uint256)
  1164  					var i = sort.Search(len(list), func(i int) bool {
  1165  						return list[i].CompareTo(tx.Hash()) >= 0
  1166  					})
  1167  					if i < len(list) && list[i].Equals(tx.Hash()) {
  1168  						txCallback(tx)
  1169  					}
  1170  				}
  1171  			}
  1172  			err := s.verifyAndPoolTX(tx)
  1173  			if err == nil {
  1174  				s.broadcastTX(tx, nil)
  1175  			} else {
  1176  				s.log.Debug("tx handler", zap.Error(err), zap.String("hash", tx.Hash().StringLE()))
  1177  			}
  1178  			s.txInLock.Lock()
  1179  			delete(s.txInMap, tx.Hash())
  1180  			s.txInLock.Unlock()
  1181  		case <-s.quit:
  1182  			break txloop
  1183  		}
  1184  	}
  1185  drainloop:
  1186  	for {
  1187  		select {
  1188  		case <-s.txin:
  1189  		default:
  1190  			break drainloop
  1191  		}
  1192  	}
  1193  }
  1194  
  1195  // handleP2PNotaryRequestCmd processes the received P2PNotaryRequest payload.
  1196  func (s *Server) handleP2PNotaryRequestCmd(r *payload.P2PNotaryRequest) error {
  1197  	if !s.chain.P2PSigExtensionsEnabled() {
  1198  		return errors.New("P2PNotaryRequestCMD was received, but P2PSignatureExtensions are disabled")
  1199  	}
  1200  	// It's OK for it to fail for various reasons like request already existing
  1201  	// in the pool.
  1202  	err := s.RelayP2PNotaryRequest(r)
  1203  	if err != nil {
  1204  		s.log.Debug("p2p notary request", zap.Error(err), zap.String("hash", r.Hash().StringLE()), zap.String("main", r.MainTransaction.Hash().StringLE()))
  1205  	}
  1206  	return nil
  1207  }
  1208  
  1209  // RelayP2PNotaryRequest adds the given request to the pool and relays it. It does
  1210  // not check whether P2PSigExtensions are enabled.
  1211  func (s *Server) RelayP2PNotaryRequest(r *payload.P2PNotaryRequest) error {
  1212  	err := s.verifyAndPoolNotaryRequest(r)
  1213  	if err == nil {
  1214  		s.broadcastP2PNotaryRequestPayload(nil, r)
  1215  	}
  1216  	return err
  1217  }
  1218  
  1219  // verifyAndPoolNotaryRequest verifies NotaryRequest payload and adds it to the payload mempool.
  1220  func (s *Server) verifyAndPoolNotaryRequest(r *payload.P2PNotaryRequest) error {
  1221  	return s.chain.PoolTxWithData(r.FallbackTransaction, r, s.notaryRequestPool, s.notaryFeer, s.verifyNotaryRequest)
  1222  }
  1223  
  1224  // verifyNotaryRequest is a function for state-dependent P2PNotaryRequest payload verification which is executed before the ordinary blockchain verification.
  1225  func (s *Server) verifyNotaryRequest(_ *transaction.Transaction, data any) error {
  1226  	r := data.(*payload.P2PNotaryRequest)
  1227  	payer := r.FallbackTransaction.Signers[1].Account
  1228  	if _, err := s.chain.VerifyWitness(payer, r, &r.Witness, s.chain.GetMaxVerificationGAS()); err != nil {
  1229  		return fmt.Errorf("bad P2PNotaryRequest payload witness: %w", err)
  1230  	}
  1231  	notaryHash := s.chain.GetNotaryContractScriptHash()
  1232  	if r.FallbackTransaction.Sender() != notaryHash {
  1233  		return fmt.Errorf("P2PNotary contract should be a sender of the fallback transaction, got %s", address.Uint160ToString(r.FallbackTransaction.Sender()))
  1234  	}
  1235  	if r.MainTransaction.Sender() == notaryHash {
  1236  		return errors.New("P2PNotary contract is not allowed to be the sender of the main transaction")
  1237  	}
  1238  	depositExpiration := s.chain.GetNotaryDepositExpiration(payer)
  1239  	if r.FallbackTransaction.ValidUntilBlock >= depositExpiration {
  1240  		return fmt.Errorf("fallback transaction is valid after deposit is unlocked: ValidUntilBlock is %d, deposit lock for %s expires at %d", r.FallbackTransaction.ValidUntilBlock, address.Uint160ToString(payer), depositExpiration)
  1241  	}
  1242  	return nil
  1243  }
  1244  
  1245  func (s *Server) broadcastP2PNotaryRequestPayload(_ *transaction.Transaction, data any) {
  1246  	r := data.(*payload.P2PNotaryRequest) // we can guarantee that cast is successful
  1247  	msg := NewMessage(CMDInv, payload.NewInventory(payload.P2PNotaryRequestType, []util.Uint256{r.FallbackTransaction.Hash()}))
  1248  	s.broadcastMessage(msg)
  1249  }
  1250  
  1251  // handleAddrCmd will process the received addresses.
  1252  func (s *Server) handleAddrCmd(p Peer, addrs *payload.AddressList) error {
  1253  	if !p.CanProcessAddr() {
  1254  		return errors.New("unexpected addr received")
  1255  	}
  1256  	for _, a := range addrs.Addrs {
  1257  		addr, err := a.GetTCPAddress()
  1258  		if err == nil {
  1259  			s.discovery.BackFill(addr)
  1260  		}
  1261  	}
  1262  	return nil
  1263  }
  1264  
  1265  // handleGetAddrCmd sends to the peer some good addresses that we know of.
  1266  func (s *Server) handleGetAddrCmd(p Peer) error {
  1267  	addrs := s.discovery.GoodPeers()
  1268  	if len(addrs) > payload.MaxAddrsCount {
  1269  		addrs = addrs[:payload.MaxAddrsCount]
  1270  	}
  1271  	alist := payload.NewAddressList(len(addrs))
  1272  	ts := time.Now()
  1273  	for i, addr := range addrs {
  1274  		// we know it's a good address, so it can't fail
  1275  		netaddr, _ := net.ResolveTCPAddr("tcp", addr.Address)
  1276  		alist.Addrs[i] = payload.NewAddressAndTime(netaddr, ts, addr.Capabilities)
  1277  	}
  1278  	return p.EnqueueP2PMessage(NewMessage(CMDAddr, alist))
  1279  }
  1280  
  1281  // requestBlocks sends a CMDGetBlockByIndex message to the peer
  1282  // to sync up in blocks. A maximum of maxBlockBatch will be
  1283  // sent at once. There are two things we need to take care of:
  1284  //  1. If possible, blocks should be fetched in parallel.
  1285  //     height..+500 to one peer, height+500..+1000 to another etc.
  1286  //  2. Every block must eventually be fetched even if the peer sends no answer.
  1287  //
  1288  // Thus, the following algorithm is used:
  1289  // 1. Block range is divided into chunks of payload.MaxHashesCount.
  1290  // 2. Send requests for chunks in increasing order.
  1291  // 3. After all requests have been sent, request random height.
  1292  func (s *Server) requestBlocks(bq bqueue.Blockqueuer, p Peer) error {
  1293  	pl := getRequestBlocksPayload(p, bq.BlockHeight(), &s.lastRequestedBlock)
  1294  	lq, capLeft := s.bQueue.LastQueued()
  1295  	if capLeft == 0 {
  1296  		// No more blocks will fit into the queue.
  1297  		return nil
  1298  	}
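        	// The queue already has everything up to lq; start right after it and
        	// don't request more blocks than the queue can accept.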
  1299  	if lq >= pl.IndexStart {
  1300  		if capLeft < payload.MaxHashesCount {
  1301  			pl.Count = int16(capLeft)
  1302  		}
  1303  		pl.IndexStart = lq + 1
  1304  	}
  1305  	return p.EnqueueP2PMessage(NewMessage(CMDGetBlockByIndex, pl))
  1306  }
  1307  
  1308  func getRequestBlocksPayload(p Peer, currHeight uint32, lastRequestedHeight *atomic.Uint32) *payload.GetBlockByIndex {
  1309  	var peerHeight = p.LastBlockIndex()
  1310  	var needHeight uint32
  1311  	// lastRequestedHeight can only be increased.
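        	// Three cases: we're at/above the recorded height (request the next chunk),
        	// we're within the block queue cache window (advance by a full chunk if the
        	// peer has it), or we've already requested far ahead (re-request a random
        	// chunk within the window, step 3 of the algorithm above).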
  1312  	for {
  1313  		old := lastRequestedHeight.Load()
  1314  		if old <= currHeight {
  1315  			needHeight = currHeight + 1
  1316  			if !lastRequestedHeight.CompareAndSwap(old, needHeight) {
  1317  				continue
  1318  			}
  1319  		} else if old < currHeight+(bqueue.CacheSize-payload.MaxHashesCount) {
  1320  			needHeight = currHeight + 1
  1321  			if peerHeight > old+payload.MaxHashesCount {
  1322  				needHeight = old + payload.MaxHashesCount
  1323  				if !lastRequestedHeight.CompareAndSwap(old, needHeight) {
  1324  					continue
  1325  				}
  1326  			}
  1327  		} else {
  1328  			index := mrand.Intn(bqueue.CacheSize / payload.MaxHashesCount)
  1329  			needHeight = currHeight + 1 + uint32(index*payload.MaxHashesCount)
  1330  		}
  1331  		break
  1332  	}
  1333  	return payload.NewGetBlockByIndex(needHeight, -1)
  1334  }
  1335  
  1336  // handleMessage processes the given message.
  1337  func (s *Server) handleMessage(peer Peer, msg *Message) error {
  1338  	s.log.Debug("got msg",
  1339  		zap.Stringer("addr", peer.RemoteAddr()),
  1340  		zap.Stringer("type", msg.Command))
  1341  
  1342  	start := time.Now()
  1343  	defer func() { addCmdTimeMetric(msg.Command, time.Since(start)) }()
  1344  
  1345  	if peer.Handshaked() {
  1346  		if inv, ok := msg.Payload.(*payload.Inventory); ok {
  1347  			if !inv.Type.Valid(s.chain.P2PSigExtensionsEnabled()) || len(inv.Hashes) == 0 {
  1348  				return errInvalidInvType
  1349  			}
  1350  		}
  1351  		switch msg.Command {
  1352  		case CMDAddr:
  1353  			addrs := msg.Payload.(*payload.AddressList)
  1354  			return s.handleAddrCmd(peer, addrs)
  1355  		case CMDGetAddr:
  1356  			// it has no payload
  1357  			return s.handleGetAddrCmd(peer)
  1358  		case CMDGetBlocks:
  1359  			gb := msg.Payload.(*payload.GetBlocks)
  1360  			return s.handleGetBlocksCmd(peer, gb)
  1361  		case CMDGetBlockByIndex:
  1362  			gbd := msg.Payload.(*payload.GetBlockByIndex)
  1363  			return s.handleGetBlockByIndexCmd(peer, gbd)
  1364  		case CMDGetData:
  1365  			inv := msg.Payload.(*payload.Inventory)
  1366  			return s.handleGetDataCmd(peer, inv)
  1367  		case CMDGetMPTData:
  1368  			inv := msg.Payload.(*payload.MPTInventory)
  1369  			return s.handleGetMPTDataCmd(peer, inv)
  1370  		case CMDMPTData:
  1371  			inv := msg.Payload.(*payload.MPTData)
  1372  			return s.handleMPTDataCmd(peer, inv)
  1373  		case CMDGetHeaders:
  1374  			gh := msg.Payload.(*payload.GetBlockByIndex)
  1375  			return s.handleGetHeadersCmd(peer, gh)
  1376  		case CMDHeaders:
  1377  			h := msg.Payload.(*payload.Headers)
  1378  			return s.handleHeadersCmd(peer, h)
  1379  		case CMDInv:
  1380  			inventory := msg.Payload.(*payload.Inventory)
  1381  			return s.handleInvCmd(peer, inventory)
  1382  		case CMDMempool:
  1383  			// no payload
  1384  			return s.handleMempoolCmd(peer)
  1385  		case CMDBlock:
  1386  			block := msg.Payload.(*block.Block)
  1387  			return s.handleBlockCmd(peer, block)
  1388  		case CMDExtensible:
  1389  			cp := msg.Payload.(*payload.Extensible)
  1390  			return s.handleExtensibleCmd(cp)
  1391  		case CMDTX:
  1392  			tx := msg.Payload.(*transaction.Transaction)
  1393  			return s.handleTxCmd(tx)
  1394  		case CMDP2PNotaryRequest:
  1395  			r := msg.Payload.(*payload.P2PNotaryRequest)
  1396  			return s.handleP2PNotaryRequestCmd(r)
  1397  		case CMDPing:
  1398  			ping := msg.Payload.(*payload.Ping)
  1399  			return s.handlePing(peer, ping)
  1400  		case CMDPong:
  1401  			pong := msg.Payload.(*payload.Ping)
  1402  			return s.handlePong(peer, pong)
  1403  		case CMDVersion, CMDVerack:
  1404  			return fmt.Errorf("received '%s' after the handshake", msg.Command.String())
  1405  		}
  1406  	} else {
  1407  		switch msg.Command {
  1408  		case CMDVersion:
  1409  			version := msg.Payload.(*payload.Version)
  1410  			return s.handleVersionCmd(peer, version)
  1411  		case CMDVerack:
  1412  			err := peer.HandleVersionAck()
  1413  			if err != nil {
  1414  				return err
  1415  			}
  1416  			go peer.StartProtocol()
  1417  		default:
  1418  			return fmt.Errorf("received '%s' during handshake", msg.Command.String())
  1419  		}
  1420  	}
  1421  	return nil
  1422  }
  1423  
  1424  func (s *Server) tryInitStateSync() {
  1425  	if !s.stateSync.IsActive() {
  1426  		s.bSyncQueue.Discard()
  1427  		return
  1428  	}
  1429  
  1430  	if s.stateSync.IsInitialized() {
  1431  		return
  1432  	}
  1433  
  1434  	var peersNumber int
  1435  	s.lock.RLock()
  1436  	heights := make([]uint32, 0)
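        	// heights is kept sorted via the insertion below so that the median
        	// peer height can be taken after the loop.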
  1437  	for p := range s.peers {
  1438  		if p.Handshaked() {
  1439  			peersNumber++
  1440  			peerLastBlock := p.LastBlockIndex()
  1441  			i := sort.Search(len(heights), func(i int) bool {
  1442  				return heights[i] >= peerLastBlock
  1443  			})
  1444  			heights = append(heights, peerLastBlock)
  1445  			if i != len(heights)-1 {
  1446  				copy(heights[i+1:], heights[i:])
  1447  				heights[i] = peerLastBlock
  1448  			}
  1449  		}
  1450  	}
  1451  	s.lock.RUnlock()
  1452  	if peersNumber >= s.MinPeers && len(heights) > 0 {
  1453  		// choose the height of the median peer as the current chain's height
  1454  		h := heights[len(heights)/2]
  1455  		err := s.stateSync.Init(h)
  1456  		if err != nil {
  1457  			s.log.Fatal("failed to init state sync module",
  1458  				zap.Uint32("evaluated chain's blockHeight", h),
  1459  				zap.Uint32("blockHeight", s.chain.BlockHeight()),
  1460  				zap.Uint32("headerHeight", s.chain.HeaderHeight()),
  1461  				zap.Error(err))
  1462  		}
  1463  
  1464  		// module can be inactive after init (i.e. full state is collected and ordinary block processing is needed)
  1465  		if !s.stateSync.IsActive() {
  1466  			s.bSyncQueue.Discard()
  1467  		}
  1468  	}
  1469  }
  1470  
  1471  // BroadcastExtensible adds a locally-generated Extensible payload to the pool
  1472  // and advertises it to peers.
  1473  func (s *Server) BroadcastExtensible(p *payload.Extensible) {
  1474  	_, err := s.extensiblePool.Add(p)
  1475  	if err != nil {
  1476  		s.log.Error("created payload is not valid", zap.Error(err))
  1477  		return
  1478  	}
  1479  
  1480  	s.advertiseExtensible(p)
  1481  }
  1482  
  1483  // RequestTx asks for the given transactions from Server peers using the GetData message.
  1484  func (s *Server) RequestTx(hashes ...util.Uint256) {
  1485  	if len(hashes) == 0 {
  1486  		return
  1487  	}
  1488  
  1489  	var sorted = make([]util.Uint256, len(hashes))
  1490  	copy(sorted, hashes)
  1491  	sort.Slice(sorted, func(i, j int) bool {
  1492  		return sorted[i].CompareTo(sorted[j]) < 0
  1493  	})
  1494  
  1495  	s.txCbList.Store(sorted)
  1496  
  1497  	for i := 0; i <= len(hashes)/payload.MaxHashesCount; i++ {
  1498  		start := i * payload.MaxHashesCount
  1499  		stop := (i + 1) * payload.MaxHashesCount
  1500  		if stop > len(hashes) {
  1501  			stop = len(hashes)
  1502  		}
  1503  		if start == stop {
  1504  			break
  1505  		}
  1506  		msg := NewMessage(CMDGetData, payload.NewInventory(payload.TXType, hashes[start:stop]))
  1507  		// It's high priority because it directly affects consensus process,
  1508  		// even though it's getdata.
  1509  		s.broadcastHPMessage(msg)
  1510  	}
  1511  }
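
// chunkRangesSketch is a hedged, stand-alone illustration (not used by the
// Server itself) of the batching loop in RequestTx above: n hashes are split
// into limit-sized GetData requests. For example, with n = 1200 and a
// hypothetical limit of 500 it yields [0:500), [500:1000) and [1000:1200).
func chunkRangesSketch(n, limit int) [][2]int {
	var ranges [][2]int
	for i := 0; i <= n/limit; i++ {
		start, stop := i*limit, (i+1)*limit
		if stop > n {
			stop = n // last chunk is shorter
		}
		if start == stop {
			break // n is an exact multiple of limit, nothing left
		}
		ranges = append(ranges, [2]int{start, stop})
	}
	return ranges
}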

// StopTxFlow makes the server stop calling the previously specified consensus
// transaction callback.
func (s *Server) StopTxFlow() {
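	// Storing a nil slice atomically replaces the sorted hash list installed
	// by RequestTx, so subsequent lookups find no hash to match and the
	// callback is no longer invoked.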
	var hashes []util.Uint256
	s.txCbList.Store(hashes)
}

// iteratePeersWithSendMsg sends the given message to all peers using the two
// functions passed, one sends the message and the other filters peers (the
// peer is considered invalid if it returns false).
func (s *Server) iteratePeersWithSendMsg(msg *Message, send func(Peer, context.Context, []byte) error, peerOK func(Peer) bool) {
	var deadN, peerN, sentN int

	// Get a copy of s.peers to avoid holding a lock while sending.
	peers := s.getPeers(peerOK)
	peerN = len(peers)
	if peerN == 0 {
		return
	}
	pkt, err := msg.Bytes()
	if err != nil {
		return
	}

	var (
		// Optimal number of recipients.
		enoughN     = s.discovery.GetFanOut()
		replies     = make(chan error, peerN) // Cache is there just to make goroutines exit faster.
		ctx, cancel = context.WithTimeout(context.Background(), s.TimePerBlock/2)
	)
	enoughN = (enoughN*(100-s.BroadcastFactor) + peerN*s.BroadcastFactor) / 100
	for _, peer := range peers {
		go func(p Peer, ctx context.Context, pkt []byte) {
			// Do this before the packet is sent, the reader thread can get
			// the reply before this routine wakes up.
			if msg.Command == CMDGetAddr {
				p.AddGetAddrSent()
			}
			if msg.Command == CMDPing {
				p.SetPingTimer()
			}
			replies <- send(p, ctx, pkt)
		}(peer, ctx, pkt)
	}
	for r := range replies {
		if r == nil {
			sentN++
		} else {
			deadN++
		}
		if sentN+deadN == peerN {
			break
		}
		if sentN >= enoughN && ctx.Err() == nil {
			cancel()
		}
	}
	cancel()
	close(replies)
}
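
// fanOutSketch is a hedged, stand-alone restatement (not used by the Server
// itself) of the enoughN formula above: with BroadcastFactor 0 the optimal
// fan-out reported by discovery is used as-is, with 100 every available peer
// is targeted, and intermediate values interpolate linearly between the two.
// E.g. fanOutSketch(4, 10, 0) == 4, fanOutSketch(4, 10, 50) == 7 and
// fanOutSketch(4, 10, 100) == 10.
func fanOutSketch(optimalN, peerN, broadcastFactor int) int {
	return (optimalN*(100-broadcastFactor) + peerN*broadcastFactor) / 100
}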

// broadcastMessage sends the message to all available peers.
func (s *Server) broadcastMessage(msg *Message) {
	s.iteratePeersWithSendMsg(msg, Peer.BroadcastPacket, Peer.Handshaked)
}

// broadcastHPMessage sends the high-priority message to all available peers.
func (s *Server) broadcastHPMessage(msg *Message) {
	s.iteratePeersWithSendMsg(msg, Peer.BroadcastHPPacket, Peer.Handshaked)
}

// relayBlocksLoop subscribes to new blocks in the ledger and broadcasts them
// to the network. Intended to be run as a separate goroutine.
func (s *Server) relayBlocksLoop() {
	ch := make(chan *block.Block, 2) // Some buffering to smooth out possible egressing delays.
	s.chain.SubscribeForBlocks(ch)
mainloop:
	for {
		select {
		case <-s.quit:
			s.chain.UnsubscribeFromBlocks(ch)
			break mainloop
		case b := <-ch:
			msg := NewMessage(CMDInv, payload.NewInventory(payload.BlockType, []util.Uint256{b.Hash()}))
			// Filter out nodes that are more current (avoid spamming the network
			// during initial sync).
			s.iteratePeersWithSendMsg(msg, Peer.BroadcastPacket, func(p Peer) bool {
				return p.Handshaked() && p.LastBlockIndex() < b.Index
			})
			s.extensiblePool.RemoveStale(b.Index)
		}
	}
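	// UnsubscribeFromBlocks has already returned in the mainloop above, so no
	// new sends can happen; drain whatever is still buffered in ch before
	// closing it.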
drainBlocksLoop:
	for {
		select {
		case <-ch:
		default:
			break drainBlocksLoop
		}
	}
	close(ch)
	close(s.relayFin)
}

// verifyAndPoolTX verifies the TX and adds it to the local mempool.
func (s *Server) verifyAndPoolTX(t *transaction.Transaction) error {
	return s.chain.PoolTx(t)
}

// RelayTxn relays a new transaction to the local node and the connected peers.
// Reference: the method OnRelay in C#: https://github.com/neo-project/neo/blob/master/neo/Network/P2P/LocalNode.cs#L159
func (s *Server) RelayTxn(t *transaction.Transaction) error {
	err := s.verifyAndPoolTX(t)
	if err == nil {
		s.broadcastTX(t, nil)
	}
	return err
}

// broadcastTX broadcasts an inventory message about a new transaction.
func (s *Server) broadcastTX(t *transaction.Transaction, _ any) {
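	// The select makes the handoff shutdown-safe: if the server quits before
	// broadcastTxLoop picks the transaction up, the send is simply dropped
	// instead of blocking forever.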
	select {
	case s.transactions <- t:
	case <-s.quit:
	}
}

func (s *Server) broadcastTxHashes(hs []util.Uint256) {
	msg := NewMessage(CMDInv, payload.NewInventory(payload.TXType, hs))

	// We need to filter out non-relaying nodes, so plain broadcast
	// functions don't fit here.
	s.iteratePeersWithSendMsg(msg, Peer.BroadcastPacket, Peer.IsFullNode)
}

// initStaleMemPools initializes mempools for stale tx/payload processing.
func (s *Server) initStaleMemPools() {
	threshold := 5
	// Not perfect, can change over time, but should be sufficient.
	numOfCNs := s.config.GetNumOfCNs(s.chain.BlockHeight())
	if numOfCNs*2 > threshold {
		threshold = numOfCNs * 2
	}
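	// E.g. with 7 consensus nodes the resend threshold becomes 14 blocks,
	// while with 2 CNs it stays at the default of 5.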

	s.mempool.SetResendThreshold(uint32(threshold), s.broadcastTX)
	if s.chain.P2PSigExtensionsEnabled() {
		s.notaryRequestPool.SetResendThreshold(uint32(threshold), s.broadcastP2PNotaryRequestPayload)
	}
}

// broadcastTxLoop is a loop for batching and sending
// transaction hashes in an INV payload.
func (s *Server) broadcastTxLoop() {
	const (
		batchTime = time.Millisecond * 50
		batchSize = 42
	)

	defer close(s.broadcastTxFin)
	txs := make([]util.Uint256, 0, batchSize)
	var timer *time.Timer

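	// timerCh relies on the nil-channel idiom: a receive from a nil channel
	// blocks forever, so while the timer is unarmed the <-timerCh() case in
	// the select below can never fire; the first batched transaction arms it.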
	timerCh := func() <-chan time.Time {
		if timer == nil {
			return nil
		}
		return timer.C
	}

	broadcast := func() {
		s.broadcastTxHashes(txs)
		txs = txs[:0]
		if timer != nil {
			timer.Stop()
		}
	}

	for {
		select {
		case <-s.quit:
		loop:
			for {
				select {
				case <-s.transactions:
				default:
					break loop
				}
			}
			return
		case <-timerCh():
			if len(txs) > 0 {
				broadcast()
			}
		case tx := <-s.transactions:
			if len(txs) == 0 {
				timer = time.NewTimer(batchTime)
			}

			txs = append(txs, tx.Hash())
			if len(txs) == batchSize {
				broadcast()
			}
		}
	}
}

// Port returns a server port that should be used in P2P version exchange with
// the peer connected on the given localAddr. If the announced node port is set
// in the server.Config for the given bind address, the announced node port is
// returned (e.g. for a node running behind NAT). If `AnnouncedPort` isn't set,
// the port returned may still differ from that of server.Config. If no
// localAddr is given, the first available port is returned.
func (s *Server) Port(localAddr net.Addr) (uint16, error) {
	var connIP string
	if localAddr != nil {
		connIP, _, _ = net.SplitHostPort(localAddr.String()) // Ignore error and provide info if possible.
	}
	var defaultPort *uint16
	for i, tr := range s.transports {
		listenIP, listenPort := tr.HostPort()
		if listenIP == "::" || listenIP == "" || localAddr == nil || connIP == "" || connIP == listenIP {
			var res uint16
			if s.ServerConfig.Addresses[i].AnnouncedPort != 0 {
				res = s.ServerConfig.Addresses[i].AnnouncedPort
			} else {
				p, err := strconv.ParseUint(listenPort, 10, 16)
				if err != nil {
					return 0, fmt.Errorf("failed to parse bind port from '%s': %w", listenPort, err)
				}
				res = uint16(p)
			}
			if localAddr == nil || // no local address is specified => take the first port available
				(listenIP != "::" && listenIP != "") { // direct match is always preferable
				return res, nil
			}
			defaultPort = &res
		}
	}
	if defaultPort != nil {
		return *defaultPort, nil
	}
	return 0, fmt.Errorf("bind address for connection '%s' is not registered", localAddr.String())
}
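
// A hedged configuration sketch for the AnnouncedPort case above: node YAML
// configs commonly express it as "[address]:[nodePort]:[announcedPort]"
// (exact syntax depends on the config version), e.g.:
//
//	P2P:
//	  Addresses:
//	    - ":20333:20777" # listen on 20333, announce 20777 to peers (NAT)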

// optimalNumOfThreads returns the optimal number of processing threads to create
// for transaction processing.
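// The result is effectively min(GOMAXPROCS(0), NumCPU(), 16); e.g. on a
// 32-core machine with default GOMAXPROCS this yields 16.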
func optimalNumOfThreads() int {
	// Doing more won't help, mempool is still a contention point.
	const maxThreads = 16
	var threads = runtime.GOMAXPROCS(0)
	if threads > runtime.NumCPU() {
		threads = runtime.NumCPU()
	}
	if threads > maxThreads {
		threads = maxThreads
	}
	return threads
}