github.com/unicornultrafoundation/go-u2u@v1.0.0-rc1.0.20240205080301-e74a83d3fadc/gossip/handler.go

     1  package gossip
     2  
     3  import (
     4  	"errors"
     5  	"fmt"
     6  	"math"
     7  	"math/rand"
     8  	"strings"
     9  	"sync"
    10  	"time"
    11  
    12  	"github.com/unicornultrafoundation/go-helios/gossip/dagprocessor"
    13  	"github.com/unicornultrafoundation/go-helios/gossip/itemsfetcher"
    14  	"github.com/unicornultrafoundation/go-helios/hash"
    15  	"github.com/unicornultrafoundation/go-helios/native/dag"
    16  	"github.com/unicornultrafoundation/go-helios/native/idx"
    17  	"github.com/unicornultrafoundation/go-helios/utils/datasemaphore"
    18  	"github.com/unicornultrafoundation/go-u2u/common"
    19  	"github.com/unicornultrafoundation/go-u2u/core/types"
    20  	notify "github.com/unicornultrafoundation/go-u2u/event"
    21  	"github.com/unicornultrafoundation/go-u2u/log"
    22  	"github.com/unicornultrafoundation/go-u2u/p2p"
    23  	"github.com/unicornultrafoundation/go-u2u/p2p/discover/discfilter"
    24  	"github.com/unicornultrafoundation/go-u2u/rlp"
    25  	"github.com/unicornultrafoundation/go-u2u/trie"
    26  
    27  	"github.com/unicornultrafoundation/go-u2u/eventcheck"
    28  	"github.com/unicornultrafoundation/go-u2u/eventcheck/bvallcheck"
    29  	"github.com/unicornultrafoundation/go-u2u/eventcheck/epochcheck"
    30  	"github.com/unicornultrafoundation/go-u2u/eventcheck/evallcheck"
    31  	"github.com/unicornultrafoundation/go-u2u/eventcheck/heavycheck"
    32  	"github.com/unicornultrafoundation/go-u2u/eventcheck/parentlesscheck"
    33  	"github.com/unicornultrafoundation/go-u2u/evmcore"
    34  	"github.com/unicornultrafoundation/go-u2u/gossip/protocols/blockrecords/brprocessor"
    35  	"github.com/unicornultrafoundation/go-u2u/gossip/protocols/blockrecords/brstream"
    36  	"github.com/unicornultrafoundation/go-u2u/gossip/protocols/blockrecords/brstream/brstreamleecher"
    37  	"github.com/unicornultrafoundation/go-u2u/gossip/protocols/blockrecords/brstream/brstreamseeder"
    38  	"github.com/unicornultrafoundation/go-u2u/gossip/protocols/blockvotes/bvprocessor"
    39  	"github.com/unicornultrafoundation/go-u2u/gossip/protocols/blockvotes/bvstream"
    40  	"github.com/unicornultrafoundation/go-u2u/gossip/protocols/blockvotes/bvstream/bvstreamleecher"
    41  	"github.com/unicornultrafoundation/go-u2u/gossip/protocols/blockvotes/bvstream/bvstreamseeder"
    42  	"github.com/unicornultrafoundation/go-u2u/gossip/protocols/dag/dagstream"
    43  	"github.com/unicornultrafoundation/go-u2u/gossip/protocols/dag/dagstream/dagstreamleecher"
    44  	"github.com/unicornultrafoundation/go-u2u/gossip/protocols/dag/dagstream/dagstreamseeder"
    45  	"github.com/unicornultrafoundation/go-u2u/gossip/protocols/epochpacks/epprocessor"
    46  	"github.com/unicornultrafoundation/go-u2u/gossip/protocols/epochpacks/epstream"
    47  	"github.com/unicornultrafoundation/go-u2u/gossip/protocols/epochpacks/epstream/epstreamleecher"
    48  	"github.com/unicornultrafoundation/go-u2u/gossip/protocols/epochpacks/epstream/epstreamseeder"
    49  	"github.com/unicornultrafoundation/go-u2u/gossip/protocols/snap/snapstream/snapleecher"
    50  	"github.com/unicornultrafoundation/go-u2u/logger"
    51  	"github.com/unicornultrafoundation/go-u2u/native"
    52  	"github.com/unicornultrafoundation/go-u2u/native/ibr"
    53  	"github.com/unicornultrafoundation/go-u2u/native/ier"
    54  	"github.com/unicornultrafoundation/go-u2u/utils/txtime"
    55  )
    56  
    57  const (
    58  	softResponseLimitSize = 2 * 1024 * 1024    // Target maximum size of returned events or other data.
    59  	softLimitItems        = 250                // Target maximum number of events or transactions per request/response
    60  	hardLimitItems        = softLimitItems * 4 // Hard maximum number of events or transactions per request/response
    61  
    62  	// txChanSize is the size of the channel listening to NewTxsNotify.
    63  	// The number is referenced from the size of the tx pool.
    64  	txChanSize = 4096
    65  )
    66  
    67  func errResp(code errCode, format string, v ...interface{}) error {
    68  	return fmt.Errorf("%v - %v", code, fmt.Sprintf(format, v...))
    69  }
    70  
    71  func checkLenLimits(size int, v interface{}) error {
    72  	if size <= 0 {
    73  		return errResp(ErrEmptyMessage, "%v", v)
    74  	}
    75  	if size > hardLimitItems {
    76  		return errResp(ErrMsgTooLarge, "%v", v)
    77  	}
    78  	return nil
    79  }
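        // Illustrative sketch (annotation, not original handler code): how the limits
        // above are applied when decoding an incoming batch, mirroring handleMsg below.
        //
        //	var txs types.Transactions
        //	if err := msg.Decode(&txs); err != nil {
        //		return errResp(ErrDecode, "msg %v: %v", msg, err)
        //	}
        //	if err := checkLenLimits(len(txs), txs); err != nil {
        //		return err // ErrEmptyMessage for an empty batch, ErrMsgTooLarge above hardLimitItems (1000)
        //	}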
    80  
    81  type dagNotifier interface {
    82  	SubscribeNewEpoch(ch chan<- idx.Epoch) notify.Subscription
    83  	SubscribeNewEmitted(ch chan<- *native.EventPayload) notify.Subscription
    84  }
    85  
    86  type processCallback struct {
    87  	Event            func(*native.EventPayload) error
    88  	SwitchEpochTo    func(idx.Epoch) error
    89  	PauseEvmSnapshot func()
    90  	BVs              func(native.LlrSignedBlockVotes) error
    91  	BR               func(ibr.LlrIdxFullBlockRecord) error
    92  	EV               func(native.LlrSignedEpochVote) error
    93  	ER               func(ier.LlrIdxFullEpochRecord) error
    94  }
    95  
    96  // handlerConfig is the collection of initialization parameters to create a full
    97  // node network handler.
    98  type handlerConfig struct {
    99  	config   Config
   100  	notifier dagNotifier
   101  	txpool   TxPool
   102  	engineMu sync.Locker
   103  	checkers *eventcheck.Checkers
   104  	s        *Store
   105  	process  processCallback
   106  }
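        // Hypothetical wiring sketch (annotation, not original handler code): how the
        // enclosing service might populate handlerConfig and create the handler; the
        // names cfg, feed, txpool, checkers, store and the apply* callbacks are assumptions.
        //
        //	h, err := newHandler(handlerConfig{
        //		config:   cfg,
        //		notifier: feed,   // anything implementing dagNotifier
        //		txpool:   txpool, // anything implementing TxPool
        //		engineMu: new(sync.Mutex),
        //		checkers: checkers,
        //		s:        store,
        //		process: processCallback{
        //			Event: applyEvent,
        //			BVs:   applyBVs,
        //			BR:    applyBR,
        //			EV:    applyEV,
        //			ER:    applyER,
        //		},
        //	})
        //	if err != nil {
        //		return err
        //	}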
   107  
   108  type snapsyncEpochUpd struct {
   109  	epoch idx.Epoch
   110  	root  common.Hash
   111  }
   112  
   113  type snapsyncCancelCmd struct {
   114  	done chan struct{}
   115  }
   116  
   117  type snapsyncStateUpd struct {
   118  	snapsyncEpochUpd  *snapsyncEpochUpd
   119  	snapsyncCancelCmd *snapsyncCancelCmd
   120  }
   121  
   122  type snapsyncState struct {
   123  	epoch     idx.Epoch
   124  	cancel    func() error
   125  	updatesCh chan snapsyncStateUpd
   126  	quit      chan struct{}
   127  }
   128  
   129  type handler struct {
   130  	NetworkID uint64
   131  	config    Config
   132  
   133  	syncStatus syncStatus
   134  
   135  	txpool   TxPool
   136  	maxPeers int
   137  
   138  	peers *peerSet
   139  
   140  	txsCh  chan evmcore.NewTxsNotify
   141  	txsSub notify.Subscription
   142  
   143  	dagLeecher   *dagstreamleecher.Leecher
   144  	dagSeeder    *dagstreamseeder.Seeder
   145  	dagProcessor *dagprocessor.Processor
   146  	dagFetcher   *itemsfetcher.Fetcher
   147  
   148  	bvLeecher   *bvstreamleecher.Leecher
   149  	bvSeeder    *bvstreamseeder.Seeder
   150  	bvProcessor *bvprocessor.Processor
   151  
   152  	brLeecher   *brstreamleecher.Leecher
   153  	brSeeder    *brstreamseeder.Seeder
   154  	brProcessor *brprocessor.Processor
   155  
   156  	epLeecher   *epstreamleecher.Leecher
   157  	epSeeder    *epstreamseeder.Seeder
   158  	epProcessor *epprocessor.Processor
   159  
   160  	process processCallback
   161  
   162  	txFetcher *itemsfetcher.Fetcher
   163  
   164  	checkers *eventcheck.Checkers
   165  
   166  	msgSemaphore *datasemaphore.DataSemaphore
   167  
   168  	store    *Store
   169  	engineMu sync.Locker
   170  
   171  	notifier             dagNotifier
   172  	emittedEventsCh      chan *native.EventPayload
   173  	emittedEventsSub     notify.Subscription
   174  	newEpochsCh          chan idx.Epoch
   175  	newEpochsSub         notify.Subscription
   176  	quitProgressBradcast chan struct{}
   177  
   178  	// channels for syncer, txsyncLoop
   179  	txsyncCh chan *txsync
   180  	quitSync chan struct{}
   181  
   182  	// snapsync fields
   183  	chain       *ethBlockChain
   184  	snapLeecher *snapleecher.Leecher
   185  	snapState   snapsyncState
   186  
   187  	// wait group is used for graceful shutdowns during downloading
   188  	// and processing
   189  	loopsWg sync.WaitGroup
   190  	wg      sync.WaitGroup
   191  	peerWG  sync.WaitGroup
   192  	started sync.WaitGroup
   193  
   194  	logger.Instance
   195  }
   196  
   197  // newHandler returns a new U2U sub-protocol manager. The U2U sub-protocol manages peers
   198  // capable of interacting with the U2U network.
   199  func newHandler(
   200  	c handlerConfig,
   201  ) (
   202  	*handler,
   203  	error,
   204  ) {
   205  	// Create the protocol manager with the base fields
   206  	h := &handler{
   207  		NetworkID:            c.s.GetRules().NetworkID,
   208  		config:               c.config,
   209  		notifier:             c.notifier,
   210  		txpool:               c.txpool,
   211  		msgSemaphore:         datasemaphore.New(c.config.Protocol.MsgsSemaphoreLimit, getSemaphoreWarningFn("P2P messages")),
   212  		store:                c.s,
   213  		process:              c.process,
   214  		checkers:             c.checkers,
   215  		peers:                newPeerSet(),
   216  		engineMu:             c.engineMu,
   217  		txsyncCh:             make(chan *txsync),
   218  		quitSync:             make(chan struct{}),
   219  		quitProgressBradcast: make(chan struct{}),
   220  
   221  		snapState: snapsyncState{
   222  			updatesCh: make(chan snapsyncStateUpd, 128),
   223  			quit:      make(chan struct{}),
   224  		},
   225  
   226  		Instance: logger.New("PM"),
   227  	}
   228  	h.started.Add(1)
   229  
   230  	// TODO: configure it
   231  	var (
   232  		configBloomCache uint64 = 0 // Megabytes to alloc for fast sync bloom
   233  	)
   234  
   235  	var err error
   236  	h.chain, err = newEthBlockChain(c.s)
   237  	if err != nil {
   238  		return nil, err
   239  	}
   240  
   241  	stateDb := h.store.EvmStore().EvmDb
   242  	var stateBloom *trie.SyncBloom
   243  	if false {
   244  		// NOTE: Construct the downloader (long sync) and its backing state bloom if fast
   245  		// sync is requested. The downloader is responsible for deallocating the state
   246  		// bloom when it's done.
   247  		// Note: we don't enable it if snap-sync is performed, since it's very heavy
   248  		// and the heal portion of the snap sync is much lighter than fast sync. What we
   249  		// particularly want to avoid is a 90%-finished (but restarted) snap-sync beginning
   250  		// to index the entire trie.
   251  		stateBloom = trie.NewSyncBloom(configBloomCache, stateDb)
   252  	}
   253  	h.snapLeecher = snapleecher.New(stateDb, stateBloom, h.removePeer)
   254  
   255  	h.dagFetcher = itemsfetcher.New(h.config.Protocol.DagFetcher, itemsfetcher.Callback{
   256  		OnlyInterested: func(ids []interface{}) []interface{} {
   257  			return h.onlyInterestedEventsI(ids)
   258  		},
   259  		Suspend: func() bool {
   260  			return false
   261  		},
   262  	})
   263  	h.txFetcher = itemsfetcher.New(h.config.Protocol.TxFetcher, itemsfetcher.Callback{
   264  		OnlyInterested: func(txids []interface{}) []interface{} {
   265  			return txidsToInterfaces(h.txpool.OnlyNotExisting(interfacesToTxids(txids)))
   266  		},
   267  		Suspend: func() bool {
   268  			return false
   269  		},
   270  	})
   271  
   272  	h.dagProcessor = h.makeDagProcessor(c.checkers)
   273  	h.dagLeecher = dagstreamleecher.New(h.store.GetEpoch(), h.store.GetHighestLamport() == 0, h.config.Protocol.DagStreamLeecher, dagstreamleecher.Callbacks{
   274  		IsProcessed: h.store.HasEvent,
   275  		RequestChunk: func(peer string, r dagstream.Request) error {
   276  			p := h.peers.Peer(peer)
   277  			if p == nil {
   278  				return errNotRegistered
   279  			}
   280  			return p.RequestEventsStream(r)
   281  		},
   282  		Suspend: func(_ string) bool {
   283  			return h.dagFetcher.Overloaded() || h.dagProcessor.Overloaded()
   284  		},
   285  		PeerEpoch: func(peer string) idx.Epoch {
   286  			p := h.peers.Peer(peer)
   287  			if p == nil || p.Useless() {
   288  				return 0
   289  			}
   290  			return p.progress.Epoch
   291  		},
   292  	})
   293  	h.dagSeeder = dagstreamseeder.New(h.config.Protocol.DagStreamSeeder, dagstreamseeder.Callbacks{
   294  		ForEachEvent: c.s.ForEachEventRLP,
   295  	})
   296  
   297  	h.bvProcessor = h.makeBvProcessor(c.checkers)
   298  	h.bvLeecher = bvstreamleecher.New(h.config.Protocol.BvStreamLeecher, bvstreamleecher.Callbacks{
   299  		LowestBlockToDecide: func() (idx.Epoch, idx.Block) {
   300  			llrs := h.store.GetLlrState()
   301  			epoch := h.store.FindBlockEpoch(llrs.LowestBlockToDecide)
   302  			return epoch, llrs.LowestBlockToDecide
   303  		},
   304  		MaxEpochToDecide: func() idx.Epoch {
   305  			if !h.syncStatus.RequestLLR() {
   306  				return 0
   307  			}
   308  			return h.store.GetLlrState().LowestEpochToFill
   309  		},
   310  		IsProcessed: h.store.HasBlockVotes,
   311  		RequestChunk: func(peer string, r bvstream.Request) error {
   312  			p := h.peers.Peer(peer)
   313  			if p == nil {
   314  				return errNotRegistered
   315  			}
   316  			return p.RequestBVsStream(r)
   317  		},
   318  		Suspend: func(_ string) bool {
   319  			return h.bvProcessor.Overloaded()
   320  		},
   321  		PeerBlock: func(peer string) idx.Block {
   322  			p := h.peers.Peer(peer)
   323  			if p == nil || p.Useless() {
   324  				return 0
   325  			}
   326  			return p.progress.LastBlockIdx
   327  		},
   328  	})
   329  	h.bvSeeder = bvstreamseeder.New(h.config.Protocol.BvStreamSeeder, bvstreamseeder.Callbacks{
   330  		Iterate: h.store.IterateOverlappingBlockVotesRLP,
   331  	})
   332  
   333  	h.brProcessor = h.makeBrProcessor()
   334  	h.brLeecher = brstreamleecher.New(h.config.Protocol.BrStreamLeecher, brstreamleecher.Callbacks{
   335  		LowestBlockToFill: func() idx.Block {
   336  			return h.store.GetLlrState().LowestBlockToFill
   337  		},
   338  		MaxBlockToFill: func() idx.Block {
   339  			if !h.syncStatus.RequestLLR() {
   340  				return 0
   341  			}
   342  			// rough estimation for the max fill-able block
   343  			llrs := h.store.GetLlrState()
   344  			start := llrs.LowestBlockToFill
   345  			end := llrs.LowestBlockToDecide
   346  			if end > start+100 && h.store.HasBlock(start+100) {
   347  				return start + 100
   348  			}
   349  			return end
   350  		},
   351  		IsProcessed: h.store.HasBlock,
   352  		RequestChunk: func(peer string, r brstream.Request) error {
   353  			p := h.peers.Peer(peer)
   354  			if p == nil {
   355  				return errNotRegistered
   356  			}
   357  			return p.RequestBRsStream(r)
   358  		},
   359  		Suspend: func(_ string) bool {
   360  			return h.brProcessor.Overloaded()
   361  		},
   362  		PeerBlock: func(peer string) idx.Block {
   363  			p := h.peers.Peer(peer)
   364  			if p == nil || p.Useless() {
   365  				return 0
   366  			}
   367  			return p.progress.LastBlockIdx
   368  		},
   369  	})
   370  	h.brSeeder = brstreamseeder.New(h.config.Protocol.BrStreamSeeder, brstreamseeder.Callbacks{
   371  		Iterate: h.store.IterateFullBlockRecordsRLP,
   372  	})
   373  
   374  	h.epProcessor = h.makeEpProcessor(h.checkers)
   375  	h.epLeecher = epstreamleecher.New(h.config.Protocol.EpStreamLeecher, epstreamleecher.Callbacks{
   376  		LowestEpochToFetch: func() idx.Epoch {
   377  			llrs := h.store.GetLlrState()
   378  			if llrs.LowestEpochToFill < llrs.LowestEpochToDecide {
   379  				return llrs.LowestEpochToFill
   380  			}
   381  			return llrs.LowestEpochToDecide
   382  		},
   383  		MaxEpochToFetch: func() idx.Epoch {
   384  			if !h.syncStatus.RequestLLR() {
   385  				return 0
   386  			}
   387  			return h.store.GetLlrState().LowestEpochToDecide + 10000
   388  		},
   389  		IsProcessed: h.store.HasHistoryBlockEpochState,
   390  		RequestChunk: func(peer string, r epstream.Request) error {
   391  			p := h.peers.Peer(peer)
   392  			if p == nil {
   393  				return errNotRegistered
   394  			}
   395  			return p.RequestEPsStream(r)
   396  		},
   397  		Suspend: func(_ string) bool {
   398  			return h.epProcessor.Overloaded()
   399  		},
   400  		PeerEpoch: func(peer string) idx.Epoch {
   401  			p := h.peers.Peer(peer)
   402  			if p == nil || p.Useless() {
   403  				return 0
   404  			}
   405  			return p.progress.Epoch
   406  		},
   407  	})
   408  	h.epSeeder = epstreamseeder.New(h.config.Protocol.EpStreamSeeder, epstreamseeder.Callbacks{
   409  		Iterate: h.store.IterateEpochPacksRLP,
   410  	})
   411  
   412  	return h, nil
   413  }
   414  
   415  func (h *handler) peerMisbehaviour(peer string, err error) bool {
   416  	if eventcheck.IsBan(err) {
   417  		log.Warn("Dropping peer due to a misbehaviour", "peer", peer, "err", err)
   418  		h.removePeer(peer)
   419  		return true
   420  	}
   421  	return false
   422  }
   423  
   424  func (h *handler) makeDagProcessor(checkers *eventcheck.Checkers) *dagprocessor.Processor {
   425  	// checkers
   426  	lightCheck := func(e dag.Event) error {
   427  		if h.store.GetEpoch() != e.ID().Epoch() {
   428  			return epochcheck.ErrNotRelevant
   429  		}
   430  		if h.dagProcessor.IsBuffered(e.ID()) {
   431  			return eventcheck.ErrDuplicateEvent
   432  		}
   433  		if h.store.HasEvent(e.ID()) {
   434  			return eventcheck.ErrAlreadyConnectedEvent
   435  		}
   436  		if err := checkers.Basiccheck.Validate(e.(native.EventPayloadI)); err != nil {
   437  			return err
   438  		}
   439  		if err := checkers.Epochcheck.Validate(e.(native.EventPayloadI)); err != nil {
   440  			return err
   441  		}
   442  		return nil
   443  	}
   444  	bufferedCheck := func(_e dag.Event, _parents dag.Events) error {
   445  		e := _e.(native.EventI)
   446  		parents := make(native.EventIs, len(_parents))
   447  		for i := range _parents {
   448  			parents[i] = _parents[i].(native.EventI)
   449  		}
   450  		var selfParent native.EventI
   451  		if e.SelfParent() != nil {
   452  			selfParent = parents[0].(native.EventI)
   453  		}
   454  		if err := checkers.Parentscheck.Validate(e, parents); err != nil {
   455  			return err
   456  		}
   457  		if err := checkers.Gaspowercheck.Validate(e, selfParent); err != nil {
   458  			return err
   459  		}
   460  		return nil
   461  	}
   462  	parentlessChecker := parentlesscheck.Checker{
   463  		HeavyCheck: &heavycheck.EventsOnly{Checker: checkers.Heavycheck},
   464  		LightCheck: lightCheck,
   465  	}
   466  	newProcessor := dagprocessor.New(datasemaphore.New(h.config.Protocol.EventsSemaphoreLimit, getSemaphoreWarningFn("DAG events")), h.config.Protocol.DagProcessor, dagprocessor.Callback{
   467  		// DAG callbacks
   468  		Event: dagprocessor.EventCallback{
   469  			Process: func(_e dag.Event) error {
   470  				e := _e.(*native.EventPayload)
   471  				preStart := time.Now()
   472  				h.engineMu.Lock()
   473  				defer h.engineMu.Unlock()
   474  
   475  				err := h.process.Event(e)
   476  				if err != nil {
   477  					return err
   478  				}
   479  
   480  				// event is connected, announce it
   481  				passedSinceEvent := preStart.Sub(e.CreationTime().Time())
   482  				h.BroadcastEvent(e, passedSinceEvent)
   483  
   484  				return nil
   485  			},
   486  			Released: func(e dag.Event, peer string, err error) {
   487  				if eventcheck.IsBan(err) {
   488  					log.Warn("Incoming event rejected", "event", e.ID().String(), "creator", e.Creator(), "err", err)
   489  					h.removePeer(peer)
   490  				}
   491  			},
   492  
   493  			Exists: func(id hash.Event) bool {
   494  				return h.store.HasEvent(id)
   495  			},
   496  
   497  			Get: func(id hash.Event) dag.Event {
   498  				e := h.store.GetEventPayload(id)
   499  				if e == nil {
   500  					return nil
   501  				}
   502  				return e
   503  			},
   504  
   505  			CheckParents:    bufferedCheck,
   506  			CheckParentless: parentlessChecker.Enqueue,
   507  		},
   508  		HighestLamport: h.store.GetHighestLamport,
   509  	})
   510  
   511  	return newProcessor
   512  }
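        // Annotation (not original handler code): the resulting validation pipeline for an
        // incoming DAG event is roughly:
        //
        //	lightCheck (epoch match, duplicate/already-connected checks, Basiccheck, Epochcheck)
        //	    -> heavy check via parentlesscheck (before parents are known)
        //	    -> bufferedCheck (Parentscheck + Gaspowercheck, once parents are buffered)
        //	    -> Process (connects the event under engineMu, then re-broadcasts it)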
   513  
   514  func (h *handler) makeBvProcessor(checkers *eventcheck.Checkers) *bvprocessor.Processor {
   515  	// checkers
   516  	lightCheck := func(bvs native.LlrSignedBlockVotes) error {
   517  		if h.store.HasBlockVotes(bvs.Val.Epoch, bvs.Val.LastBlock(), bvs.Signed.Locator.ID()) {
   518  			return eventcheck.ErrAlreadyProcessedBVs
   519  		}
   520  		return checkers.Basiccheck.ValidateBVs(bvs)
   521  	}
   522  	allChecker := bvallcheck.Checker{
   523  		HeavyCheck: &heavycheck.BVsOnly{Checker: checkers.Heavycheck},
   524  		LightCheck: lightCheck,
   525  	}
   526  	return bvprocessor.New(datasemaphore.New(h.config.Protocol.BVsSemaphoreLimit, getSemaphoreWarningFn("BVs")), h.config.Protocol.BvProcessor, bvprocessor.Callback{
   527  		// BVs callbacks
   528  		Item: bvprocessor.ItemCallback{
   529  			Process: h.process.BVs,
   530  			Released: func(bvs native.LlrSignedBlockVotes, peer string, err error) {
   531  				if eventcheck.IsBan(err) {
   532  					log.Warn("Incoming BVs rejected", "BVs", bvs.Signed.Locator.ID(), "creator", bvs.Signed.Locator.Creator, "err", err)
   533  					h.removePeer(peer)
   534  				}
   535  			},
   536  			Check: allChecker.Enqueue,
   537  		},
   538  	})
   539  }
   540  
   541  func (h *handler) makeBrProcessor() *brprocessor.Processor {
   542  	// no checkers for block records
   543  	return brprocessor.New(datasemaphore.New(h.config.Protocol.BVsSemaphoreLimit, getSemaphoreWarningFn("BR")), h.config.Protocol.BrProcessor, brprocessor.Callback{
   544  		// BR callbacks
   545  		Item: brprocessor.ItemCallback{
   546  			Process: h.process.BR,
   547  			Released: func(br ibr.LlrIdxFullBlockRecord, peer string, err error) {
   548  				if eventcheck.IsBan(err) {
   549  					log.Warn("Incoming BR rejected", "block", br.Idx, "err", err)
   550  					h.removePeer(peer)
   551  				}
   552  			},
   553  		},
   554  	})
   555  }
   556  
   557  func (h *handler) makeEpProcessor(checkers *eventcheck.Checkers) *epprocessor.Processor {
   558  	// checkers
   559  	lightCheck := func(ev native.LlrSignedEpochVote) error {
   560  		if h.store.HasEpochVote(ev.Val.Epoch, ev.Signed.Locator.ID()) {
   561  			return eventcheck.ErrAlreadyProcessedEV
   562  		}
   563  		return checkers.Basiccheck.ValidateEV(ev)
   564  	}
   565  	allChecker := evallcheck.Checker{
   566  		HeavyCheck: &heavycheck.EVOnly{Checker: checkers.Heavycheck},
   567  		LightCheck: lightCheck,
   568  	}
   569  	// checkers
   570  	return epprocessor.New(datasemaphore.New(h.config.Protocol.BVsSemaphoreLimit, getSemaphoreWarningFn("EPs")), h.config.Protocol.EpProcessor, epprocessor.Callback{
   571  		// EV/ER callbacks
   572  		Item: epprocessor.ItemCallback{
   573  			ProcessEV: h.process.EV,
   574  			ProcessER: h.process.ER,
   575  			ReleasedEV: func(ev native.LlrSignedEpochVote, peer string, err error) {
   576  				if eventcheck.IsBan(err) {
   577  					log.Warn("Incoming EV rejected", "event", ev.Signed.Locator.ID(), "creator", ev.Signed.Locator.Creator, "err", err)
   578  					h.removePeer(peer)
   579  				}
   580  			},
   581  			ReleasedER: func(er ier.LlrIdxFullEpochRecord, peer string, err error) {
   582  				if eventcheck.IsBan(err) {
   583  					log.Warn("Incoming ER rejected", "epoch", er.Idx, "err", err)
   584  					h.removePeer(peer)
   585  				}
   586  			},
   587  			CheckEV: allChecker.Enqueue,
   588  		},
   589  	})
   590  }
   591  
   592  func (h *handler) isEventInterested(id hash.Event, epoch idx.Epoch) bool {
   593  	if id.Epoch() != epoch {
   594  		return false
   595  	}
   596  
   597  	if h.dagProcessor.IsBuffered(id) || h.store.HasEvent(id) {
   598  		return false
   599  	}
   600  	return true
   601  }
   602  
   603  func (h *handler) onlyInterestedEventsI(ids []interface{}) []interface{} {
   604  	if len(ids) == 0 {
   605  		return ids
   606  	}
   607  	epoch := h.store.GetEpoch()
   608  	interested := make([]interface{}, 0, len(ids))
   609  	for _, id := range ids {
   610  		if h.isEventInterested(id.(hash.Event), epoch) {
   611  			interested = append(interested, id)
   612  		}
   613  	}
   614  	return interested
   615  }
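        // Annotation (not original handler code): onlyInterestedEventsI backs the dagFetcher's
        // OnlyInterested callback set up in newHandler, so announced hashes from another epoch,
        // already buffered, or already stored are dropped before any request goes out, e.g.:
        //
        //	interested := h.onlyInterestedEventsI(eventIDsToInterfaces(announces))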
   616  
   617  func (h *handler) removePeer(id string) {
   618  	peer := h.peers.Peer(id)
   619  	if peer != nil {
   620  		peer.Peer.Disconnect(p2p.DiscUselessPeer)
   621  	}
   622  }
   623  
   624  func (h *handler) unregisterPeer(id string) {
   625  	// Short circuit if the peer was already removed
   626  	peer := h.peers.Peer(id)
   627  	if peer == nil {
   628  		return
   629  	}
   630  	log.Debug("Removing peer", "peer", id)
   631  
   632  	// Unregister the peer from the leechers, seeders and the peer set
   633  	_ = h.epLeecher.UnregisterPeer(id)
   634  	_ = h.epSeeder.UnregisterPeer(id)
   635  	_ = h.dagLeecher.UnregisterPeer(id)
   636  	_ = h.dagSeeder.UnregisterPeer(id)
   637  	_ = h.brLeecher.UnregisterPeer(id)
   638  	_ = h.brSeeder.UnregisterPeer(id)
   639  	_ = h.bvLeecher.UnregisterPeer(id)
   640  	_ = h.bvSeeder.UnregisterPeer(id)
   641  	// Remove the `snap` extension if it exists
   642  	if peer.snapExt != nil {
   643  		_ = h.snapLeecher.SnapSyncer.Unregister(id)
   644  	}
   645  	if err := h.peers.UnregisterPeer(id); err != nil {
   646  		log.Error("Peer removal failed", "peer", id, "err", err)
   647  	}
   648  }
   649  
   650  func (h *handler) Start(maxPeers int) {
   651  	h.snapsyncStageTick()
   652  
   653  	h.maxPeers = maxPeers
   654  
   655  	// broadcast transactions
   656  	h.txsCh = make(chan evmcore.NewTxsNotify, txChanSize)
   657  	h.txsSub = h.txpool.SubscribeNewTxsNotify(h.txsCh)
   658  
   659  	h.loopsWg.Add(1)
   660  	go h.txBroadcastLoop()
   661  
   662  	if h.notifier != nil {
   663  		// broadcast emitted events
   664  		h.emittedEventsCh = make(chan *native.EventPayload, 4)
   665  		h.emittedEventsSub = h.notifier.SubscribeNewEmitted(h.emittedEventsCh)
   666  		// epoch changes
   667  		h.newEpochsCh = make(chan idx.Epoch, 4)
   668  		h.newEpochsSub = h.notifier.SubscribeNewEpoch(h.newEpochsCh)
   669  
   670  		h.loopsWg.Add(3)
   671  		go h.emittedBroadcastLoop()
   672  		go h.progressBroadcastLoop()
   673  		go h.onNewEpochLoop()
   674  	}
   675  
   676  	// start sync handlers
   677  	go h.txsyncLoop()
   678  	h.loopsWg.Add(2)
   679  	go h.snapsyncStateLoop()
   680  	go h.snapsyncStageLoop()
   681  	h.dagFetcher.Start()
   682  	h.txFetcher.Start()
   683  	h.checkers.Heavycheck.Start()
   684  
   685  	h.epProcessor.Start()
   686  	h.epSeeder.Start()
   687  	h.epLeecher.Start()
   688  
   689  	h.dagProcessor.Start()
   690  	h.dagSeeder.Start()
   691  	h.dagLeecher.Start()
   692  
   693  	h.bvProcessor.Start()
   694  	h.bvSeeder.Start()
   695  	h.bvLeecher.Start()
   696  
   697  	h.brProcessor.Start()
   698  	h.brSeeder.Start()
   699  	h.brLeecher.Start()
   700  	h.started.Done()
   701  }
   702  
   703  func (h *handler) Stop() {
   704  	log.Info("Stopping U2U protocol")
   705  
   706  	h.brLeecher.Stop()
   707  	h.brSeeder.Stop()
   708  	h.brProcessor.Stop()
   709  
   710  	h.bvLeecher.Stop()
   711  	h.bvSeeder.Stop()
   712  	h.bvProcessor.Stop()
   713  
   714  	h.dagLeecher.Stop()
   715  	h.dagSeeder.Stop()
   716  	h.dagProcessor.Stop()
   717  
   718  	h.epLeecher.Stop()
   719  	h.epSeeder.Stop()
   720  	h.epProcessor.Stop()
   721  
   722  	h.checkers.Heavycheck.Stop()
   723  	h.txFetcher.Stop()
   724  	h.dagFetcher.Stop()
   725  
   726  	close(h.quitProgressBradcast)
   727  	close(h.snapState.quit)
   728  	h.txsSub.Unsubscribe() // quits txBroadcastLoop
   729  	if h.notifier != nil {
   730  		h.emittedEventsSub.Unsubscribe() // quits eventBroadcastLoop
   731  		h.newEpochsSub.Unsubscribe()     // quits onNewEpochLoop
   732  	}
   733  
   734  	// Wait for the subscription loops to come down.
   735  	h.loopsWg.Wait()
   736  
   737  	h.msgSemaphore.Terminate()
   738  	// Quit the sync loop.
   739  	// After this channel is closed, no new peers will be accepted.
   740  	close(h.quitSync)
   741  
   742  	// Disconnect existing sessions.
   743  	// This also closes the gate for any new registrations on the peer set.
   744  	// Sessions which are already established but not added to h.peers yet
   745  	// will exit when they try to register.
   746  	h.peers.Close()
   747  
   748  	// Wait for all peer handler goroutines to come down.
   749  	h.wg.Wait()
   750  	h.peerWG.Wait()
   751  
   752  	log.Info("U2U protocol stopped")
   753  }
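        // Lifecycle sketch (annotation, not original handler code): the owning service is
        // assumed to pair Start and Stop around the P2P server lifetime, while the p2p.Protocol
        // Run callback feeds each connected peer into handle; srv is an assumed *p2p.Server.
        //
        //	h.Start(srv.MaxPeers)
        //	defer h.Stop()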
   754  
   755  func (h *handler) myProgress() PeerProgress {
   756  	bs := h.store.GetBlockState()
   757  	epoch := h.store.GetEpoch()
   758  	return PeerProgress{
   759  		Epoch:            epoch,
   760  		LastBlockIdx:     bs.LastBlock.Idx,
   761  		LastBlockAtropos: bs.LastBlock.Atropos,
   762  	}
   763  }
   764  
   765  func (h *handler) highestPeerProgress() PeerProgress {
   766  	peers := h.peers.List()
   767  	max := h.myProgress()
   768  	for _, peer := range peers {
   769  		if max.LastBlockIdx < peer.progress.LastBlockIdx {
   770  			max = peer.progress
   771  		}
   772  	}
   773  	return max
   774  }
   775  
   776  // handle is the callback invoked to manage the life cycle of a peer. When
   777  // this function terminates, the peer is disconnected.
   778  func (h *handler) handle(p *peer) error {
   779  	// If the peer has a `snap` extension, wait for it to connect so we can have
   780  	// a uniform initialization/teardown mechanism
   781  	snap, err := h.peers.WaitSnapExtension(p)
   782  	if err != nil {
   783  		p.Log().Error("Snapshot extension barrier failed", "err", err)
   784  		return err
   785  	}
   786  	useless := discfilter.Banned(p.Node().ID(), p.Node().Record())
   787  	if !useless && (!eligibleForSnap(p.Peer) || !strings.Contains(strings.ToLower(p.Name()), "u2u")) {
   788  		useless = true
   789  		discfilter.Ban(p.ID())
   790  	}
   791  	if !p.Peer.Info().Network.Trusted && useless {
   792  		if h.peers.UselessNum() >= h.maxPeers/10 {
   793  			// don't allow more than 10% of useless peers
   794  			return p2p.DiscTooManyPeers
   795  		}
   796  		p.SetUseless()
   797  	}
   798  
   799  	h.peerWG.Add(1)
   800  	defer h.peerWG.Done()
   801  
   802  	// Execute the handshake
   803  	var (
   804  		genesis    = *h.store.GetGenesisID()
   805  		myProgress = h.myProgress()
   806  	)
   807  	if err := p.Handshake(h.NetworkID, myProgress, common.Hash(genesis)); err != nil {
   808  		p.Log().Debug("Handshake failed", "err", err)
   809  		if !useless {
   810  			discfilter.Ban(p.ID())
   811  		}
   812  		return err
   813  	}
   814  
   815  	// Ignore maxPeers if this is a trusted peer
   816  	if h.peers.Len() >= h.maxPeers && !p.Peer.Info().Network.Trusted {
   817  		return p2p.DiscTooManyPeers
   818  	}
   819  	p.Log().Debug("Peer connected", "name", p.Name())
   820  
   821  	// Register the peer locally
   822  	if err := h.peers.RegisterPeer(p, snap); err != nil {
   823  		p.Log().Warn("Peer registration failed", "err", err)
   824  		return err
   825  	}
   826  	if err := h.dagLeecher.RegisterPeer(p.id); err != nil {
   827  		p.Log().Warn("Leecher peer registration failed", "err", err)
   828  		return err
   829  	}
   830  	if p.RunningCap(ProtocolName, []uint{UP01}) {
   831  		if err := h.epLeecher.RegisterPeer(p.id); err != nil {
   832  			p.Log().Warn("Leecher peer registration failed", "err", err)
   833  			return err
   834  		}
   835  		if err := h.bvLeecher.RegisterPeer(p.id); err != nil {
   836  			p.Log().Warn("Leecher peer registration failed", "err", err)
   837  			return err
   838  		}
   839  		if err := h.brLeecher.RegisterPeer(p.id); err != nil {
   840  			p.Log().Warn("Leecher peer registration failed", "err", err)
   841  			return err
   842  		}
   843  	}
   844  	if snap != nil {
   845  		if err := h.snapLeecher.SnapSyncer.Register(snap); err != nil {
   846  			p.Log().Error("Failed to register peer in snap syncer", "err", err)
   847  			return err
   848  		}
   849  	}
   850  	defer h.unregisterPeer(p.id)
   851  
   852  	// Propagate existing transactions. New transactions appearing
   853  	// after this will be sent via broadcasts.
   854  	h.syncTransactions(p, h.txpool.SampleHashes(h.config.Protocol.MaxInitialTxHashesSend))
   855  
   856  	// Handle incoming messages until the connection is torn down
   857  	for {
   858  		if err := h.handleMsg(p); err != nil {
   859  			p.Log().Debug("Message handling failed", "err", err)
   860  			return err
   861  		}
   862  	}
   863  }
   864  
   865  func interfacesToEventIDs(ids []interface{}) hash.Events {
   866  	res := make(hash.Events, len(ids))
   867  	for i, id := range ids {
   868  		res[i] = id.(hash.Event)
   869  	}
   870  	return res
   871  }
   872  
   873  func eventIDsToInterfaces(ids hash.Events) []interface{} {
   874  	res := make([]interface{}, len(ids))
   875  	for i, id := range ids {
   876  		res[i] = id
   877  	}
   878  	return res
   879  }
   880  
   881  func interfacesToTxids(ids []interface{}) []common.Hash {
   882  	res := make([]common.Hash, len(ids))
   883  	for i, id := range ids {
   884  		res[i] = id.(common.Hash)
   885  	}
   886  	return res
   887  }
   888  
   889  func txidsToInterfaces(ids []common.Hash) []interface{} {
   890  	res := make([]interface{}, len(ids))
   891  	for i, id := range ids {
   892  		res[i] = id
   893  	}
   894  	return res
   895  }
   896  
   897  func (h *handler) handleTxHashes(p *peer, announces []common.Hash) {
   898  	// Mark the hashes as present at the remote node
   899  	now := time.Now()
   900  	for _, id := range announces {
   901  		txtime.Saw(id, now)
   902  		p.MarkTransaction(id)
   903  	}
   904  	// Schedule all the unknown hashes for retrieval
   905  	requestTransactions := func(ids []interface{}) error {
   906  		return p.RequestTransactions(interfacesToTxids(ids))
   907  	}
   908  	_ = h.txFetcher.NotifyAnnounces(p.id, txidsToInterfaces(announces), time.Now(), requestTransactions)
   909  }
   910  
   911  func (h *handler) handleTxs(p *peer, txs types.Transactions) {
   912  	// Mark the hashes as present at the remote node
   913  	now := time.Now()
   914  	for _, tx := range txs {
   915  		txid := tx.Hash()
   916  		txtime.Saw(txid, now)
   917  		p.MarkTransaction(txid)
   918  	}
   919  	h.txpool.AddRemotes(txs)
   920  }
   921  
   922  func (h *handler) handleEventHashes(p *peer, announces hash.Events) {
   923  	// Mark the hashes as present at the remote node
   924  	for _, id := range announces {
   925  		p.MarkEvent(id)
   926  	}
   927  	// filter out IDs with a too-high Lamport time
   928  	notTooHigh := make(hash.Events, 0, len(announces))
   929  	sessionCfg := h.config.Protocol.DagStreamLeecher.Session
   930  	for _, id := range announces {
   931  		maxLamport := h.store.GetHighestLamport() + idx.Lamport(sessionCfg.DefaultChunkItemsNum+1)*idx.Lamport(sessionCfg.ParallelChunksDownload)
   932  		if id.Lamport() <= maxLamport {
   933  			notTooHigh = append(notTooHigh, id)
   934  		}
   935  	}
   936  	if len(announces) != len(notTooHigh) {
   937  		h.dagLeecher.ForceSyncing()
   938  	}
   939  	if len(notTooHigh) == 0 {
   940  		return
   941  	}
   942  	// Schedule all the unknown hashes for retrieval
   943  	requestEvents := func(ids []interface{}) error {
   944  		return p.RequestEvents(interfacesToEventIDs(ids))
   945  	}
   946  	_ = h.dagFetcher.NotifyAnnounces(p.id, eventIDsToInterfaces(notTooHigh), time.Now(), requestEvents)
   947  }
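        // Worked example (annotation, not original handler code) of the Lamport filter above,
        // with hypothetical values: highest Lamport 1000, DefaultChunkItemsNum = 500 and
        // ParallelChunksDownload = 6 give
        //
        //	maxLamport = 1000 + (500+1)*6 = 4006
        //
        // Announced IDs above that Lamport time are not fetched one-by-one; instead the DAG
        // leecher is forced into stream syncing.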
   948  
   949  func (h *handler) handleEvents(p *peer, events dag.Events, ordered bool) {
   950  	// Mark the hashes as present at the remote node
   951  	now := time.Now()
   952  	for _, e := range events {
   953  		for _, tx := range e.(native.EventPayloadI).Txs() {
   954  			txtime.Saw(tx.Hash(), now)
   955  		}
   956  		p.MarkEvent(e.ID())
   957  	}
   958  	// filter out events with a too-high Lamport time
   959  	notTooHigh := make(dag.Events, 0, len(events))
   960  	sessionCfg := h.config.Protocol.DagStreamLeecher.Session
   961  	for _, e := range events {
   962  		maxLamport := h.store.GetHighestLamport() + idx.Lamport(sessionCfg.DefaultChunkItemsNum+1)*idx.Lamport(sessionCfg.ParallelChunksDownload)
   963  		if e.Lamport() <= maxLamport {
   964  			notTooHigh = append(notTooHigh, e)
   965  		}
   966  		if now.Sub(e.(native.EventI).CreationTime().Time()) < 10*time.Minute {
   967  			h.syncStatus.MarkMaybeSynced()
   968  		}
   969  	}
   970  	if len(events) != len(notTooHigh) {
   971  		h.dagLeecher.ForceSyncing()
   972  	}
   973  	if len(notTooHigh) == 0 {
   974  		return
   975  	}
   976  	// Schedule all the events for connection
   977  	peer := *p
   978  	requestEvents := func(ids []interface{}) error {
   979  		return peer.RequestEvents(interfacesToEventIDs(ids))
   980  	}
   981  	notifyAnnounces := func(ids hash.Events) {
   982  		_ = h.dagFetcher.NotifyAnnounces(peer.id, eventIDsToInterfaces(ids), now, requestEvents)
   983  	}
   984  	_ = h.dagProcessor.Enqueue(peer.id, notTooHigh, ordered, notifyAnnounces, nil)
   985  }
   986  
   987  // handleMsg is invoked whenever an inbound message is received from a remote
   988  // peer. The remote connection is torn down upon returning any error.
   989  func (h *handler) handleMsg(p *peer) error {
   990  	// Read the next message from the remote peer, and ensure it's fully consumed
   991  	msg, err := p.rw.ReadMsg()
   992  	if err != nil {
   993  		return err
   994  	}
   995  	if msg.Size > protocolMaxMsgSize {
   996  		return errResp(ErrMsgTooLarge, "%v > %v", msg.Size, protocolMaxMsgSize)
   997  	}
   998  	defer msg.Discard()
   999  	// Acquire semaphore for serialized messages
  1000  	eventsSizeEst := dag.Metric{
  1001  		Num:  1,
  1002  		Size: uint64(msg.Size),
  1003  	}
  1004  	if !h.msgSemaphore.Acquire(eventsSizeEst, h.config.Protocol.MsgsSemaphoreTimeout) {
  1005  		h.Log.Warn("Failed to acquire semaphore for p2p message", "size", msg.Size, "peer", p.id)
  1006  		return nil
  1007  	}
  1008  	defer h.msgSemaphore.Release(eventsSizeEst)
  1009  
  1010  	// Handle the message depending on its contents
  1011  	switch {
  1012  	case msg.Code == HandshakeMsg:
  1013  		// Status messages should never arrive after the handshake
  1014  		return errResp(ErrExtraStatusMsg, "uncontrolled status message")
  1015  
  1016  	case msg.Code == ProgressMsg:
  1017  		var progress PeerProgress
  1018  		if err := msg.Decode(&progress); err != nil {
  1019  			return errResp(ErrDecode, "%v: %v", msg, err)
  1020  		}
  1021  		p.SetProgress(progress)
  1022  
  1023  	case msg.Code == EvmTxsMsg:
  1024  		// Transactions arrived, make sure we have a valid and fresh graph to handle them
  1025  		if !h.syncStatus.AcceptTxs() {
  1026  			break
  1027  		}
  1028  		// Transactions can be processed, parse all of them and deliver to the pool
  1029  		var txs types.Transactions
  1030  		if err := msg.Decode(&txs); err != nil {
  1031  			return errResp(ErrDecode, "msg %v: %v", msg, err)
  1032  		}
  1033  		if err := checkLenLimits(len(txs), txs); err != nil {
  1034  			return err
  1035  		}
  1036  		txids := make([]interface{}, txs.Len())
  1037  		for i, tx := range txs {
  1038  			txids[i] = tx.Hash()
  1039  		}
  1040  		_ = h.txFetcher.NotifyReceived(txids)
  1041  		h.handleTxs(p, txs)
  1042  
  1043  	case msg.Code == NewEvmTxHashesMsg:
  1044  		// Transaction hashes arrived, make sure we have a valid and fresh graph to handle them
  1045  		if !h.syncStatus.AcceptTxs() {
  1046  			break
  1047  		}
  1048  		// Hashes can be processed, parse all of them and schedule the unknown ones for retrieval
  1049  		var txHashes []common.Hash
  1050  		if err := msg.Decode(&txHashes); err != nil {
  1051  			return errResp(ErrDecode, "msg %v: %v", msg, err)
  1052  		}
  1053  		if err := checkLenLimits(len(txHashes), txHashes); err != nil {
  1054  			return err
  1055  		}
  1056  		h.handleTxHashes(p, txHashes)
  1057  
  1058  	case msg.Code == GetEvmTxsMsg:
  1059  		var requests []common.Hash
  1060  		if err := msg.Decode(&requests); err != nil {
  1061  			return errResp(ErrDecode, "msg %v: %v", msg, err)
  1062  		}
  1063  		if err := checkLenLimits(len(requests), requests); err != nil {
  1064  			return err
  1065  		}
  1066  
  1067  		txs := make(types.Transactions, 0, len(requests))
  1068  		for _, txid := range requests {
  1069  			tx := h.txpool.Get(txid)
  1070  			if tx == nil {
  1071  				continue
  1072  			}
  1073  			txs = append(txs, tx)
  1074  		}
  1075  		SplitTransactions(txs, func(batch types.Transactions) {
  1076  			p.EnqueueSendTransactions(batch, p.queue)
  1077  		})
  1078  
  1079  	case msg.Code == EventsMsg:
  1080  		if !h.syncStatus.AcceptEvents() {
  1081  			break
  1082  		}
  1083  
  1084  		var events native.EventPayloads
  1085  		if err := msg.Decode(&events); err != nil {
  1086  			return errResp(ErrDecode, "%v: %v", msg, err)
  1087  		}
  1088  		if err := checkLenLimits(len(events), events); err != nil {
  1089  			return err
  1090  		}
  1091  		_ = h.dagFetcher.NotifyReceived(eventIDsToInterfaces(events.IDs()))
  1092  		h.handleEvents(p, events.Bases(), events.Len() > 1)
  1093  
  1094  	case msg.Code == NewEventIDsMsg:
  1095  		// Fresh events arrived, make sure we have a valid and fresh graph to handle them
  1096  		if !h.syncStatus.AcceptEvents() {
  1097  			break
  1098  		}
  1099  		var announces hash.Events
  1100  		if err := msg.Decode(&announces); err != nil {
  1101  			return errResp(ErrDecode, "%v: %v", msg, err)
  1102  		}
  1103  		if err := checkLenLimits(len(announces), announces); err != nil {
  1104  			return err
  1105  		}
  1106  		h.handleEventHashes(p, announces)
  1107  
  1108  	case msg.Code == GetEventsMsg:
  1109  		var requests hash.Events
  1110  		if err := msg.Decode(&requests); err != nil {
  1111  			return errResp(ErrDecode, "%v: %v", msg, err)
  1112  		}
  1113  		if err := checkLenLimits(len(requests), requests); err != nil {
  1114  			return err
  1115  		}
  1116  
  1117  		rawEvents := make([]rlp.RawValue, 0, len(requests))
  1118  		ids := make(hash.Events, 0, len(requests))
  1119  		size := 0
  1120  		for _, id := range requests {
  1121  			if raw := h.store.GetEventPayloadRLP(id); raw != nil {
  1122  				rawEvents = append(rawEvents, raw)
  1123  				ids = append(ids, id)
  1124  				size += len(raw)
  1125  			} else {
  1126  				h.Log.Debug("requested event not found", "hash", id)
  1127  			}
  1128  			if size >= softResponseLimitSize {
  1129  				break
  1130  			}
  1131  		}
  1132  		if len(rawEvents) != 0 {
  1133  			p.EnqueueSendEventsRLP(rawEvents, ids, p.queue)
  1134  		}
  1135  
  1136  	case msg.Code == RequestEventsStream:
  1137  		var request dagstream.Request
  1138  		if err := msg.Decode(&request); err != nil {
  1139  			return errResp(ErrDecode, "%v: %v", msg, err)
  1140  		}
  1141  		if request.Limit.Num > hardLimitItems-1 {
  1142  			return errResp(ErrMsgTooLarge, "%v", msg)
  1143  		}
  1144  		if request.Limit.Size > protocolMaxMsgSize*2/3 {
  1145  			return errResp(ErrMsgTooLarge, "%v", msg)
  1146  		}
  1147  
  1148  		pid := p.id
  1149  		_, peerErr := h.dagSeeder.NotifyRequestReceived(dagstreamseeder.Peer{
  1150  			ID:        pid,
  1151  			SendChunk: p.SendEventsStream,
  1152  			Misbehaviour: func(err error) {
  1153  				h.peerMisbehaviour(pid, err)
  1154  			},
  1155  		}, request)
  1156  		if peerErr != nil {
  1157  			return peerErr
  1158  		}
  1159  
  1160  	case msg.Code == EventsStreamResponse:
  1161  		if !h.syncStatus.AcceptEvents() {
  1162  			break
  1163  		}
  1164  
  1165  		var chunk dagChunk
  1166  		if err := msg.Decode(&chunk); err != nil {
  1167  			return errResp(ErrDecode, "%v: %v", msg, err)
  1168  		}
  1169  		if err := checkLenLimits(len(chunk.Events)+len(chunk.IDs)+1, chunk); err != nil {
  1170  			return err
  1171  		}
  1172  
  1173  		if (len(chunk.Events) != 0) && (len(chunk.IDs) != 0) {
  1174  			return errors.New("expected either events or event hashes")
  1175  		}
  1176  		var last hash.Event
  1177  		if len(chunk.IDs) != 0 {
  1178  			h.handleEventHashes(p, chunk.IDs)
  1179  			last = chunk.IDs[len(chunk.IDs)-1]
  1180  		}
  1181  		if len(chunk.Events) != 0 {
  1182  			h.handleEvents(p, chunk.Events.Bases(), true)
  1183  			last = chunk.Events[len(chunk.Events)-1].ID()
  1184  		}
  1185  
  1186  		_ = h.dagLeecher.NotifyChunkReceived(chunk.SessionID, last, chunk.Done)
  1187  
  1188  	case msg.Code == RequestBVsStream:
  1189  		var request bvstream.Request
  1190  		if err := msg.Decode(&request); err != nil {
  1191  			return errResp(ErrDecode, "%v: %v", msg, err)
  1192  		}
  1193  		if request.Limit.Num > hardLimitItems-1 {
  1194  			return errResp(ErrMsgTooLarge, "%v", msg)
  1195  		}
  1196  		if request.Limit.Size > protocolMaxMsgSize*2/3 {
  1197  			return errResp(ErrMsgTooLarge, "%v", msg)
  1198  		}
  1199  
  1200  		pid := p.id
  1201  		_, peerErr := h.bvSeeder.NotifyRequestReceived(bvstreamseeder.Peer{
  1202  			ID:        pid,
  1203  			SendChunk: p.SendBVsStream,
  1204  			Misbehaviour: func(err error) {
  1205  				h.peerMisbehaviour(pid, err)
  1206  			},
  1207  		}, request)
  1208  		if peerErr != nil {
  1209  			return peerErr
  1210  		}
  1211  
  1212  	case msg.Code == BVsStreamResponse:
  1213  		var chunk bvsChunk
  1214  		if err := msg.Decode(&chunk); err != nil {
  1215  			return errResp(ErrDecode, "%v: %v", msg, err)
  1216  		}
  1217  		if err := checkLenLimits(len(chunk.BVs)+1, chunk); err != nil {
  1218  			return err
  1219  		}
  1220  
  1221  		var last bvstreamleecher.BVsID
  1222  		if len(chunk.BVs) != 0 {
  1223  			_ = h.bvProcessor.Enqueue(p.id, chunk.BVs, nil)
  1224  			last = bvstreamleecher.BVsID{
  1225  				Epoch:     chunk.BVs[len(chunk.BVs)-1].Val.Epoch,
  1226  				LastBlock: chunk.BVs[len(chunk.BVs)-1].Val.LastBlock(),
  1227  				ID:        chunk.BVs[len(chunk.BVs)-1].Signed.Locator.ID(),
  1228  			}
  1229  		}
  1230  
  1231  		_ = h.bvLeecher.NotifyChunkReceived(chunk.SessionID, last, chunk.Done)
  1232  
  1233  	case msg.Code == RequestBRsStream:
  1234  		var request brstream.Request
  1235  		if err := msg.Decode(&request); err != nil {
  1236  			return errResp(ErrDecode, "%v: %v", msg, err)
  1237  		}
  1238  		if request.Limit.Num > hardLimitItems-1 {
  1239  			return errResp(ErrMsgTooLarge, "%v", msg)
  1240  		}
  1241  		if request.Limit.Size > protocolMaxMsgSize*2/3 {
  1242  			return errResp(ErrMsgTooLarge, "%v", msg)
  1243  		}
  1244  
  1245  		pid := p.id
  1246  		_, peerErr := h.brSeeder.NotifyRequestReceived(brstreamseeder.Peer{
  1247  			ID:        pid,
  1248  			SendChunk: p.SendBRsStream,
  1249  			Misbehaviour: func(err error) {
  1250  				h.peerMisbehaviour(pid, err)
  1251  			},
  1252  		}, request)
  1253  		if peerErr != nil {
  1254  			return peerErr
  1255  		}
  1256  
  1257  	case msg.Code == BRsStreamResponse:
  1258  		if !h.syncStatus.AcceptBlockRecords() {
  1259  			break
  1260  		}
  1261  
  1262  		msgSize := uint64(msg.Size)
  1263  		var chunk brsChunk
  1264  		if err := msg.Decode(&chunk); err != nil {
  1265  			return errResp(ErrDecode, "%v: %v", msg, err)
  1266  		}
  1267  		if err := checkLenLimits(len(chunk.BRs)+1, chunk); err != nil {
  1268  			return err
  1269  		}
  1270  
  1271  		var last idx.Block
  1272  		if len(chunk.BRs) != 0 {
  1273  			_ = h.brProcessor.Enqueue(p.id, chunk.BRs, msgSize, nil)
  1274  			last = chunk.BRs[len(chunk.BRs)-1].Idx
  1275  		}
  1276  
  1277  		_ = h.brLeecher.NotifyChunkReceived(chunk.SessionID, last, chunk.Done)
  1278  
  1279  	case msg.Code == RequestEPsStream:
  1280  		var request epstream.Request
  1281  		if err := msg.Decode(&request); err != nil {
  1282  			return errResp(ErrDecode, "%v: %v", msg, err)
  1283  		}
  1284  		if request.Limit.Num > hardLimitItems-1 {
  1285  			return errResp(ErrMsgTooLarge, "%v", msg)
  1286  		}
  1287  		if request.Limit.Size > protocolMaxMsgSize*2/3 {
  1288  			return errResp(ErrMsgTooLarge, "%v", msg)
  1289  		}
  1290  
  1291  		pid := p.id
  1292  		_, peerErr := h.epSeeder.NotifyRequestReceived(epstreamseeder.Peer{
  1293  			ID:        pid,
  1294  			SendChunk: p.SendEPsStream,
  1295  			Misbehaviour: func(err error) {
  1296  				h.peerMisbehaviour(pid, err)
  1297  			},
  1298  		}, request)
  1299  		if peerErr != nil {
  1300  			return peerErr
  1301  		}
  1302  
  1303  	case msg.Code == EPsStreamResponse:
  1304  		msgSize := uint64(msg.Size)
  1305  		var chunk epsChunk
  1306  		if err := msg.Decode(&chunk); err != nil {
  1307  			return errResp(ErrDecode, "%v: %v", msg, err)
  1308  		}
  1309  		if err := checkLenLimits(len(chunk.EPs)+1, chunk); err != nil {
  1310  			return err
  1311  		}
  1312  
  1313  		var last idx.Epoch
  1314  		if len(chunk.EPs) != 0 {
  1315  			_ = h.epProcessor.Enqueue(p.id, chunk.EPs, msgSize, nil)
  1316  			last = chunk.EPs[len(chunk.EPs)-1].Record.Idx
  1317  		}
  1318  
  1319  		_ = h.epLeecher.NotifyChunkReceived(chunk.SessionID, last, chunk.Done)
  1320  
  1321  	default:
  1322  		return errResp(ErrInvalidMsgCode, "%v", msg.Code)
  1323  	}
  1324  	return nil
  1325  }
  1326  
  1327  func (h *handler) decideBroadcastAggressiveness(size int, passed time.Duration, peersNum int) int {
  1328  	percents := 100
  1329  	maxPercents := 1000000 * percents
  1330  	latencyVsThroughputTradeoff := maxPercents
  1331  	cfg := h.config.Protocol
  1332  	if cfg.ThroughputImportance != 0 {
  1333  		latencyVsThroughputTradeoff = (cfg.LatencyImportance * percents) / cfg.ThroughputImportance
  1334  	}
  1335  
  1336  	broadcastCost := passed * time.Duration(128+size) / 128
  1337  	broadcastAllCostTarget := time.Duration(latencyVsThroughputTradeoff) * (700 * time.Millisecond) / time.Duration(percents)
  1338  	broadcastSqrtCostTarget := broadcastAllCostTarget * 10
  1339  
  1340  	fullRecipients := 0
  1341  	if latencyVsThroughputTradeoff >= maxPercents {
  1342  		// edge case
  1343  		fullRecipients = peersNum
  1344  	} else if latencyVsThroughputTradeoff <= 0 {
  1345  		// edge case
  1346  		fullRecipients = 0
  1347  	} else if broadcastCost <= broadcastAllCostTarget {
  1348  		// if the event is small or was created recently, always send the full event to everyone
  1349  		fullRecipients = peersNum
  1350  	} else if broadcastCost <= broadcastSqrtCostTarget || passed == 0 {
  1351  		// if the event is big but was created recently, send the full event to a subset of peers
  1352  		fullRecipients = int(math.Sqrt(float64(peersNum)))
  1353  		if fullRecipients < 4 {
  1354  			fullRecipients = 4
  1355  		}
  1356  	}
  1357  	if fullRecipients > peersNum {
  1358  		fullRecipients = peersNum
  1359  	}
  1360  	return fullRecipients
  1361  }
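        // Worked example (annotation, not original handler code) with hypothetical inputs:
        // when LatencyImportance == ThroughputImportance the tradeoff is 100%, so
        // broadcastAllCostTarget = 700ms and broadcastSqrtCostTarget = 7s. For a 2 KiB event,
        // broadcastCost = passed * (128+2048)/128 = passed * 17:
        //
        //	passed = 30ms  -> cost ~ 510ms <= 700ms -> full event to every peer
        //	passed = 300ms -> cost ~ 5.1s  <= 7s    -> full event to max(4, sqrt(peersNum)) peers, capped at peersNum
        //	passed = 1s    -> cost = 17s   >  7s    -> hash-only announcement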
  1362  
  1363  // BroadcastEvent will either propagate an event to a subset of its peers, or
  1364  // will only announce its availability (depending on what's requested).
  1365  func (h *handler) BroadcastEvent(event *native.EventPayload, passed time.Duration) int {
  1366  	if passed < 0 {
  1367  		passed = 0
  1368  	}
  1369  	id := event.ID()
  1370  	peers := h.peers.PeersWithoutEvent(id)
  1371  	if len(peers) == 0 {
  1372  		log.Trace("Event is already known to all peers", "hash", id)
  1373  		return 0
  1374  	}
  1375  
  1376  	fullRecipients := h.decideBroadcastAggressiveness(event.Size(), passed, len(peers))
  1377  
  1378  	// Exclude low quality peers from fullBroadcast
  1379  	var fullBroadcast = make([]*peer, 0, fullRecipients)
  1380  	var hashBroadcast = make([]*peer, 0, len(peers))
  1381  	for _, p := range peers {
  1382  		if !p.Useless() && len(fullBroadcast) < fullRecipients {
  1383  			fullBroadcast = append(fullBroadcast, p)
  1384  		} else {
  1385  			hashBroadcast = append(hashBroadcast, p)
  1386  		}
  1387  	}
  1388  	for _, peer := range fullBroadcast {
  1389  		peer.AsyncSendEvents(native.EventPayloads{event}, peer.queue)
  1390  	}
  1391  	// Broadcast the event hash to the rest of the peers
  1392  	for _, peer := range hashBroadcast {
  1393  		peer.AsyncSendEventIDs(hash.Events{event.ID()}, peer.queue)
  1394  	}
  1395  	log.Trace("Broadcast event", "hash", id, "fullRecipients", len(fullBroadcast), "hashRecipients", len(hashBroadcast))
  1396  	return len(peers)
  1397  }
  1398  
  1399  // BroadcastTxs will propagate a batch of transactions to all peers which are not known to
  1400  // already have the given transaction.
  1401  func (h *handler) BroadcastTxs(txs types.Transactions) {
  1402  	var txset = make(map[*peer]types.Transactions)
  1403  
  1404  	// Broadcast transactions to a batch of peers not knowing about them
  1405  	totalSize := common.StorageSize(0)
  1406  	for _, tx := range txs {
  1407  		peers := h.peers.PeersWithoutTx(tx.Hash())
  1408  		for _, peer := range peers {
  1409  			txset[peer] = append(txset[peer], tx)
  1410  		}
  1411  		totalSize += tx.Size()
  1412  		log.Trace("Broadcast transaction", "hash", tx.Hash(), "recipients", len(peers))
  1413  	}
  1414  	fullRecipients := h.decideBroadcastAggressiveness(int(totalSize), time.Second, len(txset))
  1415  	i := 0
  1416  	for peer, txs := range txset {
  1417  		SplitTransactions(txs, func(batch types.Transactions) {
  1418  			if i < fullRecipients {
  1419  				peer.AsyncSendTransactions(batch, peer.queue)
  1420  			} else {
  1421  				txids := make([]common.Hash, batch.Len())
  1422  				for i, tx := range batch {
  1423  					txids[i] = tx.Hash()
  1424  				}
  1425  				peer.AsyncSendTransactionHashes(txids, peer.queue)
  1426  			}
  1427  		})
  1428  		i++
  1429  	}
  1430  }
  1431  
  1432  // Emitted events broadcast loop
  1433  func (h *handler) emittedBroadcastLoop() {
  1434  	defer h.loopsWg.Done()
  1435  	for {
  1436  		select {
  1437  		case emitted := <-h.emittedEventsCh:
  1438  			h.BroadcastEvent(emitted, 0)
  1439  		// Err() channel will be closed when unsubscribing.
  1440  		case <-h.emittedEventsSub.Err():
  1441  			return
  1442  		}
  1443  	}
  1444  }
  1445  
  1446  func (h *handler) broadcastProgress() {
  1447  	progress := h.myProgress()
  1448  	for _, peer := range h.peers.List() {
  1449  		peer.AsyncSendProgress(progress, peer.queue)
  1450  	}
  1451  }
  1452  
  1453  // Progress broadcast loop
  1454  func (h *handler) progressBroadcastLoop() {
  1455  	ticker := time.NewTicker(h.config.Protocol.ProgressBroadcastPeriod)
  1456  	defer ticker.Stop()
  1457  	defer h.loopsWg.Done()
  1458  	// stops when quitProgressBradcast is closed
  1459  	for {
  1460  		select {
  1461  		case <-ticker.C:
  1462  			h.broadcastProgress()
  1463  		case <-h.quitProgressBradcast:
  1464  			return
  1465  		}
  1466  	}
  1467  }
  1468  
  1469  func (h *handler) onNewEpochLoop() {
  1470  	defer h.loopsWg.Done()
  1471  	for {
  1472  		select {
  1473  		case myEpoch := <-h.newEpochsCh:
  1474  			h.dagProcessor.Clear()
  1475  			h.dagLeecher.OnNewEpoch(myEpoch)
  1476  		// Err() channel will be closed when unsubscribing.
  1477  		case <-h.newEpochsSub.Err():
  1478  			return
  1479  		}
  1480  	}
  1481  }
  1482  
  1483  func (h *handler) txBroadcastLoop() {
  1484  	ticker := time.NewTicker(h.config.Protocol.RandomTxHashesSendPeriod)
  1485  	defer ticker.Stop()
  1486  	defer h.loopsWg.Done()
  1487  	for {
  1488  		select {
  1489  		case notify := <-h.txsCh:
  1490  			h.BroadcastTxs(notify.Txs)
  1491  
  1492  		// Err() channel will be closed when unsubscribing.
  1493  		case <-h.txsSub.Err():
  1494  			return
  1495  
  1496  		case <-ticker.C:
  1497  			if !h.syncStatus.AcceptTxs() {
  1498  				break
  1499  			}
  1500  			peers := h.peers.List()
  1501  			if len(peers) == 0 {
  1502  				continue
  1503  			}
  1504  			randPeer := peers[rand.Intn(len(peers))]
  1505  			h.syncTransactions(randPeer, h.txpool.SampleHashes(h.config.Protocol.MaxRandomTxHashesSend))
  1506  		}
  1507  	}
  1508  }
  1509  
  1510  // NodeInfo represents a short summary of the sub-protocol metadata
  1511  // known about the host peer.
  1512  type NodeInfo struct {
  1513  	Network     uint64      `json:"network"` // network ID
  1514  	Genesis     common.Hash `json:"genesis"` // SHA3 hash of the host's genesis object
  1515  	Epoch       idx.Epoch   `json:"epoch"`
  1516  	NumOfBlocks idx.Block   `json:"blocks"`
  1517  	//Config  *params.ChainConfig `json:"config"`  // Chain configuration for the fork rules
  1518  }
  1519  
  1520  // NodeInfo retrieves some protocol metadata about the running host node.
  1521  func (h *handler) NodeInfo() *NodeInfo {
  1522  	numOfBlocks := h.store.GetLatestBlockIndex()
  1523  	return &NodeInfo{
  1524  		Network:     h.NetworkID,
  1525  		Genesis:     common.Hash(*h.store.GetGenesisID()),
  1526  		Epoch:       h.store.GetEpoch(),
  1527  		NumOfBlocks: numOfBlocks,
  1528  	}
  1529  }
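        // Example of the returned metadata with illustrative values (annotation, not original
        // handler code); the JSON keys follow the struct tags above:
        //
        //	{"network": 39, "genesis": "0x<genesis hash>", "epoch": 12345, "blocks": 6789012}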
  1530  
  1531  func getSemaphoreWarningFn(name string) func(dag.Metric, dag.Metric, dag.Metric) {
  1532  	return func(received dag.Metric, processing dag.Metric, releasing dag.Metric) {
  1533  		log.Warn(fmt.Sprintf("%s semaphore inconsistency", name),
  1534  			"receivedNum", received.Num, "receivedSize", received.Size,
  1535  			"processingNum", processing.Num, "processingSize", processing.Size,
  1536  			"releasingNum", releasing.Num, "releasingSize", releasing.Size)
  1537  	}
  1538  }