github.com/neatio-net/neatio@v1.7.3-0.20231114194659-f4d7a2226baa/neatptc/fetcher/fetcher.go

// Package fetcher contains the block announcement based synchronisation.
package fetcher

import (
	"errors"
	"math/rand"
	"time"

	"github.com/neatio-net/neatio/chain/consensus"
	"github.com/neatio-net/neatio/chain/core/types"
	"github.com/neatio-net/neatio/chain/log"
	"github.com/neatio-net/neatio/utilities/common"
	"github.com/neatio-net/neatio/utilities/common/prque"
)

const (
	arriveTimeout = 500 * time.Millisecond // Time allowance before an announced block is explicitly requested
	gatherSlack   = 100 * time.Millisecond // Interval used to collate almost-expired announces with fetches
	fetchTimeout  = 5 * time.Second        // Maximum allotted time to return an explicitly requested block
	maxUncleDist  = 7                      // Maximum allowed backward distance from the chain head
	maxQueueDist  = 32                     // Maximum allowed distance from the chain head to queue
	hashLimit     = 256                    // Maximum number of unique blocks a peer may have announced
	blockLimit    = 64                     // Maximum number of unique blocks a peer may have delivered
)

var (
	errTerminated = errors.New("terminated") // Returned when an operation is attempted on a stopped fetcher
)

// blockRetrievalFn is a callback type for retrieving a block from the local chain.
type blockRetrievalFn func(common.Hash) *types.Block

// headerRequesterFn is a callback type for sending a header retrieval request.
type headerRequesterFn func(common.Hash) error

// bodyRequesterFn is a callback type for sending a body retrieval request.
type bodyRequesterFn func([]common.Hash) error

// headerVerifierFn is a callback type to verify a block's header for fast propagation.
type headerVerifierFn func(header *types.Header) error

// blockBroadcasterFn is a callback type for broadcasting a block to connected peers.
type blockBroadcasterFn func(block *types.Block, propagate bool)

// chainHeightFn is a callback type to retrieve the current chain height.
type chainHeightFn func() uint64

// chainInsertFn is a callback type to insert a batch of blocks into the local chain.
type chainInsertFn func(types.Blocks) (int, error)

// peerDropFn is a callback type for dropping a peer detected as malicious.
type peerDropFn func(id string)

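// announce is the hash notification of the availability of a new block in the network.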
type announce struct {
	hash   common.Hash   // Hash of the block being announced
	number uint64        // Number of the block being announced (0 = unknown)
	header *types.Header // Header of the block partially reassembled
	time   time.Time     // Timestamp of the announcement

	origin string // Identifier of the peer originating the notification

	fetchHeader headerRequesterFn // Fetcher function to retrieve the header of an announced block
	fetchBodies bodyRequesterFn   // Fetcher function to retrieve the body of an announced block
}

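// headerFilterTask represents a batch of headers needing fetcher filtering.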
type headerFilterTask struct {
	peer    string
	headers []*types.Header
	time    time.Time
}

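// bodyFilterTask represents a batch of block bodies (transactions and uncles) needing fetcher filtering.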
type bodyFilterTask struct {
	peer         string
	transactions [][]*types.Transaction
	uncles       [][]*types.Header
	time         time.Time
}

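// inject represents a scheduled block import operation.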
type inject struct {
	origin string
	block  *types.Block
}

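// Fetcher is responsible for accumulating block announcements from various peers
// and scheduling them for retrieval.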
type Fetcher struct {
	// Various event channels
	notify chan *announce
	inject chan *inject

	blockFilter  chan chan []*types.Block
	headerFilter chan chan *headerFilterTask
	bodyFilter   chan chan *bodyFilterTask

	done chan common.Hash
	quit chan struct{}

	// Announce states
	announces  map[string]int              // Per peer announce counts to prevent memory exhaustion
	announced  map[common.Hash][]*announce // Announced blocks, scheduled for fetching
	fetching   map[common.Hash]*announce   // Announced blocks, currently fetching
	fetched    map[common.Hash][]*announce // Blocks with headers fetched, scheduled for body retrieval
	completing map[common.Hash]*announce   // Blocks with headers, currently body-completing

	// Block cache
	queue  *prque.Prque            // Queue containing the import operations (block number sorted)
	queues map[string]int          // Per peer block counts to prevent memory exhaustion
	queued map[common.Hash]*inject // Set of already queued blocks (to dedup imports)

	// Callbacks
	getBlock       blockRetrievalFn   // Retrieves a block from the local chain
	verifyHeader   headerVerifierFn   // Checks a block's header against the consensus rules
	broadcastBlock blockBroadcasterFn // Broadcasts a block to connected peers
	chainHeight    chainHeightFn      // Retrieves the current chain's height
	insertChain    chainInsertFn      // Injects a batch of blocks into the chain
	dropPeer       peerDropFn         // Drops a peer for misbehaving

	// Testing hooks
	announceChangeHook func(common.Hash, bool) // Method to call upon adding or deleting a hash from the announce list
	queueChangeHook    func(common.Hash, bool) // Method to call upon adding or deleting a block from the import queue
	fetchingHook       func([]common.Hash)     // Method to call upon starting a header fetch
	completingHook     func([]common.Hash)     // Method to call upon starting a block body fetch
	importedHook       func(*types.Block)      // Method to call upon successful block import
}

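// New creates a block fetcher to retrieve blocks based on hash announcements.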
func New(getBlock blockRetrievalFn, verifyHeader headerVerifierFn, broadcastBlock blockBroadcasterFn, chainHeight chainHeightFn, insertChain chainInsertFn, dropPeer peerDropFn) *Fetcher {
	return &Fetcher{
		notify:         make(chan *announce),
		inject:         make(chan *inject),
		blockFilter:    make(chan chan []*types.Block),
		headerFilter:   make(chan chan *headerFilterTask),
		bodyFilter:     make(chan chan *bodyFilterTask),
		done:           make(chan common.Hash),
		quit:           make(chan struct{}),
		announces:      make(map[string]int),
		announced:      make(map[common.Hash][]*announce),
		fetching:       make(map[common.Hash]*announce),
		fetched:        make(map[common.Hash][]*announce),
		completing:     make(map[common.Hash]*announce),
		queue:          prque.New(nil),
		queues:         make(map[string]int),
		queued:         make(map[common.Hash]*inject),
		getBlock:       getBlock,
		verifyHeader:   verifyHeader,
		broadcastBlock: broadcastBlock,
		chainHeight:    chainHeight,
		insertChain:    insertChain,
		dropPeer:       dropPeer,
	}
}

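// Start boots up the announcement based synchroniser, accepting and processing
// hash notifications and block fetches until termination is requested.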
func (f *Fetcher) Start() {
	go f.loop()
}

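// Stop terminates the announcement based synchroniser, canceling all pending operations.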
func (f *Fetcher) Stop() {
	close(f.quit)
}

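// Notify announces the fetcher of the potential availability of a new block in the network.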
func (f *Fetcher) Notify(peer string, hash common.Hash, number uint64, time time.Time,
	headerFetcher headerRequesterFn, bodyFetcher bodyRequesterFn) error {
	block := &announce{
		hash:        hash,
		number:      number,
		time:        time,
		origin:      peer,
		fetchHeader: headerFetcher,
		fetchBodies: bodyFetcher,
	}
	select {
	case f.notify <- block:
		return nil
	case <-f.quit:
		return errTerminated
	}
}

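// Enqueue schedules a propagated block for future import, if it has not yet been seen.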
func (f *Fetcher) Enqueue(peer string, block *types.Block) error {
	op := &inject{
		origin: peer,
		block:  block,
	}
	select {
	case f.inject <- op:
		return nil
	case <-f.quit:
		return errTerminated
	}
}

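// FilterHeaders extracts all the headers that were explicitly requested by the fetcher,
// returning those that should be handled differently.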
func (f *Fetcher) FilterHeaders(peer string, headers []*types.Header, time time.Time) []*types.Header {
	log.Trace("Filtering headers", "peer", peer, "headers", len(headers))

	// Send the filter channel to the fetcher
	filter := make(chan *headerFilterTask)

	select {
	case f.headerFilter <- filter:
	case <-f.quit:
		return nil
	}

	// Request the filtering of the header list
	select {
	case filter <- &headerFilterTask{peer: peer, headers: headers, time: time}:
	case <-f.quit:
		return nil
	}

	// Retrieve the headers remaining after filtering
	select {
	case task := <-filter:
		return task.headers
	case <-f.quit:
		return nil
	}
}

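// FilterBodies extracts all the block bodies that were explicitly requested by the fetcher,
// returning those that should be handled differently.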
func (f *Fetcher) FilterBodies(peer string, transactions [][]*types.Transaction, uncles [][]*types.Header, time time.Time) ([][]*types.Transaction, [][]*types.Header) {
	log.Trace("Filtering bodies", "peer", peer, "txs", len(transactions), "uncles", len(uncles))

	// Send the filter channel to the fetcher
	filter := make(chan *bodyFilterTask)

	select {
	case f.bodyFilter <- filter:
	case <-f.quit:
		return nil, nil
	}

	// Request the filtering of the body list
	select {
	case filter <- &bodyFilterTask{peer: peer, transactions: transactions, uncles: uncles, time: time}:
	case <-f.quit:
		return nil, nil
	}

	// Retrieve the bodies remaining after filtering
	select {
	case task := <-filter:
		return task.transactions, task.uncles
	case <-f.quit:
		return nil, nil
	}
}

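// loop is the main fetcher loop, checking and processing various notification events.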
func (f *Fetcher) loop() {
	// Iterate the block fetching until a quit is requested
	fetchTimer := time.NewTimer(0)
	completeTimer := time.NewTimer(0)

	for {
		// Clean up any expired block fetches
		for hash, announce := range f.fetching {
			if time.Since(announce.time) > fetchTimeout {
				f.forgetHash(hash)
			}
		}

		// Import any queued blocks that could potentially fit
		height := f.chainHeight()
		for !f.queue.Empty() {
			op := f.queue.PopItem().(*inject)
			hash := op.block.Hash()
			if f.queueChangeHook != nil {
				f.queueChangeHook(hash, false)
			}

			// If the block is too high up the chain, reschedule it for later
			number := op.block.NumberU64()
			if number > height+1 {
				f.queue.Push(op, -int64(number))
				if f.queueChangeHook != nil {
					f.queueChangeHook(hash, true)
				}
				break
			}

			// Otherwise, if fresh and still unknown, try and import
			if number+maxUncleDist < height || f.getBlock(hash) != nil {
				f.forgetBlock(hash)
				continue
			}
			f.insert(op.origin, op.block)
		}

		// Wait for an outside event to occur
		select {
		case <-f.quit:
			// Fetcher terminating, abort all operations
			return

		case notification := <-f.notify:
			// A block was announced, make sure the peer isn't DOSing us
			propAnnounceInMeter.Mark(1)

			count := f.announces[notification.origin] + 1
			if count > hashLimit {
				log.Debug("Peer exceeded outstanding announces", "peer", notification.origin, "limit", hashLimit)
				propAnnounceDOSMeter.Mark(1)
				break
			}

			// If we have a valid block number, check that it's potentially useful
			if notification.number > 0 {
				if dist := int64(notification.number) - int64(f.chainHeight()); dist < -maxUncleDist || dist > maxQueueDist {
					log.Debug("Peer discarded announcement", "peer", notification.origin, "number", notification.number, "hash", notification.hash, "distance", dist)
					propAnnounceDropMeter.Mark(1)
					break
				}
			}

			// All is well, schedule the announce if the block is not yet downloading
			if _, ok := f.fetching[notification.hash]; ok {
				break
			}
			if _, ok := f.completing[notification.hash]; ok {
				break
			}
			f.announces[notification.origin] = count
			f.announced[notification.hash] = append(f.announced[notification.hash], notification)
			if f.announceChangeHook != nil && len(f.announced[notification.hash]) == 1 {
				f.announceChangeHook(notification.hash, true)
			}
			if len(f.announced) == 1 {
				f.rescheduleFetch(fetchTimer)
			}

		case op := <-f.inject:
			// A direct block insertion was requested, try and fill any pending gaps
			propBroadcastInMeter.Mark(1)
			f.enqueue(op.origin, op.block)

		case hash := <-f.done:
			// A pending import finished, remove all traces of the notification
			f.forgetHash(hash)
			f.forgetBlock(hash)

		case <-fetchTimer.C:
			// At least one block's timer ran out, check for needing retrieval
			request := make(map[string][]common.Hash)

			for hash, announces := range f.announced {
				if time.Since(announces[0].time) > arriveTimeout-gatherSlack {
					// Pick a random peer to retrieve from, reset all others
					announce := announces[rand.Intn(len(announces))]
					f.forgetHash(hash)

					// If the block still didn't arrive, queue for fetching
					if f.getBlock(hash) == nil {
						request[announce.origin] = append(request[announce.origin], hash)
						f.fetching[hash] = announce
					}
				}
			}

			// Send out all block header requests
			for peer, hashes := range request {
				log.Trace("Fetching scheduled headers", "peer", peer, "list", hashes)

				// Create a closure of the fetch and schedule it on a new thread
				fetchHeader, hashes := f.fetching[hashes[0]].fetchHeader, hashes
				go func() {
					if f.fetchingHook != nil {
						f.fetchingHook(hashes)
					}
					for _, hash := range hashes {
						headerFetchMeter.Mark(1)
						fetchHeader(hash)
					}
				}()
			}
			// Schedule the next fetch if blocks are still pending
			f.rescheduleFetch(fetchTimer)

		case <-completeTimer.C:
			// At least one header's timer ran out, retrieve everything
			request := make(map[string][]common.Hash)

			for hash, announces := range f.fetched {
				// Pick a random peer to retrieve from, reset all others
				announce := announces[rand.Intn(len(announces))]
				f.forgetHash(hash)

				// If the block still didn't arrive, queue for completion
				if f.getBlock(hash) == nil {
					request[announce.origin] = append(request[announce.origin], hash)
					f.completing[hash] = announce
				}
			}

			// Send out all block body requests
			for peer, hashes := range request {
				log.Trace("Fetching scheduled bodies", "peer", peer, "list", hashes)

				if f.completingHook != nil {
					f.completingHook(hashes)
				}
				bodyFetchMeter.Mark(int64(len(hashes)))
				go f.completing[hashes[0]].fetchBodies(hashes)
			}
			// Schedule the next completion if blocks are still pending
			f.rescheduleComplete(completeTimer)

		case filter := <-f.headerFilter:
			// Headers arrived from a remote peer. Extract those that were explicitly
			// requested by the fetcher, and return everything else so it can be
			// delivered to other parts of the system.
			var task *headerFilterTask
			select {
			case task = <-filter:
			case <-f.quit:
				return
			}
			headerFilterInMeter.Mark(int64(len(task.headers)))

			// Split the batch of headers into unknown ones (to return to the caller),
			// known incomplete ones (requiring body retrievals) and completed blocks.
			unknown, incomplete, complete := []*types.Header{}, []*announce{}, []*types.Block{}
			for _, header := range task.headers {
				hash := header.Hash()

				// Filter fetcher-requested headers from other synchronisation algorithms
				if announce := f.fetching[hash]; announce != nil && announce.origin == task.peer && f.fetched[hash] == nil && f.completing[hash] == nil && f.queued[hash] == nil {
					// If the delivered header does not match the promised number, drop the announcer
					if header.Number.Uint64() != announce.number {
						log.Trace("Invalid block number fetched", "peer", announce.origin, "hash", header.Hash(), "announced", announce.number, "provided", header.Number)
						f.dropPeer(announce.origin)
						f.forgetHash(hash)
						continue
					}
					// Only keep the header if the block was not imported by other means
					if f.getBlock(hash) == nil {
						announce.header = header
						announce.time = task.time

						// If the block is empty (header only), short circuit into the final import queue
						if header.TxHash == types.DeriveSha(types.Transactions{}) && header.UncleHash == types.CalcUncleHash([]*types.Header{}) {
							log.Trace("Block empty, skipping body retrieval", "peer", announce.origin, "number", header.Number, "hash", header.Hash())

							block := types.NewBlockWithHeader(header)
							block.ReceivedAt = task.time

							complete = append(complete, block)
							f.completing[hash] = announce
							continue
						}
						// Otherwise add to the list of blocks needing completion
						incomplete = append(incomplete, announce)
					} else {
						log.Trace("Block already imported, discarding header", "peer", announce.origin, "number", header.Number, "hash", header.Hash())
						f.forgetHash(hash)
					}
				} else {
					// Fetcher doesn't know about it, add to the return list
					unknown = append(unknown, header)
				}
			}
			headerFilterOutMeter.Mark(int64(len(unknown)))
			select {
			case filter <- &headerFilterTask{headers: unknown, time: task.time}:
			case <-f.quit:
				return
			}

			// Schedule the retrieved headers for body completion
			for _, announce := range incomplete {
				hash := announce.header.Hash()
				if _, ok := f.completing[hash]; ok {
					continue
				}
				f.fetched[hash] = append(f.fetched[hash], announce)
				if len(f.fetched) == 1 {
					f.rescheduleComplete(completeTimer)
				}
			}

			// Schedule the header-only blocks for import
			for _, block := range complete {
				if announce := f.completing[block.Hash()]; announce != nil {
					f.enqueue(announce.origin, block)
				}
			}

		case filter := <-f.bodyFilter:
			// Block bodies arrived, extract any explicitly requested blocks, return the rest
			var task *bodyFilterTask
			select {
			case task = <-filter:
			case <-f.quit:
				return
			}
			bodyFilterInMeter.Mark(int64(len(task.transactions)))

			blocks := []*types.Block{}
			for i := 0; i < len(task.transactions) && i < len(task.uncles); i++ {
				// Match up a body to any possible completion request
				matched := false

				for hash, announce := range f.completing {
					if f.queued[hash] == nil {
						txnHash := types.DeriveSha(types.Transactions(task.transactions[i]))
						uncleHash := types.CalcUncleHash(task.uncles[i])

						if txnHash == announce.header.TxHash && uncleHash == announce.header.UncleHash && announce.origin == task.peer {
							// Mark the body matched, reassemble if still unknown
							matched = true

							if f.getBlock(hash) == nil {
								block := types.NewBlockWithHeader(announce.header).WithBody(task.transactions[i], task.uncles[i])
								block.ReceivedAt = task.time

								blocks = append(blocks, block)
							} else {
								f.forgetHash(hash)
							}
						}
					}
				}
				if matched {
					task.transactions = append(task.transactions[:i], task.transactions[i+1:]...)
					task.uncles = append(task.uncles[:i], task.uncles[i+1:]...)
					i--
					continue
				}
			}

			bodyFilterOutMeter.Mark(int64(len(task.transactions)))
			select {
			case filter <- task:
			case <-f.quit:
				return
			}

			// Schedule the retrieved blocks for ordered import
			for _, block := range blocks {
				if announce := f.completing[block.Hash()]; announce != nil {
					f.enqueue(announce.origin, block)
				}
			}
		}
	}
}

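// rescheduleFetch resets the specified fetch timer to the next announce timeout.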
func (f *Fetcher) rescheduleFetch(fetch *time.Timer) {
	// Short circuit if no blocks are announced
	if len(f.announced) == 0 {
		return
	}
	// Otherwise find the earliest expiring announcement
	earliest := time.Now()
	for _, announces := range f.announced {
		if earliest.After(announces[0].time) {
			earliest = announces[0].time
		}
	}
	fetch.Reset(arriveTimeout - time.Since(earliest))
}

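// rescheduleComplete resets the specified completion timer to the next fetch timeout.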
func (f *Fetcher) rescheduleComplete(complete *time.Timer) {
	// Short circuit if no headers are fetched
	if len(f.fetched) == 0 {
		return
	}
	// Otherwise find the earliest expiring announcement
	earliest := time.Now()
	for _, announces := range f.fetched {
		if earliest.After(announces[0].time) {
			earliest = announces[0].time
		}
	}
	complete.Reset(gatherSlack - time.Since(earliest))
}

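// enqueue schedules a new future import operation, if the block to be imported
// has not yet been seen.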
func (f *Fetcher) enqueue(peer string, block *types.Block) {
	hash := block.Hash()

	// Ensure the peer isn't DOSing us
	count := f.queues[peer] + 1
	if count > blockLimit {
		log.Debug("Discarded propagated block, exceeded allowance", "peer", peer, "number", block.Number(), "hash", hash, "limit", blockLimit)
		propBroadcastDOSMeter.Mark(1)
		f.forgetHash(hash)
		return
	}
	// Discard any past or too distant blocks
	if dist := int64(block.NumberU64()) - int64(f.chainHeight()); dist < -maxUncleDist || dist > maxQueueDist {
		log.Debug("Discarded propagated block, too far away", "peer", peer, "number", block.Number(), "hash", hash, "distance", dist)
		propBroadcastDropMeter.Mark(1)
		f.forgetHash(hash)
		return
	}
	// Schedule the block for future importing
	if _, ok := f.queued[hash]; !ok {
		op := &inject{
			origin: peer,
			block:  block,
		}
		f.queues[peer] = count
		f.queued[hash] = op
		f.queue.Push(op, -int64(block.NumberU64()))
		if f.queueChangeHook != nil {
			f.queueChangeHook(op.block.Hash(), true)
		}
		log.Debug("Queued propagated block", "peer", peer, "number", block.Number(), "hash", hash, "queued", f.queue.Size())
	}
}

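// insert spawns a new goroutine to run a block insertion into the chain. Once
// finished, it signals completion on the done channel so the fetcher can forget
// the block.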
func (f *Fetcher) insert(peer string, block *types.Block) {
	hash := block.Hash()

	// Run the import on a new thread
	log.Debug("Importing propagated block", "peer", peer, "number", block.Number(), "hash", hash)
	go func() {
		defer func() { f.done <- hash }()

		// If the parent's unknown, abort insertion
		parent := f.getBlock(block.ParentHash())
		if parent == nil {
			log.Debug("Unknown parent of propagated block", "peer", peer, "number", block.Number(), "hash", hash, "parent", block.ParentHash())
			return
		}
		// Quickly validate the header and propagate the block if it passes
		switch err := f.verifyHeader(block.Header()); err {
		case nil:
			// All ok, quickly propagate to our peers
			propBroadcastOutTimer.UpdateSince(block.ReceivedAt)
			go f.broadcastBlock(block, true)

		case consensus.ErrFutureBlock:
			// Weird future block, don't fail, but neither propagate

		default:
			// Something went very wrong, drop the peer
			log.Debug("Propagated block verification failed", "peer", peer, "number", block.Number(), "hash", hash, "err", err)
			f.dropPeer(peer)
			return
		}
		// Run the actual import and log any issues
		if _, err := f.insertChain(types.Blocks{block}); err != nil {
			log.Debug("Propagated block import failed", "peer", peer, "number", block.Number(), "hash", hash, "err", err)
			return
		}
		// If import succeeded, broadcast the block
		propAnnounceOutTimer.UpdateSince(block.ReceivedAt)
		go f.broadcastBlock(block, false)

		// Invoke the testing hook if needed
		if f.importedHook != nil {
			f.importedHook(block)
		}
	}()
}

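// forgetHash removes all traces of a block announcement from the fetcher's
// internal state.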
func (f *Fetcher) forgetHash(hash common.Hash) {
	// Remove all pending announces and decrement DOS counters
	for _, announce := range f.announced[hash] {
		f.announces[announce.origin]--
		if f.announces[announce.origin] == 0 {
			delete(f.announces, announce.origin)
		}
	}
	delete(f.announced, hash)
	if f.announceChangeHook != nil {
		f.announceChangeHook(hash, false)
	}
	// Remove any pending fetches and decrement the DOS counters
	if announce := f.fetching[hash]; announce != nil {
		f.announces[announce.origin]--
		if f.announces[announce.origin] == 0 {
			delete(f.announces, announce.origin)
		}
		delete(f.fetching, hash)
	}
	// Remove any pending completion requests and decrement the DOS counters
	for _, announce := range f.fetched[hash] {
		f.announces[announce.origin]--
		if f.announces[announce.origin] == 0 {
			delete(f.announces, announce.origin)
		}
	}
	delete(f.fetched, hash)

	// Remove any pending completions and decrement the DOS counters
	if announce := f.completing[hash]; announce != nil {
		f.announces[announce.origin]--
		if f.announces[announce.origin] == 0 {
			delete(f.announces, announce.origin)
		}
		delete(f.completing, hash)
	}
}

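// forgetBlock removes all traces of a queued block from the fetcher's internal
// state.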
func (f *Fetcher) forgetBlock(hash common.Hash) {
	if insert := f.queued[hash]; insert != nil {
		f.queues[insert.origin]--
		if f.queues[insert.origin] == 0 {
			delete(f.queues, insert.origin)
		}
		delete(f.queued, hash)
	}
}