github.com/klaytn/klaytn@v1.10.2/datasync/chaindatafetcher/chaindata_fetcher.go

// Copyright 2020 The klaytn Authors
// This file is part of the klaytn library.
//
// The klaytn library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The klaytn library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the klaytn library. If not, see <http://www.gnu.org/licenses/>.

package chaindatafetcher

import (
	"context"
	"errors"
	"fmt"
	"sync"
	"sync/atomic"
	"time"

	"github.com/klaytn/klaytn/blockchain"
	"github.com/klaytn/klaytn/blockchain/types"
	"github.com/klaytn/klaytn/blockchain/vm"
	"github.com/klaytn/klaytn/common"
	"github.com/klaytn/klaytn/consensus"
	"github.com/klaytn/klaytn/datasync/chaindatafetcher/kafka"
	"github.com/klaytn/klaytn/datasync/chaindatafetcher/kas"
	cfTypes "github.com/klaytn/klaytn/datasync/chaindatafetcher/types"
	"github.com/klaytn/klaytn/event"
	"github.com/klaytn/klaytn/log"
	"github.com/klaytn/klaytn/networks/p2p"
	"github.com/klaytn/klaytn/networks/rpc"
	"github.com/klaytn/klaytn/node"
	"github.com/klaytn/klaytn/node/cn/tracers"
	"github.com/rcrowley/go-metrics"
)

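// Fetching states stored in the fetchingStarted and rangeFetchingStarted flags
// via sync/atomic. stopped is deliberately the zero value of uint32, so a
// freshly constructed fetcher starts out in the stopped state.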
const (
	stopped = uint32(iota)
	running
)

var (
	logger              = log.NewModuleLogger(log.ChainDataFetcher)
	errUnsupportedMode  = errors.New("the given chaindatafetcher mode is not supported")
	errMaxRetryExceeded = errors.New("the maximum number of retries has been exceeded")
)

//go:generate mockgen -destination=./mocks/blockchain_mock.go -package=mocks github.com/klaytn/klaytn/datasync/chaindatafetcher BlockChain
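// BlockChain is the narrow view of the blockchain required by the fetcher.
// Declaring it as an interface lets tests substitute the mock generated by the
// go:generate directive above.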
type BlockChain interface {
	SubscribeChainEvent(ch chan<- blockchain.ChainEvent) event.Subscription
	CurrentHeader() *types.Header
	GetBlockByNumber(number uint64) *types.Block
	GetReceiptsByBlockHash(blockHash common.Hash) types.Receipts
}

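// ChainDataFetcher subscribes to chain events and delivers block data to the
// configured Repository (a KAS database or Kafka), persisting a checkpoint
// through CheckpointDB so that fetching resumes where it left off after a
// restart.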
type ChainDataFetcher struct {
	config *ChainDataFetcherConfig

	engine     consensus.Engine
	blockchain BlockChain
	debugAPI   *tracers.API

	chainCh  chan blockchain.ChainEvent
	chainSub event.Subscription

	reqCh  chan *cfTypes.Request // TODO-ChainDataFetcher add logic to insert new requests from APIs to this channel
	stopCh chan struct{}

	numHandlers int

	checkpointMu  sync.RWMutex
	checkpoint    int64
	checkpointMap map[int64]struct{}

	wg sync.WaitGroup

	repo         Repository
	checkpointDB CheckpointDB
	setters      []ComponentSetter

	fetchingStarted      uint32
	fetchingStopCh       chan struct{}
	fetchingWg           sync.WaitGroup
	rangeFetchingStarted uint32
	rangeFetchingStopCh  chan struct{}
	rangeFetchingWg      sync.WaitGroup

	dataSizeLocker        sync.RWMutex
	processingDataSize    common.StorageSize
	maxProcessingDataSize common.StorageSize
}

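// NewChainDataFetcher builds a fetcher wired for the configured mode: ModeKAS
// writes through the KAS repository, ModeKafka publishes through the Kafka
// repository, and any other mode yields errUnsupportedMode.
//
// A minimal construction sketch; the config values below are illustrative
// assumptions rather than recommended defaults, kafkaCfg is a
// *kafka.KafkaConfig prepared elsewhere, and ctx is the surrounding
// *node.ServiceContext:
//
//	cfg := &ChainDataFetcherConfig{
//		Mode:                  ModeKafka,
//		KafkaConfig:           kafkaCfg,
//		BlockChannelSize:      100,
//		JobChannelSize:        100,
//		NumHandlers:           10,
//		MaxProcessingDataSize: 500, // MB
//	}
//	f, err := NewChainDataFetcher(ctx, cfg)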
func NewChainDataFetcher(ctx *node.ServiceContext, cfg *ChainDataFetcherConfig) (*ChainDataFetcher, error) {
	var (
		repo         Repository
		checkpointDB CheckpointDB
		setters      []ComponentSetter
		err          error
	)
	switch cfg.Mode {
	case ModeKAS:
		repo, checkpointDB, setters, err = getKasComponents(cfg.KasConfig)
		if err != nil {
			return nil, err
		}
	case ModeKafka:
		repo, checkpointDB, setters, err = getKafkaComponents(cfg.KafkaConfig)
		if err != nil {
			return nil, err
		}
	default:
		logger.Error("the chaindatafetcher mode is not supported", "mode", cfg.Mode)
		return nil, errUnsupportedMode
	}
	return &ChainDataFetcher{
		config:                cfg,
		chainCh:               make(chan blockchain.ChainEvent, cfg.BlockChannelSize),
		reqCh:                 make(chan *cfTypes.Request, cfg.JobChannelSize),
		stopCh:                make(chan struct{}),
		numHandlers:           cfg.NumHandlers,
		checkpointMap:         make(map[int64]struct{}),
		repo:                  repo,
		checkpointDB:          checkpointDB,
		setters:               setters,
		processingDataSize:    common.StorageSize(0),
		maxProcessingDataSize: common.StorageSize(cfg.MaxProcessingDataSize * 1024 * 1024), // in MB
	}, nil
}

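// getKasComponents builds the KAS-mode components. The single KAS repository
// fills all three roles: Repository, CheckpointDB, and ComponentSetter.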
func getKasComponents(cfg *kas.KASConfig) (Repository, CheckpointDB, []ComponentSetter, error) {
	repo, err := kas.NewRepository(cfg)
	if err != nil {
		return nil, nil, nil, err
	}
	return repo, repo, []ComponentSetter{repo}, nil
}

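// getKafkaComponents builds the Kafka-mode components. Unlike KAS, the
// checkpoint database is a separate object from the repository, so both need
// the chain components injected.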
func getKafkaComponents(cfg *kafka.KafkaConfig) (Repository, CheckpointDB, []ComponentSetter, error) {
	repo, err := kafka.NewRepository(cfg)
	if err != nil {
		return nil, nil, nil, err
	}
	checkpointDB := kafka.NewCheckpointDB()
	return repo, checkpointDB, []ComponentSetter{repo, checkpointDB}, nil
}

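// Protocols returns no p2p protocols: the fetcher only consumes data from the
// local node and exposes nothing over the devp2p network.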
func (f *ChainDataFetcher) Protocols() []p2p.Protocol {
	return []p2p.Protocol{}
}

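// APIs exposes the public "chaindatafetcher" RPC namespace backed by
// PublicChainDataFetcherAPI.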
func (f *ChainDataFetcher) APIs() []rpc.API {
	return []rpc.API{
		{
			Namespace: "chaindatafetcher",
			Version:   "1.0",
			Service:   NewPublicChainDataFetcherAPI(f),
			Public:    true,
		},
	}
}

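// Start implements node.Service. It launches numHandlers handler goroutines
// and, unless NoDefaultStart is set, immediately begins fetching from the
// stored checkpoint.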
func (f *ChainDataFetcher) Start(server p2p.Server) error {
	// launch multiple goroutines to handle new blocks.
	// NOTE: Add is called here rather than inside handleRequest so that a
	// concurrent Stop cannot observe the WaitGroup before the handlers register.
	f.wg.Add(f.numHandlers)
	for i := 0; i < f.numHandlers; i++ {
		go f.handleRequest()
	}

	if !f.config.NoDefaultStart {
		if err := f.startFetching(); err != nil {
			logger.Error("failed to start fetching", "err", err)
			return err
		}
	}
	logger.Info("chaindata fetcher is started", "numHandlers", f.numHandlers)
	return nil
}

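// Stop implements node.Service. It halts both fetching loops, closes stopCh,
// and blocks until every handler goroutine has exited.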
func (f *ChainDataFetcher) Stop() error {
	f.stopFetching()
	f.stopRangeFetching()
	logger.Info("wait for all goroutines to be terminated...", "numGoroutines", f.config.NumHandlers)
	close(f.stopCh)
	f.wg.Wait()
	logger.Info("chaindata fetcher is stopped")
	return nil
}

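// sendRequests enqueues one request per block in [startBlock, endBlock] on
// f.reqCh. Before each send it applies backpressure: while the total size of
// the data being processed exceeds maxProcessingDataSize, it sleeps for
// DefaultThrottlingInterval and re-checks. Closing stopCh aborts the loop at
// the current block.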
func (f *ChainDataFetcher) sendRequests(startBlock, endBlock uint64, reqType cfTypes.RequestType, shouldUpdateCheckpoint bool, stopCh chan struct{}) {
	logger.Info("started sending requests", "startBlock", startBlock, "endBlock", endBlock, "reqType", reqType)
	for i := startBlock; i <= endBlock; i++ {
		for { // poll until the processing data size falls below the max
			select {
			case <-stopCh:
				logger.Info("stopped making requests", "startBlock", startBlock, "endBlock", endBlock, "stoppedBlock", i)
				return
			default:
			}

			f.dataSizeLocker.RLock()
			size := f.processingDataSize
			f.dataSizeLocker.RUnlock()
			if size <= f.maxProcessingDataSize {
				break
			}

			logger.Warn("throttling the requests, sleeping", "interval", DefaultThrottlingInterval, "processingDataSize", size, "maxDataSize", f.maxProcessingDataSize)
			time.Sleep(DefaultThrottlingInterval)
		}
		select {
		case <-stopCh:
			logger.Info("stopped making requests", "startBlock", startBlock, "endBlock", endBlock, "stoppedBlock", i)
			return
		case f.reqCh <- cfTypes.NewRequest(reqType, shouldUpdateCheckpoint, i):
		}
	}
	logger.Info("finished sending requests", "startBlock", startBlock, "endBlock", endBlock, "reqType", reqType)
}

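// startFetching transitions the fetcher to the running state, subscribes to
// new chain events, and launches a goroutine that backfills every block from
// the stored checkpoint up to the current head. It fails if fetching is
// already running.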
func (f *ChainDataFetcher) startFetching() error {
	if !atomic.CompareAndSwapUint32(&f.fetchingStarted, stopped, running) {
		return errors.New("fetching is already started")
	}

	// subscribe to chain events in order to handle new blocks.
	f.chainSub = f.blockchain.SubscribeChainEvent(f.chainCh)
	// read the checkpoint under the lock since handlers may update it concurrently.
	f.checkpointMu.RLock()
	checkpoint := uint64(f.checkpoint)
	f.checkpointMu.RUnlock()
	currentBlock := f.blockchain.CurrentHeader().Number.Uint64()

	f.fetchingStopCh = make(chan struct{})
	f.fetchingWg.Add(1)

	// launch a goroutine to handle blocks from the checkpoint to the head block.
	go func() {
		defer f.fetchingWg.Done()
		switch f.config.Mode {
		case ModeKAS:
			f.sendRequests(checkpoint, currentBlock, cfTypes.RequestTypeAll, true, f.fetchingStopCh)
		case ModeKafka:
			f.sendRequests(checkpoint, currentBlock, cfTypes.RequestTypeGroupAll, true, f.fetchingStopCh)
		default:
			logger.Error("the chaindatafetcher mode is not supported", "mode", f.config.Mode, "checkpoint", checkpoint, "currentBlock", currentBlock)
		}
	}()
	logger.Info("fetching is started", "startedCheckpoint", checkpoint, "currentBlock", currentBlock)
	return nil
}

func (f *ChainDataFetcher) stopFetching() error {
	if !atomic.CompareAndSwapUint32(&f.fetchingStarted, running, stopped) {
		return errors.New("fetching is not running")
	}

	f.chainSub.Unsubscribe()
	close(f.fetchingStopCh)
	f.fetchingWg.Wait()
	logger.Info("fetching is stopped")
	return nil
}

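// startRangeFetching backfills an explicit block range with the given request
// type without touching the checkpoint. Only one range fetch may run at a
// time; the running flag resets itself once the range is fully enqueued.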
func (f *ChainDataFetcher) startRangeFetching(startBlock, endBlock uint64, reqType cfTypes.RequestType) error {
	if !atomic.CompareAndSwapUint32(&f.rangeFetchingStarted, stopped, running) {
		return errors.New("range fetching is already started")
	}

	f.rangeFetchingStopCh = make(chan struct{})
	f.rangeFetchingWg.Add(1)
	go func() {
		defer f.rangeFetchingWg.Done()
		f.sendRequests(startBlock, endBlock, reqType, false, f.rangeFetchingStopCh)
		atomic.StoreUint32(&f.rangeFetchingStarted, stopped)
	}()
	logger.Info("range fetching is started", "startBlock", startBlock, "endBlock", endBlock, "reqType", reqType)
	return nil
}

func (f *ChainDataFetcher) stopRangeFetching() error {
	if !atomic.CompareAndSwapUint32(&f.rangeFetchingStarted, running, stopped) {
		return errors.New("range fetching is not running")
	}
	close(f.rangeFetchingStopCh)
	f.rangeFetchingWg.Wait()
	logger.Info("range fetching is stopped")
	return nil
}

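// makeChainEvent rebuilds a blockchain.ChainEvent for a historical block: the
// block itself, its receipts and logs, and, when the block carries
// transactions, the internal transaction traces obtained from the debug API
// with the fastCallTracer. A nil trace result is replaced with an empty
// placeholder trace so downstream consumers keep a one-to-one mapping with
// transactions.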
func (f *ChainDataFetcher) makeChainEvent(blockNumber uint64) (blockchain.ChainEvent, error) {
	var logs []*types.Log
	block := f.blockchain.GetBlockByNumber(blockNumber)
	if block == nil {
		return blockchain.ChainEvent{}, fmt.Errorf("GetBlockByNumber failed. blockNumber=%v", blockNumber)
	}
	receipts := f.blockchain.GetReceiptsByBlockHash(block.Hash())
	if receipts == nil {
		return blockchain.ChainEvent{}, fmt.Errorf("GetReceiptsByBlockHash failed. blockNumber=%v", blockNumber)
	}
	for _, r := range receipts {
		logs = append(logs, r.Logs...)
	}
	var internalTraces []*vm.InternalTxTrace
	if block.Transactions().Len() > 0 {
		fct := "fastCallTracer"
		timeout := "24h"
		results, err := f.debugAPI.TraceBlockByNumber(context.Background(), rpc.BlockNumber(block.Number().Int64()), &tracers.TraceConfig{
			Tracer:  &fct,
			Timeout: &timeout,
		})
		if err != nil {
			traceAPIErrorCounter.Inc(1)
			logger.Error("Failed to call trace block by number", "err", err, "blockNumber", block.NumberU64())
			return blockchain.ChainEvent{}, err
		}
		for _, r := range results {
			if r.Result != nil {
				// fastCallTracer results are *vm.InternalTxTrace values.
				internalTraces = append(internalTraces, r.Result.(*vm.InternalTxTrace))
			} else {
				traceAPIErrorCounter.Inc(1)
				logger.Error("the trace result is nil", "err", r.Error, "blockNumber", blockNumber)
				// keep a placeholder so traces stay aligned with transactions.
				internalTraces = append(internalTraces, &vm.InternalTxTrace{Value: "0x0", Calls: []*vm.InternalTxTrace{}})
			}
		}
	}

	return blockchain.ChainEvent{
		Block:            block,
		Hash:             block.Hash(),
		Receipts:         receipts,
		Logs:             logs,
		InternalTxTraces: internalTraces,
	}, nil
}

func (f *ChainDataFetcher) Components() []interface{} {
	return nil
}

func (f *ChainDataFetcher) setDebugAPI(apis []rpc.API) {
	for _, a := range apis {
		switch s := a.Service.(type) {
		case *tracers.API:
			f.debugAPI = s
		}
	}
}

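// setCheckpoint loads the persisted checkpoint from the checkpoint database.
// A stored value of zero is treated as "no checkpoint yet", in which case
// fetching starts from the current head block rather than from genesis.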
func (f *ChainDataFetcher) setCheckpoint() {
	checkpoint, err := f.checkpointDB.ReadCheckpoint()
	if err != nil {
		logger.Crit("ReadCheckpoint failed", "err", err)
	}

	if checkpoint == 0 {
		// no persisted checkpoint yet; start from the current head block.
		checkpoint = f.blockchain.CurrentHeader().Number.Int64()
	}
	f.checkpoint = checkpoint
	logger.Info("Chaindatafetcher initial checkpoint is set", "checkpoint", f.checkpoint)
}

func (f *ChainDataFetcher) setComponent(component interface{}) {
	switch v := component.(type) {
	case *blockchain.BlockChain:
		f.blockchain = v
	case consensus.Engine:
		f.engine = v
	case []rpc.API:
		f.setDebugAPI(v)
	}
}

// SetComponents injects the chain components into the fetcher and its
// setters, then initializes the checkpoint.
func (f *ChainDataFetcher) SetComponents(components []interface{}) {
	for _, component := range components {
		f.setComponent(component)
		for _, setter := range f.setters {
			setter.SetComponent(component)
		}
	}
	f.setCheckpoint()
}

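// handleRequestByType dispatches the chain event to the repository once per
// request type encoded in reqType. Request types are bit flags, so the loop
// walks each single-type flag by shifting (targetType << 1) and handles it
// only when CheckRequestType reports that the bit is set in reqType; a
// combined request such as (RequestTypeTransaction|RequestTypeTrace) is thus
// handled once per set bit.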
func (f *ChainDataFetcher) handleRequestByType(reqType cfTypes.RequestType, shouldUpdateCheckpoint bool, ev blockchain.ChainEvent) error {
	now := time.Now()
	// TODO-ChainDataFetcher parallelize handling data

	// iterate over all types of requests
	// - RequestTypeTransaction
	// - RequestTypeTokenTransfer
	// - RequestTypeContract
	// - RequestTypeTrace
	// - RequestTypeBlockGroup
	// - RequestTypeTraceGroup
	for targetType := cfTypes.RequestTypeTransaction; targetType < cfTypes.RequestTypeLength; targetType = targetType << 1 {
		if cfTypes.CheckRequestType(reqType, targetType) {
			if err := f.updateInsertionTimeGauge(f.retryFunc(f.repo.HandleChainEvent))(ev, targetType); err != nil {
				logger.Error("failed to handle the chain event", "blockNumber", ev.Block.NumberU64(), "err", err, "reqType", reqType, "targetType", targetType)
				return err
			}
		}
	}
	elapsed := time.Since(now)
	totalInsertionTimeGauge.Update(elapsed.Milliseconds())

	if shouldUpdateCheckpoint {
		if err := f.updateCheckpoint(ev.Block.Number().Int64()); err != nil {
			// checkpoint persistence failures do not fail the request itself.
			logger.Error("failed to update the checkpoint", "err", err, "blockNumber", ev.Block.NumberU64())
		}
	}
	handledBlockNumberGauge.Update(ev.Block.Number().Int64())
	return nil
}

// resetChainCh drains any buffered chain events without blocking.
func (f *ChainDataFetcher) resetChainCh() {
	for {
		select {
		case <-f.chainCh:
		default:
			return
		}
	}
}

// resetRequestCh drains any buffered requests without blocking.
func (f *ChainDataFetcher) resetRequestCh() {
	for {
		select {
		case <-f.reqCh:
		default:
			return
		}
	}
}

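// pause is invoked when insertion retries are exhausted: it stops both the
// event-driven and range fetching loops and drains any queued chain events
// and requests so that fetching can be restarted from a clean state.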
func (f *ChainDataFetcher) pause() {
	f.stopFetching()
	f.stopRangeFetching()
	f.resetChainCh()
	f.resetRequestCh()
}

// updateDataSize adjusts the total size of the data being processed; callers
// pass a negative delta to release the size once handling is done.
func (f *ChainDataFetcher) updateDataSize(dataSize common.StorageSize) {
	f.dataSizeLocker.Lock()
	defer f.dataSizeLocker.Unlock()
	f.processingDataSize += dataSize
}

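// handleRequest is the worker loop run by each handler goroutine. It serves
// two sources until stopCh closes: live chain events from the subscription
// and backfill requests from reqCh, for which it first reconstructs the chain
// event. Hitting errMaxRetryExceeded on either path pauses the whole fetcher.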
func (f *ChainDataFetcher) handleRequest() {
	// the corresponding wg.Add is done in Start before this goroutine is launched.
	defer f.wg.Done()
	for {
		select {
		case <-f.stopCh:
			logger.Info("handleRequest is stopped")
			return
		case ev := <-f.chainCh:
			numChainEventGauge.Update(int64(len(f.chainCh)))
			var err error
			switch f.config.Mode {
			case ModeKAS:
				err = f.handleRequestByType(cfTypes.RequestTypeAll, true, ev)
			case ModeKafka:
				err = f.handleRequestByType(cfTypes.RequestTypeGroupAll, true, ev)
			default:
				logger.Error("the chaindatafetcher mode is not supported", "mode", f.config.Mode, "blockNumber", ev.Block.NumberU64())
			}

			if err == errMaxRetryExceeded {
				logger.Error("the chaindatafetcher reached the maximum retries. it pauses fetching and clears the channels", "blockNum", ev.Block.NumberU64())
				f.pause()
			}
		case req := <-f.reqCh:
			numRequestsGauge.Update(int64(len(f.reqCh)))
			ev, err := f.makeChainEvent(req.BlockNumber)
			if err != nil {
				// TODO-ChainDataFetcher handle error
				logger.Error("failed to make a chain event", "err", err)
				break
			}

			f.updateDataSize(ev.JsonSize())
			err = f.handleRequestByType(req.ReqType, req.ShouldUpdateCheckpoint, ev)
			if err == errMaxRetryExceeded {
				logger.Error("the chaindatafetcher reached the maximum retries. it pauses fetching and clears the channels", "blockNum", ev.Block.NumberU64())
				f.pause()
			}
			f.updateDataSize(-ev.JsonSize())
		}
	}
}

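// updateCheckpoint records that block num has been handled and advances the
// persisted checkpoint across every contiguous run of handled blocks. Because
// handlers run concurrently, blocks may complete out of order; the map buffers
// completions until the gap closes. For example, with checkpoint 100 and
// blocks 101 and 102 already done, the map holds {101, 102}; once 100
// completes, the checkpoint jumps to 103 and the three entries are deleted.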
func (f *ChainDataFetcher) updateCheckpoint(num int64) error {
	f.checkpointMu.Lock()
	defer f.checkpointMu.Unlock()
	f.checkpointMap[num] = struct{}{}

	updated := false
	newCheckpoint := f.checkpoint
	for {
		if _, ok := f.checkpointMap[newCheckpoint]; !ok {
			break
		}
		delete(f.checkpointMap, newCheckpoint)
		newCheckpoint++
		updated = true
	}

	if updated {
		f.checkpoint = newCheckpoint
		checkpointGauge.Update(f.checkpoint)
		return f.checkpointDB.WriteCheckpoint(newCheckpoint)
	}
	return nil
}

func getInsertionTimeGauge(reqType cfTypes.RequestType) metrics.Gauge {
	switch reqType {
	case cfTypes.RequestTypeTransaction:
		return txsInsertionTimeGauge
	case cfTypes.RequestTypeTokenTransfer:
		return tokenTransfersInsertionTimeGauge
	case cfTypes.RequestTypeContract:
		return contractsInsertionTimeGauge
	case cfTypes.RequestTypeTrace:
		return tracesInsertionTimeGauge
	case cfTypes.RequestTypeBlockGroup:
		return blockGroupInsertionTimeGauge
	case cfTypes.RequestTypeTraceGroup:
		return traceGroupInsertionTimeGauge
	default:
		logger.Warn("the request type is not supported", "type", reqType)
		return metrics.NilGauge{}
	}
}

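// updateInsertionTimeGauge decorates a HandleChainEventFn so that the elapsed
// insertion time, in milliseconds, is reported to the per-type gauge on every
// successful call.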
func (f *ChainDataFetcher) updateInsertionTimeGauge(insert HandleChainEventFn) HandleChainEventFn {
	return func(chainEvent blockchain.ChainEvent, reqType cfTypes.RequestType) error {
		now := time.Now()
		if err := insert(chainEvent, reqType); err != nil {
			return err
		}
		elapsed := time.Since(now)
		gauge := getInsertionTimeGauge(reqType)
		gauge.Update(elapsed.Milliseconds())
		return nil
	}
}

func getInsertionRetryGauge(reqType cfTypes.RequestType) metrics.Gauge {
	switch reqType {
	case cfTypes.RequestTypeTransaction:
		return txsInsertionRetryGauge
	case cfTypes.RequestTypeTokenTransfer:
		return tokenTransfersInsertionRetryGauge
	case cfTypes.RequestTypeContract:
		return contractsInsertionRetryGauge
	case cfTypes.RequestTypeTrace:
		return tracesInsertionRetryGauge
	case cfTypes.RequestTypeBlockGroup:
		return blockGroupInsertionRetryGauge
	case cfTypes.RequestTypeTraceGroup:
		return traceGroupInsertionRetryGauge
	default:
		logger.Warn("the request type is not supported", "type", reqType)
		return metrics.NilGauge{}
	}
}

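// retryFunc decorates a HandleChainEventFn with retries: a failed insertion is
// retried every InsertRetryInterval until it succeeds, the fetcher is stopped,
// or the retry count passes InsertMaxRetry, in which case errMaxRetryExceeded
// is returned and the caller pauses the fetcher.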
func (f *ChainDataFetcher) retryFunc(insert HandleChainEventFn) HandleChainEventFn {
	return func(event blockchain.ChainEvent, reqType cfTypes.RequestType) error {
		i := 0
		for err := insert(event, reqType); err != nil; err = insert(event, reqType) {
			select {
			case <-f.stopCh:
				return err
			default:
				if i > InsertMaxRetry {
					return errMaxRetryExceeded
				}
				i++
				gauge := getInsertionRetryGauge(reqType)
				gauge.Update(int64(i))
				logger.Warn("retrying...", "blockNumber", event.Block.NumberU64(), "retryCount", i, "err", err)
				time.Sleep(InsertRetryInterval)
			}
		}
		return nil
	}
}

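// status renders the two fetching flags (0: stopped, 1: running) as a short,
// human-readable string.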
func (f *ChainDataFetcher) status() string {
	return fmt.Sprintf("{fetching: %v, rangeFetching: %v}", atomic.LoadUint32(&f.fetchingStarted), atomic.LoadUint32(&f.rangeFetchingStarted))
}