github.com/klaytn/klaytn@v1.12.1/blockchain/state_migration.go (about)

     1  // Copyright 2020 The klaytn Authors
     2  // This file is part of the klaytn library.
     3  //
     4  // The klaytn library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The klaytn library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the klaytn library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package blockchain
    18  
    19  import (
    20  	"errors"
    21  	"fmt"
    22  	"runtime"
    23  	"strconv"
    24  	"strings"
    25  	"time"
    26  
    27  	"github.com/VictoriaMetrics/fastcache"
    28  	"github.com/klaytn/klaytn/blockchain/types"
    29  
    30  	"github.com/alecthomas/units"
    31  	lru "github.com/hashicorp/golang-lru"
    32  	"github.com/klaytn/klaytn/blockchain/state"
    33  	"github.com/klaytn/klaytn/common"
    34  	"github.com/klaytn/klaytn/common/mclock"
    35  	"github.com/klaytn/klaytn/log"
    36  	"github.com/klaytn/klaytn/storage/database"
    37  	"github.com/klaytn/klaytn/storage/statedb"
    38  )
    39  
const (
	// DefaultWarmUpMinLoad is the cache-load percentage used when a caller
	// passes minLoad == 0; warm-up is considered complete beyond this load.
	DefaultWarmUpMinLoad = 90
	// DefaultWarmUpReportInterval is how often trieWarmUp reports its interim
	// read count to the warm-up checker goroutine.
	DefaultWarmUpReportInterval = time.Second * 10
)

var (
	// stopWarmUpErr indicates a warm-up terminated via StopWarmUp.
	stopWarmUpErr = errors.New("warm-up terminate by StopWarmUp")
	// blockChainStopWarmUpErr indicates a warm-up terminated because the
	// blockchain itself stopped (bc.quit closed).
	blockChainStopWarmUpErr = errors.New("warm-up terminate as blockchain stopped")
)
    49  
    50  func (bc *BlockChain) stateMigrationCommit(s *statedb.TrieSync, batch database.Batch) (int, error) {
    51  	written, err := s.Commit(batch)
    52  	if written == 0 || err != nil {
    53  		return written, err
    54  	}
    55  
    56  	if batch.ValueSize() > database.IdealBatchSize {
    57  		if err := batch.Write(); err != nil {
    58  			return 0, fmt.Errorf("DB write error: %v", err)
    59  		}
    60  		batch.Reset()
    61  	}
    62  
    63  	return written, nil
    64  }
    65  
    66  func (bc *BlockChain) concurrentRead(db state.Database, quitCh chan struct{}, hashCh chan common.Hash, resultCh chan statedb.SyncResult) {
    67  	for {
    68  		select {
    69  		case <-quitCh:
    70  			return
    71  		case hash := <-hashCh:
    72  			data, err := db.TrieDB().NodeFromOld(hash.ExtendZero())
    73  			if err != nil {
    74  				data, err = db.ContractCode(hash)
    75  			}
    76  			if err != nil {
    77  				resultCh <- statedb.SyncResult{Hash: hash, Err: err}
    78  				continue
    79  			}
    80  			resultCh <- statedb.SyncResult{Hash: hash, Data: data}
    81  		}
    82  	}
    83  }
    84  
// migrateState is the core implementation of state trie migration.
// This migrates a trie from StateTrieDB to StateTrieMigrationDB.
// Reading StateTrieDB happens in parallel and writing StateTrieMigrationDB happens in batch write.
//
// Before this function is called, StateTrieMigrationDB should be set.
// After the migration finish, the original StateTrieDB is removed and StateTrieMigrationDB becomes a new StateTrieDB.
func (bc *BlockChain) migrateState(rootHash common.Hash) (returnErr error) {
	bc.migrationErr = nil
	defer func() {
		// Publish the final status; nil returnErr means the migration succeeded.
		bc.migrationErr = returnErr
		// If migration stops by quit signal, it doesn't finish migration and it will restart again.
		if returnErr != ErrQuitBySignal {
			// lock to prevent from a conflict of state DB close and state DB write
			bc.mu.Lock()
			bc.db.FinishStateMigration(returnErr == nil)
			bc.mu.Unlock()
		}
	}()

	start := time.Now()

	srcState := bc.StateCache()
	dstState := state.NewDatabase(bc.db)

	// NOTE: lruCache is mandatory when state migration and block processing are executed simultaneously
	lruCache, _ := lru.New(int(2 * units.Giga / common.HashLength)) // 2GB for 62,500,000 common.Hash key values
	trieSync := state.NewStateSync(rootHash, dstState.TrieDB().DiskDB(), nil, lruCache, nil)
	var queue []common.Hash

	// quitCh tells the reader goroutines below to exit when this function returns.
	quitCh := make(chan struct{})
	defer close(quitCh)

	// Prepare concurrent read goroutines
	threads := runtime.NumCPU()
	hashCh := make(chan common.Hash, threads)
	resultCh := make(chan statedb.SyncResult, threads)

	for th := 0; th < threads; th++ {
		go bc.concurrentRead(srcState, quitCh, hashCh, resultCh)
	}

	stateTrieBatch := dstState.TrieDB().DiskDB().NewBatch(database.StateTrieDB)
	defer stateTrieBatch.Release()
	stats := migrationStats{initialStartTime: start, startTime: mclock.Now()}

	if bc.testMigrationHook != nil {
		bc.testMigrationHook()
	}

	// Migration main loop
	for trieSync.Pending() > 0 {
		// Fetch the next batch of missing trie nodes and code hashes,
		// reusing queue's backing array across iterations.
		nodes, _, codes := trieSync.Missing(1024)
		queue = append(queue[:0], append(nodes, codes...)...)
		results := make([]statedb.SyncResult, len(queue))

		// Read the trie nodes
		startRead := time.Now()
		go func() {
			for _, hash := range queue {
				hashCh <- hash
			}
		}()

		// Collect exactly len(queue) results; order may differ from queue order.
		for i := 0; i < len(queue); i++ {
			result := <-resultCh
			if result.Err != nil {
				logger.Error("State migration is failed by resultCh",
					"result.hash", result.Hash.String(), "result.Err", result.Err)
				return fmt.Errorf("failed to retrieve node data for %x: %v", result.Hash, result.Err)
			}
			results[i] = result
		}
		stats.read += len(queue)
		stats.readElapsed += time.Since(startRead)

		// Process trie nodes
		startProcess := time.Now()
		for index, result := range results {
			if err := trieSync.Process(result); err != nil {
				logger.Error("State migration is failed by process error", "err", err)
				return fmt.Errorf("failed to process result #%d: %v", index, err)
			}
		}
		stats.processElapsed += time.Since(startProcess)

		// Commit trie nodes
		startWrite := time.Now()
		written, err := bc.stateMigrationCommit(trieSync, stateTrieBatch)
		if err != nil {
			logger.Error("State migration is failed by commit error", "err", err)
			return fmt.Errorf("failed to commit data #%d: %v", written, err)
		}
		stats.committed += written
		stats.writeElapsed += time.Since(startWrite)

		// Report progress
		stats.stateMigrationReport(false, trieSync.Pending(), trieSync.CalcProgressPercentage())

		// Check for termination requests between batches.
		select {
		case <-bc.stopStateMigration:
			logger.Info("State migration terminated by request")
			return errors.New("stop state migration")
		case <-bc.quit:
			logger.Info("State migration stopped by quit signal; should continue on node restart")
			return ErrQuitBySignal
		default:
		}

		bc.readCnt, bc.committedCnt, bc.pendingCnt, bc.progress = stats.totalRead, stats.totalCommitted, trieSync.Pending(), stats.progress
	}

	// Flush trie nodes which is not written yet.
	if err := stateTrieBatch.Write(); err != nil {
		logger.Error("State migration is failed by commit error", "err", err)
		return fmt.Errorf("DB write error: %v", err)
	}

	stats.stateMigrationReport(true, trieSync.Pending(), trieSync.CalcProgressPercentage())
	bc.readCnt, bc.committedCnt, bc.pendingCnt, bc.progress = stats.totalRead, stats.totalCommitted, trieSync.Pending(), stats.progress

	// Clear memory of trieSync
	trieSync = nil

	elapsed := time.Since(start)
	speed := float64(stats.totalCommitted) / elapsed.Seconds()
	logger.Info("State migration : Copy is done",
		"totalRead", stats.totalRead, "totalCommitted", stats.totalCommitted,
		"totalElapsed", elapsed, "committed per second", speed)

	// Verify the copied state trie matches the source before declaring success.
	startCheck := time.Now()
	if err := state.CheckStateConsistencyParallel(srcState, dstState, rootHash, bc.quit); err != nil {
		logger.Error("State migration : copied stateDB is invalid", "err", err)
		return err
	}
	checkElapsed := time.Since(startCheck)
	logger.Info("State migration is completed", "copyElapsed", elapsed, "checkElapsed", checkElapsed)
	return nil
}
   223  
// migrationStats tracks and reports on state migration.
type migrationStats struct {
	// read/committed count nodes in the current reporting interval;
	// totalRead/totalCommitted are cumulative; pending is the last-known
	// number of nodes still to sync.
	read, committed, totalRead, totalCommitted, pending int
	progress                                            float64        // progress percentage reported by the trie sync
	initialStartTime                                    time.Time      // wall-clock start of the whole migration
	startTime                                           mclock.AbsTime // start of the current reporting interval
	readElapsed                                         time.Duration  // cumulative time spent reading nodes
	processElapsed                                      time.Duration  // cumulative time spent processing nodes
	writeElapsed                                        time.Duration  // cumulative time spent committing nodes
}
   234  
   235  func (st *migrationStats) stateMigrationReport(force bool, pending int, progress float64) {
   236  	var (
   237  		now     = mclock.Now()
   238  		elapsed = time.Duration(now) - time.Duration(st.startTime)
   239  	)
   240  
   241  	if force || elapsed >= log.StatsReportLimit {
   242  		st.totalRead += st.read
   243  		st.totalCommitted += st.committed
   244  		st.pending, st.progress = pending, progress
   245  
   246  		progressStr := strconv.FormatFloat(st.progress, 'f', 4, 64)
   247  		progressStr = strings.TrimRight(progressStr, "0")
   248  		progressStr = strings.TrimRight(progressStr, ".") + "%"
   249  
   250  		logger.Info("State migration progress",
   251  			"progress", progressStr,
   252  			"totalRead", st.totalRead, "totalCommitted", st.totalCommitted, "pending", st.pending,
   253  			"read", st.read, "readElapsed", st.readElapsed, "processElapsed", st.processElapsed,
   254  			"written", st.committed, "writeElapsed", st.writeElapsed,
   255  			"elapsed", common.PrettyDuration(elapsed),
   256  			"totalElapsed", time.Since(st.initialStartTime))
   257  
   258  		st.read, st.committed = 0, 0
   259  		st.startTime = now
   260  	}
   261  }
   262  
   263  func (bc *BlockChain) checkTrieContents(oldDB, newDB *statedb.Database, root common.Hash) ([]common.Address, error) {
   264  	oldTrie, err := statedb.NewSecureTrie(root, oldDB, nil)
   265  	if err != nil {
   266  		return nil, err
   267  	}
   268  
   269  	newTrie, err := statedb.NewSecureTrie(root, newDB, nil)
   270  	if err != nil {
   271  		return nil, err
   272  	}
   273  
   274  	diff, _ := statedb.NewDifferenceIterator(oldTrie.NodeIterator([]byte{}), newTrie.NodeIterator([]byte{}))
   275  	iter := statedb.NewIterator(diff)
   276  
   277  	var dirty []common.Address
   278  
   279  	for iter.Next() {
   280  		key := newTrie.GetKey(iter.Key)
   281  		if key == nil {
   282  			return nil, fmt.Errorf("no preimage found for hash %x", iter.Key)
   283  		}
   284  
   285  		dirty = append(dirty, common.BytesToAddress(key))
   286  	}
   287  
   288  	return dirty, nil
   289  }
   290  
   291  // restartStateMigration is called when a server is restarted while migration. The migration continues.
   292  func (bc *BlockChain) restartStateMigration() {
   293  	if bc.db.InMigration() {
   294  		number := bc.db.MigrationBlockNumber()
   295  
   296  		block := bc.GetBlockByNumber(number)
   297  		if block == nil {
   298  			logger.Error("failed to get migration block number", "blockNumber", number)
   299  			return
   300  		}
   301  
   302  		root := block.Root()
   303  		logger.Warn("State migration : Restarted", "blockNumber", number, "root", root.String())
   304  
   305  		bc.wg.Add(1)
   306  		go func() {
   307  			bc.migrateState(root)
   308  			bc.wg.Done()
   309  		}()
   310  	}
   311  }
   312  
   313  // PrepareStateMigration sets prepareStateMigration to be called in checkStartStateMigration.
   314  func (bc *BlockChain) PrepareStateMigration() error {
   315  	if bc.db.ReadPruningEnabled() {
   316  		return errors.New("state migration not supported with live pruning enabled")
   317  	}
   318  
   319  	if bc.db.InMigration() || bc.prepareStateMigration {
   320  		return errors.New("migration already started")
   321  	}
   322  
   323  	bc.prepareStateMigration = true
   324  	logger.Info("State migration is prepared", "expectedMigrationStartingBlockNumber", bc.CurrentBlock().NumberU64()+1)
   325  
   326  	return nil
   327  }
   328  
   329  func (bc *BlockChain) checkStartStateMigration(number uint64, root common.Hash) bool {
   330  	if bc.prepareStateMigration {
   331  		logger.Info("State migration is started", "block", number, "root", root)
   332  
   333  		if err := bc.StartStateMigration(number, root); err != nil {
   334  			logger.Error("Failed to start state migration", "err", err)
   335  		}
   336  
   337  		bc.prepareStateMigration = false
   338  
   339  		return true
   340  	}
   341  
   342  	return false
   343  }
   344  
// migrationPrerequisites is a collection of functions that needs to be run
// before state trie migration. If one of the functions fails to run,
// the migration will not start.
var migrationPrerequisites []func(uint64) error

// RegisterMigrationPrerequisites adds f to the set of checks that
// StartStateMigration executes before a migration may begin.
func RegisterMigrationPrerequisites(f func(uint64) error) {
	migrationPrerequisites = append(migrationPrerequisites, f)
}
   353  
// ClearMigrationPrerequisites drops every callback registered via
// RegisterMigrationPrerequisites.
// For tests starting and stopping node instances, clear residual migrationPrerequisites
// that might no longer work.
// TODO: remove this function when we have a better way to handle this.
// e.g. StartStateMigration directly calls CheckStakingInfoStored instead of this callback.
func ClearMigrationPrerequisites() {
	migrationPrerequisites = nil
}
   361  
   362  // StartStateMigration checks prerequisites, configures DB and starts migration.
   363  func (bc *BlockChain) StartStateMigration(number uint64, root common.Hash) error {
   364  	if bc.db.InMigration() {
   365  		return errors.New("migration already started")
   366  	}
   367  
   368  	for _, f := range migrationPrerequisites {
   369  		if err := f(number); err != nil {
   370  			return err
   371  		}
   372  	}
   373  
   374  	if err := bc.db.CreateMigrationDBAndSetStatus(number); err != nil {
   375  		return err
   376  	}
   377  
   378  	bc.wg.Add(1)
   379  	go func() {
   380  		bc.migrateState(root)
   381  		bc.wg.Done()
   382  	}()
   383  
   384  	return nil
   385  }
   386  
// StopStateMigration signals the running migration loop to terminate.
// It returns an error when no migration is in progress.
// NOTE(review): the send below blocks until migrateState's select receives it
// (assuming stopStateMigration is unbuffered — its creation is outside this file).
func (bc *BlockChain) StopStateMigration() error {
	if !bc.db.InMigration() {
		return errors.New("not in migration")
	}

	bc.stopStateMigration <- struct{}{}

	return nil
}
   396  
// StateMigrationStatus returns if it is in migration, the block number of in migration,
// the number of read, committed, and pending trie nodes (as tracked by
// migrateState), the progress percentage, and the last migration error
// (nil while running or on success).
func (bc *BlockChain) StateMigrationStatus() (bool, uint64, int, int, int, float64, error) {
	return bc.db.InMigration(), bc.db.MigrationBlockNumber(), bc.readCnt, bc.committedCnt, bc.pendingCnt, bc.progress, bc.migrationErr
}
   402  
// trieWarmUp runs state.Iterator, generated from the given state or storage trie node hash,
// until it reaches end. If it reaches end, it will send a nil error to errCh to indicate that
// it has been finished.
func (bc *BlockChain) trieWarmUp(next func() bool, resultCh chan int, errCh chan error) {
	var (
		resultErr    error
		reportTicker = time.NewTicker(DefaultWarmUpReportInterval)
		nReads       = 0 // nodes read since the last report
	)

	// On exit, flush the residual read count and report the final status.
	// A nil resultErr means the iterator finished or warm-up was completed.
	defer func() {
		resultCh <- nReads
		errCh <- resultErr
		reportTicker.Stop()
	}()

	for next() {
		select {
		case <-bc.quitWarmUp:
			// Warm-up completed (cache loaded enough) or stopped via StopWarmUp.
			return
		case <-bc.quit:
			resultErr = blockChainStopWarmUpErr
			return
		case <-reportTicker.C:
			// Periodically hand the interim count to the checker goroutine.
			resultCh <- nReads
			// Reset to 1, not 0: this loop iteration read a node but the
			// ticker branch was taken instead of the default increment.
			nReads = 1
		default:
			nReads++
		}
	}
}
   434  
// warmUpTrieCache receives errors from each warm-up goroutine.
// If it receives a nil error, it means a child goroutine is successfully terminated.
// It also periodically checks and logs warm-up progress.
func (bc *BlockChain) warmUpTrieCache(mainTrieDB *statedb.Database, minLoad uint, nChildren int,
	resultCh chan int, errCh chan error,
) {
	// Mark warm-up as finished so a new one can be started.
	defer func() { bc.quitWarmUp = nil }()

	cache := mainTrieDB.TrieNodeCache()
	mainTrieCacheLimit := mainTrieDB.GetTrieNodeLocalCacheByteLimit()
	if minLoad == 0 {
		minLoad = DefaultWarmUpMinLoad
	}

	var (
		resultErr   error // last non-nil error reported by a child goroutine
		started     = time.Now()
		logged      = time.Now()
		context     []interface{} // key/value pairs for progress logging
		progress    uint64        // cache fill percentage (computed for FastCache only)
		trieReadCnt int           // total trie nodes read by all children so far
	)

	// updateContext refreshes progress and the logging context. The fill
	// percentage can only be computed for FastCache, which exposes its size.
	updateContext := func() {
		switch c := cache.(type) {
		case *statedb.FastCache:
			stats := c.UpdateStats().(fastcache.Stats)
			progress = stats.BytesSize * 100 / mainTrieCacheLimit
			context = []interface{}{
				"warmUpCnt", trieReadCnt,
				"cacheLimit", units.Base2Bytes(mainTrieCacheLimit).String(),
				"cachedSize", units.Base2Bytes(stats.BytesSize).String(),
				"progress", progress,
				"elapsed", time.Since(started),
			}
		default:
			context = []interface{}{
				"warmUpCnt", trieReadCnt,
				"cacheLimit", units.Base2Bytes(mainTrieCacheLimit).String(),
			}
		}
	}

	// Loop until all nChildren goroutines have reported completion via errCh.
	for childCnt := 0; childCnt < nChildren; {
		select {
		case nReadNodes := <-resultCh:
			trieReadCnt += nReadNodes

			// Throttle progress checks/logs to log.StatsReportLimit.
			if time.Since(logged) < log.StatsReportLimit {
				continue
			}
			logged = time.Now()
			updateContext()
			if progress > uint64(minLoad) { // more than minLoad% (default 90%)
				// Cache is loaded enough; closing quitWarmUp stops the children.
				close(bc.quitWarmUp)
				logger.Info("Warm up is completed", context...)
				return
			}
			logger.Info("Warm up progress", context...)
		case err := <-errCh:
			// if errCh returns nil, it means success.
			if err != nil {
				resultErr = err
				logger.Warn("Warm up got an error", "err", err)
			}

			logger.Debug("Warm up a child trie is finished", "childCnt", childCnt, "err", err)
			childCnt++
		}
	}

	// All children finished before reaching the target cache load.
	updateContext()
	context = append(context, "resultErr", resultErr)
	logger.Info("Warm up is completed", context...)
}
   510  
   511  // StartWarmUp retrieves all state/storage tries of the latest state root and caches the tries.
   512  func (bc *BlockChain) StartWarmUp(minLoad uint) error {
   513  	block, db, mainTrieDB, err := bc.prepareWarmUp()
   514  	if err != nil {
   515  		return err
   516  	}
   517  	// retrieve children nodes of state trie root node
   518  	children, err := db.TrieDB().NodeChildren(block.Root().ExtendZero())
   519  	if err != nil {
   520  		return err
   521  	}
   522  	// run goroutine for each child node
   523  	resultCh := make(chan int, len(children))
   524  	errCh := make(chan error)
   525  	bc.quitWarmUp = make(chan struct{})
   526  	for idx, child := range children {
   527  		childHash := child.Unextend()
   528  		stateDB, err := state.New(childHash, db, nil, nil)
   529  		if err != nil {
   530  			logger.Warn("[WarmUp] Failed to get state",
   531  				"rootHash", children, "childIdx", idx, "childHash", childHash.Hex())
   532  			continue
   533  		}
   534  		it := state.NewNodeIterator(stateDB)
   535  		go bc.trieWarmUp(it.Next, resultCh, errCh)
   536  	}
   537  	// run a warm-up checker routine
   538  	go bc.warmUpTrieCache(mainTrieDB, minLoad, len(children), resultCh, errCh)
   539  	logger.Info("State trie warm-up is started", "blockNum", block.NumberU64(),
   540  		"root", block.Root().String(), "len(children)", len(children))
   541  	return nil
   542  }
   543  
// StopWarmUp stops the warming up process.
// It returns ErrNotInWarmUp when no warm-up is running. Closing quitWarmUp
// signals every warm-up goroutine (see trieWarmUp) to stop.
func (bc *BlockChain) StopWarmUp() error {
	if bc.quitWarmUp == nil {
		return ErrNotInWarmUp
	}

	close(bc.quitWarmUp)

	return nil
}
   554  
   555  // StartCollectingTrieStats collects state or storage trie statistics.
   556  func (bc *BlockChain) StartCollectingTrieStats(contractAddr common.Address) error {
   557  	block := bc.GetBlockByNumber(bc.lastCommittedBlock)
   558  	if block == nil {
   559  		return fmt.Errorf("Block #%d not found", bc.lastCommittedBlock)
   560  	}
   561  
   562  	mainTrieDB := bc.StateCache().TrieDB()
   563  	cache := mainTrieDB.TrieNodeCache()
   564  	if cache == nil {
   565  		return fmt.Errorf("target cache is nil")
   566  	}
   567  	db := state.NewDatabaseWithExistingCache(bc.db, cache)
   568  
   569  	startNode := block.Root().ExtendZero()
   570  	// If the contractAddr is given, start collecting stats from the root of storage trie
   571  	if !common.EmptyAddress(contractAddr) {
   572  		var err error
   573  		startNode, err = bc.GetContractStorageRoot(block, db, contractAddr)
   574  		if err != nil {
   575  			logger.Error("Failed to get the contract storage root",
   576  				"contractAddr", contractAddr.String(), "rootHash", block.Root().String(),
   577  				"err", err)
   578  			return err
   579  		}
   580  	}
   581  
   582  	children, err := db.TrieDB().NodeChildren(startNode)
   583  	if err != nil {
   584  		logger.Error("Failed to retrieve the children of start node", "err", err)
   585  		return err
   586  	}
   587  
   588  	logger.Info("Started collecting trie statistics",
   589  		"blockNum", block.NumberU64(), "root", block.Root().String(), "len(children)", len(children))
   590  	go collectTrieStats(db, startNode)
   591  
   592  	return nil
   593  }
   594  
// collectChildrenStats wraps CollectChildrenStats, in order to send finish signal to resultCh.
// The trailing NodeInfo with Finished=true lets the consumer count completed goroutines.
func collectChildrenStats(db state.Database, child common.ExtHash, resultCh chan<- statedb.NodeInfo) {
	db.TrieDB().CollectChildrenStats(child, 2, resultCh)
	resultCh <- statedb.NodeInfo{Finished: true}
}
   600  
   601  // collectTrieStats is the main function of collecting trie statistics.
   602  // It spawns goroutines for the upper-most children and iterates each sub-trie.
   603  func collectTrieStats(db state.Database, startNode common.ExtHash) {
   604  	children, err := db.TrieDB().NodeChildren(startNode)
   605  	if err != nil {
   606  		logger.Error("Failed to retrieve the children of start node", "err", err)
   607  	}
   608  
   609  	// collecting statistics by running individual goroutines for each child
   610  	resultCh := make(chan statedb.NodeInfo, 10000)
   611  	for _, child := range children {
   612  		go collectChildrenStats(db, child, resultCh)
   613  	}
   614  
   615  	numGoRoutines := len(children)
   616  	ticker := time.NewTicker(1 * time.Minute)
   617  
   618  	numNodes, numLeafNodes, maxDepth := 0, 0, 0
   619  	depthCounter := make(map[int]int)
   620  	begin := time.Now()
   621  	for {
   622  		select {
   623  		case result := <-resultCh:
   624  			if result.Finished {
   625  				numGoRoutines--
   626  				if numGoRoutines == 0 {
   627  					logger.Info("Finished collecting trie statistics", "elapsed", time.Since(begin),
   628  						"numNodes", numNodes, "numLeafNodes", numLeafNodes, "maxDepth", maxDepth)
   629  					printDepthStats(depthCounter)
   630  					return
   631  				}
   632  				continue
   633  			}
   634  			numNodes++
   635  			// if a leaf node, collect the depth data
   636  			if result.Depth != 0 {
   637  				numLeafNodes++
   638  				depthCounter[result.Depth]++
   639  				if result.Depth > maxDepth {
   640  					maxDepth = result.Depth
   641  				}
   642  			}
   643  		case <-ticker.C:
   644  			// leave a periodic log
   645  			logger.Info("Collecting trie statistics is in progress...", "elapsed", time.Since(begin),
   646  				"numGoRoutines", numGoRoutines, "numNodes", numNodes, "numLeafNodes", numLeafNodes, "maxDepth", maxDepth)
   647  			printDepthStats(depthCounter)
   648  		}
   649  	}
   650  }
   651  
   652  // printDepthStats leaves logs containing the depth and the number of nodes in the depth.
   653  func printDepthStats(depthCounter map[int]int) {
   654  	// max depth 20 is set by heuristically
   655  	for depth := 2; depth < 20; depth++ {
   656  		if depthCounter[depth] == 0 {
   657  			continue
   658  		}
   659  		logger.Info("number of leaf nodes in a depth",
   660  			"depth", depth, "numNodes", depthCounter[depth])
   661  	}
   662  }
   663  
   664  // GetContractStorageRoot returns the storage root of a contract based on the given block.
   665  func (bc *BlockChain) GetContractStorageRoot(block *types.Block, db state.Database, contractAddr common.Address) (common.ExtHash, error) {
   666  	stateDB, err := state.New(block.Root(), db, nil, nil)
   667  	if err != nil {
   668  		return common.ExtHash{}, fmt.Errorf("failed to get StateDB - %w", err)
   669  	}
   670  	return stateDB.GetContractStorageRoot(contractAddr)
   671  }
   672  
   673  // prepareWarmUp creates and returns resources needed for state warm-up.
   674  func (bc *BlockChain) prepareWarmUp() (*types.Block, state.Database, *statedb.Database, error) {
   675  	// There is a chance of concurrent access to quitWarmUp, though not likely to happen.
   676  	if bc.quitWarmUp != nil {
   677  		return nil, nil, nil, fmt.Errorf("already warming up")
   678  	}
   679  
   680  	block := bc.GetBlockByNumber(bc.lastCommittedBlock)
   681  	if block == nil {
   682  		return nil, nil, nil, fmt.Errorf("block #%d not found", bc.lastCommittedBlock)
   683  	}
   684  
   685  	mainTrieDB := bc.StateCache().TrieDB()
   686  	cache := mainTrieDB.TrieNodeCache()
   687  	if cache == nil {
   688  		return nil, nil, nil, fmt.Errorf("target cache is nil")
   689  	}
   690  	db := state.NewDatabaseWithExistingCache(bc.db, cache)
   691  	return block, db, mainTrieDB, nil
   692  }
   693  
   694  func prepareContractWarmUp(block *types.Block, db state.Database, contractAddr common.Address) (common.ExtHash, state.Trie, error) {
   695  	stateDB, err := state.New(block.Root(), db, nil, nil)
   696  	if err != nil {
   697  		return common.ExtHash{}, nil, fmt.Errorf("failed to get StateDB, err: %w", err)
   698  	}
   699  	storageTrieRoot, err := stateDB.GetContractStorageRoot(contractAddr)
   700  	if err != nil {
   701  		return common.ExtHash{}, nil, err
   702  	}
   703  	storageTrie, err := db.OpenStorageTrie(storageTrieRoot, nil)
   704  	if err != nil {
   705  		return common.ExtHash{}, nil, err
   706  	}
   707  	return storageTrieRoot, storageTrie, nil
   708  }
   709  
// StartContractWarmUp retrieves a storage trie of the latest state root and caches the trie
// corresponding to the given contract address.
// minLoad is the cache-fill percentage at which warm-up stops; 0 selects
// DefaultWarmUpMinLoad. It fails if a warm-up is already running or the
// contract's storage trie cannot be opened.
func (bc *BlockChain) StartContractWarmUp(contractAddr common.Address, minLoad uint) error {
	block, db, mainTrieDB, err := bc.prepareWarmUp()
	if err != nil {
		return err
	}
	// prepare contract storage trie specific resources - storageTrieRoot and storageTrie
	storageTrieRoot, storageTrie, err := prepareContractWarmUp(block, db, contractAddr)
	if err != nil {
		return fmt.Errorf("failed to prepare contract warm-up, err: %w", err)
	}
	// retrieve children nodes of contract storage trie root node
	children, err := db.TrieDB().NodeChildren(storageTrieRoot)
	if err != nil {
		return err
	}
	// run goroutine for each child node
	resultCh := make(chan int, len(children))
	errCh := make(chan error)
	bc.quitWarmUp = make(chan struct{})
	for _, child := range children {
		it := statedb.NewIterator(storageTrie.NodeIterator(child[:]))
		go bc.trieWarmUp(it.Next, resultCh, errCh)
	}
	// run a warm-up checker routine
	go bc.warmUpTrieCache(mainTrieDB, minLoad, len(children), resultCh, errCh)
	logger.Info("Contract storage trie warm-up is started",
		"blockNum", block.NumberU64(), "root", block.Root().String(), "contractAddr", contractAddr.String(),
		"contractStorageRoot", storageTrieRoot.String(), "len(children)", len(children))
	return nil
}