github.com/ledgerwatch/erigon-lib@v1.0.0/state/aggregator.go

     1  /*
     2     Copyright 2022 The Erigon contributors
     3  
     4     Licensed under the Apache License, Version 2.0 (the "License");
     5     you may not use this file except in compliance with the License.
     6     You may obtain a copy of the License at
     7  
     8         http://www.apache.org/licenses/LICENSE-2.0
     9  
    10     Unless required by applicable law or agreed to in writing, software
    11     distributed under the License is distributed on an "AS IS" BASIS,
    12     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    13     See the License for the specific language governing permissions and
    14     limitations under the License.
    15  */
    16  
    17  package state
    18  
    19  import (
    20  	"bytes"
    21  	"context"
    22  	"fmt"
    23  	"math"
    24  	"math/bits"
    25  	"os"
    26  	"sync"
    27  	"sync/atomic"
    28  	"time"
    29  
    30  	"github.com/VictoriaMetrics/metrics"
    31  	"github.com/holiman/uint256"
    32  	"github.com/ledgerwatch/log/v3"
    33  	"golang.org/x/sync/errgroup"
    34  
    35  	"github.com/ledgerwatch/erigon-lib/commitment"
    36  	"github.com/ledgerwatch/erigon-lib/common"
    37  	"github.com/ledgerwatch/erigon-lib/common/background"
    38  	"github.com/ledgerwatch/erigon-lib/common/length"
    39  	"github.com/ledgerwatch/erigon-lib/kv"
    40  	"github.com/ledgerwatch/erigon-lib/kv/iter"
    41  	"github.com/ledgerwatch/erigon-lib/kv/order"
    42  )
    43  
    44  // StepsInBiggestFile - files of this size are completely frozen/immutable.
    45  // Files of smaller size are also immutable, but can be removed after being merged into bigger files.
    46  const StepsInBiggestFile = 32
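// For example, with a (hypothetical) aggregationStep of 500_000 transactions,
// the biggest file spans 32 steps = 16_000_000 transactions.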
    47  
    48  var (
    49  	mxCurrentTx                = metrics.GetOrCreateCounter("domain_tx_processed")
    50  	mxCurrentBlock             = metrics.GetOrCreateCounter("domain_block_current")
    51  	mxRunningMerges            = metrics.GetOrCreateCounter("domain_running_merges")
    52  	mxRunningCollations        = metrics.GetOrCreateCounter("domain_running_collations")
    53  	mxCollateTook              = metrics.GetOrCreateHistogram("domain_collate_took")
    54  	mxPruneTook                = metrics.GetOrCreateHistogram("domain_prune_took")
    55  	mxPruneHistTook            = metrics.GetOrCreateHistogram("domain_prune_hist_took")
    56  	mxPruningProgress          = metrics.GetOrCreateCounter("domain_pruning_progress")
    57  	mxCollationSize            = metrics.GetOrCreateCounter("domain_collation_size")
    58  	mxCollationSizeHist        = metrics.GetOrCreateCounter("domain_collation_hist_size")
    59  	mxPruneSize                = metrics.GetOrCreateCounter("domain_prune_size")
    60  	mxBuildTook                = metrics.GetOrCreateSummary("domain_build_files_took")
    61  	mxStepCurrent              = metrics.GetOrCreateCounter("domain_step_current")
    62  	mxStepTook                 = metrics.GetOrCreateHistogram("domain_step_took")
    63  	mxCommitmentKeys           = metrics.GetOrCreateCounter("domain_commitment_keys")
    64  	mxCommitmentRunning        = metrics.GetOrCreateCounter("domain_running_commitment")
    65  	mxCommitmentTook           = metrics.GetOrCreateSummary("domain_commitment_took")
    66  	mxCommitmentWriteTook      = metrics.GetOrCreateHistogram("domain_commitment_write_took")
    67  	mxCommitmentUpdates        = metrics.GetOrCreateCounter("domain_commitment_updates")
    68  	mxCommitmentUpdatesApplied = metrics.GetOrCreateCounter("domain_commitment_updates_applied")
    69  )
    70  
    71  type Aggregator struct {
    72  	db              kv.RwDB
    73  	aggregationStep uint64
    74  	accounts        *Domain
    75  	storage         *Domain
    76  	code            *Domain
    77  	commitment      *DomainCommitted
    78  	logAddrs        *InvertedIndex
    79  	logTopics       *InvertedIndex
    80  	tracesFrom      *InvertedIndex
    81  	tracesTo        *InvertedIndex
    82  	txNum           uint64
    83  	seekTxNum       uint64
    84  	blockNum        uint64
    85  	stepDoneNotice  chan [length.Hash]byte
    86  	rwTx            kv.RwTx
    87  	stats           FilesStats
    88  	tmpdir          string
    89  	defaultCtx      *AggregatorContext
    90  
    91  	ps     *background.ProgressSet
    92  	logger log.Logger
    93  }
    94  
    95  //type exposedMetrics struct {
    96  //	CollationSize     *metrics.Gauge
    97  //	CollationSizeHist *metrics.Gauge
    98  //	PruneSize         *metrics.Gauge
    99  //
   100  //	lastCollSize    int
   101  //	lastColHistSize int
   102  //	lastPruneSize   int
   103  //}
   104  //
   105  //func (e exposedMetrics) init() {
   106  //	e.CollationSize = metrics.GetOrCreateGauge("domain_collation_size", func() float64 { return 0 })
   107  //	e.CollationSizeHist = metrics.GetOrCreateGauge("domain_collation_hist_size", func() float64 { return 0 })
   108  //	e.PruneSize = metrics.GetOrCreateGauge("domain_prune_size", func() float64 { return e.lastPruneSize })
   109  //}
   110  
   111  func NewAggregator(dir, tmpdir string, aggregationStep uint64, commitmentMode CommitmentMode, commitTrieVariant commitment.TrieVariant, logger log.Logger) (*Aggregator, error) {
   112  	a := &Aggregator{aggregationStep: aggregationStep, ps: background.NewProgressSet(), tmpdir: tmpdir, stepDoneNotice: make(chan [length.Hash]byte, 1), logger: logger}
   113  
   114  	closeAgg := true
   115  	defer func() {
   116  		if closeAgg {
   117  			a.Close()
   118  		}
   119  	}()
   120  	err := os.MkdirAll(dir, 0764)
   121  	if err != nil {
   122  		return nil, err
   123  	}
   124  	if a.accounts, err = NewDomain(dir, tmpdir, aggregationStep, "accounts", kv.TblAccountKeys, kv.TblAccountVals, kv.TblAccountHistoryKeys, kv.TblAccountHistoryVals, kv.TblAccountIdx, false, false, logger); err != nil {
   125  		return nil, err
   126  	}
   127  	if a.storage, err = NewDomain(dir, tmpdir, aggregationStep, "storage", kv.TblStorageKeys, kv.TblStorageVals, kv.TblStorageHistoryKeys, kv.TblStorageHistoryVals, kv.TblStorageIdx, false, false, logger); err != nil {
   128  		return nil, err
   129  	}
   130  	if a.code, err = NewDomain(dir, tmpdir, aggregationStep, "code", kv.TblCodeKeys, kv.TblCodeVals, kv.TblCodeHistoryKeys, kv.TblCodeHistoryVals, kv.TblCodeIdx, true, true, logger); err != nil {
   131  		return nil, err
   132  	}
   133  
   134  	commitd, err := NewDomain(dir, tmpdir, aggregationStep, "commitment", kv.TblCommitmentKeys, kv.TblCommitmentVals, kv.TblCommitmentHistoryKeys, kv.TblCommitmentHistoryVals, kv.TblCommitmentIdx, false, true, logger)
   135  	if err != nil {
   136  		return nil, err
   137  	}
   138  	a.commitment = NewCommittedDomain(commitd, commitmentMode, commitTrieVariant, logger)
   139  
   140  	if a.logAddrs, err = NewInvertedIndex(dir, tmpdir, aggregationStep, "logaddrs", kv.TblLogAddressKeys, kv.TblLogAddressIdx, false, nil, logger); err != nil {
   141  		return nil, err
   142  	}
   143  	if a.logTopics, err = NewInvertedIndex(dir, tmpdir, aggregationStep, "logtopics", kv.TblLogTopicsKeys, kv.TblLogTopicsIdx, false, nil, logger); err != nil {
   144  		return nil, err
   145  	}
   146  	if a.tracesFrom, err = NewInvertedIndex(dir, tmpdir, aggregationStep, "tracesfrom", kv.TblTracesFromKeys, kv.TblTracesFromIdx, false, nil, logger); err != nil {
   147  		return nil, err
   148  	}
   149  	if a.tracesTo, err = NewInvertedIndex(dir, tmpdir, aggregationStep, "tracesto", kv.TblTracesToKeys, kv.TblTracesToIdx, false, nil, logger); err != nil {
   150  		return nil, err
   151  	}
   152  	closeAgg = false
   153  
   154  	a.seekTxNum = a.EndTxNumMinimax()
   155  	return a, nil
   156  }
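// A minimal wiring sketch (illustrative only; the mode/variant constants and
// the 500_000 step are assumptions, and error handling is elided):
//
//	agg, err := NewAggregator(dir, tmpdir, 500_000, CommitmentModeDirect, commitment.VariantHexPatriciaTrie, logger)
//	if err != nil { /* handle */ }
//	defer agg.Close()
//	agg.SetDB(db)
//	defer agg.StartWrites().FinishWrites()
//	agg.SetTx(rwTx)
//	agg.SetTxNum(0)
//	_ = agg.UpdateAccountData(addr, encodedAccount)
//	if err := agg.FinishTx(); err != nil { /* handle */ }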
   157  
   158  func (a *Aggregator) SetDB(db kv.RwDB) { a.db = db }
   159  
   160  func (a *Aggregator) buildMissedIdxBlocking(d *Domain) error {
   161  	eg, ctx := errgroup.WithContext(context.Background())
   162  	eg.SetLimit(32)
   163  	if err := d.BuildMissedIndices(ctx, eg, a.ps); err != nil {
   164  		return err
   165  	}
   166  	return eg.Wait()
   167  }
   168  func (a *Aggregator) ReopenFolder() (err error) {
   169  	{
   170  		if err = a.buildMissedIdxBlocking(a.accounts); err != nil {
   171  			return err
   172  		}
   173  		if err = a.buildMissedIdxBlocking(a.storage); err != nil {
   174  			return err
   175  		}
   176  		if err = a.buildMissedIdxBlocking(a.code); err != nil {
   177  			return err
   178  		}
   179  		if err = a.buildMissedIdxBlocking(a.commitment.Domain); err != nil {
   180  			return err
   181  		}
   182  	}
   183  
   184  	if err = a.accounts.OpenFolder(); err != nil {
   185  		return fmt.Errorf("OpenFolder: %w", err)
   186  	}
   187  	if err = a.storage.OpenFolder(); err != nil {
   188  		return fmt.Errorf("OpenFolder: %w", err)
   189  	}
   190  	if err = a.code.OpenFolder(); err != nil {
   191  		return fmt.Errorf("OpenFolder: %w", err)
   192  	}
   193  	if err = a.commitment.OpenFolder(); err != nil {
   194  		return fmt.Errorf("OpenFolder: %w", err)
   195  	}
   196  	if err = a.logAddrs.OpenFolder(); err != nil {
   197  		return fmt.Errorf("OpenFolder: %w", err)
   198  	}
   199  	if err = a.logTopics.OpenFolder(); err != nil {
   200  		return fmt.Errorf("OpenFolder: %w", err)
   201  	}
   202  	if err = a.tracesFrom.OpenFolder(); err != nil {
   203  		return fmt.Errorf("OpenFolder: %w", err)
   204  	}
   205  	if err = a.tracesTo.OpenFolder(); err != nil {
   206  		return fmt.Errorf("OpenFolder: %w", err)
   207  	}
   208  	return nil
   209  }
   210  
   211  func (a *Aggregator) ReopenList(fNames []string) error {
   212  	var err error
   213  	if err = a.accounts.OpenList(fNames); err != nil {
   214  		return err
   215  	}
   216  	if err = a.storage.OpenList(fNames); err != nil {
   217  		return err
   218  	}
   219  	if err = a.code.OpenList(fNames); err != nil {
   220  		return err
   221  	}
   222  	if err = a.commitment.OpenList(fNames); err != nil {
   223  		return err
   224  	}
   225  	if err = a.logAddrs.OpenList(fNames); err != nil {
   226  		return err
   227  	}
   228  	if err = a.logTopics.OpenList(fNames); err != nil {
   229  		return err
   230  	}
   231  	if err = a.tracesFrom.OpenList(fNames); err != nil {
   232  		return err
   233  	}
   234  	if err = a.tracesTo.OpenList(fNames); err != nil {
   235  		return err
   236  	}
   237  	return nil
   238  }
   239  
   240  func (a *Aggregator) GetAndResetStats() DomainStats {
   241  	stats := DomainStats{HistoryQueries: &atomic.Uint64{}, TotalQueries: &atomic.Uint64{}}
   242  	stats.Accumulate(a.accounts.GetAndResetStats())
   243  	stats.Accumulate(a.storage.GetAndResetStats())
   244  	stats.Accumulate(a.code.GetAndResetStats())
   245  	stats.Accumulate(a.commitment.GetAndResetStats())
   246  
   247  	var tto, tfrom, ltopics, laddr DomainStats
   248  	tto.FilesCount, tto.DataSize, tto.IndexSize = a.tracesTo.collectFilesStat()
   249  	tfrom.FilesCount, tfrom.DataSize, tfrom.IndexSize = a.tracesFrom.collectFilesStat()
   250  	ltopics.FilesCount, ltopics.DataSize, ltopics.IndexSize = a.logTopics.collectFilesStat()
   251  	laddr.FilesCount, laddr.DataSize, laddr.IndexSize = a.logAddrs.collectFilesStat()
   252  
   253  	stats.Accumulate(tto)
   254  	stats.Accumulate(tfrom)
   255  	stats.Accumulate(ltopics)
   256  	stats.Accumulate(laddr)
   257  	return stats
   258  }
   259  
   260  func (a *Aggregator) Close() {
   261  	if a.defaultCtx != nil {
   262  		a.defaultCtx.Close()
   263  	}
   264  	if a.stepDoneNotice != nil {
   265  		close(a.stepDoneNotice)
   266  	}
   267  	if a.accounts != nil {
   268  		a.accounts.Close()
   269  	}
   270  	if a.storage != nil {
   271  		a.storage.Close()
   272  	}
   273  	if a.code != nil {
   274  		a.code.Close()
   275  	}
   276  	if a.commitment != nil {
   277  		a.commitment.Close()
   278  	}
   279  
   280  	if a.logAddrs != nil {
   281  		a.logAddrs.Close()
   282  	}
   283  	if a.logTopics != nil {
   284  		a.logTopics.Close()
   285  	}
   286  	if a.tracesFrom != nil {
   287  		a.tracesFrom.Close()
   288  	}
   289  	if a.tracesTo != nil {
   290  		a.tracesTo.Close()
   291  	}
   292  }
   293  
   294  func (a *Aggregator) SetTx(tx kv.RwTx) {
   295  	a.rwTx = tx
   296  	a.accounts.SetTx(tx)
   297  	a.storage.SetTx(tx)
   298  	a.code.SetTx(tx)
   299  	a.commitment.SetTx(tx)
   300  	a.logAddrs.SetTx(tx)
   301  	a.logTopics.SetTx(tx)
   302  	a.tracesFrom.SetTx(tx)
   303  	a.tracesTo.SetTx(tx)
   304  }
   305  
   306  func (a *Aggregator) SetTxNum(txNum uint64) {
   307  	mxCurrentTx.Set(txNum)
   308  
   309  	a.txNum = txNum
   310  	a.accounts.SetTxNum(txNum)
   311  	a.storage.SetTxNum(txNum)
   312  	a.code.SetTxNum(txNum)
   313  	a.commitment.SetTxNum(txNum)
   314  	a.logAddrs.SetTxNum(txNum)
   315  	a.logTopics.SetTxNum(txNum)
   316  	a.tracesFrom.SetTxNum(txNum)
   317  	a.tracesTo.SetTxNum(txNum)
   318  }
   319  
   320  func (a *Aggregator) SetBlockNum(blockNum uint64) {
   321  	a.blockNum = blockNum
   322  	mxCurrentBlock.Set(blockNum)
   323  }
   324  
   325  func (a *Aggregator) SetWorkers(i int) {
   326  	a.accounts.compressWorkers = i
   327  	a.storage.compressWorkers = i
   328  	a.code.compressWorkers = i
   329  	a.commitment.compressWorkers = i
   330  	a.logAddrs.compressWorkers = i
   331  	a.logTopics.compressWorkers = i
   332  	a.tracesFrom.compressWorkers = i
   333  	a.tracesTo.compressWorkers = i
   334  }
   335  
   336  func (a *Aggregator) SetCommitmentMode(mode CommitmentMode) {
   337  	a.commitment.mode = mode
   338  }
   339  
   340  func (a *Aggregator) EndTxNumMinimax() uint64 {
   341  	min := a.accounts.endTxNumMinimax()
   342  	if txNum := a.storage.endTxNumMinimax(); txNum < min {
   343  		min = txNum
   344  	}
   345  	if txNum := a.code.endTxNumMinimax(); txNum < min {
   346  		min = txNum
   347  	}
   348  	if txNum := a.commitment.endTxNumMinimax(); txNum < min {
   349  		min = txNum
   350  	}
   351  	if txNum := a.logAddrs.endTxNumMinimax(); txNum < min {
   352  		min = txNum
   353  	}
   354  	if txNum := a.logTopics.endTxNumMinimax(); txNum < min {
   355  		min = txNum
   356  	}
   357  	if txNum := a.tracesFrom.endTxNumMinimax(); txNum < min {
   358  		min = txNum
   359  	}
   360  	if txNum := a.tracesTo.endTxNumMinimax(); txNum < min {
   361  		min = txNum
   362  	}
   363  	return min
   364  }
   365  
   366  func (a *Aggregator) DomainEndTxNumMinimax() uint64 {
   367  	min := a.accounts.endTxNumMinimax()
   368  	if txNum := a.storage.endTxNumMinimax(); txNum < min {
   369  		min = txNum
   370  	}
   371  	if txNum := a.code.endTxNumMinimax(); txNum < min {
   372  		min = txNum
   373  	}
   374  	if txNum := a.commitment.endTxNumMinimax(); txNum < min {
   375  		min = txNum
   376  	}
   377  	return min
   378  }
   379  
   380  func (a *Aggregator) SeekCommitment() (blockNum, txNum uint64, err error) {
   381  	filesTxNum := a.EndTxNumMinimax()
   382  	blockNum, txNum, err = a.commitment.SeekCommitment(a.aggregationStep, filesTxNum)
   383  	if err != nil {
   384  		return 0, 0, err
   385  	}
   386  	if txNum == 0 {
   387  		return
   388  	}
   389  	a.seekTxNum = txNum + 1
   390  	return blockNum, txNum + 1, nil
   391  }
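// Example (illustrative): if the latest stored commitment state was taken at
// txNum 999_999, SeekCommitment returns txNum 1_000_000, i.e. the first
// transaction to execute on restart.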
   392  
   393  func (a *Aggregator) mergeDomainSteps(ctx context.Context) error {
   394  	mergeStartedAt := time.Now()
   395  	maxEndTxNum := a.DomainEndTxNumMinimax()
   396  
   397  	var upmerges int
   398  	for {
   399  		a.defaultCtx.Close()
   400  		a.defaultCtx = a.MakeContext()
   401  
   402  		somethingMerged, err := a.mergeLoopStep(ctx, maxEndTxNum, 1)
   403  		if err != nil {
   404  			return err
   405  		}
   406  
   407  		if !somethingMerged {
   408  			break
   409  		}
   410  		upmerges++
   411  	}
   412  
   413  	if upmerges > 1 {
   414  		a.logger.Info("[stat] aggregation merged",
   415  			"upto_tx", maxEndTxNum,
   416  			"merge_took", time.Since(mergeStartedAt),
   417  			"merges_count", upmerges)
   418  	}
   419  
   420  	return nil
   421  }
   422  
   423  func (a *Aggregator) aggregate(ctx context.Context, step uint64) error {
   424  	var (
   425  		logEvery = time.NewTicker(time.Second * 30)
   426  		wg       sync.WaitGroup
   427  		errCh    = make(chan error, 8)
   428  		maxSpan  = StepsInBiggestFile * a.aggregationStep
   429  		txFrom   = step * a.aggregationStep
   430  		txTo     = (step + 1) * a.aggregationStep
   431  		workers  = 1
   432  
   433  		stepStartedAt = time.Now()
   434  	)
   435  
   436  	defer logEvery.Stop()
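	// Overview (summary comment, not in the original): for each domain we
	// collate this step's data synchronously, build and integrate the static
	// files in a goroutine, and prune the now-persisted range from the DB;
	// a separate goroutine merges domain steps; each inverted index then goes
	// through the same collate/build/prune cycle plus its own range merge;
	// finally we wait on the shared WaitGroup and return the first error seen.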
   437  
   438  	for _, d := range []*Domain{a.accounts, a.storage, a.code, a.commitment.Domain} {
   439  		wg.Add(1)
   440  
   441  		mxRunningCollations.Inc()
   442  		start := time.Now()
   443  		collation, err := d.collateStream(ctx, step, txFrom, txTo, d.tx)
   444  		mxRunningCollations.Dec()
   445  		mxCollateTook.UpdateDuration(start)
   446  
    447  		if err != nil {
    448  			collation.Close()
    449  			return fmt.Errorf("domain collation %q has failed: %w", d.filenameBase, err)
    450  		}
    451  
    452  		//mxCollationSize.Set(uint64(collation.valuesComp.Count()))
    453  		mxCollationSizeHist.Set(uint64(collation.historyComp.Count()))
   454  
   455  		go func(wg *sync.WaitGroup, d *Domain, collation Collation) {
   456  			defer wg.Done()
   457  			mxRunningMerges.Inc()
   458  
   459  			start := time.Now()
   460  			sf, err := d.buildFiles(ctx, step, collation, a.ps)
   461  			collation.Close()
   462  
   463  			if err != nil {
   464  				errCh <- err
   465  
   466  				sf.Close()
   467  				mxRunningMerges.Dec()
   468  				return
   469  			}
   470  
   471  			mxRunningMerges.Dec()
   472  
   473  			d.integrateFiles(sf, step*a.aggregationStep, (step+1)*a.aggregationStep)
   474  			d.stats.LastFileBuildingTook = time.Since(start)
   475  		}(&wg, d, collation)
   476  
   477  		mxPruningProgress.Add(2) // domain and history
   478  		if err := d.prune(ctx, step, txFrom, txTo, math.MaxUint64, logEvery); err != nil {
   479  			return err
   480  		}
   481  		mxPruningProgress.Dec()
   482  		mxPruningProgress.Dec()
   483  
   484  		mxPruneTook.Update(d.stats.LastPruneTook.Seconds())
   485  		mxPruneHistTook.Update(d.stats.LastPruneHistTook.Seconds())
   486  	}
   487  
    488  	// when domain files are built and the DB is pruned, we can merge them
   489  	wg.Add(1)
   490  	go func(wg *sync.WaitGroup) {
   491  		defer wg.Done()
   492  
   493  		if err := a.mergeDomainSteps(ctx); err != nil {
   494  			errCh <- err
   495  		}
   496  	}(&wg)
   497  
   498  	// indices are built concurrently
   499  	for _, d := range []*InvertedIndex{a.logTopics, a.logAddrs, a.tracesFrom, a.tracesTo} {
   500  		wg.Add(1)
   501  
   502  		mxRunningCollations.Inc()
   503  		start := time.Now()
   504  		collation, err := d.collate(ctx, step*a.aggregationStep, (step+1)*a.aggregationStep, d.tx)
   505  		mxRunningCollations.Dec()
   506  		mxCollateTook.UpdateDuration(start)
   507  
   508  		if err != nil {
   509  			return fmt.Errorf("index collation %q has failed: %w", d.filenameBase, err)
   510  		}
   511  
   512  		go func(wg *sync.WaitGroup, d *InvertedIndex, tx kv.Tx) {
   513  			defer wg.Done()
   514  
   515  			mxRunningMerges.Inc()
   516  			start := time.Now()
   517  
    518  			sf, err := d.buildFiles(ctx, step, collation, a.ps)
    519  			mxRunningMerges.Dec() // balance the Inc above even when buildFiles fails
    520  			if err != nil {
    521  				errCh <- err
    522  				sf.Close()
    523  				return
    524  			}
    525  
    526  			mxBuildTook.UpdateDuration(start)
   527  
   528  			d.integrateFiles(sf, step*a.aggregationStep, (step+1)*a.aggregationStep)
   529  
   530  			icx := d.MakeContext()
   531  			mxRunningMerges.Inc()
   532  
   533  			if err := d.mergeRangesUpTo(ctx, d.endTxNumMinimax(), maxSpan, workers, icx, a.ps); err != nil {
   534  				errCh <- err
   535  
   536  				mxRunningMerges.Dec()
   537  				icx.Close()
   538  				return
   539  			}
   540  
   541  			mxRunningMerges.Dec()
   542  			icx.Close()
   543  		}(&wg, d, d.tx)
   544  
   545  		mxPruningProgress.Inc()
   546  		startPrune := time.Now()
   547  		if err := d.prune(ctx, txFrom, txTo, math.MaxUint64, logEvery); err != nil {
   548  			return err
   549  		}
   550  		mxPruneTook.UpdateDuration(startPrune)
   551  		mxPruningProgress.Dec()
   552  	}
   553  
   554  	go func() {
   555  		wg.Wait()
   556  		close(errCh)
   557  	}()
   558  
   559  	for err := range errCh {
   560  		a.logger.Warn("domain collate-buildFiles failed", "err", err)
   561  		return fmt.Errorf("domain collate-build failed: %w", err)
   562  	}
   563  
   564  	a.logger.Info("[stat] aggregation is finished",
    565  		"range", fmt.Sprintf("%.2fM-%.2fM", float64(txFrom)/1e6, float64(txTo)/1e6),
   566  		"took", time.Since(stepStartedAt))
   567  
   568  	mxStepTook.UpdateDuration(stepStartedAt)
   569  
   570  	return nil
   571  }
   572  
   573  func (a *Aggregator) mergeLoopStep(ctx context.Context, maxEndTxNum uint64, workers int) (somethingDone bool, err error) {
   574  	closeAll := true
   575  	mergeStartedAt := time.Now()
   576  
   577  	maxSpan := a.aggregationStep * StepsInBiggestFile
   578  	r := a.findMergeRange(maxEndTxNum, maxSpan)
   579  	if !r.any() {
   580  		return false, nil
   581  	}
   582  
   583  	outs := a.staticFilesInRange(r, a.defaultCtx)
   584  	defer func() {
   585  		if closeAll {
   586  			outs.Close()
   587  		}
   588  	}()
   589  
   590  	in, err := a.mergeFiles(ctx, outs, r, workers)
   591  	if err != nil {
   592  		return true, err
   593  	}
   594  	defer func() {
   595  		if closeAll {
   596  			in.Close()
   597  		}
   598  	}()
   599  	a.integrateMergedFiles(outs, in)
   600  	a.cleanAfterNewFreeze(in)
   601  	closeAll = false
   602  
   603  	for _, s := range []DomainStats{a.accounts.stats, a.code.stats, a.storage.stats} {
   604  		mxBuildTook.Update(s.LastFileBuildingTook.Seconds())
   605  	}
   606  
   607  	a.logger.Info("[stat] finished merge step",
   608  		"upto_tx", maxEndTxNum, "merge_step_took", time.Since(mergeStartedAt))
   609  
   610  	return true, nil
   611  }
   612  
   613  type Ranges struct {
   614  	accounts   DomainRanges
   615  	storage    DomainRanges
   616  	code       DomainRanges
   617  	commitment DomainRanges
   618  }
   619  
   620  func (r Ranges) String() string {
   621  	return fmt.Sprintf("accounts=%s, storage=%s, code=%s, commitment=%s", r.accounts.String(), r.storage.String(), r.code.String(), r.commitment.String())
   622  }
   623  
   624  func (r Ranges) any() bool {
   625  	return r.accounts.any() || r.storage.any() || r.code.any() || r.commitment.any()
   626  }
   627  
   628  func (a *Aggregator) findMergeRange(maxEndTxNum, maxSpan uint64) Ranges {
   629  	var r Ranges
   630  	r.accounts = a.accounts.findMergeRange(maxEndTxNum, maxSpan)
   631  	r.storage = a.storage.findMergeRange(maxEndTxNum, maxSpan)
   632  	r.code = a.code.findMergeRange(maxEndTxNum, maxSpan)
   633  	r.commitment = a.commitment.findMergeRange(maxEndTxNum, maxSpan)
   634  	//if r.any() {
   635  	//log.Info(fmt.Sprintf("findMergeRange(%d, %d)=%+v\n", maxEndTxNum, maxSpan, r))
   636  	//}
   637  	return r
   638  }
   639  
   640  type SelectedStaticFiles struct {
   641  	accounts       []*filesItem
   642  	accountsIdx    []*filesItem
   643  	accountsHist   []*filesItem
   644  	storage        []*filesItem
   645  	storageIdx     []*filesItem
   646  	storageHist    []*filesItem
   647  	code           []*filesItem
   648  	codeIdx        []*filesItem
   649  	codeHist       []*filesItem
   650  	commitment     []*filesItem
   651  	commitmentIdx  []*filesItem
   652  	commitmentHist []*filesItem
   653  	codeI          int
   654  	storageI       int
   655  	accountsI      int
   656  	commitmentI    int
   657  }
   658  
   659  func (sf SelectedStaticFiles) Close() {
   660  	for _, group := range [][]*filesItem{
   661  		sf.accounts, sf.accountsIdx, sf.accountsHist,
   662  		sf.storage, sf.storageIdx, sf.storageHist,
   663  		sf.code, sf.codeIdx, sf.codeHist,
   664  		sf.commitment, sf.commitmentIdx, sf.commitmentHist,
   665  	} {
   666  		for _, item := range group {
   667  			if item != nil {
   668  				if item.decompressor != nil {
   669  					item.decompressor.Close()
   670  				}
   671  				if item.index != nil {
   672  					item.index.Close()
   673  				}
   674  				if item.bindex != nil {
   675  					item.bindex.Close()
   676  				}
   677  			}
   678  		}
   679  	}
   680  }
   681  
   682  func (a *Aggregator) staticFilesInRange(r Ranges, ac *AggregatorContext) SelectedStaticFiles {
   683  	var sf SelectedStaticFiles
   684  	if r.accounts.any() {
   685  		sf.accounts, sf.accountsIdx, sf.accountsHist, sf.accountsI = ac.accounts.staticFilesInRange(r.accounts)
   686  	}
   687  	if r.storage.any() {
   688  		sf.storage, sf.storageIdx, sf.storageHist, sf.storageI = ac.storage.staticFilesInRange(r.storage)
   689  	}
   690  	if r.code.any() {
   691  		sf.code, sf.codeIdx, sf.codeHist, sf.codeI = ac.code.staticFilesInRange(r.code)
   692  	}
   693  	if r.commitment.any() {
   694  		sf.commitment, sf.commitmentIdx, sf.commitmentHist, sf.commitmentI = ac.commitment.staticFilesInRange(r.commitment)
   695  	}
   696  	return sf
   697  }
   698  
   699  type MergedFiles struct {
   700  	accounts                      *filesItem
   701  	accountsIdx, accountsHist     *filesItem
   702  	storage                       *filesItem
   703  	storageIdx, storageHist       *filesItem
   704  	code                          *filesItem
   705  	codeIdx, codeHist             *filesItem
   706  	commitment                    *filesItem
   707  	commitmentIdx, commitmentHist *filesItem
   708  }
   709  
   710  func (mf MergedFiles) Close() {
   711  	for _, item := range []*filesItem{
   712  		mf.accounts, mf.accountsIdx, mf.accountsHist,
   713  		mf.storage, mf.storageIdx, mf.storageHist,
   714  		mf.code, mf.codeIdx, mf.codeHist,
   715  		mf.commitment, mf.commitmentIdx, mf.commitmentHist,
   716  		//mf.logAddrs, mf.logTopics, mf.tracesFrom, mf.tracesTo,
   717  	} {
   718  		if item != nil {
   719  			if item.decompressor != nil {
   720  				item.decompressor.Close()
   721  			}
    722  			if item.index != nil {
   723  				item.index.Close()
   724  			}
   725  			if item.bindex != nil {
   726  				item.bindex.Close()
   727  			}
   728  		}
   729  	}
   730  }
   731  
   732  func (a *Aggregator) mergeFiles(ctx context.Context, files SelectedStaticFiles, r Ranges, workers int) (MergedFiles, error) {
   733  	started := time.Now()
   734  	defer func(t time.Time) {
    735  		a.logger.Info("[snapshots] domain files have been merged",
   736  			"range", fmt.Sprintf("%d-%d", r.accounts.valuesStartTxNum/a.aggregationStep, r.accounts.valuesEndTxNum/a.aggregationStep),
   737  			"took", time.Since(t))
   738  	}(started)
   739  
   740  	var mf MergedFiles
   741  	closeFiles := true
   742  	defer func() {
   743  		if closeFiles {
   744  			mf.Close()
   745  		}
   746  	}()
   747  
   748  	var (
   749  		errCh      = make(chan error, 4)
   750  		wg         sync.WaitGroup
   751  		predicates sync.WaitGroup
   752  	)
   753  
   754  	wg.Add(4)
   755  	predicates.Add(2)
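	// Note (added commentary): `predicates` lets the code, accounts and
	// storage merges run concurrently while the commitment merge below waits
	// for accounts and storage to finish, since the commitment merge reads
	// the freshly merged account/storage files.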
   756  
   757  	go func() {
   758  		mxRunningMerges.Inc()
   759  		defer mxRunningMerges.Dec()
   760  		defer wg.Done()
   761  
   762  		var err error
   763  		if r.code.any() {
   764  			if mf.code, mf.codeIdx, mf.codeHist, err = a.code.mergeFiles(ctx, files.code, files.codeIdx, files.codeHist, r.code, workers, a.ps); err != nil {
   765  				errCh <- err
   766  			}
   767  		}
   768  	}()
   769  
   770  	go func(predicates *sync.WaitGroup) {
   771  		mxRunningMerges.Inc()
   772  		defer mxRunningMerges.Dec()
   773  
   774  		defer wg.Done()
   775  		defer predicates.Done()
   776  		var err error
   777  		if r.accounts.any() {
   778  			if mf.accounts, mf.accountsIdx, mf.accountsHist, err = a.accounts.mergeFiles(ctx, files.accounts, files.accountsIdx, files.accountsHist, r.accounts, workers, a.ps); err != nil {
   779  				errCh <- err
   780  			}
   781  		}
   782  	}(&predicates)
   783  	go func(predicates *sync.WaitGroup) {
   784  		mxRunningMerges.Inc()
   785  		defer mxRunningMerges.Dec()
   786  
   787  		defer wg.Done()
   788  		defer predicates.Done()
   789  		var err error
   790  		if r.storage.any() {
   791  			if mf.storage, mf.storageIdx, mf.storageHist, err = a.storage.mergeFiles(ctx, files.storage, files.storageIdx, files.storageHist, r.storage, workers, a.ps); err != nil {
   792  				errCh <- err
   793  			}
   794  		}
   795  	}(&predicates)
   796  
   797  	go func(predicates *sync.WaitGroup) {
   798  		defer wg.Done()
   799  		predicates.Wait()
   800  
   801  		mxRunningMerges.Inc()
   802  		defer mxRunningMerges.Dec()
   803  
   804  		var err error
   805  		// requires storage|accounts to be merged at this point
   806  		if r.commitment.any() {
   807  			if mf.commitment, mf.commitmentIdx, mf.commitmentHist, err = a.commitment.mergeFiles(ctx, files, mf, r.commitment, workers, a.ps); err != nil {
   808  				errCh <- err
   809  			}
   810  		}
   811  	}(&predicates)
   812  
   813  	go func() {
   814  		wg.Wait()
   815  		close(errCh)
   816  	}()
   817  
   818  	var lastError error
   819  	for err := range errCh {
   820  		lastError = err
   821  	}
   822  	if lastError == nil {
   823  		closeFiles = false
   824  	}
   825  	return mf, lastError
   826  }
   827  
   828  func (a *Aggregator) integrateMergedFiles(outs SelectedStaticFiles, in MergedFiles) {
   829  	a.accounts.integrateMergedFiles(outs.accounts, outs.accountsIdx, outs.accountsHist, in.accounts, in.accountsIdx, in.accountsHist)
   830  	a.storage.integrateMergedFiles(outs.storage, outs.storageIdx, outs.storageHist, in.storage, in.storageIdx, in.storageHist)
   831  	a.code.integrateMergedFiles(outs.code, outs.codeIdx, outs.codeHist, in.code, in.codeIdx, in.codeHist)
   832  	a.commitment.integrateMergedFiles(outs.commitment, outs.commitmentIdx, outs.commitmentHist, in.commitment, in.commitmentIdx, in.commitmentHist)
   833  }
   834  
   835  func (a *Aggregator) cleanAfterNewFreeze(in MergedFiles) {
   836  	a.accounts.cleanAfterFreeze(in.accountsHist.endTxNum)
   837  	a.storage.cleanAfterFreeze(in.storageHist.endTxNum)
   838  	a.code.cleanAfterFreeze(in.codeHist.endTxNum)
   839  	a.commitment.cleanAfterFreeze(in.commitmentHist.endTxNum)
   840  }
   841  
   842  // ComputeCommitment evaluates commitment for processed state.
   843  // If `saveStateAfter`=true, then trie state will be saved to DB after commitment evaluation.
   844  func (a *Aggregator) ComputeCommitment(saveStateAfter, trace bool) (rootHash []byte, err error) {
   845  	// if commitment mode is Disabled, there will be nothing to compute on.
   846  	mxCommitmentRunning.Inc()
   847  	rootHash, branchNodeUpdates, err := a.commitment.ComputeCommitment(trace)
   848  	mxCommitmentRunning.Dec()
   849  
   850  	if err != nil {
   851  		return nil, err
   852  	}
   853  	if a.seekTxNum > a.txNum {
   854  		saveStateAfter = false
   855  	}
   856  
   857  	mxCommitmentKeys.Add(int(a.commitment.comKeys))
   858  	mxCommitmentTook.Update(a.commitment.comTook.Seconds())
   859  
   860  	defer func(t time.Time) { mxCommitmentWriteTook.UpdateDuration(t) }(time.Now())
   861  
   862  	for pref, update := range branchNodeUpdates {
   863  		prefix := []byte(pref)
   864  
   865  		stateValue, err := a.defaultCtx.ReadCommitment(prefix, a.rwTx)
   866  		if err != nil {
   867  			return nil, err
   868  		}
   869  		mxCommitmentUpdates.Inc()
   870  		stated := commitment.BranchData(stateValue)
   871  		merged, err := a.commitment.branchMerger.Merge(stated, update)
   872  		if err != nil {
   873  			return nil, err
   874  		}
   875  		if bytes.Equal(stated, merged) {
   876  			continue
   877  		}
   878  		if trace {
   879  			fmt.Printf("computeCommitment merge [%x] [%x]+[%x]=>[%x]\n", prefix, stated, update, merged)
   880  		}
   881  		if err = a.UpdateCommitmentData(prefix, merged); err != nil {
   882  			return nil, err
   883  		}
   884  		mxCommitmentUpdatesApplied.Inc()
   885  	}
   886  
   887  	if saveStateAfter {
   888  		if err := a.commitment.storeCommitmentState(a.blockNum, a.txNum); err != nil {
   889  			return nil, err
   890  		}
   891  	}
   892  
   893  	return rootHash, nil
   894  }
   895  
   896  // AggregatedRoots provides a channel that receives the commitment root hash each time aggregation occurs.
   897  func (a *Aggregator) AggregatedRoots() chan [length.Hash]byte {
   898  	return a.stepDoneNotice
   899  }
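// A hypothetical consumer (sketch; `agg` and `logger` come from the caller).
// notifyAggregated below sends without blocking on a 1-buffered channel, so a
// slow consumer can miss intermediate roots:
//
//	go func() {
//		for root := range agg.AggregatedRoots() {
//			logger.Info("step aggregated", "root", fmt.Sprintf("%x", root))
//		}
//	}()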
   900  
   901  func (a *Aggregator) notifyAggregated(rootHash []byte) {
   902  	rh := (*[length.Hash]byte)(rootHash)
   903  	select {
   904  	case a.stepDoneNotice <- *rh:
   905  	default:
   906  	}
   907  }
   908  
   909  func (a *Aggregator) ReadyToFinishTx() bool {
   910  	return (a.txNum+1)%a.aggregationStep == 0 && a.seekTxNum < a.txNum
   911  }
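// Example for ReadyToFinishTx (illustrative): with aggregationStep = 500_000,
// txNum 499_999 is the last transaction of step 0, so the condition
// (txNum+1)%aggregationStep == 0 holds there (provided seekTxNum < txNum).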
   912  
   913  func (a *Aggregator) FinishTx() (err error) {
   914  	atomic.AddUint64(&a.stats.TxCount, 1)
   915  
   916  	if !a.ReadyToFinishTx() {
   917  		return nil
   918  	}
   919  
   920  	mxRunningMerges.Inc()
   921  	defer mxRunningMerges.Dec()
   922  
   923  	a.commitment.patriciaTrie.ResetFns(a.defaultCtx.branchFn, a.defaultCtx.accountFn, a.defaultCtx.storageFn)
   924  	rootHash, err := a.ComputeCommitment(true, false)
   925  	if err != nil {
   926  		return err
   927  	}
   928  	step := a.txNum / a.aggregationStep
   929  	mxStepCurrent.Set(step)
   930  
   931  	if step == 0 {
   932  		a.notifyAggregated(rootHash)
   933  		return nil
   934  	}
   935  	step-- // Leave one step worth in the DB
   936  
   937  	ctx := context.Background()
   938  	if err := a.Flush(ctx); err != nil {
   939  		return err
   940  	}
   941  
   942  	if err := a.aggregate(ctx, step); err != nil {
   943  		return err
   944  	}
   945  
   946  	a.notifyAggregated(rootHash)
   947  	return nil
   948  }
   949  
   950  func (a *Aggregator) UpdateAccountData(addr []byte, account []byte) error {
   951  	a.commitment.TouchPlainKey(addr, account, a.commitment.TouchPlainKeyAccount)
   952  	return a.accounts.Put(addr, nil, account)
   953  }
   954  
   955  func (a *Aggregator) UpdateAccountCode(addr []byte, code []byte) error {
   956  	a.commitment.TouchPlainKey(addr, code, a.commitment.TouchPlainKeyCode)
   957  	if len(code) == 0 {
   958  		return a.code.Delete(addr, nil)
   959  	}
   960  	return a.code.Put(addr, nil, code)
   961  }
   962  
   963  func (a *Aggregator) UpdateCommitmentData(prefix []byte, code []byte) error {
   964  	return a.commitment.Put(prefix, nil, code)
   965  }
   966  
   967  func (a *Aggregator) DeleteAccount(addr []byte) error {
   968  	a.commitment.TouchPlainKey(addr, nil, a.commitment.TouchPlainKeyAccount)
   969  
   970  	if err := a.accounts.Delete(addr, nil); err != nil {
   971  		return err
   972  	}
   973  	if err := a.code.Delete(addr, nil); err != nil {
   974  		return err
   975  	}
   976  	var e error
   977  	if err := a.storage.defaultDc.IteratePrefix(addr, func(k, _ []byte) {
   978  		a.commitment.TouchPlainKey(k, nil, a.commitment.TouchPlainKeyStorage)
   979  		if e == nil {
   980  			e = a.storage.Delete(k, nil)
   981  		}
   982  	}); err != nil {
   983  		return err
   984  	}
   985  	return e
   986  }
   987  
   988  func (a *Aggregator) WriteAccountStorage(addr, loc []byte, value []byte) error {
   989  	composite := make([]byte, len(addr)+len(loc))
   990  	copy(composite, addr)
   991  	copy(composite[length.Addr:], loc)
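	// Key layout (assuming a 20-byte address and a 32-byte location):
	//
	//	composite[0:20]  = addr
	//	composite[20:52] = loc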
   992  
   993  	a.commitment.TouchPlainKey(composite, value, a.commitment.TouchPlainKeyStorage)
   994  	if len(value) == 0 {
   995  		return a.storage.Delete(addr, loc)
   996  	}
   997  	return a.storage.Put(addr, loc, value)
   998  }
   999  
  1000  func (a *Aggregator) AddTraceFrom(addr []byte) error {
  1001  	return a.tracesFrom.Add(addr)
  1002  }
  1003  
  1004  func (a *Aggregator) AddTraceTo(addr []byte) error {
  1005  	return a.tracesTo.Add(addr)
  1006  }
  1007  
  1008  func (a *Aggregator) AddLogAddr(addr []byte) error {
  1009  	return a.logAddrs.Add(addr)
  1010  }
  1011  
  1012  func (a *Aggregator) AddLogTopic(topic []byte) error {
  1013  	return a.logTopics.Add(topic)
  1014  }
  1015  
  1016  // StartWrites - pattern: `defer agg.StartWrites().FinishWrites()`
  1017  func (a *Aggregator) StartWrites() *Aggregator {
  1018  	a.accounts.StartWrites()
  1019  	a.storage.StartWrites()
  1020  	a.code.StartWrites()
  1021  	a.commitment.StartWrites()
  1022  	a.logAddrs.StartWrites()
  1023  	a.logTopics.StartWrites()
  1024  	a.tracesFrom.StartWrites()
  1025  	a.tracesTo.StartWrites()
  1026  
  1027  	if a.defaultCtx != nil {
  1028  		a.defaultCtx.Close()
  1029  	}
  1030  	a.defaultCtx = &AggregatorContext{
  1031  		a:          a,
  1032  		accounts:   a.accounts.defaultDc,
  1033  		storage:    a.storage.defaultDc,
  1034  		code:       a.code.defaultDc,
  1035  		commitment: a.commitment.defaultDc,
  1036  		logAddrs:   a.logAddrs.MakeContext(),
  1037  		logTopics:  a.logTopics.MakeContext(),
  1038  		tracesFrom: a.tracesFrom.MakeContext(),
  1039  		tracesTo:   a.tracesTo.MakeContext(),
  1040  	}
  1041  	a.commitment.patriciaTrie.ResetFns(a.defaultCtx.branchFn, a.defaultCtx.accountFn, a.defaultCtx.storageFn)
  1042  	return a
  1043  }
  1044  
  1045  func (a *Aggregator) FinishWrites() {
  1046  	a.accounts.FinishWrites()
  1047  	a.storage.FinishWrites()
  1048  	a.code.FinishWrites()
  1049  	a.commitment.FinishWrites()
  1050  	a.logAddrs.FinishWrites()
  1051  	a.logTopics.FinishWrites()
  1052  	a.tracesFrom.FinishWrites()
  1053  	a.tracesTo.FinishWrites()
  1054  }
  1055  
  1056  // Flush - must be called before Collate if you did any writes
  1057  func (a *Aggregator) Flush(ctx context.Context) error {
  1058  	flushers := []flusher{
  1059  		a.accounts.Rotate(),
  1060  		a.storage.Rotate(),
  1061  		a.code.Rotate(),
  1062  		a.commitment.Domain.Rotate(),
  1063  		a.logAddrs.Rotate(),
  1064  		a.logTopics.Rotate(),
  1065  		a.tracesFrom.Rotate(),
  1066  		a.tracesTo.Rotate(),
  1067  	}
  1068  	defer func(t time.Time) { a.logger.Debug("[snapshots] history flush", "took", time.Since(t)) }(time.Now())
  1069  	for _, f := range flushers {
  1070  		if err := f.Flush(ctx, a.rwTx); err != nil {
  1071  			return err
  1072  		}
  1073  	}
  1074  	return nil
  1075  }
  1076  
  1077  type FilesStats struct {
  1078  	HistoryReads uint64
  1079  	TotalReads   uint64
  1080  	IdxAccess    time.Duration
  1081  	TxCount      uint64
  1082  	FilesCount   uint64
  1083  	IdxSize      uint64
  1084  	DataSize     uint64
  1085  }
  1086  
  1087  func (a *Aggregator) Stats() FilesStats {
  1088  	res := a.stats
  1089  	stat := a.GetAndResetStats()
  1090  	res.IdxSize = stat.IndexSize
  1091  	res.DataSize = stat.DataSize
  1092  	res.FilesCount = stat.FilesCount
  1093  	res.HistoryReads = stat.HistoryQueries.Load()
  1094  	res.TotalReads = stat.TotalQueries.Load()
  1095  	res.IdxAccess = stat.EfSearchTime
  1096  	return res
  1097  }
  1098  
  1099  type AggregatorContext struct {
  1100  	a          *Aggregator
  1101  	accounts   *DomainContext
  1102  	storage    *DomainContext
  1103  	code       *DomainContext
  1104  	commitment *DomainContext
  1105  	logAddrs   *InvertedIndexContext
  1106  	logTopics  *InvertedIndexContext
  1107  	tracesFrom *InvertedIndexContext
  1108  	tracesTo   *InvertedIndexContext
  1109  	keyBuf     []byte
  1110  }
  1111  
  1112  func (a *Aggregator) MakeContext() *AggregatorContext {
  1113  	return &AggregatorContext{
  1114  		a:          a,
  1115  		accounts:   a.accounts.MakeContext(),
  1116  		storage:    a.storage.MakeContext(),
  1117  		code:       a.code.MakeContext(),
  1118  		commitment: a.commitment.MakeContext(),
  1119  		logAddrs:   a.logAddrs.MakeContext(),
  1120  		logTopics:  a.logTopics.MakeContext(),
  1121  		tracesFrom: a.tracesFrom.MakeContext(),
  1122  		tracesTo:   a.tracesTo.MakeContext(),
  1123  	}
  1124  }
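// A minimal read sketch (illustrative only; `addr` and `roTx` come from the
// caller):
//
//	ac := agg.MakeContext()
//	defer ac.Close()
//	acc, err := ac.ReadAccountData(addr, roTx)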
  1125  
  1126  func (ac *AggregatorContext) ReadAccountData(addr []byte, roTx kv.Tx) ([]byte, error) {
  1127  	return ac.accounts.Get(addr, nil, roTx)
  1128  }
  1129  
  1130  func (ac *AggregatorContext) ReadAccountDataBeforeTxNum(addr []byte, txNum uint64, roTx kv.Tx) ([]byte, error) {
  1131  	v, err := ac.accounts.GetBeforeTxNum(addr, txNum, roTx)
  1132  	return v, err
  1133  }
  1134  
  1135  func (ac *AggregatorContext) ReadAccountStorage(addr []byte, loc []byte, roTx kv.Tx) ([]byte, error) {
  1136  	return ac.storage.Get(addr, loc, roTx)
  1137  }
  1138  
  1139  func (ac *AggregatorContext) ReadAccountStorageBeforeTxNum(addr []byte, loc []byte, txNum uint64, roTx kv.Tx) ([]byte, error) {
  1140  	if cap(ac.keyBuf) < len(addr)+len(loc) {
  1141  		ac.keyBuf = make([]byte, len(addr)+len(loc))
  1142  	} else if len(ac.keyBuf) != len(addr)+len(loc) {
  1143  		ac.keyBuf = ac.keyBuf[:len(addr)+len(loc)]
  1144  	}
  1145  	copy(ac.keyBuf, addr)
  1146  	copy(ac.keyBuf[len(addr):], loc)
  1147  	v, err := ac.storage.GetBeforeTxNum(ac.keyBuf, txNum, roTx)
  1148  	return v, err
  1149  }
  1150  
  1151  func (ac *AggregatorContext) ReadAccountCode(addr []byte, roTx kv.Tx) ([]byte, error) {
  1152  	return ac.code.Get(addr, nil, roTx)
  1153  }
  1154  
  1155  func (ac *AggregatorContext) ReadCommitment(addr []byte, roTx kv.Tx) ([]byte, error) {
  1156  	return ac.commitment.Get(addr, nil, roTx)
  1157  }
  1158  
  1159  func (ac *AggregatorContext) ReadCommitmentBeforeTxNum(addr []byte, txNum uint64, roTx kv.Tx) ([]byte, error) {
  1160  	v, err := ac.commitment.GetBeforeTxNum(addr, txNum, roTx)
  1161  	return v, err
  1162  }
  1163  
  1164  func (ac *AggregatorContext) ReadAccountCodeBeforeTxNum(addr []byte, txNum uint64, roTx kv.Tx) ([]byte, error) {
  1165  	v, err := ac.code.GetBeforeTxNum(addr, txNum, roTx)
  1166  	return v, err
  1167  }
  1168  
  1169  func (ac *AggregatorContext) ReadAccountCodeSize(addr []byte, roTx kv.Tx) (int, error) {
  1170  	code, err := ac.code.Get(addr, nil, roTx)
  1171  	if err != nil {
  1172  		return 0, err
  1173  	}
  1174  	return len(code), nil
  1175  }
  1176  
  1177  func (ac *AggregatorContext) ReadAccountCodeSizeBeforeTxNum(addr []byte, txNum uint64, roTx kv.Tx) (int, error) {
  1178  	code, err := ac.code.GetBeforeTxNum(addr, txNum, roTx)
  1179  	if err != nil {
  1180  		return 0, err
  1181  	}
  1182  	return len(code), nil
  1183  }
  1184  
  1185  func (ac *AggregatorContext) branchFn(prefix []byte) ([]byte, error) {
  1186  	// Look in the summary table first
  1187  	stateValue, err := ac.ReadCommitment(prefix, ac.a.rwTx)
  1188  	if err != nil {
  1189  		return nil, fmt.Errorf("failed read branch %x: %w", commitment.CompactedKeyToHex(prefix), err)
  1190  	}
  1191  	if stateValue == nil {
  1192  		return nil, nil
  1193  	}
  1194  	// fmt.Printf("Returning branch data prefix [%x], mergeVal=[%x]\n", commitment.CompactedKeyToHex(prefix), stateValue)
  1195  	return stateValue[2:], nil // Skip touchMap but keep afterMap
  1196  }
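// Layout assumed above: a stored branch value is touchMap (2 bytes) ||
// afterMap (2 bytes) || cell data, so stateValue[2:] drops only the touchMap
// and keeps the afterMap for the trie.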
  1197  
  1198  func (ac *AggregatorContext) accountFn(plainKey []byte, cell *commitment.Cell) error {
  1199  	encAccount, err := ac.ReadAccountData(plainKey, ac.a.rwTx)
  1200  	if err != nil {
  1201  		return err
  1202  	}
  1203  	cell.Nonce = 0
  1204  	cell.Balance.Clear()
  1205  	copy(cell.CodeHash[:], commitment.EmptyCodeHash)
  1206  	if len(encAccount) > 0 {
  1207  		nonce, balance, chash := DecodeAccountBytes(encAccount)
  1208  		cell.Nonce = nonce
  1209  		cell.Balance.Set(balance)
  1210  		if chash != nil {
  1211  			copy(cell.CodeHash[:], chash)
  1212  		}
  1213  	}
  1214  
  1215  	code, err := ac.ReadAccountCode(plainKey, ac.a.rwTx)
  1216  	if err != nil {
  1217  		return err
  1218  	}
  1219  	if code != nil {
  1220  		ac.a.commitment.keccak.Reset()
  1221  		ac.a.commitment.keccak.Write(code)
  1222  		copy(cell.CodeHash[:], ac.a.commitment.keccak.Sum(nil))
  1223  	}
  1224  	cell.Delete = len(encAccount) == 0 && len(code) == 0
  1225  	return nil
  1226  }
  1227  
  1228  func (ac *AggregatorContext) storageFn(plainKey []byte, cell *commitment.Cell) error {
  1229  	// Look in the summary table first
  1230  	enc, err := ac.ReadAccountStorage(plainKey[:length.Addr], plainKey[length.Addr:], ac.a.rwTx)
  1231  	if err != nil {
  1232  		return err
  1233  	}
  1234  	cell.StorageLen = len(enc)
  1235  	copy(cell.Storage[:], enc)
  1236  	cell.Delete = cell.StorageLen == 0
  1237  	return nil
  1238  }
  1239  
  1240  func (ac *AggregatorContext) LogAddrIterator(addr []byte, startTxNum, endTxNum int, roTx kv.Tx) (iter.U64, error) {
  1241  	return ac.logAddrs.IdxRange(addr, startTxNum, endTxNum, order.Asc, -1, roTx)
  1242  }
  1243  
  1244  func (ac *AggregatorContext) LogTopicIterator(topic []byte, startTxNum, endTxNum int, roTx kv.Tx) (iter.U64, error) {
  1245  	return ac.logTopics.IdxRange(topic, startTxNum, endTxNum, order.Asc, -1, roTx)
  1246  }
  1247  
  1248  func (ac *AggregatorContext) TraceFromIterator(addr []byte, startTxNum, endTxNum int, roTx kv.Tx) (iter.U64, error) {
  1249  	return ac.tracesFrom.IdxRange(addr, startTxNum, endTxNum, order.Asc, -1, roTx)
  1250  }
  1251  
  1252  func (ac *AggregatorContext) TraceToIterator(addr []byte, startTxNum, endTxNum int, roTx kv.Tx) (iter.U64, error) {
  1253  	return ac.tracesTo.IdxRange(addr, startTxNum, endTxNum, order.Asc, -1, roTx)
  1254  }
  1255  
  1256  func (ac *AggregatorContext) Close() {
  1257  	ac.accounts.Close()
  1258  	ac.storage.Close()
  1259  	ac.code.Close()
  1260  	ac.commitment.Close()
  1261  	ac.logAddrs.Close()
  1262  	ac.logTopics.Close()
  1263  	ac.tracesFrom.Close()
  1264  	ac.tracesTo.Close()
  1265  }
  1266  
  1267  func DecodeAccountBytes(enc []byte) (nonce uint64, balance *uint256.Int, hash []byte) {
  1268  	balance = new(uint256.Int)
  1269  
  1270  	if len(enc) > 0 {
  1271  		pos := 0
  1272  		nonceBytes := int(enc[pos])
  1273  		pos++
  1274  		if nonceBytes > 0 {
  1275  			nonce = bytesToUint64(enc[pos : pos+nonceBytes])
  1276  			pos += nonceBytes
  1277  		}
  1278  		balanceBytes := int(enc[pos])
  1279  		pos++
  1280  		if balanceBytes > 0 {
  1281  			balance.SetBytes(enc[pos : pos+balanceBytes])
  1282  			pos += balanceBytes
  1283  		}
  1284  		codeHashBytes := int(enc[pos])
  1285  		pos++
  1286  		if codeHashBytes > 0 {
  1287  			hash = make([]byte, length.Hash)
  1288  			copy(hash, enc[pos:pos+codeHashBytes])
  1289  		}
  1290  	}
  1291  	return
  1292  }
  1293  
  1294  func EncodeAccountBytes(nonce uint64, balance *uint256.Int, hash []byte, incarnation uint64) []byte {
  1295  	l := 1
  1296  	if nonce > 0 {
  1297  		l += common.BitLenToByteLen(bits.Len64(nonce))
  1298  	}
  1299  	l++
  1300  	if !balance.IsZero() {
  1301  		l += balance.ByteLen()
  1302  	}
  1303  	l++
  1304  	if len(hash) == length.Hash {
  1305  		l += 32
  1306  	}
  1307  	l++
  1308  	if incarnation > 0 {
  1309  		l += common.BitLenToByteLen(bits.Len64(incarnation))
  1310  	}
  1311  	value := make([]byte, l)
  1312  	pos := 0
  1313  
  1314  	if nonce == 0 {
  1315  		value[pos] = 0
  1316  		pos++
  1317  	} else {
  1318  		nonceBytes := common.BitLenToByteLen(bits.Len64(nonce))
  1319  		value[pos] = byte(nonceBytes)
  1320  		n := nonce
  1321  		for i := nonceBytes; i > 0; i-- {
  1322  			value[pos+i] = byte(n)
  1323  			n >>= 8
  1324  		}
  1325  		pos += nonceBytes + 1
  1326  	}
  1327  	if balance.IsZero() {
  1328  		value[pos] = 0
  1329  		pos++
  1330  	} else {
  1331  		balanceBytes := balance.ByteLen()
  1332  		value[pos] = byte(balanceBytes)
  1333  		pos++
  1334  		balance.WriteToSlice(value[pos : pos+balanceBytes])
  1335  		pos += balanceBytes
  1336  	}
  1337  	if len(hash) == 0 {
  1338  		value[pos] = 0
  1339  		pos++
  1340  	} else {
  1341  		value[pos] = 32
  1342  		pos++
  1343  		copy(value[pos:pos+32], hash)
  1344  		pos += 32
  1345  	}
  1346  	if incarnation == 0 {
  1347  		value[pos] = 0
  1348  	} else {
  1349  		incBytes := common.BitLenToByteLen(bits.Len64(incarnation))
  1350  		value[pos] = byte(incBytes)
  1351  		var inc = incarnation
  1352  		for i := incBytes; i > 0; i-- {
  1353  			value[pos+i] = byte(inc)
  1354  			inc >>= 8
  1355  		}
  1356  	}
  1357  	return value
  1358  }
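// Worked example (derived from the encoding above): nonce=1, balance=10^18,
// nil code hash, incarnation=0 encodes to 13 bytes:
//
//	01 01                 // nonce: length byte then big-endian value
//	08 0de0b6b3a7640000   // balance: length byte then big-endian value
//	00                    // empty code hash
//	00                    // zero incarnation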
  1359  
  1360  func bytesToUint64(buf []byte) (x uint64) {
  1361  	for i, b := range buf {
  1362  		x = x<<8 + uint64(b)
  1363  		if i == 7 {
  1364  			return
  1365  		}
  1366  	}
  1367  	return
  1368  }