github.com/ledgerwatch/erigon-lib@v1.0.0/state/aggregator_v3.go

/*
   Copyright 2022 Erigon contributors

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package state

import (
	"context"
	"encoding/binary"
	"errors"
	"fmt"
	math2 "math"
	"runtime"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"github.com/RoaringBitmap/roaring/roaring64"
	common2 "github.com/ledgerwatch/erigon-lib/common"
	"github.com/ledgerwatch/erigon-lib/common/background"
	"github.com/ledgerwatch/erigon-lib/common/cmp"
	"github.com/ledgerwatch/erigon-lib/common/dbg"
	"github.com/ledgerwatch/erigon-lib/kv"
	"github.com/ledgerwatch/erigon-lib/kv/bitmapdb"
	"github.com/ledgerwatch/erigon-lib/kv/iter"
	"github.com/ledgerwatch/erigon-lib/kv/order"
	"github.com/ledgerwatch/log/v3"
	"golang.org/x/sync/errgroup"
)

type AggregatorV3 struct {
	rwTx             kv.RwTx
	db               kv.RoDB
	storage          *History
	tracesTo         *InvertedIndex
	backgroundResult *BackgroundResult
	code             *History
	logAddrs         *InvertedIndex
	logTopics        *InvertedIndex
	tracesFrom       *InvertedIndex
	accounts         *History
	logPrefix        string
	dir              string
	tmpdir           string
	aggregationStep  uint64
	keepInDB         uint64

	minimaxTxNumInFiles atomic.Uint64

	filesMutationLock sync.Mutex
	// To keep the DB small we need to move data into small files ASAP.
	// That means the goroutine which creates small files must not be blocked by merge or indexing.
	buildingFiles           atomic.Bool
	mergingFiles            atomic.Bool
	buildingOptionalIndices atomic.Bool
	//warmupWorking          atomic.Bool
	ctx       context.Context
	ctxCancel context.CancelFunc

	needSaveFilesListInDB atomic.Bool
	wg                    sync.WaitGroup

	onFreeze OnFreezeFunc
	walLock  sync.RWMutex

	ps *background.ProgressSet

	// the next fields are set only when aggregator tracing is enabled; enable via env: TRACE_AGG=true
	leakDetector *dbg.LeakDetector
	logger       log.Logger
}

type OnFreezeFunc func(frozenFileNames []string)

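// NewAggregatorV3 wires up the histories and inverted indices that back the
// aggregator. A minimal usage sketch (the paths, step size, db and logger here
// are illustrative, not prescribed defaults):
//
//	agg, err := NewAggregatorV3(ctx, "/snapshots", "/tmp", 500_000, db, logger)
//	if err != nil {
//		// handle err
//	}
//	defer agg.Close()
//	if err := agg.OpenFolder(); err != nil {
//		// handle err
//	}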
func NewAggregatorV3(ctx context.Context, dir, tmpdir string, aggregationStep uint64, db kv.RoDB, logger log.Logger) (*AggregatorV3, error) {
	ctx, ctxCancel := context.WithCancel(ctx)
	a := &AggregatorV3{
		ctx:              ctx,
		ctxCancel:        ctxCancel,
		onFreeze:         func(frozenFileNames []string) {},
		dir:              dir,
		tmpdir:           tmpdir,
		aggregationStep:  aggregationStep,
		db:               db,
		keepInDB:         2 * aggregationStep,
		leakDetector:     dbg.NewLeakDetector("agg", dbg.SlowTx()),
		ps:               background.NewProgressSet(),
		backgroundResult: &BackgroundResult{},
		logger:           logger,
	}
	var err error
	if a.accounts, err = NewHistory(dir, a.tmpdir, aggregationStep, "accounts", kv.TblAccountHistoryKeys, kv.TblAccountIdx, kv.TblAccountHistoryVals, false, nil, false, logger); err != nil {
		return nil, err
	}
	if a.storage, err = NewHistory(dir, a.tmpdir, aggregationStep, "storage", kv.TblStorageHistoryKeys, kv.TblStorageIdx, kv.TblStorageHistoryVals, false, nil, false, logger); err != nil {
		return nil, err
	}
	if a.code, err = NewHistory(dir, a.tmpdir, aggregationStep, "code", kv.TblCodeHistoryKeys, kv.TblCodeIdx, kv.TblCodeHistoryVals, true, nil, true, logger); err != nil {
		return nil, err
	}
	if a.logAddrs, err = NewInvertedIndex(dir, a.tmpdir, aggregationStep, "logaddrs", kv.TblLogAddressKeys, kv.TblLogAddressIdx, false, nil, logger); err != nil {
		return nil, err
	}
	if a.logTopics, err = NewInvertedIndex(dir, a.tmpdir, aggregationStep, "logtopics", kv.TblLogTopicsKeys, kv.TblLogTopicsIdx, false, nil, logger); err != nil {
		return nil, err
	}
	if a.tracesFrom, err = NewInvertedIndex(dir, a.tmpdir, aggregationStep, "tracesfrom", kv.TblTracesFromKeys, kv.TblTracesFromIdx, false, nil, logger); err != nil {
		return nil, err
	}
	if a.tracesTo, err = NewInvertedIndex(dir, a.tmpdir, aggregationStep, "tracesto", kv.TblTracesToKeys, kv.TblTracesToIdx, false, nil, logger); err != nil {
		return nil, err
	}
	a.recalcMaxTxNum()

	return a, nil
}
func (a *AggregatorV3) OnFreeze(f OnFreezeFunc) { a.onFreeze = f }

func (a *AggregatorV3) OpenFolder() error {
	a.filesMutationLock.Lock()
	defer a.filesMutationLock.Unlock()
	var err error
	if err = a.accounts.OpenFolder(); err != nil {
		return fmt.Errorf("OpenFolder: %w", err)
	}
	if err = a.storage.OpenFolder(); err != nil {
		return fmt.Errorf("OpenFolder: %w", err)
	}
	if err = a.code.OpenFolder(); err != nil {
		return fmt.Errorf("OpenFolder: %w", err)
	}
	if err = a.logAddrs.OpenFolder(); err != nil {
		return fmt.Errorf("OpenFolder: %w", err)
	}
	if err = a.logTopics.OpenFolder(); err != nil {
		return fmt.Errorf("OpenFolder: %w", err)
	}
	if err = a.tracesFrom.OpenFolder(); err != nil {
		return fmt.Errorf("OpenFolder: %w", err)
	}
	if err = a.tracesTo.OpenFolder(); err != nil {
		return fmt.Errorf("OpenFolder: %w", err)
	}
	a.recalcMaxTxNum()
	return nil
}
func (a *AggregatorV3) OpenList(fNames []string) error {
	a.filesMutationLock.Lock()
	defer a.filesMutationLock.Unlock()

	var err error
	if err = a.accounts.OpenList(fNames); err != nil {
		return err
	}
	if err = a.storage.OpenList(fNames); err != nil {
		return err
	}
	if err = a.code.OpenList(fNames); err != nil {
		return err
	}
	if err = a.logAddrs.OpenList(fNames); err != nil {
		return err
	}
	if err = a.logTopics.OpenList(fNames); err != nil {
		return err
	}
	if err = a.tracesFrom.OpenList(fNames); err != nil {
		return err
	}
	if err = a.tracesTo.OpenList(fNames); err != nil {
		return err
	}
	a.recalcMaxTxNum()
	return nil
}

func (a *AggregatorV3) Close() {
	a.ctxCancel()
	a.wg.Wait()

	a.filesMutationLock.Lock()
	defer a.filesMutationLock.Unlock()

	a.accounts.Close()
	a.storage.Close()
	a.code.Close()
	a.logAddrs.Close()
	a.logTopics.Close()
	a.tracesFrom.Close()
	a.tracesTo.Close()
}

// CleanDir - call it manually on startup of the main application (don't call it from utilities or other processes)
//   - removes files that were ignored when the aggregator was opened
//   - removes files marked as deleted that have no readers (usually the last reader removes files marked as deleted)
func (a *AggregatorV3) CleanDir() {
	a.accounts.deleteGarbageFiles()
	a.storage.deleteGarbageFiles()
	a.code.deleteGarbageFiles()
	a.logAddrs.deleteGarbageFiles()
	a.logTopics.deleteGarbageFiles()
	a.tracesFrom.deleteGarbageFiles()
	a.tracesTo.deleteGarbageFiles()

	ac := a.MakeContext()
	defer ac.Close()
	ac.a.accounts.cleanAfterFreeze(ac.accounts.frozenTo())
	ac.a.storage.cleanAfterFreeze(ac.storage.frozenTo())
	ac.a.code.cleanAfterFreeze(ac.code.frozenTo())
	ac.a.logAddrs.cleanAfterFreeze(ac.logAddrs.frozenTo())
	ac.a.logTopics.cleanAfterFreeze(ac.logTopics.frozenTo())
	ac.a.tracesFrom.cleanAfterFreeze(ac.tracesFrom.frozenTo())
	ac.a.tracesTo.cleanAfterFreeze(ac.tracesTo.frozenTo())
}

func (a *AggregatorV3) SetWorkers(i int) {
	a.accounts.compressWorkers = i
	a.storage.compressWorkers = i
	a.code.compressWorkers = i
	a.logAddrs.compressWorkers = i
	a.logTopics.compressWorkers = i
	a.tracesFrom.compressWorkers = i
	a.tracesTo.compressWorkers = i
}

func (a *AggregatorV3) HasBackgroundFilesBuild() bool { return a.ps.Has() }
func (a *AggregatorV3) BackgroundProgress() string    { return a.ps.String() }

func (a *AggregatorV3) Files() (res []string) {
	a.filesMutationLock.Lock()
	defer a.filesMutationLock.Unlock()

	res = append(res, a.accounts.Files()...)
	res = append(res, a.storage.Files()...)
	res = append(res, a.code.Files()...)
	res = append(res, a.logAddrs.Files()...)
	res = append(res, a.logTopics.Files()...)
	res = append(res, a.tracesFrom.Files()...)
	res = append(res, a.tracesTo.Files()...)
	return res
}
func (a *AggregatorV3) BuildOptionalMissedIndicesInBackground(ctx context.Context, workers int) {
	if ok := a.buildingOptionalIndices.CompareAndSwap(false, true); !ok {
		return
	}
	a.wg.Add(1)
	go func() {
		defer a.wg.Done()
		defer a.buildingOptionalIndices.Store(false)
		aggCtx := a.MakeContext()
		defer aggCtx.Close()
		if err := aggCtx.BuildOptionalMissedIndices(ctx, workers); err != nil {
			if errors.Is(err, context.Canceled) {
				return
			}
			log.Warn("[snapshots] BuildOptionalMissedIndices", "err", err)
		}
	}()
}

func (ac *AggregatorV3Context) BuildOptionalMissedIndices(ctx context.Context, workers int) error {
	g, ctx := errgroup.WithContext(ctx)
	g.SetLimit(workers)
	if ac.accounts != nil {
		g.Go(func() error { return ac.accounts.BuildOptionalMissedIndices(ctx) })
	}
	if ac.storage != nil {
		g.Go(func() error { return ac.storage.BuildOptionalMissedIndices(ctx) })
	}
	if ac.code != nil {
		g.Go(func() error { return ac.code.BuildOptionalMissedIndices(ctx) })
	}
	return g.Wait()
}

func (a *AggregatorV3) BuildMissedIndices(ctx context.Context, workers int) error {
	startIndexingTime := time.Now()
	{
		ps := background.NewProgressSet()

		g, ctx := errgroup.WithContext(ctx)
		g.SetLimit(workers)
		go func() {
			logEvery := time.NewTicker(20 * time.Second)
			defer logEvery.Stop()
			for {
				select {
				case <-ctx.Done():
					return
				case <-logEvery.C:
					var m runtime.MemStats
					dbg.ReadMemStats(&m)
					log.Info("[snapshots] Indexing", "progress", ps.String(), "total-indexing-time", time.Since(startIndexingTime).Round(time.Second).String(), "alloc", common2.ByteCount(m.Alloc), "sys", common2.ByteCount(m.Sys))
				}
			}
		}()

		a.accounts.BuildMissedIndices(ctx, g, ps)
		a.storage.BuildMissedIndices(ctx, g, ps)
		a.code.BuildMissedIndices(ctx, g, ps)
		a.logAddrs.BuildMissedIndices(ctx, g, ps)
		a.logTopics.BuildMissedIndices(ctx, g, ps)
		a.tracesFrom.BuildMissedIndices(ctx, g, ps)
		a.tracesTo.BuildMissedIndices(ctx, g, ps)

		if err := g.Wait(); err != nil {
			return err
		}
		if err := a.OpenFolder(); err != nil {
			return err
		}
	}

	ac := a.MakeContext()
	defer ac.Close()
	return ac.BuildOptionalMissedIndices(ctx, workers)
}

func (a *AggregatorV3) SetLogPrefix(v string) { a.logPrefix = v }

func (a *AggregatorV3) SetTx(tx kv.RwTx) {
	a.rwTx = tx
	a.accounts.SetTx(tx)
	a.storage.SetTx(tx)
	a.code.SetTx(tx)
	a.logAddrs.SetTx(tx)
	a.logTopics.SetTx(tx)
	a.tracesFrom.SetTx(tx)
	a.tracesTo.SetTx(tx)
}

func (a *AggregatorV3) SetTxNum(txNum uint64) {
	a.accounts.SetTxNum(txNum)
	a.storage.SetTxNum(txNum)
	a.code.SetTxNum(txNum)
	a.logAddrs.SetTxNum(txNum)
	a.logTopics.SetTxNum(txNum)
	a.tracesFrom.SetTxNum(txNum)
	a.tracesTo.SetTxNum(txNum)
}

type AggV3Collation struct {
	logAddrs   map[string]*roaring64.Bitmap
	logTopics  map[string]*roaring64.Bitmap
	tracesFrom map[string]*roaring64.Bitmap
	tracesTo   map[string]*roaring64.Bitmap
	accounts   HistoryCollation
	storage    HistoryCollation
	code       HistoryCollation
}

func (c AggV3Collation) Close() {
	c.accounts.Close()
	c.storage.Close()
	c.code.Close()

	for _, b := range c.logAddrs {
		bitmapdb.ReturnToPool64(b)
	}
	for _, b := range c.logTopics {
		bitmapdb.ReturnToPool64(b)
	}
	for _, b := range c.tracesFrom {
		bitmapdb.ReturnToPool64(b)
	}
	for _, b := range c.tracesTo {
		bitmapdb.ReturnToPool64(b)
	}
}

func (a *AggregatorV3) buildFiles(ctx context.Context, step, txFrom, txTo uint64) (AggV3StaticFiles, error) {
	var sf AggV3StaticFiles
	var ac AggV3Collation
	closeColl := true
	defer func() {
		if closeColl {
			ac.Close()
		}
	}()
	// collate and build each domain sequentially; every collation runs inside its own read transaction
	var err error
	if err = a.db.View(ctx, func(tx kv.Tx) error {
		ac.accounts, err = a.accounts.collate(step, txFrom, txTo, tx)
		return err
	}); err != nil {
		return sf, err
	}
	if sf.accounts, err = a.accounts.buildFiles(ctx, step, ac.accounts, a.ps); err != nil {
		return sf, err
	}
	if err = a.db.View(ctx, func(tx kv.Tx) error {
		ac.storage, err = a.storage.collate(step, txFrom, txTo, tx)
		return err
	}); err != nil {
		return sf, err
	}
	if sf.storage, err = a.storage.buildFiles(ctx, step, ac.storage, a.ps); err != nil {
		return sf, err
	}
	if err = a.db.View(ctx, func(tx kv.Tx) error {
		ac.code, err = a.code.collate(step, txFrom, txTo, tx)
		return err
	}); err != nil {
		return sf, err
	}
	if sf.code, err = a.code.buildFiles(ctx, step, ac.code, a.ps); err != nil {
		return sf, err
	}
	if err = a.db.View(ctx, func(tx kv.Tx) error {
		ac.logAddrs, err = a.logAddrs.collate(ctx, txFrom, txTo, tx)
		return err
	}); err != nil {
		return sf, err
	}
	if sf.logAddrs, err = a.logAddrs.buildFiles(ctx, step, ac.logAddrs, a.ps); err != nil {
		return sf, err
	}
	if err = a.db.View(ctx, func(tx kv.Tx) error {
		ac.logTopics, err = a.logTopics.collate(ctx, txFrom, txTo, tx)
		return err
	}); err != nil {
		return sf, err
	}
	if sf.logTopics, err = a.logTopics.buildFiles(ctx, step, ac.logTopics, a.ps); err != nil {
		return sf, err
	}
	if err = a.db.View(ctx, func(tx kv.Tx) error {
		ac.tracesFrom, err = a.tracesFrom.collate(ctx, txFrom, txTo, tx)
		return err
	}); err != nil {
		return sf, err
	}
	if sf.tracesFrom, err = a.tracesFrom.buildFiles(ctx, step, ac.tracesFrom, a.ps); err != nil {
		return sf, err
	}
	if err = a.db.View(ctx, func(tx kv.Tx) error {
		ac.tracesTo, err = a.tracesTo.collate(ctx, txFrom, txTo, tx)
		return err
	}); err != nil {
		return sf, err
	}
	if sf.tracesTo, err = a.tracesTo.buildFiles(ctx, step, ac.tracesTo, a.ps); err != nil {
		return sf, err
	}
	closeColl = false
	return sf, nil
}

type AggV3StaticFiles struct {
	accounts   HistoryFiles
	storage    HistoryFiles
	code       HistoryFiles
	logAddrs   InvertedFiles
	logTopics  InvertedFiles
	tracesFrom InvertedFiles
	tracesTo   InvertedFiles
}

func (sf AggV3StaticFiles) Close() {
	sf.accounts.Close()
	sf.storage.Close()
	sf.code.Close()
	sf.logAddrs.Close()
	sf.logTopics.Close()
	sf.tracesFrom.Close()
	sf.tracesTo.Close()
}

func (a *AggregatorV3) BuildFiles(toTxNum uint64) (err error) {
	a.BuildFilesInBackground(toTxNum)
	if !(a.buildingFiles.Load() || a.mergingFiles.Load() || a.buildingOptionalIndices.Load()) {
		return nil
	}

	logEvery := time.NewTicker(20 * time.Second)
	defer logEvery.Stop()
Loop:
	for {
		select {
		case <-a.ctx.Done():
			return a.ctx.Err()
		case <-logEvery.C:
			if !(a.buildingFiles.Load() || a.mergingFiles.Load() || a.buildingOptionalIndices.Load()) {
				break Loop
			}
			if a.HasBackgroundFilesBuild() {
				log.Info("[snapshots] Files build", "progress", a.BackgroundProgress())
			}
		}
	}

	return nil
}

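// buildFilesInBackground builds the static files for a single step. The tx
// range is derived directly from the step number: for example, with an
// illustrative aggregationStep of 500_000, step 3 covers txs
// [1_500_000, 2_000_000).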
func (a *AggregatorV3) buildFilesInBackground(ctx context.Context, step uint64) (err error) {
	closeAll := true
	//log.Info("[snapshots] history build", "step", fmt.Sprintf("%d-%d", step, step+1))
	sf, err := a.buildFiles(ctx, step, step*a.aggregationStep, (step+1)*a.aggregationStep)
	if err != nil {
		return err
	}
	defer func() {
		if closeAll {
			sf.Close()
		}
	}()
	a.integrateFiles(sf, step*a.aggregationStep, (step+1)*a.aggregationStep)
	//a.notifyAboutNewSnapshots()

	closeAll = false
	return nil
}

func (a *AggregatorV3) mergeLoopStep(ctx context.Context, workers int) (somethingDone bool, err error) {
	ac := a.MakeContext() // needed to ensure we perform all file operations in a transaction-like manner; maybe we will enforce this at the type level in the future
	defer ac.Close()

	closeAll := true
	maxSpan := a.aggregationStep * StepsInBiggestFile
	r := ac.findMergeRange(a.minimaxTxNumInFiles.Load(), maxSpan)
	if !r.any() {
		return false, nil
	}

	outs, err := ac.staticFilesInRange(r)
	defer func() {
		if closeAll {
			outs.Close()
		}
	}()
	if err != nil {
		return false, err
	}

	in, err := ac.mergeFiles(ctx, outs, r, workers)
	if err != nil {
		return true, err
	}
	defer func() {
		if closeAll {
			in.Close()
		}
	}()
	a.integrateMergedFiles(outs, in)
	a.onFreeze(in.FrozenList())
	closeAll = false
	return true, nil
}
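// MergeLoop - repeats mergeLoopStep until there is nothing left to merge.
// A usage sketch (the worker count is illustrative):
//
//	if err := agg.MergeLoop(ctx, runtime.NumCPU()); err != nil {
//		// handle err
//	}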
func (a *AggregatorV3) MergeLoop(ctx context.Context, workers int) error {
	for {
		somethingMerged, err := a.mergeLoopStep(ctx, workers)
		if err != nil {
			return err
		}
		if !somethingMerged {
			return nil
		}
	}
}

func (a *AggregatorV3) integrateFiles(sf AggV3StaticFiles, txNumFrom, txNumTo uint64) {
	a.filesMutationLock.Lock()
	defer a.filesMutationLock.Unlock()
	defer a.needSaveFilesListInDB.Store(true)
	defer a.recalcMaxTxNum()
	a.accounts.integrateFiles(sf.accounts, txNumFrom, txNumTo)
	a.storage.integrateFiles(sf.storage, txNumFrom, txNumTo)
	a.code.integrateFiles(sf.code, txNumFrom, txNumTo)
	a.logAddrs.integrateFiles(sf.logAddrs, txNumFrom, txNumTo)
	a.logTopics.integrateFiles(sf.logTopics, txNumFrom, txNumTo)
	a.tracesFrom.integrateFiles(sf.tracesFrom, txNumFrom, txNumTo)
	a.tracesTo.integrateFiles(sf.tracesTo, txNumFrom, txNumTo)
}

func (a *AggregatorV3) HasNewFrozenFiles() bool {
	return a.needSaveFilesListInDB.CompareAndSwap(true, false)
}

func (a *AggregatorV3) Unwind(ctx context.Context, txUnwindTo uint64) error {
	logEvery := time.NewTicker(30 * time.Second)
	defer logEvery.Stop()
	if err := a.accounts.prune(ctx, txUnwindTo, math2.MaxUint64, math2.MaxUint64, logEvery); err != nil {
		return err
	}
	if err := a.storage.prune(ctx, txUnwindTo, math2.MaxUint64, math2.MaxUint64, logEvery); err != nil {
		return err
	}
	if err := a.code.prune(ctx, txUnwindTo, math2.MaxUint64, math2.MaxUint64, logEvery); err != nil {
		return err
	}
	if err := a.logAddrs.prune(ctx, txUnwindTo, math2.MaxUint64, math2.MaxUint64, logEvery); err != nil {
		return err
	}
	if err := a.logTopics.prune(ctx, txUnwindTo, math2.MaxUint64, math2.MaxUint64, logEvery); err != nil {
		return err
	}
	if err := a.tracesFrom.prune(ctx, txUnwindTo, math2.MaxUint64, math2.MaxUint64, logEvery); err != nil {
		return err
	}
	if err := a.tracesTo.prune(ctx, txUnwindTo, math2.MaxUint64, math2.MaxUint64, logEvery); err != nil {
		return err
	}
	return nil
}

func (a *AggregatorV3) Warmup(ctx context.Context, txFrom, limit uint64) error {
	if a.db == nil {
		return nil
	}
	e, ctx := errgroup.WithContext(ctx)
	e.Go(func() error {
		return a.db.View(ctx, func(tx kv.Tx) error { return a.accounts.warmup(ctx, txFrom, limit, tx) })
	})
	e.Go(func() error {
		return a.db.View(ctx, func(tx kv.Tx) error { return a.storage.warmup(ctx, txFrom, limit, tx) })
	})
	e.Go(func() error {
		return a.db.View(ctx, func(tx kv.Tx) error { return a.code.warmup(ctx, txFrom, limit, tx) })
	})
	e.Go(func() error {
		return a.db.View(ctx, func(tx kv.Tx) error { return a.logAddrs.warmup(ctx, txFrom, limit, tx) })
	})
	e.Go(func() error {
		return a.db.View(ctx, func(tx kv.Tx) error { return a.logTopics.warmup(ctx, txFrom, limit, tx) })
	})
	e.Go(func() error {
		return a.db.View(ctx, func(tx kv.Tx) error { return a.tracesFrom.warmup(ctx, txFrom, limit, tx) })
	})
	e.Go(func() error {
		return a.db.View(ctx, func(tx kv.Tx) error { return a.tracesTo.warmup(ctx, txFrom, limit, tx) })
	})
	return e.Wait()
}

// DiscardHistory - discard history writes; pattern: `defer agg.DiscardHistory().FinishWrites()`
func (a *AggregatorV3) DiscardHistory() *AggregatorV3 {
	a.accounts.DiscardHistory()
	a.storage.DiscardHistory()
	a.code.DiscardHistory()
	a.logAddrs.DiscardHistory(a.tmpdir)
	a.logTopics.DiscardHistory(a.tmpdir)
	a.tracesFrom.DiscardHistory(a.tmpdir)
	a.tracesTo.DiscardHistory(a.tmpdir)
	return a
}

// StartWrites - pattern: `defer agg.StartWrites().FinishWrites()`
func (a *AggregatorV3) StartWrites() *AggregatorV3 {
	a.walLock.Lock()
	defer a.walLock.Unlock()
	a.accounts.StartWrites()
	a.storage.StartWrites()
	a.code.StartWrites()
	a.logAddrs.StartWrites()
	a.logTopics.StartWrites()
	a.tracesFrom.StartWrites()
	a.tracesTo.StartWrites()
	return a
}
func (a *AggregatorV3) StartUnbufferedWrites() *AggregatorV3 {
	a.walLock.Lock()
	defer a.walLock.Unlock()
	a.accounts.StartUnbufferedWrites()
	a.storage.StartUnbufferedWrites()
	a.code.StartUnbufferedWrites()
	a.logAddrs.StartUnbufferedWrites()
	a.logTopics.StartUnbufferedWrites()
	a.tracesFrom.StartUnbufferedWrites()
	a.tracesTo.StartUnbufferedWrites()
	return a
}
func (a *AggregatorV3) FinishWrites() {
	a.walLock.Lock()
	defer a.walLock.Unlock()
	a.accounts.FinishWrites()
	a.storage.FinishWrites()
	a.code.FinishWrites()
	a.logAddrs.FinishWrites()
	a.logTopics.FinishWrites()
	a.tracesFrom.FinishWrites()
	a.tracesTo.FinishWrites()
}
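
// A buffered-write lifecycle sketch (the tx, txNum, addr and prevValue
// variables are illustrative; assumes an open kv.RwTx):
//
//	defer agg.StartWrites().FinishWrites()
//	agg.SetTx(tx)
//	agg.SetTxNum(txNum)
//	if err := agg.AddAccountPrev(addr, prevValue); err != nil {
//		// handle err
//	}
//	if err := agg.Flush(ctx, tx); err != nil { // move buffered writes into the tx
//		// handle err
//	}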

type flusher interface {
	Flush(ctx context.Context, tx kv.RwTx) error
}

func (a *AggregatorV3) rotate() []flusher {
	a.walLock.Lock()
	defer a.walLock.Unlock()
	return []flusher{
		a.accounts.Rotate(),
		a.storage.Rotate(),
		a.code.Rotate(),
		a.logAddrs.Rotate(),
		a.logTopics.Rotate(),
		a.tracesFrom.Rotate(),
		a.tracesTo.Rotate(),
	}
}
func (a *AggregatorV3) Flush(ctx context.Context, tx kv.RwTx) error {
	flushers := a.rotate()
	defer func(t time.Time) { log.Debug("[snapshots] history flush", "took", time.Since(t)) }(time.Now())
	for _, f := range flushers {
		if err := f.Flush(ctx, tx); err != nil {
			return err
		}
	}
	return nil
}

func (a *AggregatorV3) CanPrune(tx kv.Tx) bool {
	return a.CanPruneFrom(tx) < a.minimaxTxNumInFiles.Load()
}
func (a *AggregatorV3) CanPruneFrom(tx kv.Tx) uint64 {
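	// the first keys of these tables are big-endian txNums; the smaller of the
	// two marks the oldest history data still present in the DB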
	fst, _ := kv.FirstKey(tx, kv.TblTracesToKeys)
	fst2, _ := kv.FirstKey(tx, kv.TblStorageHistoryKeys)
	if len(fst) > 0 && len(fst2) > 0 {
		fstInDb := binary.BigEndian.Uint64(fst)
		fstInDb2 := binary.BigEndian.Uint64(fst2)
		return cmp.Min(fstInDb, fstInDb2)
	}
	return math2.MaxUint64
}

func (a *AggregatorV3) PruneWithTiemout(ctx context.Context, timeout time.Duration) error {
	t := time.Now()
	for a.CanPrune(a.rwTx) && time.Since(t) < timeout {
		if err := a.Prune(ctx, 1_000); err != nil { // prune part of retired data, before commit
			return err
		}
	}
	return nil
}
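
// A pruning sketch for a commit loop (assumes the rwTx was set via agg.SetTx;
// the timeout is illustrative):
//
//	if err := agg.PruneWithTiemout(ctx, 250*time.Millisecond); err != nil {
//		// handle err
//	}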

func (a *AggregatorV3) StepsRangeInDBAsStr(tx kv.Tx) string {
	return strings.Join([]string{
		a.accounts.stepsRangeInDBAsStr(tx),
		a.storage.stepsRangeInDBAsStr(tx),
		a.code.stepsRangeInDBAsStr(tx),
		a.logAddrs.stepsRangeInDBAsStr(tx),
		a.logTopics.stepsRangeInDBAsStr(tx),
		a.tracesFrom.stepsRangeInDBAsStr(tx),
		a.tracesTo.stepsRangeInDBAsStr(tx),
	}, ", ")
}

func (a *AggregatorV3) Prune(ctx context.Context, limit uint64) error {
	//if limit/a.aggregationStep > StepsInBiggestFile {
	//	ctx, cancel := context.WithCancel(ctx)
	//	defer cancel()
	//
	//	a.wg.Add(1)
	//	go func() {
	//		defer a.wg.Done()
	//		_ = a.Warmup(ctx, 0, cmp.Max(a.aggregationStep, limit)) // warmup is async and moves faster than data deletion
	//	}()
	//}
	return a.prune(ctx, 0, a.minimaxTxNumInFiles.Load(), limit)
}

func (a *AggregatorV3) prune(ctx context.Context, txFrom, txTo, limit uint64) error {
	logEvery := time.NewTicker(30 * time.Second)
	defer logEvery.Stop()
	if err := a.accounts.prune(ctx, txFrom, txTo, limit, logEvery); err != nil {
		return err
	}
	if err := a.storage.prune(ctx, txFrom, txTo, limit, logEvery); err != nil {
		return err
	}
	if err := a.code.prune(ctx, txFrom, txTo, limit, logEvery); err != nil {
		return err
	}
	if err := a.logAddrs.prune(ctx, txFrom, txTo, limit, logEvery); err != nil {
		return err
	}
	if err := a.logTopics.prune(ctx, txFrom, txTo, limit, logEvery); err != nil {
		return err
	}
	if err := a.tracesFrom.prune(ctx, txFrom, txTo, limit, logEvery); err != nil {
		return err
	}
	if err := a.tracesTo.prune(ctx, txFrom, txTo, limit, logEvery); err != nil {
		return err
	}
	return nil
}

func (a *AggregatorV3) LogStats(tx kv.Tx, tx2block func(endTxNumMinimax uint64) uint64) {
	if a.minimaxTxNumInFiles.Load() == 0 {
		return
	}
	histBlockNumProgress := tx2block(a.minimaxTxNumInFiles.Load())
	str := make([]string, 0, a.accounts.InvertedIndex.files.Len())
	a.accounts.InvertedIndex.files.Walk(func(items []*filesItem) bool {
		for _, item := range items {
			bn := tx2block(item.endTxNum)
			str = append(str, fmt.Sprintf("%d=%dK", item.endTxNum/a.aggregationStep, bn/1_000))
		}
		return true
	})

	c, err := tx.CursorDupSort(a.accounts.InvertedIndex.indexTable)
	if err != nil {
		// TODO pass error properly around
		panic(err)
	}
	_, v, err := c.First()
	if err != nil {
		// TODO pass error properly around
		panic(err)
	}
	var firstHistoryIndexBlockInDB uint64
	if len(v) != 0 {
		firstHistoryIndexBlockInDB = tx2block(binary.BigEndian.Uint64(v))
	}

	var m runtime.MemStats
	dbg.ReadMemStats(&m)
	log.Info("[snapshots] History Stat",
		"blocks", fmt.Sprintf("%dk", (histBlockNumProgress+1)/1000),
		"txs", fmt.Sprintf("%dm", a.minimaxTxNumInFiles.Load()/1_000_000),
		"txNum2blockNum", strings.Join(str, ","),
		"first_history_idx_in_db", firstHistoryIndexBlockInDB,
		"alloc", common2.ByteCount(m.Alloc), "sys", common2.ByteCount(m.Sys))
}

func (a *AggregatorV3) EndTxNumMinimax() uint64 { return a.minimaxTxNumInFiles.Load() }
func (a *AggregatorV3) EndTxNumFrozenAndIndexed() uint64 {
	return cmp.Min(
		cmp.Min(
			a.accounts.endIndexedTxNumMinimax(true),
			a.storage.endIndexedTxNumMinimax(true),
		),
		a.code.endIndexedTxNumMinimax(true),
	)
}
func (a *AggregatorV3) recalcMaxTxNum() {
	min := a.accounts.endTxNumMinimax()
	if txNum := a.storage.endTxNumMinimax(); txNum < min {
		min = txNum
	}
	if txNum := a.code.endTxNumMinimax(); txNum < min {
		min = txNum
	}
	if txNum := a.logAddrs.endTxNumMinimax(); txNum < min {
		min = txNum
	}
	if txNum := a.logTopics.endTxNumMinimax(); txNum < min {
		min = txNum
	}
	if txNum := a.tracesFrom.endTxNumMinimax(); txNum < min {
		min = txNum
	}
	if txNum := a.tracesTo.endTxNumMinimax(); txNum < min {
		min = txNum
	}
	a.minimaxTxNumInFiles.Store(min)
}

type RangesV3 struct {
	accounts             HistoryRanges
	storage              HistoryRanges
	code                 HistoryRanges
	logTopicsStartTxNum  uint64
	logAddrsEndTxNum     uint64
	logAddrsStartTxNum   uint64
	logTopicsEndTxNum    uint64
	tracesFromStartTxNum uint64
	tracesFromEndTxNum   uint64
	tracesToStartTxNum   uint64
	tracesToEndTxNum     uint64
	logAddrs             bool
	logTopics            bool
	tracesFrom           bool
	tracesTo             bool
}

func (r RangesV3) any() bool {
	return r.accounts.any() || r.storage.any() || r.code.any() || r.logAddrs || r.logTopics || r.tracesFrom || r.tracesTo
}

func (ac *AggregatorV3Context) findMergeRange(maxEndTxNum, maxSpan uint64) RangesV3 {
	var r RangesV3
	r.accounts = ac.a.accounts.findMergeRange(maxEndTxNum, maxSpan)
	r.storage = ac.a.storage.findMergeRange(maxEndTxNum, maxSpan)
	r.code = ac.a.code.findMergeRange(maxEndTxNum, maxSpan)
	r.logAddrs, r.logAddrsStartTxNum, r.logAddrsEndTxNum = ac.a.logAddrs.findMergeRange(maxEndTxNum, maxSpan)
	r.logTopics, r.logTopicsStartTxNum, r.logTopicsEndTxNum = ac.a.logTopics.findMergeRange(maxEndTxNum, maxSpan)
	r.tracesFrom, r.tracesFromStartTxNum, r.tracesFromEndTxNum = ac.a.tracesFrom.findMergeRange(maxEndTxNum, maxSpan)
	r.tracesTo, r.tracesToStartTxNum, r.tracesToEndTxNum = ac.a.tracesTo.findMergeRange(maxEndTxNum, maxSpan)
	//log.Info(fmt.Sprintf("findMergeRange(%d, %d)=%+v\n", maxEndTxNum, maxSpan, r))
	return r
}

type SelectedStaticFilesV3 struct {
	logTopics    []*filesItem
	accountsHist []*filesItem
	tracesTo     []*filesItem
	storageIdx   []*filesItem
	storageHist  []*filesItem
	tracesFrom   []*filesItem
	codeIdx      []*filesItem
	codeHist     []*filesItem
	accountsIdx  []*filesItem
	logAddrs     []*filesItem
	codeI        int
	logAddrsI    int
	logTopicsI   int
	storageI     int
	tracesFromI  int
	accountsI    int
	tracesToI    int
}

func (sf SelectedStaticFilesV3) Close() {
	for _, group := range [][]*filesItem{sf.accountsIdx, sf.accountsHist, sf.storageIdx, sf.storageHist, sf.codeIdx, sf.codeHist,
		sf.logAddrs, sf.logTopics, sf.tracesFrom, sf.tracesTo} {
		for _, item := range group {
			if item != nil {
				if item.decompressor != nil {
					item.decompressor.Close()
				}
				if item.index != nil {
					item.index.Close()
				}
			}
		}
	}
}

func (ac *AggregatorV3Context) staticFilesInRange(r RangesV3) (sf SelectedStaticFilesV3, err error) {
	if r.accounts.any() {
		sf.accountsIdx, sf.accountsHist, sf.accountsI, err = ac.accounts.staticFilesInRange(r.accounts)
		if err != nil {
			return sf, err
		}
	}
	if r.storage.any() {
		sf.storageIdx, sf.storageHist, sf.storageI, err = ac.storage.staticFilesInRange(r.storage)
		if err != nil {
			return sf, err
		}
	}
	if r.code.any() {
		sf.codeIdx, sf.codeHist, sf.codeI, err = ac.code.staticFilesInRange(r.code)
		if err != nil {
			return sf, err
		}
	}
	if r.logAddrs {
		sf.logAddrs, sf.logAddrsI = ac.logAddrs.staticFilesInRange(r.logAddrsStartTxNum, r.logAddrsEndTxNum)
	}
	if r.logTopics {
		sf.logTopics, sf.logTopicsI = ac.logTopics.staticFilesInRange(r.logTopicsStartTxNum, r.logTopicsEndTxNum)
	}
	if r.tracesFrom {
		sf.tracesFrom, sf.tracesFromI = ac.tracesFrom.staticFilesInRange(r.tracesFromStartTxNum, r.tracesFromEndTxNum)
	}
	if r.tracesTo {
		sf.tracesTo, sf.tracesToI = ac.tracesTo.staticFilesInRange(r.tracesToStartTxNum, r.tracesToEndTxNum)
	}
	return sf, err
}

type MergedFilesV3 struct {
	accountsIdx, accountsHist *filesItem
	storageIdx, storageHist   *filesItem
	codeIdx, codeHist         *filesItem
	logAddrs                  *filesItem
	logTopics                 *filesItem
	tracesFrom                *filesItem
	tracesTo                  *filesItem
}

func (mf MergedFilesV3) FrozenList() (frozen []string) {
	if mf.accountsHist != nil && mf.accountsHist.frozen {
		frozen = append(frozen, mf.accountsHist.decompressor.FileName())
	}
	if mf.accountsIdx != nil && mf.accountsIdx.frozen {
		frozen = append(frozen, mf.accountsIdx.decompressor.FileName())
	}

	if mf.storageHist != nil && mf.storageHist.frozen {
		frozen = append(frozen, mf.storageHist.decompressor.FileName())
	}
	if mf.storageIdx != nil && mf.storageIdx.frozen {
		frozen = append(frozen, mf.storageIdx.decompressor.FileName())
	}

	if mf.codeHist != nil && mf.codeHist.frozen {
		frozen = append(frozen, mf.codeHist.decompressor.FileName())
	}
	if mf.codeIdx != nil && mf.codeIdx.frozen {
		frozen = append(frozen, mf.codeIdx.decompressor.FileName())
	}

	if mf.logAddrs != nil && mf.logAddrs.frozen {
		frozen = append(frozen, mf.logAddrs.decompressor.FileName())
	}
	if mf.logTopics != nil && mf.logTopics.frozen {
		frozen = append(frozen, mf.logTopics.decompressor.FileName())
	}
	if mf.tracesFrom != nil && mf.tracesFrom.frozen {
		frozen = append(frozen, mf.tracesFrom.decompressor.FileName())
	}
	if mf.tracesTo != nil && mf.tracesTo.frozen {
		frozen = append(frozen, mf.tracesTo.decompressor.FileName())
	}
	return frozen
}
func (mf MergedFilesV3) Close() {
	for _, item := range []*filesItem{mf.accountsIdx, mf.accountsHist, mf.storageIdx, mf.storageHist, mf.codeIdx, mf.codeHist,
		mf.logAddrs, mf.logTopics, mf.tracesFrom, mf.tracesTo} {
		if item != nil {
			if item.decompressor != nil {
				item.decompressor.Close()
			}
			if item.index != nil {
				item.index.Close()
			}
		}
	}
}

func (ac *AggregatorV3Context) mergeFiles(ctx context.Context, files SelectedStaticFilesV3, r RangesV3, workers int) (MergedFilesV3, error) {
	var mf MergedFilesV3
	g, ctx := errgroup.WithContext(ctx)
	g.SetLimit(workers)
	closeFiles := true
	defer func() {
		if closeFiles {
			mf.Close()
		}
	}()
	if r.accounts.any() {
		g.Go(func() error {
			var err error
			mf.accountsIdx, mf.accountsHist, err = ac.a.accounts.mergeFiles(ctx, files.accountsIdx, files.accountsHist, r.accounts, workers, ac.a.ps)
			return err
		})
	}

	if r.storage.any() {
		g.Go(func() error {
			var err error
			mf.storageIdx, mf.storageHist, err = ac.a.storage.mergeFiles(ctx, files.storageIdx, files.storageHist, r.storage, workers, ac.a.ps)
			return err
		})
	}
	if r.code.any() {
		g.Go(func() error {
			var err error
			mf.codeIdx, mf.codeHist, err = ac.a.code.mergeFiles(ctx, files.codeIdx, files.codeHist, r.code, workers, ac.a.ps)
			return err
		})
	}
	if r.logAddrs {
		g.Go(func() error {
			var err error
			mf.logAddrs, err = ac.a.logAddrs.mergeFiles(ctx, files.logAddrs, r.logAddrsStartTxNum, r.logAddrsEndTxNum, workers, ac.a.ps)
			return err
		})
	}
	if r.logTopics {
		g.Go(func() error {
			var err error
			mf.logTopics, err = ac.a.logTopics.mergeFiles(ctx, files.logTopics, r.logTopicsStartTxNum, r.logTopicsEndTxNum, workers, ac.a.ps)
			return err
		})
	}
	if r.tracesFrom {
		g.Go(func() error {
			var err error
			mf.tracesFrom, err = ac.a.tracesFrom.mergeFiles(ctx, files.tracesFrom, r.tracesFromStartTxNum, r.tracesFromEndTxNum, workers, ac.a.ps)
			return err
		})
	}
	if r.tracesTo {
		g.Go(func() error {
			var err error
			mf.tracesTo, err = ac.a.tracesTo.mergeFiles(ctx, files.tracesTo, r.tracesToStartTxNum, r.tracesToEndTxNum, workers, ac.a.ps)
			return err
		})
	}
	err := g.Wait()
	if err == nil {
		closeFiles = false
	}
	return mf, err
}

func (a *AggregatorV3) integrateMergedFiles(outs SelectedStaticFilesV3, in MergedFilesV3) (frozen []string) {
	a.filesMutationLock.Lock()
	defer a.filesMutationLock.Unlock()
	defer a.needSaveFilesListInDB.Store(true)
	defer a.recalcMaxTxNum()
	a.accounts.integrateMergedFiles(outs.accountsIdx, outs.accountsHist, in.accountsIdx, in.accountsHist)
	a.storage.integrateMergedFiles(outs.storageIdx, outs.storageHist, in.storageIdx, in.storageHist)
	a.code.integrateMergedFiles(outs.codeIdx, outs.codeHist, in.codeIdx, in.codeHist)
	a.logAddrs.integrateMergedFiles(outs.logAddrs, in.logAddrs)
	a.logTopics.integrateMergedFiles(outs.logTopics, in.logTopics)
	a.tracesFrom.integrateMergedFiles(outs.tracesFrom, in.tracesFrom)
	a.tracesTo.integrateMergedFiles(outs.tracesTo, in.tracesTo)
	a.cleanAfterNewFreeze(in)
	return frozen
}
func (a *AggregatorV3) cleanAfterNewFreeze(in MergedFilesV3) {
	if in.accountsHist != nil && in.accountsHist.frozen {
		a.accounts.cleanAfterFreeze(in.accountsHist.endTxNum)
	}
	if in.storageHist != nil && in.storageHist.frozen {
		a.storage.cleanAfterFreeze(in.storageHist.endTxNum)
	}
	if in.codeHist != nil && in.codeHist.frozen {
		a.code.cleanAfterFreeze(in.codeHist.endTxNum)
	}
	if in.logAddrs != nil && in.logAddrs.frozen {
		a.logAddrs.cleanAfterFreeze(in.logAddrs.endTxNum)
	}
	if in.logTopics != nil && in.logTopics.frozen {
		a.logTopics.cleanAfterFreeze(in.logTopics.endTxNum)
	}
	if in.tracesFrom != nil && in.tracesFrom.frozen {
		a.tracesFrom.cleanAfterFreeze(in.tracesFrom.endTxNum)
	}
	if in.tracesTo != nil && in.tracesTo.frozen {
		a.tracesTo.cleanAfterFreeze(in.tracesTo.endTxNum)
	}
}

// KeepInDB - usually equal to one a.aggregationStep, but when we execute blocks from snapshots
// we can set it to 0, because no re-orgs of those blocks are possible
func (a *AggregatorV3) KeepInDB(v uint64) { a.keepInDB = v }

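// BuildFilesInBackground - triggers the files build once enough data has
// accumulated in the DB. With the default keepInDB = 2*aggregationStep and an
// illustrative aggregationStep of 500_000, the build starts only when
// txNum+1 > minimaxTxNumInFiles + 3*500_000, i.e. one full step plus the
// keepInDB window beyond what the files already cover.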
func (a *AggregatorV3) BuildFilesInBackground(txNum uint64) {
	if (txNum + 1) <= a.minimaxTxNumInFiles.Load()+a.aggregationStep+a.keepInDB { // Leave one step's worth in the DB
		return
	}

	if ok := a.buildingFiles.CompareAndSwap(false, true); !ok {
		return
	}

	step := a.minimaxTxNumInFiles.Load() / a.aggregationStep
	toTxNum := (step + 1) * a.aggregationStep
	hasData := false
	a.wg.Add(1)
	go func() {
		defer a.wg.Done()
		defer a.buildingFiles.Store(false)

		// check if the db has enough data (maybe we didn't commit it yet)
		lastInDB := lastIdInDB(a.db, a.accounts.indexKeysTable)
		hasData = lastInDB >= toTxNum
		if !hasData {
			return
		}

		// try to create as many small-step files as possible:
		// - to reduce the number of small merges
		// - to remove old data from the db as early as possible
		// - new data may be committed while files are being built, so fetch the latest id in the db on each loop iteration
		for step < lastIdInDB(a.db, a.accounts.indexKeysTable)/a.aggregationStep {
			if err := a.buildFilesInBackground(a.ctx, step); err != nil {
				if errors.Is(err, context.Canceled) {
					return
				}
				log.Warn("[snapshots] buildFilesInBackground", "err", err)
				break
			}
			step++
		}

		if ok := a.mergingFiles.CompareAndSwap(false, true); !ok {
			return
		}
		a.wg.Add(1)
		go func() {
			defer a.wg.Done()
			defer a.mergingFiles.Store(false)
			if err := a.MergeLoop(a.ctx, 1); err != nil {
				if errors.Is(err, context.Canceled) {
					return
				}
				log.Warn("[snapshots] merge", "err", err)
			}

			a.BuildOptionalMissedIndicesInBackground(a.ctx, 1)
		}()
	}()
}

func (a *AggregatorV3) BatchHistoryWriteStart() *AggregatorV3 {
	a.walLock.RLock()
	return a
}
func (a *AggregatorV3) BatchHistoryWriteEnd() {
	a.walLock.RUnlock()
}

func (a *AggregatorV3) AddAccountPrev(addr []byte, prev []byte) error {
	return a.accounts.AddPrevValue(addr, nil, prev)
}

func (a *AggregatorV3) AddStoragePrev(addr []byte, loc []byte, prev []byte) error {
	return a.storage.AddPrevValue(addr, loc, prev)
}

// AddCodePrev - addr+inc => code
func (a *AggregatorV3) AddCodePrev(addr []byte, prev []byte) error {
	return a.code.AddPrevValue(addr, nil, prev)
}

func (a *AggregatorV3) PutIdx(idx kv.InvertedIdx, key []byte) error {
	switch idx {
	case kv.TblTracesFromIdx:
		return a.tracesFrom.Add(key)
	case kv.TblTracesToIdx:
		return a.tracesTo.Add(key)
	case kv.TblLogAddressIdx:
		return a.logAddrs.Add(key)
	case kv.LogTopicIndex:
		return a.logTopics.Add(key)
	default:
		panic(idx)
	}
}

// DisableReadAhead - usage: `defer a.EnableReadAhead().DisableReadAhead()`. Please don't use these funcs without `defer`, to avoid leaks.
func (a *AggregatorV3) DisableReadAhead() {
	a.accounts.DisableReadAhead()
	a.storage.DisableReadAhead()
	a.code.DisableReadAhead()
	a.logAddrs.DisableReadAhead()
	a.logTopics.DisableReadAhead()
	a.tracesFrom.DisableReadAhead()
	a.tracesTo.DisableReadAhead()
}
func (a *AggregatorV3) EnableReadAhead() *AggregatorV3 {
	a.accounts.EnableReadAhead()
	a.storage.EnableReadAhead()
	a.code.EnableReadAhead()
	a.logAddrs.EnableReadAhead()
	a.logTopics.EnableReadAhead()
	a.tracesFrom.EnableReadAhead()
	a.tracesTo.EnableReadAhead()
	return a
}
func (a *AggregatorV3) EnableMadvWillNeed() *AggregatorV3 {
	a.accounts.EnableMadvWillNeed()
	a.storage.EnableMadvWillNeed()
	a.code.EnableMadvWillNeed()
	a.logAddrs.EnableMadvWillNeed()
	a.logTopics.EnableMadvWillNeed()
	a.tracesFrom.EnableMadvWillNeed()
	a.tracesTo.EnableMadvWillNeed()
	return a
}
func (a *AggregatorV3) EnableMadvNormal() *AggregatorV3 {
	a.accounts.EnableMadvNormalReadAhead()
	a.storage.EnableMadvNormalReadAhead()
	a.code.EnableMadvNormalReadAhead()
	a.logAddrs.EnableMadvNormalReadAhead()
	a.logTopics.EnableMadvNormalReadAhead()
	a.tracesFrom.EnableMadvNormalReadAhead()
	a.tracesTo.EnableMadvNormalReadAhead()
	return a
}

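// IndexRange - returns an iterator over the txNums at which key `k` occurs in
// the chosen inverted index. A usage sketch (the key and bounds are
// illustrative; assumes an open kv.Tx and the iter.U64 HasNext/Next contract):
//
//	it, err := ac.IndexRange(kv.LogAddrIdx, addr, 0, -1, order.Asc, -1, tx)
//	if err != nil {
//		// handle err
//	}
//	for it.HasNext() {
//		txNum, err := it.Next()
//		if err != nil {
//			// handle err
//		}
//		_ = txNum
//	}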
func (ac *AggregatorV3Context) IndexRange(name kv.InvertedIdx, k []byte, fromTs, toTs int, asc order.By, limit int, tx kv.Tx) (timestamps iter.U64, err error) {
	switch name {
	case kv.AccountsHistoryIdx:
		return ac.accounts.IdxRange(k, fromTs, toTs, asc, limit, tx)
	case kv.StorageHistoryIdx:
		return ac.storage.IdxRange(k, fromTs, toTs, asc, limit, tx)
	case kv.CodeHistoryIdx:
		return ac.code.IdxRange(k, fromTs, toTs, asc, limit, tx)
	case kv.LogTopicIdx:
		return ac.logTopics.IdxRange(k, fromTs, toTs, asc, limit, tx)
	case kv.LogAddrIdx:
		return ac.logAddrs.IdxRange(k, fromTs, toTs, asc, limit, tx)
	case kv.TracesFromIdx:
		return ac.tracesFrom.IdxRange(k, fromTs, toTs, asc, limit, tx)
	case kv.TracesToIdx:
		return ac.tracesTo.IdxRange(k, fromTs, toTs, asc, limit, tx)
	default:
		return nil, fmt.Errorf("unexpected history name: %s", name)
	}
}

// -- range end

func (ac *AggregatorV3Context) ReadAccountDataNoStateWithRecent(addr []byte, txNum uint64, tx kv.Tx) ([]byte, bool, error) {
	return ac.accounts.GetNoStateWithRecent(addr, txNum, tx)
}

func (ac *AggregatorV3Context) ReadAccountDataNoState(addr []byte, txNum uint64) ([]byte, bool, error) {
	return ac.accounts.GetNoState(addr, txNum)
}

func (ac *AggregatorV3Context) ReadAccountStorageNoStateWithRecent(addr []byte, loc []byte, txNum uint64, tx kv.Tx) ([]byte, bool, error) {
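	// compose the composite key addr+loc in ac.keyBuf, growing the buffer only
	// when needed so repeated lookups avoid re-allocation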
	if cap(ac.keyBuf) < len(addr)+len(loc) {
		ac.keyBuf = make([]byte, len(addr)+len(loc))
	} else if len(ac.keyBuf) != len(addr)+len(loc) {
		ac.keyBuf = ac.keyBuf[:len(addr)+len(loc)]
	}
	copy(ac.keyBuf, addr)
	copy(ac.keyBuf[len(addr):], loc)
	return ac.storage.GetNoStateWithRecent(ac.keyBuf, txNum, tx)
}
func (ac *AggregatorV3Context) ReadAccountStorageNoStateWithRecent2(key []byte, txNum uint64, tx kv.Tx) ([]byte, bool, error) {
	return ac.storage.GetNoStateWithRecent(key, txNum, tx)
}

func (ac *AggregatorV3Context) ReadAccountStorageNoState(addr []byte, loc []byte, txNum uint64) ([]byte, bool, error) {
	if cap(ac.keyBuf) < len(addr)+len(loc) {
		ac.keyBuf = make([]byte, len(addr)+len(loc))
	} else if len(ac.keyBuf) != len(addr)+len(loc) {
		ac.keyBuf = ac.keyBuf[:len(addr)+len(loc)]
	}
	copy(ac.keyBuf, addr)
	copy(ac.keyBuf[len(addr):], loc)
	return ac.storage.GetNoState(ac.keyBuf, txNum)
}

func (ac *AggregatorV3Context) ReadAccountCodeNoStateWithRecent(addr []byte, txNum uint64, tx kv.Tx) ([]byte, bool, error) {
	return ac.code.GetNoStateWithRecent(addr, txNum, tx)
}
func (ac *AggregatorV3Context) ReadAccountCodeNoState(addr []byte, txNum uint64) ([]byte, bool, error) {
	return ac.code.GetNoState(addr, txNum)
}

func (ac *AggregatorV3Context) ReadAccountCodeSizeNoStateWithRecent(addr []byte, txNum uint64, tx kv.Tx) (int, bool, error) {
	code, noState, err := ac.code.GetNoStateWithRecent(addr, txNum, tx)
	if err != nil {
		return 0, false, err
	}
	return len(code), noState, nil
}
func (ac *AggregatorV3Context) ReadAccountCodeSizeNoState(addr []byte, txNum uint64) (int, bool, error) {
	code, noState, err := ac.code.GetNoState(addr, txNum)
	if err != nil {
		return 0, false, err
	}
	return len(code), noState, nil
}

func (ac *AggregatorV3Context) AccountHistoryRange(startTxNum, endTxNum int, asc order.By, limit int, tx kv.Tx) (iter.KV, error) {
	return ac.accounts.HistoryRange(startTxNum, endTxNum, asc, limit, tx)
}

func (ac *AggregatorV3Context) StorageHistoryRange(startTxNum, endTxNum int, asc order.By, limit int, tx kv.Tx) (iter.KV, error) {
	return ac.storage.HistoryRange(startTxNum, endTxNum, asc, limit, tx)
}

func (ac *AggregatorV3Context) CodeHistoryRange(startTxNum, endTxNum int, asc order.By, limit int, tx kv.Tx) (iter.KV, error) {
	return ac.code.HistoryRange(startTxNum, endTxNum, asc, limit, tx)
}

func (ac *AggregatorV3Context) AccountHistoricalStateRange(startTxNum uint64, from, to []byte, limit int, tx kv.Tx) iter.KV {
	return ac.accounts.WalkAsOf(startTxNum, from, to, tx, limit)
}

func (ac *AggregatorV3Context) StorageHistoricalStateRange(startTxNum uint64, from, to []byte, limit int, tx kv.Tx) iter.KV {
	return ac.storage.WalkAsOf(startTxNum, from, to, tx, limit)
}

func (ac *AggregatorV3Context) CodeHistoricalStateRange(startTxNum uint64, from, to []byte, limit int, tx kv.Tx) iter.KV {
	return ac.code.WalkAsOf(startTxNum, from, to, tx, limit)
}

type FilesStats22 struct {
}

func (a *AggregatorV3) Stats() FilesStats22 {
	var fs FilesStats22
	return fs
}

type AggregatorV3Context struct {
	a          *AggregatorV3
	accounts   *HistoryContext
	storage    *HistoryContext
	code       *HistoryContext
	logAddrs   *InvertedIndexContext
	logTopics  *InvertedIndexContext
	tracesFrom *InvertedIndexContext
	tracesTo   *InvertedIndexContext
	keyBuf     []byte

	id uint64 // set only if TRACE_AGG=true
}

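// MakeContext - pins the current set of files for reading; callers must Close
// the context to release them. A usage sketch (addr, txNum and tx are
// illustrative):
//
//	ac := agg.MakeContext()
//	defer ac.Close()
//	v, ok, err := ac.ReadAccountDataNoStateWithRecent(addr, txNum, tx)
//	_, _, _ = v, ok, err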
func (a *AggregatorV3) MakeContext() *AggregatorV3Context {
	ac := &AggregatorV3Context{
		a:          a,
		accounts:   a.accounts.MakeContext(),
		storage:    a.storage.MakeContext(),
		code:       a.code.MakeContext(),
		logAddrs:   a.logAddrs.MakeContext(),
		logTopics:  a.logTopics.MakeContext(),
		tracesFrom: a.tracesFrom.MakeContext(),
		tracesTo:   a.tracesTo.MakeContext(),

		id: a.leakDetector.Add(),
	}

	return ac
}
func (ac *AggregatorV3Context) Close() {
	ac.a.leakDetector.Del(ac.id)
	ac.accounts.Close()
	ac.storage.Close()
	ac.code.Close()
	ac.logAddrs.Close()
	ac.logTopics.Close()
	ac.tracesFrom.Close()
	ac.tracesTo.Close()
}

// BackgroundResult - used only to indicate that some work is done;
// there is not much reason to pass exact results through this object - just fetch the latest state when needed
type BackgroundResult struct {
	err error
	has bool
}

func (br *BackgroundResult) Has() bool     { return br.has }
func (br *BackgroundResult) Set(err error) { br.has, br.err = true, err }
func (br *BackgroundResult) GetAndReset() (bool, error) {
	has, err := br.has, br.err
	br.has, br.err = false, nil
	return has, err
}

func lastIdInDB(db kv.RoDB, table string) (lstInDb uint64) {
	if err := db.View(context.Background(), func(tx kv.Tx) error {
		lst, _ := kv.LastKey(tx, table)
		if len(lst) > 0 {
			lstInDb = binary.BigEndian.Uint64(lst)
		}
		return nil
	}); err != nil {
		log.Warn("[snapshots] lastIdInDB", "err", err)
	}
	return lstInDb
}

// AggregatorStep is used for incremental reconstitution; it allows
// accessing history in an isolated way for each step
type AggregatorStep struct {
	a        *AggregatorV3
	accounts *HistoryStep
	storage  *HistoryStep
	code     *HistoryStep
	keyBuf   []byte
}

func (a *AggregatorV3) MakeSteps() ([]*AggregatorStep, error) {
	frozenAndIndexed := a.EndTxNumFrozenAndIndexed()
	accountSteps := a.accounts.MakeSteps(frozenAndIndexed)
	codeSteps := a.code.MakeSteps(frozenAndIndexed)
	storageSteps := a.storage.MakeSteps(frozenAndIndexed)
	if len(accountSteps) != len(storageSteps) || len(storageSteps) != len(codeSteps) {
		return nil, fmt.Errorf("different limit of steps (try merge snapshots): accountSteps=%d, storageSteps=%d, codeSteps=%d", len(accountSteps), len(storageSteps), len(codeSteps))
	}
	steps := make([]*AggregatorStep, len(accountSteps))
	for i, accountStep := range accountSteps {
		steps[i] = &AggregatorStep{
			a:        a,
			accounts: accountStep,
			storage:  storageSteps[i],
			code:     codeSteps[i],
		}
	}
	return steps, nil
}
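
// A reconstitution sketch iterating over each step's tx range (the processing
// body is illustrative):
//
//	steps, err := agg.MakeSteps()
//	if err != nil {
//		// handle err
//	}
//	for _, step := range steps {
//		from, to := step.TxNumRange()
//		_, _ = from, to // process txs [from, to) for this step
//	}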

func (as *AggregatorStep) TxNumRange() (uint64, uint64) {
	return as.accounts.indexFile.startTxNum, as.accounts.indexFile.endTxNum
}

func (as *AggregatorStep) IterateAccountsTxs() *ScanIteratorInc {
	return as.accounts.iterateTxs()
}

func (as *AggregatorStep) IterateStorageTxs() *ScanIteratorInc {
	return as.storage.iterateTxs()
}

func (as *AggregatorStep) IterateCodeTxs() *ScanIteratorInc {
	return as.code.iterateTxs()
}

func (as *AggregatorStep) ReadAccountDataNoState(addr []byte, txNum uint64) ([]byte, bool, uint64) {
	return as.accounts.GetNoState(addr, txNum)
}

func (as *AggregatorStep) ReadAccountStorageNoState(addr []byte, loc []byte, txNum uint64) ([]byte, bool, uint64) {
	if cap(as.keyBuf) < len(addr)+len(loc) {
		as.keyBuf = make([]byte, len(addr)+len(loc))
	} else if len(as.keyBuf) != len(addr)+len(loc) {
		as.keyBuf = as.keyBuf[:len(addr)+len(loc)]
	}
	copy(as.keyBuf, addr)
	copy(as.keyBuf[len(addr):], loc)
	return as.storage.GetNoState(as.keyBuf, txNum)
}

func (as *AggregatorStep) ReadAccountCodeNoState(addr []byte, txNum uint64) ([]byte, bool, uint64) {
	return as.code.GetNoState(addr, txNum)
}

func (as *AggregatorStep) ReadAccountCodeSizeNoState(addr []byte, txNum uint64) (int, bool, uint64) {
	code, noState, stateTxNum := as.code.GetNoState(addr, txNum)
	return len(code), noState, stateTxNum
}

func (as *AggregatorStep) MaxTxNumAccounts(addr []byte) (bool, uint64) {
	return as.accounts.MaxTxNum(addr)
}

func (as *AggregatorStep) MaxTxNumStorage(addr []byte, loc []byte) (bool, uint64) {
	if cap(as.keyBuf) < len(addr)+len(loc) {
		as.keyBuf = make([]byte, len(addr)+len(loc))
	} else if len(as.keyBuf) != len(addr)+len(loc) {
		as.keyBuf = as.keyBuf[:len(addr)+len(loc)]
	}
	copy(as.keyBuf, addr)
	copy(as.keyBuf[len(addr):], loc)
	return as.storage.MaxTxNum(as.keyBuf)
}

func (as *AggregatorStep) MaxTxNumCode(addr []byte) (bool, uint64) {
	return as.code.MaxTxNum(addr)
}

func (as *AggregatorStep) IterateAccountsHistory(txNum uint64) *HistoryIteratorInc {
	return as.accounts.interateHistoryBeforeTxNum(txNum)
}

func (as *AggregatorStep) IterateStorageHistory(txNum uint64) *HistoryIteratorInc {
	return as.storage.interateHistoryBeforeTxNum(txNum)
}

func (as *AggregatorStep) IterateCodeHistory(txNum uint64) *HistoryIteratorInc {
	return as.code.interateHistoryBeforeTxNum(txNum)
}

func (as *AggregatorStep) Clone() *AggregatorStep {
	return &AggregatorStep{
		a:        as.a,
		accounts: as.accounts.Clone(),
		storage:  as.storage.Clone(),
		code:     as.code.Clone(),
	}
}