github.com/ledgerwatch/erigon-lib@v1.0.0/state/aggregator_bench_test.go

package state

import (
	"bytes"
	"context"
	"fmt"
	"math/rand"
	"os"
	"path"
	"path/filepath"
	"testing"
	"time"

	"github.com/ledgerwatch/log/v3"
	"github.com/stretchr/testify/require"

	"github.com/ledgerwatch/erigon-lib/commitment"
	"github.com/ledgerwatch/erigon-lib/common"
	"github.com/ledgerwatch/erigon-lib/common/length"
	"github.com/ledgerwatch/erigon-lib/compress"
	"github.com/ledgerwatch/erigon-lib/kv"
	"github.com/ledgerwatch/erigon-lib/kv/mdbx"
	"github.com/ledgerwatch/erigon-lib/recsplit"
)

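// testDbAndAggregatorBench opens an in-memory MDBX database and a fresh
// Aggregator over a temporary directory, registering both for cleanup when
// the benchmark finishes.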
func testDbAndAggregatorBench(b *testing.B, aggStep uint64) (string, kv.RwDB, *Aggregator) {
	b.Helper()
	logger := log.New()
	path := b.TempDir()
	b.Cleanup(func() { os.RemoveAll(path) })
	db := mdbx.NewMDBX(logger).InMem(path).WithTableCfg(func(defaultBuckets kv.TableCfg) kv.TableCfg {
		return kv.ChaindataTablesCfg
	}).MustOpen()
	b.Cleanup(db.Close)
	agg, err := NewAggregator(path, path, aggStep, CommitmentModeDirect, commitment.VariantHexPatriciaTrie, logger)
	require.NoError(b, err)
	b.Cleanup(agg.Close)
	return path, db, agg
}

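// BenchmarkAggregator_Processing measures the per-transaction cost of writing
// one random storage key/value pair through the Aggregator and committing it
// with FinishTx. It can be run with the standard tooling, e.g.
//
//	go test -run=NONE -bench=BenchmarkAggregator_Processing ./state/...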
func BenchmarkAggregator_Processing(b *testing.B) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	longKeys := queueKeys(ctx, 64, length.Addr+length.Hash)
	vals := queueKeys(ctx, 53, length.Hash)

	aggStep := uint64(10_000)
	_, db, agg := testDbAndAggregatorBench(b, aggStep)

	tx, err := db.BeginRw(ctx)
	require.NoError(b, err)
	defer func() {
		if tx != nil {
			tx.Rollback()
		}
	}()

	agg.SetTx(tx)
	defer agg.StartWrites().FinishWrites()

	b.ReportAllocs()
	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		key := <-longKeys
		val := <-vals
		txNum := uint64(i)
		agg.SetTxNum(txNum)
		err := agg.WriteAccountStorage(key[:length.Addr], key[length.Addr:], val)
		require.NoError(b, err)
		err = agg.FinishTx()
		require.NoError(b, err)
	}
}

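// queueKeys starts a goroutine that produces an endless stream of random byte
// slices of the given size, seeded deterministically, until ctx is cancelled.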
func queueKeys(ctx context.Context, seed, ofSize uint64) <-chan []byte {
	rnd := rand.New(rand.NewSource(int64(seed)))
	keys := make(chan []byte, 1)
	go func() {
		defer close(keys)
		for {
			bb := make([]byte, ofSize)
			rnd.Read(bb)

			// select on ctx.Done() as well, so the producer goroutine cannot
			// leak by blocking on a send after the consumer has gone away
			select {
			case keys <- bb:
			case <-ctx.Done():
				return
			}
		}
	}()
	return keys
}

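// Benchmark_BtreeIndex_Allocation times building the in-memory B-tree layout
// for a randomly sized key set with M = 1<<12.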
func Benchmark_BtreeIndex_Allocation(b *testing.B) {
	rnd := rand.New(rand.NewSource(time.Now().UnixNano()))
	for i := 0; i < b.N; i++ {
		now := time.Now()
		count := rnd.Intn(1_000_000_000)
		bt := newBtAlloc(uint64(count), uint64(1<<12), true)
		bt.traverseDfs()
		fmt.Printf("alloc %v\n", time.Since(now))
	}
}

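// Benchmark_BtreeIndex_Search builds a .bti index over an existing KV file at
// ../../data/storage.256-288.kv and measures random Seek calls against it.
// The benchmark is skipped when the data file is not present.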
func Benchmark_BtreeIndex_Search(b *testing.B) {
	logger := log.New()
	rnd := rand.New(rand.NewSource(time.Now().UnixNano()))
	tmp := b.TempDir()
	defer os.RemoveAll(tmp)

	dataPath := "../../data/storage.256-288.kv"
	if _, err := os.Stat(dataPath); err != nil {
		b.Skipf("requires existing KV file at %s", dataPath)
	}

	indexPath := path.Join(tmp, filepath.Base(dataPath)+".bti")
	err := BuildBtreeIndex(dataPath, indexPath, logger)
	require.NoError(b, err)

	M := 1024
	bt, err := OpenBtreeIndex(indexPath, dataPath, uint64(M))
	require.NoError(b, err)

	idx := NewBtIndexReader(bt)

	keys, err := pivotKeysFromKV(dataPath)
	require.NoError(b, err)

	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		p := rnd.Intn(len(keys))
		cur, err := idx.Seek(keys[p])
		require.NoErrorf(b, err, "i=%d", i)
		require.EqualValues(b, keys[p], cur.key)
		require.NotEmptyf(b, cur.Value(), "i=%d", i)
	}

	bt.Close()
}

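// benchInitBtreeIndex generates a compressed KV file in a temporary directory,
// builds a B-tree index with the given M over it, and returns the index
// together with its pivot keys.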
func benchInitBtreeIndex(b *testing.B, M uint64) (*BtIndex, [][]byte, string) {
	b.Helper()

	logger := log.New()
	tmp := b.TempDir()
	b.Cleanup(func() { os.RemoveAll(tmp) })

	dataPath := generateCompressedKV(b, tmp, 52, 10, 1_000_000, logger)
	indexPath := path.Join(tmp, filepath.Base(dataPath)+".bt")
	bt, err := CreateBtreeIndex(indexPath, dataPath, M, logger)
	require.NoError(b, err)

	keys, err := pivotKeysFromKV(dataPath)
	require.NoError(b, err)
	return bt, keys, dataPath
}

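// Benchmark_BTree_Seek measures random Seek calls against a generated B-tree
// index, both on their own and followed by a run of Next calls over the
// following keys.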
func Benchmark_BTree_Seek(b *testing.B) {
	M := uint64(1024)
	bt, keys, _ := benchInitBtreeIndex(b, M)
	defer bt.Close()

	rnd := rand.New(rand.NewSource(time.Now().UnixNano()))

	b.Run("seek_only", func(b *testing.B) {
		for i := 0; i < b.N; i++ {
			p := rnd.Intn(len(keys))

			cur, err := bt.Seek(keys[p])
			require.NoError(b, err)

			require.EqualValues(b, keys[p], cur.key)
		}
	})

	b.Run("seek_then_next", func(b *testing.B) {
		for i := 0; i < b.N; i++ {
			p := rnd.Intn(len(keys))

			cur, err := bt.Seek(keys[p])
			require.NoError(b, err)

			require.EqualValues(b, keys[p], cur.key)

			prevKey := common.Copy(keys[p])
			ntimer := time.Duration(0)
			nextKeys := 5000
			for j := 0; j < nextKeys; j++ {
				ntime := time.Now()

				if !cur.Next() {
					break
				}
				ntimer += time.Since(ntime)

				nk := cur.Key()
				if bytes.Compare(prevKey, nk) > 0 {
					b.Fatalf("prev %s cur %s, next key should be greater", prevKey, nk)
				}
				prevKey = nk
			}
			if i%1000 == 0 {
				fmt.Printf("next_access_last[of %d keys] %v\n", nextKeys, ntimer/time.Duration(nextKeys))
			}
		}
	})
}

// Benchmark_Recsplit_Find_ExternalFile measures recsplit index lookups plus a
// key/value read from the decompressor. It requires an existing KV file at
// ../../data/storage.kv together with its recsplit index ../../data/storage.kvi,
// and is skipped otherwise.
func Benchmark_Recsplit_Find_ExternalFile(b *testing.B) {
	dataPath := "../../data/storage.kv"
	f, err := os.Stat(dataPath)
	if err != nil || f.IsDir() {
		b.Skip("requires existing KV index file at ../../data/storage.kv")
	}

	rnd := rand.New(rand.NewSource(time.Now().UnixNano()))

	indexPath := dataPath + "i"
	idx, err := recsplit.OpenIndex(indexPath)
	require.NoError(b, err)
	idxr := recsplit.NewIndexReader(idx)

	decomp, err := compress.NewDecompressor(dataPath)
	require.NoError(b, err)
	defer decomp.Close()

	getter := decomp.MakeGetter()

	keys, err := pivotKeysFromKV(dataPath)
	require.NoError(b, err)

	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		p := rnd.Intn(len(keys))

		offset := idxr.Lookup(keys[p])
		getter.Reset(offset)

		require.True(b, getter.HasNext())

		key, pa := getter.Next(nil)
		require.NotEmpty(b, key)

		value, pb := getter.Next(nil)
		// only require a non-empty value when the record spans more than one byte
		if pb-pa != 1 {
			require.NotEmpty(b, value)
		}

		require.EqualValues(b, keys[p], key)
	}
}