github.com/koko1123/flow-go-1@v0.29.6/ledger/complete/ledger_benchmark_test.go

package complete_test

import (
	"math"
	"math/rand"
	"testing"
	"time"

	"github.com/rs/zerolog"
	"github.com/stretchr/testify/require"
	"go.uber.org/atomic"

	"github.com/koko1123/flow-go-1/ledger"
	"github.com/koko1123/flow-go-1/ledger/common/pathfinder"
	"github.com/koko1123/flow-go-1/ledger/common/testutils"
	"github.com/koko1123/flow-go-1/ledger/complete"
	"github.com/koko1123/flow-go-1/ledger/complete/wal"
	"github.com/koko1123/flow-go-1/ledger/partial/ptrie"
	"github.com/koko1123/flow-go-1/module/metrics"
)
// GENERAL COMMENT:
// Running these benchmarks with
//
//	go test -bench=. -benchmem
//
// also tracks heap allocations for each benchmark.
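//
// To run a single benchmark from this file, for example:
//
//	go test -bench=BenchmarkStorage -benchmem ./ledger/complete/
//
// (the package path assumes the command is run from the module root).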
func BenchmarkStorage(b *testing.B) { benchmarkStorage(100, b) }

// benchmarkStorage benchmarks the performance of the storage layer
func benchmarkStorage(steps int, b *testing.B) {
	// assumption: 1000 key updates per collection
	const (
		numInsPerStep      = 1000
		keyNumberOfParts   = 10
		keyPartMinByteSize = 1
		keyPartMaxByteSize = 100
		valueMaxByteSize   = 32
		checkpointDistance = math.MaxInt // A large number to prevent checkpoint creation.
		checkpointsToKeep  = 1
	)

	rand.Seed(time.Now().UnixNano())

	dir := b.TempDir()

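	// Wire up the storage stack under benchmark: a disk-backed WAL, the
	// ledger itself, and a compactor. checkpointDistance is math.MaxInt so no
	// checkpoints are written while the benchmark runs.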
	diskWal, err := wal.NewDiskWAL(zerolog.Nop(), nil, metrics.NewNoopCollector(), dir, steps+1, pathfinder.PathByteSize, wal.SegmentSize)
	require.NoError(b, err)

	led, err := complete.NewLedger(diskWal, steps+1, &metrics.NoopCollector{}, zerolog.Logger{}, complete.DefaultPathFinderVersion)
	require.NoError(b, err)

	compactor, err := complete.NewCompactor(led, diskWal, zerolog.Nop(), uint(steps+1), checkpointDistance, checkpointsToKeep, atomic.NewBool(false))
	require.NoError(b, err)

	<-compactor.Ready()

	defer func() {
		<-led.Done()
		<-compactor.Done()
	}()

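	// Totals accumulated across all steps; they are reported as custom
	// metrics via b.ReportMetric once the loop finishes.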
	totalUpdateTimeMS := 0
	totalReadTimeMS := 0
	totalProofTimeMS := 0
	totalRegOperation := 0
	totalProofSize := 0
	totalPTrieConstTimeMS := 0

	state := led.InitialState()
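	// Each step writes numInsPerStep random registers, then measures reads,
	// batch-proof generation, and partial-trie construction against the
	// resulting state.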
	for i := 0; i < steps; i++ {

		keys := testutils.RandomUniqueKeys(numInsPerStep, keyNumberOfParts, keyPartMinByteSize, keyPartMaxByteSize)
		values := testutils.RandomValues(numInsPerStep, 1, valueMaxByteSize)

		totalRegOperation += len(keys)

		start := time.Now()
		update, err := ledger.NewUpdate(state, keys, values)
		if err != nil {
			b.Fatal(err)
		}

		newState, _, err := led.Set(update)
		if err != nil {
			b.Fatal(err)
		}

		elapsed := time.Since(start)
		totalUpdateTimeMS += int(elapsed / time.Millisecond)

		// read back the values that were just written
		start = time.Now()
		query, err := ledger.NewQuery(newState, keys)
		if err != nil {
			b.Fatal(err)
		}
		_, err = led.Get(query)
		if err != nil {
			b.Fatal(err)
		}
		elapsed = time.Since(start)
		totalReadTimeMS += int(elapsed / time.Millisecond)

		start = time.Now()
		// generate a batch proof for the queried keys (validated below by
		// constructing a partial trie)
		proof, err := led.Prove(query)
		if err != nil {
			b.Fatal(err)
		}
		elapsed = time.Since(start)
		totalProofTimeMS += int(elapsed / time.Millisecond)

		totalProofSize += len(proof)

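		// Time both the batch-proof decoding and the partial-trie
		// construction; building the PSMT also checks the proofs against the
		// new state's root hash.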
		start = time.Now()
		p, err := ledger.DecodeTrieBatchProof(proof)
		if err != nil {
			b.Fatal(err)
		}

		// construct a partial trie using the proofs
		_, err = ptrie.NewPSMT(ledger.RootHash(newState), p)
		if err != nil {
			b.Fatalf("failed to create PSMT: %v", err)
		}
		elapsed = time.Since(start)
		totalPTrieConstTimeMS += int(elapsed / time.Millisecond)

		state = newState
	}

	b.ReportMetric(float64(totalUpdateTimeMS/steps), "update_time_(ms)")
	b.ReportMetric(float64(totalUpdateTimeMS*1000000/totalRegOperation), "update_time_per_reg_(ns)")

	b.ReportMetric(float64(totalReadTimeMS/steps), "read_time_(ms)")
	b.ReportMetric(float64(totalReadTimeMS*1000000/totalRegOperation), "read_time_per_reg_(ns)")

	b.ReportMetric(float64(totalProofTimeMS/steps), "read_w_proof_time_(ms)")
	b.ReportMetric(float64(totalProofTimeMS*1000000/totalRegOperation), "read_w_proof_time_per_reg_(ns)")

	b.ReportMetric(float64(totalProofSize/steps), "proof_size_(byte)")
	b.ReportMetric(float64(totalPTrieConstTimeMS/steps), "ptrie_const_time_(ms)")

}

// BenchmarkTrieUpdate benchmarks the performance of a trie update
func BenchmarkTrieUpdate(b *testing.B) {
	// key updates per iteration
	const (
		numInsPerStep      = 10000
		keyNumberOfParts   = 3
		keyPartMinByteSize = 1
		keyPartMaxByteSize = 100
		valueMaxByteSize   = 32
		capacity           = 101
		checkpointDistance = math.MaxInt // A large number to prevent checkpoint creation.
		checkpointsToKeep  = 1
	)

	rand.Seed(1)

	dir := b.TempDir()

	diskWal, err := wal.NewDiskWAL(zerolog.Nop(), nil, metrics.NewNoopCollector(), dir, capacity, pathfinder.PathByteSize, wal.SegmentSize)
	require.NoError(b, err)

	led, err := complete.NewLedger(diskWal, capacity, &metrics.NoopCollector{}, zerolog.Logger{}, complete.DefaultPathFinderVersion)
	require.NoError(b, err)

	compactor, err := complete.NewCompactor(led, diskWal, zerolog.Nop(), capacity, checkpointDistance, checkpointsToKeep, atomic.NewBool(false))
	require.NoError(b, err)

	<-compactor.Ready()

	defer func() {
		<-led.Done()
		<-compactor.Done()
	}()

	state := led.InitialState()

	keys := testutils.RandomUniqueKeys(numInsPerStep, keyNumberOfParts, keyPartMinByteSize, keyPartMaxByteSize)
	values := testutils.RandomValues(numInsPerStep, 1, valueMaxByteSize)

	update, err := ledger.NewUpdate(state, keys, values)
	if err != nil {
		b.Fatal(err)
	}

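	// Every iteration applies the same update against the initial state, so
	// the timed loop isolates the cost of a single trie update.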
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_, _, err := led.Set(update)
		if err != nil {
			b.Fatal(err)
		}
	}
	b.StopTimer()
}

// BenchmarkTrieRead benchmarks the performance of a trie read
func BenchmarkTrieRead(b *testing.B) {
	// key updates per iteration
	const (
		numInsPerStep      = 10000
		keyNumberOfParts   = 10
		keyPartMinByteSize = 1
		keyPartMaxByteSize = 100
		valueMaxByteSize   = 32
		capacity           = 101
		checkpointDistance = math.MaxInt // A large number to prevent checkpoint creation.
		checkpointsToKeep  = 1
	)

	rand.Seed(1)

	dir := b.TempDir()

	diskWal, err := wal.NewDiskWAL(zerolog.Nop(), nil, metrics.NewNoopCollector(), dir, capacity, pathfinder.PathByteSize, wal.SegmentSize)
	require.NoError(b, err)

	led, err := complete.NewLedger(diskWal, capacity, &metrics.NoopCollector{}, zerolog.Logger{}, complete.DefaultPathFinderVersion)
	require.NoError(b, err)

	compactor, err := complete.NewCompactor(led, diskWal, zerolog.Nop(), capacity, checkpointDistance, checkpointsToKeep, atomic.NewBool(false))
	require.NoError(b, err)

	<-compactor.Ready()

	defer func() {
		<-led.Done()
		<-compactor.Done()
	}()

	state := led.InitialState()

	keys := testutils.RandomUniqueKeys(numInsPerStep, keyNumberOfParts, keyPartMinByteSize, keyPartMaxByteSize)
	values := testutils.RandomValues(numInsPerStep, 1, valueMaxByteSize)

	update, err := ledger.NewUpdate(state, keys, values)
	if err != nil {
		b.Fatal(err)
	}

	newState, _, err := led.Set(update)
	if err != nil {
		b.Fatal(err)
	}

	query, err := ledger.NewQuery(newState, keys)
	if err != nil {
		b.Fatal(err)
	}

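	// Each iteration reads back all numInsPerStep registers from the updated
	// state in a single batch query.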
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_, err = led.Get(query)
		if err != nil {
			b.Fatal(err)
		}
	}
	b.StopTimer()
}

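// BenchmarkLedgerGetOneValue benchmarks reading a single value, comparing a
// one-key batch query (Get) with the dedicated single-value path
// (GetSingleValue).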
func BenchmarkLedgerGetOneValue(b *testing.B) {
	// key updates per iteration
	const (
		numInsPerStep      = 10000
		keyNumberOfParts   = 10
		keyPartMinByteSize = 1
		keyPartMaxByteSize = 100
		valueMaxByteSize   = 32
		capacity           = 101
		checkpointDistance = math.MaxInt // A large number to prevent checkpoint creation.
		checkpointsToKeep  = 1
	)

	rand.Seed(1)

	dir := b.TempDir()

	diskWal, err := wal.NewDiskWAL(zerolog.Nop(), nil, metrics.NewNoopCollector(), dir, capacity, pathfinder.PathByteSize, wal.SegmentSize)
	require.NoError(b, err)

	led, err := complete.NewLedger(diskWal, capacity, &metrics.NoopCollector{}, zerolog.Logger{}, complete.DefaultPathFinderVersion)
	require.NoError(b, err)

	compactor, err := complete.NewCompactor(led, diskWal, zerolog.Nop(), capacity, checkpointDistance, checkpointsToKeep, atomic.NewBool(false))
	require.NoError(b, err)

	<-compactor.Ready()

	defer func() {
		<-led.Done()
		<-compactor.Done()
	}()

	state := led.InitialState()

	keys := testutils.RandomUniqueKeys(numInsPerStep, keyNumberOfParts, keyPartMinByteSize, keyPartMaxByteSize)
	values := testutils.RandomValues(numInsPerStep, 1, valueMaxByteSize)

	update, err := ledger.NewUpdate(state, keys, values)
	if err != nil {
		b.Fatal(err)
	}

	newState, _, err := led.Set(update)
	if err != nil {
		b.Fatal(err)
	}

	b.Run("batch get", func(b *testing.B) {
		query, err := ledger.NewQuery(newState, []ledger.Key{keys[0]})
		if err != nil {
			b.Fatal(err)
		}

		b.ResetTimer()
		for i := 0; i < b.N; i++ {
			_, err = led.Get(query)
			if err != nil {
				b.Fatal(err)
			}
		}
	})

	b.Run("single get", func(b *testing.B) {
		query, err := ledger.NewQuerySingleValue(newState, keys[0])
		if err != nil {
			b.Fatal(err)
		}

		b.ResetTimer()
		for i := 0; i < b.N; i++ {
			_, err = led.GetSingleValue(query)
			if err != nil {
				b.Fatal(err)
			}
		}
	})
}

// BenchmarkTrieProve benchmarks the performance of generating a trie proof
func BenchmarkTrieProve(b *testing.B) {
	// key updates per iteration
	const (
		numInsPerStep      = 10000
		keyNumberOfParts   = 10
		keyPartMinByteSize = 1
		keyPartMaxByteSize = 100
		valueMaxByteSize   = 32
		capacity           = 101
		checkpointDistance = math.MaxInt // A large number to prevent checkpoint creation.
		checkpointsToKeep  = 1
	)

	rand.Seed(1)

	dir := b.TempDir()

	diskWal, err := wal.NewDiskWAL(zerolog.Nop(), nil, metrics.NewNoopCollector(), dir, capacity, pathfinder.PathByteSize, wal.SegmentSize)
	require.NoError(b, err)

	led, err := complete.NewLedger(diskWal, capacity, &metrics.NoopCollector{}, zerolog.Logger{}, complete.DefaultPathFinderVersion)
	require.NoError(b, err)

	compactor, err := complete.NewCompactor(led, diskWal, zerolog.Nop(), capacity, checkpointDistance, checkpointsToKeep, atomic.NewBool(false))
	require.NoError(b, err)

	<-compactor.Ready()

	defer func() {
		<-led.Done()
		<-compactor.Done()
	}()

	state := led.InitialState()

	keys := testutils.RandomUniqueKeys(numInsPerStep, keyNumberOfParts, keyPartMinByteSize, keyPartMaxByteSize)
	values := testutils.RandomValues(numInsPerStep, 1, valueMaxByteSize)

	update, err := ledger.NewUpdate(state, keys, values)
	if err != nil {
		b.Fatal(err)
	}

	newState, _, err := led.Set(update)
	if err != nil {
		b.Fatal(err)
	}

	query, err := ledger.NewQuery(newState, keys)
	if err != nil {
		b.Fatal(err)
	}

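	// Each iteration generates a batch proof for all numInsPerStep keys
	// against the updated state.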
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_, err := led.Prove(query)
		if err != nil {
			b.Fatal(err)
		}
	}
	b.StopTimer()
}