github.com/ethersphere/bee/v2@v2.2.0/pkg/storage/storagetest/benchmark.go (about)

     1  // Copyright 2022 The Swarm Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package storagetest
     6  
     7  import (
     8  	"bytes"
     9  	"context"
    10  	"encoding/hex"
    11  	"errors"
    12  	"flag"
    13  	"fmt"
    14  	"math/rand"
    15  	"runtime"
    16  	"testing"
    17  	"time"
    18  
    19  	postagetesting "github.com/ethersphere/bee/v2/pkg/postage/testing"
    20  	storage "github.com/ethersphere/bee/v2/pkg/storage"
    21  	"github.com/ethersphere/bee/v2/pkg/swarm"
    22  )
    23  
// Benchmark tuning flags shared by all storage benchmarks in this package.
var (
	valueSize        = flag.Int("value_size", 100, "Size of each value")
	compressionRatio = flag.Float64("compression_ratio", 0.5, "")
	maxConcurrency   = flag.Int("max_concurrency", 2048, "Max concurrency in concurrent benchmark")
	batchSize        = flag.Int("batch_size", 1000, "Max number of records that would trigger commit")
)
    30  
    31  var keyLen = 16
    32  
const (
	// hitKeyFormat produces keys used for entries written to the store.
	hitKeyFormat = "1%015d"
	// missingKeyFormat produces keys with a distinct prefix, used for
	// lookups expected to miss.
	missingKeyFormat = "0%015d"
)
    37  
    38  func randomBytes(r *rand.Rand, n int) []byte {
    39  	b := make([]byte, n)
    40  	for i := 0; i < n; i++ {
    41  		b[i] = ' ' + byte(r.Intn('~'-' '+1))
    42  	}
    43  	return b
    44  }
    45  
    46  func compressibleBytes(r *rand.Rand, ratio float64, valueSize int) []byte {
    47  	m := maxInt(int(float64(valueSize)*ratio), 1)
    48  	p := randomBytes(r, m)
    49  	b := make([]byte, 0, valueSize+valueSize%m)
    50  	for len(b) < valueSize {
    51  		b = append(b, p...)
    52  	}
    53  	return b[:valueSize]
    54  }
    55  
    56  type randomValueGenerator struct {
    57  	b []byte
    58  	k int
    59  }
    60  
    61  func (g *randomValueGenerator) Value(i int) []byte {
    62  	i = (i * g.k) % len(g.b)
    63  	return g.b[i : i+g.k]
    64  }
    65  
    66  func makeRandomValueGenerator(r *rand.Rand, ratio float64, valueSize int) randomValueGenerator {
    67  	b := compressibleBytes(r, ratio, valueSize)
    68  	max := maxInt(valueSize, 1024*1024)
    69  	for len(b) < max {
    70  		b = append(b, compressibleBytes(r, ratio, valueSize)...)
    71  	}
    72  	return randomValueGenerator{b: b, k: valueSize}
    73  }
    74  
// entryGenerator produces key/value pairs for benchmark writes: it is a
// keyGenerator that can additionally supply the value for the i-th entry.
type entryGenerator interface {
	keyGenerator
	Value(i int) []byte
}

// pairedEntryGenerator combines an independent key generator and a
// random value generator into a single entryGenerator.
type pairedEntryGenerator struct {
	keyGenerator
	randomValueGenerator
}
    84  
    85  type startAtEntryGenerator struct {
    86  	entryGenerator
    87  	start int
    88  }
    89  
    90  var _ entryGenerator = (*startAtEntryGenerator)(nil)
    91  
    92  func (g *startAtEntryGenerator) NKey() int {
    93  	return g.entryGenerator.NKey() - g.start
    94  }
    95  
    96  func (g *startAtEntryGenerator) Key(i int) []byte {
    97  	return g.entryGenerator.Key(g.start + i)
    98  }
    99  
   100  func newStartAtEntryGenerator(start int, g entryGenerator) entryGenerator {
   101  	return &startAtEntryGenerator{start: start, entryGenerator: g}
   102  }
   103  
   104  func newSequentialKeys(size int, start int, keyFormat string) [][]byte {
   105  	keys := make([][]byte, size)
   106  	buffer := make([]byte, size*keyLen)
   107  	for i := 0; i < size; i++ {
   108  		begin, end := i*keyLen, (i+1)*keyLen
   109  		key := buffer[begin:begin:end]
   110  		_, _ = fmt.Fprintf(bytes.NewBuffer(key), keyFormat, start+i)
   111  		keys[i] = buffer[begin:end:end]
   112  	}
   113  	return keys
   114  }
   115  
   116  func newRandomKeys(n int, format string) [][]byte {
   117  	r := rand.New(rand.NewSource(time.Now().Unix()))
   118  	keys := make([][]byte, n)
   119  	buffer := make([]byte, n*keyLen)
   120  	for i := 0; i < n; i++ {
   121  		begin, end := i*keyLen, (i+1)*keyLen
   122  		key := buffer[begin:begin:end]
   123  		_, _ = fmt.Fprintf(bytes.NewBuffer(key), format, r.Intn(n))
   124  		keys[i] = buffer[begin:end:end]
   125  	}
   126  	return keys
   127  }
   128  
   129  func newFullRandomKeys(size int, start int, format string) [][]byte {
   130  	keys := newSequentialKeys(size, start, format)
   131  	r := rand.New(rand.NewSource(time.Now().Unix()))
   132  	for i := 0; i < size; i++ {
   133  		j := r.Intn(size)
   134  		keys[i], keys[j] = keys[j], keys[i]
   135  	}
   136  	return keys
   137  }
   138  
   139  func newFullRandomEntryGenerator(start, size int) entryGenerator {
   140  	r := rand.New(rand.NewSource(time.Now().Unix()))
   141  	return &pairedEntryGenerator{
   142  		keyGenerator:         newFullRandomKeyGenerator(start, size),
   143  		randomValueGenerator: makeRandomValueGenerator(r, *compressionRatio, *valueSize),
   144  	}
   145  }
   146  
   147  func newSequentialEntryGenerator(size int) entryGenerator {
   148  	r := rand.New(rand.NewSource(time.Now().Unix()))
   149  	return &pairedEntryGenerator{
   150  		keyGenerator:         newSequentialKeyGenerator(size),
   151  		randomValueGenerator: makeRandomValueGenerator(r, *compressionRatio, *valueSize),
   152  	}
   153  }
   154  
// keyGenerator yields benchmark keys: NKey reports how many keys are
// available and Key returns the i-th one.
type keyGenerator interface {
	NKey() int
	Key(i int) []byte
}
   159  
   160  type reversedKeyGenerator struct {
   161  	keyGenerator
   162  }
   163  
   164  var _ keyGenerator = (*reversedKeyGenerator)(nil)
   165  
   166  func (g *reversedKeyGenerator) Key(i int) []byte {
   167  	return g.keyGenerator.Key(g.NKey() - i - 1)
   168  }
   169  
   170  func newReversedKeyGenerator(g keyGenerator) keyGenerator {
   171  	return &reversedKeyGenerator{keyGenerator: g}
   172  }
   173  
   174  type roundKeyGenerator struct {
   175  	keyGenerator
   176  }
   177  
   178  var _ keyGenerator = (*roundKeyGenerator)(nil)
   179  
   180  func (g *roundKeyGenerator) Key(i int) []byte {
   181  	index := i % g.NKey()
   182  	return g.keyGenerator.Key(index)
   183  }
   184  
   185  func newRoundKeyGenerator(g keyGenerator) keyGenerator {
   186  	return &roundKeyGenerator{keyGenerator: g}
   187  }
   188  
   189  type predefinedKeyGenerator struct {
   190  	keys [][]byte
   191  }
   192  
   193  func (g *predefinedKeyGenerator) NKey() int {
   194  	return len(g.keys)
   195  }
   196  
   197  func (g *predefinedKeyGenerator) Key(i int) []byte {
   198  	if i >= len(g.keys) {
   199  		return g.keys[0]
   200  	}
   201  	return g.keys[i]
   202  }
   203  
// newRandomKeyGenerator returns n keys in the "hit" format with random
// indices; duplicates are possible.
func newRandomKeyGenerator(n int) keyGenerator {
	return &predefinedKeyGenerator{keys: newRandomKeys(n, hitKeyFormat)}
}

// newRandomMissingKeyGenerator returns n keys in the "missing" format
// with random indices, for benchmarking lookups that miss.
func newRandomMissingKeyGenerator(n int) keyGenerator {
	return &predefinedKeyGenerator{keys: newRandomKeys(n, missingKeyFormat)}
}

// newFullRandomKeyGenerator returns n sequential "hit" keys starting at
// start, shuffled into random order (each key appears exactly once).
func newFullRandomKeyGenerator(start, n int) keyGenerator {
	return &predefinedKeyGenerator{keys: newFullRandomKeys(n, start, hitKeyFormat)}
}

// newSequentialKeyGenerator returns n sequential "hit" keys starting at 0.
func newSequentialKeyGenerator(n int) keyGenerator {
	return &predefinedKeyGenerator{keys: newSequentialKeys(n, 0, hitKeyFormat)}
}
   219  
   220  func maxInt(a int, b int) int {
   221  	if a >= b {
   222  		return a
   223  	}
   224  	return b
   225  }
   226  
   227  func doRead(b *testing.B, db storage.Store, g keyGenerator, allowNotFound bool) {
   228  	b.Helper()
   229  
   230  	for i := 0; i < b.N; i++ {
   231  		key := g.Key(i)
   232  		item := &obj1{
   233  			Id: string(key),
   234  		}
   235  		err := db.Get(item)
   236  		switch {
   237  		case err == nil:
   238  		case allowNotFound && errors.Is(err, storage.ErrNotFound):
   239  		default:
   240  			b.Fatalf("%d: db get key[%s] error: %s\n", b.N, key, err)
   241  		}
   242  	}
   243  }
   244  
   245  type singularDBWriter struct {
   246  	db storage.Store
   247  }
   248  
   249  func (w *singularDBWriter) Put(key, value []byte) error {
   250  	item := &obj1{
   251  		Id:  string(key),
   252  		Buf: value,
   253  	}
   254  	return w.db.Put(item)
   255  }
   256  
   257  func (w *singularDBWriter) Delete(key []byte) error {
   258  	item := &obj1{
   259  		Id: string(key),
   260  	}
   261  	return w.db.Delete(item)
   262  }
   263  
   264  func newDBWriter(db storage.Store) *singularDBWriter {
   265  	return &singularDBWriter{db: db}
   266  }
   267  
   268  func doWrite(b *testing.B, db storage.Store, g entryGenerator) {
   269  	b.Helper()
   270  
   271  	w := newDBWriter(db)
   272  	for i := 0; i < b.N; i++ {
   273  		if err := w.Put(g.Key(i), g.Value(i)); err != nil {
   274  			b.Fatalf("write key '%s': %v", string(g.Key(i)), err)
   275  		}
   276  	}
   277  }
   278  
   279  func doDelete(b *testing.B, db storage.Store, g keyGenerator) {
   280  	b.Helper()
   281  
   282  	w := newDBWriter(db)
   283  	for i := 0; i < b.N; i++ {
   284  		if err := w.Delete(g.Key(i)); err != nil {
   285  			b.Fatalf("delete key '%s': %v", string(g.Key(i)), err)
   286  		}
   287  	}
   288  }
   289  
// resetBenchmark forces a garbage-collection cycle and restarts the
// benchmark timer so that setup work done so far (e.g. populating the
// store) is excluded from the measurement.
func resetBenchmark(b *testing.B) {
	b.Helper()

	runtime.GC()
	b.ResetTimer()
}
   296  
// populate seeds db with b.N entries keyed by fully shuffled sequential
// keys starting at 0.
func populate(b *testing.B, db storage.Store) {
	b.Helper()

	doWrite(b, db, newFullRandomEntryGenerator(0, b.N))
}
   302  
   303  // chunk
   304  func doDeleteChunk(b *testing.B, db storage.ChunkStore, g keyGenerator) {
   305  	b.Helper()
   306  
   307  	for i := 0; i < b.N; i++ {
   308  		addr := swarm.MustParseHexAddress(string(g.Key(i)))
   309  		if err := db.Delete(context.Background(), addr); err != nil {
   310  			b.Fatalf("delete key '%s': %v", string(g.Key(i)), err)
   311  		}
   312  	}
   313  }
   314  
   315  func doWriteChunk(b *testing.B, db storage.Putter, g entryGenerator) {
   316  	b.Helper()
   317  
   318  	for i := 0; i < b.N; i++ {
   319  		buf := make([]byte, swarm.HashSize)
   320  		if _, err := hex.Decode(buf, g.Key(i)); err != nil {
   321  			b.Fatalf("decode value: %v", err)
   322  		}
   323  		addr := swarm.NewAddress(buf)
   324  		chunk := swarm.NewChunk(addr, g.Value(i)).WithStamp(postagetesting.MustNewStamp())
   325  		if err := db.Put(context.Background(), chunk); err != nil {
   326  			b.Fatalf("write key '%s': %v", string(g.Key(i)), err)
   327  		}
   328  	}
   329  }
   330  
   331  func doReadChunk(b *testing.B, db storage.ChunkStore, g keyGenerator, allowNotFound bool) {
   332  	b.Helper()
   333  
   334  	for i := 0; i < b.N; i++ {
   335  		key := string(g.Key(i))
   336  		addr := swarm.MustParseHexAddress(key)
   337  		_, err := db.Get(context.Background(), addr)
   338  		switch {
   339  		case err == nil:
   340  		case allowNotFound && errors.Is(err, storage.ErrNotFound):
   341  		default:
   342  			b.Fatalf("%d: db get key[%s] error: %s\n", b.N, key, err)
   343  		}
   344  	}
   345  }
   346  
   347  // fixed size batch
   348  type batchDBWriter struct {
   349  	db    storage.Batcher
   350  	batch storage.Batch
   351  	max   int
   352  	count int
   353  }
   354  
   355  func (w *batchDBWriter) commit(max int) {
   356  	if w.count >= max {
   357  		_ = w.batch.Commit()
   358  		w.count = 0
   359  		w.batch = w.db.Batch(context.Background())
   360  	}
   361  }
   362  
   363  func (w *batchDBWriter) Put(key, value []byte) {
   364  	item := &obj1{
   365  		Id:  string(key),
   366  		Buf: value,
   367  	}
   368  	_ = w.batch.Put(item)
   369  	w.count++
   370  	w.commit(w.max)
   371  }
   372  
   373  func (w *batchDBWriter) Delete(key []byte) {
   374  	item := &obj1{
   375  		Id: string(key),
   376  	}
   377  	_ = w.batch.Delete(item)
   378  	w.count++
   379  	w.commit(w.max)
   380  }
   381  
   382  func newBatchDBWriter(db storage.Batcher) *batchDBWriter {
   383  	batch := db.Batch(context.Background())
   384  	return &batchDBWriter{
   385  		db:    db,
   386  		batch: batch,
   387  		max:   *batchSize,
   388  	}
   389  }