github.com/cilium/statedb@v0.3.2/benchmarks_test.go (about)

     1  // SPDX-License-Identifier: Apache-2.0
     2  // Copyright Authors of Cilium
     3  
     4  package statedb
     5  
     6  import (
     7  	"context"
     8  	"iter"
     9  	"log/slog"
    10  	"math/rand"
    11  	"sort"
    12  	"testing"
    13  	"time"
    14  
    15  	"github.com/stretchr/testify/assert"
    16  	"github.com/stretchr/testify/require"
    17  
    18  	"github.com/cilium/hive"
    19  	"github.com/cilium/hive/cell"
    20  	"github.com/cilium/hive/hivetest"
    21  	"github.com/cilium/statedb/index"
    22  	"github.com/cilium/statedb/part"
    23  )
    24  
// numObjectsToInsert is the number of objects inserted per round in the
// benchmarks that do repeated inserts.
const numObjectsToInsert = 1000
    27  
    28  func BenchmarkDB_WriteTxn_1(b *testing.B) {
    29  	db, table := newTestDBWithMetrics(b, &NopMetrics{})
    30  	b.ResetTimer()
    31  	for i := 0; i < b.N; i++ {
    32  		txn := db.WriteTxn(table)
    33  		_, _, err := table.Insert(txn, testObject{ID: 123})
    34  		if err != nil {
    35  			b.Fatalf("Insert error: %s", err)
    36  		}
    37  		txn.Commit()
    38  	}
    39  	b.ReportMetric(float64(b.N)/b.Elapsed().Seconds(), "objects/sec")
    40  }
    41  
// BenchmarkDB_WriteTxn_10 measures insert throughput with 10 objects
// committed per write transaction.
func BenchmarkDB_WriteTxn_10(b *testing.B) {
	benchmarkDB_WriteTxn_batch(b, 10)
}

// BenchmarkDB_WriteTxn_100 measures insert throughput with 100 objects
// committed per write transaction.
func BenchmarkDB_WriteTxn_100(b *testing.B) {
	benchmarkDB_WriteTxn_batch(b, 100)
}

// BenchmarkDB_WriteTxn_1000 measures insert throughput with 1000 objects
// committed per write transaction.
func BenchmarkDB_WriteTxn_1000(b *testing.B) {
	benchmarkDB_WriteTxn_batch(b, 1000)
}
    53  
    54  func benchmarkDB_WriteTxn_batch(b *testing.B, batchSize int) {
    55  	db, table := newTestDBWithMetrics(b, &NopMetrics{})
    56  	n := b.N
    57  	b.ResetTimer()
    58  
    59  	for n > 0 {
    60  		txn := db.WriteTxn(table)
    61  		toWrite := min(n, batchSize)
    62  		for i := range toWrite {
    63  			_, _, err := table.Insert(txn, testObject{ID: uint64(i)})
    64  			if err != nil {
    65  				b.Fatalf("Insert error: %s", err)
    66  			}
    67  		}
    68  		txn.Commit()
    69  		n -= toWrite
    70  	}
    71  
    72  	b.ReportMetric(float64(b.N)/b.Elapsed().Seconds(), "objects/sec")
    73  }
    74  
    75  func BenchmarkDB_WriteTxn_100_SecondaryIndex(b *testing.B) {
    76  	db, table := newTestDBWithMetrics(b, &NopMetrics{}, tagsIndex)
    77  	batchSize := 100
    78  	n := b.N
    79  	tagSet := part.NewSet("test")
    80  
    81  	for n > 0 {
    82  		txn := db.WriteTxn(table)
    83  		toWrite := min(n, batchSize)
    84  		for i := range toWrite {
    85  			_, _, err := table.Insert(txn, testObject{ID: uint64(i), Tags: tagSet})
    86  			if err != nil {
    87  				b.Fatalf("Insert error: %s", err)
    88  			}
    89  		}
    90  		txn.Commit()
    91  		n -= toWrite
    92  	}
    93  
    94  	b.ReportMetric(float64(b.N)/b.Elapsed().Seconds(), "objects/sec")
    95  }
    96  
// BenchmarkDB_Modify measures in-place updates done with Table.Modify.
func BenchmarkDB_Modify(b *testing.B) {
	benchmarkDB_Modify_vs_GetInsert(b, false)
}

// BenchmarkDB_GetInsert measures the equivalent update done as a Get
// followed by an Insert, for comparison against Modify.
func BenchmarkDB_GetInsert(b *testing.B) {
	benchmarkDB_Modify_vs_GetInsert(b, true)
}
   104  
   105  func benchmarkDB_Modify_vs_GetInsert(b *testing.B, doGetInsert bool) {
   106  	db, table := newTestDBWithMetrics(b, &NopMetrics{})
   107  
   108  	ids := []uint64{}
   109  	for i := 0; i < numObjectsToInsert; i++ {
   110  		ids = append(ids, uint64(i))
   111  	}
   112  	rand.Shuffle(numObjectsToInsert, func(i, j int) {
   113  		ids[i], ids[j] = ids[j], ids[i]
   114  	})
   115  	txn := db.WriteTxn(table)
   116  	for _, id := range ids {
   117  		_, _, err := table.Insert(txn, testObject{ID: id})
   118  		if err != nil {
   119  			b.Fatalf("Insert error: %s", err)
   120  		}
   121  	}
   122  	txn.Commit()
   123  
   124  	b.ResetTimer()
   125  
   126  	for i := 0; i < b.N; i++ {
   127  		txn := db.WriteTxn(table)
   128  		for _, id := range ids {
   129  			if doGetInsert {
   130  				old, _, _ := table.Get(txn, idIndex.Query(id))
   131  				table.Insert(txn, old)
   132  			} else {
   133  				table.Modify(
   134  					txn,
   135  					testObject{ID: id},
   136  					func(old testObject, new testObject) testObject {
   137  						return new
   138  					})
   139  			}
   140  		}
   141  		txn.Commit()
   142  	}
   143  	b.ReportMetric(float64(b.N*len(ids))/b.Elapsed().Seconds(), "objects/sec")
   144  }
   145  
   146  func BenchmarkDB_RandomInsert(b *testing.B) {
   147  	db, table := newTestDBWithMetrics(b, &NopMetrics{})
   148  	ids := []uint64{}
   149  	for i := 0; i < numObjectsToInsert; i++ {
   150  		ids = append(ids, uint64(i))
   151  	}
   152  	rand.Shuffle(numObjectsToInsert, func(i, j int) {
   153  		ids[i], ids[j] = ids[j], ids[i]
   154  	})
   155  	b.ResetTimer()
   156  
   157  	for j := 0; j < b.N; j++ {
   158  		txn := db.WriteTxn(table)
   159  		for _, id := range ids {
   160  			_, _, err := table.Insert(txn, testObject{ID: id, Tags: part.Set[string]{}})
   161  			if err != nil {
   162  				b.Fatalf("Insert error: %s", err)
   163  			}
   164  		}
   165  		txn.Abort()
   166  	}
   167  	b.StopTimer()
   168  
   169  	b.ReportMetric(float64(numObjectsToInsert*b.N)/b.Elapsed().Seconds(), "objects/sec")
   170  }
   171  
   172  // BenchmarkDB_RandomReplace is like BenchmarkDB_RandomInsert, but instead of
   173  // always inserting a new value this test replaces an existing value.
   174  // This mainly shows the cost of the revision index delete and insert.
   175  //
   176  // This also uses a secondary index to make this a more realistic.
   177  func BenchmarkDB_RandomReplace(b *testing.B) {
   178  	db, table := newTestDBWithMetrics(b, &NopMetrics{}, tagsIndex)
   179  	ids := []uint64{}
   180  	txn := db.WriteTxn(table)
   181  	for i := 0; i < numObjectsToInsert; i++ {
   182  		tag := "odd"
   183  		if i%2 == 0 {
   184  			tag = "even"
   185  		}
   186  		table.Insert(txn, testObject{ID: uint64(i), Tags: part.NewSet(tag)})
   187  		ids = append(ids, uint64(i))
   188  	}
   189  	txn.Commit()
   190  	rand.Shuffle(numObjectsToInsert, func(i, j int) {
   191  		ids[i], ids[j] = ids[j], ids[i]
   192  	})
   193  	b.ResetTimer()
   194  
   195  	for j := 0; j < b.N; j++ {
   196  		txn := db.WriteTxn(table)
   197  		for _, id := range ids {
   198  			tag := "odd"
   199  			if id%2 == 0 {
   200  				tag = "even"
   201  			}
   202  			_, _, err := table.Insert(txn, testObject{ID: id, Tags: part.NewSet(tag)})
   203  			if err != nil {
   204  				b.Fatalf("Insert error: %s", err)
   205  			}
   206  		}
   207  		txn.Abort()
   208  	}
   209  	b.StopTimer()
   210  
   211  	b.ReportMetric(float64(numObjectsToInsert*b.N)/b.Elapsed().Seconds(), "objects/sec")
   212  }
   213  
   214  func BenchmarkDB_SequentialInsert(b *testing.B) {
   215  	db, table := newTestDBWithMetrics(b, &NopMetrics{})
   216  	b.ResetTimer()
   217  
   218  	for j := 0; j < b.N; j++ {
   219  		txn := db.WriteTxn(table)
   220  		for id := uint64(0); id < uint64(numObjectsToInsert); id++ {
   221  			_, _, err := table.Insert(txn, testObject{ID: id})
   222  			if err != nil {
   223  				b.Fatalf("Insert error: %s", err)
   224  			}
   225  		}
   226  		txn.Commit()
   227  	}
   228  	b.StopTimer()
   229  
   230  	require.EqualValues(b, table.NumObjects(db.ReadTxn()), numObjectsToInsert)
   231  	b.ReportMetric(float64(numObjectsToInsert*b.N)/b.Elapsed().Seconds(), "objects/sec")
   232  }
   233  
   234  func BenchmarkDB_Changes_Baseline(b *testing.B) {
   235  	db, table := newTestDBWithMetrics(b, &NopMetrics{})
   236  	b.ResetTimer()
   237  	for n := 0; n < b.N; n++ {
   238  		txn := db.WriteTxn(table)
   239  		for i := uint64(0); i < numObjectsToInsert; i++ {
   240  			_, _, err := table.Insert(txn, testObject{ID: uint64(i)})
   241  			if err != nil {
   242  				b.Fatalf("Insert: %s", err)
   243  			}
   244  		}
   245  		txn.Commit()
   246  
   247  		// Delete all objects to time the baseline without deletion tracking.
   248  		txn = db.WriteTxn(table)
   249  		table.DeleteAll(txn)
   250  		txn.Commit()
   251  	}
   252  	b.ReportMetric(float64(b.N*numObjectsToInsert)/b.Elapsed().Seconds(), "objects/sec")
   253  }
   254  
// BenchmarkDB_Changes measures the cost of insert-and-delete rounds while a
// change iterator is observing the table, i.e. with deletion tracking
// enabled. Compare against BenchmarkDB_Changes_Baseline.
func BenchmarkDB_Changes(b *testing.B) {
	db, table := newTestDBWithMetrics(b, &NopMetrics{})

	// Create the change iterator. The table must be empty at this point so
	// each round observes exactly the objects it inserts.
	txn := db.WriteTxn(table)
	require.Zero(b, table.NumObjects(txn))
	iter, err := table.Changes(txn)
	txn.Commit()
	require.NoError(b, err)

	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		// Create objects
		txn = db.WriteTxn(table)
		for i := 0; i < numObjectsToInsert; i++ {
			_, _, err := table.Insert(txn, testObject{ID: uint64(i)})
			if err != nil {
				b.Fatalf("Insert: %s", err)
			}
		}
		txn.Commit()

		// Observe the creations. Every change at this point must be an
		// insert, not a delete.
		changes, watch := iter.Next(db.ReadTxn())
		nDeleted := 0
		nExists := 0

		for change := range changes {
			if change.Deleted {
				b.Fatalf("expected create for %v", change)
			}
			nExists++
		}
		if numObjectsToInsert != nExists {
			b.Fatalf("expected to observe %d, got %d", numObjectsToInsert, nExists)
		}

		// Delete all objects to time the cost for deletion tracking.
		txn = db.WriteTxn(table)
		table.DeleteAll(txn)
		txn.Commit()

		// Watch channel should be closed now.
		<-watch

		// Observe the deletions.
		changes, watch = iter.Next(db.ReadTxn())
		for change := range changes {
			if change.Deleted {
				nDeleted++
				nExists--
			} else {
				b.Fatalf("expected deleted for %v", change)
			}
		}
		if numObjectsToInsert != nDeleted {
			b.Fatalf("expected to see %d deleted, got %d", numObjectsToInsert, nDeleted)
		}
	}
	b.StopTimer()
	// Deleted objects are kept in a graveyard until observed; wait for it to
	// drain before reporting.
	eventuallyGraveyardIsEmpty(b, db)
	b.ReportMetric(float64(b.N*numObjectsToInsert)/b.Elapsed().Seconds(), "objects/sec")
}
   318  
   319  func BenchmarkDB_RandomLookup(b *testing.B) {
   320  	db, table := newTestDBWithMetrics(b, &NopMetrics{})
   321  
   322  	wtxn := db.WriteTxn(table)
   323  	queries := []Query[testObject]{}
   324  	for i := 0; i < numObjectsToInsert; i++ {
   325  		queries = append(queries, idIndex.Query(uint64(i)))
   326  		_, _, err := table.Insert(wtxn, testObject{ID: uint64(i)})
   327  		require.NoError(b, err)
   328  	}
   329  	wtxn.Commit()
   330  	rand.Shuffle(numObjectsToInsert, func(i, j int) {
   331  		queries[i], queries[j] = queries[j], queries[i]
   332  	})
   333  	b.ResetTimer()
   334  
   335  	for j := 0; j < b.N; j++ {
   336  		txn := db.ReadTxn()
   337  		for _, q := range queries {
   338  			_, _, ok := table.Get(txn, q)
   339  			if !ok {
   340  				b.Fatal("object not found")
   341  			}
   342  		}
   343  	}
   344  	b.ReportMetric(float64(numObjectsToInsert*b.N)/b.Elapsed().Seconds(), "objects/sec")
   345  }
   346  
   347  func BenchmarkDB_SequentialLookup(b *testing.B) {
   348  	db, table := newTestDBWithMetrics(b, &NopMetrics{})
   349  	wtxn := db.WriteTxn(table)
   350  	ids := []uint64{}
   351  	queries := []Query[testObject]{}
   352  	for i := 0; i < numObjectsToInsert; i++ {
   353  		queries = append(queries, idIndex.Query(uint64(i)))
   354  		ids = append(ids, uint64(i))
   355  		_, _, err := table.Insert(wtxn, testObject{ID: uint64(i)})
   356  		require.NoError(b, err)
   357  	}
   358  	wtxn.Commit()
   359  	b.ResetTimer()
   360  
   361  	txn := db.ReadTxn()
   362  	for n := 0; n < b.N; n++ {
   363  		for _, q := range queries {
   364  			_, _, ok := table.Get(txn, q)
   365  			if !ok {
   366  				b.Fatalf("Object not found")
   367  			}
   368  		}
   369  	}
   370  	b.ReportMetric(float64(numObjectsToInsert*b.N)/b.Elapsed().Seconds(), "objects/sec")
   371  }
   372  
   373  func BenchmarkDB_Prefix_SecondaryIndex(b *testing.B) {
   374  	db, table := newTestDBWithMetrics(b, &NopMetrics{}, tagsIndex)
   375  	tagSet := part.NewSet("test")
   376  	txn := db.WriteTxn(table)
   377  	for i := 0; i < numObjectsToInsert; i++ {
   378  		_, _, err := table.Insert(txn, testObject{ID: uint64(i), Tags: tagSet})
   379  		require.NoError(b, err)
   380  	}
   381  	rtxn := txn.Commit()
   382  	b.ResetTimer()
   383  
   384  	q := tagsIndex.Query("test")
   385  	for n := 0; n < b.N; n++ {
   386  		count := 0
   387  		for range table.Prefix(rtxn, q) {
   388  			count++
   389  		}
   390  		if count != numObjectsToInsert {
   391  			b.Fatalf("wrong number of objects, expected %d, got %d", numObjectsToInsert, count)
   392  		}
   393  	}
   394  
   395  	b.ReportMetric(float64(numObjectsToInsert*b.N)/b.Elapsed().Seconds(), "objects/sec")
   396  }
   397  
// numObjectsIteration is the table size used by the full-iteration benchmarks.
const numObjectsIteration = 100000
   399  
   400  func BenchmarkDB_FullIteration_All(b *testing.B) {
   401  	db, table := newTestDBWithMetrics(b, &NopMetrics{})
   402  	wtxn := db.WriteTxn(table)
   403  	for i := 0; i < numObjectsIteration; i++ {
   404  		_, _, err := table.Insert(wtxn, testObject{ID: uint64(i)})
   405  		require.NoError(b, err)
   406  	}
   407  	wtxn.Commit()
   408  	b.ResetTimer()
   409  
   410  	for j := 0; j < b.N; j++ {
   411  		txn := db.ReadTxn()
   412  		i := uint64(0)
   413  		for obj := range table.All(txn) {
   414  			if obj.ID != i {
   415  				b.Fatalf("expected ID %d, got %d", i, obj.ID)
   416  			}
   417  			i++
   418  		}
   419  		if numObjectsIteration != i {
   420  			b.Fatalf("expected to iterate %d objects, got %d", numObjectsIteration, i)
   421  		}
   422  	}
   423  	b.ReportMetric(float64(numObjectsIteration*b.N)/b.Elapsed().Seconds(), "objects/sec")
   424  }
   425  
   426  func BenchmarkDB_FullIteration_Get(b *testing.B) {
   427  	db, table := newTestDBWithMetrics(b, &NopMetrics{})
   428  	wtxn := db.WriteTxn(table)
   429  	ids := []uint64{}
   430  	queries := []Query[testObject]{}
   431  	for i := 0; i < numObjectsIteration; i++ {
   432  		queries = append(queries, idIndex.Query(uint64(i)))
   433  		ids = append(ids, uint64(i))
   434  		_, _, err := table.Insert(wtxn, testObject{ID: uint64(i)})
   435  		require.NoError(b, err)
   436  	}
   437  	wtxn.Commit()
   438  	b.ResetTimer()
   439  
   440  	txn := db.ReadTxn()
   441  	for n := 0; n < b.N; n++ {
   442  		for _, q := range queries {
   443  			_, _, ok := table.Get(txn, q)
   444  			if !ok {
   445  				b.Fatalf("Object not found")
   446  			}
   447  		}
   448  	}
   449  	b.ReportMetric(float64(numObjectsIteration*b.N)/b.Elapsed().Seconds(), "objects/sec")
   450  }
   451  
// testObject2 mirrors testObject so a second, independently registered table
// can be used in the propagation benchmark below.
type testObject2 testObject

var (
	// id2Index indexes testObject2 by its unique ID, mirroring idIndex.
	id2Index = Index[testObject2, uint64]{
		Name: "id",
		FromObject: func(t testObject2) index.KeySet {
			return index.NewKeySet(index.Uint64(t.ID))
		},
		FromKey: index.Uint64,
		Unique:  true,
	}
)
   464  
// BenchmarkDB_PropagationDelay tests the propagation delay when changes from one
// table are propagated to another. Reports the 50th/90th/99th percentile
// latency in microseconds from committing a batch into table1 until the
// copy into table2 is observable.
func BenchmarkDB_PropagationDelay(b *testing.B) {
	const batchSize = 10

	var (
		db     *DB
		table1 = MustNewTable("test", idIndex)
		table2 = MustNewTable("test2", id2Index)
	)

	// Assemble a hive with the DB cell and register both tables.
	h := hive.New(
		Cell, // DB
		cell.Invoke(func(db_ *DB) error {
			db = db_
			return db.RegisterTable(table1, table2)
		}),
	)

	log := hivetest.Logger(b, hivetest.LogLevel(slog.LevelError))
	require.NoError(b, h.Start(log, context.TODO()))
	b.Cleanup(func() {
		assert.NoError(b, h.Stop(log, context.TODO()))
	})

	b.ResetTimer()

	var (
		// revision marks how far table1 has already been propagated;
		// watch1 starts closed so the first round proceeds immediately.
		revision = Revision(0)
		watch1   = closedWatchChannel
	)

	samples := []time.Duration{}

	// Test the propagation delay for microbatch
	// Doing b.N/batchSize rounds to get per-object cost versus per
	// batch cost.
	for i := 0; i < b.N/batchSize; i++ {
		start := time.Now()

		// Commit a batch to the first table.
		wtxn := db.WriteTxn(table1)
		for i := 0; i < batchSize; i++ {
			table1.Insert(wtxn, testObject{ID: uint64(i)})
		}
		wtxn.Commit()

		// Wait for the trigger
		<-watch1

		// Grab a watch channel on the second table
		txn := db.ReadTxn()
		_, watch2 := table2.AllWatch(txn)

		// Propagate the batch from first table to the second table:
		// copy everything in table1 newer than the last propagated revision.
		var seq iter.Seq2[testObject, Revision]
		seq, watch1 = table1.LowerBoundWatch(txn, ByRevision[testObject](revision))
		wtxn = db.WriteTxn(table2)
		for obj := range seq {
			table2.Insert(wtxn, testObject2(obj))
		}
		wtxn.Commit()
		revision = table1.Revision(txn)

		// Wait for trigger on second table
		<-watch2

		samples = append(samples, time.Since(start))
	}
	b.StopTimer()

	// Report latency percentiles only when enough samples were collected
	// for the percentiles to be meaningful.
	if len(samples) > 100 {
		sort.Slice(samples,
			func(i, j int) bool {
				return samples[i] < samples[j]
			})
		b.ReportMetric(float64(samples[len(samples)/2]/time.Microsecond), "50th_µs")
		b.ReportMetric(float64(samples[len(samples)*9/10]/time.Microsecond), "90th_µs")
		b.ReportMetric(float64(samples[len(samples)*99/100]/time.Microsecond), "99th_µs")
	}

}