github.com/cockroachdb/pebble@v1.1.2/level_iter_test.go

// Copyright 2018 The LevelDB-Go and Pebble Authors. All rights reserved. Use
// of this source code is governed by a BSD-style license that can be found in
// the LICENSE file.

package pebble

import (
	"bytes"
	"context"
	"fmt"
	"strings"
	"testing"
	"time"

	"github.com/cockroachdb/datadriven"
	"github.com/cockroachdb/pebble/bloom"
	"github.com/cockroachdb/pebble/internal/base"
	"github.com/cockroachdb/pebble/internal/keyspan"
	"github.com/cockroachdb/pebble/internal/manifest"
	"github.com/cockroachdb/pebble/internal/rangedel"
	"github.com/cockroachdb/pebble/internal/testkeys"
	"github.com/cockroachdb/pebble/objstorage/objstorageprovider"
	"github.com/cockroachdb/pebble/sstable"
	"github.com/cockroachdb/pebble/vfs"
	"github.com/stretchr/testify/require"
	"golang.org/x/exp/rand"
)

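// level is the LSM level number handed to newLevelIter throughout the tests
// and benchmarks below.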
const (
	level = 1
)

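// TestLevelIter is a datadriven test of levelIter over in-memory fakeIters.
// The "define" command builds a level from lines of key:value pairs (one file
// per line), "iter" runs internal iterator commands against a levelIter over
// those files, and "load" reports the bounds that are passed down when a
// table is opened.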
func TestLevelIter(t *testing.T) {
	var iters []*fakeIter
	var files manifest.LevelSlice

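	// newIters returns a copy of the fakeIter backing the requested file,
	// with the per-table bounds from the IterOptions applied to the copy.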
	newIters := func(
		_ context.Context, file *manifest.FileMetadata, opts *IterOptions, _ internalIterOpts,
	) (internalIterator, keyspan.FragmentIterator, error) {
		f := *iters[file.FileNum]
		f.lower = opts.GetLowerBound()
		f.upper = opts.GetUpperBound()
		return &f, nil, nil
	}

	datadriven.RunTest(t, "testdata/level_iter", func(t *testing.T, d *datadriven.TestData) string {
		switch d.Cmd {
		case "define":
			iters = nil
			var metas []*fileMetadata
			for _, line := range strings.Split(d.Input, "\n") {
				f := &fakeIter{}
				for _, key := range strings.Fields(line) {
					j := strings.Index(key, ":")
					f.keys = append(f.keys, base.ParseInternalKey(key[:j]))
					f.vals = append(f.vals, []byte(key[j+1:]))
				}
				iters = append(iters, f)

				meta := (&fileMetadata{
					FileNum: FileNum(len(metas)),
				}).ExtendPointKeyBounds(
					DefaultComparer.Compare,
					f.keys[0],
					f.keys[len(f.keys)-1],
				)
				meta.InitPhysicalBacking()
				metas = append(metas, meta)
			}
			files = manifest.NewLevelSliceKeySorted(base.DefaultComparer.Compare, metas)

			return ""

		case "iter":
			var opts IterOptions
			for _, arg := range d.CmdArgs {
				if len(arg.Vals) != 1 {
					return fmt.Sprintf("%s: %s=<value>", d.Cmd, arg.Key)
				}
				switch arg.Key {
				case "lower":
					opts.LowerBound = []byte(arg.Vals[0])
				case "upper":
					opts.UpperBound = []byte(arg.Vals[0])
				default:
					return fmt.Sprintf("%s: unknown arg: %s", d.Cmd, arg.Key)
				}
			}

			iter := newLevelIter(opts, testkeys.Comparer, newIters, files.Iter(), manifest.Level(level),
				internalIterOpts{})
			defer iter.Close()
			// Fake up the range deletion initialization.
			iter.initRangeDel(new(keyspan.FragmentIterator))
			iter.disableInvariants = true
			return runInternalIterCmd(t, d, iter, iterCmdVerboseKey)

		case "load":
			// The "load" command allows testing the iterator options that are
			// passed down when loading sstables.
			//
			// load <key> [lower=<key>] [upper=<key>]
			var opts IterOptions
			var key string
			for _, arg := range d.CmdArgs {
				if len(arg.Vals) == 0 {
					key = arg.Key
					continue
				}
				if len(arg.Vals) != 1 {
					return fmt.Sprintf("%s: %s=<value>", d.Cmd, arg.Key)
				}
				switch arg.Key {
				case "lower":
					opts.LowerBound = []byte(arg.Vals[0])
				case "upper":
					opts.UpperBound = []byte(arg.Vals[0])
				default:
					return fmt.Sprintf("%s: unknown arg: %s", d.Cmd, arg.Key)
				}
			}

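			// Wrap newIters so that the IterOptions the levelIter passes down
			// when it opens the table containing the sought key are captured
			// in tableOpts.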
			var tableOpts *IterOptions
			newIters2 := func(
				ctx context.Context, file *manifest.FileMetadata, opts *IterOptions,
				internalOpts internalIterOpts,
			) (internalIterator, keyspan.FragmentIterator, error) {
				tableOpts = opts
				return newIters(ctx, file, opts, internalOpts)
			}

			iter := newLevelIter(opts, testkeys.Comparer, newIters2, files.Iter(),
				manifest.Level(level), internalIterOpts{})
			iter.SeekGE([]byte(key), base.SeekGEFlagsNone)
			lower, upper := tableOpts.GetLowerBound(), tableOpts.GetUpperBound()
			return fmt.Sprintf("[%s,%s]\n", lower, upper)

		default:
			return fmt.Sprintf("unknown command: %s", d.Cmd)
		}
	})
}

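// levelIterTest is a shared harness for the levelIter tests below that build
// real sstables in an in-memory filesystem and open them via sstable.Reader.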
type levelIterTest struct {
	cmp          base.Comparer
	mem          vfs.FS
	readers      []*sstable.Reader
	metas        []*fileMetadata
	itersCreated int
}

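// newLevelIterTest returns a harness backed by an in-memory filesystem. The
// comparer's Split function is overridden so that the entire user key is
// treated as the prefix.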
func newLevelIterTest() *levelIterTest {
	lt := &levelIterTest{
		cmp: *DefaultComparer,
		mem: vfs.NewMem(),
	}
	lt.cmp.Split = func(a []byte) int { return len(a) }
	return lt
}

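// newIters opens point and range-deletion iterators over the sstable
// identified by file.FileNum, counting each call in itersCreated.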
func (lt *levelIterTest) newIters(
	ctx context.Context, file *manifest.FileMetadata, opts *IterOptions, iio internalIterOpts,
) (internalIterator, keyspan.FragmentIterator, error) {
	lt.itersCreated++
	iter, err := lt.readers[file.FileNum].NewIterWithBlockPropertyFiltersAndContextEtc(
		ctx, opts.LowerBound, opts.UpperBound, nil, false, true, iio.stats,
		sstable.TrivialReaderProvider{Reader: lt.readers[file.FileNum]})
	if err != nil {
		return nil, nil, err
	}
	rangeDelIter, err := lt.readers[file.FileNum].NewRawRangeDelIter()
	if err != nil {
		return nil, nil, err
	}
	return iter, rangeDelIter, nil
}

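// runClear closes any open readers and resets the harness to an empty
// in-memory filesystem.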
func (lt *levelIterTest) runClear(d *datadriven.TestData) string {
	lt.mem = vfs.NewMem()
	for _, r := range lt.readers {
		r.Close()
	}
	lt.readers = nil
	lt.metas = nil
	lt.itersCreated = 0
	return ""
}

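// runBuild writes the keys in d.Input to a new sstable, opens a reader for
// it, records its file metadata, and returns the bounds of all files built
// so far.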
func (lt *levelIterTest) runBuild(d *datadriven.TestData) string {
	fileNum := FileNum(len(lt.readers))
	name := fmt.Sprint(fileNum)
	f0, err := lt.mem.Create(name)
	if err != nil {
		return err.Error()
	}

	tableFormat := sstable.TableFormatRocksDBv2
	for _, arg := range d.CmdArgs {
		if arg.Key == "format" {
			switch arg.Vals[0] {
			case "rocksdbv2":
				tableFormat = sstable.TableFormatRocksDBv2
			case "pebblev2":
				tableFormat = sstable.TableFormatPebblev2
			}
		}
	}
	fp := bloom.FilterPolicy(10)
	w := sstable.NewWriter(objstorageprovider.NewFileWritable(f0), sstable.WriterOptions{
		Comparer:     &lt.cmp,
		FilterPolicy: fp,
		TableFormat:  tableFormat,
	})
	var tombstones []keyspan.Span
	f := keyspan.Fragmenter{
		Cmp:    lt.cmp.Compare,
		Format: lt.cmp.FormatKey,
		Emit: func(fragmented keyspan.Span) {
			tombstones = append(tombstones, fragmented)
		},
	}
	for _, key := range strings.Split(d.Input, "\n") {
		j := strings.Index(key, ":")
		ikey := base.ParseInternalKey(key[:j])
		value := []byte(key[j+1:])
		switch ikey.Kind() {
		case InternalKeyKindRangeDelete:
			f.Add(rangedel.Decode(ikey, value, nil))
		case InternalKeyKindRangeKeySet, InternalKeyKindRangeKeyUnset, InternalKeyKindRangeKeyDelete:
			if err := w.AddRangeKey(ikey, value); err != nil {
				return err.Error()
			}
		default:
			if err := w.Add(ikey, value); err != nil {
				return err.Error()
			}
		}
	}
	f.Finish()
	for _, v := range tombstones {
		if err := rangedel.Encode(&v, w.Add); err != nil {
			return err.Error()
		}
	}
	if err := w.Close(); err != nil {
		return err.Error()
	}
	meta, err := w.Metadata()
	if err != nil {
		return err.Error()
	}

	f1, err := lt.mem.Open(name)
	if err != nil {
		return err.Error()
	}
	readable, err := sstable.NewSimpleReadable(f1)
	if err != nil {
		return err.Error()
	}
	r, err := sstable.NewReader(readable, sstable.ReaderOptions{
		Filters: map[string]FilterPolicy{
			fp.Name(): fp,
		},
	})
	if err != nil {
		return err.Error()
	}
	lt.readers = append(lt.readers, r)
	m := &fileMetadata{FileNum: fileNum}
	if meta.HasPointKeys {
		m.ExtendPointKeyBounds(lt.cmp.Compare, meta.SmallestPoint, meta.LargestPoint)
	}
	if meta.HasRangeDelKeys {
		m.ExtendPointKeyBounds(lt.cmp.Compare, meta.SmallestRangeDel, meta.LargestRangeDel)
	}
	if meta.HasRangeKeys {
		m.ExtendRangeKeyBounds(lt.cmp.Compare, meta.SmallestRangeKey, meta.LargestRangeKey)
	}
	m.InitPhysicalBacking()
	lt.metas = append(lt.metas, m)

	var buf bytes.Buffer
	for _, f := range lt.metas {
		fmt.Fprintf(&buf, "%d: %s-%s\n", f.FileNum, f.Smallest, f.Largest)
	}
	return buf.String()
}

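// TestLevelIterBoundaries is a datadriven test of levelIter positioning over
// sstables built by the levelIterTest harness. Iterators can be saved and
// continued across commands, and the "file-pos" command reports which file
// the iterator is currently positioned at.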
func TestLevelIterBoundaries(t *testing.T) {
	lt := newLevelIterTest()
	defer lt.runClear(nil)

	var iter *levelIter
	datadriven.RunTest(t, "testdata/level_iter_boundaries", func(t *testing.T, d *datadriven.TestData) string {
		switch d.Cmd {
		case "clear":
			return lt.runClear(d)

		case "build":
			return lt.runBuild(d)

		case "iter":
			// The save and continue arguments allow the iterator to be saved
			// for reuse by a subsequent iter command.
			save := false
			cont := false
			for _, arg := range d.CmdArgs {
				switch arg.Key {
				case "save":
					save = true
				case "continue":
					cont = true
				default:
					return fmt.Sprintf("%s: unknown arg: %s", d.Cmd, arg.Key)
				}
			}
			if !cont && iter != nil {
				return "preceding iter was not closed"
			}
			if cont && iter == nil {
				return "no existing iter"
			}
			if iter == nil {
				slice := manifest.NewLevelSliceKeySorted(lt.cmp.Compare, lt.metas)
				iter = newLevelIter(IterOptions{}, testkeys.Comparer, lt.newIters, slice.Iter(),
					manifest.Level(level), internalIterOpts{})
				// Fake up the range deletion initialization.
				iter.initRangeDel(new(keyspan.FragmentIterator))
			}
			if !save {
				defer func() {
					iter.Close()
					iter = nil
				}()
			}
			return runInternalIterCmd(t, d, iter, iterCmdVerboseKey)

		case "file-pos":
			// Returns the FileNum at which the iterator is positioned.
			if iter == nil {
				return "nil levelIter"
			}
			if iter.iterFile == nil {
				return "nil iterFile"
			}
			return fmt.Sprintf("file %d", iter.iterFile.FileNum)

		default:
			return fmt.Sprintf("unknown command: %s", d.Cmd)
		}
	})
}

// levelIterTestIter allows a datadriven test to use runInternalIterCmd and
// perform parallel operations on both a levelIter and a rangeDelIter.
type levelIterTestIter struct {
	*levelIter
	rangeDelIter keyspan.FragmentIterator
}

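// rangeDelSeek seeks the wrapped rangeDelIter in the given direction and
// annotates the point key returned by the levelIter with the overlapping
// range tombstone (if any) visible at sequence number 1000.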
func (i *levelIterTestIter) rangeDelSeek(
	key []byte, ikey *InternalKey, val base.LazyValue, dir int,
) (*InternalKey, base.LazyValue) {
	var tombstone keyspan.Span
	if i.rangeDelIter != nil {
		var t *keyspan.Span
		if dir < 0 {
			t = keyspan.SeekLE(i.levelIter.cmp, i.rangeDelIter, key)
		} else {
			t = i.rangeDelIter.SeekGE(key)
		}
		if t != nil {
			tombstone = t.Visible(1000)
		}
	}
	if ikey == nil {
		return &InternalKey{
			UserKey: []byte(fmt.Sprintf("./%s", tombstone)),
		}, base.LazyValue{}
	}
	return &InternalKey{
		UserKey: []byte(fmt.Sprintf("%s/%s", ikey.UserKey, tombstone)),
		Trailer: ikey.Trailer,
	}, val
}

func (i *levelIterTestIter) String() string {
	return "level-iter-test"
}

func (i *levelIterTestIter) SeekGE(
	key []byte, flags base.SeekGEFlags,
) (*InternalKey, base.LazyValue) {
	ikey, val := i.levelIter.SeekGE(key, flags)
	return i.rangeDelSeek(key, ikey, val, 1)
}

func (i *levelIterTestIter) SeekPrefixGE(
	prefix, key []byte, flags base.SeekGEFlags,
) (*base.InternalKey, base.LazyValue) {
	ikey, val := i.levelIter.SeekPrefixGE(prefix, key, flags)
	return i.rangeDelSeek(key, ikey, val, 1)
}

func (i *levelIterTestIter) SeekLT(
	key []byte, flags base.SeekLTFlags,
) (*InternalKey, base.LazyValue) {
	ikey, val := i.levelIter.SeekLT(key, flags)
	return i.rangeDelSeek(key, ikey, val, -1)
}

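// TestLevelIterSeek is a datadriven test of seeking a levelIter whose range
// deletion iterator is kept in sync, verifying the returned point keys, the
// tombstones loaded alongside them, and the number of table iterators
// created.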
func TestLevelIterSeek(t *testing.T) {
	lt := newLevelIterTest()
	defer lt.runClear(nil)

	datadriven.RunTest(t, "testdata/level_iter_seek", func(t *testing.T, d *datadriven.TestData) string {
		switch d.Cmd {
		case "clear":
			return lt.runClear(d)

		case "build":
			return lt.runBuild(d)

		case "iter":
			var stats base.InternalIteratorStats
			slice := manifest.NewLevelSliceKeySorted(lt.cmp.Compare, lt.metas)
			iter := &levelIterTestIter{levelIter: &levelIter{}}
			iter.init(context.Background(), IterOptions{}, testkeys.Comparer, lt.newIters, slice.Iter(),
				manifest.Level(level), internalIterOpts{stats: &stats})
			defer iter.Close()
			iter.initRangeDel(&iter.rangeDelIter)
			return runInternalIterCmd(t, d, iter, iterCmdVerboseKey, iterCmdStats(&stats))

		case "iters-created":
			return fmt.Sprintf("%d", lt.itersCreated)
		default:
			return fmt.Sprintf("unknown command: %s", d.Cmd)
		}
	})
}

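// buildLevelIterTables builds count sstables of roughly 2MB each containing
// sequentially numbered keys, and returns the readers, a LevelSlice of their
// metadata, the full key set, and a cleanup function that closes the readers.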
func buildLevelIterTables(
	b *testing.B, blockSize, restartInterval, count int,
) ([]*sstable.Reader, manifest.LevelSlice, [][]byte, func()) {
	mem := vfs.NewMem()
	files := make([]vfs.File, count)
	for i := range files {
		f, err := mem.Create(fmt.Sprintf("bench%d", i))
		if err != nil {
			b.Fatal(err)
		}
		files[i] = f
	}

	writers := make([]*sstable.Writer, len(files))
	for i := range files {
		writers[i] = sstable.NewWriter(objstorageprovider.NewFileWritable(files[i]), sstable.WriterOptions{
			BlockRestartInterval: restartInterval,
			BlockSize:            blockSize,
			Compression:          NoCompression,
		})
	}

	var keys [][]byte
	var i int
	const targetSize = 2 << 20
	for _, w := range writers {
		for ; w.EstimatedSize() < targetSize; i++ {
			key := []byte(fmt.Sprintf("%08d", i))
			keys = append(keys, key)
			ikey := base.MakeInternalKey(key, 0, InternalKeyKindSet)
			w.Add(ikey, nil)
		}
		if err := w.Close(); err != nil {
			b.Fatal(err)
		}
	}

	opts := sstable.ReaderOptions{Cache: NewCache(128 << 20), Comparer: DefaultComparer}
	defer opts.Cache.Unref()
	readers := make([]*sstable.Reader, len(files))
	for i := range files {
		f, err := mem.Open(fmt.Sprintf("bench%d", i))
		if err != nil {
			b.Fatal(err)
		}
		readable, err := sstable.NewSimpleReadable(f)
		if err != nil {
			b.Fatal(err)
		}
		readers[i], err = sstable.NewReader(readable, opts)
		if err != nil {
			b.Fatal(err)
		}
	}

	cleanup := func() {
		for _, r := range readers {
			require.NoError(b, r.Close())
		}
	}

	meta := make([]*fileMetadata, len(readers))
	for i := range readers {
		iter, err := readers[i].NewIter(nil /* lower */, nil /* upper */)
		require.NoError(b, err)
		smallest, _ := iter.First()
		meta[i] = &fileMetadata{}
		meta[i].FileNum = FileNum(i)
		largest, _ := iter.Last()
		meta[i].ExtendPointKeyBounds(opts.Comparer.Compare, (*smallest).Clone(), (*largest).Clone())
		meta[i].InitPhysicalBacking()
	}
	slice := manifest.NewLevelSliceKeySorted(base.DefaultComparer.Compare, meta)
	return readers, slice, keys, cleanup
}

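// BenchmarkLevelIterSeekGE measures the cost of random SeekGE calls through a
// levelIter over the tables built by buildLevelIterTables.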
func BenchmarkLevelIterSeekGE(b *testing.B) {
	const blockSize = 32 << 10

	for _, restartInterval := range []int{16} {
		b.Run(fmt.Sprintf("restart=%d", restartInterval),
			func(b *testing.B) {
				for _, count := range []int{5} {
					b.Run(fmt.Sprintf("count=%d", count),
						func(b *testing.B) {
							readers, metas, keys, cleanup := buildLevelIterTables(b, blockSize, restartInterval, count)
							defer cleanup()
							newIters := func(
								_ context.Context, file *manifest.FileMetadata, _ *IterOptions, _ internalIterOpts,
							) (internalIterator, keyspan.FragmentIterator, error) {
								iter, err := readers[file.FileNum].NewIter(nil /* lower */, nil /* upper */)
								return iter, nil, err
							}
							l := newLevelIter(IterOptions{}, DefaultComparer, newIters, metas.Iter(), manifest.Level(level), internalIterOpts{})
							rng := rand.New(rand.NewSource(uint64(time.Now().UnixNano())))

							b.ResetTimer()
							for i := 0; i < b.N; i++ {
								l.SeekGE(keys[rng.Intn(len(keys))], base.SeekGEFlagsNone)
							}
							l.Close()
						})
				}
			})
	}
}

// BenchmarkLevelIterSeqSeekGEWithBounds simulates a levelIter being used as
// part of a mergingIter where narrow bounds are repeatedly set and used to
// Seek and then iterate over the keys within the bounds. This resembles MVCC
// scanning by CockroachDB during a lookup/index join with a large number of
// left rows, which are batched and reuse the same iterator, and which can
// have good locality of access. That locality results in successive bounds
// falling within the same file.
func BenchmarkLevelIterSeqSeekGEWithBounds(b *testing.B) {
	const blockSize = 32 << 10

	for _, restartInterval := range []int{16} {
		b.Run(fmt.Sprintf("restart=%d", restartInterval),
			func(b *testing.B) {
				for _, count := range []int{5} {
					b.Run(fmt.Sprintf("count=%d", count),
						func(b *testing.B) {
							readers, metas, keys, cleanup :=
								buildLevelIterTables(b, blockSize, restartInterval, count)
							defer cleanup()
							// This newIters is cheaper than in practice since it does not do
							// tableCacheShard.findNode.
							newIters := func(
								_ context.Context, file *manifest.FileMetadata, opts *IterOptions, _ internalIterOpts,
							) (internalIterator, keyspan.FragmentIterator, error) {
								iter, err := readers[file.FileNum].NewIter(
									opts.LowerBound, opts.UpperBound)
								return iter, nil, err
							}
							l := newLevelIter(IterOptions{}, DefaultComparer, newIters, metas.Iter(), manifest.Level(level), internalIterOpts{})
							// Fake up the range deletion initialization, to resemble the usage
							// in a mergingIter.
							l.initRangeDel(new(keyspan.FragmentIterator))
							keyCount := len(keys)
							b.ResetTimer()
							for i := 0; i < b.N; i++ {
								pos := i % (keyCount - 1)
								l.SetBounds(keys[pos], keys[pos+1])
								// SeekGE will return keys[pos].
								k, _ := l.SeekGE(keys[pos], base.SeekGEFlagsNone)
								// Next() will get called once and return nil.
								for k != nil {
									k, _ = l.Next()
								}
							}
							l.Close()
						})
				}
			})
	}
}

// BenchmarkLevelIterSeqSeekPrefixGE simulates the behavior of a levelIter
// being used as part of a mergingIter where SeekPrefixGE is used to seek in a
// monotonically increasing manner. This resembles key-value lookups done by
// CockroachDB when evaluating Put operations.
func BenchmarkLevelIterSeqSeekPrefixGE(b *testing.B) {
	const blockSize = 32 << 10
	const restartInterval = 16
	readers, metas, keys, cleanup :=
		buildLevelIterTables(b, blockSize, restartInterval, 5)
	defer cleanup()
	// This newIters is cheaper than in practice since it does not do
	// tableCacheShard.findNode.
	newIters := func(
		_ context.Context, file *manifest.FileMetadata, opts *IterOptions, _ internalIterOpts,
	) (internalIterator, keyspan.FragmentIterator, error) {
		iter, err := readers[file.FileNum].NewIter(
			opts.LowerBound, opts.UpperBound)
		return iter, nil, err
	}

	for _, skip := range []int{1, 2, 4, 8, 16} {
		for _, useNext := range []bool{false, true} {
			b.Run(fmt.Sprintf("skip=%d/use-next=%t", skip, useNext),
				func(b *testing.B) {
					l := newLevelIter(IterOptions{}, testkeys.Comparer, newIters, metas.Iter(),
						manifest.Level(level), internalIterOpts{})
					// Fake up the range deletion initialization, to resemble the usage
					// in a mergingIter.
					l.initRangeDel(new(keyspan.FragmentIterator))
					keyCount := len(keys)
					pos := 0
					l.SeekPrefixGE(keys[pos], keys[pos], base.SeekGEFlagsNone)
					b.ResetTimer()
					for i := 0; i < b.N; i++ {
						pos += skip
						var flags base.SeekGEFlags
						if useNext {
							flags = flags.EnableTrySeekUsingNext()
						}
						if pos >= keyCount {
							pos = 0
							flags = flags.DisableTrySeekUsingNext()
						}
						// SeekPrefixGE will return keys[pos].
						l.SeekPrefixGE(keys[pos], keys[pos], flags)
					}
					b.StopTimer()
					l.Close()
				})
		}
	}
}

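// BenchmarkLevelIterNext measures forward iteration through a levelIter,
// wrapping around to First when the end of the level is reached.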
func BenchmarkLevelIterNext(b *testing.B) {
	const blockSize = 32 << 10

	for _, restartInterval := range []int{16} {
		b.Run(fmt.Sprintf("restart=%d", restartInterval),
			func(b *testing.B) {
				for _, count := range []int{5} {
					b.Run(fmt.Sprintf("count=%d", count),
						func(b *testing.B) {
							readers, metas, _, cleanup := buildLevelIterTables(b, blockSize, restartInterval, count)
							defer cleanup()
							newIters := func(
								_ context.Context, file *manifest.FileMetadata, _ *IterOptions, _ internalIterOpts,
							) (internalIterator, keyspan.FragmentIterator, error) {
								iter, err := readers[file.FileNum].NewIter(nil /* lower */, nil /* upper */)
								return iter, nil, err
							}
							l := newLevelIter(IterOptions{}, testkeys.Comparer, newIters, metas.Iter(), manifest.Level(level), internalIterOpts{})

							b.ResetTimer()
							for i := 0; i < b.N; i++ {
								key, _ := l.Next()
								if key == nil {
									key, _ = l.First()
								}
								_ = key
							}
							l.Close()
						})
				}
			})
	}
}

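// BenchmarkLevelIterPrev measures backward iteration through a levelIter,
// wrapping around to Last when the start of the level is reached.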
func BenchmarkLevelIterPrev(b *testing.B) {
	const blockSize = 32 << 10

	for _, restartInterval := range []int{16} {
		b.Run(fmt.Sprintf("restart=%d", restartInterval),
			func(b *testing.B) {
				for _, count := range []int{5} {
					b.Run(fmt.Sprintf("count=%d", count),
						func(b *testing.B) {
							readers, metas, _, cleanup := buildLevelIterTables(b, blockSize, restartInterval, count)
							defer cleanup()
							newIters := func(
								_ context.Context, file *manifest.FileMetadata, _ *IterOptions, _ internalIterOpts,
							) (internalIterator, keyspan.FragmentIterator, error) {
								iter, err := readers[file.FileNum].NewIter(nil /* lower */, nil /* upper */)
								return iter, nil, err
							}
							l := newLevelIter(IterOptions{}, DefaultComparer, newIters, metas.Iter(), manifest.Level(level), internalIterOpts{})

							b.ResetTimer()
							for i := 0; i < b.N; i++ {
								key, _ := l.Prev()
								if key == nil {
									key, _ = l.Last()
								}
								_ = key
							}
							l.Close()
						})
				}
			})
	}
}