github.com/cockroachdb/pebble@v0.0.0-20231214172447-ab4952c5f87b/internal/manifest/l0_sublevels_test.go

// Copyright 2020 The LevelDB-Go and Pebble Authors. All rights reserved. Use
// of this source code is governed by a BSD-style license that can be found in
// the LICENSE file.

package manifest

import (
	"bytes"
	"fmt"
	"io"
	"math"
	"os"
	"slices"
	"sort"
	"strconv"
	"strings"
	"testing"
	"time"

	"github.com/cockroachdb/datadriven"
	"github.com/cockroachdb/pebble/internal/base"
	"github.com/cockroachdb/pebble/internal/testkeys"
	"github.com/cockroachdb/pebble/record"
	"github.com/stretchr/testify/require"
	"golang.org/x/exp/rand"
)

// readManifest replays the version edits in the MANIFEST file at filename and
// returns the resulting Version.
func readManifest(filename string) (*Version, error) {
	f, err := os.Open(filename)
	if err != nil {
		return nil, err
	}
	defer f.Close()
	rr := record.NewReader(f, 0 /* logNum */)
	var v *Version
	addedByFileNum := make(map[base.FileNum]*FileMetadata)
	for {
		r, err := rr.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			return nil, err
		}
		var ve VersionEdit
		if err = ve.Decode(r); err != nil {
			return nil, err
		}
		var bve BulkVersionEdit
		bve.AddedByFileNum = addedByFileNum
		if err := bve.Accumulate(&ve); err != nil {
			return nil, err
		}
		if v, err = bve.Apply(v, base.DefaultComparer.Compare, base.DefaultFormatter, 10<<20, 32000, nil, ProhibitSplitUserKeys); err != nil {
			return nil, err
		}
	}
	return v, nil
}

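// visualizeSublevels renders an ASCII diagram of the sublevels in s (topmost
// sublevel first), followed by any populated levels in otherLevels (labeled
// L1 onward) and a key legend. Each file is drawn as a span between the first
// bytes of its smallest and largest user keys. For L0 files, '+' marks files
// in compactionFiles (the candidate compaction), '^' marks files undergoing
// an intra-L0 compaction, 'v' marks files being compacted into the base
// level, and '-' marks idle files; for lower levels, '=' marks compacting
// files.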
func visualizeSublevels(
	s *L0Sublevels, compactionFiles bitSet, otherLevels [][]*FileMetadata,
) string {
	var buf strings.Builder
	if compactionFiles == nil {
		compactionFiles = newBitSet(s.levelMetadata.Len())
	}
	largestChar := byte('a')
	printLevel := func(files []*FileMetadata, level string, isL0 bool) {
		lastChar := byte('a')
		fmt.Fprintf(&buf, "L%s:", level)
		for i := 0; i < 5-len(level); i++ {
			buf.WriteByte(' ')
		}
		for j, f := range files {
			for lastChar < f.Smallest.UserKey[0] {
				buf.WriteString("   ")
				lastChar++
			}
			buf.WriteByte(f.Smallest.UserKey[0])
			middleChar := byte('-')
			if isL0 {
				if compactionFiles[f.L0Index] {
					middleChar = '+'
				} else if f.IsCompacting() {
					if f.IsIntraL0Compacting {
						middleChar = '^'
					} else {
						middleChar = 'v'
					}
				}
			} else if f.IsCompacting() {
				middleChar = '='
			}
			if largestChar < f.Largest.UserKey[0] {
				largestChar = f.Largest.UserKey[0]
			}
			if f.Smallest.UserKey[0] == f.Largest.UserKey[0] {
				buf.WriteByte(f.Largest.UserKey[0])
				if compactionFiles[f.L0Index] {
					buf.WriteByte('+')
				} else if j < len(files)-1 {
					buf.WriteByte(' ')
				}
				lastChar++
				continue
			}
			buf.WriteByte(middleChar)
			buf.WriteByte(middleChar)
			lastChar++
			for lastChar < f.Largest.UserKey[0] {
				buf.WriteByte(middleChar)
				buf.WriteByte(middleChar)
				buf.WriteByte(middleChar)
				lastChar++
			}
			if f.Largest.IsExclusiveSentinel() &&
				j < len(files)-1 && files[j+1].Smallest.UserKey[0] == f.Largest.UserKey[0] {
				// This case arises when two successive files have matching
				// end/start user keys and the left-side file has the
				// exclusive sentinel as its end key trailer. In that case we
				// print the sstables as:
				//
				// a------d------g
				//
				continue
			}
			buf.WriteByte(middleChar)
			buf.WriteByte(f.Largest.UserKey[0])
			if j < len(files)-1 {
				buf.WriteByte(' ')
			}
			lastChar++
		}
		fmt.Fprintf(&buf, "\n")
	}
	for i := len(s.levelFiles) - 1; i >= 0; i-- {
		printLevel(s.levelFiles[i], fmt.Sprintf("0.%d", i), true)
	}
	for i := range otherLevels {
		if len(otherLevels[i]) == 0 {
			continue
		}
		printLevel(otherLevels[i], strconv.Itoa(i+1), false)
	}
	buf.WriteString("       ")
	for b := byte('a'); b <= largestChar; b++ {
		buf.WriteByte(b)
		buf.WriteByte(b)
		if b < largestChar {
			buf.WriteByte(' ')
		}
	}
	buf.WriteByte('\n')
	return buf.String()
}

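// TestVisualizeSublevelsSmoke is a minimal sketch added for illustration; the
// test name and scenario are not part of the original suite. It builds a
// single-file L0 using only helpers exercised elsewhere in this file and
// checks that the renderer emits a sublevel header and the key legend.
func TestVisualizeSublevelsSmoke(t *testing.T) {
	m := (&FileMetadata{FileNum: 1, Size: 256}).ExtendPointKeyBounds(
		base.DefaultComparer.Compare,
		base.ParseInternalKey("a.SET.2"),
		base.ParseInternalKey("d.SET.2"),
	)
	m.SmallestSeqNum = m.Smallest.SeqNum()
	m.LargestSeqNum = m.Largest.SeqNum()
	m.InitPhysicalBacking()
	levelMetadata := makeLevelMetadata(base.DefaultComparer.Compare, 0, []*FileMetadata{m})
	s, err := NewL0Sublevels(
		&levelMetadata, base.DefaultComparer.Compare, base.DefaultFormatter, 64)
	require.NoError(t, err)
	out := visualizeSublevels(s, nil, nil)
	require.Contains(t, out, "L0.0:")
	require.Contains(t, out, "aa bb cc dd")
}
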
// TestL0Sublevels is a datadriven test over testdata/l0_sublevels. It covers
// defining L0 (and lower-level) files, incrementally adding L0 files, picking
// base and intra-L0 compactions, and querying read amplification, in-use key
// ranges, flush split keys, and sublevel ordering.
func TestL0Sublevels(t *testing.T) {
	parseMeta := func(s string) (*FileMetadata, error) {
		parts := strings.Split(s, ":")
		if len(parts) != 2 {
			t.Fatalf("malformed table spec: %s", s)
		}
		fileNum, err := strconv.Atoi(strings.TrimSpace(parts[0]))
		if err != nil {
			return nil, err
		}
		fields := strings.Fields(parts[1])
		keyRange := strings.Split(strings.TrimSpace(fields[0]), "-")
		m := (&FileMetadata{}).ExtendPointKeyBounds(
			base.DefaultComparer.Compare,
			base.ParseInternalKey(strings.TrimSpace(keyRange[0])),
			base.ParseInternalKey(strings.TrimSpace(keyRange[1])),
		)
		m.SmallestSeqNum = m.Smallest.SeqNum()
		m.LargestSeqNum = m.Largest.SeqNum()
		if m.Largest.IsExclusiveSentinel() {
			m.LargestSeqNum = m.SmallestSeqNum
		}
		m.FileNum = base.FileNum(fileNum)
		m.Size = uint64(256)
		m.InitPhysicalBacking()
		if len(fields) > 1 {
			for _, field := range fields[1:] {
				parts := strings.Split(field, "=")
				switch parts[0] {
				case "base_compacting":
					m.IsIntraL0Compacting = false
					m.CompactionState = CompactionStateCompacting
				case "intra_l0_compacting":
					m.IsIntraL0Compacting = true
					m.CompactionState = CompactionStateCompacting
				case "compacting":
					m.CompactionState = CompactionStateCompacting
				case "size":
					sizeInt, err := strconv.Atoi(parts[1])
					if err != nil {
						return nil, err
					}
					m.Size = uint64(sizeInt)
				}
			}
		}

		return m, nil
	}
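
	// For reference, parseMeta accepts table specs of the following form
	// (format inferred from the parsing above; the example is illustrative
	// and not copied from testdata/l0_sublevels):
	//
	//   000005:e.SET.5-j.SET.7 base_compacting size=512
	//
	// i.e. a file number, a colon, smallest and largest internal keys
	// separated by '-', and optional base_compacting, intra_l0_compacting,
	// compacting, and size=<bytes> attributes.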

	var err error
	var fileMetas [NumLevels][]*FileMetadata
	var explicitSublevels [][]*FileMetadata
	var activeCompactions []L0Compaction
	var sublevels *L0Sublevels
	baseLevel := NumLevels - 1

	datadriven.RunTest(t, "testdata/l0_sublevels", func(t *testing.T, td *datadriven.TestData) string {
		pickBaseCompaction := false
		level := 0
		addL0FilesOpt := false
		switch td.Cmd {
		case "add-l0-files":
			addL0FilesOpt = true
			level = 0
			fallthrough
		case "define":
			if !addL0FilesOpt {
				fileMetas = [NumLevels][]*FileMetadata{}
				baseLevel = NumLevels - 1
				activeCompactions = nil
			}
			explicitSublevels = [][]*FileMetadata{}
			sublevel := -1
			addedL0Files := make([]*FileMetadata, 0)
			for _, data := range strings.Split(td.Input, "\n") {
				data = strings.TrimSpace(data)
				switch data[:2] {
				case "L0", "L1", "L2", "L3", "L4", "L5", "L6":
					level, err = strconv.Atoi(data[1:2])
					if err != nil {
						return err.Error()
					}
					if level == 0 && len(data) > 3 {
						// Sublevel was specified.
						sublevel, err = strconv.Atoi(data[3:])
						if err != nil {
							return err.Error()
						}
					} else {
						sublevel = -1
					}
				default:
					meta, err := parseMeta(data)
					if err != nil {
						return err.Error()
					}
					if level != 0 && level < baseLevel {
						baseLevel = level
					}
					fileMetas[level] = append(fileMetas[level], meta)
					if level == 0 {
						addedL0Files = append(addedL0Files, meta)
					}
					if sublevel != -1 {
						for len(explicitSublevels) <= sublevel {
							explicitSublevels = append(explicitSublevels, []*FileMetadata{})
						}
						explicitSublevels[sublevel] = append(explicitSublevels[sublevel], meta)
					}
				}
			}

			flushSplitMaxBytes := 64
			initialize := true
			for _, arg := range td.CmdArgs {
				switch arg.Key {
				case "flush_split_max_bytes":
					flushSplitMaxBytes, err = strconv.Atoi(arg.Vals[0])
					if err != nil {
						t.Fatal(err)
					}
				case "no_initialize":
					// This case is for use with explicitly-specified sublevels
					// only.
					initialize = false
				}
			}
			SortBySeqNum(fileMetas[0])
			for i := 1; i < NumLevels; i++ {
				SortBySmallest(fileMetas[i], base.DefaultComparer.Compare)
			}

			levelMetadata := makeLevelMetadata(base.DefaultComparer.Compare, 0, fileMetas[0])
			if initialize {
				if addL0FilesOpt {
					SortBySeqNum(addedL0Files)
					sublevels, err = sublevels.AddL0Files(addedL0Files, int64(flushSplitMaxBytes), &levelMetadata)
					// Check if the output matches a full initialization.
					sublevels2, _ := NewL0Sublevels(&levelMetadata, base.DefaultComparer.Compare, base.DefaultFormatter, int64(flushSplitMaxBytes))
					if sublevels != nil && sublevels2 != nil {
						require.Equal(t, sublevels.flushSplitUserKeys, sublevels2.flushSplitUserKeys)
						require.Equal(t, sublevels.levelFiles, sublevels2.levelFiles)
					}
				} else {
					sublevels, err = NewL0Sublevels(
						&levelMetadata,
						base.DefaultComparer.Compare,
						base.DefaultFormatter,
						int64(flushSplitMaxBytes))
				}
				if err != nil {
					return err.Error()
				}
				sublevels.InitCompactingFileInfo(nil)
			} else {
				// This case is for use with explicitly-specified sublevels
				// only.
				sublevels = &L0Sublevels{
					levelFiles:    explicitSublevels,
					cmp:           base.DefaultComparer.Compare,
					formatKey:     base.DefaultFormatter,
					levelMetadata: &levelMetadata,
				}
				for _, files := range explicitSublevels {
					sublevels.Levels = append(sublevels.Levels, NewLevelSliceSpecificOrder(files))
				}
			}

			if err != nil {
				t.Fatal(err)
			}

			var builder strings.Builder
			builder.WriteString(sublevels.describe(true))
			builder.WriteString(visualizeSublevels(sublevels, nil, fileMetas[1:]))
			return builder.String()
		case "pick-base-compaction":
			pickBaseCompaction = true
			fallthrough
		case "pick-intra-l0-compaction":
			minCompactionDepth := 3
			earliestUnflushedSeqNum := uint64(math.MaxUint64)
			for _, arg := range td.CmdArgs {
				switch arg.Key {
				case "min_depth":
					minCompactionDepth, err = strconv.Atoi(arg.Vals[0])
					if err != nil {
						t.Fatal(err)
					}
				case "earliest_unflushed_seqnum":
					eusnInt, err := strconv.Atoi(arg.Vals[0])
					if err != nil {
						t.Fatal(err)
					}
					earliestUnflushedSeqNum = uint64(eusnInt)
				}
			}

			var lcf *L0CompactionFiles
			if pickBaseCompaction {
				baseFiles := NewLevelSliceKeySorted(base.DefaultComparer.Compare, fileMetas[baseLevel])
				lcf, err = sublevels.PickBaseCompaction(minCompactionDepth, baseFiles)
				if err == nil && lcf != nil {
					// Try to extend the base compaction into a more rectangular
					// shape, using the smallest/largest keys of the files before
					// and after overlapping base files. This mimics the logic
					// the compactor is expected to implement.
					baseFiles := fileMetas[baseLevel]
					firstFile := sort.Search(len(baseFiles), func(i int) bool {
						return sublevels.cmp(baseFiles[i].Largest.UserKey, sublevels.orderedIntervals[lcf.minIntervalIndex].startKey.key) >= 0
					})
					lastFile := sort.Search(len(baseFiles), func(i int) bool {
						return sublevels.cmp(baseFiles[i].Smallest.UserKey, sublevels.orderedIntervals[lcf.maxIntervalIndex+1].startKey.key) >= 0
					})
					startKey := base.InvalidInternalKey
					endKey := base.InvalidInternalKey
					if firstFile > 0 {
						startKey = baseFiles[firstFile-1].Largest
					}
					if lastFile < len(baseFiles) {
						endKey = baseFiles[lastFile].Smallest
					}
					sublevels.ExtendL0ForBaseCompactionTo(
						startKey,
						endKey,
						lcf)
				}
			} else {
				lcf, err = sublevels.PickIntraL0Compaction(earliestUnflushedSeqNum, minCompactionDepth)
			}
			if err != nil {
				return fmt.Sprintf("error: %s", err.Error())
			}
			if lcf == nil {
				return "no compaction picked"
			}
			var builder strings.Builder
			builder.WriteString(fmt.Sprintf("compaction picked with stack depth reduction %d\n", lcf.seedIntervalStackDepthReduction))
			for i, file := range lcf.Files {
				builder.WriteString(file.FileNum.String())
				if i < len(lcf.Files)-1 {
					builder.WriteByte(',')
				}
			}
			startKey := sublevels.orderedIntervals[lcf.seedInterval].startKey
			endKey := sublevels.orderedIntervals[lcf.seedInterval+1].startKey
			builder.WriteString(fmt.Sprintf("\nseed interval: %s-%s\n", startKey.key, endKey.key))
			builder.WriteString(visualizeSublevels(sublevels, lcf.FilesIncluded, fileMetas[1:]))

			return builder.String()
		case "read-amp":
			return strconv.Itoa(sublevels.ReadAmplification())
		case "in-use-key-ranges":
			var buf bytes.Buffer
			for _, data := range strings.Split(strings.TrimSpace(td.Input), "\n") {
				keyRange := strings.Split(strings.TrimSpace(data), "-")
				smallest := []byte(strings.TrimSpace(keyRange[0]))
				largest := []byte(strings.TrimSpace(keyRange[1]))

				keyRanges := sublevels.InUseKeyRanges(smallest, largest)
				for i, r := range keyRanges {
					fmt.Fprintf(&buf, "%s-%s", sublevels.formatKey(r.Start), sublevels.formatKey(r.End))
					if i < len(keyRanges)-1 {
						fmt.Fprint(&buf, ", ")
					}
				}
				if len(keyRanges) == 0 {
					fmt.Fprint(&buf, ".")
				}
				fmt.Fprintln(&buf)
			}
			return buf.String()
		case "flush-split-keys":
			var builder strings.Builder
			builder.WriteString("flush user split keys: ")
			flushSplitKeys := sublevels.FlushSplitKeys()
			for i, key := range flushSplitKeys {
				builder.Write(key)
				if i < len(flushSplitKeys)-1 {
					builder.WriteString(", ")
				}
			}
			if len(flushSplitKeys) == 0 {
				builder.WriteString("none")
			}
			return builder.String()
		case "max-depth-after-ongoing-compactions":
			return strconv.Itoa(sublevels.MaxDepthAfterOngoingCompactions())
		case "l0-check-ordering":
			for sublevel, files := range sublevels.levelFiles {
				slice := NewLevelSliceSpecificOrder(files)
				err := CheckOrdering(base.DefaultComparer.Compare, base.DefaultFormatter,
					L0Sublevel(sublevel), slice.Iter(), ProhibitSplitUserKeys)
				if err != nil {
					return err.Error()
				}
			}
			return "OK"
		case "update-state-for-compaction":
			var fileNums []base.FileNum
			for _, arg := range td.CmdArgs {
				switch arg.Key {
				case "files":
					for _, val := range arg.Vals {
						fileNum, err := strconv.ParseUint(val, 10, 64)
						if err != nil {
							return err.Error()
						}
						fileNums = append(fileNums, base.FileNum(fileNum))
					}
				}
			}
			files := make([]*FileMetadata, 0, len(fileNums))
			for _, num := range fileNums {
				for _, f := range fileMetas[0] {
					if f.FileNum == num {
						f.CompactionState = CompactionStateCompacting
						files = append(files, f)
						break
					}
				}
			}
			slice := NewLevelSliceSeqSorted(files)
			sm, la := KeyRange(base.DefaultComparer.Compare, slice.Iter())
			activeCompactions = append(activeCompactions, L0Compaction{Smallest: sm, Largest: la})
			if err := sublevels.UpdateStateForStartedCompaction([]LevelSlice{slice}, true); err != nil {
				return err.Error()
			}
			return "OK"
		case "describe":
			var builder strings.Builder
			builder.WriteString(sublevels.describe(true))
			builder.WriteString(visualizeSublevels(sublevels, nil, fileMetas[1:]))
			return builder.String()
		}
		return fmt.Sprintf("unrecognized command: %s", td.Cmd)
	})
}

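// TestL0SublevelsReadAmpBasic is a minimal sketch added for illustration; the
// test name, helper names, and scenario are not part of the original suite.
// Two L0 files with overlapping user-key ranges cannot share a sublevel, so
// L0's read amplification is 2; with disjoint ranges both files fit in a
// single sublevel and the amplification stays 1.
func TestL0SublevelsReadAmpBasic(t *testing.T) {
	newFile := func(fileNum int, smallest, largest string, seqNum uint64) *FileMetadata {
		m := (&FileMetadata{FileNum: base.FileNum(fileNum), Size: 256}).ExtendPointKeyBounds(
			base.DefaultComparer.Compare,
			base.MakeInternalKey([]byte(smallest), seqNum, base.InternalKeyKindSet),
			base.MakeInternalKey([]byte(largest), seqNum, base.InternalKeyKindSet),
		)
		m.SmallestSeqNum = seqNum
		m.LargestSeqNum = seqNum
		m.InitPhysicalBacking()
		return m
	}
	build := func(files ...*FileMetadata) *L0Sublevels {
		lm := makeLevelMetadata(base.DefaultComparer.Compare, 0, files)
		s, err := NewL0Sublevels(&lm, base.DefaultComparer.Compare, base.DefaultFormatter, 64)
		require.NoError(t, err)
		return s
	}

	// Overlapping files must land in distinct sublevels.
	overlapping := build(newFile(1, "a", "d", 1), newFile(2, "b", "e", 2))
	require.Equal(t, 2, overlapping.ReadAmplification())

	// Disjoint files stack into a single sublevel.
	disjoint := build(newFile(3, "a", "c", 1), newFile(4, "e", "g", 2))
	require.Equal(t, 1, disjoint.ReadAmplification())
}
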
// TestAddL0FilesEquivalence checks that incrementally adding randomly
// generated files through AddL0Files yields the same sublevel structure as
// rebuilding the L0Sublevels from scratch with NewL0Sublevels.
func TestAddL0FilesEquivalence(t *testing.T) {
	seed := uint64(time.Now().UnixNano())
	rng := rand.New(rand.NewSource(seed))
	t.Logf("seed: %d", seed)

	var inUseKeys [][]byte
	const keyReusePct = 0.15
	var fileMetas []*FileMetadata
	var s, s2 *L0Sublevels
	keySpace := testkeys.Alpha(8)

	flushSplitMaxBytes := rng.Int63n(1 << 20)

	// The outer loop runs once for each version edit. The inner loops run
	// once for each file, or each file bound.
	for i := 0; i < 100; i++ {
		var filesToAdd []*FileMetadata
		numFiles := 1 + rng.Intn(9)
		keys := make([][]byte, 0, 2*numFiles)
		for j := 0; j < 2*numFiles; j++ {
			if rng.Float64() <= keyReusePct && len(inUseKeys) > 0 {
				keys = append(keys, inUseKeys[rng.Intn(len(inUseKeys))])
			} else {
				newKey := testkeys.Key(keySpace, rng.Int63n(keySpace.Count()))
				inUseKeys = append(inUseKeys, newKey)
				keys = append(keys, newKey)
			}
		}
		slices.SortFunc(keys, bytes.Compare)
		for j := 0; j < numFiles; j++ {
			startKey := keys[j*2]
			endKey := keys[j*2+1]
			if bytes.Equal(startKey, endKey) {
				continue
			}
			meta := (&FileMetadata{
				FileNum:        base.FileNum(i*10 + j + 1),
				Size:           rng.Uint64n(1 << 20),
				SmallestSeqNum: uint64(2*i + 1),
				LargestSeqNum:  uint64(2*i + 2),
			}).ExtendPointKeyBounds(
				base.DefaultComparer.Compare,
				base.MakeInternalKey(startKey, uint64(2*i+1), base.InternalKeyKindSet),
				base.MakeRangeDeleteSentinelKey(endKey),
			)
			meta.InitPhysicalBacking()
			fileMetas = append(fileMetas, meta)
			filesToAdd = append(filesToAdd, meta)
		}
		if len(filesToAdd) == 0 {
			continue
		}

		levelMetadata := makeLevelMetadata(testkeys.Comparer.Compare, 0, fileMetas)
		var err error

		if s2 == nil {
			s2, err = NewL0Sublevels(&levelMetadata, testkeys.Comparer.Compare, testkeys.Comparer.FormatKey, flushSplitMaxBytes)
			require.NoError(t, err)
		} else {
			// AddL0Files relies on the L0 indices stored in the FileMetadatas
			// matching those of the previous L0Sublevels, so it must be called
			// before NewL0Sublevels; calling it the other way around results in
			// out-of-bounds panics.
			SortBySeqNum(filesToAdd)
			s2, err = s2.AddL0Files(filesToAdd, flushSplitMaxBytes, &levelMetadata)
			require.NoError(t, err)
		}

		s, err = NewL0Sublevels(&levelMetadata, testkeys.Comparer.Compare, testkeys.Comparer.FormatKey, flushSplitMaxBytes)
		require.NoError(t, err)

		// Check for equivalence.
		require.Equal(t, s.flushSplitUserKeys, s2.flushSplitUserKeys)
		require.Equal(t, s.orderedIntervals, s2.orderedIntervals)
		require.Equal(t, s.levelFiles, s2.levelFiles)
	}
}

// BenchmarkManifestApplyWithL0Sublevels measures the cost of reading and
// applying a large MANIFEST via readManifest.
func BenchmarkManifestApplyWithL0Sublevels(b *testing.B) {
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		v, err := readManifest("testdata/MANIFEST_import")
		require.NotNil(b, v)
		require.NoError(b, err)
	}
}

// BenchmarkL0SublevelsInit measures the cost of constructing L0Sublevels from
// an already-loaded version.
func BenchmarkL0SublevelsInit(b *testing.B) {
	v, err := readManifest("testdata/MANIFEST_import")
	if err != nil {
		b.Fatal(err)
	}
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		sl, err := NewL0Sublevels(&v.Levels[0],
			base.DefaultComparer.Compare, base.DefaultFormatter, 5<<20)
		require.NoError(b, err)
		if sl == nil {
			b.Fatal("expected non-nil L0Sublevels to be generated")
		}
	}
}

// BenchmarkL0SublevelsInitAndPick measures L0Sublevels construction plus
// picking a base compaction from the resulting sublevels.
func BenchmarkL0SublevelsInitAndPick(b *testing.B) {
	v, err := readManifest("testdata/MANIFEST_import")
	if err != nil {
		b.Fatal(err)
	}
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		sl, err := NewL0Sublevels(&v.Levels[0],
			base.DefaultComparer.Compare, base.DefaultFormatter, 5<<20)
		require.NoError(b, err)
		if sl == nil {
			b.Fatal("expected non-nil L0Sublevels to be generated")
		}
		c, err := sl.PickBaseCompaction(2, LevelSlice{})
		require.NoError(b, err)
		if c == nil {
			b.Fatal("expected non-nil compaction to be generated")
		}
	}
}