github.com/cockroachdb/pebble@v1.1.2/internal/manifest/l0_sublevels_test.go

// Copyright 2020 The LevelDB-Go and Pebble Authors. All rights reserved. Use
// of this source code is governed by a BSD-style license that can be found in
// the LICENSE file.

package manifest

import (
	"bytes"
	"fmt"
	"io"
	"math"
	"os"
	"sort"
	"strconv"
	"strings"
	"testing"
	"time"

	"github.com/cockroachdb/datadriven"
	"github.com/cockroachdb/pebble/internal/base"
	"github.com/cockroachdb/pebble/internal/testkeys"
	"github.com/cockroachdb/pebble/record"
	"github.com/stretchr/testify/require"
	"golang.org/x/exp/rand"
)

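// readManifest replays the version edits in the MANIFEST file at filename and
// returns the resulting Version.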
func readManifest(filename string) (*Version, error) {
	f, err := os.Open(filename)
	if err != nil {
		return nil, err
	}
	defer f.Close()
	rr := record.NewReader(f, 0 /* logNum */)
	var v *Version
	addedByFileNum := make(map[base.FileNum]*FileMetadata)
	for {
		r, err := rr.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			return nil, err
		}
		var ve VersionEdit
		if err = ve.Decode(r); err != nil {
			return nil, err
		}
		var bve BulkVersionEdit
		bve.AddedByFileNum = addedByFileNum
		if err := bve.Accumulate(&ve); err != nil {
			return nil, err
		}
		if v, err = bve.Apply(v, base.DefaultComparer.Compare, base.DefaultFormatter, 10<<20, 32000, nil, ProhibitSplitUserKeys); err != nil {
			return nil, err
		}
	}
	return v, nil
}

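// visualizeSublevels renders the L0 sublevels in s, plus any non-empty levels
// in otherLevels, as ASCII art: one line per (sub)level, positioned by the
// first byte of each file's smallest and largest user keys, with a key axis as
// the final line. Files in compactionFiles are drawn with '+', L0 files in a
// base compaction with 'v', intra-L0 compactions with '^', and compacting
// files in other levels with '='. For example, a single non-compacting L0 file
// spanning a-d renders roughly as:
//
//	L0.0:  a---------d
//	       aa bb cc dd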
func visualizeSublevels(
	s *L0Sublevels, compactionFiles bitSet, otherLevels [][]*FileMetadata,
) string {
	var buf strings.Builder
	if compactionFiles == nil {
		compactionFiles = newBitSet(s.levelMetadata.Len())
	}
	largestChar := byte('a')
	printLevel := func(files []*FileMetadata, level string, isL0 bool) {
		lastChar := byte('a')
		fmt.Fprintf(&buf, "L%s:", level)
		for i := 0; i < 5-len(level); i++ {
			buf.WriteByte(' ')
		}
		for j, f := range files {
			for lastChar < f.Smallest.UserKey[0] {
				buf.WriteString("   ")
				lastChar++
			}
			buf.WriteByte(f.Smallest.UserKey[0])
			middleChar := byte('-')
			if isL0 {
				if compactionFiles[f.L0Index] {
					middleChar = '+'
				} else if f.IsCompacting() {
					if f.IsIntraL0Compacting {
						middleChar = '^'
					} else {
						middleChar = 'v'
					}
				}
			} else if f.IsCompacting() {
				middleChar = '='
			}
			if largestChar < f.Largest.UserKey[0] {
				largestChar = f.Largest.UserKey[0]
			}
			if f.Smallest.UserKey[0] == f.Largest.UserKey[0] {
				buf.WriteByte(f.Largest.UserKey[0])
				if compactionFiles[f.L0Index] {
					buf.WriteByte('+')
				} else if j < len(files)-1 {
					buf.WriteByte(' ')
				}
				lastChar++
				continue
			}
			buf.WriteByte(middleChar)
			buf.WriteByte(middleChar)
			lastChar++
			for lastChar < f.Largest.UserKey[0] {
				buf.WriteByte(middleChar)
				buf.WriteByte(middleChar)
				buf.WriteByte(middleChar)
				lastChar++
			}
			if f.Largest.IsExclusiveSentinel() &&
				j < len(files)-1 && files[j+1].Smallest.UserKey[0] == f.Largest.UserKey[0] {
				// This case happens when two successive files have matching
				// end/start user keys and the left-side file has the
				// sentinel key as its end key trailer. In that case we
				// print the sstables as:
				//
				// a------d------g
				//
				continue
			}
			buf.WriteByte(middleChar)
			buf.WriteByte(f.Largest.UserKey[0])
			if j < len(files)-1 {
				buf.WriteByte(' ')
			}
			lastChar++
		}
		fmt.Fprintf(&buf, "\n")
	}
	for i := len(s.levelFiles) - 1; i >= 0; i-- {
		printLevel(s.levelFiles[i], fmt.Sprintf("0.%d", i), true)
	}
	for i := range otherLevels {
		if len(otherLevels[i]) == 0 {
			continue
		}
		printLevel(otherLevels[i], strconv.Itoa(i+1), false)
	}
	buf.WriteString("       ")
	for b := byte('a'); b <= largestChar; b++ {
		buf.WriteByte(b)
		buf.WriteByte(b)
		if b < largestChar {
			buf.WriteByte(' ')
		}
	}
	buf.WriteByte('\n')
	return buf.String()
}

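// TestL0Sublevels is a datadriven test driven by testdata/l0_sublevels. It
// exercises sublevel construction and incremental updates (define,
// add-l0-files), compaction picking (pick-base-compaction,
// pick-intra-l0-compaction, update-state-for-compaction), and read-only
// accessors such as read-amp, in-use-key-ranges, and flush-split-keys.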
func TestL0Sublevels(t *testing.T) {
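	// parseMeta parses a table spec of the form
	// "<filenum>:<smallest>-<largest> [base_compacting|intra_l0_compacting|compacting] [size=<bytes>]",
	// where the bounds are internal keys in base.ParseInternalKey format.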
	parseMeta := func(s string) (*FileMetadata, error) {
		parts := strings.Split(s, ":")
		if len(parts) != 2 {
			t.Fatalf("malformed table spec: %s", s)
		}
		fileNum, err := strconv.Atoi(strings.TrimSpace(parts[0]))
		if err != nil {
			return nil, err
		}
		fields := strings.Fields(parts[1])
		keyRange := strings.Split(strings.TrimSpace(fields[0]), "-")
		m := (&FileMetadata{}).ExtendPointKeyBounds(
			base.DefaultComparer.Compare,
			base.ParseInternalKey(strings.TrimSpace(keyRange[0])),
			base.ParseInternalKey(strings.TrimSpace(keyRange[1])),
		)
		m.SmallestSeqNum = m.Smallest.SeqNum()
		m.LargestSeqNum = m.Largest.SeqNum()
		if m.Largest.IsExclusiveSentinel() {
			m.LargestSeqNum = m.SmallestSeqNum
		}
		m.FileNum = base.FileNum(fileNum)
		m.Size = uint64(256)
		m.InitPhysicalBacking()
		if len(fields) > 1 {
			for _, field := range fields[1:] {
				parts := strings.Split(field, "=")
				switch parts[0] {
				case "base_compacting":
					m.IsIntraL0Compacting = false
					m.CompactionState = CompactionStateCompacting
				case "intra_l0_compacting":
					m.IsIntraL0Compacting = true
					m.CompactionState = CompactionStateCompacting
				case "compacting":
					m.CompactionState = CompactionStateCompacting
				case "size":
					sizeInt, err := strconv.Atoi(parts[1])
					if err != nil {
						return nil, err
					}
					m.Size = uint64(sizeInt)
				}
			}
		}

		return m, nil
	}

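	// State shared and mutated across the datadriven commands below.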
	var err error
	var fileMetas [NumLevels][]*FileMetadata
	var explicitSublevels [][]*FileMetadata
	var activeCompactions []L0Compaction
	var sublevels *L0Sublevels
	baseLevel := NumLevels - 1

	datadriven.RunTest(t, "testdata/l0_sublevels", func(t *testing.T, td *datadriven.TestData) string {
		pickBaseCompaction := false
		level := 0
		addL0FilesOpt := false
		switch td.Cmd {
		case "add-l0-files":
			addL0FilesOpt = true
			level = 0
			fallthrough
		case "define":
			if !addL0FilesOpt {
				fileMetas = [NumLevels][]*FileMetadata{}
				baseLevel = NumLevels - 1
				activeCompactions = nil
			}
			explicitSublevels = [][]*FileMetadata{}
			sublevel := -1
			addedL0Files := make([]*FileMetadata, 0)
			for _, data := range strings.Split(td.Input, "\n") {
				data = strings.TrimSpace(data)
				switch data[:2] {
				case "L0", "L1", "L2", "L3", "L4", "L5", "L6":
					level, err = strconv.Atoi(data[1:2])
					if err != nil {
						return err.Error()
					}
					if level == 0 && len(data) > 3 {
						// Sublevel was specified.
						sublevel, err = strconv.Atoi(data[3:])
						if err != nil {
							return err.Error()
						}
					} else {
						sublevel = -1
					}
				default:
					meta, err := parseMeta(data)
					if err != nil {
						return err.Error()
					}
					if level != 0 && level < baseLevel {
						baseLevel = level
					}
					fileMetas[level] = append(fileMetas[level], meta)
					if level == 0 {
						addedL0Files = append(addedL0Files, meta)
					}
					if sublevel != -1 {
						for len(explicitSublevels) <= sublevel {
							explicitSublevels = append(explicitSublevels, []*FileMetadata{})
						}
						explicitSublevels[sublevel] = append(explicitSublevels[sublevel], meta)
					}
				}
			}

			flushSplitMaxBytes := 64
			initialize := true
			for _, arg := range td.CmdArgs {
				switch arg.Key {
				case "flush_split_max_bytes":
					flushSplitMaxBytes, err = strconv.Atoi(arg.Vals[0])
					if err != nil {
						t.Fatal(err)
					}
				case "no_initialize":
					// This case is for use with explicitly-specified sublevels
					// only.
					initialize = false
				}
			}
			SortBySeqNum(fileMetas[0])
			for i := 1; i < NumLevels; i++ {
				SortBySmallest(fileMetas[i], base.DefaultComparer.Compare)
			}

			levelMetadata := makeLevelMetadata(base.DefaultComparer.Compare, 0, fileMetas[0])
			if initialize {
				if addL0FilesOpt {
					SortBySeqNum(addedL0Files)
					sublevels, err = sublevels.AddL0Files(addedL0Files, int64(flushSplitMaxBytes), &levelMetadata)
					// Check if the output matches a full initialization.
					sublevels2, _ := NewL0Sublevels(&levelMetadata, base.DefaultComparer.Compare, base.DefaultFormatter, int64(flushSplitMaxBytes))
					if sublevels != nil && sublevels2 != nil {
						require.Equal(t, sublevels.flushSplitUserKeys, sublevels2.flushSplitUserKeys)
						require.Equal(t, sublevels.levelFiles, sublevels2.levelFiles)
					}
				} else {
					sublevels, err = NewL0Sublevels(
						&levelMetadata,
						base.DefaultComparer.Compare,
						base.DefaultFormatter,
						int64(flushSplitMaxBytes))
				}
				if err != nil {
					return err.Error()
				}
				sublevels.InitCompactingFileInfo(nil)
			} else {
				// This case is for use with explicitly-specified sublevels
				// only.
				sublevels = &L0Sublevels{
					levelFiles:    explicitSublevels,
					cmp:           base.DefaultComparer.Compare,
					formatKey:     base.DefaultFormatter,
					levelMetadata: &levelMetadata,
				}
				for _, files := range explicitSublevels {
					sublevels.Levels = append(sublevels.Levels, NewLevelSliceSpecificOrder(files))
				}
			}

			if err != nil {
				t.Fatal(err)
			}

			var builder strings.Builder
			builder.WriteString(sublevels.describe(true))
			builder.WriteString(visualizeSublevels(sublevels, nil, fileMetas[1:]))
			return builder.String()
		case "pick-base-compaction":
			pickBaseCompaction = true
			fallthrough
		case "pick-intra-l0-compaction":
			minCompactionDepth := 3
			earliestUnflushedSeqNum := uint64(math.MaxUint64)
			for _, arg := range td.CmdArgs {
				switch arg.Key {
				case "min_depth":
					minCompactionDepth, err = strconv.Atoi(arg.Vals[0])
					if err != nil {
						t.Fatal(err)
					}
				case "earliest_unflushed_seqnum":
					eusnInt, err := strconv.Atoi(arg.Vals[0])
					if err != nil {
						t.Fatal(err)
					}
					earliestUnflushedSeqNum = uint64(eusnInt)
				}
			}

			var lcf *L0CompactionFiles
			if pickBaseCompaction {
				baseFiles := NewLevelSliceKeySorted(base.DefaultComparer.Compare, fileMetas[baseLevel])
				lcf, err = sublevels.PickBaseCompaction(minCompactionDepth, baseFiles)
				if err == nil && lcf != nil {
					// Try to extend the base compaction into a more rectangular
					// shape, using the smallest/largest keys of the files before
					// and after overlapping base files. This mimics the logic
					// the compactor is expected to implement.
					baseFiles := fileMetas[baseLevel]
					firstFile := sort.Search(len(baseFiles), func(i int) bool {
						return sublevels.cmp(baseFiles[i].Largest.UserKey, sublevels.orderedIntervals[lcf.minIntervalIndex].startKey.key) >= 0
					})
					lastFile := sort.Search(len(baseFiles), func(i int) bool {
						return sublevels.cmp(baseFiles[i].Smallest.UserKey, sublevels.orderedIntervals[lcf.maxIntervalIndex+1].startKey.key) >= 0
					})
					startKey := base.InvalidInternalKey
					endKey := base.InvalidInternalKey
					if firstFile > 0 {
						startKey = baseFiles[firstFile-1].Largest
					}
					if lastFile < len(baseFiles) {
						endKey = baseFiles[lastFile].Smallest
					}
					sublevels.ExtendL0ForBaseCompactionTo(
						startKey,
						endKey,
						lcf)
				}
			} else {
				lcf, err = sublevels.PickIntraL0Compaction(earliestUnflushedSeqNum, minCompactionDepth)
			}
			if err != nil {
				return fmt.Sprintf("error: %s", err.Error())
			}
			if lcf == nil {
				return "no compaction picked"
			}
			var builder strings.Builder
			builder.WriteString(fmt.Sprintf("compaction picked with stack depth reduction %d\n", lcf.seedIntervalStackDepthReduction))
			for i, file := range lcf.Files {
				builder.WriteString(file.FileNum.String())
				if i < len(lcf.Files)-1 {
					builder.WriteByte(',')
				}
			}
			startKey := sublevels.orderedIntervals[lcf.seedInterval].startKey
			endKey := sublevels.orderedIntervals[lcf.seedInterval+1].startKey
			builder.WriteString(fmt.Sprintf("\nseed interval: %s-%s\n", startKey.key, endKey.key))
			builder.WriteString(visualizeSublevels(sublevels, lcf.FilesIncluded, fileMetas[1:]))

			return builder.String()
		case "read-amp":
			return strconv.Itoa(sublevels.ReadAmplification())
		case "in-use-key-ranges":
			var buf bytes.Buffer
			for _, data := range strings.Split(strings.TrimSpace(td.Input), "\n") {
				keyRange := strings.Split(strings.TrimSpace(data), "-")
				smallest := []byte(strings.TrimSpace(keyRange[0]))
				largest := []byte(strings.TrimSpace(keyRange[1]))

				keyRanges := sublevels.InUseKeyRanges(smallest, largest)
				for i, r := range keyRanges {
					fmt.Fprintf(&buf, "%s-%s", sublevels.formatKey(r.Start), sublevels.formatKey(r.End))
					if i < len(keyRanges)-1 {
						fmt.Fprint(&buf, ", ")
					}
				}
				if len(keyRanges) == 0 {
					fmt.Fprint(&buf, ".")
				}
				fmt.Fprintln(&buf)
			}
			return buf.String()
		case "flush-split-keys":
			var builder strings.Builder
			builder.WriteString("flush user split keys: ")
			flushSplitKeys := sublevels.FlushSplitKeys()
			for i, key := range flushSplitKeys {
				builder.Write(key)
				if i < len(flushSplitKeys)-1 {
					builder.WriteString(", ")
				}
			}
			if len(flushSplitKeys) == 0 {
				builder.WriteString("none")
			}
			return builder.String()
		case "max-depth-after-ongoing-compactions":
			return strconv.Itoa(sublevels.MaxDepthAfterOngoingCompactions())
		case "l0-check-ordering":
			for sublevel, files := range sublevels.levelFiles {
				slice := NewLevelSliceSpecificOrder(files)
				err := CheckOrdering(base.DefaultComparer.Compare, base.DefaultFormatter,
					L0Sublevel(sublevel), slice.Iter(), ProhibitSplitUserKeys)
				if err != nil {
					return err.Error()
				}
			}
			return "OK"
		case "update-state-for-compaction":
			var fileNums []base.FileNum
			for _, arg := range td.CmdArgs {
				switch arg.Key {
				case "files":
					for _, val := range arg.Vals {
						fileNum, err := strconv.ParseUint(val, 10, 64)
						if err != nil {
							return err.Error()
						}
						fileNums = append(fileNums, base.FileNum(fileNum))
					}
				}
			}
			files := make([]*FileMetadata, 0, len(fileNums))
			for _, num := range fileNums {
				for _, f := range fileMetas[0] {
					if f.FileNum == num {
						f.CompactionState = CompactionStateCompacting
						files = append(files, f)
						break
					}
				}
			}
			slice := NewLevelSliceSeqSorted(files)
			sm, la := KeyRange(base.DefaultComparer.Compare, slice.Iter())
			activeCompactions = append(activeCompactions, L0Compaction{Smallest: sm, Largest: la})
			if err := sublevels.UpdateStateForStartedCompaction([]LevelSlice{slice}, true); err != nil {
				return err.Error()
			}
			return "OK"
		case "describe":
			var builder strings.Builder
			builder.WriteString(sublevels.describe(true))
			builder.WriteString(visualizeSublevels(sublevels, nil, fileMetas[1:]))
			return builder.String()
		}
		return fmt.Sprintf("unrecognized command: %s", td.Cmd)
	})
}

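// TestAddL0FilesEquivalence checks that incrementally adding randomly
// generated L0 files via AddL0Files yields the same flush split keys, ordered
// intervals, and sublevel files as rebuilding the L0Sublevels from scratch
// with NewL0Sublevels.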
func TestAddL0FilesEquivalence(t *testing.T) {
	seed := uint64(time.Now().UnixNano())
	rng := rand.New(rand.NewSource(seed))
	t.Logf("seed: %d", seed)

	var inUseKeys [][]byte
	const keyReusePct = 0.15
	var fileMetas []*FileMetadata
	var s, s2 *L0Sublevels
	keySpace := testkeys.Alpha(8)

	flushSplitMaxBytes := rng.Int63n(1 << 20)

	// The outer loop runs once for each version edit. The inner loop(s) run
	// once for each file, or each file bound.
	for i := 0; i < 100; i++ {
		var filesToAdd []*FileMetadata
		numFiles := 1 + rng.Intn(9)
		keys := make([][]byte, 0, 2*numFiles)
		for j := 0; j < 2*numFiles; j++ {
			if rng.Float64() <= keyReusePct && len(inUseKeys) > 0 {
				keys = append(keys, inUseKeys[rng.Intn(len(inUseKeys))])
			} else {
				newKey := testkeys.Key(keySpace, rng.Int63n(keySpace.Count()))
				inUseKeys = append(inUseKeys, newKey)
				keys = append(keys, newKey)
			}
		}
		sort.Slice(keys, func(i, j int) bool {
			return bytes.Compare(keys[i], keys[j]) < 0
		})
		for j := 0; j < numFiles; j++ {
			startKey := keys[j*2]
			endKey := keys[j*2+1]
			if bytes.Equal(startKey, endKey) {
				continue
			}
			meta := (&FileMetadata{
				FileNum:        base.FileNum(i*10 + j + 1),
				Size:           rng.Uint64n(1 << 20),
				SmallestSeqNum: uint64(2*i + 1),
				LargestSeqNum:  uint64(2*i + 2),
			}).ExtendPointKeyBounds(
				base.DefaultComparer.Compare,
				base.MakeInternalKey(startKey, uint64(2*i+1), base.InternalKeyKindSet),
				base.MakeRangeDeleteSentinelKey(endKey),
			)
			meta.InitPhysicalBacking()
			fileMetas = append(fileMetas, meta)
			filesToAdd = append(filesToAdd, meta)
		}
		if len(filesToAdd) == 0 {
			continue
		}

		levelMetadata := makeLevelMetadata(testkeys.Comparer.Compare, 0, fileMetas)
		var err error

		if s2 == nil {
			s2, err = NewL0Sublevels(&levelMetadata, testkeys.Comparer.Compare, testkeys.Comparer.FormatKey, flushSplitMaxBytes)
			require.NoError(t, err)
		} else {
			// AddL0Files relies on the indices in the FileMetadatas matching
			// those of the previous L0Sublevels, so it must be called before
			// NewL0Sublevels; calling them in the other order results in
			// out-of-bounds panics.
			SortBySeqNum(filesToAdd)
			s2, err = s2.AddL0Files(filesToAdd, flushSplitMaxBytes, &levelMetadata)
			require.NoError(t, err)
		}

		s, err = NewL0Sublevels(&levelMetadata, testkeys.Comparer.Compare, testkeys.Comparer.FormatKey, flushSplitMaxBytes)
		require.NoError(t, err)

		// Check for equivalence.
		require.Equal(t, s.flushSplitUserKeys, s2.flushSplitUserKeys)
		require.Equal(t, s.orderedIntervals, s2.orderedIntervals)
		require.Equal(t, s.levelFiles, s2.levelFiles)
	}
}

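// BenchmarkManifestApplyWithL0Sublevels measures replaying and applying all
// version edits in testdata/MANIFEST_import via readManifest; as the name
// suggests, this includes building L0 sublevels for each resulting version.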
func BenchmarkManifestApplyWithL0Sublevels(b *testing.B) {
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		v, err := readManifest("testdata/MANIFEST_import")
		require.NotNil(b, v)
		require.NoError(b, err)
	}
}

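// BenchmarkL0SublevelsInit measures constructing L0Sublevels from scratch for
// the L0 of the version read from testdata/MANIFEST_import.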
func BenchmarkL0SublevelsInit(b *testing.B) {
	v, err := readManifest("testdata/MANIFEST_import")
	if err != nil {
		b.Fatal(err)
	}
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		sl, err := NewL0Sublevels(&v.Levels[0],
			base.DefaultComparer.Compare, base.DefaultFormatter, 5<<20)
		require.NoError(b, err)
		if sl == nil {
			b.Fatal("expected non-nil L0Sublevels to be generated")
		}
	}
}

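// BenchmarkL0SublevelsInitAndPick measures constructing L0Sublevels as above
// and then picking a base compaction from the result.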
func BenchmarkL0SublevelsInitAndPick(b *testing.B) {
	v, err := readManifest("testdata/MANIFEST_import")
	if err != nil {
		b.Fatal(err)
	}
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		sl, err := NewL0Sublevels(&v.Levels[0],
			base.DefaultComparer.Compare, base.DefaultFormatter, 5<<20)
		require.NoError(b, err)
		if sl == nil {
			b.Fatal("expected non-nil L0Sublevels to be generated")
		}
		c, err := sl.PickBaseCompaction(2, LevelSlice{})
		require.NoError(b, err)
		if c == nil {
			b.Fatal("expected non-nil compaction to be generated")
		}
	}
}