github.com/cockroachdb/pebble@v0.0.0-20231214172447-ab4952c5f87b/metamorphic/options.go

     1  // Copyright 2019 The LevelDB-Go and Pebble Authors. All rights reserved. Use
     2  // of this source code is governed by a BSD-style license that can be found in
     3  // the LICENSE file.
     4  
     5  package metamorphic
     6  
     7  import (
     8  	"bytes"
     9  	"fmt"
    10  	"os"
    11  	"path/filepath"
    12  	"runtime"
    13  	"strconv"
    14  	"strings"
    15  	"time"
    16  
    17  	"github.com/cockroachdb/errors"
    18  	"github.com/cockroachdb/pebble"
    19  	"github.com/cockroachdb/pebble/bloom"
    20  	"github.com/cockroachdb/pebble/internal/cache"
    21  	"github.com/cockroachdb/pebble/internal/testkeys"
    22  	"github.com/cockroachdb/pebble/objstorage/remote"
    23  	"github.com/cockroachdb/pebble/sstable"
    24  	"github.com/cockroachdb/pebble/vfs"
    25  	"golang.org/x/exp/rand"
    26  )
    27  
    28  const (
     29  	// The metamorphic test exercises range keys, so we cannot use a
     30  	// FormatMajorVersion older than pebble.FormatRangeKeys.
    31  	minimumFormatMajorVersion = pebble.FormatRangeKeys
    32  	// The format major version to use in the default options configurations. We
    33  	// default to the last format major version of Cockroach 22.2 so we exercise
    34  	// the runtime version ratcheting that a cluster upgrading to 23.1 would
    35  	// experience. The randomized options may still use format major versions
    36  	// that are less than defaultFormatMajorVersion but are at least
    37  	// minimumFormatMajorVersion.
    38  	defaultFormatMajorVersion = pebble.FormatPrePebblev1Marked
    39  	// newestFormatMajorVersionToTest is the most recent format major version
    40  	// the metamorphic tests should use. This may be greater than
    41  	// pebble.FormatNewest when some format major versions are marked as
    42  	// experimental.
    43  	newestFormatMajorVersionToTest = pebble.FormatNewest
    44  )
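         // An illustrative note (not upstream commentary): the randomized
         // configurations below pick a format major version uniformly from the
         // allowed range, roughly
         //
         //	v := minimumFormatMajorVersion
         //	v += pebble.FormatMajorVersion(rng.Intn(int(newestFormatMajorVersionToTest-v) + 1))
         //
         // so every version in [minimumFormatMajorVersion,
         // newestFormatMajorVersionToTest] remains reachable, while the default
         // configurations stay at defaultFormatMajorVersion unless they set
         // format_major_version explicitly.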
    45  
    46  func parseOptions(
    47  	opts *TestOptions, data string, customOptionParsers map[string]func(string) (CustomOption, bool),
    48  ) error {
    49  	hooks := &pebble.ParseHooks{
    50  		NewCache:        pebble.NewCache,
    51  		NewFilterPolicy: filterPolicyFromName,
    52  		SkipUnknown: func(name, value string) bool {
    53  			switch name {
    54  			case "TestOptions":
    55  				return true
    56  			case "TestOptions.strictfs":
    57  				opts.strictFS = true
    58  				return true
    59  			case "TestOptions.ingest_using_apply":
    60  				opts.ingestUsingApply = true
    61  				return true
    62  			case "TestOptions.delete_sized":
    63  				opts.deleteSized = true
    64  				return true
    65  			case "TestOptions.replace_single_delete":
    66  				opts.replaceSingleDelete = true
    67  				return true
    68  			case "TestOptions.use_disk":
    69  				opts.useDisk = true
    70  				return true
    71  			case "TestOptions.initial_state_desc":
    72  				opts.initialStateDesc = value
    73  				return true
    74  			case "TestOptions.initial_state_path":
    75  				opts.initialStatePath = value
    76  				return true
    77  			case "TestOptions.threads":
    78  				v, err := strconv.Atoi(value)
    79  				if err != nil {
    80  					panic(err)
    81  				}
    82  				opts.threads = v
    83  				return true
    84  			case "TestOptions.disable_block_property_collector":
    85  				v, err := strconv.ParseBool(value)
    86  				if err != nil {
    87  					panic(err)
    88  				}
    89  				opts.disableBlockPropertyCollector = v
    90  				if v {
    91  					opts.Opts.BlockPropertyCollectors = nil
    92  				}
    93  				return true
    94  			case "TestOptions.enable_value_blocks":
    95  				opts.enableValueBlocks = true
    96  				opts.Opts.Experimental.EnableValueBlocks = func() bool { return true }
    97  				return true
    98  			case "TestOptions.async_apply_to_db":
    99  				opts.asyncApplyToDB = true
   100  				return true
   101  			case "TestOptions.shared_storage_enabled":
   102  				opts.sharedStorageEnabled = true
   103  				sharedStorage := remote.NewInMem()
   104  				opts.Opts.Experimental.RemoteStorage = remote.MakeSimpleFactory(map[remote.Locator]remote.Storage{
   105  					"": sharedStorage,
   106  				})
   107  				opts.sharedStorageFS = sharedStorage
   108  				if opts.Opts.Experimental.CreateOnShared == remote.CreateOnSharedNone {
   109  					opts.Opts.Experimental.CreateOnShared = remote.CreateOnSharedAll
   110  				}
   111  				return true
   112  			case "TestOptions.secondary_cache_enabled":
   113  				opts.secondaryCacheEnabled = true
   114  				opts.Opts.Experimental.SecondaryCacheSizeBytes = 1024 * 1024 * 32 // 32 MBs
   115  				return true
   116  			case "TestOptions.seed_efos":
   117  				v, err := strconv.ParseUint(value, 10, 64)
   118  				if err != nil {
   119  					panic(err)
   120  				}
   121  				opts.seedEFOS = v
   122  				return true
   123  			case "TestOptions.ingest_split":
   124  				opts.ingestSplit = true
   125  				opts.Opts.Experimental.IngestSplit = func() bool {
   126  					return true
   127  				}
   128  				return true
   129  			case "TestOptions.use_shared_replicate":
   130  				opts.useSharedReplicate = true
   131  				return true
   132  			case "TestOptions.use_excise":
   133  				opts.useExcise = true
   134  				return true
   135  			case "TestOptions.efos_always_creates_iterators":
   136  				opts.efosAlwaysCreatesIters = true
   137  				opts.Opts.TestingAlwaysCreateEFOSIterators(true /* value */)
   138  				return true
   139  			default:
   140  				if customOptionParsers == nil {
   141  					return false
   142  				}
   143  				name = strings.TrimPrefix(name, "TestOptions.")
   144  				if p, ok := customOptionParsers[name]; ok {
   145  					if customOpt, ok := p(value); ok {
   146  						opts.CustomOpts = append(opts.CustomOpts, customOpt)
   147  						return true
   148  					}
   149  				}
   150  				return false
   151  			}
   152  		},
   153  	}
   154  	err := opts.Opts.Parse(data, hooks)
   155  	opts.Opts.EnsureDefaults()
   156  	return err
   157  }
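         // Example (an illustrative sketch, not part of the upstream file): a
         // serialized test configuration mixes Pebble's own [Options] stanza with
         // the [TestOptions] stanza understood only by the SkipUnknown hook above.
         //
         //	opts := defaultTestOptions()
         //	err := parseOptions(opts, `
         //	[Options]
         //	  disable_wal=true
         //	[TestOptions]
         //	  threads=4
         //	  use_disk=true
         //	`, nil /* no custom option parsers */)
         //
         // On success, opts.Opts.DisableWAL is true, opts.threads is 4, and
         // opts.useDisk is true; the "TestOptions.*" keys never reach Pebble's own
         // option parsing because SkipUnknown claims them.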
   158  
   159  func optionsToString(opts *TestOptions) string {
   160  	var buf bytes.Buffer
   161  	if opts.strictFS {
   162  		fmt.Fprint(&buf, "  strictfs=true\n")
   163  	}
   164  	if opts.ingestUsingApply {
   165  		fmt.Fprint(&buf, "  ingest_using_apply=true\n")
   166  	}
   167  	if opts.deleteSized {
   168  		fmt.Fprint(&buf, "  delete_sized=true\n")
   169  	}
   170  	if opts.replaceSingleDelete {
   171  		fmt.Fprint(&buf, "  replace_single_delete=true\n")
   172  	}
   173  	if opts.useDisk {
   174  		fmt.Fprint(&buf, "  use_disk=true\n")
   175  	}
   176  	if opts.initialStatePath != "" {
   177  		fmt.Fprintf(&buf, "  initial_state_path=%s\n", opts.initialStatePath)
   178  	}
   179  	if opts.initialStateDesc != "" {
   180  		fmt.Fprintf(&buf, "  initial_state_desc=%s\n", opts.initialStateDesc)
   181  	}
   182  	if opts.threads != 0 {
   183  		fmt.Fprintf(&buf, "  threads=%d\n", opts.threads)
   184  	}
   185  	if opts.disableBlockPropertyCollector {
   186  		fmt.Fprintf(&buf, "  disable_block_property_collector=%t\n", opts.disableBlockPropertyCollector)
   187  	}
   188  	if opts.enableValueBlocks {
   189  		fmt.Fprintf(&buf, "  enable_value_blocks=%t\n", opts.enableValueBlocks)
   190  	}
   191  	if opts.asyncApplyToDB {
   192  		fmt.Fprint(&buf, "  async_apply_to_db=true\n")
   193  	}
   194  	if opts.sharedStorageEnabled {
   195  		fmt.Fprint(&buf, "  shared_storage_enabled=true\n")
   196  	}
   197  	if opts.secondaryCacheEnabled {
   198  		fmt.Fprint(&buf, "  secondary_cache_enabled=true\n")
   199  	}
   200  	if opts.seedEFOS != 0 {
   201  		fmt.Fprintf(&buf, "  seed_efos=%d\n", opts.seedEFOS)
   202  	}
   203  	if opts.ingestSplit {
   204  		fmt.Fprintf(&buf, "  ingest_split=%v\n", opts.ingestSplit)
   205  	}
   206  	if opts.useSharedReplicate {
   207  		fmt.Fprintf(&buf, "  use_shared_replicate=%v\n", opts.useSharedReplicate)
   208  	}
   209  	if opts.useExcise {
   210  		fmt.Fprintf(&buf, "  use_excise=%v\n", opts.useExcise)
   211  	}
   212  	if opts.efosAlwaysCreatesIters {
   213  		fmt.Fprintf(&buf, "  efos_always_creates_iterators=%v\n", opts.efosAlwaysCreatesIters)
   214  	}
   215  	for _, customOpt := range opts.CustomOpts {
   216  		fmt.Fprintf(&buf, "  %s=%s\n", customOpt.Name(), customOpt.Value())
   217  	}
   218  
   219  	s := opts.Opts.String()
   220  	if buf.Len() == 0 {
   221  		return s
   222  	}
   223  	return s + "\n[TestOptions]\n" + buf.String()
   224  }
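         // optionsToString and parseOptions are intended to round-trip: the string
         // produced here (including the trailing [TestOptions] stanza) can be fed
         // back through parseOptions to reconstruct an equivalent TestOptions. A
         // minimal sketch of that round trip, assuming a fresh TestOptions as the
         // destination:
         //
         //	serialized := optionsToString(testOpts)
         //	reparsed := defaultTestOptions()
         //	if err := parseOptions(reparsed, serialized, nil); err != nil {
         //		panic(err)
         //	}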
   225  
   226  func defaultTestOptions() *TestOptions {
   227  	return &TestOptions{
   228  		Opts:    defaultOptions(),
   229  		threads: 16,
   230  	}
   231  }
   232  
   233  func defaultOptions() *pebble.Options {
   234  	opts := &pebble.Options{
   235  		Comparer:           testkeys.Comparer,
   236  		FS:                 vfs.NewMem(),
   237  		FormatMajorVersion: defaultFormatMajorVersion,
   238  		Levels: []pebble.LevelOptions{{
   239  			FilterPolicy: bloom.FilterPolicy(10),
   240  		}},
   241  		BlockPropertyCollectors: blockPropertyCollectorConstructors,
   242  	}
   243  	// TODO(sumeer): add IneffectualSingleDeleteCallback that panics by
   244  	// supporting a test option that does not generate ineffectual single
   245  	// deletes.
   246  	opts.Experimental.SingleDeleteInvariantViolationCallback = func(
   247  		userKey []byte) {
   248  		panic(errors.AssertionFailedf("single del invariant violations on key %q", userKey))
   249  	}
   250  	return opts
   251  }
   252  
   253  // TestOptions describes the options configuring an individual run of the
   254  // metamorphic tests.
   255  type TestOptions struct {
   256  	// Opts holds the *pebble.Options for the test.
   257  	Opts *pebble.Options
    258  	// CustomOpts holds custom test options that are defined outside of this
    259  	// package.
   260  	CustomOpts []CustomOption
   261  	useDisk    bool
   262  	strictFS   bool
   263  	threads    int
   264  	// Use Batch.Apply rather than DB.Ingest.
   265  	ingestUsingApply bool
   266  	// Use Batch.DeleteSized rather than Batch.Delete.
   267  	deleteSized bool
   268  	// Replace a SINGLEDEL with a DELETE.
   269  	replaceSingleDelete bool
   270  	// The path on the local filesystem where the initial state of the database
   271  	// exists.  Empty if the test run begins from an empty database state.
   272  	initialStatePath string
   273  	// A human-readable string describing the initial state of the database.
   274  	// Empty if the test run begins from an empty database state.
   275  	initialStateDesc string
   276  	// Disable the block property collector, which may be used by block property
   277  	// filters.
   278  	disableBlockPropertyCollector bool
   279  	// Enable the use of value blocks.
   280  	enableValueBlocks bool
   281  	// Use DB.ApplyNoSyncWait for applies that want to sync the WAL.
   282  	asyncApplyToDB bool
   283  	// Enable the use of shared storage.
   284  	sharedStorageEnabled bool
   285  	// sharedStorageFS stores the remote.Storage that is being used with shared
   286  	// storage.
   287  	sharedStorageFS remote.Storage
   288  	// Enables the use of shared replication in TestOptions.
   289  	useSharedReplicate bool
   290  	// Enable the secondary cache. Only effective if sharedStorageEnabled is
   291  	// also true.
   292  	secondaryCacheEnabled bool
   293  	// If nonzero, enables the use of EventuallyFileOnlySnapshots for
   294  	// newSnapshotOps that are keyspan-bounded. The set of which newSnapshotOps
   295  	// are actually created as EventuallyFileOnlySnapshots is deterministically
   296  	// derived from the seed and the operation index.
   297  	seedEFOS uint64
   298  	// Enables ingest splits. Saved here for serialization as Options does not
   299  	// serialize this.
   300  	ingestSplit bool
   301  	// Enables operations that do excises. Note that a false value for this does
   302  	// not guarantee the lack of excises, as useSharedReplicate can also cause
   303  	// excises. However !useExcise && !useSharedReplicate can be used to guarantee
   304  	// lack of excises.
   305  	useExcise bool
   306  	// Enables EFOS to always create iterators, even if a conflicting excise
   307  	// happens. Used to guarantee EFOS determinism when conflicting excises are
   308  	// in play. If false, EFOS determinism is maintained by having the DB do a
   309  	// flush after every new EFOS.
   310  	efosAlwaysCreatesIters bool
   311  }
   312  
   313  // CustomOption defines a custom option that configures the behavior of an
   314  // individual test run. Like all test options, custom options are serialized to
   315  // the OPTIONS file even if they're not options ordinarily understood by Pebble.
   316  type CustomOption interface {
   317  	// Name returns the name of the custom option. This is the key under which
   318  	// the option appears in the OPTIONS file, within the [TestOptions] stanza.
   319  	Name() string
   320  	// Value returns the value of the custom option, serialized as it should
   321  	// appear within the OPTIONS file.
   322  	Value() string
   323  	// Close is run after the test database has been closed at the end of the
   324  	// test as well as during restart operations within the test sequence. It's
    325  	// passed a copy of the *pebble.Options. If the custom option holds on to
    326  	// any outside resources, Close should release them.
   327  	Close(*pebble.Options) error
   328  	// Open is run before the test runs and during a restart operation after the
   329  	// test database has been closed and Close has been called. It's passed a
    330  	// copy of the *pebble.Options. If the custom option must acquire any
    331  	// resources before the test continues, Open should reacquire them.
   332  	Open(*pebble.Options) error
   333  
   334  	// TODO(jackson): provide additional hooks for custom options changing the
   335  	// behavior of a run.
   336  }
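         // As an illustrative sketch (no such option exists in this package), an
         // external package could provide a trivial option that only round-trips a
         // label through the OPTIONS file:
         //
         //	type labelOption struct{ label string }
         //
         //	func (o labelOption) Name() string                { return "label" }
         //	func (o labelOption) Value() string               { return o.label }
         //	func (o labelOption) Open(*pebble.Options) error  { return nil }
         //	func (o labelOption) Close(*pebble.Options) error { return nil }
         //
         // and register a parser for it through the customOptionParsers argument
         // of parseOptions, keyed by the option's name ("label").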
   337  
   338  func standardOptions() []*TestOptions {
   339  	// The index labels are not strictly necessary, but they make it easier to
   340  	// find which options correspond to a failure.
   341  	stdOpts := []string{
   342  		0: "", // default options
   343  		1: `
   344  [Options]
   345    cache_size=1
   346  `,
   347  		2: `
   348  [Options]
   349    disable_wal=true
   350  `,
   351  		3: `
   352  [Options]
   353    l0_compaction_threshold=1
   354  `,
   355  		4: `
   356  [Options]
   357    l0_compaction_threshold=1
   358    l0_stop_writes_threshold=1
   359  `,
   360  		5: `
   361  [Options]
   362    lbase_max_bytes=1
   363  `,
   364  		6: `
   365  [Options]
   366    max_manifest_file_size=1
   367  `,
   368  		7: `
   369  [Options]
   370    max_open_files=1
   371  `,
   372  		8: `
   373  [Options]
   374    mem_table_size=2000
   375  `,
   376  		9: `
   377  [Options]
   378    mem_table_stop_writes_threshold=2
   379  `,
   380  		10: `
   381  [Options]
   382    wal_dir=data/wal
   383  `,
   384  		11: `
   385  [Level "0"]
   386    block_restart_interval=1
   387  `,
   388  		12: `
   389  [Level "0"]
   390    block_size=1
   391  `,
   392  		13: `
   393  [Level "0"]
   394    compression=NoCompression
   395  `,
   396  		14: `
   397  [Level "0"]
   398    index_block_size=1
   399  `,
   400  		15: `
   401  [Level "0"]
   402    target_file_size=1
   403  `,
   404  		16: `
   405  [Level "0"]
   406    filter_policy=none
   407  `,
   408  		// 1GB
   409  		17: `
   410  [Options]
   411    bytes_per_sync=1073741824
   412  [TestOptions]
   413    strictfs=true
   414  `,
   415  		18: `
   416  [Options]
   417    max_concurrent_compactions=2
   418  `,
   419  		19: `
   420  [TestOptions]
   421    ingest_using_apply=true
   422  `,
   423  		20: `
   424  [TestOptions]
   425    replace_single_delete=true
   426  `,
   427  		21: `
   428  [TestOptions]
   429   use_disk=true
   430  `,
   431  		22: `
   432  [Options]
   433    max_writer_concurrency=2
   434    force_writer_parallelism=true
   435  `,
   436  		23: `
   437  [TestOptions]
   438    disable_block_property_collector=true
   439  `,
   440  		24: `
   441  [TestOptions]
   442    threads=1
   443  `,
   444  		25: `
   445  [TestOptions]
   446    enable_value_blocks=true
   447  `,
   448  		26: fmt.Sprintf(`
   449  [Options]
   450    format_major_version=%s
   451  `, newestFormatMajorVersionToTest),
   452  		27: `
   453  [TestOptions]
   454    shared_storage_enabled=true
   455    secondary_cache_enabled=true
   456  `,
   457  	}
   458  
   459  	opts := make([]*TestOptions, len(stdOpts))
   460  	for i := range opts {
   461  		opts[i] = defaultTestOptions()
   462  		// NB: The standard options by definition can never include custom
   463  		// options, so no need to propagate custom option parsers.
   464  		if err := parseOptions(opts[i], stdOpts[i], nil /* custom option parsers */); err != nil {
   465  			panic(err)
   466  		}
   467  	}
   468  	return opts
   469  }
   470  
   471  func randomOptions(
   472  	rng *rand.Rand, customOptionParsers map[string]func(string) (CustomOption, bool),
   473  ) *TestOptions {
   474  	testOpts := defaultTestOptions()
   475  	opts := testOpts.Opts
   476  
    477  	// There are some private options which we don't want users to fiddle with.
    478  	// They cannot be set through the public interface; the only way to set them
    479  	// is through Parse.
   480  	{
   481  		var privateOpts bytes.Buffer
   482  		fmt.Fprintln(&privateOpts, `[Options]`)
   483  		if rng.Intn(3) == 0 /* 33% */ {
   484  			fmt.Fprintln(&privateOpts, `  disable_delete_only_compactions=true`)
   485  		}
   486  		if rng.Intn(3) == 0 /* 33% */ {
   487  			fmt.Fprintln(&privateOpts, `  disable_elision_only_compactions=true`)
   488  		}
   489  		if rng.Intn(5) == 0 /* 20% */ {
   490  			fmt.Fprintln(&privateOpts, `  disable_lazy_combined_iteration=true`)
   491  		}
    492  		if privateOptsStr := privateOpts.String(); privateOptsStr != "[Options]\n" {
   493  			parseOptions(testOpts, privateOptsStr, customOptionParsers)
   494  		}
   495  	}
   496  
    497  	opts.BytesPerSync = 1 << uint(rng.Intn(28))     // 1B - 128MB
    498  	opts.Cache = cache.New(1 << uint(rng.Intn(30))) // 1B - 512MB
    499  	opts.DisableWAL = rng.Intn(2) == 0
    500  	opts.FlushDelayDeleteRange = time.Millisecond * time.Duration(5*rng.Intn(245)) // 0-1220ms, in 5ms steps
    501  	opts.FlushDelayRangeKey = time.Millisecond * time.Duration(5*rng.Intn(245))    // 0-1220ms, in 5ms steps
    502  	opts.FlushSplitBytes = 1 << rng.Intn(20)                                       // 1B - 512KB
   503  	opts.FormatMajorVersion = minimumFormatMajorVersion
   504  	n := int(newestFormatMajorVersionToTest - opts.FormatMajorVersion)
   505  	opts.FormatMajorVersion += pebble.FormatMajorVersion(rng.Intn(n + 1))
   506  	opts.Experimental.L0CompactionConcurrency = 1 + rng.Intn(4) // 1-4
   507  	opts.Experimental.LevelMultiplier = 5 << rng.Intn(7)        // 5 - 320
    508  	opts.TargetByteDeletionRate = 1 << uint(20+rng.Intn(10))    // 1MB - 512MB
   509  	opts.Experimental.ValidateOnIngest = rng.Intn(2) != 0
   510  	opts.L0CompactionThreshold = 1 + rng.Intn(100)     // 1 - 100
   511  	opts.L0CompactionFileThreshold = 1 << rng.Intn(11) // 1 - 1024
   512  	opts.L0StopWritesThreshold = 1 + rng.Intn(100)     // 1 - 100
   513  	if opts.L0StopWritesThreshold < opts.L0CompactionThreshold {
   514  		opts.L0StopWritesThreshold = opts.L0CompactionThreshold
   515  	}
    516  	opts.LBaseMaxBytes = 1 << uint(rng.Intn(30)) // 1B - 512MB
   517  	maxConcurrentCompactions := rng.Intn(3) + 1  // 1-3
   518  	opts.MaxConcurrentCompactions = func() int {
   519  		return maxConcurrentCompactions
   520  	}
    521  	opts.MaxManifestFileSize = 1 << uint(rng.Intn(30)) // 1B - 512MB
    522  	opts.MemTableSize = 2 << (10 + uint(rng.Intn(16))) // 2KB - 64MB
    523  	opts.MemTableStopWritesThreshold = 2 + rng.Intn(5) // 2 - 6
   524  	if rng.Intn(2) == 0 {
   525  		opts.WALDir = "data/wal"
   526  	}
   527  	if rng.Intn(4) == 0 {
   528  		// Enable Writer parallelism for 25% of the random options. Setting
   529  		// MaxWriterConcurrency to any value greater than or equal to 1 has the
   530  		// same effect currently.
   531  		opts.Experimental.MaxWriterConcurrency = 2
   532  		opts.Experimental.ForceWriterParallelism = true
   533  	}
   534  	if rng.Intn(2) == 0 {
   535  		opts.Experimental.DisableIngestAsFlushable = func() bool { return true }
   536  	}
   537  
   538  	// We either use no multilevel compactions, multilevel compactions with the
   539  	// default (zero) additional propensity, or multilevel compactions with an
   540  	// additional propensity to encourage more multilevel compactions than we
    541  	// otherwise would.
   542  	switch rng.Intn(3) {
   543  	case 0:
   544  		opts.Experimental.MultiLevelCompactionHeuristic = pebble.NoMultiLevel{}
   545  	case 1:
   546  		opts.Experimental.MultiLevelCompactionHeuristic = pebble.WriteAmpHeuristic{}
   547  	default:
   548  		opts.Experimental.MultiLevelCompactionHeuristic = pebble.WriteAmpHeuristic{
    549  			AddPropensity: rng.Float64() * float64(rng.Intn(3)), // [0, 2.0)
   550  			AllowL0:       rng.Intn(4) == 1,                     // 25% of the time
   551  		}
   552  	}
   553  
   554  	var lopts pebble.LevelOptions
   555  	lopts.BlockRestartInterval = 1 + rng.Intn(64)  // 1 - 64
    556  	lopts.BlockSize = 1 << uint(rng.Intn(24))      // 1B - 8MB
    557  	lopts.BlockSizeThreshold = 50 + rng.Intn(50)   // 50 - 99
    558  	lopts.IndexBlockSize = 1 << uint(rng.Intn(24)) // 1B - 8MB
    559  	lopts.TargetFileSize = 1 << uint(rng.Intn(28)) // 1B - 128MB
   560  
   561  	// We either use no bloom filter, the default filter, or a filter with
   562  	// randomized bits-per-key setting. We zero out the Filters map. It'll get
   563  	// repopulated on EnsureDefaults accordingly.
   564  	opts.Filters = nil
   565  	switch rng.Intn(3) {
   566  	case 0:
   567  		lopts.FilterPolicy = nil
   568  	case 1:
   569  		lopts.FilterPolicy = bloom.FilterPolicy(10)
   570  	default:
   571  		lopts.FilterPolicy = newTestingFilterPolicy(1 << rng.Intn(5))
   572  	}
   573  
   574  	// We use either no compression, snappy compression or zstd compression.
   575  	switch rng.Intn(3) {
   576  	case 0:
   577  		lopts.Compression = pebble.NoCompression
   578  	case 1:
   579  		lopts.Compression = pebble.ZstdCompression
   580  	default:
   581  		lopts.Compression = pebble.SnappyCompression
   582  	}
   583  	opts.Levels = []pebble.LevelOptions{lopts}
   584  
   585  	// Explicitly disable disk-backed FS's for the random configurations. The
   586  	// single standard test configuration that uses a disk-backed FS is
   587  	// sufficient.
   588  	testOpts.useDisk = false
   589  	testOpts.strictFS = rng.Intn(2) != 0 // Only relevant for MemFS.
   590  	testOpts.threads = rng.Intn(runtime.GOMAXPROCS(0)) + 1
   591  	if testOpts.strictFS {
   592  		opts.DisableWAL = false
   593  	}
   594  	testOpts.ingestUsingApply = rng.Intn(2) != 0
   595  	testOpts.deleteSized = rng.Intn(2) != 0
   596  	testOpts.replaceSingleDelete = rng.Intn(2) != 0
   597  	testOpts.disableBlockPropertyCollector = rng.Intn(2) == 1
   598  	if testOpts.disableBlockPropertyCollector {
   599  		testOpts.Opts.BlockPropertyCollectors = nil
   600  	}
   601  	testOpts.enableValueBlocks = opts.FormatMajorVersion >= pebble.FormatSSTableValueBlocks &&
   602  		rng.Intn(2) != 0
   603  	if testOpts.enableValueBlocks {
   604  		testOpts.Opts.Experimental.EnableValueBlocks = func() bool { return true }
   605  	}
   606  	testOpts.asyncApplyToDB = rng.Intn(2) != 0
    607  	// 20% of the time, enable shared storage.
   608  	if rng.Intn(5) == 0 {
   609  		testOpts.sharedStorageEnabled = true
   610  		inMemShared := remote.NewInMem()
   611  		testOpts.Opts.Experimental.RemoteStorage = remote.MakeSimpleFactory(map[remote.Locator]remote.Storage{
   612  			"": inMemShared,
   613  		})
   614  		testOpts.sharedStorageFS = inMemShared
   615  		// If shared storage is enabled, pick between writing all files on shared
   616  		// vs. lower levels only, 50% of the time.
   617  		testOpts.Opts.Experimental.CreateOnShared = remote.CreateOnSharedAll
   618  		if rng.Intn(2) == 0 {
   619  			testOpts.Opts.Experimental.CreateOnShared = remote.CreateOnSharedLower
   620  		}
   621  		// If shared storage is enabled, enable secondary cache 50% of time.
   622  		if rng.Intn(2) == 0 {
   623  			testOpts.secondaryCacheEnabled = true
   624  			// TODO(josh): Randomize various secondary cache settings.
   625  			testOpts.Opts.Experimental.SecondaryCacheSizeBytes = 1024 * 1024 * 32 // 32 MBs
   626  		}
   627  		// 50% of the time, enable shared replication.
   628  		testOpts.useSharedReplicate = rng.Intn(2) == 0
   629  	}
   630  	testOpts.seedEFOS = rng.Uint64()
   631  	// TODO(bilal): Enable ingestSplit when known bugs with virtual sstables
   632  	// are addressed.
   633  	//
   634  	// testOpts.ingestSplit = rng.Intn(2) == 0
   635  	opts.Experimental.IngestSplit = func() bool { return testOpts.ingestSplit }
   636  	testOpts.useExcise = rng.Intn(2) == 0
   637  	if testOpts.useExcise || testOpts.useSharedReplicate {
   638  		testOpts.efosAlwaysCreatesIters = rng.Intn(2) == 0
   639  		opts.TestingAlwaysCreateEFOSIterators(testOpts.efosAlwaysCreatesIters)
   640  		if testOpts.Opts.FormatMajorVersion < pebble.FormatVirtualSSTables {
   641  			testOpts.Opts.FormatMajorVersion = pebble.FormatVirtualSSTables
   642  		}
   643  	}
   644  	testOpts.Opts.EnsureDefaults()
   645  	return testOpts
   646  }
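         // A typical caller (illustrative; the test harness that drives this lives
         // outside this file) seeds the rng so a failing configuration can be
         // regenerated, then serializes the result for the run's OPTIONS file.
         // Here seed is whatever uint64 the caller chooses:
         //
         //	rng := rand.New(rand.NewSource(seed))
         //	testOpts := randomOptions(rng, nil /* customOptionParsers */)
         //	optionsStr := optionsToString(testOpts)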
   647  
   648  func setupInitialState(dataDir string, testOpts *TestOptions) error {
    649  	// Copy (vfs.Default,<initialStatePath>/data) to (testOpts.Opts.FS,<dataDir>).
   650  	ok, err := vfs.Clone(
   651  		vfs.Default,
   652  		testOpts.Opts.FS,
   653  		vfs.Default.PathJoin(testOpts.initialStatePath, "data"),
   654  		dataDir,
   655  		vfs.CloneSync,
   656  		vfs.CloneSkip(func(filename string) bool {
   657  			// Skip the archive of historical files, any checkpoints created by
   658  			// operations and files staged for ingest in tmp.
   659  			b := filepath.Base(filename)
   660  			return b == "archive" || b == "checkpoints" || b == "tmp"
   661  		}))
   662  	if err != nil {
   663  		return err
   664  	} else if !ok {
   665  		return os.ErrNotExist
   666  	}
   667  
   668  	// Tests with wal_dir set store their WALs in a `wal` directory. The source
   669  	// database (initialStatePath) could've had wal_dir set, or the current test
   670  	// options (testOpts) could have wal_dir set, or both.
   671  	fs := testOpts.Opts.FS
   672  	walDir := fs.PathJoin(dataDir, "wal")
   673  	if err := fs.MkdirAll(walDir, os.ModePerm); err != nil {
   674  		return err
   675  	}
   676  
    677  	// Move <dataDir>/wal/*.log -> <dataDir>.
   678  	src, dst := walDir, dataDir
   679  	if testOpts.Opts.WALDir != "" {
    680  		// Move <dataDir>/*.log -> <dataDir>/wal.
   681  		src, dst = dst, src
   682  	}
   683  	return moveLogs(fs, src, dst)
   684  }
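         // For example, if the initial state was captured from a run with
         // wal_dir=data/wal but the current test options leave WALDir empty, the
         // *.log files must move from <dataDir>/wal back into <dataDir> before the
         // database is opened; with WALDir set, the logs move in the opposite
         // direction. In either case both directories exist by this point (dataDir
         // from the clone, the wal directory from MkdirAll), so moveLogs only has
         // to rename the log files.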
   685  
   686  func moveLogs(fs vfs.FS, srcDir, dstDir string) error {
   687  	ls, err := fs.List(srcDir)
   688  	if err != nil {
   689  		return err
   690  	}
   691  	for _, f := range ls {
   692  		if filepath.Ext(f) != ".log" {
   693  			continue
   694  		}
   695  		src := fs.PathJoin(srcDir, f)
   696  		dst := fs.PathJoin(dstDir, f)
   697  		if err := fs.Rename(src, dst); err != nil {
   698  			return err
   699  		}
   700  	}
   701  	return nil
   702  }
   703  
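         // blockPropertyCollectorConstructors is the set of block property
         // collectors that test databases use by default (unless
         // disable_block_property_collector clears it).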
   704  var blockPropertyCollectorConstructors = []func() pebble.BlockPropertyCollector{
   705  	sstable.NewTestKeysBlockPropertyCollector,
   706  }
   707  
   708  // testingFilterPolicy is used to allow bloom filter policies with non-default
   709  // bits-per-key setting. It is necessary because the name of the production
   710  // filter policy is fixed (see bloom.FilterPolicy.Name()); we need to output a
   711  // custom policy name to the OPTIONS file that the test can then parse.
   712  type testingFilterPolicy struct {
   713  	bloom.FilterPolicy
   714  }
   715  
   716  var _ pebble.FilterPolicy = (*testingFilterPolicy)(nil)
   717  
   718  func newTestingFilterPolicy(bitsPerKey int) *testingFilterPolicy {
   719  	return &testingFilterPolicy{
   720  		FilterPolicy: bloom.FilterPolicy(bitsPerKey),
   721  	}
   722  }
   723  
   724  const testingFilterPolicyFmt = "testing_bloom_filter/bits_per_key=%d"
   725  
   726  // Name implements the pebble.FilterPolicy interface.
   727  func (t *testingFilterPolicy) Name() string {
   728  	if t.FilterPolicy == 10 {
   729  		return "rocksdb.BuiltinBloomFilter"
   730  	}
   731  	return fmt.Sprintf(testingFilterPolicyFmt, t.FilterPolicy)
   732  }
   733  
   734  func filterPolicyFromName(name string) (pebble.FilterPolicy, error) {
   735  	switch name {
   736  	case "none":
   737  		return nil, nil
   738  	case "rocksdb.BuiltinBloomFilter":
   739  		return bloom.FilterPolicy(10), nil
   740  	}
   741  	var bitsPerKey int
   742  	if _, err := fmt.Sscanf(name, testingFilterPolicyFmt, &bitsPerKey); err != nil {
   743  		return nil, errors.Errorf("Invalid filter policy name '%s'", name)
   744  	}
   745  	return newTestingFilterPolicy(bitsPerKey), nil
   746  }
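         // The two directions are intended to round-trip through the OPTIONS file.
         // For example (illustrative):
         //
         //	p := newTestingFilterPolicy(4)
         //	name := p.Name() // "testing_bloom_filter/bits_per_key=4"
         //	q, err := filterPolicyFromName(name)
         //
         // yields a policy with the same 4 bits per key, while the fixed name
         // "rocksdb.BuiltinBloomFilter" maps back to the default 10-bits-per-key
         // bloom filter.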