github.com/cockroachdb/pebble@v1.1.1-0.20240513155919-3622ade60459/metamorphic/options.go

     1  // Copyright 2019 The LevelDB-Go and Pebble Authors. All rights reserved. Use
     2  // of this source code is governed by a BSD-style license that can be found in
     3  // the LICENSE file.
     4  
     5  package metamorphic
     6  
     7  import (
     8  	"bytes"
     9  	"fmt"
    10  	"os"
    11  	"path/filepath"
    12  	"runtime"
    13  	"strconv"
    14  	"strings"
    15  	"time"
    16  
    17  	"github.com/cockroachdb/errors"
    18  	"github.com/cockroachdb/pebble"
    19  	"github.com/cockroachdb/pebble/bloom"
    20  	"github.com/cockroachdb/pebble/internal/cache"
    21  	"github.com/cockroachdb/pebble/internal/testkeys"
    22  	"github.com/cockroachdb/pebble/objstorage/remote"
    23  	"github.com/cockroachdb/pebble/sstable"
    24  	"github.com/cockroachdb/pebble/vfs"
    25  	"golang.org/x/exp/rand"
    26  )
    27  
    28  const (
    29  	// The metamorphic test exercises range keys, so we cannot use an older
    30  	// FormatMajorVersion than pebble.FormatRangeKeys.
    31  	minimumFormatMajorVersion = pebble.FormatRangeKeys
    32  	// The format major version to use in the default options configurations. We
    33  	// default to the last format major version of Cockroach 22.2 so we exercise
    34  	// the runtime version ratcheting that a cluster upgrading to 23.1 would
    35  	// experience. The randomized options may still use format major versions
    36  	// that are less than defaultFormatMajorVersion but are at least
    37  	// minimumFormatMajorVersion.
    38  	defaultFormatMajorVersion = pebble.FormatPrePebblev1Marked
    39  	// newestFormatMajorVersionToTest is the most recent format major version
    40  	// the metamorphic tests should use. This may be greater than
    41  	// pebble.FormatNewest when some format major versions are marked as
    42  	// experimental.
    43  	newestFormatMajorVersionToTest = pebble.FormatNewest
    44  )
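
// Hedged sketch, not part of the original file: the constants above are meant
// to exercise runtime format major version ratcheting. A database opened at
// defaultFormatMajorVersion can later be ratcheted up to
// newestFormatMajorVersionToTest, roughly as below. exampleRatchetFormat is a
// hypothetical helper.
func exampleRatchetFormat(db *pebble.DB) error {
	if db.FormatMajorVersion() < newestFormatMajorVersionToTest {
		// RatchetFormatMajorVersion steps through each intermediate version.
		return db.RatchetFormatMajorVersion(newestFormatMajorVersionToTest)
	}
	return nil
}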
    45  
    46  func parseOptions(
    47  	opts *TestOptions, data string, customOptionParsers map[string]func(string) (CustomOption, bool),
    48  ) error {
    49  	hooks := &pebble.ParseHooks{
    50  		NewCache:        pebble.NewCache,
    51  		NewFilterPolicy: filterPolicyFromName,
    52  		SkipUnknown: func(name, value string) bool {
    53  			switch name {
    54  			case "TestOptions":
    55  				return true
    56  			case "TestOptions.strictfs":
    57  				opts.strictFS = true
    58  				return true
    59  			case "TestOptions.ingest_using_apply":
    60  				opts.ingestUsingApply = true
    61  				return true
    62  			case "TestOptions.delete_sized":
    63  				opts.deleteSized = true
    64  				return true
    65  			case "TestOptions.replace_single_delete":
    66  				opts.replaceSingleDelete = true
    67  				return true
    68  			case "TestOptions.use_disk":
    69  				opts.useDisk = true
    70  				return true
    71  			case "TestOptions.initial_state_desc":
    72  				opts.initialStateDesc = value
    73  				return true
    74  			case "TestOptions.initial_state_path":
    75  				opts.initialStatePath = value
    76  				return true
    77  			case "TestOptions.threads":
    78  				v, err := strconv.Atoi(value)
    79  				if err != nil {
    80  					panic(err)
    81  				}
    82  				opts.threads = v
    83  				return true
    84  			case "TestOptions.disable_block_property_collector":
    85  				v, err := strconv.ParseBool(value)
    86  				if err != nil {
    87  					panic(err)
    88  				}
    89  				opts.disableBlockPropertyCollector = v
    90  				if v {
    91  					opts.Opts.BlockPropertyCollectors = nil
    92  				}
    93  				return true
    94  			case "TestOptions.enable_value_blocks":
    95  				opts.enableValueBlocks = true
    96  				opts.Opts.Experimental.EnableValueBlocks = func() bool { return true }
    97  				return true
    98  			case "TestOptions.async_apply_to_db":
    99  				opts.asyncApplyToDB = true
   100  				return true
   101  			case "TestOptions.shared_storage_enabled":
   102  				opts.sharedStorageEnabled = true
   103  				opts.Opts.Experimental.RemoteStorage = remote.MakeSimpleFactory(map[remote.Locator]remote.Storage{
   104  					"": remote.NewInMem(),
   105  				})
   106  				if opts.Opts.Experimental.CreateOnShared == remote.CreateOnSharedNone {
   107  					opts.Opts.Experimental.CreateOnShared = remote.CreateOnSharedAll
   108  				}
   109  				return true
   110  			case "TestOptions.secondary_cache_enabled":
   111  				opts.secondaryCacheEnabled = true
   112  				opts.Opts.Experimental.SecondaryCacheSizeBytes = 1024 * 1024 * 32 // 32 MBs
   113  				return true
   114  			case "TestOptions.seed_efos":
   115  				v, err := strconv.ParseUint(value, 10, 64)
   116  				if err != nil {
   117  					panic(err)
   118  				}
   119  				opts.seedEFOS = v
   120  				return true
   121  			case "TestOptions.ingest_split":
   122  				opts.ingestSplit = true
   123  				opts.Opts.Experimental.IngestSplit = func() bool {
   124  					return true
   125  				}
   126  				return true
   127  			default:
   128  				if customOptionParsers == nil {
   129  					return false
   130  				}
   131  				name = strings.TrimPrefix(name, "TestOptions.")
   132  				if p, ok := customOptionParsers[name]; ok {
   133  					if customOpt, ok := p(value); ok {
   134  						opts.CustomOpts = append(opts.CustomOpts, customOpt)
   135  						return true
   136  					}
   137  				}
   138  				return false
   139  			}
   140  		},
   141  	}
   142  	err := opts.Opts.Parse(data, hooks)
   143  	opts.Opts.EnsureDefaults()
   144  	return err
   145  }
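
// Hedged usage sketch (hypothetical helper, not part of the original file):
// parseOptions recognizes [TestOptions] keys through the SkipUnknown hook
// above, while standard [Options] fields are handled by pebble's own parser.
func exampleParseTestOptions() (*TestOptions, error) {
	opts := defaultTestOptions()
	data := `
[Options]
  disable_wal=true
[TestOptions]
  ingest_using_apply=true
  threads=4
`
	if err := parseOptions(opts, data, nil /* no custom option parsers */); err != nil {
		return nil, err
	}
	// At this point opts.Opts.DisableWAL, opts.ingestUsingApply, and
	// opts.threads reflect the parsed values.
	return opts, nil
}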
   146  
   147  func optionsToString(opts *TestOptions) string {
   148  	var buf bytes.Buffer
   149  	if opts.strictFS {
   150  		fmt.Fprint(&buf, "  strictfs=true\n")
   151  	}
   152  	if opts.ingestUsingApply {
   153  		fmt.Fprint(&buf, "  ingest_using_apply=true\n")
   154  	}
   155  	if opts.deleteSized {
   156  		fmt.Fprint(&buf, "  delete_sized=true\n")
   157  	}
   158  	if opts.replaceSingleDelete {
   159  		fmt.Fprint(&buf, "  replace_single_delete=true\n")
   160  	}
   161  	if opts.useDisk {
   162  		fmt.Fprint(&buf, "  use_disk=true\n")
   163  	}
   164  	if opts.initialStatePath != "" {
   165  		fmt.Fprintf(&buf, "  initial_state_path=%s\n", opts.initialStatePath)
   166  	}
   167  	if opts.initialStateDesc != "" {
   168  		fmt.Fprintf(&buf, "  initial_state_desc=%s\n", opts.initialStateDesc)
   169  	}
   170  	if opts.threads != 0 {
   171  		fmt.Fprintf(&buf, "  threads=%d\n", opts.threads)
   172  	}
   173  	if opts.disableBlockPropertyCollector {
   174  		fmt.Fprintf(&buf, "  disable_block_property_collector=%t\n", opts.disableBlockPropertyCollector)
   175  	}
   176  	if opts.enableValueBlocks {
   177  		fmt.Fprintf(&buf, "  enable_value_blocks=%t\n", opts.enableValueBlocks)
   178  	}
   179  	if opts.asyncApplyToDB {
   180  		fmt.Fprint(&buf, "  async_apply_to_db=true\n")
   181  	}
   182  	if opts.sharedStorageEnabled {
   183  		fmt.Fprint(&buf, "  shared_storage_enabled=true\n")
   184  	}
   185  	if opts.secondaryCacheEnabled {
   186  		fmt.Fprint(&buf, "  secondary_cache_enabled=true\n")
   187  	}
   188  	if opts.seedEFOS != 0 {
   189  		fmt.Fprintf(&buf, "  seed_efos=%d\n", opts.seedEFOS)
   190  	}
   191  	if opts.ingestSplit {
   192  		fmt.Fprintf(&buf, "  ingest_split=%v\n", opts.ingestSplit)
   193  	}
   194  	for _, customOpt := range opts.CustomOpts {
   195  		fmt.Fprintf(&buf, "  %s=%s\n", customOpt.Name(), customOpt.Value())
   196  	}
   197  
   198  	s := opts.Opts.String()
   199  	if buf.Len() == 0 {
   200  		return s
   201  	}
   202  	return s + "\n[TestOptions]\n" + buf.String()
   203  }
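
// Hedged sketch (hypothetical helper): optionsToString and parseOptions are
// intended to round-trip, which is how a test's configuration is written to
// and later recovered from its OPTIONS file.
func roundTripTestOptions(src *TestOptions) (*TestOptions, error) {
	dst := defaultTestOptions()
	if err := parseOptions(dst, optionsToString(src), nil /* customOptionParsers */); err != nil {
		return nil, err
	}
	return dst, nil
}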
   204  
   205  func defaultTestOptions() *TestOptions {
   206  	return &TestOptions{
   207  		Opts:    defaultOptions(),
   208  		threads: 16,
   209  	}
   210  }
   211  
   212  func defaultOptions() *pebble.Options {
   213  	opts := &pebble.Options{
   214  		Comparer:           testkeys.Comparer,
   215  		FS:                 vfs.NewMem(),
   216  		FormatMajorVersion: defaultFormatMajorVersion,
   217  		Levels: []pebble.LevelOptions{{
   218  			FilterPolicy: bloom.FilterPolicy(10),
   219  		}},
   220  		BlockPropertyCollectors: blockPropertyCollectorConstructors,
   221  	}
   222  	// TODO(sumeer): add an IneffectualSingleDeleteCallback that panics, by
   223  	// first supporting a test option under which the test does not generate
   224  	// ineffectual single deletes.
   225  	opts.Experimental.SingleDeleteInvariantViolationCallback = func(
   226  		userKey []byte) {
   227  		panic(errors.AssertionFailedf("single del invariant violations on key %q", userKey))
   228  	}
   229  	return opts
   230  }
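
// Hedged sketch (hypothetical helper): the defaults above use an in-memory
// vfs, so a database opened with them leaves nothing on disk.
func exampleOpenWithDefaults() error {
	db, err := pebble.Open("", defaultOptions())
	if err != nil {
		return err
	}
	return db.Close()
}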
   231  
   232  // TestOptions describes the options configuring an individual run of the
   233  // metamorphic tests.
   234  type TestOptions struct {
   235  	// Opts holds the *pebble.Options for the test.
   236  	Opts *pebble.Options
   237  	// CustomOpts holds custom test options that are defined outside of this
   238  	// package.
   239  	CustomOpts []CustomOption
   240  	useDisk    bool
   241  	strictFS   bool
   242  	threads    int
   243  	// Use Batch.Apply rather than DB.Ingest.
   244  	ingestUsingApply bool
   245  	// Use Batch.DeleteSized rather than Batch.Delete.
   246  	deleteSized bool
   247  	// Replace a SINGLEDEL with a DELETE.
   248  	replaceSingleDelete bool
   249  	// The path on the local filesystem where the initial state of the database
   250  	// exists.  Empty if the test run begins from an empty database state.
   251  	initialStatePath string
   252  	// A human-readable string describing the initial state of the database.
   253  	// Empty if the test run begins from an empty database state.
   254  	initialStateDesc string
   255  	// Disable the block property collector, which may be used by block property
   256  	// filters.
   257  	disableBlockPropertyCollector bool
   258  	// Enable the use of value blocks.
   259  	enableValueBlocks bool
   260  	// Use DB.ApplyNoSyncWait for applies that want to sync the WAL.
   261  	asyncApplyToDB bool
   262  	// Enable the use of shared storage.
   263  	sharedStorageEnabled bool
   264  	// Enable the secondary cache. Only effective if sharedStorageEnabled is
   265  	// also true.
   266  	secondaryCacheEnabled bool
   267  	// If nonzero, enables the use of EventuallyFileOnlySnapshots for
   268  	// newSnapshotOps that are keyspan-bounded. The set of which newSnapshotOps
   269  	// are actually created as EventuallyFileOnlySnapshots is deterministically
   270  	// derived from the seed and the operation index.
   271  	seedEFOS uint64
   272  	// Enables ingest splits. Saved here for serialization as Options does not
   273  	// serialize this.
   274  	ingestSplit bool
   275  }
   276  
   277  // CustomOption defines a custom option that configures the behavior of an
   278  // individual test run. Like all test options, custom options are serialized to
   279  // the OPTIONS file even if they're not options ordinarily understood by Pebble.
   280  type CustomOption interface {
   281  	// Name returns the name of the custom option. This is the key under which
   282  	// the option appears in the OPTIONS file, within the [TestOptions] stanza.
   283  	Name() string
   284  	// Value returns the value of the custom option, serialized as it should
   285  	// appear within the OPTIONS file.
   286  	Value() string
   287  	// Close is run after the test database has been closed at the end of the
   288  	// test as well as during restart operations within the test sequence. It's
   289  	// passed a copy of the *pebble.Options. If the custom option holds on to
   290  	// any external resources, Close should release them.
   291  	Close(*pebble.Options) error
   292  	// Open is run before the test runs and during a restart operation after the
   293  	// test database has been closed and Close has been called. It's passed a
   294  	// copy of the *pebble.Options. If the custom option must acquire any
   295  	// resources before the test continues, Open should reacquire them.
   296  	Open(*pebble.Options) error
   297  
   298  	// TODO(jackson): provide additional hooks for custom options changing the
   299  	// behavior of a run.
   300  }
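
// Hedged sketch of a CustomOption implementation (hypothetical; real custom
// options are defined outside this package and surfaced through the
// customOptionParsers argument to parseOptions). A no-op option only needs to
// round-trip its name and value.
type noopCustomOption struct {
	name, value string
}

func (o noopCustomOption) Name() string                { return o.name }
func (o noopCustomOption) Value() string               { return o.value }
func (o noopCustomOption) Close(*pebble.Options) error { return nil }
func (o noopCustomOption) Open(*pebble.Options) error  { return nil }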
   301  
   302  func standardOptions() []*TestOptions {
   303  	// The index labels are not strictly necessary, but they make it easier to
   304  	// find which options correspond to a failure.
   305  	stdOpts := []string{
   306  		0: "", // default options
   307  		1: `
   308  [Options]
   309    cache_size=1
   310  `,
   311  		2: `
   312  [Options]
   313    disable_wal=true
   314  `,
   315  		3: `
   316  [Options]
   317    l0_compaction_threshold=1
   318  `,
   319  		4: `
   320  [Options]
   321    l0_compaction_threshold=1
   322    l0_stop_writes_threshold=1
   323  `,
   324  		5: `
   325  [Options]
   326    lbase_max_bytes=1
   327  `,
   328  		6: `
   329  [Options]
   330    max_manifest_file_size=1
   331  `,
   332  		7: `
   333  [Options]
   334    max_open_files=1
   335  `,
   336  		8: `
   337  [Options]
   338    mem_table_size=2000
   339  `,
   340  		9: `
   341  [Options]
   342    mem_table_stop_writes_threshold=2
   343  `,
   344  		10: `
   345  [Options]
   346    wal_dir=data/wal
   347  `,
   348  		11: `
   349  [Level "0"]
   350    block_restart_interval=1
   351  `,
   352  		12: `
   353  [Level "0"]
   354    block_size=1
   355  `,
   356  		13: `
   357  [Level "0"]
   358    compression=NoCompression
   359  `,
   360  		14: `
   361  [Level "0"]
   362    index_block_size=1
   363  `,
   364  		15: `
   365  [Level "0"]
   366    target_file_size=1
   367  `,
   368  		16: `
   369  [Level "0"]
   370    filter_policy=none
   371  `,
   372  		// 1GB
   373  		17: `
   374  [Options]
   375    bytes_per_sync=1073741824
   376  [TestOptions]
   377    strictfs=true
   378  `,
   379  		18: `
   380  [Options]
   381    max_concurrent_compactions=2
   382  `,
   383  		19: `
   384  [TestOptions]
   385    ingest_using_apply=true
   386  `,
   387  		20: `
   388  [TestOptions]
   389    replace_single_delete=true
   390  `,
   391  		21: `
   392  [TestOptions]
   393    use_disk=true
   394  `,
   395  		22: `
   396  [Options]
   397    max_writer_concurrency=2
   398    force_writer_parallelism=true
   399  `,
   400  		23: `
   401  [TestOptions]
   402    disable_block_property_collector=true
   403  `,
   404  		24: `
   405  [TestOptions]
   406    threads=1
   407  `,
   408  		25: `
   409  [TestOptions]
   410    enable_value_blocks=true
   411  `,
   412  		26: fmt.Sprintf(`
   413  [Options]
   414    format_major_version=%s
   415  `, newestFormatMajorVersionToTest),
   416  		27: `
   417  [TestOptions]
   418    shared_storage_enabled=true
   419    secondary_cache_enabled=true
   420  `,
   421  	}
   422  
   423  	opts := make([]*TestOptions, len(stdOpts))
   424  	for i := range opts {
   425  		opts[i] = defaultTestOptions()
   426  		// NB: The standard options by definition can never include custom
   427  		// options, so no need to propagate custom option parsers.
   428  		if err := parseOptions(opts[i], stdOpts[i], nil /* custom option parsers */); err != nil {
   429  			panic(err)
   430  		}
   431  	}
   432  	return opts
   433  }
   434  
   435  func randomOptions(
   436  	rng *rand.Rand, customOptionParsers map[string]func(string) (CustomOption, bool),
   437  ) *TestOptions {
   438  	testOpts := defaultTestOptions()
   439  	opts := testOpts.Opts
   440  
   441  	// There are some private options that we don't want users to fiddle with.
   442  	// They cannot be set through the public interface; the only way to set them
   443  	// is through Parse.
   444  	{
   445  		var privateOpts bytes.Buffer
   446  		fmt.Fprintln(&privateOpts, `[Options]`)
   447  		if rng.Intn(3) == 0 /* 33% */ {
   448  			fmt.Fprintln(&privateOpts, `  disable_delete_only_compactions=true`)
   449  		}
   450  		if rng.Intn(3) == 0 /* 33% */ {
   451  			fmt.Fprintln(&privateOpts, `  disable_elision_only_compactions=true`)
   452  		}
   453  		if rng.Intn(5) == 0 /* 20% */ {
   454  			fmt.Fprintln(&privateOpts, `  disable_lazy_combined_iteration=true`)
   455  		}
   456  		if privateOptsStr := privateOpts.String(); privateOptsStr != "[Options]\n" {
   457  			parseOptions(testOpts, privateOptsStr, customOptionParsers)
   458  		}
   459  	}
   460  
   461  	opts.BytesPerSync = 1 << uint(rng.Intn(28))     // 1B - 128MB
   462  	opts.Cache = cache.New(1 << uint(rng.Intn(30))) // 1B - 512MB
   463  	opts.DisableWAL = rng.Intn(2) == 0
   464  	opts.FlushDelayDeleteRange = time.Millisecond * time.Duration(5*rng.Intn(245)) // 0ms - 1220ms, in 5ms steps
   465  	opts.FlushDelayRangeKey = time.Millisecond * time.Duration(5*rng.Intn(245))    // 0ms - 1220ms, in 5ms steps
   466  	opts.FlushSplitBytes = 1 << rng.Intn(20)                                       // 1B - 512KB
   467  	opts.FormatMajorVersion = minimumFormatMajorVersion
   468  	n := int(newestFormatMajorVersionToTest - opts.FormatMajorVersion)
   469  	opts.FormatMajorVersion += pebble.FormatMajorVersion(rng.Intn(n + 1))
   470  	opts.Experimental.L0CompactionConcurrency = 1 + rng.Intn(4) // 1-4
   471  	opts.Experimental.LevelMultiplier = 5 << rng.Intn(7)        // 5 - 320
   472  	opts.TargetByteDeletionRate = 1 << uint(20+rng.Intn(10))    // 1MB - 512MB
   473  	opts.Experimental.ValidateOnIngest = rng.Intn(2) != 0
   474  	opts.L0CompactionThreshold = 1 + rng.Intn(100)     // 1 - 100
   475  	opts.L0CompactionFileThreshold = 1 << rng.Intn(11) // 1 - 1024
   476  	opts.L0StopWritesThreshold = 1 + rng.Intn(100)     // 1 - 100
   477  	if opts.L0StopWritesThreshold < opts.L0CompactionThreshold {
   478  		opts.L0StopWritesThreshold = opts.L0CompactionThreshold
   479  	}
   480  	opts.LBaseMaxBytes = 1 << uint(rng.Intn(30)) // 1B - 512MB
   481  	maxConcurrentCompactions := rng.Intn(3) + 1  // 1-3
   482  	opts.MaxConcurrentCompactions = func() int {
   483  		return maxConcurrentCompactions
   484  	}
   485  	opts.MaxManifestFileSize = 1 << uint(rng.Intn(30)) // 1B  - 512MB
   486  	opts.MemTableSize = 2 << (10 + uint(rng.Intn(16))) // 2KB - 64MB
   487  	opts.MemTableStopWritesThreshold = 2 + rng.Intn(5) // 2 - 6
   488  	if rng.Intn(2) == 0 {
   489  		opts.WALDir = "data/wal"
   490  	}
   491  	if rng.Intn(4) == 0 {
   492  		// Enable Writer parallelism for 25% of the random options. Setting
   493  		// MaxWriterConcurrency to any value greater than or equal to 1 has the
   494  		// same effect currently.
   495  		opts.Experimental.MaxWriterConcurrency = 2
   496  		opts.Experimental.ForceWriterParallelism = true
   497  	}
   498  	if rng.Intn(2) == 0 {
   499  		opts.Experimental.DisableIngestAsFlushable = func() bool { return true }
   500  	}
   501  	var lopts pebble.LevelOptions
   502  	lopts.BlockRestartInterval = 1 + rng.Intn(64)  // 1 - 64
   503  	lopts.BlockSize = 1 << uint(rng.Intn(24))      // 1 - 8MB
   504  	lopts.BlockSizeThreshold = 50 + rng.Intn(50)   // 50 - 99
   505  	lopts.IndexBlockSize = 1 << uint(rng.Intn(24)) // 1 - 8MB
   506  	lopts.TargetFileSize = 1 << uint(rng.Intn(28)) // 1 - 128MB
   507  
   508  	// We either use no bloom filter, the default filter, or a filter with a
   509  	// randomized bits-per-key setting. We zero out the Filters map; it is
   510  	// repopulated by EnsureDefaults as needed.
   511  	opts.Filters = nil
   512  	switch rng.Intn(3) {
   513  	case 0:
   514  		lopts.FilterPolicy = nil
   515  	case 1:
   516  		lopts.FilterPolicy = bloom.FilterPolicy(10)
   517  	default:
   518  		lopts.FilterPolicy = newTestingFilterPolicy(1 << rng.Intn(5))
   519  	}
   520  	opts.Levels = []pebble.LevelOptions{lopts}
   521  
   522  	// Explicitly disable disk-backed FS's for the random configurations. The
   523  	// single standard test configuration that uses a disk-backed FS is
   524  	// sufficient.
   525  	testOpts.useDisk = false
   526  	testOpts.strictFS = rng.Intn(2) != 0 // Only relevant for MemFS.
   527  	testOpts.threads = rng.Intn(runtime.GOMAXPROCS(0)) + 1
   528  	if testOpts.strictFS {
   529  		opts.DisableWAL = false
   530  	}
   531  	testOpts.ingestUsingApply = rng.Intn(2) != 0
   532  	testOpts.deleteSized = rng.Intn(2) != 0
   533  	testOpts.replaceSingleDelete = rng.Intn(2) != 0
   534  	testOpts.disableBlockPropertyCollector = rng.Intn(2) == 1
   535  	if testOpts.disableBlockPropertyCollector {
   536  		testOpts.Opts.BlockPropertyCollectors = nil
   537  	}
   538  	testOpts.enableValueBlocks = opts.FormatMajorVersion >= pebble.FormatSSTableValueBlocks &&
   539  		rng.Intn(2) != 0
   540  	if testOpts.enableValueBlocks {
   541  		testOpts.Opts.Experimental.EnableValueBlocks = func() bool { return true }
   542  	}
   543  	testOpts.asyncApplyToDB = rng.Intn(2) != 0
   544  	// 20% of the time, enable shared storage.
   545  	if rng.Intn(5) == 0 {
   546  		testOpts.sharedStorageEnabled = true
   547  		testOpts.Opts.Experimental.RemoteStorage = remote.MakeSimpleFactory(map[remote.Locator]remote.Storage{
   548  			"": remote.NewInMem(),
   549  		})
   550  		// With shared storage enabled, pick with equal probability between
   551  		// writing all files to shared storage vs. only the lower levels.
   552  		testOpts.Opts.Experimental.CreateOnShared = remote.CreateOnSharedAll
   553  		if rng.Intn(2) == 0 {
   554  			testOpts.Opts.Experimental.CreateOnShared = remote.CreateOnSharedLower
   555  		}
   556  		// If shared storage is enabled, enable the secondary cache 50% of the time.
   557  		if rng.Intn(2) == 0 {
   558  			testOpts.secondaryCacheEnabled = true
   559  			// TODO(josh): Randomize various secondary cache settings.
   560  			testOpts.Opts.Experimental.SecondaryCacheSizeBytes = 1024 * 1024 * 32 // 32 MBs
   561  		}
   562  	}
   563  	testOpts.seedEFOS = rng.Uint64()
   564  	testOpts.ingestSplit = rng.Intn(2) == 0
   565  	opts.Experimental.IngestSplit = func() bool { return testOpts.ingestSplit }
   566  	testOpts.Opts.EnsureDefaults()
   567  	return testOpts
   568  }
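
// Hedged usage sketch (hypothetical helper): randomOptions is driven entirely
// by the provided rng, so a failing configuration can be regenerated from the
// seed that produced it.
func exampleRandomOptions(seed uint64) *TestOptions {
	rng := rand.New(rand.NewSource(seed))
	return randomOptions(rng, nil /* customOptionParsers */)
}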
   569  
   570  func setupInitialState(dataDir string, testOpts *TestOptions) error {
   571  	// Copy (vfs.Default,<initialStatePath>/data) to (testOpts.Opts.FS,<dataDir>).
   572  	ok, err := vfs.Clone(
   573  		vfs.Default,
   574  		testOpts.Opts.FS,
   575  		vfs.Default.PathJoin(testOpts.initialStatePath, "data"),
   576  		dataDir,
   577  		vfs.CloneSync,
   578  		vfs.CloneSkip(func(filename string) bool {
   579  			// Skip the archive of historical files, any checkpoints created by
   580  			// operations, and any files staged for ingest in tmp.
   581  			b := filepath.Base(filename)
   582  			return b == "archive" || b == "checkpoints" || b == "tmp"
   583  		}))
   584  	if err != nil {
   585  		return err
   586  	} else if !ok {
   587  		return os.ErrNotExist
   588  	}
   589  
   590  	// Tests with wal_dir set store their WALs in a `wal` directory. The source
   591  	// database (initialStatePath) could've had wal_dir set, or the current test
   592  	// options (testOpts) could have wal_dir set, or both.
   593  	fs := testOpts.Opts.FS
   594  	walDir := fs.PathJoin(dataDir, "wal")
   595  	if err := fs.MkdirAll(walDir, os.ModePerm); err != nil {
   596  		return err
   597  	}
   598  
   599  	// Move <dataDir>/wal/*.log -> <dataDir>.
   600  	src, dst := walDir, dataDir
   601  	if testOpts.Opts.WALDir != "" {
   602  		// Move <dataDir>/*.log -> <dataDir>/wal.
   603  		src, dst = dst, src
   604  	}
   605  	return moveLogs(fs, src, dst)
   606  }
   607  
   608  func moveLogs(fs vfs.FS, srcDir, dstDir string) error {
   609  	ls, err := fs.List(srcDir)
   610  	if err != nil {
   611  		return err
   612  	}
   613  	for _, f := range ls {
   614  		if filepath.Ext(f) != ".log" {
   615  			continue
   616  		}
   617  		src := fs.PathJoin(srcDir, f)
   618  		dst := fs.PathJoin(dstDir, f)
   619  		if err := fs.Rename(src, dst); err != nil {
   620  			return err
   621  		}
   622  	}
   623  	return nil
   624  }
   625  
   626  var blockPropertyCollectorConstructors = []func() pebble.BlockPropertyCollector{
   627  	sstable.NewTestKeysBlockPropertyCollector,
   628  }
   629  
   630  // testingFilterPolicy is used to allow bloom filter policies with non-default
   631  // bits-per-key setting. It is necessary because the name of the production
   632  // filter policy is fixed (see bloom.FilterPolicy.Name()); we need to output a
   633  // custom policy name to the OPTIONS file that the test can then parse.
   634  type testingFilterPolicy struct {
   635  	bloom.FilterPolicy
   636  }
   637  
   638  var _ pebble.FilterPolicy = (*testingFilterPolicy)(nil)
   639  
   640  func newTestingFilterPolicy(bitsPerKey int) *testingFilterPolicy {
   641  	return &testingFilterPolicy{
   642  		FilterPolicy: bloom.FilterPolicy(bitsPerKey),
   643  	}
   644  }
   645  
   646  const testingFilterPolicyFmt = "testing_bloom_filter/bits_per_key=%d"
   647  
   648  // Name implements the pebble.FilterPolicy interface.
   649  func (t *testingFilterPolicy) Name() string {
   650  	if t.FilterPolicy == 10 {
   651  		return "rocksdb.BuiltinBloomFilter"
   652  	}
   653  	return fmt.Sprintf(testingFilterPolicyFmt, t.FilterPolicy)
   654  }
   655  
   656  func filterPolicyFromName(name string) (pebble.FilterPolicy, error) {
   657  	switch name {
   658  	case "none":
   659  		return nil, nil
   660  	case "rocksdb.BuiltinBloomFilter":
   661  		return bloom.FilterPolicy(10), nil
   662  	}
   663  	var bitsPerKey int
   664  	if _, err := fmt.Sscanf(name, testingFilterPolicyFmt, &bitsPerKey); err != nil {
   665  		return nil, errors.Errorf("invalid filter policy name %q", name)
   666  	}
   667  	return newTestingFilterPolicy(bitsPerKey), nil
   668  }
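
// Hedged sketch (hypothetical helper): filter policy names are expected to
// round-trip through Name and filterPolicyFromName so that an OPTIONS file
// emitted by one run can be re-parsed by another.
func checkFilterPolicyRoundTrip(bitsPerKey int) error {
	fp := newTestingFilterPolicy(bitsPerKey)
	parsed, err := filterPolicyFromName(fp.Name())
	if err != nil {
		return err
	}
	if parsed.Name() != fp.Name() {
		return errors.Errorf("filter policy %q did not round-trip", fp.Name())
	}
	return nil
}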