github.com/cockroachdb/pebble@v0.0.0-20231214172447-ab4952c5f87b/metamorphic/generator.go

// Copyright 2019 The LevelDB-Go and Pebble Authors. All rights reserved. Use
// of this source code is governed by a BSD-style license that can be found in
// the LICENSE file.

package metamorphic

import (
	"bytes"
	"fmt"
	"os"
	"slices"

	"github.com/cockroachdb/pebble"
	"github.com/cockroachdb/pebble/internal/randvar"
	"github.com/cockroachdb/pebble/internal/testkeys"
	"golang.org/x/exp/rand"
)

const maxValueSize = 20

type iterOpts struct {
	lower    []byte
	upper    []byte
	keyTypes uint32 // pebble.IterKeyType
	// maskSuffix may be set if keyTypes is IterKeyTypePointsAndRanges to
	// configure IterOptions.RangeKeyMasking.Suffix.
	maskSuffix []byte

	// If filterMax is >0, this iterator will filter out any keys that have
	// suffixes that don't fall within the range [filterMin,filterMax).
	// Additionally, the iterator will be constructed with a block-property
	// filter that filters out blocks accordingly. Not all OPTIONS hook up the
	// corresponding block property collector, so block-filtering may still be
	// effectively disabled in some runs. The iterator operations themselves
	// however will always skip past any points that should be filtered to
	// ensure determinism.
	filterMin uint64
	filterMax uint64

	// see IterOptions.UseL6Filters.
	useL6Filters bool

	// NB: If adding or removing fields, ensure IsZero is in sync.
}

func (o iterOpts) IsZero() bool {
	return o.lower == nil && o.upper == nil && o.keyTypes == 0 &&
		o.maskSuffix == nil && o.filterMin == 0 && o.filterMax == 0 && !o.useL6Filters
}
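
// For illustration, a hypothetical iterOpts value (not drawn from any real
// run) describing a points-only iterator bounded to [a, m) that only surfaces
// keys whose suffixes fall within [2, 5):
//
//	opts := iterOpts{
//		lower:     []byte("a"),
//		upper:     []byte("m"),
//		keyTypes:  uint32(pebble.IterKeyTypePointsOnly),
//		filterMin: 2,
//		filterMax: 5,
//	}
//	_ = opts.IsZero() // false: bounds and a suffix filter are set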

type generator struct {
	cfg config
	rng *rand.Rand

	init *initOp
	ops  []op

	// keyManager tracks the state of keys at operation generation time.
	keyManager *keyManager
	dbs        objIDSlice
	// Unordered sets of object IDs for live objects. Used to randomly select an
	// object when generating an operation. There are four kinds of concrete
	// objects: DBs, batches, iterators, and snapshots.
	//
	// liveBatches contains the live indexed and write-only batches.
	liveBatches objIDSlice
	// liveIters contains the live iterators.
	liveIters     objIDSlice
	itersLastOpts map[objID]iterOpts
	// liveReaders contains the DBs, and any live indexed batches and
	// snapshots. The DBs are always at the start of the slice.
	liveReaders objIDSlice
	// liveSnapshots contains the live snapshots.
	liveSnapshots objIDSlice
	// liveWriters contains the DBs, and any live batches. The DBs are always
	// at the start of the slice.
	liveWriters objIDSlice

	// Maps used to find associated objects during generation. These maps are not
	// needed during test execution.
	//
	// batchID -> batch iters: used to keep track of the open iterators on an
	// indexed batch. The iter set value will also be indexed by the readers map.
	batches map[objID]objIDSet
	// iterID -> reader iters: used to keep track of all of the open
	// iterators. The iter set value will also be indexed by either the batches
	// or snapshots maps.
	iters map[objID]objIDSet
	// objectID -> db: used to keep track of the DB a batch, iter, or snapshot
	// was created on.
	objDB map[objID]objID
	// readerID -> reader iters: used to keep track of the open iterators on a
	// reader. The iter set value will also be indexed by either the batches or
	// snapshots maps. This map is the union of batches and snapshots maps.
	readers map[objID]objIDSet
	// snapshotID -> snapshot iters: used to keep track of the open iterators on
	// a snapshot. The iter set value will also be indexed by the readers map.
	snapshots map[objID]objIDSet
	// snapshotID -> bounds of the snapshot: only populated for snapshots that
	// are constrained by bounds.
	snapshotBounds map[objID][]pebble.KeyRange
	// iterCreationTimestamp is the metaTimestamp at which the iter was created.
	iterCreationTimestamp map[objID]int
	// iterReaderID is a map from an iterID to a readerID.
	iterReaderID map[objID]objID
}

func newGenerator(rng *rand.Rand, cfg config, km *keyManager) *generator {
	g := &generator{
		cfg:                   cfg,
		rng:                   rng,
		init:                  &initOp{dbSlots: uint32(cfg.numInstances)},
		keyManager:            km,
		liveReaders:           objIDSlice{makeObjID(dbTag, 1)},
		liveWriters:           objIDSlice{makeObjID(dbTag, 1)},
		dbs:                   objIDSlice{makeObjID(dbTag, 1)},
		objDB:                 make(map[objID]objID),
		batches:               make(map[objID]objIDSet),
		iters:                 make(map[objID]objIDSet),
		readers:               make(map[objID]objIDSet),
		snapshots:             make(map[objID]objIDSet),
		snapshotBounds:        make(map[objID][]pebble.KeyRange),
		itersLastOpts:         make(map[objID]iterOpts),
		iterCreationTimestamp: make(map[objID]int),
		iterReaderID:          make(map[objID]objID),
	}
	for i := 1; i < cfg.numInstances; i++ {
		g.liveReaders = append(g.liveReaders, makeObjID(dbTag, uint32(i+1)))
		g.liveWriters = append(g.liveWriters, makeObjID(dbTag, uint32(i+1)))
		g.dbs = append(g.dbs, makeObjID(dbTag, uint32(i+1)))
	}
	// Note that the initOp fields are populated during generation.
	g.ops = append(g.ops, g.init)
	return g
}
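
// A minimal construction sketch, assuming a fixed seed, a single DB instance,
// and a config value cfg; the newKeyManager signature is an assumption for
// illustration, not a stable API:
//
//	rng := rand.New(rand.NewSource(0))
//	km := newKeyManager(1 /* numInstances */)
//	g := newGenerator(rng, cfg, km)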

func generate(rng *rand.Rand, count uint64, cfg config, km *keyManager) []op {
	g := newGenerator(rng, cfg, km)

	generators := []func(){
		batchAbort:                  g.batchAbort,
		batchCommit:                 g.batchCommit,
		dbCheckpoint:                g.dbCheckpoint,
		dbCompact:                   g.dbCompact,
		dbFlush:                     g.dbFlush,
		dbRatchetFormatMajorVersion: g.dbRatchetFormatMajorVersion,
		dbRestart:                   g.dbRestart,
		iterClose:                   g.randIter(g.iterClose),
		iterFirst:                   g.randIter(g.iterFirst),
		iterLast:                    g.randIter(g.iterLast),
		iterNext:                    g.randIter(g.iterNext),
		iterNextWithLimit:           g.randIter(g.iterNextWithLimit),
		iterNextPrefix:              g.randIter(g.iterNextPrefix),
		iterCanSingleDelete:         g.randIter(g.iterCanSingleDelete),
		iterPrev:                    g.randIter(g.iterPrev),
		iterPrevWithLimit:           g.randIter(g.iterPrevWithLimit),
		iterSeekGE:                  g.randIter(g.iterSeekGE),
		iterSeekGEWithLimit:         g.randIter(g.iterSeekGEWithLimit),
		iterSeekLT:                  g.randIter(g.iterSeekLT),
		iterSeekLTWithLimit:         g.randIter(g.iterSeekLTWithLimit),
		iterSeekPrefixGE:            g.randIter(g.iterSeekPrefixGE),
		iterSetBounds:               g.randIter(g.iterSetBounds),
		iterSetOptions:              g.randIter(g.iterSetOptions),
		newBatch:                    g.newBatch,
		newIndexedBatch:             g.newIndexedBatch,
		newIter:                     g.newIter,
		newIterUsingClone:           g.newIterUsingClone,
		newSnapshot:                 g.newSnapshot,
		readerGet:                   g.readerGet,
		replicate:                   g.replicate,
		snapshotClose:               g.snapshotClose,
		writerApply:                 g.writerApply,
		writerDelete:                g.writerDelete,
		writerDeleteRange:           g.writerDeleteRange,
		writerIngest:                g.writerIngest,
		writerIngestAndExcise:       g.writerIngestAndExcise,
		writerMerge:                 g.writerMerge,
		writerRangeKeyDelete:        g.writerRangeKeyDelete,
		writerRangeKeySet:           g.writerRangeKeySet,
		writerRangeKeyUnset:         g.writerRangeKeyUnset,
		writerSet:                   g.writerSet,
		writerSingleDelete:          g.writerSingleDelete,
	}

	// TPCC-style deck of cards randomization. Every time the end of the deck is
	// reached, we shuffle the deck.
	deck := randvar.NewDeck(g.rng, cfg.ops...)

	defer func() {
		if r := recover(); r != nil {
			fmt.Fprintln(os.Stderr, formatOps(g.ops))
			panic(r)
		}
	}()
	for i := uint64(0); i < count; i++ {
		generators[deck.Int()]()
	}

	g.dbClose()
	return g.ops
}
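
// deck.Int returns an index into generators with frequency proportional to
// the corresponding cfg.ops weight. A toy sketch of the deck-of-cards
// behavior, independent of the op table above:
//
//	d := randvar.NewDeck(rng, 1, 3) // index 1 drawn 3x as often as index 0
//	counts := make([]int, 2)
//	for i := 0; i < 4; i++ {
//		counts[d.Int()]++
//	}
//	// After one full pass through the deck, counts == [1 3].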

func (g *generator) add(op op) {
	g.ops = append(g.ops, op)
	g.keyManager.update(op)
}

// randKeyToWrite returns a key for any write other than SingleDelete.
//
// TODO(peter): make the size and distribution of keys configurable. See
// keyDist and keySizeDist in config.go.
func (g *generator) randKeyToWrite(newKey float64) []byte {
	return g.randKeyHelper(g.keyManager.knownKeys(), newKey, nil)
}

// prefixKeyRange generates a [start, end) pair consisting of two prefix keys.
func (g *generator) prefixKeyRange() ([]byte, []byte) {
	start := g.randPrefixToWrite(0.001)
	end := g.randPrefixToWrite(0.001)
	for g.cmp(start, end) == 0 {
		end = g.randPrefixToWrite(0.05)
	}
	if g.cmp(start, end) > 0 {
		start, end = end, start
	}
	return start, end
}

// randPrefixToWrite returns a prefix key (a key with no suffix) for a range key
// write operation.
func (g *generator) randPrefixToWrite(newPrefix float64) []byte {
	prefixes := g.keyManager.prefixes()
	if len(prefixes) > 0 && g.rng.Float64() > newPrefix {
		// Use an existing prefix.
		p := g.rng.Intn(len(prefixes))
		return prefixes[p]
	}

	// Use a new prefix.
	var prefix []byte
	for {
		prefix = g.randKeyHelperSuffix(nil, 4, 12, 0)
		if !g.keyManager.prefixExists(prefix) {
			if !g.keyManager.addNewKey(prefix) {
				panic("key must not exist if prefix doesn't exist")
			}
			return prefix
		}
	}
}

// randSuffixToWrite generates a random suffix according to the configuration's
// suffix distribution. It takes a probability 0 ≤ incMaxProb ≤ 1.0 with which
// the generator should increase the maximum suffix it generates.
//
// randSuffixToWrite may return a nil suffix, with the probability the
// configuration's suffix distribution assigns to the zero suffix.
func (g *generator) randSuffixToWrite(incMaxProb float64) []byte {
	if g.rng.Float64() < incMaxProb {
		g.cfg.writeSuffixDist.IncMax(1)
	}
	return suffixFromInt(int64(g.cfg.writeSuffixDist.Uint64(g.rng)))
}

// randSuffixToRead generates a random suffix used during reads. The suffixes
// generated by this function are within the same range as suffixes generated by
// randSuffixToWrite, however randSuffixToRead pulls from a uniform
// distribution.
func (g *generator) randSuffixToRead() []byte {
	// When reading, don't apply the recency skewing in order to better
	// exercise reading a mix of older and newer keys.
	max := g.cfg.writeSuffixDist.Max()
	return suffixFromInt(g.rng.Int63n(int64(max)))
}

func suffixFromInt(suffix int64) []byte {
	// Treat the zero as no suffix to match the behavior during point key
	// generation in randKeyHelper.
	if suffix == 0 {
		return nil
	}
	return testkeys.Suffix(suffix)
}
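
// Assuming the testkeys "@<n>" suffix encoding, suffixFromInt(7) yields
// []byte("@7"), while suffixFromInt(0) yields nil (an unsuffixed key).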

func (g *generator) randKeyToSingleDelete(id objID) []byte {
	keys := g.keyManager.eligibleSingleDeleteKeys(id)
	length := len(keys)
	if length == 0 {
		return nil
	}
	return keys[g.rng.Intn(length)]
}

// randKeyToRead returns a key for read operations.
func (g *generator) randKeyToRead(newKey float64) []byte {
	return g.randKeyHelper(g.keyManager.knownKeys(), newKey, nil)
}

// randKeyToReadInRange returns a key for read operations within the provided
// key range. The bounds of the provided key range must span a prefix boundary.
func (g *generator) randKeyToReadInRange(newKey float64, kr pebble.KeyRange) []byte {
	return g.randKeyHelper(g.keyManager.knownKeysInRange(kr), newKey, &kr)
}

func (g *generator) randKeyHelper(
	keys [][]byte, newKey float64, newKeyBounds *pebble.KeyRange,
) []byte {
	switch {
	case len(keys) > 0 && g.rng.Float64() > newKey:
		// Use an existing user key.
		return keys[g.rng.Intn(len(keys))]

	case len(keys) > 0 && g.rng.Float64() > g.cfg.newPrefix:
		// Use an existing prefix but a new suffix, producing a new user key.
		prefixes := g.keyManager.prefixes()

		// If we're constrained to a key range, find which existing prefixes
		// fall within that key range.
		if newKeyBounds != nil {
			s, _ := slices.BinarySearchFunc(prefixes, newKeyBounds.Start, g.cmp)
			e, _ := slices.BinarySearchFunc(prefixes, newKeyBounds.End, g.cmp)
			prefixes = prefixes[s:e]
		}

		if len(prefixes) > 0 {
			for {
				// Pick a prefix on each iteration in case most or all suffixes are
				// already in use for any individual prefix.
				p := g.rng.Intn(len(prefixes))
				suffix := int64(g.cfg.writeSuffixDist.Uint64(g.rng))

				var key []byte
				if suffix > 0 {
					key = resizeBuffer(key, len(prefixes[p]), testkeys.SuffixLen(suffix))
					n := copy(key, prefixes[p])
					testkeys.WriteSuffix(key[n:], suffix)
				} else {
					key = resizeBuffer(key, len(prefixes[p]), 0)
					copy(key, prefixes[p])
				}

				if (newKeyBounds == nil || (g.cmp(key, newKeyBounds.Start) >= 0 && g.cmp(key, newKeyBounds.End) < 0)) &&
					g.keyManager.addNewKey(key) {
					return key
				}

				// If the generated key already existed, or the generated key
				// fell outside the provided bounds, increase the suffix
				// distribution and loop.
				g.cfg.writeSuffixDist.IncMax(1)
			}
		}
		// Otherwise fall through to generating a new prefix.
		fallthrough

	default:
		// Use a new prefix, producing a new user key.

		var key []byte

		suffix := int64(g.cfg.writeSuffixDist.Uint64(g.rng))

		// If we have bounds in which we need to generate the key, use
		// testkeys.RandomSeparator to generate a key between the bounds.
		if newKeyBounds != nil {
			targetLength := 4 + g.rng.Intn(8)
			key = testkeys.RandomSeparator(nil, g.prefix(newKeyBounds.Start), g.prefix(newKeyBounds.End),
				suffix, targetLength, g.rng)
		} else {
			for {
				key = g.randKeyHelperSuffix(nil, 4, 12, suffix)
				if !g.keyManager.prefixExists(key[:testkeys.Comparer.Split(key)]) {
					if !g.keyManager.addNewKey(key) {
						panic("key must not exist if prefix doesn't exist")
					}
					break
				}
			}
		}
		return key
	}
}

// randKeyHelperSuffix is a helper function for randKeyHelper, and should not be
// invoked directly.
func (g *generator) randKeyHelperSuffix(
	dst []byte, minPrefixLen, maxPrefixLen int, suffix int64,
) []byte {
	n := minPrefixLen
	if maxPrefixLen > minPrefixLen {
		n += g.rng.Intn(maxPrefixLen - minPrefixLen)
	}
	// In order to test a mix of suffixed and unsuffixed keys, omit the zero
	// suffix.
	if suffix == 0 {
		dst = resizeBuffer(dst, n, 0)
		g.fillRand(dst)
		return dst
	}
	suffixLen := testkeys.SuffixLen(suffix)
	dst = resizeBuffer(dst, n, suffixLen)
	g.fillRand(dst[:n])
	testkeys.WriteSuffix(dst[n:], suffix)
	return dst
}
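
// A sketch of the key shapes this produces: 4-12 random lowercase letters,
// with any nonzero suffix appended in the testkeys "@<n>" form. The outputs
// below are illustrative, not reproducible values:
//
//	g.randKeyHelperSuffix(nil, 4, 12, 0) // e.g. "dgkq"
//	g.randKeyHelperSuffix(nil, 4, 12, 3) // e.g. "dgkq@3"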

func resizeBuffer(buf []byte, prefixLen, suffixLen int) []byte {
	if cap(buf) >= prefixLen+suffixLen {
		return buf[:prefixLen+suffixLen]
	}
	return make([]byte, prefixLen+suffixLen)
}
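
// For example, resizeBuffer(nil, 4, 2) allocates a fresh 6-byte buffer, while
// a buffer whose capacity already covers prefixLen+suffixLen is resliced and
// reused rather than reallocated.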

// TODO(peter): make the value size configurable. See valueSizeDist in
// config.go.
func (g *generator) randValue(min, max int) []byte {
	n := min
	if max > min {
		n += g.rng.Intn(max - min)
	}
	if n == 0 {
		return nil
	}
	buf := make([]byte, n)
	g.fillRand(buf)
	return buf
}

func (g *generator) fillRand(buf []byte) {
	// NB: The actual random values are not particularly important. We only use
	// lowercase letters because that makes visual determination of ordering
	// easier, rather than having to remember the lexicographic ordering of
	// uppercase vs lowercase, or letters vs numbers vs punctuation.
	const letters = "abcdefghijklmnopqrstuvwxyz"
	const lettersLen = uint64(len(letters))
	const lettersCharsPerRand = 12 // ≤ floor(log(math.MaxUint64)/log(lettersLen)) = 13

	var r uint64
	var q int
	for i := 0; i < len(buf); i++ {
		if q == 0 {
			r = g.rng.Uint64()
			q = lettersCharsPerRand
		}
		buf[i] = letters[r%lettersLen]
		r = r / lettersLen
		q--
	}
}
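
// Each rng.Uint64 is consumed as a base-26 number, yielding up to 12
// lowercase letters before a fresh random value is drawn. For example (the
// output shown is illustrative only):
//
//	buf := make([]byte, 8)
//	g.fillRand(buf) // e.g. "qzjmwahd": only 'a'..'z', easy to order by eye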

func (g *generator) newBatch() {
	batchID := makeObjID(batchTag, g.init.batchSlots)
	g.init.batchSlots++
	g.liveBatches = append(g.liveBatches, batchID)
	g.liveWriters = append(g.liveWriters, batchID)
	dbID := g.dbs.rand(g.rng)
	g.objDB[batchID] = dbID

	g.add(&newBatchOp{
		dbID:    dbID,
		batchID: batchID,
	})
}

func (g *generator) newIndexedBatch() {
	batchID := makeObjID(batchTag, g.init.batchSlots)
	g.init.batchSlots++
	g.liveBatches = append(g.liveBatches, batchID)
	g.liveReaders = append(g.liveReaders, batchID)
	g.liveWriters = append(g.liveWriters, batchID)

	iters := make(objIDSet)
	g.batches[batchID] = iters
	g.readers[batchID] = iters
	dbID := g.dbs.rand(g.rng)
	g.objDB[batchID] = dbID

	g.add(&newIndexedBatchOp{
		dbID:    dbID,
		batchID: batchID,
	})
}

// removeBatchFromGenerator will not generate a closeOp for the target batch,
// as not every batch that is removed from the generator should be closed. For
// example, running a closeOp before an ingestOp that contains the closed batch
// will cause an error.
func (g *generator) removeBatchFromGenerator(batchID objID) {
	g.liveBatches.remove(batchID)
	iters := g.batches[batchID]
	delete(g.batches, batchID)

	if iters != nil {
		g.liveReaders.remove(batchID)
		delete(g.readers, batchID)
	}
	g.liveWriters.remove(batchID)
	for _, id := range iters.sorted() {
		g.liveIters.remove(id)
		delete(g.iters, id)
		g.add(&closeOp{objID: id, derivedDBID: g.objDB[batchID]})
	}
}

func (g *generator) batchAbort() {
	if len(g.liveBatches) == 0 {
		return
	}

	batchID := g.liveBatches.rand(g.rng)
	g.removeBatchFromGenerator(batchID)

	g.add(&closeOp{objID: batchID, derivedDBID: g.objDB[batchID]})
}

func (g *generator) batchCommit() {
	if len(g.liveBatches) == 0 {
		return
	}

	batchID := g.liveBatches.rand(g.rng)
	dbID := g.objDB[batchID]
	g.removeBatchFromGenerator(batchID)

	// The batch we're applying may contain single delete tombstones that when
	// applied to the writer result in nondeterminism in the deleted key. If
	// that's the case, we can restore determinism by first deleting the key
	// from the writer.
	//
	// Generating additional operations here is not ideal, but it simplifies
	// single delete invariants significantly.
	singleDeleteConflicts := g.keyManager.checkForSingleDelConflicts(batchID, dbID, false /* collapsed */)
	for _, conflict := range singleDeleteConflicts {
		g.add(&deleteOp{
			writerID:    dbID,
			key:         conflict,
			derivedDBID: dbID,
		})
	}

	g.add(&batchCommitOp{
		dbID:    dbID,
		batchID: batchID,
	})
	g.add(&closeOp{objID: batchID, derivedDBID: dbID})
}

func (g *generator) dbClose() {
	// Close any live iterators and snapshots, so that we can close the DB
	// cleanly.
	for len(g.liveIters) > 0 {
		g.randIter(g.iterClose)()
	}
	for len(g.liveSnapshots) > 0 {
		g.snapshotClose()
	}
	for len(g.liveBatches) > 0 {
		batchID := g.liveBatches[0]
		dbID := g.objDB[batchID]
		g.removeBatchFromGenerator(batchID)
		g.add(&closeOp{objID: batchID, derivedDBID: dbID})
	}
	for len(g.dbs) > 0 {
		db := g.dbs[0]
		g.dbs = g.dbs[1:]
		g.add(&closeOp{objID: db})
	}
}

func (g *generator) dbCheckpoint() {
	// 1/2 of the time we don't restrict the checkpoint;
	// 1/4 of the time we restrict to 1 span;
	// 1/8 of the time we restrict to 2 spans; etc.
	numSpans := 0
	var spans []pebble.CheckpointSpan
	for g.rng.Intn(2) == 0 {
		numSpans++
	}
	if numSpans > 0 {
		spans = make([]pebble.CheckpointSpan, numSpans)
	}
	for i := range spans {
		start := g.randKeyToRead(0.01)
		end := g.randKeyToRead(0.01)
		if g.cmp(start, end) > 0 {
			start, end = end, start
		}
		spans[i].Start = start
		spans[i].End = end
	}
	dbID := g.dbs.rand(g.rng)
	g.add(&checkpointOp{
		dbID:  dbID,
		spans: spans,
	})
}

func (g *generator) dbCompact() {
	// Generate new key(s) with a 1% probability.
	start := g.randKeyToRead(0.01)
	end := g.randKeyToRead(0.01)
	if g.cmp(start, end) > 0 {
		start, end = end, start
	}
	dbID := g.dbs.rand(g.rng)
	g.add(&compactOp{
		dbID:        dbID,
		start:       start,
		end:         end,
		parallelize: g.rng.Float64() < 0.5,
	})
}

func (g *generator) dbFlush() {
	g.add(&flushOp{g.dbs.rand(g.rng)})
}

func (g *generator) dbRatchetFormatMajorVersion() {
	// Ratchet to a random format major version between the minimum the
	// metamorphic tests support and the newest. At runtime, the generated
	// version may be behind the database's format major version, in which case
	// RatchetFormatMajorVersion should deterministically error.

	dbID := g.dbs.rand(g.rng)
	n := int(newestFormatMajorVersionToTest - minimumFormatMajorVersion)
	vers := pebble.FormatMajorVersion(g.rng.Intn(n+1)) + minimumFormatMajorVersion
	g.add(&dbRatchetFormatMajorVersionOp{dbID: dbID, vers: vers})
}

func (g *generator) dbRestart() {
	// Close any live iterators and snapshots, so that we can close the DB
	// cleanly.
	dbID := g.dbs.rand(g.rng)
	for len(g.liveIters) > 0 {
		g.randIter(g.iterClose)()
	}
	for len(g.liveSnapshots) > 0 {
		g.snapshotClose()
	}
	// Close the batches.
	for len(g.liveBatches) > 0 {
		batchID := g.liveBatches[0]
		dbID := g.objDB[batchID]
		g.removeBatchFromGenerator(batchID)
		g.add(&closeOp{objID: batchID, derivedDBID: dbID})
	}
	if len(g.liveReaders) != len(g.dbs) || len(g.liveWriters) != len(g.dbs) {
		panic(fmt.Sprintf("unexpected counts: liveReaders %d, liveWriters: %d",
			len(g.liveReaders), len(g.liveWriters)))
	}
	g.add(&dbRestartOp{dbID: dbID})
}

// maybeSetSnapshotIterBounds must be called whenever creating a new iterator or
// modifying the bounds of an iterator. If the iterator is backed by a snapshot
// that only guarantees consistency within a limited set of key spans, then the
// iterator must set bounds within one of the snapshot's consistent key spans.
// It returns true if the provided readerID is a bounded snapshot and bounds
// were set.
func (g *generator) maybeSetSnapshotIterBounds(readerID objID, opts *iterOpts) bool {
	snapBounds, isBoundedSnapshot := g.snapshotBounds[readerID]
	if !isBoundedSnapshot {
		return false
	}
	// Pick a random key range within one of the snapshot's key ranges.
	parentBounds := snapBounds[g.rng.Intn(len(snapBounds))]
	// With 10% probability, use the parent start bound as-is.
	if g.rng.Float64() <= 0.1 {
		opts.lower = parentBounds.Start
	} else {
		opts.lower = testkeys.RandomSeparator(
			nil, /* dst */
			parentBounds.Start,
			parentBounds.End,
			0, /* suffix */
			4+g.rng.Intn(8),
			g.rng,
		)
	}
	// With 10% probability, use the parent end bound as-is.
	if g.rng.Float64() <= 0.1 {
		opts.upper = parentBounds.End
	} else {
		opts.upper = testkeys.RandomSeparator(
			nil, /* dst */
			opts.lower,
			parentBounds.End,
			0, /* suffix */
			4+g.rng.Intn(8),
			g.rng,
		)
	}
	return true
}
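
// For instance, given a snapshot consistent over [c, m), the generated
// iterator bounds might be [c, f) or [d, m), but will never extend outside
// [c, m).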

func (g *generator) newIter() {
	iterID := makeObjID(iterTag, g.init.iterSlots)
	g.init.iterSlots++
	g.liveIters = append(g.liveIters, iterID)

	readerID := g.liveReaders.rand(g.rng)
	if iters := g.readers[readerID]; iters != nil {
		iters[iterID] = struct{}{}
		g.iters[iterID] = iters
		//lint:ignore SA9003 - readability
	} else {
		// NB: the DB object does not track its open iterators because it never
		// closes.
	}
	g.iterReaderID[iterID] = readerID
	dbID := g.deriveDB(iterID)

	var opts iterOpts
	if !g.maybeSetSnapshotIterBounds(readerID, &opts) {
		// Generate lower/upper bounds with a 10% probability.
		if g.rng.Float64() <= 0.1 {
			// Generate a new key with a .1% probability.
			opts.lower = g.randKeyToRead(0.001)
		}
		if g.rng.Float64() <= 0.1 {
			// Generate a new key with a .1% probability.
			opts.upper = g.randKeyToRead(0.001)
		}
		if g.cmp(opts.lower, opts.upper) > 0 {
			opts.lower, opts.upper = opts.upper, opts.lower
		}
	}
	opts.keyTypes, opts.maskSuffix = g.randKeyTypesAndMask()

	// With 10% probability, enable automatic filtering of keys with suffixes
	// not in the provided range. This filtering occurs both through
	// block-property filtering and explicitly within the iterator operations to
	// ensure determinism.
	if g.rng.Float64() <= 0.1 {
		max := g.cfg.writeSuffixDist.Max()
		opts.filterMin, opts.filterMax = g.rng.Uint64n(max)+1, g.rng.Uint64n(max)+1
		if opts.filterMin > opts.filterMax {
			opts.filterMin, opts.filterMax = opts.filterMax, opts.filterMin
		} else if opts.filterMin == opts.filterMax {
			opts.filterMax = opts.filterMin + 1
		}
	}

	// Enable L6 filters with a 10% probability.
	if g.rng.Float64() <= 0.1 {
		opts.useL6Filters = true
	}

	g.itersLastOpts[iterID] = opts
	g.iterCreationTimestamp[iterID] = g.keyManager.nextMetaTimestamp()
	g.iterReaderID[iterID] = readerID
	g.add(&newIterOp{
		readerID:    readerID,
		iterID:      iterID,
		iterOpts:    opts,
		derivedDBID: dbID,
	})
}

func (g *generator) randKeyTypesAndMask() (keyTypes uint32, maskSuffix []byte) {
	// Iterate over different key types.
	p := g.rng.Float64()
	switch {
	case p < 0.2: // 20% probability
		keyTypes = uint32(pebble.IterKeyTypePointsOnly)
	case p < 0.8: // 60% probability
		keyTypes = uint32(pebble.IterKeyTypePointsAndRanges)
		// With 50% probability, enable masking.
		if g.rng.Intn(2) == 1 {
			maskSuffix = g.randSuffixToRead()
		}
	default: // 20% probability
		keyTypes = uint32(pebble.IterKeyTypeRangesOnly)
	}
	return keyTypes, maskSuffix
}

func (g *generator) deriveDB(readerID objID) objID {
	if readerID.tag() == iterTag {
		readerID = g.iterReaderID[readerID]
	}
	dbParentID := readerID
	if dbParentID.tag() != dbTag {
		dbParentID = g.objDB[dbParentID]
	}
	g.objDB[readerID] = dbParentID
	return dbParentID
}
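
// For example, an iterator opened on an indexed batch resolves through the
// batch to the owning DB (iter -> batch -> db), and deriveDB returns that
// DB's objID.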

func (g *generator) newIterUsingClone() {
	if len(g.liveIters) == 0 {
		return
	}
	existingIterID := g.liveIters.rand(g.rng)
	iterID := makeObjID(iterTag, g.init.iterSlots)
	g.init.iterSlots++
	g.liveIters = append(g.liveIters, iterID)
	if iters := g.iters[existingIterID]; iters != nil {
		iters[iterID] = struct{}{}
		g.iters[iterID] = iters
		//lint:ignore SA9003 - readability
	} else {
		// NB: the DB object does not track its open iterators because it never
		// closes.
	}
	readerID := g.iterReaderID[existingIterID]
	g.iterReaderID[iterID] = readerID
	g.deriveDB(iterID)

	var refreshBatch bool
	if readerID.tag() == batchTag {
		refreshBatch = g.rng.Intn(2) == 1
	}

	opts := g.itersLastOpts[existingIterID]
	// With 50% probability, consider modifying the iterator options used by the
	// clone.
	if g.rng.Intn(2) == 1 {
		g.maybeMutateOptions(readerID, &opts)
	}
	g.itersLastOpts[iterID] = opts

	g.iterCreationTimestamp[iterID] = g.keyManager.nextMetaTimestamp()
	g.iterReaderID[iterID] = g.iterReaderID[existingIterID]
	g.add(&newIterUsingCloneOp{
		existingIterID:  existingIterID,
		iterID:          iterID,
		refreshBatch:    refreshBatch,
		iterOpts:        opts,
		derivedReaderID: readerID,
	})
}

func (g *generator) iterClose(iterID objID) {
	g.liveIters.remove(iterID)
	if readerIters, ok := g.iters[iterID]; ok {
		delete(g.iters, iterID)
		delete(readerIters, iterID)
		//lint:ignore SA9003 - readability
	} else {
		// NB: the DB object does not track its open iterators because it never
		// closes.
	}

	readerID := g.iterReaderID[iterID]
	g.add(&closeOp{objID: iterID, derivedDBID: g.objDB[readerID]})
}

func (g *generator) iterSetBounds(iterID objID) {
	iterLastOpts := g.itersLastOpts[iterID]
	newOpts := iterLastOpts
	// TODO(jackson): The logic to increase the probability of advancing bounds
	// monotonically only applies if the snapshot is not bounded. Refactor to
	// allow bounded snapshots to benefit too, when possible.
	if !g.maybeSetSnapshotIterBounds(g.iterReaderID[iterID], &newOpts) {
		var lower, upper []byte
		genLower := g.rng.Float64() <= 0.9
		genUpper := g.rng.Float64() <= 0.9
		// When one of ensureLowerGE, ensureUpperLE is true, the new bounds
		// don't overlap with the previous bounds.
		var ensureLowerGE, ensureUpperLE bool
		if genLower && iterLastOpts.upper != nil && g.rng.Float64() <= 0.9 {
			ensureLowerGE = true
		}
		if (!ensureLowerGE || g.rng.Float64() < 0.5) && genUpper && iterLastOpts.lower != nil {
			ensureUpperLE = true
			ensureLowerGE = false
		}
		attempts := 0
		for {
			attempts++
			if genLower {
				// Generate a new key with a .1% probability.
				lower = g.randKeyToRead(0.001)
			}
			if genUpper {
				// Generate a new key with a .1% probability.
				upper = g.randKeyToRead(0.001)
			}
			if g.cmp(lower, upper) > 0 {
				lower, upper = upper, lower
			}
			if ensureLowerGE && g.cmp(iterLastOpts.upper, lower) > 0 {
				if attempts < 25 {
					continue
				}
				lower = iterLastOpts.upper
				upper = lower
				break
			}
			if ensureUpperLE && g.cmp(upper, iterLastOpts.lower) > 0 {
				if attempts < 25 {
					continue
				}
				upper = iterLastOpts.lower
				lower = upper
				break
			}
			break
		}
		newOpts.lower = lower
		newOpts.upper = upper
	}
	g.itersLastOpts[iterID] = newOpts
	g.add(&iterSetBoundsOp{
		iterID: iterID,
		lower:  newOpts.lower,
		upper:  newOpts.upper,
	})
	// Additionally seek the iterator in a manner consistent with the bounds,
	// and do some steps (Next/Prev). The seeking exercises typical
	// CockroachDB behavior when using iterators and the steps are trying to
	// stress the region near the bounds. Ideally, we should not do this as
	// part of generating a single op, but this is easier than trying to
	// control future op generation via generator state.
	doSeekLT := newOpts.upper != nil && g.rng.Float64() < 0.5
	doSeekGE := newOpts.lower != nil && g.rng.Float64() < 0.5
	if doSeekLT && doSeekGE {
		// Pick the seek.
		if g.rng.Float64() < 0.5 {
			doSeekGE = false
		} else {
			doSeekLT = false
		}
	}
	if doSeekLT {
		g.add(&iterSeekLTOp{
			iterID:          iterID,
			key:             newOpts.upper,
			derivedReaderID: g.iterReaderID[iterID],
		})
		if g.rng.Float64() < 0.5 {
			g.iterNext(iterID)
		}
		if g.rng.Float64() < 0.5 {
			g.iterNext(iterID)
		}
		if g.rng.Float64() < 0.5 {
			g.iterPrev(iterID)
		}
	} else if doSeekGE {
		g.add(&iterSeekGEOp{
			iterID:          iterID,
			key:             newOpts.lower,
			derivedReaderID: g.iterReaderID[iterID],
		})
		if g.rng.Float64() < 0.5 {
			g.iterPrev(iterID)
		}
		if g.rng.Float64() < 0.5 {
			g.iterPrev(iterID)
		}
		if g.rng.Float64() < 0.5 {
			g.iterNext(iterID)
		}
	}
}

func (g *generator) iterSetOptions(iterID objID) {
	opts := g.itersLastOpts[iterID]
	g.maybeMutateOptions(g.iterReaderID[iterID], &opts)
	g.itersLastOpts[iterID] = opts
	g.add(&iterSetOptionsOp{
		iterID:          iterID,
		iterOpts:        opts,
		derivedReaderID: g.iterReaderID[iterID],
	})

	// Additionally, perform a random absolute positioning operation. The
	// SetOptions contract requires one before the next relative positioning
	// operation. Ideally, we should not do this as part of generating a single
	// op, but this is easier than trying to control future op generation via
	// generator state.
	g.pickOneUniform(
		g.iterFirst,
		g.iterLast,
		g.iterSeekGE,
		g.iterSeekGEWithLimit,
		g.iterSeekPrefixGE,
		g.iterSeekLT,
		g.iterSeekLTWithLimit,
	)(iterID)
}

func (g *generator) iterSeekGE(iterID objID) {
	g.add(&iterSeekGEOp{
		iterID:          iterID,
		key:             g.randKeyToRead(0.001), // 0.1% new keys
		derivedReaderID: g.iterReaderID[iterID],
	})
}

func (g *generator) iterSeekGEWithLimit(iterID objID) {
	// 0.1% new keys
	key, limit := g.randKeyToRead(0.001), g.randKeyToRead(0.001)
	if g.cmp(key, limit) > 0 {
		key, limit = limit, key
	}
	g.add(&iterSeekGEOp{
		iterID:          iterID,
		key:             key,
		limit:           limit,
		derivedReaderID: g.iterReaderID[iterID],
	})
}

func (g *generator) randKeyToReadWithinBounds(lower, upper []byte, readerID objID) []*keyMeta {
	var inRangeKeys []*keyMeta
	for _, keyMeta := range g.keyManager.byObj[readerID] {
		posKey := keyMeta.key
		if g.cmp(posKey, lower) < 0 || g.cmp(posKey, upper) >= 0 {
			continue
		}
		inRangeKeys = append(inRangeKeys, keyMeta)
	}
	return inRangeKeys
}

func (g *generator) iterSeekPrefixGE(iterID objID) {
	lower := g.itersLastOpts[iterID].lower
	upper := g.itersLastOpts[iterID].upper
	iterCreationTimestamp := g.iterCreationTimestamp[iterID]
	var key []byte

	// We try to make sure that the SeekPrefixGE key is within the iter bounds,
	// and that the iter can read the key. If the key was created on a batch
	// which deleted the key, then the key will still be considered visible
	// by the current logic. We're also not accounting for keys written to
	// batches which haven't been persisted to the DB. But we're only picking
	// keys in a best-effort manner, and the logic is better than picking a
	// random key.
	if g.rng.Intn(10) >= 1 {
		possibleKeys := make([][]byte, 0, 100)
		inRangeKeys := g.randKeyToReadWithinBounds(lower, upper, g.objDB[iterID])
		for _, keyMeta := range inRangeKeys {
			visibleHistory := keyMeta.history.before(iterCreationTimestamp)

			// Check if the last op on this key set a value (e.g. SETs, MERGEs).
			// If so, the key should be visible to the iterator and would make
			// a good candidate for a SeekPrefixGE.
			if visibleHistory.hasVisibleValue() {
				possibleKeys = append(possibleKeys, keyMeta.key)
			}
		}

		if len(possibleKeys) > 0 {
			key = []byte(possibleKeys[g.rng.Int31n(int32(len(possibleKeys)))])
		}
	}

	if key == nil {
		// TODO(bananabrick): We should try and use keys within the bounds,
		// even if we couldn't find any keys visible to the iterator. However,
		// doing this in experiments didn't really increase the valid
		// SeekPrefixGE calls by much.
		key = g.randKeyToRead(0) // 0% new keys
	}

	g.add(&iterSeekPrefixGEOp{
		iterID:          iterID,
		key:             key,
		derivedReaderID: g.iterReaderID[iterID],
	})
}

func (g *generator) iterSeekLT(iterID objID) {
	g.add(&iterSeekLTOp{
		iterID:          iterID,
		key:             g.randKeyToRead(0.001), // 0.1% new keys
		derivedReaderID: g.iterReaderID[iterID],
	})
}

func (g *generator) iterSeekLTWithLimit(iterID objID) {
	// 0.1% new keys
	key, limit := g.randKeyToRead(0.001), g.randKeyToRead(0.001)
	if g.cmp(limit, key) > 0 {
		key, limit = limit, key
	}
	g.add(&iterSeekLTOp{
		iterID:          iterID,
		key:             key,
		limit:           limit,
		derivedReaderID: g.iterReaderID[iterID],
	})
}

// randIter performs partial func application ("currying"), returning a new
// function that supplies the given func with a random iterator.
func (g *generator) randIter(gen func(objID)) func() {
	return func() {
		if len(g.liveIters) == 0 {
			return
		}
		gen(g.liveIters.rand(g.rng))
	}
}
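
// For example:
//
//	next := g.randIter(g.iterNext) // func() that steps a random live iterator
//	next()                         // no-op while no iterators are live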

func (g *generator) iterFirst(iterID objID) {
	g.add(&iterFirstOp{
		iterID:          iterID,
		derivedReaderID: g.iterReaderID[iterID],
	})
}

func (g *generator) iterLast(iterID objID) {
	g.add(&iterLastOp{
		iterID:          iterID,
		derivedReaderID: g.iterReaderID[iterID],
	})
}

func (g *generator) iterNext(iterID objID) {
	g.add(&iterNextOp{
		iterID:          iterID,
		derivedReaderID: g.iterReaderID[iterID],
	})
}

func (g *generator) iterPrev(iterID objID) {
	g.add(&iterPrevOp{
		iterID:          iterID,
		derivedReaderID: g.iterReaderID[iterID],
	})
}

func (g *generator) iterNextWithLimit(iterID objID) {
	g.add(&iterNextOp{
		iterID:          iterID,
		limit:           g.randKeyToRead(0.001), // 0.1% new keys
		derivedReaderID: g.iterReaderID[iterID],
	})
}

func (g *generator) iterNextPrefix(iterID objID) {
	g.add(&iterNextPrefixOp{
		iterID:          iterID,
		derivedReaderID: g.iterReaderID[iterID],
	})
}

func (g *generator) iterCanSingleDelete(iterID objID) {
	g.add(&iterCanSingleDelOp{
		iterID:          iterID,
		derivedReaderID: g.iterReaderID[iterID],
	})
}

func (g *generator) iterPrevWithLimit(iterID objID) {
	g.add(&iterPrevOp{
		iterID:          iterID,
		limit:           g.randKeyToRead(0.001), // 0.1% new keys
		derivedReaderID: g.iterReaderID[iterID],
	})
}

func (g *generator) readerGet() {
	if len(g.liveReaders) == 0 {
		return
	}

	readerID := g.liveReaders.rand(g.rng)

	// If the chosen reader is a snapshot created with user-specified key
	// ranges, restrict the read to fall within one of the provided key ranges.
	var key []byte
	if bounds := g.snapshotBounds[readerID]; len(bounds) > 0 {
		kr := bounds[g.rng.Intn(len(bounds))]
		key = g.randKeyToReadInRange(0.001, kr) // 0.1% new keys
	} else {
		key = g.randKeyToRead(0.001) // 0.1% new keys
	}
	derivedDBID := objID(0)
	if readerID.tag() == batchTag || readerID.tag() == snapTag {
		derivedDBID = g.deriveDB(readerID)
	}
	g.add(&getOp{readerID: readerID, key: key, derivedDBID: derivedDBID})
}

func (g *generator) replicate() {
	if len(g.dbs) < 2 {
		return
	}

	source := g.dbs.rand(g.rng)
	dest := source
	for dest == source {
		dest = g.dbs.rand(g.rng)
	}

	startKey, endKey := g.prefixKeyRange()
	g.add(&replicateOp{
		source: source,
		dest:   dest,
		start:  startKey,
		end:    endKey,
	})
}

// generateDisjointKeyRanges generates n disjoint key ranges.
func (g *generator) generateDisjointKeyRanges(n int) []pebble.KeyRange {
	bounds := make([][]byte, 2*n)
	used := map[string]bool{}
	for i := 0; i < len(bounds); i++ {
		k := g.prefix(g.randKeyToRead(0.1))
		for used[string(k)] {
			k = g.prefix(g.randKeyToRead(0.1))
		}
		bounds[i] = k
		used[string(k)] = true
	}
	slices.SortFunc(bounds, g.cmp)
	keyRanges := make([]pebble.KeyRange, n)
	for i := range keyRanges {
		keyRanges[i] = pebble.KeyRange{
			Start: bounds[i*2],
			End:   bounds[i*2+1],
		}
	}
	return keyRanges
}
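
// For example, n=2 might draw the distinct prefixes "bar", "dog", "fox", and
// "jam" (sorted), yielding the disjoint ranges [bar, dog) and [fox, jam).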

func (g *generator) newSnapshot() {
	snapID := makeObjID(snapTag, g.init.snapshotSlots)
	g.init.snapshotSlots++
	g.liveSnapshots = append(g.liveSnapshots, snapID)
	g.liveReaders = append(g.liveReaders, snapID)
	dbID := g.dbs.rand(g.rng)
	g.objDB[snapID] = dbID

	iters := make(objIDSet)
	g.snapshots[snapID] = iters
	g.readers[snapID] = iters

	s := &newSnapshotOp{
		dbID:   dbID,
		snapID: snapID,
	}

	// Impose bounds on the keys that may be read with the snapshot. Setting
	// bounds allows some runs of the metamorphic test to use an
	// EventuallyFileOnlySnapshot instead of a Snapshot, testing equivalence
	// between the two for reads within those bounds.
	s.bounds = g.generateDisjointKeyRanges(
		g.rng.Intn(5) + 1, /* between 1-5 */
	)
	g.snapshotBounds[snapID] = s.bounds
	g.add(s)
}

func (g *generator) snapshotClose() {
	if len(g.liveSnapshots) == 0 {
		return
	}

	snapID := g.liveSnapshots.rand(g.rng)
	g.liveSnapshots.remove(snapID)
	iters := g.snapshots[snapID]
	delete(g.snapshots, snapID)
	g.liveReaders.remove(snapID)
	delete(g.readers, snapID)

	for _, id := range iters.sorted() {
		g.liveIters.remove(id)
		delete(g.iters, id)
		g.add(&closeOp{objID: id, derivedDBID: g.objDB[snapID]})
	}

	g.add(&closeOp{objID: snapID, derivedDBID: g.objDB[snapID]})
}

func (g *generator) writerApply() {
	if len(g.liveBatches) == 0 {
		return
	}
	if len(g.liveWriters) < 2 {
		panic(fmt.Sprintf("insufficient liveWriters (%d) to apply batch", len(g.liveWriters)))
	}

	batchID := g.liveBatches.rand(g.rng)
	dbID := g.objDB[batchID]

	var writerID objID
	for {
		// NB: The writer we're applying to, as well as the batch we're applying,
		// must be from the same DB. The writer could be the db itself. Applying
		// a batch from one DB on another DB results in a panic, so avoid that.
		writerID = g.liveWriters.rand(g.rng)
		writerDBID := writerID
		if writerID.tag() != dbTag {
			writerDBID = g.objDB[writerID]
		}
		if writerID != batchID && writerDBID == dbID {
			break
		}
	}

	// The batch we're applying may contain single delete tombstones that when
	// applied to the writer result in nondeterminism in the deleted key. If
	// that's the case, we can restore determinism by first deleting the key
	// from the writer.
	//
	// Generating additional operations here is not ideal, but it simplifies
	// single delete invariants significantly.
	singleDeleteConflicts := g.keyManager.checkForSingleDelConflicts(batchID, writerID, false /* collapsed */)
	for _, conflict := range singleDeleteConflicts {
		g.add(&deleteOp{
			writerID:    writerID,
			key:         conflict,
			derivedDBID: dbID,
		})
	}

	g.removeBatchFromGenerator(batchID)

	g.add(&applyOp{
		writerID: writerID,
		batchID:  batchID,
	})
	g.add(&closeOp{
		objID:       batchID,
		derivedDBID: dbID,
	})
}

func (g *generator) writerDelete() {
	if len(g.liveWriters) == 0 {
		return
	}

	writerID := g.liveWriters.rand(g.rng)
	derivedDBID := writerID
	if derivedDBID.tag() != dbTag {
		derivedDBID = g.objDB[writerID]
	}
	g.add(&deleteOp{
		writerID:    writerID,
		key:         g.randKeyToWrite(0.001), // 0.1% new keys
		derivedDBID: derivedDBID,
	})
}

func (g *generator) writerDeleteRange() {
	if len(g.liveWriters) == 0 {
		return
	}

	start := g.randKeyToWrite(0.001)
	end := g.randKeyToWrite(0.001)
	if g.cmp(start, end) > 0 {
		start, end = end, start
	}

	writerID := g.liveWriters.rand(g.rng)
	g.add(&deleteRangeOp{
		writerID: writerID,
		start:    start,
		end:      end,
	})
}

func (g *generator) writerRangeKeyDelete() {
	if len(g.liveWriters) == 0 {
		return
	}
	start, end := g.prefixKeyRange()

	writerID := g.liveWriters.rand(g.rng)
	g.add(&rangeKeyDeleteOp{
		writerID: writerID,
		start:    start,
		end:      end,
	})
}

func (g *generator) writerRangeKeySet() {
	if len(g.liveWriters) == 0 {
		return
	}
	start, end := g.prefixKeyRange()

	// 90% of the time, set a suffix.
	var suffix []byte
	if g.rng.Float64() < 0.90 {
		// Increase the max suffix 5% of the time.
		suffix = g.randSuffixToWrite(0.05)
	}

	writerID := g.liveWriters.rand(g.rng)
	g.add(&rangeKeySetOp{
		writerID: writerID,
		start:    start,
		end:      end,
		suffix:   suffix,
		value:    g.randValue(0, maxValueSize),
	})
}

func (g *generator) writerRangeKeyUnset() {
	if len(g.liveWriters) == 0 {
		return
	}
	start, end := g.prefixKeyRange()

	// 90% of the time, set a suffix.
	var suffix []byte
	if g.rng.Float64() < 0.90 {
		// Increase the max suffix 5% of the time.
		suffix = g.randSuffixToWrite(0.05)
	}

	// TODO(jackson): Increase probability of effective unsets? Purely random
	// unsets are unlikely to remove an active range key.

	writerID := g.liveWriters.rand(g.rng)
	g.add(&rangeKeyUnsetOp{
		writerID: writerID,
		start:    start,
		end:      end,
		suffix:   suffix,
	})
}

func (g *generator) writerIngest() {
	if len(g.liveBatches) == 0 {
		return
	}

	// Ingest between 1 and 3 batches.
	dbID := g.dbs.rand(g.rng)
	n := min(1+g.rng.Intn(3), len(g.liveBatches))
	batchIDs := make([]objID, n)
	derivedDBIDs := make([]objID, n)
	for i := 0; i < n; i++ {
		batchID := g.liveBatches.rand(g.rng)
		batchIDs[i] = batchID
		derivedDBIDs[i] = g.objDB[batchIDs[i]]
		g.removeBatchFromGenerator(batchID)
	}

	// Ingestions may fail if the ingested sstables overlap one another.
	// Either it succeeds and its keys are committed to the DB, or it fails and
	// the keys are not committed.
	if !g.keyManager.doObjectBoundsOverlap(batchIDs) {
		// This ingestion will succeed.
		//
		// The batches we're ingesting may contain single delete tombstones that
		// when applied to the writer result in nondeterminism in the deleted key.
		// If that's the case, we can restore determinism by first deleting the keys
		// from the writer.
		//
		// Generating additional operations here is not ideal, but it simplifies
		// single delete invariants significantly.
		for _, batchID := range batchIDs {
			singleDeleteConflicts := g.keyManager.checkForSingleDelConflicts(batchID, dbID, true /* collapsed */)
			for _, conflict := range singleDeleteConflicts {
				g.add(&deleteOp{
					writerID:    dbID,
					key:         conflict,
					derivedDBID: dbID,
				})
			}
		}
	}
	g.add(&ingestOp{
		dbID:         dbID,
		batchIDs:     batchIDs,
		derivedDBIDs: derivedDBIDs,
	})
}

func (g *generator) writerIngestAndExcise() {
	if len(g.liveBatches) == 0 {
		return
	}

	dbID := g.dbs.rand(g.rng)
	batchID := g.liveBatches.rand(g.rng)
	g.removeBatchFromGenerator(batchID)

	start, end := g.prefixKeyRange()
	derivedDBID := g.objDB[batchID]

	g.add(&ingestAndExciseOp{
		dbID:        dbID,
		batchID:     batchID,
		derivedDBID: derivedDBID,
		exciseStart: start,
		exciseEnd:   end,
	})
}

func (g *generator) writerMerge() {
	if len(g.liveWriters) == 0 {
		return
	}

	writerID := g.liveWriters.rand(g.rng)
	g.add(&mergeOp{
		writerID: writerID,
		// 20% new keys.
		key:   g.randKeyToWrite(0.2),
		value: g.randValue(0, maxValueSize),
	})
}

func (g *generator) writerSet() {
	if len(g.liveWriters) == 0 {
		return
	}

	writerID := g.liveWriters.rand(g.rng)
	g.add(&setOp{
		writerID: writerID,
		// 50% new keys.
		key:   g.randKeyToWrite(0.5),
		value: g.randValue(0, maxValueSize),
	})
}

func (g *generator) writerSingleDelete() {
	if len(g.liveWriters) == 0 {
		return
	}

	writerID := g.liveWriters.rand(g.rng)
	key := g.randKeyToSingleDelete(writerID)
	if key == nil {
		return
	}
	g.add(&singleDeleteOp{
		writerID: writerID,
		key:      key,
		// Keys eligible for single deletes can be removed with a regular
		// delete. Mutate a percentage of SINGLEDEL ops into DELETEs. Note that
		// here we are only determining whether the replacement *could* happen.
		// At test runtime, the `replaceSingleDelete` test option must also be
		// set to true for the single delete to be replaced.
		maybeReplaceDelete: g.rng.Float64() < 0.25,
	})
}

func (g *generator) maybeMutateOptions(readerID objID, opts *iterOpts) {
	// With 95% probability, allow changes to any options at all. This ensures
	// that in 5% of cases there are no changes, and SetOptions hits its fast
	// path.
	if g.rng.Intn(100) >= 5 {
		if !g.maybeSetSnapshotIterBounds(readerID, opts) {
			// With 1/3 probability, clear existing bounds.
			if opts.lower != nil && g.rng.Intn(3) == 0 {
				opts.lower = nil
			}
			if opts.upper != nil && g.rng.Intn(3) == 0 {
				opts.upper = nil
			}
			// With 1/3 probability, update the bounds.
			if g.rng.Intn(3) == 0 {
				// Generate a new key with a .1% probability.
				opts.lower = g.randKeyToRead(0.001)
			}
			if g.rng.Intn(3) == 0 {
				// Generate a new key with a .1% probability.
				opts.upper = g.randKeyToRead(0.001)
			}
			if g.cmp(opts.lower, opts.upper) > 0 {
				opts.lower, opts.upper = opts.upper, opts.lower
			}
		}

		// With 1/3 probability, update the key-types/mask.
		if g.rng.Intn(3) == 0 {
			opts.keyTypes, opts.maskSuffix = g.randKeyTypesAndMask()
		}

		// With 1/3 probability, clear existing filter.
		if opts.filterMax > 0 && g.rng.Intn(3) == 0 {
			opts.filterMax, opts.filterMin = 0, 0
		}
		// With 10% probability, set a filter range.
		if g.rng.Intn(10) == 1 {
			max := g.cfg.writeSuffixDist.Max()
			opts.filterMin, opts.filterMax = g.rng.Uint64n(max)+1, g.rng.Uint64n(max)+1
			if opts.filterMin > opts.filterMax {
				opts.filterMin, opts.filterMax = opts.filterMax, opts.filterMin
			} else if opts.filterMin == opts.filterMax {
				opts.filterMax = opts.filterMin + 1
			}
		}
		// With 10% probability, flip enablement of L6 filters.
		if g.rng.Float64() <= 0.1 {
			opts.useL6Filters = !opts.useL6Filters
		}
	}
}

func (g *generator) pickOneUniform(options ...func(objID)) func(objID) {
	i := g.rng.Intn(len(options))
	return options[i]
}

func (g *generator) cmp(a, b []byte) int {
	return g.keyManager.comparer.Compare(a, b)
}

func (g *generator) equal(a, b []byte) bool {
	return g.keyManager.comparer.Equal(a, b)
}

func (g *generator) split(a []byte) int {
	return g.keyManager.comparer.Split(a)
}

func (g *generator) prefix(a []byte) []byte {
	return a[:g.split(a)]
}

func (g *generator) String() string {
	var buf bytes.Buffer
	for _, op := range g.ops {
		fmt.Fprintf(&buf, "%s\n", op)
	}
	return buf.String()
}