github.com/cockroachdb/pebble@v1.1.1-0.20240513155919-3622ade60459/metamorphic/generator.go

     1  // Copyright 2019 The LevelDB-Go and Pebble Authors. All rights reserved. Use
     2  // of this source code is governed by a BSD-style license that can be found in
     3  // the LICENSE file.
     4  
     5  package metamorphic
     6  
     7  import (
     8  	"bytes"
     9  	"fmt"
    10  	"sort"
    11  
    12  	"github.com/cockroachdb/pebble"
    13  	"github.com/cockroachdb/pebble/internal/randvar"
    14  	"github.com/cockroachdb/pebble/internal/testkeys"
    15  	"golang.org/x/exp/rand"
    16  )
    17  
    18  const maxValueSize = 20
    19  
    20  type iterOpts struct {
    21  	lower    []byte
    22  	upper    []byte
    23  	keyTypes uint32 // pebble.IterKeyType
    24  	// maskSuffix may be set if keyTypes is IterKeyTypePointsAndRanges to
    25  	// configure IterOptions.RangeKeyMasking.Suffix.
    26  	maskSuffix []byte
    27  
    28  	// If filterMax is >0, this iterator will filter out any keys that have
    29  	// suffixes that don't fall within the range [filterMin,filterMax).
    30  	// Additionally, the iterator will be constructed with a block-property
    31  	// filter that filters out blocks accordingly. Not all OPTIONS hook up the
    32  	// corresponding block property collector, so block-filtering may still be
    33  	// effectively disabled in some runs. The iterator operations themselves
    34  	// however will always skip past any points that should be filtered to
    35  	// ensure determinism.
    36  	filterMin uint64
    37  	filterMax uint64
    38  
    39  	// see IterOptions.UseL6Filters.
    40  	useL6Filters bool
    41  
    42  	// NB: If adding or removing fields, ensure IsZero is in sync.
    43  }
    44  
    45  func (o iterOpts) IsZero() bool {
    46  	return o.lower == nil && o.upper == nil && o.keyTypes == 0 &&
    47  		o.maskSuffix == nil && o.filterMin == 0 && o.filterMax == 0 && !o.useL6Filters
    48  }
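
         // NB: illustrative sketch, not part of the upstream file. The "keep IsZero
         // in sync" note above suggests a simple spot-check: a zero-valued iterOpts
         // must report IsZero, and populating any single field must not. A
         // hypothetical check of that contract:
         func checkIsZeroExample() error {
         	var zero iterOpts
         	if !zero.IsZero() {
         		return fmt.Errorf("zero-valued iterOpts should report IsZero")
         	}
         	if (iterOpts{useL6Filters: true}).IsZero() {
         		return fmt.Errorf("non-zero iterOpts should not report IsZero")
         	}
         	return nil
         }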
    49  
    50  type generator struct {
    51  	cfg config
    52  	rng *rand.Rand
    53  
    54  	init *initOp
    55  	ops  []op
    56  
     57  	// keyManager tracks the state of keys at operation generation time.
    58  	keyManager *keyManager
     59  	// Unordered sets of object IDs for live objects. Used to randomly select an
     60  	// object when generating an operation. There are 4 concrete objects: the DB
    61  	// (of which there is exactly 1), batches, iterators, and snapshots.
    62  	//
    63  	// liveBatches contains the live indexed and write-only batches.
    64  	liveBatches objIDSlice
    65  	// liveIters contains the live iterators.
    66  	liveIters     objIDSlice
    67  	itersLastOpts map[objID]iterOpts
    68  	// liveReaders contains the DB, and any live indexed batches and snapshots. The DB is always
    69  	// at index 0.
    70  	liveReaders objIDSlice
    71  	// liveSnapshots contains the live snapshots.
    72  	liveSnapshots objIDSlice
    73  	// liveWriters contains the DB, and any live batches. The DB is always at index 0.
    74  	liveWriters objIDSlice
    75  
    76  	// Maps used to find associated objects during generation. These maps are not
    77  	// needed during test execution.
    78  	//
    79  	// batchID -> batch iters: used to keep track of the open iterators on an
    80  	// indexed batch. The iter set value will also be indexed by the readers map.
    81  	batches map[objID]objIDSet
    82  	// iterID -> reader iters: used to keep track of all of the open
    83  	// iterators. The iter set value will also be indexed by either the batches
    84  	// or snapshots maps.
    85  	iters map[objID]objIDSet
    86  	// readerID -> reader iters: used to keep track of the open iterators on a
    87  	// reader. The iter set value will also be indexed by either the batches or
    88  	// snapshots maps. This map is the union of batches and snapshots maps.
    89  	readers map[objID]objIDSet
    90  	// snapshotID -> snapshot iters: used to keep track of the open iterators on
    91  	// a snapshot. The iter set value will also be indexed by the readers map.
    92  	snapshots map[objID]objIDSet
    93  	// snapshotID -> bounds of the snapshot: only populated for snapshots that
    94  	// are constrained by bounds.
    95  	snapshotBounds map[objID][]pebble.KeyRange
     96  	// iterCreationTimestamp is the metaTimestamp at which the iter was created.
    97  	iterCreationTimestamp map[objID]int
    98  	// iterReaderID is a map from an iterID to a readerID.
    99  	iterReaderID map[objID]objID
   100  }
   101  
   102  func newGenerator(rng *rand.Rand, cfg config, km *keyManager) *generator {
   103  	g := &generator{
   104  		cfg:                   cfg,
   105  		rng:                   rng,
   106  		init:                  &initOp{},
   107  		keyManager:            km,
   108  		liveReaders:           objIDSlice{makeObjID(dbTag, 0)},
   109  		liveWriters:           objIDSlice{makeObjID(dbTag, 0)},
   110  		batches:               make(map[objID]objIDSet),
   111  		iters:                 make(map[objID]objIDSet),
   112  		readers:               make(map[objID]objIDSet),
   113  		snapshots:             make(map[objID]objIDSet),
   114  		snapshotBounds:        make(map[objID][]pebble.KeyRange),
   115  		itersLastOpts:         make(map[objID]iterOpts),
   116  		iterCreationTimestamp: make(map[objID]int),
   117  		iterReaderID:          make(map[objID]objID),
   118  	}
   119  	// Note that the initOp fields are populated during generation.
   120  	g.ops = append(g.ops, g.init)
   121  	return g
   122  }
   123  
   124  func generate(rng *rand.Rand, count uint64, cfg config, km *keyManager) []op {
   125  	g := newGenerator(rng, cfg, km)
   126  
   127  	generators := []func(){
   128  		batchAbort:                  g.batchAbort,
   129  		batchCommit:                 g.batchCommit,
   130  		dbCheckpoint:                g.dbCheckpoint,
   131  		dbCompact:                   g.dbCompact,
   132  		dbFlush:                     g.dbFlush,
   133  		dbRatchetFormatMajorVersion: g.dbRatchetFormatMajorVersion,
   134  		dbRestart:                   g.dbRestart,
   135  		iterClose:                   g.randIter(g.iterClose),
   136  		iterFirst:                   g.randIter(g.iterFirst),
   137  		iterLast:                    g.randIter(g.iterLast),
   138  		iterNext:                    g.randIter(g.iterNext),
   139  		iterNextWithLimit:           g.randIter(g.iterNextWithLimit),
   140  		iterNextPrefix:              g.randIter(g.iterNextPrefix),
   141  		iterCanSingleDelete:         g.randIter(g.iterCanSingleDelete),
   142  		iterPrev:                    g.randIter(g.iterPrev),
   143  		iterPrevWithLimit:           g.randIter(g.iterPrevWithLimit),
   144  		iterSeekGE:                  g.randIter(g.iterSeekGE),
   145  		iterSeekGEWithLimit:         g.randIter(g.iterSeekGEWithLimit),
   146  		iterSeekLT:                  g.randIter(g.iterSeekLT),
   147  		iterSeekLTWithLimit:         g.randIter(g.iterSeekLTWithLimit),
   148  		iterSeekPrefixGE:            g.randIter(g.iterSeekPrefixGE),
   149  		iterSetBounds:               g.randIter(g.iterSetBounds),
   150  		iterSetOptions:              g.randIter(g.iterSetOptions),
   151  		newBatch:                    g.newBatch,
   152  		newIndexedBatch:             g.newIndexedBatch,
   153  		newIter:                     g.newIter,
   154  		newIterUsingClone:           g.newIterUsingClone,
   155  		newSnapshot:                 g.newSnapshot,
   156  		readerGet:                   g.readerGet,
   157  		snapshotClose:               g.snapshotClose,
   158  		writerApply:                 g.writerApply,
   159  		writerDelete:                g.writerDelete,
   160  		writerDeleteRange:           g.writerDeleteRange,
   161  		writerIngest:                g.writerIngest,
   162  		writerMerge:                 g.writerMerge,
   163  		writerRangeKeyDelete:        g.writerRangeKeyDelete,
   164  		writerRangeKeySet:           g.writerRangeKeySet,
   165  		writerRangeKeyUnset:         g.writerRangeKeyUnset,
   166  		writerSet:                   g.writerSet,
   167  		writerSingleDelete:          g.writerSingleDelete,
   168  	}
   169  
   170  	// TPCC-style deck of cards randomization. Every time the end of the deck is
   171  	// reached, we shuffle the deck.
   172  	deck := randvar.NewDeck(g.rng, cfg.ops...)
   173  	for i := uint64(0); i < count; i++ {
   174  		generators[deck.Int()]()
   175  	}
   176  
   177  	g.dbClose()
   178  	return g.ops
   179  }
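
         // NB: illustrative sketch, not part of the upstream file, and a
         // simplification of randvar's deck. With deck-of-cards randomization, op
         // indices are dealt from a shuffled deck whose composition mirrors the
         // configured weights, so observed op frequencies match the weights exactly
         // over each full pass through the deck; independent sampling would only
         // match in expectation.
         type sketchDeck struct {
         	rng   *rand.Rand
         	cards []int // one entry per weighted occurrence of an op index
         	next  int
         }

         func (d *sketchDeck) deal() int {
         	if d.next == 0 { // start of a pass: reshuffle
         		d.rng.Shuffle(len(d.cards), func(i, j int) {
         			d.cards[i], d.cards[j] = d.cards[j], d.cards[i]
         		})
         	}
         	card := d.cards[d.next]
         	d.next = (d.next + 1) % len(d.cards)
         	return card
         }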
   180  
   181  func (g *generator) add(op op) {
   182  	g.ops = append(g.ops, op)
   183  	g.keyManager.update(op)
   184  }
   185  
   186  // randKeyToWrite returns a key for any write other than SingleDelete.
   187  //
   188  // TODO(peter): make the size and distribution of keys configurable. See
   189  // keyDist and keySizeDist in config.go.
   190  func (g *generator) randKeyToWrite(newKey float64) []byte {
   191  	return g.randKeyHelper(g.keyManager.eligibleWriteKeys(), newKey, nil)
   192  }
   193  
   194  // prefixKeyRange generates a [start, end) pair consisting of two prefix keys.
   195  func (g *generator) prefixKeyRange() ([]byte, []byte) {
   196  	start := g.randPrefixToWrite(0.001)
   197  	end := g.randPrefixToWrite(0.001)
   198  	for g.cmp(start, end) == 0 {
   199  		end = g.randPrefixToWrite(0.05)
   200  	}
   201  	if g.cmp(start, end) > 0 {
   202  		start, end = end, start
   203  	}
   204  	return start, end
   205  }
   206  
   207  // randPrefixToWrite returns a prefix key (a key with no suffix) for a range key
   208  // write operation.
   209  func (g *generator) randPrefixToWrite(newPrefix float64) []byte {
   210  	prefixes := g.keyManager.prefixes()
   211  	if len(prefixes) > 0 && g.rng.Float64() > newPrefix {
   212  		// Use an existing prefix.
   213  		p := g.rng.Intn(len(prefixes))
   214  		return prefixes[p]
   215  	}
   216  
   217  	// Use a new prefix.
   218  	var prefix []byte
   219  	for {
   220  		prefix = g.randKeyHelperSuffix(nil, 4, 12, 0)
   221  		if !g.keyManager.prefixExists(prefix) {
   222  			if !g.keyManager.addNewKey(prefix) {
   223  				panic("key must not exist if prefix doesn't exist")
   224  			}
   225  			return prefix
   226  		}
   227  	}
   228  }
   229  
    230  // randSuffixToWrite generates a random suffix according to the configuration's
    231  // suffix distribution. It takes a probability 0 ≤ incMaxProb ≤ 1.0 indicating
    232  // the probability with which the generator should increase the maximum suffix
    233  // it generates.
   234  //
   235  // randSuffixToWrite may return a nil suffix, with the probability the
   236  // configuration's suffix distribution assigns to the zero suffix.
   237  func (g *generator) randSuffixToWrite(incMaxProb float64) []byte {
   238  	if g.rng.Float64() < incMaxProb {
   239  		g.cfg.writeSuffixDist.IncMax(1)
   240  	}
   241  	return suffixFromInt(int64(g.cfg.writeSuffixDist.Uint64(g.rng)))
   242  }
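
         // NB: usage note, not part of the upstream file: a call such as
         // randSuffixToWrite(0.05) bumps the distribution's max roughly once every
         // 20 calls, so later writes skew toward newer (larger) suffixes while old
         // suffixes remain reachable.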
   243  
    244  // randSuffixToRead generates a random suffix used during reads. The suffixes
    245  // generated by this function are within the same range as suffixes generated
    246  // by randSuffixToWrite; however, randSuffixToRead pulls from a uniform
    247  // distribution.
    248  func (g *generator) randSuffixToRead() []byte {
    249  	// When reading, don't apply the recency skewing, in order to better
    250  	// exercise reading a mix of older and newer keys.
   251  	max := g.cfg.writeSuffixDist.Max()
   252  	return suffixFromInt(g.rng.Int63n(int64(max)))
   253  }
   254  
   255  func suffixFromInt(suffix int64) []byte {
   256  	// Treat the zero as no suffix to match the behavior during point key
   257  	// generation in randKeyHelper.
   258  	if suffix == 0 {
   259  		return nil
   260  	}
   261  	return testkeys.Suffix(suffix)
   262  }
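
         // NB: illustrative examples, not part of the upstream file:
         //
         //	suffixFromInt(0) // nil, i.e. an unsuffixed key
         //	suffixFromInt(7) // testkeys-formatted suffix, e.g. "@7"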
   263  
   264  func (g *generator) randKeyToSingleDelete(id objID) []byte {
   265  	keys := g.keyManager.eligibleSingleDeleteKeys(id)
   266  	length := len(keys)
   267  	if length == 0 {
   268  		return nil
   269  	}
   270  	return keys[g.rng.Intn(length)]
   271  }
   272  
   273  // randKeyToRead returns a key for read operations.
   274  func (g *generator) randKeyToRead(newKey float64) []byte {
   275  	return g.randKeyHelper(g.keyManager.eligibleReadKeys(), newKey, nil)
   276  }
   277  
   278  // randKeyToReadInRange returns a key for read operations within the provided
   279  // key range. The bounds of the provided key range must span a prefix boundary.
   280  func (g *generator) randKeyToReadInRange(newKey float64, kr pebble.KeyRange) []byte {
   281  	return g.randKeyHelper(g.keyManager.eligibleReadKeysInRange(kr), newKey, &kr)
   282  }
   283  
   284  func (g *generator) randKeyHelper(
   285  	keys [][]byte, newKey float64, newKeyBounds *pebble.KeyRange,
   286  ) []byte {
   287  	switch {
   288  	case len(keys) > 0 && g.rng.Float64() > newKey:
   289  		// Use an existing user key.
   290  		return keys[g.rng.Intn(len(keys))]
   291  
   292  	case len(keys) > 0 && g.rng.Float64() > g.cfg.newPrefix:
   293  		// Use an existing prefix but a new suffix, producing a new user key.
   294  		prefixes := g.keyManager.prefixes()
   295  
   296  		// If we're constrained to a key range, find which existing prefixes
   297  		// fall within that key range.
   298  		if newKeyBounds != nil {
   299  			s := sort.Search(len(prefixes), func(i int) bool {
   300  				return g.cmp(prefixes[i], newKeyBounds.Start) >= 0
   301  			})
   302  			e := sort.Search(len(prefixes), func(i int) bool {
   303  				return g.cmp(prefixes[i], newKeyBounds.End) >= 0
   304  			})
   305  			prefixes = prefixes[s:e]
   306  		}
   307  
   308  		if len(prefixes) > 0 {
   309  			for {
   310  				// Pick a prefix on each iteration in case most or all suffixes are
   311  				// already in use for any individual prefix.
   312  				p := g.rng.Intn(len(prefixes))
   313  				suffix := int64(g.cfg.writeSuffixDist.Uint64(g.rng))
   314  
   315  				var key []byte
   316  				if suffix > 0 {
   317  					key = resizeBuffer(key, len(prefixes[p]), testkeys.SuffixLen(suffix))
   318  					n := copy(key, prefixes[p])
   319  					testkeys.WriteSuffix(key[n:], suffix)
   320  				} else {
   321  					key = resizeBuffer(key, len(prefixes[p]), 0)
   322  					copy(key, prefixes[p])
   323  				}
   324  
   325  				if (newKeyBounds == nil || (g.cmp(key, newKeyBounds.Start) >= 0 && g.cmp(key, newKeyBounds.End) < 0)) &&
   326  					g.keyManager.addNewKey(key) {
   327  					return key
   328  				}
   329  
   330  				// If the generated key already existed, or the generated key
   331  				// fell outside the provided bounds, increase the suffix
   332  				// distribution and loop.
   333  				g.cfg.writeSuffixDist.IncMax(1)
   334  			}
   335  		}
   336  		// Otherwise fall through to generating a new prefix.
   337  		fallthrough
   338  
   339  	default:
   340  		// Use a new prefix, producing a new user key.
   341  
   342  		var key []byte
   343  
   344  		suffix := int64(g.cfg.writeSuffixDist.Uint64(g.rng))
   345  
   346  		// If we have bounds in which we need to generate the key, use
   347  		// testkeys.RandomSeparator to generate a key between the bounds.
   348  		if newKeyBounds != nil {
   349  			targetLength := 4 + g.rng.Intn(8)
   350  			key = testkeys.RandomSeparator(nil, g.prefix(newKeyBounds.Start), g.prefix(newKeyBounds.End),
   351  				suffix, targetLength, g.rng)
   352  		} else {
   353  			for {
   354  				key = g.randKeyHelperSuffix(nil, 4, 12, suffix)
   355  				if !g.keyManager.prefixExists(key[:testkeys.Comparer.Split(key)]) {
   356  					if !g.keyManager.addNewKey(key) {
   357  						panic("key must not exist if prefix doesn't exist")
   358  					}
   359  					break
   360  				}
   361  			}
   362  		}
   363  		return key
   364  	}
   365  }
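
         // NB: summary note, not part of the upstream file: randKeyHelper prefers,
         // in order, (1) an existing user key (probability 1-newKey), (2) an
         // existing prefix with a fresh suffix, and (3) a brand-new prefix. In case
         // (2) it grows the suffix distribution's max whenever a candidate collides
         // with an existing key or falls outside newKeyBounds.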
   366  
   367  // randKeyHelperSuffix is a helper function for randKeyHelper, and should not be
   368  // invoked directly.
   369  func (g *generator) randKeyHelperSuffix(
   370  	dst []byte, minPrefixLen, maxPrefixLen int, suffix int64,
   371  ) []byte {
   372  	n := minPrefixLen
   373  	if maxPrefixLen > minPrefixLen {
   374  		n += g.rng.Intn(maxPrefixLen - minPrefixLen)
   375  	}
   376  	// In order to test a mix of suffixed and unsuffixed keys, omit the zero
   377  	// suffix.
   378  	if suffix == 0 {
   379  		dst = resizeBuffer(dst, n, 0)
   380  		g.fillRand(dst)
   381  		return dst
   382  	}
   383  	suffixLen := testkeys.SuffixLen(suffix)
   384  	dst = resizeBuffer(dst, n, suffixLen)
   385  	g.fillRand(dst[:n])
   386  	testkeys.WriteSuffix(dst[n:], suffix)
   387  	return dst
   388  }
   389  
   390  func resizeBuffer(buf []byte, prefixLen, suffixLen int) []byte {
   391  	if cap(buf) >= prefixLen+suffixLen {
   392  		return buf[:prefixLen+suffixLen]
   393  	}
   394  	return make([]byte, prefixLen+suffixLen)
   395  }
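
         // NB: illustrative sketch, not part of the upstream file: resizeBuffer
         // reuses buf's backing array whenever the capacity suffices, so the
         // key-building helpers above can append suffixes without reallocating.
         func resizeBufferExample() {
         	buf := make([]byte, 0, 16)
         	a := resizeBuffer(buf, 4, 2)  // 4+2 ≤ cap(buf): reuses buf's storage
         	b := resizeBuffer(a, 12, 10) // 12+10 > cap(a): freshly allocated
         	_, _ = a, b
         }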
   396  
   397  // TODO(peter): make the value size configurable. See valueSizeDist in
   398  // config.go.
   399  func (g *generator) randValue(min, max int) []byte {
   400  	n := min
   401  	if max > min {
   402  		n += g.rng.Intn(max - min)
   403  	}
   404  	if n == 0 {
   405  		return nil
   406  	}
   407  	buf := make([]byte, n)
   408  	g.fillRand(buf)
   409  	return buf
   410  }
   411  
   412  func (g *generator) fillRand(buf []byte) {
   413  	// NB: The actual random values are not particularly important. We only use
   414  	// lowercase letters because that makes visual determination of ordering
   415  	// easier, rather than having to remember the lexicographic ordering of
   416  	// uppercase vs lowercase, or letters vs numbers vs punctuation.
   417  	const letters = "abcdefghijklmnopqrstuvwxyz"
   418  	const lettersLen = uint64(len(letters))
    419  	const lettersCharsPerRand = 12 // conservatively ≤ floor(log(math.MaxUint64)/log(lettersLen)) = 13
   420  
   421  	var r uint64
   422  	var q int
   423  	for i := 0; i < len(buf); i++ {
   424  		if q == 0 {
   425  			r = g.rng.Uint64()
   426  			q = lettersCharsPerRand
   427  		}
   428  		buf[i] = letters[r%lettersLen]
   429  		r = r / lettersLen
   430  		q--
   431  	}
   432  }
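
         // NB: worked note, not part of the upstream file: a single Uint64 safely
         // yields 12 base-26 digits because 26^12 ≈ 9.5e16 < 2^64 ≈ 1.8e19, so
         // fillRand consumes one random word per 12 output letters rather than one
         // per letter.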
   433  
   434  func (g *generator) newBatch() {
   435  	batchID := makeObjID(batchTag, g.init.batchSlots)
   436  	g.init.batchSlots++
   437  	g.liveBatches = append(g.liveBatches, batchID)
   438  	g.liveWriters = append(g.liveWriters, batchID)
   439  
   440  	g.add(&newBatchOp{
   441  		batchID: batchID,
   442  	})
   443  }
   444  
   445  func (g *generator) newIndexedBatch() {
   446  	batchID := makeObjID(batchTag, g.init.batchSlots)
   447  	g.init.batchSlots++
   448  	g.liveBatches = append(g.liveBatches, batchID)
   449  	g.liveReaders = append(g.liveReaders, batchID)
   450  	g.liveWriters = append(g.liveWriters, batchID)
   451  
   452  	iters := make(objIDSet)
   453  	g.batches[batchID] = iters
   454  	g.readers[batchID] = iters
   455  
   456  	g.add(&newIndexedBatchOp{
   457  		batchID: batchID,
   458  	})
   459  }
   460  
    461  // removeBatchFromGenerator will not generate a closeOp for the target batch,
    462  // as not every batch that is removed from the generator should be closed. For
   463  // example, running a closeOp before an ingestOp that contains the closed batch
   464  // will cause an error.
   465  func (g *generator) removeBatchFromGenerator(batchID objID) {
   466  	g.liveBatches.remove(batchID)
   467  	iters := g.batches[batchID]
   468  	delete(g.batches, batchID)
   469  
   470  	if iters != nil {
   471  		g.liveReaders.remove(batchID)
   472  		delete(g.readers, batchID)
   473  	}
   474  	g.liveWriters.remove(batchID)
   475  	for _, id := range iters.sorted() {
   476  		g.liveIters.remove(id)
   477  		delete(g.iters, id)
   478  		g.add(&closeOp{objID: id})
   479  	}
   480  }
   481  
   482  func (g *generator) batchAbort() {
   483  	if len(g.liveBatches) == 0 {
   484  		return
   485  	}
   486  
   487  	batchID := g.liveBatches.rand(g.rng)
   488  	g.removeBatchFromGenerator(batchID)
   489  
   490  	g.add(&closeOp{objID: batchID})
   491  }
   492  
   493  func (g *generator) batchCommit() {
   494  	if len(g.liveBatches) == 0 {
   495  		return
   496  	}
   497  
   498  	batchID := g.liveBatches.rand(g.rng)
   499  	g.removeBatchFromGenerator(batchID)
   500  	g.add(&batchCommitOp{
   501  		batchID: batchID,
   502  	})
   503  	g.add(&closeOp{objID: batchID})
   504  
   505  }
   506  
   507  func (g *generator) dbClose() {
   508  	// Close any live iterators and snapshots, so that we can close the DB
   509  	// cleanly.
   510  	for len(g.liveIters) > 0 {
   511  		g.randIter(g.iterClose)()
   512  	}
   513  	for len(g.liveSnapshots) > 0 {
   514  		g.snapshotClose()
   515  	}
   516  	for len(g.liveBatches) > 0 {
   517  		batchID := g.liveBatches[0]
   518  		g.removeBatchFromGenerator(batchID)
   519  		g.add(&closeOp{objID: batchID})
   520  	}
   521  	g.add(&closeOp{objID: makeObjID(dbTag, 0)})
   522  }
   523  
   524  func (g *generator) dbCheckpoint() {
   525  	// 1/2 of the time we don't restrict the checkpoint;
   526  	// 1/4 of the time we restrict to 1 span;
   527  	// 1/8 of the time we restrict to 2 spans; etc.
   528  	numSpans := 0
   529  	var spans []pebble.CheckpointSpan
   530  	for g.rng.Intn(2) == 0 {
   531  		numSpans++
   532  	}
   533  	if numSpans > 0 {
   534  		spans = make([]pebble.CheckpointSpan, numSpans)
   535  	}
   536  	for i := range spans {
   537  		start := g.randKeyToRead(0.01)
   538  		end := g.randKeyToRead(0.01)
   539  		if g.cmp(start, end) > 0 {
   540  			start, end = end, start
   541  		}
   542  		spans[i].Start = start
   543  		spans[i].End = end
   544  	}
   545  	g.add(&checkpointOp{
   546  		spans: spans,
   547  	})
   548  }
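
         // NB: worked note, not part of the upstream file: the coin-flip loop above
         // draws numSpans from a geometric distribution, P(numSpans = k) = 1/2^(k+1),
         // which produces exactly the 1/2, 1/4, 1/8, ... schedule described in the
         // comment.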
   549  
   550  func (g *generator) dbCompact() {
   551  	// Generate new key(s) with a 1% probability.
   552  	start := g.randKeyToRead(0.01)
   553  	end := g.randKeyToRead(0.01)
   554  	if g.cmp(start, end) > 0 {
   555  		start, end = end, start
   556  	}
   557  	g.add(&compactOp{
   558  		start:       start,
   559  		end:         end,
   560  		parallelize: g.rng.Float64() < 0.5,
   561  	})
   562  }
   563  
   564  func (g *generator) dbFlush() {
   565  	g.add(&flushOp{})
   566  }
   567  
   568  func (g *generator) dbRatchetFormatMajorVersion() {
   569  	// Ratchet to a random format major version between the minimum the
   570  	// metamorphic tests support and the newest. At runtime, the generated
   571  	// version may be behind the database's format major version, in which case
   572  	// RatchetFormatMajorVersion should deterministically error.
   573  
   574  	n := int(newestFormatMajorVersionToTest - minimumFormatMajorVersion)
   575  	vers := pebble.FormatMajorVersion(g.rng.Intn(n+1)) + minimumFormatMajorVersion
   576  	g.add(&dbRatchetFormatMajorVersionOp{vers: vers})
   577  }
   578  
   579  func (g *generator) dbRestart() {
   580  	// Close any live iterators and snapshots, so that we can close the DB
   581  	// cleanly.
   582  	for len(g.liveIters) > 0 {
   583  		g.randIter(g.iterClose)()
   584  	}
   585  	for len(g.liveSnapshots) > 0 {
   586  		g.snapshotClose()
   587  	}
   588  	// Close the batches.
   589  	for len(g.liveBatches) > 0 {
   590  		batchID := g.liveBatches[0]
   591  		g.removeBatchFromGenerator(batchID)
   592  		g.add(&closeOp{objID: batchID})
   593  	}
   594  	if len(g.liveReaders) != 1 || len(g.liveWriters) != 1 {
   595  		panic(fmt.Sprintf("unexpected counts: liveReaders %d, liveWriters: %d",
   596  			len(g.liveReaders), len(g.liveWriters)))
   597  	}
   598  	g.add(&dbRestartOp{})
   599  }
   600  
   601  // maybeSetSnapshotIterBounds must be called whenever creating a new iterator or
   602  // modifying the bounds of an iterator. If the iterator is backed by a snapshot
   603  // that only guarantees consistency within a limited set of key spans, then the
    604  // iterator must set bounds within one of the snapshot's consistent key spans.
    605  // It returns true if the provided readerID is a bounded snapshot and bounds
    606  // were set.
   607  func (g *generator) maybeSetSnapshotIterBounds(readerID objID, opts *iterOpts) bool {
   608  	snapBounds, isBoundedSnapshot := g.snapshotBounds[readerID]
   609  	if !isBoundedSnapshot {
   610  		return false
   611  	}
    612  	// Pick one of the snapshot's key ranges to bound the iterator within.
   613  	parentBounds := snapBounds[g.rng.Intn(len(snapBounds))]
   614  	// With 10% probability, use the parent start bound as-is.
   615  	if g.rng.Float64() <= 0.1 {
   616  		opts.lower = parentBounds.Start
   617  	} else {
   618  		opts.lower = testkeys.RandomSeparator(
   619  			nil, /* dst */
   620  			parentBounds.Start,
   621  			parentBounds.End,
   622  			0, /* suffix */
   623  			4+g.rng.Intn(8),
   624  			g.rng,
   625  		)
   626  	}
   627  	// With 10% probability, use the parent end bound as-is.
   628  	if g.rng.Float64() <= 0.1 {
   629  		opts.upper = parentBounds.End
   630  	} else {
   631  		opts.upper = testkeys.RandomSeparator(
   632  			nil, /* dst */
   633  			opts.lower,
   634  			parentBounds.End,
   635  			0, /* suffix */
   636  			4+g.rng.Intn(8),
   637  			g.rng,
   638  		)
   639  	}
   640  	return true
   641  }
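
         // NB: usage note, not part of the upstream file: because opts.upper is
         // separated from opts.lower rather than from parentBounds.Start, the
         // resulting [lower, upper) interval always nests inside the chosen parent
         // key range, keeping the iterator within a span the snapshot guarantees to
         // be consistent.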
   642  
   643  func (g *generator) newIter() {
   644  	iterID := makeObjID(iterTag, g.init.iterSlots)
   645  	g.init.iterSlots++
   646  	g.liveIters = append(g.liveIters, iterID)
   647  
   648  	readerID := g.liveReaders.rand(g.rng)
   649  	if iters := g.readers[readerID]; iters != nil {
   650  		iters[iterID] = struct{}{}
   651  		g.iters[iterID] = iters
   652  		//lint:ignore SA9003 - readability
   653  	} else {
   654  		// NB: the DB object does not track its open iterators because it never
   655  		// closes.
   656  	}
   657  	g.iterReaderID[iterID] = readerID
   658  
   659  	var opts iterOpts
   660  	if !g.maybeSetSnapshotIterBounds(readerID, &opts) {
   661  		// Generate lower/upper bounds with a 10% probability.
   662  		if g.rng.Float64() <= 0.1 {
   663  			// Generate a new key with a .1% probability.
   664  			opts.lower = g.randKeyToRead(0.001)
   665  		}
   666  		if g.rng.Float64() <= 0.1 {
   667  			// Generate a new key with a .1% probability.
   668  			opts.upper = g.randKeyToRead(0.001)
   669  		}
   670  		if g.cmp(opts.lower, opts.upper) > 0 {
   671  			opts.lower, opts.upper = opts.upper, opts.lower
   672  		}
   673  	}
   674  	opts.keyTypes, opts.maskSuffix = g.randKeyTypesAndMask()
   675  
   676  	// With 10% probability, enable automatic filtering of keys with suffixes
   677  	// not in the provided range. This filtering occurs both through
   678  	// block-property filtering and explicitly within the iterator operations to
   679  	// ensure determinism.
   680  	if g.rng.Float64() <= 0.1 {
   681  		max := g.cfg.writeSuffixDist.Max()
   682  		opts.filterMin, opts.filterMax = g.rng.Uint64n(max)+1, g.rng.Uint64n(max)+1
   683  		if opts.filterMin > opts.filterMax {
   684  			opts.filterMin, opts.filterMax = opts.filterMax, opts.filterMin
   685  		} else if opts.filterMin == opts.filterMax {
   686  			opts.filterMax = opts.filterMin + 1
   687  		}
   688  	}
   689  
   690  	// Enable L6 filters with a 10% probability.
   691  	if g.rng.Float64() <= 0.1 {
   692  		opts.useL6Filters = true
   693  	}
   694  
   695  	g.itersLastOpts[iterID] = opts
   696  	g.iterCreationTimestamp[iterID] = g.keyManager.nextMetaTimestamp()
   697  	g.iterReaderID[iterID] = readerID
   698  	g.add(&newIterOp{
   699  		readerID: readerID,
   700  		iterID:   iterID,
   701  		iterOpts: opts,
   702  	})
   703  }
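
         // NB: illustrative sketch, not part of the upstream file: the filter fix-up
         // above (repeated in maybeMutateOptions) turns two uniform draws into a
         // non-empty half-open suffix range. The same logic as a hypothetical
         // standalone helper:
         func normalizeFilterRange(a, b uint64) (filterMin, filterMax uint64) {
         	filterMin, filterMax = a, b
         	if filterMin > filterMax {
         		filterMin, filterMax = filterMax, filterMin
         	} else if filterMin == filterMax {
         		filterMax = filterMin + 1 // e.g. (3, 3) becomes [3, 4)
         	}
         	return filterMin, filterMax
         }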
   704  
   705  func (g *generator) randKeyTypesAndMask() (keyTypes uint32, maskSuffix []byte) {
   706  	// Iterate over different key types.
   707  	p := g.rng.Float64()
   708  	switch {
   709  	case p < 0.2: // 20% probability
   710  		keyTypes = uint32(pebble.IterKeyTypePointsOnly)
   711  	case p < 0.8: // 60% probability
   712  		keyTypes = uint32(pebble.IterKeyTypePointsAndRanges)
   713  		// With 50% probability, enable masking.
   714  		if g.rng.Intn(2) == 1 {
   715  			maskSuffix = g.randSuffixToRead()
   716  		}
   717  	default: // 20% probability
   718  		keyTypes = uint32(pebble.IterKeyTypeRangesOnly)
   719  	}
   720  	return keyTypes, maskSuffix
   721  }
   722  
   723  func (g *generator) newIterUsingClone() {
   724  	if len(g.liveIters) == 0 {
   725  		return
   726  	}
   727  	existingIterID := g.liveIters.rand(g.rng)
   728  	iterID := makeObjID(iterTag, g.init.iterSlots)
   729  	g.init.iterSlots++
   730  	g.liveIters = append(g.liveIters, iterID)
   731  	if iters := g.iters[existingIterID]; iters != nil {
   732  		iters[iterID] = struct{}{}
   733  		g.iters[iterID] = iters
   734  		//lint:ignore SA9003 - readability
   735  	} else {
   736  		// NB: the DB object does not track its open iterators because it never
   737  		// closes.
   738  	}
   739  	readerID := g.iterReaderID[existingIterID]
   740  	g.iterReaderID[iterID] = readerID
   741  
   742  	var refreshBatch bool
   743  	if readerID.tag() == batchTag {
   744  		refreshBatch = g.rng.Intn(2) == 1
   745  	}
   746  
   747  	opts := g.itersLastOpts[existingIterID]
   748  	// With 50% probability, consider modifying the iterator options used by the
   749  	// clone.
   750  	if g.rng.Intn(2) == 1 {
   751  		g.maybeMutateOptions(readerID, &opts)
   752  	}
   753  	g.itersLastOpts[iterID] = opts
   754  
   755  	g.iterCreationTimestamp[iterID] = g.keyManager.nextMetaTimestamp()
   756  	g.iterReaderID[iterID] = g.iterReaderID[existingIterID]
   757  	g.add(&newIterUsingCloneOp{
   758  		existingIterID:  existingIterID,
   759  		iterID:          iterID,
   760  		refreshBatch:    refreshBatch,
   761  		iterOpts:        opts,
   762  		derivedReaderID: readerID,
   763  	})
   764  }
   765  
   766  func (g *generator) iterClose(iterID objID) {
   767  	g.liveIters.remove(iterID)
   768  	if readerIters, ok := g.iters[iterID]; ok {
   769  		delete(g.iters, iterID)
   770  		delete(readerIters, iterID)
   771  		//lint:ignore SA9003 - readability
   772  	} else {
   773  		// NB: the DB object does not track its open iterators because it never
   774  		// closes.
   775  	}
   776  
   777  	g.add(&closeOp{objID: iterID})
   778  }
   779  
   780  func (g *generator) iterSetBounds(iterID objID) {
   781  	iterLastOpts := g.itersLastOpts[iterID]
   782  	newOpts := iterLastOpts
   783  	// TODO(jackson): The logic to increase the probability of advancing bounds
   784  	// monotonically only applies if the snapshot is not bounded. Refactor to
   785  	// allow bounded snapshots to benefit too, when possible.
   786  	if !g.maybeSetSnapshotIterBounds(g.iterReaderID[iterID], &newOpts) {
   787  		var lower, upper []byte
   788  		genLower := g.rng.Float64() <= 0.9
   789  		genUpper := g.rng.Float64() <= 0.9
   790  		// When one of ensureLowerGE, ensureUpperLE is true, the new bounds
   791  		// don't overlap with the previous bounds.
   792  		var ensureLowerGE, ensureUpperLE bool
   793  		if genLower && iterLastOpts.upper != nil && g.rng.Float64() <= 0.9 {
   794  			ensureLowerGE = true
   795  		}
   796  		if (!ensureLowerGE || g.rng.Float64() < 0.5) && genUpper && iterLastOpts.lower != nil {
   797  			ensureUpperLE = true
   798  			ensureLowerGE = false
   799  		}
   800  		attempts := 0
   801  		for {
   802  			attempts++
   803  			if genLower {
   804  				// Generate a new key with a .1% probability.
   805  				lower = g.randKeyToRead(0.001)
   806  			}
   807  			if genUpper {
   808  				// Generate a new key with a .1% probability.
   809  				upper = g.randKeyToRead(0.001)
   810  			}
   811  			if g.cmp(lower, upper) > 0 {
   812  				lower, upper = upper, lower
   813  			}
   814  			if ensureLowerGE && g.cmp(iterLastOpts.upper, lower) > 0 {
   815  				if attempts < 25 {
   816  					continue
   817  				}
   818  				lower = iterLastOpts.upper
   819  				upper = lower
   820  				break
   821  			}
   822  			if ensureUpperLE && g.cmp(upper, iterLastOpts.lower) > 0 {
   823  				if attempts < 25 {
   824  					continue
   825  				}
   826  				upper = iterLastOpts.lower
   827  				lower = upper
   828  				break
   829  			}
   830  			break
   831  		}
   832  		newOpts.lower = lower
   833  		newOpts.upper = upper
   834  	}
   835  	g.itersLastOpts[iterID] = newOpts
   836  	g.add(&iterSetBoundsOp{
   837  		iterID: iterID,
   838  		lower:  newOpts.lower,
   839  		upper:  newOpts.upper,
   840  	})
   841  	// Additionally seek the iterator in a manner consistent with the bounds,
   842  	// and do some steps (Next/Prev). The seeking exercises typical
   843  	// CockroachDB behavior when using iterators and the steps are trying to
   844  	// stress the region near the bounds. Ideally, we should not do this as
   845  	// part of generating a single op, but this is easier than trying to
   846  	// control future op generation via generator state.
   847  	doSeekLT := newOpts.upper != nil && g.rng.Float64() < 0.5
   848  	doSeekGE := newOpts.lower != nil && g.rng.Float64() < 0.5
   849  	if doSeekLT && doSeekGE {
   850  		// Pick the seek.
   851  		if g.rng.Float64() < 0.5 {
   852  			doSeekGE = false
   853  		} else {
   854  			doSeekLT = false
   855  		}
   856  	}
   857  	if doSeekLT {
   858  		g.add(&iterSeekLTOp{
   859  			iterID:          iterID,
   860  			key:             newOpts.upper,
   861  			derivedReaderID: g.iterReaderID[iterID],
   862  		})
   863  		if g.rng.Float64() < 0.5 {
   864  			g.iterNext(iterID)
   865  		}
   866  		if g.rng.Float64() < 0.5 {
   867  			g.iterNext(iterID)
   868  		}
   869  		if g.rng.Float64() < 0.5 {
   870  			g.iterPrev(iterID)
   871  		}
   872  	} else if doSeekGE {
   873  		g.add(&iterSeekGEOp{
   874  			iterID:          iterID,
   875  			key:             newOpts.lower,
   876  			derivedReaderID: g.iterReaderID[iterID],
   877  		})
   878  		if g.rng.Float64() < 0.5 {
   879  			g.iterPrev(iterID)
   880  		}
   881  		if g.rng.Float64() < 0.5 {
   882  			g.iterPrev(iterID)
   883  		}
   884  		if g.rng.Float64() < 0.5 {
   885  			g.iterNext(iterID)
   886  		}
   887  	}
   888  }
   889  
   890  func (g *generator) iterSetOptions(iterID objID) {
   891  	opts := g.itersLastOpts[iterID]
   892  	g.maybeMutateOptions(g.iterReaderID[iterID], &opts)
   893  	g.itersLastOpts[iterID] = opts
   894  	g.add(&iterSetOptionsOp{
   895  		iterID:          iterID,
   896  		iterOpts:        opts,
   897  		derivedReaderID: g.iterReaderID[iterID],
   898  	})
   899  
   900  	// Additionally, perform a random absolute positioning operation. The
   901  	// SetOptions contract requires one before the next relative positioning
   902  	// operation. Ideally, we should not do this as part of generating a single
   903  	// op, but this is easier than trying to control future op generation via
   904  	// generator state.
   905  	g.pickOneUniform(
   906  		g.iterFirst,
   907  		g.iterLast,
   908  		g.iterSeekGE,
   909  		g.iterSeekGEWithLimit,
   910  		g.iterSeekPrefixGE,
   911  		g.iterSeekLT,
   912  		g.iterSeekLTWithLimit,
   913  	)(iterID)
   914  }
   915  
   916  func (g *generator) iterSeekGE(iterID objID) {
   917  	g.add(&iterSeekGEOp{
   918  		iterID:          iterID,
   919  		key:             g.randKeyToRead(0.001), // 0.1% new keys
   920  		derivedReaderID: g.iterReaderID[iterID],
   921  	})
   922  }
   923  
   924  func (g *generator) iterSeekGEWithLimit(iterID objID) {
   925  	// 0.1% new keys
   926  	key, limit := g.randKeyToRead(0.001), g.randKeyToRead(0.001)
   927  	if g.cmp(key, limit) > 0 {
   928  		key, limit = limit, key
   929  	}
   930  	g.add(&iterSeekGEOp{
   931  		iterID:          iterID,
   932  		key:             key,
   933  		limit:           limit,
   934  		derivedReaderID: g.iterReaderID[iterID],
   935  	})
   936  }
   937  
   938  func (g *generator) randKeyToReadWithinBounds(lower, upper []byte, readerID objID) []*keyMeta {
   939  	var inRangeKeys []*keyMeta
   940  	for _, keyMeta := range g.keyManager.byObj[readerID] {
   941  		posKey := keyMeta.key
   942  		if g.cmp(posKey, lower) < 0 || g.cmp(posKey, upper) >= 0 {
   943  			continue
   944  		}
   945  		inRangeKeys = append(inRangeKeys, keyMeta)
   946  	}
   947  	return inRangeKeys
   948  }
   949  
   950  func (g *generator) iterSeekPrefixGE(iterID objID) {
   951  	lower := g.itersLastOpts[iterID].lower
   952  	upper := g.itersLastOpts[iterID].upper
   953  	iterCreationTimestamp := g.iterCreationTimestamp[iterID]
   954  	var key []byte
   955  
   956  	// We try to make sure that the SeekPrefixGE key is within the iter bounds,
   957  	// and that the iter can read the key. If the key was created on a batch
   958  	// which deleted the key, then the key will still be considered visible
   959  	// by the current logic. We're also not accounting for keys written to
    960  	// batches which haven't been persisted to the DB. But we're only picking
    961  	// keys in a best-effort manner, and the logic is better than picking a
   962  	// random key.
   963  	if g.rng.Intn(10) >= 1 {
   964  		possibleKeys := make([][]byte, 0, 100)
   965  		inRangeKeys := g.randKeyToReadWithinBounds(lower, upper, dbObjID)
   966  		for _, keyMeta := range inRangeKeys {
   967  			posKey := keyMeta.key
   968  			var foundWriteWithoutDelete bool
   969  			for _, update := range keyMeta.updateOps {
   970  				if update.metaTimestamp > iterCreationTimestamp {
   971  					break
   972  				}
   973  
   974  				if update.deleted {
   975  					foundWriteWithoutDelete = false
   976  				} else {
   977  					foundWriteWithoutDelete = true
   978  				}
   979  			}
   980  			if foundWriteWithoutDelete {
   981  				possibleKeys = append(possibleKeys, posKey)
   982  			}
   983  		}
   984  
   985  		if len(possibleKeys) > 0 {
   986  			key = []byte(possibleKeys[g.rng.Int31n(int32(len(possibleKeys)))])
   987  		}
   988  	}
   989  
   990  	if key == nil {
   991  		// TODO(bananabrick): We should try and use keys within the bounds,
   992  		// even if we couldn't find any keys visible to the iterator. However,
   993  		// doing this in experiments didn't really increase the valid
   994  		// SeekPrefixGE calls by much.
   995  		key = g.randKeyToRead(0) // 0% new keys
   996  	}
   997  
   998  	g.add(&iterSeekPrefixGEOp{
   999  		iterID:          iterID,
  1000  		key:             key,
  1001  		derivedReaderID: g.iterReaderID[iterID],
  1002  	})
  1003  }
  1004  
  1005  func (g *generator) iterSeekLT(iterID objID) {
  1006  	g.add(&iterSeekLTOp{
  1007  		iterID:          iterID,
  1008  		key:             g.randKeyToRead(0.001), // 0.1% new keys
  1009  		derivedReaderID: g.iterReaderID[iterID],
  1010  	})
  1011  }
  1012  
  1013  func (g *generator) iterSeekLTWithLimit(iterID objID) {
  1014  	// 0.1% new keys
  1015  	key, limit := g.randKeyToRead(0.001), g.randKeyToRead(0.001)
  1016  	if g.cmp(limit, key) > 0 {
  1017  		key, limit = limit, key
  1018  	}
  1019  	g.add(&iterSeekLTOp{
  1020  		iterID:          iterID,
  1021  		key:             key,
  1022  		limit:           limit,
  1023  		derivedReaderID: g.iterReaderID[iterID],
  1024  	})
  1025  }
  1026  
  1027  // randIter performs partial func application ("currying"), returning a new
  1028  // function that supplies the given func with a random iterator.
  1029  func (g *generator) randIter(gen func(objID)) func() {
  1030  	return func() {
  1031  		if len(g.liveIters) == 0 {
  1032  			return
  1033  		}
  1034  		gen(g.liveIters.rand(g.rng))
  1035  	}
  1036  }
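
         // NB: usage note, not part of the upstream file: randIter adapts a
         // func(objID) method to the nullary signature stored in the generators
         // table; for example, g.randIter(g.iterFirst) yields a func() that is a
         // no-op while no iterators are live.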
  1037  
  1038  func (g *generator) iterFirst(iterID objID) {
  1039  	g.add(&iterFirstOp{
  1040  		iterID:          iterID,
  1041  		derivedReaderID: g.iterReaderID[iterID],
  1042  	})
  1043  }
  1044  
  1045  func (g *generator) iterLast(iterID objID) {
  1046  	g.add(&iterLastOp{
  1047  		iterID:          iterID,
  1048  		derivedReaderID: g.iterReaderID[iterID],
  1049  	})
  1050  }
  1051  
  1052  func (g *generator) iterNext(iterID objID) {
  1053  	g.add(&iterNextOp{
  1054  		iterID:          iterID,
  1055  		derivedReaderID: g.iterReaderID[iterID],
  1056  	})
  1057  }
  1058  
  1059  func (g *generator) iterPrev(iterID objID) {
  1060  	g.add(&iterPrevOp{
  1061  		iterID:          iterID,
  1062  		derivedReaderID: g.iterReaderID[iterID],
  1063  	})
  1064  }
  1065  
  1066  func (g *generator) iterNextWithLimit(iterID objID) {
  1067  	g.add(&iterNextOp{
  1068  		iterID:          iterID,
  1069  		limit:           g.randKeyToRead(0.001), // 0.1% new keys
  1070  		derivedReaderID: g.iterReaderID[iterID],
  1071  	})
  1072  }
  1073  
  1074  func (g *generator) iterNextPrefix(iterID objID) {
  1075  	g.add(&iterNextPrefixOp{
  1076  		iterID:          iterID,
  1077  		derivedReaderID: g.iterReaderID[iterID],
  1078  	})
  1079  }
  1080  
  1081  func (g *generator) iterCanSingleDelete(iterID objID) {
  1082  	g.add(&iterCanSingleDelOp{
  1083  		iterID:          iterID,
  1084  		derivedReaderID: g.iterReaderID[iterID],
  1085  	})
  1086  }
  1087  
  1088  func (g *generator) iterPrevWithLimit(iterID objID) {
  1089  	g.add(&iterPrevOp{
  1090  		iterID:          iterID,
  1091  		limit:           g.randKeyToRead(0.001), // 0.1% new keys
  1092  		derivedReaderID: g.iterReaderID[iterID],
  1093  	})
  1094  }
  1095  
  1096  func (g *generator) readerGet() {
  1097  	if len(g.liveReaders) == 0 {
  1098  		return
  1099  	}
  1100  
  1101  	readerID := g.liveReaders.rand(g.rng)
  1102  
  1103  	// If the chosen reader is a snapshot created with user-specified key
  1104  	// ranges, restrict the read to fall within one of the provided key ranges.
  1105  	var key []byte
  1106  	if bounds := g.snapshotBounds[readerID]; len(bounds) > 0 {
  1107  		kr := bounds[g.rng.Intn(len(bounds))]
  1108  		key = g.randKeyToReadInRange(0.001, kr) // 0.1% new keys
  1109  	} else {
  1110  		key = g.randKeyToRead(0.001) // 0.1% new keys
  1111  	}
  1112  	g.add(&getOp{readerID: readerID, key: key})
  1113  }
  1114  
  1115  // generateDisjointKeyRanges generates n disjoint key ranges.
  1116  func (g *generator) generateDisjointKeyRanges(n int) []pebble.KeyRange {
  1117  	bounds := make([][]byte, 2*n)
  1118  	used := map[string]bool{}
  1119  	for i := 0; i < len(bounds); i++ {
  1120  		k := g.prefix(g.randKeyToRead(0.1))
  1121  		for used[string(k)] {
  1122  			k = g.prefix(g.randKeyToRead(0.1))
  1123  		}
  1124  		bounds[i] = k
  1125  		used[string(k)] = true
  1126  	}
  1127  	sort.Slice(bounds, func(i, j int) bool {
  1128  		return g.cmp(bounds[i], bounds[j]) < 0
  1129  	})
  1130  	keyRanges := make([]pebble.KeyRange, n)
  1131  	for i := range keyRanges {
  1132  		keyRanges[i] = pebble.KeyRange{
  1133  			Start: bounds[i*2],
  1134  			End:   bounds[i*2+1],
  1135  		}
  1136  	}
  1137  	return keyRanges
  1138  }
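
         // NB: worked example, not part of the upstream file: for n = 2, four
         // distinct prefixes sorted as [a, c, f, m] are paired into the disjoint
         // ranges [a, c) and [f, m); disjointness follows from drawing 2n distinct
         // prefixes, sorting them, and consuming them in adjacent pairs.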
  1139  
  1140  func (g *generator) newSnapshot() {
  1141  	snapID := makeObjID(snapTag, g.init.snapshotSlots)
  1142  	g.init.snapshotSlots++
  1143  	g.liveSnapshots = append(g.liveSnapshots, snapID)
  1144  	g.liveReaders = append(g.liveReaders, snapID)
  1145  
  1146  	iters := make(objIDSet)
  1147  	g.snapshots[snapID] = iters
  1148  	g.readers[snapID] = iters
  1149  
  1150  	s := &newSnapshotOp{
  1151  		snapID: snapID,
  1152  	}
  1153  
  1154  	// With 75% probability, impose bounds on the keys that may be read with the
  1155  	// snapshot. Setting bounds allows some runs of the metamorphic test to use
  1156  	// a EventuallyFileOnlySnapshot instead of a Snapshot, testing equivalence
  1157  	// between the two for reads within those bounds.
  1158  	if g.rng.Float64() < 0.75 {
  1159  		s.bounds = g.generateDisjointKeyRanges(
  1160  			g.rng.Intn(5) + 1, /* between 1-5 */
  1161  		)
  1162  		g.snapshotBounds[snapID] = s.bounds
  1163  	}
  1164  	g.add(s)
  1165  }
  1166  
  1167  func (g *generator) snapshotClose() {
  1168  	if len(g.liveSnapshots) == 0 {
  1169  		return
  1170  	}
  1171  
  1172  	snapID := g.liveSnapshots.rand(g.rng)
  1173  	g.liveSnapshots.remove(snapID)
  1174  	iters := g.snapshots[snapID]
  1175  	delete(g.snapshots, snapID)
  1176  	g.liveReaders.remove(snapID)
  1177  	delete(g.readers, snapID)
  1178  
  1179  	for _, id := range iters.sorted() {
  1180  		g.liveIters.remove(id)
  1181  		delete(g.iters, id)
  1182  		g.add(&closeOp{objID: id})
  1183  	}
  1184  
  1185  	g.add(&closeOp{objID: snapID})
  1186  }
  1187  
  1188  func (g *generator) writerApply() {
  1189  	if len(g.liveBatches) == 0 {
  1190  		return
  1191  	}
  1192  	if len(g.liveWriters) < 2 {
  1193  		panic(fmt.Sprintf("insufficient liveWriters (%d) to apply batch", len(g.liveWriters)))
  1194  	}
  1195  
  1196  	batchID := g.liveBatches.rand(g.rng)
  1197  
  1198  	var writerID objID
  1199  	for {
  1200  		writerID = g.liveWriters.rand(g.rng)
  1201  		if writerID != batchID {
  1202  			break
  1203  		}
  1204  	}
  1205  
  1206  	g.removeBatchFromGenerator(batchID)
  1207  
  1208  	g.add(&applyOp{
  1209  		writerID: writerID,
  1210  		batchID:  batchID,
  1211  	})
   1212  	g.add(&closeOp{
   1213  		objID: batchID,
   1214  	})
  1215  }
  1216  
  1217  func (g *generator) writerDelete() {
  1218  	if len(g.liveWriters) == 0 {
  1219  		return
  1220  	}
  1221  
  1222  	writerID := g.liveWriters.rand(g.rng)
  1223  	g.add(&deleteOp{
  1224  		writerID: writerID,
  1225  		key:      g.randKeyToWrite(0.001), // 0.1% new keys
  1226  	})
  1227  }
  1228  
  1229  func (g *generator) writerDeleteRange() {
  1230  	if len(g.liveWriters) == 0 {
  1231  		return
  1232  	}
  1233  
  1234  	start := g.randKeyToWrite(0.001)
  1235  	end := g.randKeyToWrite(0.001)
  1236  	if g.cmp(start, end) > 0 {
  1237  		start, end = end, start
  1238  	}
  1239  
  1240  	writerID := g.liveWriters.rand(g.rng)
  1241  	g.add(&deleteRangeOp{
  1242  		writerID: writerID,
  1243  		start:    start,
  1244  		end:      end,
  1245  	})
  1246  }
  1247  
  1248  func (g *generator) writerRangeKeyDelete() {
  1249  	if len(g.liveWriters) == 0 {
  1250  		return
  1251  	}
  1252  	start, end := g.prefixKeyRange()
  1253  
  1254  	writerID := g.liveWriters.rand(g.rng)
  1255  	g.add(&rangeKeyDeleteOp{
  1256  		writerID: writerID,
  1257  		start:    start,
  1258  		end:      end,
  1259  	})
  1260  }
  1261  
  1262  func (g *generator) writerRangeKeySet() {
  1263  	if len(g.liveWriters) == 0 {
  1264  		return
  1265  	}
  1266  	start, end := g.prefixKeyRange()
  1267  
  1268  	// 90% of the time, set a suffix.
  1269  	var suffix []byte
  1270  	if g.rng.Float64() < 0.90 {
  1271  		// Increase the max suffix 5% of the time.
  1272  		suffix = g.randSuffixToWrite(0.05)
  1273  	}
  1274  
  1275  	writerID := g.liveWriters.rand(g.rng)
  1276  	g.add(&rangeKeySetOp{
  1277  		writerID: writerID,
  1278  		start:    start,
  1279  		end:      end,
  1280  		suffix:   suffix,
  1281  		value:    g.randValue(0, maxValueSize),
  1282  	})
  1283  }
  1284  
  1285  func (g *generator) writerRangeKeyUnset() {
  1286  	if len(g.liveWriters) == 0 {
  1287  		return
  1288  	}
  1289  	start, end := g.prefixKeyRange()
  1290  
  1291  	// 90% of the time, set a suffix.
  1292  	var suffix []byte
  1293  	if g.rng.Float64() < 0.90 {
  1294  		// Increase the max suffix 5% of the time.
  1295  		suffix = g.randSuffixToWrite(0.05)
  1296  	}
  1297  
  1298  	// TODO(jackson): Increase probability of effective unsets? Purely random
  1299  	// unsets are unlikely to remove an active range key.
  1300  
  1301  	writerID := g.liveWriters.rand(g.rng)
  1302  	g.add(&rangeKeyUnsetOp{
  1303  		writerID: writerID,
  1304  		start:    start,
  1305  		end:      end,
  1306  		suffix:   suffix,
  1307  	})
  1308  }
  1309  
  1310  func (g *generator) writerIngest() {
  1311  	if len(g.liveBatches) == 0 {
  1312  		return
  1313  	}
  1314  
  1315  	// TODO(nicktrav): this is resulting in too many single batch ingests.
  1316  	// Consider alternatives. One possibility would be to pass through whether
  1317  	// we can tolerate failure or not, and if the ingestOp encounters a
  1318  	// failure, it would retry after splitting into single batch ingests.
  1319  
  1320  	// Ingest between 1 and 3 batches.
  1321  	batchIDs := make([]objID, 0, 1+g.rng.Intn(3))
  1322  	canFail := cap(batchIDs) > 1
  1323  	for i := 0; i < cap(batchIDs); i++ {
  1324  		batchID := g.liveBatches.rand(g.rng)
  1325  		if canFail && !g.keyManager.canTolerateApplyFailure(batchID) {
  1326  			continue
  1327  		}
  1328  		// After the ingest runs, it either succeeds and the keys are in the
  1329  		// DB, or it fails and these keys never make it to the DB.
  1330  		g.removeBatchFromGenerator(batchID)
  1331  		batchIDs = append(batchIDs, batchID)
  1332  		if len(g.liveBatches) == 0 {
  1333  			break
  1334  		}
  1335  	}
  1336  	if len(batchIDs) == 0 && len(g.liveBatches) > 0 {
   1337  		// Unable to find a batch that can tolerate an apply failure via the
   1338  		// canTolerateApplyFailure call above, so just pick one batch.
  1339  		batchID := g.liveBatches.rand(g.rng)
  1340  		g.removeBatchFromGenerator(batchID)
  1341  		batchIDs = append(batchIDs, batchID)
  1342  	}
  1343  	g.add(&ingestOp{
  1344  		batchIDs: batchIDs,
  1345  	})
  1346  }
  1347  
  1348  func (g *generator) writerMerge() {
  1349  	if len(g.liveWriters) == 0 {
  1350  		return
  1351  	}
  1352  
  1353  	writerID := g.liveWriters.rand(g.rng)
  1354  	g.add(&mergeOp{
  1355  		writerID: writerID,
  1356  		// 20% new keys.
  1357  		key:   g.randKeyToWrite(0.2),
  1358  		value: g.randValue(0, maxValueSize),
  1359  	})
  1360  }
  1361  
  1362  func (g *generator) writerSet() {
  1363  	if len(g.liveWriters) == 0 {
  1364  		return
  1365  	}
  1366  
  1367  	writerID := g.liveWriters.rand(g.rng)
  1368  	g.add(&setOp{
  1369  		writerID: writerID,
  1370  		// 50% new keys.
  1371  		key:   g.randKeyToWrite(0.5),
  1372  		value: g.randValue(0, maxValueSize),
  1373  	})
  1374  }
  1375  
  1376  func (g *generator) writerSingleDelete() {
  1377  	if len(g.liveWriters) == 0 {
  1378  		return
  1379  	}
  1380  
  1381  	writerID := g.liveWriters.rand(g.rng)
  1382  	key := g.randKeyToSingleDelete(writerID)
  1383  	if key == nil {
  1384  		return
  1385  	}
  1386  	g.add(&singleDeleteOp{
  1387  		writerID: writerID,
  1388  		key:      key,
  1389  		// Keys eligible for single deletes can be removed with a regular
  1390  		// delete. Mutate a percentage of SINGLEDEL ops into DELETEs. Note that
  1391  		// here we are only determining whether the replacement *could* happen.
  1392  		// At test runtime, the `replaceSingleDelete` test option must also be
  1393  		// set to true for the single delete to be replaced.
  1394  		maybeReplaceDelete: g.rng.Float64() < 0.25,
  1395  	})
  1396  }
  1397  
  1398  func (g *generator) maybeMutateOptions(readerID objID, opts *iterOpts) {
  1399  	// With 95% probability, allow changes to any options at all. This ensures
  1400  	// that in 5% of cases there are no changes, and SetOptions hits its fast
  1401  	// path.
  1402  	if g.rng.Intn(100) >= 5 {
  1403  		if !g.maybeSetSnapshotIterBounds(readerID, opts) {
  1404  			// With 1/3 probability, clear existing bounds.
  1405  			if opts.lower != nil && g.rng.Intn(3) == 0 {
  1406  				opts.lower = nil
  1407  			}
  1408  			if opts.upper != nil && g.rng.Intn(3) == 0 {
  1409  				opts.upper = nil
  1410  			}
  1411  			// With 1/3 probability, update the bounds.
  1412  			if g.rng.Intn(3) == 0 {
  1413  				// Generate a new key with a .1% probability.
  1414  				opts.lower = g.randKeyToRead(0.001)
  1415  			}
  1416  			if g.rng.Intn(3) == 0 {
  1417  				// Generate a new key with a .1% probability.
  1418  				opts.upper = g.randKeyToRead(0.001)
  1419  			}
  1420  			if g.cmp(opts.lower, opts.upper) > 0 {
  1421  				opts.lower, opts.upper = opts.upper, opts.lower
  1422  			}
  1423  		}
  1424  
  1425  		// With 1/3 probability, update the key-types/mask.
  1426  		if g.rng.Intn(3) == 0 {
  1427  			opts.keyTypes, opts.maskSuffix = g.randKeyTypesAndMask()
  1428  		}
  1429  
  1430  		// With 1/3 probability, clear existing filter.
  1431  		if opts.filterMax > 0 && g.rng.Intn(3) == 0 {
  1432  			opts.filterMax, opts.filterMin = 0, 0
  1433  		}
  1434  		// With 10% probability, set a filter range.
  1435  		if g.rng.Intn(10) == 1 {
  1436  			max := g.cfg.writeSuffixDist.Max()
  1437  			opts.filterMin, opts.filterMax = g.rng.Uint64n(max)+1, g.rng.Uint64n(max)+1
  1438  			if opts.filterMin > opts.filterMax {
  1439  				opts.filterMin, opts.filterMax = opts.filterMax, opts.filterMin
  1440  			} else if opts.filterMin == opts.filterMax {
  1441  				opts.filterMax = opts.filterMin + 1
  1442  			}
  1443  		}
  1444  		// With 10% probability, flip enablement of L6 filters.
  1445  		if g.rng.Float64() <= 0.1 {
  1446  			opts.useL6Filters = !opts.useL6Filters
  1447  		}
  1448  	}
  1449  }
  1450  
  1451  func (g *generator) pickOneUniform(options ...func(objID)) func(objID) {
  1452  	i := g.rng.Intn(len(options))
  1453  	return options[i]
  1454  }
  1455  
  1456  func (g *generator) cmp(a, b []byte) int {
  1457  	return g.keyManager.comparer.Compare(a, b)
  1458  }
  1459  
  1460  func (g *generator) equal(a, b []byte) bool {
  1461  	return g.keyManager.comparer.Equal(a, b)
  1462  }
  1463  
  1464  func (g *generator) split(a []byte) int {
  1465  	return g.keyManager.comparer.Split(a)
  1466  }
  1467  
  1468  func (g *generator) prefix(a []byte) []byte {
  1469  	return a[:g.split(a)]
  1470  }
  1471  
  1472  func (g *generator) String() string {
  1473  	var buf bytes.Buffer
  1474  	for _, op := range g.ops {
  1475  		fmt.Fprintf(&buf, "%s\n", op)
  1476  	}
  1477  	return buf.String()
  1478  }