github.com/cockroachdb/pebble@v1.1.1-0.20240513155919-3622ade60459/metamorphic/ops.go

     1  // Copyright 2019 The LevelDB-Go and Pebble Authors. All rights reserved. Use
     2  // of this source code is governed by a BSD-style license that can be found in
     3  // the LICENSE file.
     4  
     5  package metamorphic
     6  
     7  import (
     8  	"bytes"
     9  	"crypto/rand"
    10  	"encoding/binary"
    11  	"fmt"
    12  	"io"
    13  	"path/filepath"
    14  	"strings"
    15  
    16  	"github.com/cockroachdb/errors"
    17  	"github.com/cockroachdb/pebble"
    18  	"github.com/cockroachdb/pebble/internal/base"
    19  	"github.com/cockroachdb/pebble/internal/keyspan"
    20  	"github.com/cockroachdb/pebble/internal/private"
    21  	"github.com/cockroachdb/pebble/internal/testkeys"
    22  	"github.com/cockroachdb/pebble/objstorage/objstorageprovider"
    23  	"github.com/cockroachdb/pebble/sstable"
    24  	"github.com/cockroachdb/pebble/vfs"
    25  	"github.com/cockroachdb/pebble/vfs/errorfs"
    26  )
    27  
    28  // op defines the interface for a single operation, such as creating a batch,
    29  // or advancing an iterator.
    30  type op interface {
    31  	String() string
    32  	run(t *test, h historyRecorder)
    33  
    34  	// receiver returns the object ID of the object the operation is performed
    35  	// on. Every operation has a receiver (eg, batch0.Set(...) has `batch0` as
    36  	// its receiver). Receivers are used for synchronization when running with
    37  	// concurrency.
    38  	receiver() objID
    39  
    40  	// syncObjs returns an additional set of object IDs—excluding the
    41  	// receiver—that the operation must synchronize with. At execution time,
    42  	// the operation will run serially with respect to all other operations
    43  	// that return these objects from their own syncObjs or receiver methods.
    44  	syncObjs() objIDSlice
    45  }
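         
         // As a concrete illustration (a sketch grounded in the ops defined below,
         // using hypothetical object IDs): db.Apply(batch2) is modeled by an applyOp
         // whose receiver is the db object and whose syncObjs contain batch2, so the
         // apply never runs concurrently with operations that mutate batch2. A plain
         // batch2.Set(...) has batch2 itself as its receiver and no extra syncObjs.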
    46  
     47  // initOp performs test initialization.
    48  type initOp struct {
    49  	batchSlots    uint32
    50  	iterSlots     uint32
    51  	snapshotSlots uint32
    52  }
    53  
    54  func (o *initOp) run(t *test, h historyRecorder) {
    55  	t.batches = make([]*pebble.Batch, o.batchSlots)
    56  	t.iters = make([]*retryableIter, o.iterSlots)
    57  	t.snapshots = make([]readerCloser, o.snapshotSlots)
    58  	h.Recordf("%s", o)
    59  }
    60  
    61  func (o *initOp) String() string {
    62  	return fmt.Sprintf("Init(%d /* batches */, %d /* iters */, %d /* snapshots */)",
    63  		o.batchSlots, o.iterSlots, o.snapshotSlots)
    64  }
    65  
    66  func (o *initOp) receiver() objID      { return dbObjID }
    67  func (o *initOp) syncObjs() objIDSlice { return nil }
    68  
    69  // applyOp models a Writer.Apply operation.
    70  type applyOp struct {
    71  	writerID objID
    72  	batchID  objID
    73  }
    74  
    75  func (o *applyOp) run(t *test, h historyRecorder) {
    76  	b := t.getBatch(o.batchID)
    77  	w := t.getWriter(o.writerID)
    78  	var err error
    79  	if o.writerID.tag() == dbTag && t.testOpts.asyncApplyToDB && t.writeOpts.Sync {
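         		// ApplyNoSyncWait applies the batch without waiting for the WAL sync to
         		// complete; the follow-up SyncWait blocks until the batch is durable, so
         		// the pair is equivalent to a synchronous Apply while exercising the
         		// asynchronous commit path.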
    80  		err = w.(*pebble.DB).ApplyNoSyncWait(b, t.writeOpts)
    81  		if err == nil {
    82  			err = b.SyncWait()
    83  		}
    84  	} else {
    85  		err = w.Apply(b, t.writeOpts)
    86  	}
    87  	h.Recordf("%s // %v", o, err)
     88  	// The batch will be closed by a closeOp, which is guaranteed to be generated.
    89  }
    90  
    91  func (o *applyOp) String() string  { return fmt.Sprintf("%s.Apply(%s)", o.writerID, o.batchID) }
    92  func (o *applyOp) receiver() objID { return o.writerID }
    93  func (o *applyOp) syncObjs() objIDSlice {
    94  	// Apply should not be concurrent with operations that are mutating the
    95  	// batch.
    96  	return []objID{o.batchID}
    97  }
    98  
    99  // checkpointOp models a DB.Checkpoint operation.
   100  type checkpointOp struct {
   101  	// If non-empty, the checkpoint is restricted to these spans.
   102  	spans []pebble.CheckpointSpan
   103  }
   104  
   105  func (o *checkpointOp) run(t *test, h historyRecorder) {
   106  	// TODO(josh): db.Checkpoint does not work with shared storage yet.
   107  	// It would be better to filter out ahead of calling run on the op,
   108  	// by setting the weight that generator.go uses to zero, or similar.
   109  	// But IIUC the ops are shared for ALL the metamorphic test runs, so
   110  	// not sure how to do that easily:
   111  	// https://github.com/cockroachdb/pebble/blob/master/metamorphic/meta.go#L177
   112  	if t.testOpts.sharedStorageEnabled {
   113  		h.Recordf("%s // %v", o, nil)
   114  		return
   115  	}
   116  	var opts []pebble.CheckpointOption
   117  	if len(o.spans) > 0 {
   118  		opts = append(opts, pebble.WithRestrictToSpans(o.spans))
   119  	}
   120  	err := withRetries(func() error {
   121  		return t.db.Checkpoint(o.dir(t.dir, h.op), opts...)
   122  	})
   123  	h.Recordf("%s // %v", o, err)
   124  }
   125  
   126  func (o *checkpointOp) dir(dataDir string, idx int) string {
   127  	return filepath.Join(dataDir, "checkpoints", fmt.Sprintf("op-%06d", idx))
   128  }
   129  
   130  func (o *checkpointOp) String() string {
   131  	var spanStr bytes.Buffer
   132  	for i, span := range o.spans {
   133  		if i > 0 {
   134  			spanStr.WriteString(",")
   135  		}
   136  		fmt.Fprintf(&spanStr, "%q,%q", span.Start, span.End)
   137  	}
   138  	return fmt.Sprintf("db.Checkpoint(%s)", spanStr.String())
   139  }
   140  
   141  func (o *checkpointOp) receiver() objID      { return dbObjID }
   142  func (o *checkpointOp) syncObjs() objIDSlice { return nil }
   143  
   144  // closeOp models a {Batch,Iterator,Snapshot}.Close operation.
   145  type closeOp struct {
   146  	objID objID
   147  }
   148  
   149  func (o *closeOp) run(t *test, h historyRecorder) {
   150  	c := t.getCloser(o.objID)
   151  	if o.objID.tag() == dbTag && t.opts.DisableWAL {
   152  		// Special case: If WAL is disabled, do a flush right before DB Close. This
   153  		// allows us to reuse this run's data directory as initial state for
   154  		// future runs without losing any mutations.
   155  		_ = t.db.Flush()
   156  	}
   157  	t.clearObj(o.objID)
   158  	err := c.Close()
   159  	h.Recordf("%s // %v", o, err)
   160  }
   161  
   162  func (o *closeOp) String() string  { return fmt.Sprintf("%s.Close()", o.objID) }
   163  func (o *closeOp) receiver() objID { return o.objID }
   164  func (o *closeOp) syncObjs() objIDSlice {
   165  	// Synchronize on the database so that we don't close the database before
   166  	// all its iterators, snapshots and batches are closed.
   167  	// TODO(jackson): It would be nice to relax this so that Close calls can
   168  	// execute in parallel.
   169  	if o.objID == dbObjID {
   170  		return nil
   171  	}
   172  	return []objID{dbObjID}
   173  }
   174  
   175  // compactOp models a DB.Compact operation.
   176  type compactOp struct {
   177  	start       []byte
   178  	end         []byte
   179  	parallelize bool
   180  }
   181  
   182  func (o *compactOp) run(t *test, h historyRecorder) {
   183  	err := withRetries(func() error {
   184  		return t.db.Compact(o.start, o.end, o.parallelize)
   185  	})
   186  	h.Recordf("%s // %v", o, err)
   187  }
   188  
   189  func (o *compactOp) String() string {
   190  	return fmt.Sprintf("db.Compact(%q, %q, %t /* parallelize */)", o.start, o.end, o.parallelize)
   191  }
   192  
   193  func (o *compactOp) receiver() objID      { return dbObjID }
   194  func (o *compactOp) syncObjs() objIDSlice { return nil }
   195  
    196  // deleteOp models a Writer.Delete operation.
   197  type deleteOp struct {
   198  	writerID objID
   199  	key      []byte
   200  }
   201  
   202  func (o *deleteOp) run(t *test, h historyRecorder) {
   203  	w := t.getWriter(o.writerID)
   204  	var err error
   205  	if t.testOpts.deleteSized && t.isFMV(pebble.FormatDeleteSizedAndObsolete) {
   206  		// Call DeleteSized with a deterministic size derived from the index.
   207  		// The size does not need to be accurate for correctness.
   208  		err = w.DeleteSized(o.key, hashSize(t.idx), t.writeOpts)
   209  	} else {
   210  		err = w.Delete(o.key, t.writeOpts)
   211  	}
   212  	h.Recordf("%s // %v", o, err)
   213  }
   214  
   215  func hashSize(index int) uint32 {
   216  	// Fibonacci hash https://probablydance.com/2018/06/16/fibonacci-hashing-the-optimization-that-the-world-forgot-or-a-better-alternative-to-integer-modulo/
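         	// The multiplier is the 64-bit Fibonacci hashing constant (roughly 2^64
         	// divided by the golden ratio); multiplying by it scatters consecutive
         	// indices across the 64-bit range before the reduction modulo maxValueSize.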
   217  	return uint32((11400714819323198485 * uint64(index)) % maxValueSize)
   218  }
   219  
   220  func (o *deleteOp) String() string {
   221  	return fmt.Sprintf("%s.Delete(%q)", o.writerID, o.key)
   222  }
   223  func (o *deleteOp) receiver() objID      { return o.writerID }
   224  func (o *deleteOp) syncObjs() objIDSlice { return nil }
   225  
    226  // singleDeleteOp models a Writer.SingleDelete operation.
   227  type singleDeleteOp struct {
   228  	writerID           objID
   229  	key                []byte
   230  	maybeReplaceDelete bool
   231  }
   232  
   233  func (o *singleDeleteOp) run(t *test, h historyRecorder) {
   234  	w := t.getWriter(o.writerID)
   235  	var err error
   236  	if t.testOpts.replaceSingleDelete && o.maybeReplaceDelete {
   237  		err = w.Delete(o.key, t.writeOpts)
   238  	} else {
   239  		err = w.SingleDelete(o.key, t.writeOpts)
   240  	}
   241  	// NOTE: even if the SINGLEDEL was replaced with a DELETE, we must still
   242  	// write the former to the history log. The log line will indicate whether
   243  	// or not the delete *could* have been replaced. The OPTIONS file should
   244  	// also be consulted to determine what happened at runtime (i.e. by taking
   245  	// the logical AND).
   246  	h.Recordf("%s // %v", o, err)
   247  }
   248  
   249  func (o *singleDeleteOp) String() string {
   250  	return fmt.Sprintf("%s.SingleDelete(%q, %v /* maybeReplaceDelete */)", o.writerID, o.key, o.maybeReplaceDelete)
   251  }
   252  
   253  func (o *singleDeleteOp) receiver() objID      { return o.writerID }
   254  func (o *singleDeleteOp) syncObjs() objIDSlice { return nil }
   255  
    256  // deleteRangeOp models a Writer.DeleteRange operation.
   257  type deleteRangeOp struct {
   258  	writerID objID
   259  	start    []byte
   260  	end      []byte
   261  }
   262  
   263  func (o *deleteRangeOp) run(t *test, h historyRecorder) {
   264  	w := t.getWriter(o.writerID)
   265  	err := w.DeleteRange(o.start, o.end, t.writeOpts)
   266  	h.Recordf("%s // %v", o, err)
   267  }
   268  
   269  func (o *deleteRangeOp) String() string {
   270  	return fmt.Sprintf("%s.DeleteRange(%q, %q)", o.writerID, o.start, o.end)
   271  }
   272  
   273  func (o *deleteRangeOp) receiver() objID      { return o.writerID }
   274  func (o *deleteRangeOp) syncObjs() objIDSlice { return nil }
   275  
   276  // flushOp models a DB.Flush operation.
   277  type flushOp struct {
   278  }
   279  
   280  func (o *flushOp) run(t *test, h historyRecorder) {
   281  	err := t.db.Flush()
   282  	h.Recordf("%s // %v", o, err)
   283  }
   284  
   285  func (o *flushOp) String() string       { return "db.Flush()" }
   286  func (o *flushOp) receiver() objID      { return dbObjID }
   287  func (o *flushOp) syncObjs() objIDSlice { return nil }
   288  
    289  // mergeOp models a Writer.Merge operation.
   290  type mergeOp struct {
   291  	writerID objID
   292  	key      []byte
   293  	value    []byte
   294  }
   295  
   296  func (o *mergeOp) run(t *test, h historyRecorder) {
   297  	w := t.getWriter(o.writerID)
   298  	err := w.Merge(o.key, o.value, t.writeOpts)
   299  	h.Recordf("%s // %v", o, err)
   300  }
   301  
   302  func (o *mergeOp) String() string       { return fmt.Sprintf("%s.Merge(%q, %q)", o.writerID, o.key, o.value) }
   303  func (o *mergeOp) receiver() objID      { return o.writerID }
   304  func (o *mergeOp) syncObjs() objIDSlice { return nil }
   305  
    306  // setOp models a Writer.Set operation.
   307  type setOp struct {
   308  	writerID objID
   309  	key      []byte
   310  	value    []byte
   311  }
   312  
   313  func (o *setOp) run(t *test, h historyRecorder) {
   314  	w := t.getWriter(o.writerID)
   315  	err := w.Set(o.key, o.value, t.writeOpts)
   316  	h.Recordf("%s // %v", o, err)
   317  }
   318  
   319  func (o *setOp) String() string       { return fmt.Sprintf("%s.Set(%q, %q)", o.writerID, o.key, o.value) }
   320  func (o *setOp) receiver() objID      { return o.writerID }
   321  func (o *setOp) syncObjs() objIDSlice { return nil }
   322  
    323  // rangeKeyDeleteOp models a Writer.RangeKeyDelete operation.
   324  type rangeKeyDeleteOp struct {
   325  	writerID objID
   326  	start    []byte
   327  	end      []byte
   328  }
   329  
   330  func (o *rangeKeyDeleteOp) run(t *test, h historyRecorder) {
   331  	w := t.getWriter(o.writerID)
   332  	err := w.RangeKeyDelete(o.start, o.end, t.writeOpts)
   333  	h.Recordf("%s // %v", o, err)
   334  }
   335  
   336  func (o *rangeKeyDeleteOp) String() string {
   337  	return fmt.Sprintf("%s.RangeKeyDelete(%q, %q)", o.writerID, o.start, o.end)
   338  }
   339  
   340  func (o *rangeKeyDeleteOp) receiver() objID      { return o.writerID }
   341  func (o *rangeKeyDeleteOp) syncObjs() objIDSlice { return nil }
   342  
    343  // rangeKeySetOp models a Writer.RangeKeySet operation.
   344  type rangeKeySetOp struct {
   345  	writerID objID
   346  	start    []byte
   347  	end      []byte
   348  	suffix   []byte
   349  	value    []byte
   350  }
   351  
   352  func (o *rangeKeySetOp) run(t *test, h historyRecorder) {
   353  	w := t.getWriter(o.writerID)
   354  	err := w.RangeKeySet(o.start, o.end, o.suffix, o.value, t.writeOpts)
   355  	h.Recordf("%s // %v", o, err)
   356  }
   357  
   358  func (o *rangeKeySetOp) String() string {
   359  	return fmt.Sprintf("%s.RangeKeySet(%q, %q, %q, %q)",
   360  		o.writerID, o.start, o.end, o.suffix, o.value)
   361  }
   362  
   363  func (o *rangeKeySetOp) receiver() objID      { return o.writerID }
   364  func (o *rangeKeySetOp) syncObjs() objIDSlice { return nil }
   365  
    366  // rangeKeyUnsetOp models a Writer.RangeKeyUnset operation.
   367  type rangeKeyUnsetOp struct {
   368  	writerID objID
   369  	start    []byte
   370  	end      []byte
   371  	suffix   []byte
   372  }
   373  
   374  func (o *rangeKeyUnsetOp) run(t *test, h historyRecorder) {
   375  	w := t.getWriter(o.writerID)
   376  	err := w.RangeKeyUnset(o.start, o.end, o.suffix, t.writeOpts)
   377  	h.Recordf("%s // %v", o, err)
   378  }
   379  
   380  func (o *rangeKeyUnsetOp) String() string {
   381  	return fmt.Sprintf("%s.RangeKeyUnset(%q, %q, %q)",
   382  		o.writerID, o.start, o.end, o.suffix)
   383  }
   384  
   385  func (o *rangeKeyUnsetOp) receiver() objID      { return o.writerID }
   386  func (o *rangeKeyUnsetOp) syncObjs() objIDSlice { return nil }
   387  
    388  // newBatchOp models a DB.NewBatch operation.
   389  type newBatchOp struct {
   390  	batchID objID
   391  }
   392  
   393  func (o *newBatchOp) run(t *test, h historyRecorder) {
   394  	b := t.db.NewBatch()
   395  	t.setBatch(o.batchID, b)
   396  	h.Recordf("%s", o)
   397  }
   398  
   399  func (o *newBatchOp) String() string  { return fmt.Sprintf("%s = db.NewBatch()", o.batchID) }
   400  func (o *newBatchOp) receiver() objID { return dbObjID }
   401  func (o *newBatchOp) syncObjs() objIDSlice {
   402  	// NewBatch should not be concurrent with operations that interact with that
   403  	// same batch.
   404  	return []objID{o.batchID}
   405  }
   406  
    407  // newIndexedBatchOp models a DB.NewIndexedBatch operation.
   408  type newIndexedBatchOp struct {
   409  	batchID objID
   410  }
   411  
   412  func (o *newIndexedBatchOp) run(t *test, h historyRecorder) {
   413  	b := t.db.NewIndexedBatch()
   414  	t.setBatch(o.batchID, b)
   415  	h.Recordf("%s", o)
   416  }
   417  
   418  func (o *newIndexedBatchOp) String() string {
   419  	return fmt.Sprintf("%s = db.NewIndexedBatch()", o.batchID)
   420  }
   421  func (o *newIndexedBatchOp) receiver() objID { return dbObjID }
   422  func (o *newIndexedBatchOp) syncObjs() objIDSlice {
   423  	// NewIndexedBatch should not be concurrent with operations that interact
   424  	// with that same batch.
   425  	return []objID{o.batchID}
   426  }
   427  
   428  // batchCommitOp models a Batch.Commit operation.
   429  type batchCommitOp struct {
   430  	batchID objID
   431  }
   432  
   433  func (o *batchCommitOp) run(t *test, h historyRecorder) {
   434  	b := t.getBatch(o.batchID)
   435  	err := b.Commit(t.writeOpts)
   436  	h.Recordf("%s // %v", o, err)
   437  }
   438  
   439  func (o *batchCommitOp) String() string  { return fmt.Sprintf("%s.Commit()", o.batchID) }
   440  func (o *batchCommitOp) receiver() objID { return o.batchID }
   441  func (o *batchCommitOp) syncObjs() objIDSlice {
   442  	// Synchronize on the database so that NewIters wait for the commit.
   443  	return []objID{dbObjID}
   444  }
   445  
   446  // ingestOp models a DB.Ingest operation.
   447  type ingestOp struct {
   448  	batchIDs []objID
   449  }
   450  
   451  func (o *ingestOp) run(t *test, h historyRecorder) {
   452  	// We can only use apply as an alternative for ingestion if we are ingesting
   453  	// a single batch. If we are ingesting multiple batches, the batches may
   454  	// overlap which would cause ingestion to fail but apply would succeed.
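         	// For example (hypothetical batches): if batch1 and batch2 both set key
         	// "a", ingesting the two resulting sstables would fail because their key
         	// ranges overlap, whereas applying both batches would simply leave the
         	// value written by whichever batch is applied last.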
   455  	if t.testOpts.ingestUsingApply && len(o.batchIDs) == 1 {
   456  		id := o.batchIDs[0]
   457  		b := t.getBatch(id)
   458  		iter, rangeDelIter, rangeKeyIter := private.BatchSort(b)
   459  		c, err := o.collapseBatch(t, iter, rangeDelIter, rangeKeyIter)
   460  		if err == nil {
   461  			w := t.getWriter(makeObjID(dbTag, 0))
   462  			err = w.Apply(c, t.writeOpts)
   463  		}
   464  		_ = b.Close()
   465  		_ = c.Close()
   466  		t.clearObj(id)
   467  		h.Recordf("%s // %v", o, err)
   468  		return
   469  	}
   470  
   471  	var paths []string
   472  	var err error
   473  	for i, id := range o.batchIDs {
   474  		b := t.getBatch(id)
   475  		t.clearObj(id)
   476  		path, err2 := o.build(t, h, b, i)
   477  		if err2 != nil {
   478  			h.Recordf("Build(%s) // %v", id, err2)
   479  		}
   480  		err = firstError(err, err2)
   481  		if err2 == nil {
   482  			paths = append(paths, path)
   483  		}
   484  		err = firstError(err, b.Close())
   485  	}
   486  
   487  	err = firstError(err, withRetries(func() error {
   488  		return t.db.Ingest(paths)
   489  	}))
   490  
   491  	h.Recordf("%s // %v", o, err)
   492  }
   493  
   494  func (o *ingestOp) build(t *test, h historyRecorder, b *pebble.Batch, i int) (string, error) {
   495  	rootFS := vfs.Root(t.opts.FS)
   496  	path := rootFS.PathJoin(t.tmpDir, fmt.Sprintf("ext%d", i))
   497  	f, err := rootFS.Create(path)
   498  	if err != nil {
   499  		return "", err
   500  	}
   501  
   502  	iter, rangeDelIter, rangeKeyIter := private.BatchSort(b)
   503  	defer closeIters(iter, rangeDelIter, rangeKeyIter)
   504  
   505  	equal := t.opts.Comparer.Equal
   506  	tableFormat := t.db.FormatMajorVersion().MaxTableFormat()
   507  	w := sstable.NewWriter(
   508  		objstorageprovider.NewFileWritable(f),
   509  		t.opts.MakeWriterOptions(0, tableFormat),
   510  	)
   511  
   512  	var lastUserKey []byte
   513  	for key, value := iter.First(); key != nil; key, value = iter.Next() {
   514  		// Ignore duplicate keys.
   515  		if equal(lastUserKey, key.UserKey) {
   516  			continue
   517  		}
   518  		// NB: We don't have to copy the key or value since we're reading from a
   519  		// batch which doesn't do prefix compression.
   520  		lastUserKey = key.UserKey
   521  
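         		// Ingested sstables must be written with zero sequence numbers; Pebble
         		// assigns the actual sequence number at ingestion time.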
   522  		key.SetSeqNum(base.SeqNumZero)
   523  		if err := w.Add(*key, value.InPlaceValue()); err != nil {
   524  			return "", err
   525  		}
   526  	}
   527  	if err := iter.Close(); err != nil {
   528  		return "", err
   529  	}
   530  	iter = nil
   531  
   532  	if rangeDelIter != nil {
   533  		// NB: The range tombstones have already been fragmented by the Batch.
   534  		for t := rangeDelIter.First(); t != nil; t = rangeDelIter.Next() {
   535  			// NB: We don't have to copy the key or value since we're reading from a
   536  			// batch which doesn't do prefix compression.
   537  			if err := w.DeleteRange(t.Start, t.End); err != nil {
   538  				return "", err
   539  			}
   540  		}
   541  		if err := rangeDelIter.Close(); err != nil {
   542  			return "", err
   543  		}
   544  		rangeDelIter = nil
   545  	}
   546  
   547  	if err := w.Close(); err != nil {
   548  		return "", err
   549  	}
   550  	return path, nil
   551  }
   552  
   553  func (o *ingestOp) receiver() objID { return dbObjID }
   554  func (o *ingestOp) syncObjs() objIDSlice {
   555  	// Ingest should not be concurrent with mutating the batches that will be
   556  	// ingested as sstables.
   557  	return o.batchIDs
   558  }
   559  
   560  func closeIters(
   561  	pointIter base.InternalIterator,
   562  	rangeDelIter keyspan.FragmentIterator,
   563  	rangeKeyIter keyspan.FragmentIterator,
   564  ) {
   565  	if pointIter != nil {
   566  		pointIter.Close()
   567  	}
   568  	if rangeDelIter != nil {
   569  		rangeDelIter.Close()
   570  	}
   571  	if rangeKeyIter != nil {
   572  		rangeKeyIter.Close()
   573  	}
   574  }
   575  
   576  // collapseBatch collapses the mutations in a batch to be equivalent to an
   577  // sstable ingesting those mutations. Duplicate updates to a key are collapsed
   578  // so that only the latest update is performed. All range deletions are
    579  // performed first in the batch to match the semantics of ingestion, where a
    580  // range deletion does not delete a point record contained in the same sstable.
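         //
         // For example (hypothetical contents): a batch holding Set(a,1), Set(a,2) and
         // DeleteRange(a,c) collapses to DeleteRange(a,c) followed by Set(a,2): the
         // range deletion is written first so that it does not shadow the batch's own
         // point keys, and only the latest update to a survives.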
   581  func (o *ingestOp) collapseBatch(
   582  	t *test, pointIter base.InternalIterator, rangeDelIter, rangeKeyIter keyspan.FragmentIterator,
   583  ) (*pebble.Batch, error) {
   584  	defer closeIters(pointIter, rangeDelIter, rangeKeyIter)
   585  	equal := t.opts.Comparer.Equal
   586  	collapsed := t.db.NewBatch()
   587  
   588  	if rangeDelIter != nil {
   589  		// NB: The range tombstones have already been fragmented by the Batch.
   590  		for t := rangeDelIter.First(); t != nil; t = rangeDelIter.Next() {
   591  			// NB: We don't have to copy the key or value since we're reading from a
   592  			// batch which doesn't do prefix compression.
   593  			if err := collapsed.DeleteRange(t.Start, t.End, nil); err != nil {
   594  				return nil, err
   595  			}
   596  		}
   597  		if err := rangeDelIter.Close(); err != nil {
   598  			return nil, err
   599  		}
   600  		rangeDelIter = nil
   601  	}
   602  
   603  	if pointIter != nil {
   604  		var lastUserKey []byte
   605  		for key, value := pointIter.First(); key != nil; key, value = pointIter.Next() {
   606  			// Ignore duplicate keys.
   607  			if equal(lastUserKey, key.UserKey) {
   608  				continue
   609  			}
   610  			// NB: We don't have to copy the key or value since we're reading from a
   611  			// batch which doesn't do prefix compression.
   612  			lastUserKey = key.UserKey
   613  
   614  			var err error
   615  			switch key.Kind() {
   616  			case pebble.InternalKeyKindDelete:
   617  				err = collapsed.Delete(key.UserKey, nil)
   618  			case pebble.InternalKeyKindDeleteSized:
   619  				v, _ := binary.Uvarint(value.InPlaceValue())
   620  				// Batch.DeleteSized takes just the length of the value being
   621  				// deleted and adds the key's length to derive the overall entry
   622  				// size of the value being deleted. This has already been done
   623  				// to the key we're reading from the batch, so we must subtract
   624  				// the key length from the encoded value before calling
   625  				// collapsed.DeleteSized, which will again add the key length
   626  				// before encoding.
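         				// For example (hypothetical sizes): DeleteSized(k, 10) with
         				// len(k) == 5 is encoded in the batch as 15, so v == 15 here
         				// and we hand back v-5 == 10.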
   627  				err = collapsed.DeleteSized(key.UserKey, uint32(v-uint64(len(key.UserKey))), nil)
   628  			case pebble.InternalKeyKindSingleDelete:
   629  				err = collapsed.SingleDelete(key.UserKey, nil)
   630  			case pebble.InternalKeyKindSet:
   631  				err = collapsed.Set(key.UserKey, value.InPlaceValue(), nil)
   632  			case pebble.InternalKeyKindMerge:
   633  				err = collapsed.Merge(key.UserKey, value.InPlaceValue(), nil)
   634  			case pebble.InternalKeyKindLogData:
   635  				err = collapsed.LogData(key.UserKey, nil)
   636  			default:
   637  				err = errors.Errorf("unknown batch record kind: %d", key.Kind())
   638  			}
   639  			if err != nil {
   640  				return nil, err
   641  			}
   642  		}
   643  		if err := pointIter.Close(); err != nil {
   644  			return nil, err
   645  		}
   646  		pointIter = nil
   647  	}
   648  
   649  	return collapsed, nil
   650  }
   651  
   652  func (o *ingestOp) String() string {
   653  	var buf strings.Builder
   654  	buf.WriteString("db.Ingest(")
   655  	for i, id := range o.batchIDs {
   656  		if i > 0 {
   657  			buf.WriteString(", ")
   658  		}
   659  		buf.WriteString(id.String())
   660  	}
   661  	buf.WriteString(")")
   662  	return buf.String()
   663  }
   664  
   665  // getOp models a Reader.Get operation.
   666  type getOp struct {
   667  	readerID objID
   668  	key      []byte
   669  }
   670  
   671  func (o *getOp) run(t *test, h historyRecorder) {
   672  	r := t.getReader(o.readerID)
   673  	var val []byte
   674  	var closer io.Closer
   675  	err := withRetries(func() (err error) {
   676  		val, closer, err = r.Get(o.key)
   677  		return err
   678  	})
   679  	h.Recordf("%s // [%q] %v", o, val, err)
   680  	if closer != nil {
   681  		closer.Close()
   682  	}
   683  }
   684  
   685  func (o *getOp) String() string  { return fmt.Sprintf("%s.Get(%q)", o.readerID, o.key) }
   686  func (o *getOp) receiver() objID { return o.readerID }
   687  func (o *getOp) syncObjs() objIDSlice {
   688  	if o.readerID == dbObjID {
   689  		return nil
   690  	}
   691  	// batch.Get reads through to the current database state.
   692  	return []objID{dbObjID}
   693  }
   694  
   695  // newIterOp models a Reader.NewIter operation.
   696  type newIterOp struct {
   697  	readerID objID
   698  	iterID   objID
   699  	iterOpts
   700  }
   701  
   702  func (o *newIterOp) run(t *test, h historyRecorder) {
   703  	r := t.getReader(o.readerID)
   704  	opts := iterOptions(o.iterOpts)
   705  
   706  	var i *pebble.Iterator
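         	// Iterator construction can fail transiently when running with an
         	// error-injecting filesystem, so retry until NewIter succeeds or fails
         	// with a non-injected error.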
   707  	for {
   708  		i, _ = r.NewIter(opts)
   709  		if err := i.Error(); !errors.Is(err, errorfs.ErrInjected) {
   710  			break
   711  		}
    712  		// Close this iterator and retry NewIter.
   713  		_ = i.Close()
   714  	}
   715  	t.setIter(o.iterID, i)
   716  
   717  	// Trash the bounds to ensure that Pebble doesn't rely on the stability of
   718  	// the user-provided bounds.
   719  	if opts != nil {
   720  		rand.Read(opts.LowerBound[:])
   721  		rand.Read(opts.UpperBound[:])
   722  	}
   723  	h.Recordf("%s // %v", o, i.Error())
   724  }
   725  
   726  func (o *newIterOp) String() string {
   727  	return fmt.Sprintf("%s = %s.NewIter(%q, %q, %d /* key types */, %d, %d, %t /* use L6 filters */, %q /* masking suffix */)",
   728  		o.iterID, o.readerID, o.lower, o.upper, o.keyTypes, o.filterMin, o.filterMax, o.useL6Filters, o.maskSuffix)
   729  }
   730  
   731  func (o *newIterOp) receiver() objID { return o.readerID }
   732  func (o *newIterOp) syncObjs() objIDSlice {
   733  	// Prevent o.iterID ops from running before it exists.
   734  	objs := []objID{o.iterID}
   735  	// If reading through a batch, the new iterator will also observe database
   736  	// state, and we must synchronize on the database state for a consistent
   737  	// view.
   738  	if o.readerID.tag() == batchTag {
   739  		objs = append(objs, dbObjID)
   740  	}
   741  	return objs
   742  }
   743  
    744  // newIterUsingCloneOp models an Iterator.Clone operation.
   745  type newIterUsingCloneOp struct {
   746  	existingIterID objID
   747  	iterID         objID
   748  	refreshBatch   bool
   749  	iterOpts
   750  
   751  	// derivedReaderID is the ID of the underlying reader that backs both the
   752  	// existing iterator and the new iterator. The derivedReaderID is NOT
   753  	// serialized by String and is derived from other operations during parse.
   754  	derivedReaderID objID
   755  }
   756  
   757  func (o *newIterUsingCloneOp) run(t *test, h historyRecorder) {
   758  	iter := t.getIter(o.existingIterID)
   759  	cloneOpts := pebble.CloneOptions{
   760  		IterOptions:      iterOptions(o.iterOpts),
   761  		RefreshBatchView: o.refreshBatch,
   762  	}
   763  	i, err := iter.iter.Clone(cloneOpts)
   764  	if err != nil {
   765  		panic(err)
   766  	}
   767  	t.setIter(o.iterID, i)
   768  	h.Recordf("%s // %v", o, i.Error())
   769  }
   770  
   771  func (o *newIterUsingCloneOp) String() string {
   772  	return fmt.Sprintf("%s = %s.Clone(%t, %q, %q, %d /* key types */, %d, %d, %t /* use L6 filters */, %q /* masking suffix */)",
   773  		o.iterID, o.existingIterID, o.refreshBatch, o.lower, o.upper,
   774  		o.keyTypes, o.filterMin, o.filterMax, o.useL6Filters, o.maskSuffix)
   775  }
   776  
   777  func (o *newIterUsingCloneOp) receiver() objID { return o.existingIterID }
   778  
   779  func (o *newIterUsingCloneOp) syncObjs() objIDSlice {
   780  	objIDs := []objID{o.iterID}
   781  	// If the underlying reader is a batch, we must synchronize with the batch.
   782  	// If refreshBatch=true, synchronizing is necessary to observe all the
    783  	// mutations up until this op and no more. Even when refreshBatch=false,
   784  	// we must synchronize because iterator construction may access state cached
   785  	// on the indexed batch to avoid refragmenting range tombstones or range
   786  	// keys.
   787  	if o.derivedReaderID.tag() == batchTag {
   788  		objIDs = append(objIDs, o.derivedReaderID)
   789  	}
   790  	return objIDs
   791  }
   792  
   793  // iterSetBoundsOp models an Iterator.SetBounds operation.
   794  type iterSetBoundsOp struct {
   795  	iterID objID
   796  	lower  []byte
   797  	upper  []byte
   798  }
   799  
   800  func (o *iterSetBoundsOp) run(t *test, h historyRecorder) {
   801  	i := t.getIter(o.iterID)
   802  	var lower, upper []byte
   803  	if o.lower != nil {
   804  		lower = append(lower, o.lower...)
   805  	}
   806  	if o.upper != nil {
   807  		upper = append(upper, o.upper...)
   808  	}
   809  	i.SetBounds(lower, upper)
   810  
   811  	// Trash the bounds to ensure that Pebble doesn't rely on the stability of
   812  	// the user-provided bounds.
   813  	rand.Read(lower[:])
   814  	rand.Read(upper[:])
   815  
   816  	h.Recordf("%s // %v", o, i.Error())
   817  }
   818  
   819  func (o *iterSetBoundsOp) String() string {
   820  	return fmt.Sprintf("%s.SetBounds(%q, %q)", o.iterID, o.lower, o.upper)
   821  }
   822  
   823  func (o *iterSetBoundsOp) receiver() objID      { return o.iterID }
   824  func (o *iterSetBoundsOp) syncObjs() objIDSlice { return nil }
   825  
   826  // iterSetOptionsOp models an Iterator.SetOptions operation.
   827  type iterSetOptionsOp struct {
   828  	iterID objID
   829  	iterOpts
   830  
   831  	// derivedReaderID is the ID of the underlying reader that backs the
   832  	// iterator. The derivedReaderID is NOT serialized by String and is derived
   833  	// from other operations during parse.
   834  	derivedReaderID objID
   835  }
   836  
   837  func (o *iterSetOptionsOp) run(t *test, h historyRecorder) {
   838  	i := t.getIter(o.iterID)
   839  
   840  	opts := iterOptions(o.iterOpts)
   841  	if opts == nil {
   842  		opts = &pebble.IterOptions{}
   843  	}
   844  	i.SetOptions(opts)
   845  
   846  	// Trash the bounds to ensure that Pebble doesn't rely on the stability of
   847  	// the user-provided bounds.
   848  	rand.Read(opts.LowerBound[:])
   849  	rand.Read(opts.UpperBound[:])
   850  
   851  	h.Recordf("%s // %v", o, i.Error())
   852  }
   853  
   854  func (o *iterSetOptionsOp) String() string {
   855  	return fmt.Sprintf("%s.SetOptions(%q, %q, %d /* key types */, %d, %d, %t /* use L6 filters */, %q /* masking suffix */)",
   856  		o.iterID, o.lower, o.upper, o.keyTypes, o.filterMin, o.filterMax, o.useL6Filters, o.maskSuffix)
   857  }
   858  
   859  func iterOptions(o iterOpts) *pebble.IterOptions {
   860  	if o.IsZero() {
   861  		return nil
   862  	}
   863  	var lower, upper []byte
   864  	if o.lower != nil {
   865  		lower = append(lower, o.lower...)
   866  	}
   867  	if o.upper != nil {
   868  		upper = append(upper, o.upper...)
   869  	}
   870  	opts := &pebble.IterOptions{
   871  		LowerBound: lower,
   872  		UpperBound: upper,
   873  		KeyTypes:   pebble.IterKeyType(o.keyTypes),
   874  		RangeKeyMasking: pebble.RangeKeyMasking{
   875  			Suffix: o.maskSuffix,
   876  		},
   877  		UseL6Filters: o.useL6Filters,
   878  	}
   879  	if opts.RangeKeyMasking.Suffix != nil {
   880  		opts.RangeKeyMasking.Filter = func() pebble.BlockPropertyFilterMask {
   881  			return sstable.NewTestKeysMaskingFilter()
   882  		}
   883  	}
   884  	if o.filterMax > 0 {
   885  		opts.PointKeyFilters = []pebble.BlockPropertyFilter{
   886  			sstable.NewTestKeysBlockPropertyFilter(o.filterMin, o.filterMax),
   887  		}
    888  		// Enforce the timestamp bounds in SkipPoint, so that the iterator never
    889  		// returns a key with a suffix outside the [filterMin, filterMax) bounds.
    890  		// This provides deterministic iteration.
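         		// For example, with filterMin=5 and filterMax=10 (hypothetical values),
         		// a point key suffixed @7 is surfaced, keys suffixed @3 or @10 are
         		// skipped, and a key with no suffix is never skipped.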
   891  		opts.SkipPoint = func(k []byte) (skip bool) {
   892  			n := testkeys.Comparer.Split(k)
   893  			if n == len(k) {
   894  				// No suffix, don't skip it.
   895  				return false
   896  			}
   897  			v, err := testkeys.ParseSuffix(k[n:])
   898  			if err != nil {
   899  				panic(err)
   900  			}
   901  			ts := uint64(v)
   902  			return ts < o.filterMin || ts >= o.filterMax
   903  		}
   904  	}
   905  	return opts
   906  }
   907  
   908  func (o *iterSetOptionsOp) receiver() objID { return o.iterID }
   909  
   910  func (o *iterSetOptionsOp) syncObjs() objIDSlice {
   911  	if o.derivedReaderID.tag() == batchTag {
   912  		// If the underlying reader is a batch, we must synchronize with the
   913  		// batch so that we observe all the mutations up until this operation
   914  		// and no more.
   915  		return []objID{o.derivedReaderID}
   916  	}
   917  	return nil
   918  }
   919  
   920  // iterSeekGEOp models an Iterator.SeekGE[WithLimit] operation.
   921  type iterSeekGEOp struct {
   922  	iterID objID
   923  	key    []byte
   924  	limit  []byte
   925  
   926  	derivedReaderID objID
   927  }
   928  
   929  func iteratorPos(i *retryableIter) string {
   930  	var buf bytes.Buffer
   931  	fmt.Fprintf(&buf, "%q", i.Key())
   932  	hasPoint, hasRange := i.HasPointAndRange()
   933  	if hasPoint {
   934  		fmt.Fprintf(&buf, ",%q", i.Value())
   935  	} else {
   936  		fmt.Fprint(&buf, ",<no point>")
   937  	}
   938  	if hasRange {
   939  		start, end := i.RangeBounds()
   940  		fmt.Fprintf(&buf, ",[%q,%q)=>{", start, end)
   941  		for i, rk := range i.RangeKeys() {
   942  			if i > 0 {
   943  				fmt.Fprint(&buf, ",")
   944  			}
   945  			fmt.Fprintf(&buf, "%q=%q", rk.Suffix, rk.Value)
   946  		}
   947  		fmt.Fprint(&buf, "}")
   948  	} else {
   949  		fmt.Fprint(&buf, ",<no range>")
   950  	}
   951  	if i.RangeKeyChanged() {
   952  		fmt.Fprint(&buf, "*")
   953  	}
   954  	return buf.String()
   955  }
   956  
   957  func validBoolToStr(valid bool) string {
   958  	return fmt.Sprintf("%t", valid)
   959  }
   960  
   961  func validityStateToStr(validity pebble.IterValidityState) (bool, string) {
   962  	// We can't distinguish between IterExhausted and IterAtLimit in a
   963  	// deterministic manner.
   964  	switch validity {
   965  	case pebble.IterExhausted, pebble.IterAtLimit:
   966  		return false, "invalid"
   967  	case pebble.IterValid:
   968  		return true, "valid"
   969  	default:
   970  		panic("unknown validity")
   971  	}
   972  }
   973  
   974  func (o *iterSeekGEOp) run(t *test, h historyRecorder) {
   975  	i := t.getIter(o.iterID)
   976  	var valid bool
   977  	var validStr string
   978  	if o.limit == nil {
   979  		valid = i.SeekGE(o.key)
   980  		validStr = validBoolToStr(valid)
   981  	} else {
   982  		valid, validStr = validityStateToStr(i.SeekGEWithLimit(o.key, o.limit))
   983  	}
   984  	if valid {
   985  		h.Recordf("%s // [%s,%s] %v", o, validStr, iteratorPos(i), i.Error())
   986  	} else {
   987  		h.Recordf("%s // [%s] %v", o, validStr, i.Error())
   988  	}
   989  }
   990  
   991  func (o *iterSeekGEOp) String() string {
   992  	return fmt.Sprintf("%s.SeekGE(%q, %q)", o.iterID, o.key, o.limit)
   993  }
   994  func (o *iterSeekGEOp) receiver() objID      { return o.iterID }
   995  func (o *iterSeekGEOp) syncObjs() objIDSlice { return onlyBatchIDs(o.derivedReaderID) }
   996  
   997  func onlyBatchIDs(ids ...objID) objIDSlice {
   998  	var ret objIDSlice
   999  	for _, id := range ids {
  1000  		if id.tag() == batchTag {
  1001  			ret = append(ret, id)
  1002  		}
  1003  	}
  1004  	return ret
  1005  }
  1006  
  1007  // iterSeekPrefixGEOp models an Iterator.SeekPrefixGE operation.
  1008  type iterSeekPrefixGEOp struct {
  1009  	iterID objID
  1010  	key    []byte
  1011  
  1012  	derivedReaderID objID
  1013  }
  1014  
  1015  func (o *iterSeekPrefixGEOp) run(t *test, h historyRecorder) {
  1016  	i := t.getIter(o.iterID)
  1017  	valid := i.SeekPrefixGE(o.key)
  1018  	if valid {
  1019  		h.Recordf("%s // [%t,%s] %v", o, valid, iteratorPos(i), i.Error())
  1020  	} else {
  1021  		h.Recordf("%s // [%t] %v", o, valid, i.Error())
  1022  	}
  1023  }
  1024  
  1025  func (o *iterSeekPrefixGEOp) String() string {
  1026  	return fmt.Sprintf("%s.SeekPrefixGE(%q)", o.iterID, o.key)
  1027  }
  1028  func (o *iterSeekPrefixGEOp) receiver() objID      { return o.iterID }
  1029  func (o *iterSeekPrefixGEOp) syncObjs() objIDSlice { return onlyBatchIDs(o.derivedReaderID) }
  1030  
  1031  // iterSeekLTOp models an Iterator.SeekLT[WithLimit] operation.
  1032  type iterSeekLTOp struct {
  1033  	iterID objID
  1034  	key    []byte
  1035  	limit  []byte
  1036  
  1037  	derivedReaderID objID
  1038  }
  1039  
  1040  func (o *iterSeekLTOp) run(t *test, h historyRecorder) {
  1041  	i := t.getIter(o.iterID)
  1042  	var valid bool
  1043  	var validStr string
  1044  	if o.limit == nil {
  1045  		valid = i.SeekLT(o.key)
  1046  		validStr = validBoolToStr(valid)
  1047  	} else {
  1048  		valid, validStr = validityStateToStr(i.SeekLTWithLimit(o.key, o.limit))
  1049  	}
  1050  	if valid {
  1051  		h.Recordf("%s // [%s,%s] %v", o, validStr, iteratorPos(i), i.Error())
  1052  	} else {
  1053  		h.Recordf("%s // [%s] %v", o, validStr, i.Error())
  1054  	}
  1055  }
  1056  
  1057  func (o *iterSeekLTOp) String() string {
  1058  	return fmt.Sprintf("%s.SeekLT(%q, %q)", o.iterID, o.key, o.limit)
  1059  }
  1060  
  1061  func (o *iterSeekLTOp) receiver() objID      { return o.iterID }
  1062  func (o *iterSeekLTOp) syncObjs() objIDSlice { return onlyBatchIDs(o.derivedReaderID) }
  1063  
  1064  // iterFirstOp models an Iterator.First operation.
  1065  type iterFirstOp struct {
  1066  	iterID objID
  1067  
  1068  	derivedReaderID objID
  1069  }
  1070  
  1071  func (o *iterFirstOp) run(t *test, h historyRecorder) {
  1072  	i := t.getIter(o.iterID)
  1073  	valid := i.First()
  1074  	if valid {
  1075  		h.Recordf("%s // [%t,%s] %v", o, valid, iteratorPos(i), i.Error())
  1076  	} else {
  1077  		h.Recordf("%s // [%t] %v", o, valid, i.Error())
  1078  	}
  1079  }
  1080  
  1081  func (o *iterFirstOp) String() string       { return fmt.Sprintf("%s.First()", o.iterID) }
  1082  func (o *iterFirstOp) receiver() objID      { return o.iterID }
  1083  func (o *iterFirstOp) syncObjs() objIDSlice { return onlyBatchIDs(o.derivedReaderID) }
  1084  
  1085  // iterLastOp models an Iterator.Last operation.
  1086  type iterLastOp struct {
  1087  	iterID objID
  1088  
  1089  	derivedReaderID objID
  1090  }
  1091  
  1092  func (o *iterLastOp) run(t *test, h historyRecorder) {
  1093  	i := t.getIter(o.iterID)
  1094  	valid := i.Last()
  1095  	if valid {
  1096  		h.Recordf("%s // [%t,%s] %v", o, valid, iteratorPos(i), i.Error())
  1097  	} else {
  1098  		h.Recordf("%s // [%t] %v", o, valid, i.Error())
  1099  	}
  1100  }
  1101  
  1102  func (o *iterLastOp) String() string       { return fmt.Sprintf("%s.Last()", o.iterID) }
  1103  func (o *iterLastOp) receiver() objID      { return o.iterID }
  1104  func (o *iterLastOp) syncObjs() objIDSlice { return onlyBatchIDs(o.derivedReaderID) }
  1105  
  1106  // iterNextOp models an Iterator.Next[WithLimit] operation.
  1107  type iterNextOp struct {
  1108  	iterID objID
  1109  	limit  []byte
  1110  
  1111  	derivedReaderID objID
  1112  }
  1113  
  1114  func (o *iterNextOp) run(t *test, h historyRecorder) {
  1115  	i := t.getIter(o.iterID)
  1116  	var valid bool
  1117  	var validStr string
  1118  	if o.limit == nil {
  1119  		valid = i.Next()
  1120  		validStr = validBoolToStr(valid)
  1121  	} else {
  1122  		valid, validStr = validityStateToStr(i.NextWithLimit(o.limit))
  1123  	}
  1124  	if valid {
  1125  		h.Recordf("%s // [%s,%s] %v", o, validStr, iteratorPos(i), i.Error())
  1126  	} else {
  1127  		h.Recordf("%s // [%s] %v", o, validStr, i.Error())
  1128  	}
  1129  }
  1130  
  1131  func (o *iterNextOp) String() string       { return fmt.Sprintf("%s.Next(%q)", o.iterID, o.limit) }
  1132  func (o *iterNextOp) receiver() objID      { return o.iterID }
  1133  func (o *iterNextOp) syncObjs() objIDSlice { return onlyBatchIDs(o.derivedReaderID) }
  1134  
  1135  // iterNextPrefixOp models an Iterator.NextPrefix operation.
  1136  type iterNextPrefixOp struct {
  1137  	iterID objID
  1138  
  1139  	derivedReaderID objID
  1140  }
  1141  
  1142  func (o *iterNextPrefixOp) run(t *test, h historyRecorder) {
  1143  	i := t.getIter(o.iterID)
  1144  	valid := i.NextPrefix()
  1145  	validStr := validBoolToStr(valid)
  1146  	if valid {
  1147  		h.Recordf("%s // [%s,%s] %v", o, validStr, iteratorPos(i), i.Error())
  1148  	} else {
  1149  		h.Recordf("%s // [%s] %v", o, validStr, i.Error())
  1150  	}
  1151  }
  1152  
  1153  func (o *iterNextPrefixOp) String() string       { return fmt.Sprintf("%s.NextPrefix()", o.iterID) }
  1154  func (o *iterNextPrefixOp) receiver() objID      { return o.iterID }
  1155  func (o *iterNextPrefixOp) syncObjs() objIDSlice { return onlyBatchIDs(o.derivedReaderID) }
  1156  
  1157  // iterCanSingleDelOp models a call to CanDeterministicallySingleDelete with an
  1158  // Iterator.
  1159  type iterCanSingleDelOp struct {
  1160  	iterID objID
  1161  
  1162  	derivedReaderID objID
  1163  }
  1164  
  1165  func (o *iterCanSingleDelOp) run(t *test, h historyRecorder) {
  1166  	// TODO(jackson): When we perform error injection, we'll need to rethink
  1167  	// this.
  1168  	_, err := pebble.CanDeterministicallySingleDelete(t.getIter(o.iterID).iter)
  1169  	// The return value of CanDeterministicallySingleDelete is dependent on
  1170  	// internal LSM state and non-deterministic, so we don't record it.
  1171  	// Including the operation within the metamorphic test at all helps ensure
  1172  	// that it does not change the result of any other Iterator operation that
  1173  	// should be deterministic, regardless of its own outcome.
  1174  	//
  1175  	// We still record the value of the error because it's deterministic, at
  1176  	// least for now. The possible error cases are:
  1177  	//  - The iterator was already in an error state when the operation ran.
  1178  	//  - The operation is deterministically invalid (like using an InternalNext
  1179  	//    to change directions.)
  1180  	h.Recordf("%s // %v", o, err)
  1181  }
  1182  
  1183  func (o *iterCanSingleDelOp) String() string       { return fmt.Sprintf("%s.InternalNext()", o.iterID) }
  1184  func (o *iterCanSingleDelOp) receiver() objID      { return o.iterID }
  1185  func (o *iterCanSingleDelOp) syncObjs() objIDSlice { return onlyBatchIDs(o.derivedReaderID) }
  1186  
  1187  // iterPrevOp models an Iterator.Prev[WithLimit] operation.
  1188  type iterPrevOp struct {
  1189  	iterID objID
  1190  	limit  []byte
  1191  
  1192  	derivedReaderID objID
  1193  }
  1194  
  1195  func (o *iterPrevOp) run(t *test, h historyRecorder) {
  1196  	i := t.getIter(o.iterID)
  1197  	var valid bool
  1198  	var validStr string
  1199  	if o.limit == nil {
  1200  		valid = i.Prev()
  1201  		validStr = validBoolToStr(valid)
  1202  	} else {
  1203  		valid, validStr = validityStateToStr(i.PrevWithLimit(o.limit))
  1204  	}
  1205  	if valid {
  1206  		h.Recordf("%s // [%s,%s] %v", o, validStr, iteratorPos(i), i.Error())
  1207  	} else {
  1208  		h.Recordf("%s // [%s] %v", o, validStr, i.Error())
  1209  	}
  1210  }
  1211  
  1212  func (o *iterPrevOp) String() string       { return fmt.Sprintf("%s.Prev(%q)", o.iterID, o.limit) }
  1213  func (o *iterPrevOp) receiver() objID      { return o.iterID }
  1214  func (o *iterPrevOp) syncObjs() objIDSlice { return onlyBatchIDs(o.derivedReaderID) }
  1215  
  1216  // newSnapshotOp models a DB.NewSnapshot operation.
  1217  type newSnapshotOp struct {
  1218  	snapID objID
  1219  	// If nonempty, this snapshot must not be used to read any keys outside of
  1220  	// the provided bounds. This allows some implementations to use 'Eventually
  1221  	// file-only snapshots,' which require bounds.
  1222  	bounds []pebble.KeyRange
  1223  }
  1224  
  1225  func (o *newSnapshotOp) run(t *test, h historyRecorder) {
  1226  	// Fibonacci hash https://probablydance.com/2018/06/16/fibonacci-hashing-the-optimization-that-the-world-forgot-or-a-better-alternative-to-integer-modulo/
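         	// The top bit of the product gives a deterministic, pseudo-random choice
         	// (seeded by t.idx and seedEFOS) between an eventually file-only snapshot
         	// and a regular snapshot whenever bounds are available.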
  1227  	if len(o.bounds) > 0 && ((11400714819323198485*uint64(t.idx)*t.testOpts.seedEFOS)>>63) == 1 {
  1228  		s := t.db.NewEventuallyFileOnlySnapshot(o.bounds)
  1229  		t.setSnapshot(o.snapID, s)
  1230  	} else {
  1231  		s := t.db.NewSnapshot()
  1232  		t.setSnapshot(o.snapID, s)
  1233  	}
  1234  	h.Recordf("%s", o)
  1235  }
  1236  
  1237  func (o *newSnapshotOp) String() string {
  1238  	var buf bytes.Buffer
  1239  	fmt.Fprintf(&buf, "%s = db.NewSnapshot(", o.snapID)
  1240  	for i := range o.bounds {
  1241  		if i > 0 {
  1242  			fmt.Fprint(&buf, ", ")
  1243  		}
  1244  		fmt.Fprintf(&buf, "%q, %q", o.bounds[i].Start, o.bounds[i].End)
  1245  	}
  1246  	fmt.Fprint(&buf, ")")
  1247  	return buf.String()
  1248  }
  1249  func (o *newSnapshotOp) receiver() objID      { return dbObjID }
  1250  func (o *newSnapshotOp) syncObjs() objIDSlice { return []objID{o.snapID} }
  1251  
  1252  type dbRatchetFormatMajorVersionOp struct {
  1253  	vers pebble.FormatMajorVersion
  1254  }
  1255  
  1256  func (o *dbRatchetFormatMajorVersionOp) run(t *test, h historyRecorder) {
  1257  	var err error
  1258  	// NB: We no-op the operation if we're already at or above the provided
  1259  	// format major version. Different runs start at different format major
  1260  	// versions, making the presence of an error and the error message itself
  1261  	// non-deterministic if we attempt to upgrade to an older version.
  1262  	//
   1263  	// Regardless, subsequent operations should behave identically, which is what
   1264  	// we're really aiming to test by including this format major version ratchet
   1265  	// operation.
  1266  	if t.db.FormatMajorVersion() < o.vers {
  1267  		err = t.db.RatchetFormatMajorVersion(o.vers)
  1268  	}
  1269  	h.Recordf("%s // %v", o, err)
  1270  }
  1271  
  1272  func (o *dbRatchetFormatMajorVersionOp) String() string {
  1273  	return fmt.Sprintf("db.RatchetFormatMajorVersion(%s)", o.vers)
  1274  }
  1275  func (o *dbRatchetFormatMajorVersionOp) receiver() objID      { return dbObjID }
  1276  func (o *dbRatchetFormatMajorVersionOp) syncObjs() objIDSlice { return nil }
  1277  
  1278  type dbRestartOp struct{}
  1279  
  1280  func (o *dbRestartOp) run(t *test, h historyRecorder) {
  1281  	if err := t.restartDB(); err != nil {
  1282  		h.Recordf("%s // %v", o, err)
  1283  		h.history.err.Store(errors.Wrap(err, "dbRestartOp"))
  1284  	} else {
  1285  		h.Recordf("%s", o)
  1286  	}
  1287  }
  1288  
  1289  func (o *dbRestartOp) String() string       { return "db.Restart()" }
  1290  func (o *dbRestartOp) receiver() objID      { return dbObjID }
  1291  func (o *dbRestartOp) syncObjs() objIDSlice { return nil }
  1292  
  1293  func formatOps(ops []op) string {
  1294  	var buf strings.Builder
  1295  	for _, op := range ops {
  1296  		fmt.Fprintf(&buf, "%s\n", op)
  1297  	}
  1298  	return buf.String()
  1299  }