github.com/cockroachdb/pebble@v1.1.1-0.20240513155919-3622ade60459/metamorphic/test.go

// Copyright 2019 The LevelDB-Go and Pebble Authors. All rights reserved. Use
// of this source code is governed by a BSD-style license that can be found in
// the LICENSE file.

package metamorphic

import (
	"fmt"
	"io"
	"os"
	"sort"
	"strings"

	"github.com/cockroachdb/errors"
	"github.com/cockroachdb/pebble"
	"github.com/cockroachdb/pebble/vfs"
	"github.com/cockroachdb/pebble/vfs/errorfs"
)

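// test tracks the state of a single metamorphic test run: the sequence of ops
// to execute and the objects (DB, batches, iterators, snapshots) that those
// ops create and operate on.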
type test struct {
	// The list of ops to execute. The ops refer to slots in the batches, iters,
	// and snapshots slices.
	ops       []op
	opsWaitOn [][]int         // op index -> op indexes
	opsDone   []chan struct{} // op index -> done channel
	idx       int
	// The DB the test is run on.
	dir       string
	db        *pebble.DB
	opts      *pebble.Options
	testOpts  *TestOptions
	writeOpts *pebble.WriteOptions
	tmpDir    string
	// The slots for the batches, iterators, and snapshots. These are read and
	// written by the ops to pass state from one op to another.
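	// For example, an op whose objID carries the batch tag and slot 3 reads
	// or writes batches[3]; see setBatch/getBatch below. (The concrete slot
	// value is illustrative.)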
	batches   []*pebble.Batch
	iters     []*retryableIter
	snapshots []readerCloser
}

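// newTest constructs a test that will execute the given ops.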
func newTest(ops []op) *test {
	return &test{
		ops: ops,
	}
}

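// init prepares the test for execution: it finalizes the Pebble options and
// event listeners, computes the synchronization points between ops, opens the
// database in dir, and creates the tmp directory used by ops that write
// sstables for ingestion.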
func (t *test) init(h *history, dir string, testOpts *TestOptions) error {
	t.dir = dir
	t.testOpts = testOpts
	t.writeOpts = pebble.NoSync
	if testOpts.strictFS {
		t.writeOpts = pebble.Sync
	}
	t.opts = testOpts.Opts.EnsureDefaults()
	t.opts.Logger = h
	lel := pebble.MakeLoggingEventListener(t.opts.Logger)
	t.opts.EventListener = &lel
	t.opts.DebugCheck = func(db *pebble.DB) error {
		// Wrap the ordinary DebugCheckLevels with retrying
		// of injected errors.
		return withRetries(func() error {
			return pebble.DebugCheckLevels(db)
		})
	}

	t.opsWaitOn, t.opsDone = computeSynchronizationPoints(t.ops)

	defer t.opts.Cache.Unref()

	// If an error occurs and we were using an in-memory FS, attempt to clone to
	// on-disk in order to allow post-mortem debugging. Note that always using
	// the on-disk FS isn't desirable because there is a large performance
	// difference between in-memory and on-disk which causes different code paths
	// and timings to be exercised.
	maybeExit := func(err error) {
		if err == nil || errors.Is(err, errorfs.ErrInjected) || errors.Is(err, pebble.ErrCancelledCompaction) {
			return
		}
		t.maybeSaveData()
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	// Exit early on any error from a background operation.
	t.opts.EventListener.BackgroundError = func(err error) {
		t.opts.Logger.Infof("background error: %s", err)
		maybeExit(err)
	}
	t.opts.EventListener.CompactionEnd = func(info pebble.CompactionInfo) {
		t.opts.Logger.Infof("%s", info)
		maybeExit(info.Err)
	}
	t.opts.EventListener.FlushEnd = func(info pebble.FlushInfo) {
		t.opts.Logger.Infof("%s", info)
		if info.Err != nil && !strings.Contains(info.Err.Error(), "pebble: empty table") {
			maybeExit(info.Err)
		}
	}
	t.opts.EventListener.ManifestCreated = func(info pebble.ManifestCreateInfo) {
		t.opts.Logger.Infof("%s", info)
		maybeExit(info.Err)
	}
	t.opts.EventListener.ManifestDeleted = func(info pebble.ManifestDeleteInfo) {
		t.opts.Logger.Infof("%s", info)
		maybeExit(info.Err)
	}
	t.opts.EventListener.TableDeleted = func(info pebble.TableDeleteInfo) {
		t.opts.Logger.Infof("%s", info)
		maybeExit(info.Err)
	}
	t.opts.EventListener.TableIngested = func(info pebble.TableIngestInfo) {
		t.opts.Logger.Infof("%s", info)
		maybeExit(info.Err)
	}
	t.opts.EventListener.WALCreated = func(info pebble.WALCreateInfo) {
		t.opts.Logger.Infof("%s", info)
		maybeExit(info.Err)
	}
	t.opts.EventListener.WALDeleted = func(info pebble.WALDeleteInfo) {
		t.opts.Logger.Infof("%s", info)
		maybeExit(info.Err)
	}

	for i := range t.testOpts.CustomOpts {
		if err := t.testOpts.CustomOpts[i].Open(t.opts); err != nil {
			return err
		}
	}

	var db *pebble.DB
	var err error
	err = withRetries(func() error {
		db, err = pebble.Open(dir, t.opts)
		return err
	})
	if err != nil {
		return err
	}
	h.log.Printf("// db.Open() %v", err)

	if t.testOpts.sharedStorageEnabled {
		err = withRetries(func() error {
			return db.SetCreatorID(1)
		})
		if err != nil {
			return err
		}
		h.log.Printf("// db.SetCreatorID() %v", err)
	}

	t.tmpDir = t.opts.FS.PathJoin(dir, "tmp")
	if err = t.opts.FS.MkdirAll(t.tmpDir, 0755); err != nil {
		return err
	}
	if t.testOpts.strictFS {
		// Sync the whole directory path of tmpDir: restartDB() is executed
		// during the test and resets the MemFS to its synced state, which
		// would make an unsynced directory disappear in the middle of the
		// test. It is the responsibility of the test (not Pebble) to ensure
		// that it can write the ssts that it will subsequently ingest into
		// Pebble.
		for {
			f, err := t.opts.FS.OpenDir(dir)
			if err != nil {
				return err
			}
			if err = f.Sync(); err != nil {
				return err
			}
			if err = f.Close(); err != nil {
				return err
			}
			if len(dir) == 1 {
				break
			}
			dir = t.opts.FS.PathDir(dir)
			// TODO(sbhola): PathDir returns ".", which OpenDir() complains about. Fix.
			if len(dir) == 1 {
				dir = "/"
			}
		}
	}

	t.db = db
	return nil
}

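// isFMV reports whether the database's format major version is at least fmv.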
func (t *test) isFMV(fmv pebble.FormatMajorVersion) bool {
	return t.db.FormatMajorVersion() >= fmv
}

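// restartDB simulates a crash and restart under strictFS: it closes the DB,
// discards all unsynced filesystem state, and reopens the DB from the synced
// state. It is a no-op when strictFS is disabled.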
func (t *test) restartDB() error {
	if !t.testOpts.strictFS {
		return nil
	}
	t.opts.Cache.Ref()
	// The fs isn't necessarily a MemFS.
	fs, ok := vfs.Root(t.opts.FS).(*vfs.MemFS)
	if ok {
		fs.SetIgnoreSyncs(true)
	}
	if err := t.db.Close(); err != nil {
		return err
	}
	// Release any resources held by custom options. This may be used, for
	// example, by the encryption-at-rest custom option (within the Cockroach
	// repository) to close the file registry.
	for i := range t.testOpts.CustomOpts {
		if err := t.testOpts.CustomOpts[i].Close(t.opts); err != nil {
			return err
		}
	}
	if ok {
		fs.ResetToSyncedState()
		fs.SetIgnoreSyncs(false)
	}

	// TODO(jackson): Audit errorRate and ensure custom options' hooks semantics
	// are well defined within the context of retries.
	err := withRetries(func() (err error) {
		// Reacquire any resources required by custom options. This may be used, for
		// example, by the encryption-at-rest custom option (within the Cockroach
		// repository) to reopen the file registry.
		for i := range t.testOpts.CustomOpts {
			if err := t.testOpts.CustomOpts[i].Open(t.opts); err != nil {
				return err
			}
		}
		t.db, err = pebble.Open(t.dir, t.opts)
		return err
	})
	t.opts.Cache.Unref()
	return err
}

// If an in-memory FS is being used, save the contents to disk.
func (t *test) maybeSaveData() {
	rootFS := vfs.Root(t.opts.FS)
	if rootFS == vfs.Default {
		return
	}
	_ = os.RemoveAll(t.dir)
	if _, err := vfs.Clone(rootFS, vfs.Default, t.dir, t.dir); err != nil {
		t.opts.Logger.Infof("unable to clone: %s: %v", t.dir, err)
	}
}

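// step runs the next op in the sequence, recording its output in the history.
// It returns false once all ops have been executed.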
func (t *test) step(h *history) bool {
	if t.idx >= len(t.ops) {
		return false
	}
	t.ops[t.idx].run(t, h.recorder(-1 /* thread */, t.idx))
	t.idx++
	return true
}

func (t *test) setBatch(id objID, b *pebble.Batch) {
	if id.tag() != batchTag {
		panic(fmt.Sprintf("invalid batch ID: %s", id))
	}
	t.batches[id.slot()] = b
}

func (t *test) setIter(id objID, i *pebble.Iterator) {
	if id.tag() != iterTag {
		panic(fmt.Sprintf("invalid iter ID: %s", id))
	}
	t.iters[id.slot()] = &retryableIter{
		iter:    i,
		lastKey: nil,
	}
}

type readerCloser interface {
	pebble.Reader
	io.Closer
}

func (t *test) setSnapshot(id objID, s readerCloser) {
	if id.tag() != snapTag {
		panic(fmt.Sprintf("invalid snapshot ID: %s", id))
	}
	t.snapshots[id.slot()] = s
}

func (t *test) clearObj(id objID) {
	switch id.tag() {
	case dbTag:
		t.db = nil
	case batchTag:
		t.batches[id.slot()] = nil
	case iterTag:
		t.iters[id.slot()] = nil
	case snapTag:
		t.snapshots[id.slot()] = nil
	}
}

func (t *test) getBatch(id objID) *pebble.Batch {
	if id.tag() != batchTag {
		panic(fmt.Sprintf("invalid batch ID: %s", id))
	}
	return t.batches[id.slot()]
}

func (t *test) getCloser(id objID) io.Closer {
	switch id.tag() {
	case dbTag:
		return t.db
	case batchTag:
		return t.batches[id.slot()]
	case iterTag:
		return t.iters[id.slot()]
	case snapTag:
		return t.snapshots[id.slot()]
	}
	panic(fmt.Sprintf("cannot close ID: %s", id))
}

func (t *test) getIter(id objID) *retryableIter {
	if id.tag() != iterTag {
		panic(fmt.Sprintf("invalid iter ID: %s", id))
	}
	return t.iters[id.slot()]
}

func (t *test) getReader(id objID) pebble.Reader {
	switch id.tag() {
	case dbTag:
		return t.db
	case batchTag:
		return t.batches[id.slot()]
	case snapTag:
		return t.snapshots[id.slot()]
	}
	panic(fmt.Sprintf("invalid reader ID: %s", id))
}

func (t *test) getWriter(id objID) pebble.Writer {
	switch id.tag() {
	case dbTag:
		return t.db
	case batchTag:
		return t.batches[id.slot()]
	}
	panic(fmt.Sprintf("invalid writer ID: %s", id))
}

// computeSynchronizationPoints computes the synchronization points between
// operations. When operating with more than 1 thread, operations must
// synchronize access to shared objects. It computes two slices the same
// length as ops.
//
// opsWaitOn: each value v in the slice at index i indicates that operation i
// must wait for the operation at index v to finish before it may run.
// NB: v < i
//
// opsDone: the channel at index i must be closed when the operation at index i
// completes. This slice is sparse. Operations that are never used as
// synchronization points may have a nil channel.
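//
// As a worked illustration (hypothetical ops; which ops have which receivers
// and syncObjs is an assumption for the example, not a claim about ops.go):
//
//	op 0: receiver db                      (first op; first reference of db)
//	op 1: receiver db, syncObjs {batch0}   (e.g. creating batch0)
//	op 2: receiver db                      (e.g. a read of the DB)
//	op 3: receiver batch0                  (e.g. a write to batch0)
//	op 4: receiver batch0, syncObjs {db}   (e.g. committing batch0)
//
// Op 3's receiver batch0 was last referenced by op 1, whose receiver is db, so
// opsWaitOn[3] = [1]. Op 2 waits on nothing: the last op to reference db
// (op 1) has the same receiver, and ops with the same receiver run on the
// same thread. Op 4's receiver wait is likewise elided (op 3 has receiver
// batch0), but its syncObj db was last referenced by op 2, so
// opsWaitOn[4] = [2], and done channels are created at opsDone[1] and
// opsDone[2].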
func computeSynchronizationPoints(ops []op) (opsWaitOn [][]int, opsDone []chan struct{}) {
	opsDone = make([]chan struct{}, len(ops)) // operation index -> done channel
	opsWaitOn = make([][]int, len(ops))       // operation index -> operation index
	lastOpReference := make(map[objID]int)    // objID -> operation index
	for i, o := range ops {
		// Find the last operation that involved the same receiver object. We at
		// least need to wait on that operation.
		receiver := o.receiver()
		waitIndex, ok := lastOpReference[receiver]
		lastOpReference[receiver] = i
		if !ok {
			// Only valid for i=0. For all other operations, the receiver should
			// have been referenced by some other operation before it's used as
			// a receiver.
			if i != 0 {
				panic(fmt.Sprintf("op %d on receiver %s; first reference of %s", i, receiver, receiver))
			}
			continue
		}

		// The last operation that referenced `receiver` is the one at index
		// `waitIndex`. All operations with the same receiver are performed on
		// the same thread. We only need to synchronize on the operation at
		// `waitIndex` if `receiver` isn't also the receiver of that operation.
		if ops[waitIndex].receiver() != receiver {
			opsWaitOn[i] = append(opsWaitOn[i], waitIndex)
		}

		// In addition to synchronizing on the operation that last referenced
		// the receiver, we may need to synchronize on additional objects. For
		// example, batch0.Commit() must synchronize on its receiver, batch0,
		// but also on dbObjID since it mutates database state.
		for _, syncObjID := range o.syncObjs() {
			if vi, vok := lastOpReference[syncObjID]; vok {
				opsWaitOn[i] = append(opsWaitOn[i], vi)
			}
			lastOpReference[syncObjID] = i
		}

		waitIndexes := opsWaitOn[i]
		sort.Ints(waitIndexes)
		for _, waitIndex := range waitIndexes {
			// If this is the first operation that must wait on the operation at
			// `waitIndex`, then there will be no channel for the operation yet.
			// Create one.
			if opsDone[waitIndex] == nil {
				opsDone[waitIndex] = make(chan struct{})
			}
		}
	}
	return opsWaitOn, opsDone
}
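
// A minimal sketch (an assumption about the caller, not code from this file)
// of how a multi-threaded runner could consume opsWaitOn and opsDone: before
// running op i, block on the done channels of the ops it waits on, and close
// op i's own done channel (if any) afterwards. opIndexesForThisThread and
// threadID are hypothetical names introduced only for this example.
//
//	for _, i := range opIndexesForThisThread {
//		for _, w := range t.opsWaitOn[i] {
//			<-t.opsDone[w] // wait for prerequisite op w to complete
//		}
//		t.ops[i].run(t, h.recorder(threadID, i))
//		if t.opsDone[i] != nil {
//			close(t.opsDone[i]) // signal ops waiting on op i
//		}
//	}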