github.com/cockroachdb/pebble@v1.1.2/level_checker_test.go

// Copyright 2019 The LevelDB-Go and Pebble Authors. All rights reserved. Use
// of this source code is governed by a BSD-style license that can be found in
// the LICENSE file.

package pebble

import (
	"bytes"
	"context"
	"fmt"
	"io"
	"path/filepath"
	"strings"
	"testing"

	"github.com/cockroachdb/datadriven"
	"github.com/cockroachdb/errors"
	"github.com/cockroachdb/pebble/internal/base"
	"github.com/cockroachdb/pebble/internal/keyspan"
	"github.com/cockroachdb/pebble/internal/manifest"
	"github.com/cockroachdb/pebble/internal/private"
	"github.com/cockroachdb/pebble/internal/rangedel"
	"github.com/cockroachdb/pebble/objstorage/objstorageprovider"
	"github.com/cockroachdb/pebble/sstable"
	"github.com/cockroachdb/pebble/vfs"
	"github.com/stretchr/testify/require"
)

func TestCheckLevelsBasics(t *testing.T) {
	testCases := []string{"db-stage-1", "db-stage-2", "db-stage-3", "db-stage-4"}
	for _, tc := range testCases {
		t.Run(tc, func(t *testing.T) {
			t.Logf("%s", t.Name())
			fs := vfs.NewMem()
			_, err := vfs.Clone(vfs.Default, fs, filepath.Join("testdata", tc), tc)
			if err != nil {
				t.Fatalf("%s: cloneFileSystem failed: %v", tc, err)
			}
			d, err := Open(tc, &Options{
				FS: fs,
			})
			if err != nil {
				t.Fatalf("%s: Open failed: %v", tc, err)
			}
			require.NoError(t, d.CheckLevels(nil))
			require.NoError(t, d.Close())
		})
	}
}

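// failMerger is a test ValueMerger that exercises the level checker's error
// handling: MergeOlder fails on the value "fail-merge", Finish fails when the
// last merged value was "fail-finish", and closeCount verifies that every
// Closer returned by Finish is eventually closed.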
type failMerger struct {
	lastBuf    []byte
	closeCount int
}

func (f *failMerger) MergeNewer(value []byte) error {
	return nil
}

func (f *failMerger) MergeOlder(value []byte) error {
	if string(value) == "fail-merge" {
		f.lastBuf = nil
		return errors.New("merge failed")
	}
	f.lastBuf = append(f.lastBuf[:0], value...)
	return nil
}

func (f *failMerger) Finish(includesBase bool) ([]byte, io.Closer, error) {
	if string(f.lastBuf) == "fail-finish" {
		f.lastBuf = nil
		return nil, nil, errors.New("finish failed")
	}
	f.closeCount++
	return nil, f, nil
}

func (f *failMerger) Close() error {
	f.closeCount--
	f.lastBuf = nil
	return nil
}

func TestCheckLevelsCornerCases(t *testing.T) {
	memFS := vfs.NewMem()
	var levels [][]*fileMetadata
	formatKey := DefaultComparer.FormatKey
	// Indexed by fileNum
	var readers []*sstable.Reader
	defer func() {
		for _, r := range readers {
			r.Close()
		}
	}()

	var fileNum FileNum
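	// newIters returns the point and range-deletion iterators for the sstable
	// identified by file.FileNum, which doubles as an index into readers.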
	newIters :=
		func(_ context.Context, file *manifest.FileMetadata, _ *IterOptions, _ internalIterOpts) (internalIterator, keyspan.FragmentIterator, error) {
			r := readers[file.FileNum]
			rangeDelIter, err := r.NewRawRangeDelIter()
			if err != nil {
				return nil, nil, err
			}
			iter, err := r.NewIter(nil /* lower */, nil /* upper */)
			if err != nil {
				return nil, nil, err
			}
			return iter, rangeDelIter, nil
		}

	fm := &failMerger{}
	// Assert at the end of the test that every Closer returned by Finish was
	// closed. The closure is needed so closeCount is read when the deferred
	// function runs, not when the defer statement is evaluated.
	defer func() { require.Equal(t, 0, fm.closeCount) }()

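	// failMerger wraps fm in a named Merger so that the check command below
	// can opt into the failing merge behavior via its "merger" argument.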
	failMerger := &Merger{
		Merge: func(key, value []byte) (ValueMerger, error) {
			fm.lastBuf = append(fm.lastBuf[:0], value...)
			return fm, nil
		},

		Name: "fail-merger",
	}

	datadriven.RunTest(t, "testdata/level_checker", func(t *testing.T, d *datadriven.TestData) string {
		switch d.Cmd {
		case "define":
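			// The input defines one or more levels. A line containing just
			// "L" starts a new level; each file within a level is described
			// by a pair of lines: the first gives the file's smallest and
			// largest internal keys, the second its key:value pairs. An
			// illustrative single-file level (not taken from the testdata):
			//
			//   L
			//   a.SET.2 c.SET.1
			//   a.SET.2:v1 c.SET.1:v2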
			lines := strings.Split(d.Input, "\n")
			levels = levels[:0]
			for i := 0; i < len(lines); i++ {
				line := lines[i]
				line = strings.TrimSpace(line)
				if line == "L" {
					// start next level
					levels = append(levels, nil)
					continue
				}
				li := &levels[len(levels)-1]
				keys := strings.Fields(line)
				smallestKey := base.ParseInternalKey(keys[0])
				largestKey := base.ParseInternalKey(keys[1])
				m := (&fileMetadata{
					FileNum: fileNum,
				}).ExtendPointKeyBounds(DefaultComparer.Compare, smallestKey, largestKey)
				m.InitPhysicalBacking()
				*li = append(*li, m)

				i++
				line = lines[i]
				line = strings.TrimSpace(line)
				name := fmt.Sprint(fileNum)
				fileNum++
				f, err := memFS.Create(name)
				if err != nil {
					return err.Error()
				}
				writeUnfragmented := false
				w := sstable.NewWriter(objstorageprovider.NewFileWritable(f), sstable.WriterOptions{})
				for _, arg := range d.CmdArgs {
					switch arg.Key {
					case "disable-key-order-checks":
						private.SSTableWriterDisableKeyOrderChecks(w)
					case "write-unfragmented":
						writeUnfragmented = true
					default:
						return fmt.Sprintf("unknown arg: %s", arg.Key)
					}
				}
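				// Range deletions are fragmented before being written to the
				// sstable unless write-unfragmented was specified, in which
				// case they are added verbatim.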
				var tombstones []keyspan.Span
				frag := keyspan.Fragmenter{
					Cmp:    DefaultComparer.Compare,
					Format: formatKey,
					Emit: func(fragmented keyspan.Span) {
						tombstones = append(tombstones, fragmented)
					},
				}
				keyvalues := strings.Fields(line)
				for _, kv := range keyvalues {
					j := strings.Index(kv, ":")
					ikey := base.ParseInternalKey(kv[:j])
					value := []byte(kv[j+1:])
					var err error
					switch ikey.Kind() {
					case InternalKeyKindRangeDelete:
						if writeUnfragmented {
							err = w.Add(ikey, value)
							break
						}
						frag.Add(rangedel.Decode(ikey, value, nil))
					default:
						err = w.Add(ikey, value)
					}
					if err != nil {
						return err.Error()
					}
				}
				frag.Finish()
				for _, v := range tombstones {
					if err := rangedel.Encode(&v, w.Add); err != nil {
						return err.Error()
					}
				}
				if err := w.Close(); err != nil {
					return err.Error()
				}
				f, err = memFS.Open(name)
				if err != nil {
					return err.Error()
				}
				readable, err := sstable.NewSimpleReadable(f)
				if err != nil {
					return err.Error()
				}
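				// Note: fileNum was already incremented above, so fileNum-1
				// identifies the sstable that was just written.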
				cacheOpts := private.SSTableCacheOpts(0, base.FileNum(uint64(fileNum)-1).DiskFileNum()).(sstable.ReaderOption)
				r, err := sstable.NewReader(readable, sstable.ReaderOptions{}, cacheOpts)
				if err != nil {
					return err.Error()
				}
				readers = append(readers, r)
			}
			// TODO(sbhola): clean this up by wrapping levels in a Version and using
			// Version.DebugString().
			var buf bytes.Buffer
			for i, l := range levels {
				fmt.Fprintf(&buf, "Level %d\n", i+1)
				for j, f := range l {
					fmt.Fprintf(&buf, "  file %d: [%s-%s]\n", j, f.Smallest.String(), f.Largest.String())
				}
			}
			return buf.String()
		case "check":
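			// Build a Version from the levels defined above (placed at L1 and
			// higher) and run the level checker over it, optionally with the
			// failing merger.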
			merge := DefaultMerger.Merge
			for _, arg := range d.CmdArgs {
				switch arg.Key {
				case "merger":
					if len(arg.Vals) != 1 {
						return fmt.Sprintf("expected one arg value, got %d", len(arg.Vals))
					}
					if arg.Vals[0] != failMerger.Name {
						return "unsupported merger"
					}
					merge = failMerger.Merge
				default:
					return fmt.Sprintf("unknown arg: %s", arg.Key)
				}
			}

			var files [numLevels][]*fileMetadata
			for i := range levels {
				// Start from level 1 in this test.
				files[i+1] = levels[i]
			}
			version := manifest.NewVersion(
				base.DefaultComparer.Compare,
				base.DefaultFormatter,
				0,
				files)
			readState := &readState{current: version}
			c := &checkConfig{
				comparer:  DefaultComparer,
				readState: readState,
				newIters:  newIters,
				seqNum:    InternalKeySeqNumMax,
				merge:     merge,
				formatKey: formatKey,
			}
			if err := checkLevelsInternal(c); err != nil {
				return err.Error()
			}
			return ""
		default:
			return fmt.Sprintf("unknown command: %s", d.Cmd)
		}
	})
}