github.com/cockroachdb/cockroach@v20.2.0-alpha.1+incompatible/pkg/ccl/storageccl/export_test.go

// Copyright 2016 The Cockroach Authors.
//
// Licensed as a CockroachDB Enterprise file under the Cockroach Community
// License (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
//     https://github.com/cockroachdb/cockroach/blob/master/licenses/CCL.txt

package storageccl

import (
	"bytes"
	"context"
	"fmt"
	"io/ioutil"
	"math"
	"math/rand"
	"path/filepath"
	"sort"
	"testing"
	"time"

	"github.com/cockroachdb/cockroach/pkg/base"
	"github.com/cockroachdb/cockroach/pkg/keys"
	"github.com/cockroachdb/cockroach/pkg/kv"
	"github.com/cockroachdb/cockroach/pkg/roachpb"
	"github.com/cockroachdb/cockroach/pkg/settings/cluster"
	"github.com/cockroachdb/cockroach/pkg/storage"
	"github.com/cockroachdb/cockroach/pkg/testutils"
	"github.com/cockroachdb/cockroach/pkg/testutils/sqlutils"
	"github.com/cockroachdb/cockroach/pkg/testutils/testcluster"
	"github.com/cockroachdb/cockroach/pkg/util/hlc"
	"github.com/cockroachdb/cockroach/pkg/util/leaktest"
	"github.com/cockroachdb/cockroach/pkg/util/randutil"
	"github.com/stretchr/testify/require"
)

func TestExportCmd(t *testing.T) {
	defer leaktest.AfterTest(t)()

	ctx := context.Background()
	dir, dirCleanupFn := testutils.TempDir(t)
	defer dirCleanupFn()
	tc := testcluster.StartTestCluster(t, 1, base.TestClusterArgs{ServerArgs: base.TestServerArgs{ExternalIODir: dir}})
	defer tc.Stopper().Stop(ctx)
	kvDB := tc.Server(0).DB()

	export := func(
		t *testing.T, start hlc.Timestamp, mvccFilter roachpb.MVCCFilter,
	) (roachpb.Response, *roachpb.Error) {
		req := &roachpb.ExportRequest{
			RequestHeader: roachpb.RequestHeader{Key: keys.UserTableDataMin, EndKey: keys.MaxKey},
			StartTime:     start,
			Storage: roachpb.ExternalStorage{
				Provider:  roachpb.ExternalStorageProvider_LocalFile,
				LocalFile: roachpb.ExternalStorage_LocalFilePath{Path: "/foo"},
			},
			MVCCFilter:     mvccFilter,
			ReturnSST:      true,
			TargetFileSize: ExportRequestTargetFileSize.Get(&tc.Server(0).ClusterSettings().SV),
		}
		return kv.SendWrapped(ctx, kvDB.NonTransactionalSender(), req)
	}

	exportAndSlurpOne := func(
		t *testing.T, start hlc.Timestamp, mvccFilter roachpb.MVCCFilter,
	) ([]string, []storage.MVCCKeyValue) {
		res, pErr := export(t, start, mvccFilter)
		if pErr != nil {
			t.Fatalf("%+v", pErr)
		}

		var paths []string
		var kvs []storage.MVCCKeyValue
		ingestFunc := func(kv storage.MVCCKeyValue) (bool, error) {
			kvs = append(kvs, kv)
			return false, nil
		}
		for _, file := range res.(*roachpb.ExportResponse).Files {
			paths = append(paths, file.Path)

			sst := storage.MakeRocksDBSstFileReader()
			defer sst.Close()

			fileContents, err := ioutil.ReadFile(filepath.Join(dir, "foo", file.Path))
			if err != nil {
				t.Fatalf("%+v", err)
			}
			if !bytes.Equal(fileContents, file.SST) {
				t.Fatal("Returned SST and exported SST don't match!")
			}
			if err := sst.IngestExternalFile(file.SST); err != nil {
				t.Fatalf("%+v", err)
			}
			if err := sst.Iterate(keys.MinKey, keys.MaxKey, ingestFunc); err != nil {
				t.Fatalf("%+v", err)
			}
		}

		return paths, kvs
	}
	type ExportAndSlurpResult struct {
		end             hlc.Timestamp
		mvccLatestFiles []string
		mvccLatestKVs   []storage.MVCCKeyValue
		mvccAllFiles    []string
		mvccAllKVs      []storage.MVCCKeyValue
	}
	exportAndSlurp := func(t *testing.T, start hlc.Timestamp) ExportAndSlurpResult {
		var ret ExportAndSlurpResult
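		// Capture "now" before exporting so that callers can use it as the
		// start time of the next incremental export and resume exactly where
		// this one ended.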
		ret.end = hlc.NewClock(hlc.UnixNano, time.Nanosecond).Now()
		ret.mvccLatestFiles, ret.mvccLatestKVs = exportAndSlurpOne(t, start, roachpb.MVCCFilter_Latest)
		ret.mvccAllFiles, ret.mvccAllKVs = exportAndSlurpOne(t, start, roachpb.MVCCFilter_All)
		return ret
	}

	expect := func(
		t *testing.T, res ExportAndSlurpResult,
		mvccLatestFilesLen int, mvccLatestKVsLen int, mvccAllFilesLen int, mvccAllKVsLen int,
	) {
		if len(res.mvccLatestFiles) != mvccLatestFilesLen {
			t.Errorf("expected %d files in latest export got %d", mvccLatestFilesLen, len(res.mvccLatestFiles))
		}
		if len(res.mvccLatestKVs) != mvccLatestKVsLen {
			t.Errorf("expected %d kvs in latest export got %d", mvccLatestKVsLen, len(res.mvccLatestKVs))
		}
		if len(res.mvccAllFiles) != mvccAllFilesLen {
			t.Errorf("expected %d files in all export got %d", mvccAllFilesLen, len(res.mvccAllFiles))
		}
		if len(res.mvccAllKVs) != mvccAllKVsLen {
			t.Errorf("expected %d kvs in all export got %d", mvccAllKVsLen, len(res.mvccAllKVs))
		}
	}

	sqlDB := sqlutils.MakeSQLRunner(tc.Conns[0])
	sqlDB.Exec(t, `CREATE DATABASE mvcclatest`)
	sqlDB.Exec(t, `CREATE TABLE mvcclatest.export (id INT PRIMARY KEY, value INT)`)
	const (
		targetSizeSetting = "kv.bulk_sst.target_size"
		maxOverageSetting = "kv.bulk_sst.max_allowed_overage"
	)
	var (
		setSetting = func(t *testing.T, variable, val string) {
			sqlDB.Exec(t, "SET CLUSTER SETTING "+variable+" = "+val)
		}
		resetSetting = func(t *testing.T, variable string) {
			setSetting(t, variable, "DEFAULT")
		}
		setExportTargetSize = func(t *testing.T, val string) {
			setSetting(t, targetSizeSetting, val)
		}
		resetExportTargetSize = func(t *testing.T) {
			resetSetting(t, targetSizeSetting)
		}
		setMaxOverage = func(t *testing.T, val string) {
			setSetting(t, maxOverageSetting, val)
		}
		resetMaxOverage = func(t *testing.T) {
			resetSetting(t, maxOverageSetting)
		}
	)

	var res1 ExportAndSlurpResult
	t.Run("ts1", func(t *testing.T) {
		// When run with MVCCFilter_Latest and a startTime of 0 (full backup of
		// only the latest values), Export special cases and skips keys that are
		// deleted before the export timestamp.
		sqlDB.Exec(t, `INSERT INTO mvcclatest.export VALUES (1, 1), (3, 3), (4, 4)`)
		sqlDB.Exec(t, `DELETE FROM mvcclatest.export WHERE id = 4`)
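		// Latest should see the two live rows (ids 1 and 3; id 4's deletion
		// tombstone is elided in a full backup), while All should see all
		// four versions: the three inserts plus the delete.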
		res1 = exportAndSlurp(t, hlc.Timestamp{})
		expect(t, res1, 1, 2, 1, 4)
		defer resetExportTargetSize(t)
		setExportTargetSize(t, "'1b'")
		res1 = exportAndSlurp(t, hlc.Timestamp{})
		expect(t, res1, 2, 2, 3, 4)
	})

	var res2 ExportAndSlurpResult
	t.Run("ts2", func(t *testing.T) {
		// If nothing has changed, nothing should be exported.
		res2 = exportAndSlurp(t, res1.end)
		expect(t, res2, 0, 0, 0, 0)
	})

	var res3 ExportAndSlurpResult
	t.Run("ts3", func(t *testing.T) {
		// MVCCFilter_All saves all values.
		sqlDB.Exec(t, `INSERT INTO mvcclatest.export VALUES (2, 2)`)
		sqlDB.Exec(t, `UPSERT INTO mvcclatest.export VALUES (2, 8)`)
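		// Latest sees only the newest version of id 2, (2, 8); All sees both
		// the insert and the upsert.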
		res3 = exportAndSlurp(t, res2.end)
		expect(t, res3, 1, 1, 1, 2)
	})

	var res4 ExportAndSlurpResult
	t.Run("ts4", func(t *testing.T) {
		sqlDB.Exec(t, `DELETE FROM mvcclatest.export WHERE id = 3`)
		res4 = exportAndSlurp(t, res3.end)
		expect(t, res4, 1, 1, 1, 1)
		if len(res4.mvccLatestKVs[0].Value) != 0 {
			v := roachpb.Value{RawBytes: res4.mvccLatestKVs[0].Value}
			t.Errorf("expected a deletion tombstone got %s", v.PrettyPrint())
		}
		if len(res4.mvccAllKVs[0].Value) != 0 {
			v := roachpb.Value{RawBytes: res4.mvccAllKVs[0].Value}
			t.Errorf("expected a deletion tombstone got %s", v.PrettyPrint())
		}
	})

	var res5 ExportAndSlurpResult
	t.Run("ts5", func(t *testing.T) {
		sqlDB.Exec(t, `ALTER TABLE mvcclatest.export SPLIT AT VALUES (2)`)
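		// After the split, each range exports into its own file, so both
		// filters produce two files. Latest sees the two live rows (ids 1
		// and 2); All sees all seven versions written so far.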
		res5 = exportAndSlurp(t, hlc.Timestamp{})
		expect(t, res5, 2, 2, 2, 7)

		// Re-run the test with a 1b target size which will lead to more files.
		defer resetExportTargetSize(t)
		setExportTargetSize(t, "'1b'")
		res5 = exportAndSlurp(t, hlc.Timestamp{})
		expect(t, res5, 2, 2, 4, 7)
	})

	var res6 ExportAndSlurpResult
	t.Run("ts6", func(t *testing.T) {
		// Add 100 rows to the table.
		sqlDB.Exec(t, `WITH RECURSIVE
    t (id, value)
        AS (VALUES (1, 1) UNION ALL SELECT id + 1, value FROM t WHERE id < 100)
UPSERT
INTO
    mvcclatest.export
(SELECT id, value FROM t);`)

		// Run the test with the default target size which will lead to 2 files due
		// to the above split.
		res6 = exportAndSlurp(t, res5.end)
		expect(t, res6, 2, 100, 2, 100)

		// Re-run the test with a 1b target size which will lead to 100 files.
		defer resetExportTargetSize(t)
		setExportTargetSize(t, "'1b'")
		res6 = exportAndSlurp(t, res5.end)
		expect(t, res6, 100, 100, 100, 100)

		// Set the MaxOverage to 1b and ensure that we get errors due to
		// the max overage being exceeded.
		defer resetMaxOverage(t)
		setMaxOverage(t, "'1b'")
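		// The max allowed SST size is presumably targetSize plus the allowed
		// overage, i.e. 1b + 1b = 2 bytes here, so even a single kv pair
		// (11 bytes) exceeds it.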
		const expectedError = `export size \(11 bytes\) exceeds max size \(2 bytes\)`
		_, pErr := export(t, res5.end, roachpb.MVCCFilter_Latest)
		require.Regexp(t, expectedError, pErr)
		_, pErr = export(t, res5.end, roachpb.MVCCFilter_All)
		require.Regexp(t, expectedError, pErr)

		// Disable the TargetSize and ensure that we no longer get errors due
		// to the max overage being exceeded.
		setExportTargetSize(t, "'0b'")
		res6 = exportAndSlurp(t, res5.end)
		expect(t, res6, 2, 100, 2, 100)
	})
}

func TestExportGCThreshold(t *testing.T) {
	defer leaktest.AfterTest(t)()

	ctx := context.Background()
	tc := testcluster.StartTestCluster(t, 1, base.TestClusterArgs{})
	defer tc.Stopper().Stop(ctx)
	kvDB := tc.Server(0).DB()

	req := &roachpb.ExportRequest{
		RequestHeader: roachpb.RequestHeader{Key: keys.UserTableDataMin, EndKey: keys.MaxKey},
		StartTime:     hlc.Timestamp{WallTime: -1},
	}
	_, pErr := kv.SendWrapped(ctx, kvDB.NonTransactionalSender(), req)
	if !testutils.IsPError(pErr, "must be after replica GC threshold") {
		t.Fatalf(`expected "must be after replica GC threshold" error got: %+v`, pErr)
	}
}

// exportUsingGoIterator uses the legacy implementation of export, and is used
// as an oracle to check the correctness of the new C++ implementation.
func exportUsingGoIterator(
	filter roachpb.MVCCFilter,
	startTime, endTime hlc.Timestamp,
	startKey, endKey roachpb.Key,
	enableTimeBoundIteratorOptimization bool,
	reader storage.Reader,
) ([]byte, error) {
	sst, err := storage.MakeRocksDBSstFileWriter()
	if err != nil {
		return nil, nil //nolint:returnerrcheck
	}
	defer sst.Close()

	var skipTombstones bool
	var iterFn func(*storage.MVCCIncrementalIterator)
	switch filter {
	case roachpb.MVCCFilter_Latest:
		skipTombstones = true
		iterFn = (*storage.MVCCIncrementalIterator).NextKey
	case roachpb.MVCCFilter_All:
		skipTombstones = false
		iterFn = (*storage.MVCCIncrementalIterator).Next
	default:
		return nil, nil
	}
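	// NextKey skips over any remaining older versions of the current key, so
	// the Latest filter emits at most one version per key, while Next visits
	// every version for the All filter.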

	io := storage.IterOptions{
		UpperBound: endKey,
	}
	if enableTimeBoundIteratorOptimization {
		io.MaxTimestampHint = endTime
		io.MinTimestampHint = startTime.Next()
	}
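	// The timestamp hints allow the underlying iterator to skip entire
	// sstables whose contents fall outside (startTime, endTime]; this is the
	// time-bound iterator optimization being exercised here.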
	iter := storage.NewMVCCIncrementalIterator(reader, storage.MVCCIncrementalIterOptions{
		IterOptions: io,
		StartTime:   startTime,
		EndTime:     endTime,
	})
	defer iter.Close()
	for iter.SeekGE(storage.MakeMVCCMetadataKey(startKey)); ; iterFn(iter) {
		ok, err := iter.Valid()
		if err != nil {
			// The error may be a WriteIntentError, in which case returning it
			// will cause this command to be retried.
			return nil, err
		}
		if !ok || iter.UnsafeKey().Key.Compare(endKey) >= 0 {
			break
		}

		// Skip tombstone (len=0) records when startTime is zero
		// (non-incremental) and we're not exporting all versions.
		if skipTombstones && startTime.IsEmpty() && len(iter.UnsafeValue()) == 0 {
			continue
		}

		if err := sst.Put(iter.UnsafeKey(), iter.UnsafeValue()); err != nil {
			return nil, err
		}
	}

	if sst.DataSize() == 0 {
		// Let the defer Close the sstable.
		return nil, nil
	}

	sstContents, err := sst.Finish()
	if err != nil {
		return nil, err
	}

	return sstContents, nil
}

func loadSST(t *testing.T, data []byte, start, end roachpb.Key) []storage.MVCCKeyValue {
	t.Helper()
	if len(data) == 0 {
		return nil
	}

	sst := storage.MakeRocksDBSstFileReader()
	defer sst.Close()

	if err := sst.IngestExternalFile(data); err != nil {
		t.Fatal(err)
	}

	var kvs []storage.MVCCKeyValue
	if err := sst.Iterate(start, end, func(kv storage.MVCCKeyValue) (bool, error) {
		kvs = append(kvs, kv)
		return false, nil
	}); err != nil {
		t.Fatal(err)
	}

	return kvs
}

func assertEqualKVs(
	ctx context.Context,
	e storage.Engine,
	startKey, endKey roachpb.Key,
	startTime, endTime hlc.Timestamp,
	exportAllRevisions bool,
	enableTimeBoundIteratorOptimization bool,
	targetSize uint64,
) func(*testing.T) {
	return func(t *testing.T) {
		t.Helper()

		var filter roachpb.MVCCFilter
		if exportAllRevisions {
			filter = roachpb.MVCCFilter_All
		} else {
			filter = roachpb.MVCCFilter_Latest
		}

		// Run oracle (go implementation of the IncrementalIterator).
		expected, err := exportUsingGoIterator(filter, startTime, endTime,
			startKey, endKey, enableTimeBoundIteratorOptimization, e)
		if err != nil {
			t.Fatalf("oracle failed to export provided key range: %+v", err)
		}

		// Run new C++ implementation of IncrementalIterator.
		io := storage.IterOptions{
			UpperBound: endKey,
		}
		if enableTimeBoundIteratorOptimization {
			io.MaxTimestampHint = endTime
			io.MinTimestampHint = startTime.Next()
		}
		var kvs []storage.MVCCKeyValue
		for start := startKey; start != nil; {
			var sst []byte
			var summary roachpb.BulkOpSummary
			maxSize := uint64(0)
			prevStart := start
			sst, summary, start, err = e.ExportToSst(start, endKey, startTime, endTime,
				exportAllRevisions, targetSize, maxSize, io)
			require.NoError(t, err)
			loaded := loadSST(t, sst, startKey, endKey)
			// Ensure that the pagination worked properly.
			if start != nil {
				dataSize := uint64(summary.DataSize)
				require.Truef(t, targetSize <= dataSize, "%d > %d",
					targetSize, summary.DataSize)
				// Dropping the bytes attributable to the last key should
				// bring us back below the target size.
				firstKVofLastKey := sort.Search(len(loaded), func(i int) bool {
					return loaded[i].Key.Key.Equal(loaded[len(loaded)-1].Key.Key)
				})
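				// All versions of a key end up in the same file, so this
				// finds the first kv belonging to the last key and everything
				// from there on is stripped.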
				dataSizeWithoutLastKey := dataSize
				for _, kv := range loaded[firstKVofLastKey:] {
					dataSizeWithoutLastKey -= uint64(len(kv.Key.Key) + len(kv.Value))
				}
				require.Truef(t, targetSize > dataSizeWithoutLastKey, "%d <= %d", targetSize, dataSizeWithoutLastKey)
				// Ensure that maxSize leads to an error if exceeded.
				// Note that this uses a relatively nonsensical value of
				// maxSize which is equal to the targetSize.
				maxSize = targetSize
				dataSizeWhenExceeded := dataSize
				for i := len(loaded) - 1; i >= 0; i-- {
					kv := loaded[i]
					lessThisKey := dataSizeWhenExceeded - uint64(len(kv.Key.Key)+len(kv.Value))
					if lessThisKey >= maxSize {
						dataSizeWhenExceeded = lessThisKey
					} else {
						break
					}
				}
				// It might be the case that this key would lead to an SST of
				// exactly maxSize; in that case, lower maxSize so that we
				// still generate an error.
				if dataSizeWhenExceeded == maxSize {
					maxSize--
				}
				_, _, _, err = e.ExportToSst(prevStart, endKey, startTime, endTime,
					exportAllRevisions, targetSize, maxSize, io)
				require.Regexp(t, fmt.Sprintf("export size \\(%d bytes\\) exceeds max size \\(%d bytes\\)",
					dataSizeWhenExceeded, maxSize), err)
			}
			kvs = append(kvs, loaded...)
		}

		// Compare new C++ implementation against the oracle.
		expectedKVS := loadSST(t, expected, startKey, endKey)
		if len(kvs) != len(expectedKVS) {
			t.Fatalf("got %d kvs but expected %d:\n%v\n%v", len(kvs), len(expectedKVS), kvs, expectedKVS)
		}

		for i := range kvs {
			if !kvs[i].Key.Equal(expectedKVS[i].Key) {
				t.Fatalf("%d key: got %v but expected %v", i, kvs[i].Key, expectedKVS[i].Key)
			}
			if !bytes.Equal(kvs[i].Value, expectedKVS[i].Value) {
				t.Fatalf("%d value: got %x but expected %x", i, kvs[i].Value, expectedKVS[i].Value)
			}
		}
	}
}

func TestRandomKeyAndTimestampExport(t *testing.T) {
	defer leaktest.AfterTest(t)()

	ctx := context.Background()

	mkEngine := func(t *testing.T) (e storage.Engine, cleanup func()) {
		dir, cleanupDir := testutils.TempDir(t)
		e, err := storage.NewDefaultEngine(
			0,
			base.StorageConfig{
				Settings: cluster.MakeTestingClusterSettings(),
				Dir:      dir,
			})
		if err != nil {
			t.Fatal(err)
		}
		return e, func() {
			e.Close()
			cleanupDir()
		}
	}
	getNumKeys := func(t *testing.T, rnd *rand.Rand, targetSize uint64) (numKeys int) {
		const (
			targetPages   = 10
			bytesPerValue = 300
			minNumKeys    = 2 // need > 1 keys for random key test
			maxNumKeys    = 5000
		)
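		// Pick enough keys to fill roughly targetPages files at the given
		// target size, clamped to [minNumKeys, maxNumKeys].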
		numKeys = maxNumKeys
		if targetSize > 0 {
			numKeys = rnd.Intn(int(targetSize)*targetPages*2) / bytesPerValue
		}
		if numKeys > maxNumKeys {
			numKeys = maxNumKeys
		} else if numKeys < minNumKeys {
			numKeys = minNumKeys
		}
		return numKeys
	}
	mkData := func(
		t *testing.T, e storage.Engine, rnd *rand.Rand, numKeys int,
	) ([]roachpb.Key, []hlc.Timestamp) {
		// Store generated keys and timestamps.
		var keys []roachpb.Key
		var timestamps []hlc.Timestamp

		var curWallTime = 0
		var curLogical = 0

		batch := e.NewBatch()
		for i := 0; i < numKeys; i++ {
			// Pick a random walltime and logical component; the collected
			// timestamps are sorted below.
			curWallTime = randutil.RandIntInRange(rnd, 0, math.MaxInt64-1)
			curLogical = randutil.RandIntInRange(rnd, 0, math.MaxInt32-1)
			ts := hlc.Timestamp{WallTime: int64(curWallTime), Logical: int32(curLogical)}
			timestamps = append(timestamps, ts)
			// Prefix the random bytes with the loop index to make keys unique.
			key := roachpb.Key(randutil.RandBytes(rnd, 100))
			key = append([]byte(fmt.Sprintf("#%d", i)), key...)
			keys = append(keys, key)

			value := roachpb.MakeValueFromBytes(randutil.RandBytes(rnd, 200))
			value.InitChecksum(key)
			if err := storage.MVCCPut(ctx, batch, nil, key, ts, value, nil); err != nil {
				t.Fatal(err)
			}

			// Randomly decide whether to add a newer version of the same key to test
			// MVCCFilter_All.
			if randutil.RandIntInRange(rnd, 0, math.MaxInt64)%2 == 0 {
				curWallTime++
				ts = hlc.Timestamp{WallTime: int64(curWallTime), Logical: int32(curLogical)}
				value = roachpb.MakeValueFromBytes(randutil.RandBytes(rnd, 200))
				value.InitChecksum(key)
				if err := storage.MVCCPut(ctx, batch, nil, key, ts, value, nil); err != nil {
					t.Fatal(err)
				}
			}
		}
		if err := batch.Commit(true); err != nil {
			t.Fatal(err)
		}
		batch.Close()

		sort.Slice(timestamps, func(i, j int) bool {
			return (timestamps[i].WallTime < timestamps[j].WallTime) ||
				(timestamps[i].WallTime == timestamps[j].WallTime &&
					timestamps[i].Logical < timestamps[j].Logical)
		})
		return keys, timestamps
	}

	testWithTargetSize := func(t *testing.T, targetSize uint64) {
		e, cleanup := mkEngine(t)
		defer cleanup()
		rnd, _ := randutil.NewPseudoRand()
		numKeys := getNumKeys(t, rnd, targetSize)
		keys, timestamps := mkData(t, e, rnd, numKeys)
		var (
			keyMin = roachpb.KeyMin
			keyMax = roachpb.KeyMax

			tsMin = hlc.Timestamp{WallTime: 0, Logical: 0}
			tsMax = hlc.Timestamp{WallTime: math.MaxInt64, Logical: 0}
		)

		t.Run("ts (0-∞], latest, nontimebound", assertEqualKVs(ctx, e, keyMin, keyMax, tsMin, tsMax, false, false, targetSize))
		t.Run("ts (0-∞], all, nontimebound", assertEqualKVs(ctx, e, keyMin, keyMax, tsMin, tsMax, true, false, targetSize))
		t.Run("ts (0-∞], latest, timebound", assertEqualKVs(ctx, e, keyMin, keyMax, tsMin, tsMax, false, true, targetSize))
		t.Run("ts (0-∞], all, timebound", assertEqualKVs(ctx, e, keyMin, keyMax, tsMin, tsMax, true, true, targetSize))

		upperBound := randutil.RandIntInRange(rnd, 1, numKeys)
		lowerBound := rnd.Intn(upperBound)

		// Exercise random key ranges.
		t.Run("kv [randLower, randUpper), latest, nontimebound", assertEqualKVs(ctx, e, keys[lowerBound], keys[upperBound], tsMin, tsMax, false, false, targetSize))
		t.Run("kv [randLower, randUpper), all, nontimebound", assertEqualKVs(ctx, e, keys[lowerBound], keys[upperBound], tsMin, tsMax, true, false, targetSize))
		t.Run("kv [randLower, randUpper), latest, timebound", assertEqualKVs(ctx, e, keys[lowerBound], keys[upperBound], tsMin, tsMax, false, true, targetSize))
		t.Run("kv [randLower, randUpper), all, timebound", assertEqualKVs(ctx, e, keys[lowerBound], keys[upperBound], tsMin, tsMax, true, true, targetSize))

		upperBound = randutil.RandIntInRange(rnd, 1, numKeys)
		lowerBound = rnd.Intn(upperBound)

		// Exercise random timestamps.
		t.Run("kv (randLowerTime, randUpperTime], latest, nontimebound", assertEqualKVs(ctx, e, keyMin, keyMax, timestamps[lowerBound], timestamps[upperBound], false, false, targetSize))
		t.Run("kv (randLowerTime, randUpperTime], all, nontimebound", assertEqualKVs(ctx, e, keyMin, keyMax, timestamps[lowerBound], timestamps[upperBound], true, false, targetSize))
		t.Run("kv (randLowerTime, randUpperTime], latest, timebound", assertEqualKVs(ctx, e, keyMin, keyMax, timestamps[lowerBound], timestamps[upperBound], false, true, targetSize))
		t.Run("kv (randLowerTime, randUpperTime], all, timebound", assertEqualKVs(ctx, e, keyMin, keyMax, timestamps[lowerBound], timestamps[upperBound], true, true, targetSize))
	}
	// Exercise min to max time and key ranges.
	for _, targetSize := range []uint64{
		0 /* unlimited */, 1 << 10, 1 << 16, 1 << 20,
	} {
		t.Run(fmt.Sprintf("targetSize=%d", targetSize), func(t *testing.T) {
			testWithTargetSize(t, targetSize)
		})
	}
}