github.com/zuoyebang/bitalostable@v1.0.1-0.20240229032404-e3b99a834294/table_cache_test.go

// Copyright 2013 The LevelDB-Go and Pebble and Bitalostored Authors. All rights reserved. Use
// of this source code is governed by a BSD-style license that can be found in
// the LICENSE file.

package bitalostable

import (
	"bufio"
	"bytes"
	"fmt"
	"io"
	"os"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	"github.com/cockroachdb/errors"
	"github.com/stretchr/testify/require"
	"github.com/zuoyebang/bitalostable/internal/base"
	"github.com/zuoyebang/bitalostable/sstable"
	"github.com/zuoyebang/bitalostable/vfs"
	"golang.org/x/exp/rand"
)

// tableCacheTestFile wraps a vfs.File so that the owning tableCacheTestFS can
// count how many times each file is closed.
type tableCacheTestFile struct {
	vfs.File
	fs   *tableCacheTestFS
	name string
}

func (f *tableCacheTestFile) Close() error {
	f.fs.mu.Lock()
	if f.fs.closeCounts != nil {
		f.fs.closeCounts[f.name]++
	}
	f.fs.mu.Unlock()
	return f.File.Close()
}

// tableCacheTestFS wraps a vfs.FS, counting per-file opens and closes and
// optionally injecting an error on Open.
type tableCacheTestFS struct {
	vfs.FS

	mu               sync.Mutex
	openCounts       map[string]int
	closeCounts      map[string]int
	openErrorEnabled bool
}

// Open counts each open, optionally fails with an injected error, and
// requires that sstables are always opened with vfs.RandomReadsOption (a hint
// that access will be non-sequential).
func (fs *tableCacheTestFS) Open(name string, opts ...vfs.OpenOption) (vfs.File, error) {
	fs.mu.Lock()
	if fs.openErrorEnabled {
		fs.mu.Unlock()
		return nil, errors.New("injected error")
	}
	if fs.openCounts != nil {
		fs.openCounts[name]++
	}
	fs.mu.Unlock()
	// Check the options before opening, so that a failed check cannot leak an
	// open file handle.
	if len(opts) < 1 || opts[0] != vfs.RandomReadsOption {
		return nil, errors.Errorf("sstable file %s not opened with random reads option", name)
	}
	f, err := fs.FS.Open(name, opts...)
	if err != nil {
		return nil, err
	}
	return &tableCacheTestFile{f, fs, name}, nil
}

// validate checks the per-table open and close counts, passing each table's
// counts to f (if non-nil), then closes the container and verifies that no
// tables remain open.
func (fs *tableCacheTestFS) validate(
	t *testing.T, c *tableCacheContainer, f func(i, gotO, gotC int) error,
) {
	if err := fs.validateOpenTables(f); err != nil {
		t.Error(err)
		return
	}
	c.close()
	if err := fs.validateNoneStillOpen(); err != nil {
		t.Error(err)
		return
	}
}

func (fs *tableCacheTestFS) setOpenError(enabled bool) {
	fs.mu.Lock()
	defer fs.mu.Unlock()
	fs.openErrorEnabled = enabled
}

// validateOpenTables validates that no tables in the cache are open twice, and
// the number still open is no greater than tableCacheTestCacheSize.
func (fs *tableCacheTestFS) validateOpenTables(f func(i, gotO, gotC int) error) error {
	// try backs off to let any clean-up goroutines do their work.
	return try(100*time.Microsecond, 20*time.Second, func() error {
		fs.mu.Lock()
		defer fs.mu.Unlock()

		numStillOpen := 0
		for i := 0; i < tableCacheTestNumTables; i++ {
			filename := base.MakeFilepath(fs, "", fileTypeTable, FileNum(i))
			gotO, gotC := fs.openCounts[filename], fs.closeCounts[filename]
			if gotO > gotC {
				numStillOpen++
			}
			if gotC != gotO && gotC != gotO-1 {
				return errors.Errorf("i=%d: table closed too many or too few times: opened %d times, closed %d times",
					i, gotO, gotC)
			}
			if f != nil {
				if err := f(i, gotO, gotC); err != nil {
					return err
				}
			}
		}
		if numStillOpen > tableCacheTestCacheSize {
			return errors.Errorf("numStillOpen is %d, want <= %d", numStillOpen, tableCacheTestCacheSize)
		}
		return nil
	})
}

// validateNoneStillOpen validates that no tables in the cache are open.
func (fs *tableCacheTestFS) validateNoneStillOpen() error {
	// try backs off to let any clean-up goroutines do their work.
	return try(100*time.Microsecond, 20*time.Second, func() error {
		fs.mu.Lock()
		defer fs.mu.Unlock()

		for i := 0; i < tableCacheTestNumTables; i++ {
			filename := base.MakeFilepath(fs, "", fileTypeTable, FileNum(i))
			gotO, gotC := fs.openCounts[filename], fs.closeCounts[filename]
			if gotO != gotC {
				return errors.Errorf("i=%d: opened %d times, closed %d times", i, gotO, gotC)
			}
		}
		return nil
	})
}

const (
	tableCacheTestNumTables = 300
	tableCacheTestCacheSize = 100
)
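
// Note that tableCacheTestNumTables is deliberately larger than
// tableCacheTestCacheSize, so exercising every table forces the cache to
// continually evict and reopen entries.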

// newTableCacheTest returns a shareable table cache to be used for tests.
// It is the caller's responsibility to unref the table cache.
func newTableCacheTest(size int64, tableCacheSize int, numShards int) *TableCache {
	cache := NewCache(size)
	defer cache.Unref()
	return NewTableCache(cache, numShards, tableCacheSize)
}
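
// A minimal usage sketch (illustrative, not part of the test suite): the
// returned cache holds a single reference owned by the caller, so sharing it
// with a second owner looks like
//
//	tc := newTableCacheTest(8<<20, 10, 2) // refs == 1, held by the creator
//	tc.Ref()                              // a second owner: refs == 2
//	tc.Unref()                            // the second owner is done: refs == 1
//	tc.Unref()                            // final unref frees the cache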

// newTableCacheContainerTest builds a tableCacheContainer backed by a counting
// in-memory filesystem populated with tableCacheTestNumTables sstables. Table
// i holds the point key "k" with an i-byte value, plus a range key over
// [k, l). When tc is nil, the container is backed by its own private cache
// rather than a shared TableCache.
func newTableCacheContainerTest(
	tc *TableCache, dirname string,
) (*tableCacheContainer, *tableCacheTestFS, error) {
	xxx := bytes.Repeat([]byte("x"), tableCacheTestNumTables)
	fs := &tableCacheTestFS{
		FS: vfs.NewMem(),
	}
	for i := 0; i < tableCacheTestNumTables; i++ {
		f, err := fs.Create(base.MakeFilepath(fs, dirname, fileTypeTable, FileNum(i)))
		if err != nil {
			return nil, nil, errors.Wrap(err, "fs.Create")
		}
		tw := sstable.NewWriter(f, sstable.WriterOptions{TableFormat: sstable.TableFormatPebblev2})
		ik := base.ParseInternalKey(fmt.Sprintf("k.SET.%d", i))
		if err := tw.Add(ik, xxx[:i]); err != nil {
			return nil, nil, errors.Wrap(err, "tw.Add")
		}
		if err := tw.RangeKeySet([]byte("k"), []byte("l"), nil, xxx[:i]); err != nil {
			return nil, nil, errors.Wrap(err, "tw.RangeKeySet")
		}
		if err := tw.Close(); err != nil {
			return nil, nil, errors.Wrap(err, "tw.Close")
		}
	}

	fs.mu.Lock()
	fs.openCounts = map[string]int{}
	fs.closeCounts = map[string]int{}
	fs.mu.Unlock()

	opts := &Options{}
	opts.EnsureDefaults()
	if tc == nil {
		opts.Cache = NewCache(8 << 20) // 8 MB
		defer opts.Cache.Unref()
	} else {
		opts.Cache = tc.cache
	}

	c := newTableCacheContainer(tc, opts.Cache.NewID(), dirname, fs, opts, tableCacheTestCacheSize)
	return c, fs, nil
}
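
// Typical usage in the tests below (sketch): build a container over the
// counting filesystem, exercise iterators against it, then let fs.validate
// verify the open/close accounting and close the container:
//
//	c, fs, err := newTableCacheContainerTest(nil, "")
//	require.NoError(t, err)
//	// ... open and close iterators via c.newIters / c.newRangeKeyIter ...
//	fs.validate(t, c, nil)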

// Test basic reference counting for the table cache.
func TestTableCacheRefs(t *testing.T) {
	tc := newTableCacheTest(8<<20, 10, 2)

	require.EqualValues(t, 1, atomic.LoadInt64(&tc.atomic.refs))

	tc.Ref()
	require.EqualValues(t, 2, atomic.LoadInt64(&tc.atomic.refs))

	tc.Unref()
	require.EqualValues(t, 1, atomic.LoadInt64(&tc.atomic.refs))

	tc.Unref()
	require.EqualValues(t, 0, atomic.LoadInt64(&tc.atomic.refs))

	// Unref'ing past zero must panic.
	defer func() {
		if r := recover(); r != nil {
			if fmt.Sprint(r) != "bitalostable: inconsistent reference count: -1" {
				t.Fatalf("unexpected panic message: %v", r)
			}
		} else {
			t.Fatalf("expected panic")
		}
	}()
	tc.Unref()
}

// The table cache shouldn't be usable after all the dbs close.
func TestSharedTableCacheUseAfterAllFree(t *testing.T) {
	tc := newTableCacheTest(8<<20, 10, 1)
	db1, err := Open("test",
		&Options{
			FS:         vfs.NewMem(),
			Cache:      tc.cache,
			TableCache: tc,
		})
	require.NoError(t, err)

	// Release our reference, now that the db has a reference.
	tc.Unref()

	db2, err := Open("test",
		&Options{
			FS:         vfs.NewMem(),
			Cache:      tc.cache,
			TableCache: tc,
		})
	require.NoError(t, err)

	require.NoError(t, db1.Close())
	require.NoError(t, db2.Close())

	v := atomic.LoadInt64(&tc.atomic.refs)
	if v != 0 {
		t.Fatalf("expected reference count %d, got %d", 0, v)
	}

	defer func() {
		// The cache ref gets incremented before the panic, so we should
		// decrement it to prevent the finalizer from detecting a leak.
		tc.cache.Unref()

		if r := recover(); r != nil {
			if fmt.Sprint(r) != "bitalostable: inconsistent reference count: 1" {
				t.Fatalf("unexpected panic message: %v", r)
			}
		} else {
			t.Fatalf("expected panic")
		}
	}()

	db3, _ := Open("test",
		&Options{
			FS:         vfs.NewMem(),
			Cache:      tc.cache,
			TableCache: tc,
		})
	_ = db3
}

// Test that a shared table cache remains usable by one db after
// another db releases its reference.
func TestSharedTableCacheUseAfterOneFree(t *testing.T) {
	tc := newTableCacheTest(8<<20, 10, 1)
	db1, err := Open("test",
		&Options{
			FS:         vfs.NewMem(),
			Cache:      tc.cache,
			TableCache: tc,
		})
	require.NoError(t, err)

	// Release our reference, now that the db has a reference.
	tc.Unref()

	db2, err := Open("test",
		&Options{
			FS:         vfs.NewMem(),
			Cache:      tc.cache,
			TableCache: tc,
		})
	require.NoError(t, err)
	defer func() {
		require.NoError(t, db2.Close())
	}()

	// Make db1 release a reference to the cache. It should
	// still be usable by db2.
	require.NoError(t, db1.Close())
	v := atomic.LoadInt64(&tc.atomic.refs)
	if v != 1 {
		t.Fatalf("expected reference count %d, got %d", 1, v)
	}

	// Check if db2 is still usable.
	start := []byte("a")
	end := []byte("d")
	require.NoError(t, db2.Set(start, nil, nil))
	require.NoError(t, db2.Flush())
	require.NoError(t, db2.DeleteRange(start, end, nil))
	require.NoError(t, db2.Compact(start, end, false))
}

// A basic test which makes sure that a shared table cache is usable
// by more than one database at once.
func TestSharedTableCacheUsable(t *testing.T) {
	tc := newTableCacheTest(8<<20, 10, 1)
	db1, err := Open("test",
		&Options{
			FS:         vfs.NewMem(),
			Cache:      tc.cache,
			TableCache: tc,
		})
	require.NoError(t, err)

	// Release our reference, now that the db has a reference.
	tc.Unref()

	defer func() {
		require.NoError(t, db1.Close())
	}()

	db2, err := Open("test",
		&Options{
			FS:         vfs.NewMem(),
			Cache:      tc.cache,
			TableCache: tc,
		})
	require.NoError(t, err)
	defer func() {
		require.NoError(t, db2.Close())
	}()

	start := []byte("a")
	end := []byte("z")
	require.NoError(t, db1.Set(start, nil, nil))
	require.NoError(t, db1.Flush())
	require.NoError(t, db1.DeleteRange(start, end, nil))
	require.NoError(t, db1.Compact(start, end, false))

	start = []byte("x")
	end = []byte("y")
	require.NoError(t, db2.Set(start, nil, nil))
	require.NoError(t, db2.Flush())
	require.NoError(t, db2.Set(start, []byte{'a'}, nil))
	require.NoError(t, db2.Flush())
	require.NoError(t, db2.DeleteRange(start, end, nil))
	require.NoError(t, db2.Compact(start, end, false))
}

func TestSharedTableConcurrent(t *testing.T) {
	tc := newTableCacheTest(8<<20, 10, 1)
	db1, err := Open("test",
		&Options{
			FS:         vfs.NewMem(),
			Cache:      tc.cache,
			TableCache: tc,
		})
	require.NoError(t, err)

	// Release our reference, now that the db has a reference.
	tc.Unref()

	defer func() {
		require.NoError(t, db1.Close())
	}()

	db2, err := Open("test",
		&Options{
			FS:         vfs.NewMem(),
			Cache:      tc.cache,
			TableCache: tc,
		})
	require.NoError(t, err)
	defer func() {
		require.NoError(t, db2.Close())
	}()

	var wg sync.WaitGroup
	wg.Add(2)

	// Now that both dbs have a reference to the table cache, run goroutines
	// that use the DBs concurrently.
	concFunc := func(db *DB) {
		for i := 0; i < 1000; i++ {
			start := []byte("a")
			end := []byte("z")
			require.NoError(t, db.Set(start, nil, nil))
			require.NoError(t, db.Flush())
			require.NoError(t, db.DeleteRange(start, end, nil))
			require.NoError(t, db.Compact(start, end, false))
		}
		wg.Done()
	}

	go concFunc(db1)
	go concFunc(db2)

	wg.Wait()
}

func testTableCacheRandomAccess(t *testing.T, concurrent bool) {
	const N = 2000
	c, fs, err := newTableCacheContainerTest(nil, "")
	require.NoError(t, err)

	rngMu := sync.Mutex{}
	rng := rand.New(rand.NewSource(1))

	errc := make(chan error, N)
	for i := 0; i < N; i++ {
		go func(i int) {
			rngMu.Lock()
			fileNum, sleepTime := rng.Intn(tableCacheTestNumTables), rng.Intn(1000)
			rngMu.Unlock()
			iter, _, err := c.newIters(
				&fileMetadata{FileNum: FileNum(fileNum)},
				nil, /* iter options */
				internalIterOpts{})
			if err != nil {
				errc <- errors.Errorf("i=%d, fileNum=%d: find: %v", i, fileNum, err)
				return
			}
			key, value := iter.SeekGE([]byte("k"), base.SeekGEFlagsNone)
			if concurrent {
				time.Sleep(time.Duration(sleepTime) * time.Microsecond)
			}
			if key == nil {
				errc <- errors.Errorf("i=%d, fileNum=%d: valid.0: got false, want true", i, fileNum)
				return
			}
			if got := len(value); got != fileNum {
				errc <- errors.Errorf("i=%d, fileNum=%d: value: got %d bytes, want %d", i, fileNum, got, fileNum)
				return
			}
			if key, _ := iter.Next(); key != nil {
				errc <- errors.Errorf("i=%d, fileNum=%d: next.1: got true, want false", i, fileNum)
				return
			}
			if err := iter.Close(); err != nil {
				errc <- errors.Wrapf(err, "close error i=%d, fileNum=%d", i, fileNum)
				return
			}
			errc <- nil
		}(i)
		if !concurrent {
			require.NoError(t, <-errc)
		}
	}
	if concurrent {
		for i := 0; i < N; i++ {
			require.NoError(t, <-errc)
		}
	}
	fs.validate(t, c, nil)
}

func TestTableCacheRandomAccessSequential(t *testing.T) { testTableCacheRandomAccess(t, false) }
func TestTableCacheRandomAccessConcurrent(t *testing.T) { testTableCacheRandomAccess(t, true) }

// testTableCacheFrequentlyUsedInternal verifies that tables accessed on every
// iteration (pinned0 and pinned1) are never evicted: each must be opened
// exactly once over the whole run.
func testTableCacheFrequentlyUsedInternal(t *testing.T, rangeIter bool) {
	const (
		N       = 1000
		pinned0 = 7
		pinned1 = 11
	)
	c, fs, err := newTableCacheContainerTest(nil, "")
	require.NoError(t, err)

	for i := 0; i < N; i++ {
		for _, j := range [...]int{pinned0, i % tableCacheTestNumTables, pinned1} {
			var iter io.Closer
			var err error
			if rangeIter {
				iter, err = c.newRangeKeyIter(
					&fileMetadata{FileNum: FileNum(j)},
					nil /* iter options */)
			} else {
				iter, _, err = c.newIters(
					&fileMetadata{FileNum: FileNum(j)},
					nil, /* iter options */
					internalIterOpts{})
			}
			if err != nil {
				t.Fatalf("i=%d, j=%d: find: %v", i, j, err)
			}
			if err := iter.Close(); err != nil {
				t.Fatalf("i=%d, j=%d: close: %v", i, j, err)
			}
		}
	}

	fs.validate(t, c, func(i, gotO, gotC int) error {
		if i == pinned0 || i == pinned1 {
			if gotO != 1 || gotC != 0 {
				return errors.Errorf("i=%d: pinned table: got %d, %d, want %d, %d", i, gotO, gotC, 1, 0)
			}
		}
		return nil
	})
}

func TestTableCacheFrequentlyUsed(t *testing.T) {
	for i, iterType := range []string{"point", "range"} {
		t.Run(fmt.Sprintf("iter=%s", iterType), func(t *testing.T) {
			testTableCacheFrequentlyUsedInternal(t, i == 1)
		})
	}
}

func TestSharedTableCacheFrequentlyUsed(t *testing.T) {
	const (
		N       = 1000
		pinned0 = 7
		pinned1 = 11
	)
	tc := newTableCacheTest(8<<20, 2*tableCacheTestCacheSize, 16)
	c1, fs1, err := newTableCacheContainerTest(tc, "")
	require.NoError(t, err)
	c2, fs2, err := newTableCacheContainerTest(tc, "")
	require.NoError(t, err)
	tc.Unref()

	for i := 0; i < N; i++ {
		for _, j := range [...]int{pinned0, i % tableCacheTestNumTables, pinned1} {
			iter1, _, err := c1.newIters(
				&fileMetadata{FileNum: FileNum(j)},
				nil, /* iter options */
				internalIterOpts{})
			if err != nil {
				t.Fatalf("i=%d, j=%d: find: %v", i, j, err)
			}
			iter2, _, err := c2.newIters(
				&fileMetadata{FileNum: FileNum(j)},
				nil, /* iter options */
				internalIterOpts{})
			if err != nil {
				t.Fatalf("i=%d, j=%d: find: %v", i, j, err)
			}

			if err := iter1.Close(); err != nil {
				t.Fatalf("i=%d, j=%d: close: %v", i, j, err)
			}
			if err := iter2.Close(); err != nil {
				t.Fatalf("i=%d, j=%d: close: %v", i, j, err)
			}
		}
	}

	fs1.validate(t, c1, func(i, gotO, gotC int) error {
		if i == pinned0 || i == pinned1 {
			if gotO != 1 || gotC != 0 {
				return errors.Errorf("i=%d: pinned table: got %d, %d, want %d, %d", i, gotO, gotC, 1, 0)
			}
		}
		return nil
	})

	fs2.validate(t, c2, func(i, gotO, gotC int) error {
		if i == pinned0 || i == pinned1 {
			if gotO != 1 || gotC != 0 {
				return errors.Errorf("i=%d: pinned table: got %d, %d, want %d, %d", i, gotO, gotC, 1, 0)
			}
		}
		return nil
	})
}

// testTableCacheEvictionsInternal repeatedly opens random tables while
// explicitly evicting file numbers in [lo, hi), then checks that tables in the
// evicted range had to be reopened noticeably more often than the rest.
func testTableCacheEvictionsInternal(t *testing.T, rangeIter bool) {
	const (
		N      = 1000
		lo, hi = 10, 20
	)
	c, fs, err := newTableCacheContainerTest(nil, "")
	require.NoError(t, err)

	rng := rand.New(rand.NewSource(2))
	for i := 0; i < N; i++ {
		j := rng.Intn(tableCacheTestNumTables)
		var iter io.Closer
		var err error
		if rangeIter {
			iter, err = c.newRangeKeyIter(
				&fileMetadata{FileNum: FileNum(j)},
				nil /* iter options */)
		} else {
			iter, _, err = c.newIters(
				&fileMetadata{FileNum: FileNum(j)},
				nil, /* iter options */
				internalIterOpts{})
		}
		if err != nil {
			t.Fatalf("i=%d, j=%d: find: %v", i, j, err)
		}
		if err := iter.Close(); err != nil {
			t.Fatalf("i=%d, j=%d: close: %v", i, j, err)
		}

		c.evict(FileNum(lo + rng.Intn(hi-lo)))
	}

	sumEvicted, nEvicted := 0, 0
	sumSafe, nSafe := 0, 0
	fs.validate(t, c, func(i, gotO, gotC int) error {
		if lo <= i && i < hi {
			sumEvicted += gotO
			nEvicted++
		} else {
			sumSafe += gotO
			nSafe++
		}
		return nil
	})
	fEvicted := float64(sumEvicted) / float64(nEvicted)
	fSafe := float64(sumSafe) / float64(nSafe)
	// The magic 1.25 number isn't derived from formal modeling. It's just a guess. For
	// (lo, hi, tableCacheTestCacheSize, tableCacheTestNumTables) = (10, 20, 100, 300),
	// the ratio seems to converge on roughly 1.5 for large N, compared to 1.0 if we do
	// not evict any cache entries.
	if ratio := fEvicted / fSafe; ratio < 1.25 {
		t.Errorf("evicted tables were opened %.3f times on average, safe tables %.3f, ratio %.3f < 1.250",
			fEvicted, fSafe, ratio)
	}
}
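
// Intuition for the ratio check in testTableCacheEvictionsInternal: tables in
// [lo, hi) are explicitly evicted at random throughout the run, so later
// accesses must reopen them, while tables outside the range are evicted only
// by ordinary cache pressure. The average open counts of the two groups
// should therefore diverge.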

func TestTableCacheEvictions(t *testing.T) {
	for i, iterType := range []string{"point", "range"} {
		t.Run(fmt.Sprintf("iter=%s", iterType), func(t *testing.T) {
			testTableCacheEvictionsInternal(t, i == 1)
		})
	}
}

func TestSharedTableCacheEvictions(t *testing.T) {
	const (
		N      = 1000
		lo, hi = 10, 20
	)
	tc := newTableCacheTest(8<<20, 2*tableCacheTestCacheSize, 16)
	c1, fs1, err := newTableCacheContainerTest(tc, "")
	require.NoError(t, err)
	c2, fs2, err := newTableCacheContainerTest(tc, "")
	require.NoError(t, err)
	tc.Unref()

	rng := rand.New(rand.NewSource(2))
	for i := 0; i < N; i++ {
		j := rng.Intn(tableCacheTestNumTables)
		iter1, _, err := c1.newIters(
			&fileMetadata{FileNum: FileNum(j)},
			nil, /* iter options */
			internalIterOpts{})
		if err != nil {
			t.Fatalf("i=%d, j=%d: find: %v", i, j, err)
		}

		iter2, _, err := c2.newIters(
			&fileMetadata{FileNum: FileNum(j)},
			nil, /* iter options */
			internalIterOpts{})
		if err != nil {
			t.Fatalf("i=%d, j=%d: find: %v", i, j, err)
		}

		if err := iter1.Close(); err != nil {
			t.Fatalf("i=%d, j=%d: close: %v", i, j, err)
		}

		if err := iter2.Close(); err != nil {
			t.Fatalf("i=%d, j=%d: close: %v", i, j, err)
		}

		c1.evict(FileNum(lo + rng.Intn(hi-lo)))
		c2.evict(FileNum(lo + rng.Intn(hi-lo)))
	}

	check := func(fs *tableCacheTestFS, c *tableCacheContainer) (float64, float64, float64) {
		sumEvicted, nEvicted := 0, 0
		sumSafe, nSafe := 0, 0
		fs.validate(t, c, func(i, gotO, gotC int) error {
			if lo <= i && i < hi {
				sumEvicted += gotO
				nEvicted++
			} else {
				sumSafe += gotO
				nSafe++
			}
			return nil
		})
		fEvicted := float64(sumEvicted) / float64(nEvicted)
		fSafe := float64(sumSafe) / float64(nSafe)

		return fEvicted, fSafe, fEvicted / fSafe
	}

	// The magic 1.25 number isn't derived from formal modeling. It's just a guess. For
	// (lo, hi, tableCacheTestCacheSize, tableCacheTestNumTables) = (10, 20, 100, 300),
	// the ratio seems to converge on roughly 1.5 for large N, compared to 1.0 if we do
	// not evict any cache entries.
	if fEvicted, fSafe, ratio := check(fs1, c1); ratio < 1.25 {
		t.Errorf(
			"evicted tables were opened %.3f times on average, safe tables %.3f, ratio %.3f < 1.250",
			fEvicted, fSafe, ratio,
		)
	}

	if fEvicted, fSafe, ratio := check(fs2, c2); ratio < 1.25 {
		t.Errorf(
			"evicted tables were opened %.3f times on average, safe tables %.3f, ratio %.3f < 1.250",
			fEvicted, fSafe, ratio,
		)
	}
}

func TestTableCacheIterLeak(t *testing.T) {
	c, _, err := newTableCacheContainerTest(nil, "")
	require.NoError(t, err)

	iter, _, err := c.newIters(
		&fileMetadata{FileNum: 0},
		nil, /* iter options */
		internalIterOpts{})
	require.NoError(t, err)

	if err := c.close(); err == nil {
		t.Fatalf("expected failure, but found success")
	} else if !strings.HasPrefix(err.Error(), "leaked iterators:") {
		t.Fatalf("expected leaked iterators, but found %+v", err)
	} else {
		t.Log(err.Error())
	}
	require.NoError(t, iter.Close())
}

func TestSharedTableCacheIterLeak(t *testing.T) {
	tc := newTableCacheTest(8<<20, 2*tableCacheTestCacheSize, 16)
	c1, _, err := newTableCacheContainerTest(tc, "")
	require.NoError(t, err)
	c2, _, err := newTableCacheContainerTest(tc, "")
	require.NoError(t, err)
	c3, _, err := newTableCacheContainerTest(tc, "")
	require.NoError(t, err)
	tc.Unref()

	iter, _, err := c1.newIters(
		&fileMetadata{FileNum: 0},
		nil, /* iter options */
		internalIterOpts{})
	require.NoError(t, err)

	if err := c1.close(); err == nil {
		t.Fatalf("expected failure, but found success")
	} else if !strings.HasPrefix(err.Error(), "leaked iterators:") {
		t.Fatalf("expected leaked iterators, but found %+v", err)
	} else {
		t.Log(err.Error())
	}

	// Closing c2 shouldn't error out since c2 isn't leaking any iterators.
	require.NoError(t, c2.close())

	// Closing c3 should error out since c3 holds the last reference to
	// the TableCache, and when the TableCache closes, it will detect
	// that there was a leaked iterator.
	if err := c3.close(); err == nil {
		t.Fatalf("expected failure, but found success")
	} else if !strings.HasPrefix(err.Error(), "leaked iterators:") {
		t.Fatalf("expected leaked iterators, but found %+v", err)
	} else {
		t.Log(err.Error())
	}

	require.NoError(t, iter.Close())
}

func TestTableCacheRetryAfterFailure(t *testing.T) {
	// Test that a retry can succeed after a failure, i.e., that errors are not
	// cached.
	c, fs, err := newTableCacheContainerTest(nil, "")
	require.NoError(t, err)

	fs.setOpenError(true /* enabled */)
	if _, _, err := c.newIters(
		&fileMetadata{FileNum: 0},
		nil, /* iter options */
		internalIterOpts{}); err == nil {
		t.Fatalf("expected failure, but found success")
	}
	fs.setOpenError(false /* enabled */)
	var iter internalIterator
	iter, _, err = c.newIters(
		&fileMetadata{FileNum: 0},
		nil, /* iter options */
		internalIterOpts{})
	require.NoError(t, err)
	require.NoError(t, iter.Close())
	fs.validate(t, c, nil)
}

func TestTableCacheEvictClose(t *testing.T) {
	errs := make(chan error, 10)
	db, err := Open("test",
		&Options{
			FS: vfs.NewMem(),
			EventListener: EventListener{
				TableDeleted: func(info TableDeleteInfo) {
					errs <- info.Err
				},
			},
		})
	require.NoError(t, err)

	start := []byte("a")
	end := []byte("z")
	require.NoError(t, db.Set(start, nil, nil))
	require.NoError(t, db.Flush())
	require.NoError(t, db.DeleteRange(start, end, nil))
	require.NoError(t, db.Compact(start, end, false))
	require.NoError(t, db.Close())
	close(errs)

	for err := range errs {
		require.NoError(t, err)
	}
}

func TestTableCacheClockPro(t *testing.T) {
	// Test data was generated from the Python code. See also
	// internal/cache/clockpro_test.go:TestCache.
	f, err := os.Open("internal/cache/testdata/cache")
	require.NoError(t, err)
	defer f.Close()

	mem := vfs.NewMem()
	makeTable := func(fileNum FileNum) {
		f, err := mem.Create(base.MakeFilepath(mem, "", fileTypeTable, fileNum))
		require.NoError(t, err)
		w := sstable.NewWriter(f, sstable.WriterOptions{})
		require.NoError(t, w.Set([]byte("a"), nil))
		require.NoError(t, w.Close())
	}

	opts := &Options{
		Cache: NewCache(8 << 20), // 8 MB
	}
	opts.EnsureDefaults()
	defer opts.Cache.Unref()

	cache := &tableCacheShard{}
	// NB: The table cache size of 200 is required for the expected test values.
	cache.init(200)
	dbOpts := &tableCacheOpts{}
	dbOpts.logger = opts.Logger
	dbOpts.cacheID = 0
	dbOpts.dirname = ""
	dbOpts.fs = mem
	dbOpts.opts = opts.MakeReaderOptions()

	scanner := bufio.NewScanner(f)
	tables := make(map[int]bool)
	line := 1

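	// Each line of the testdata holds an integer key followed by a field whose
	// first byte records the expected outcome ('h' for a cache hit). This is
	// inferred from the parsing below rather than from a documented format.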
	for scanner.Scan() {
		fields := bytes.Fields(scanner.Bytes())

		key, err := strconv.Atoi(string(fields[0]))
		require.NoError(t, err)

		// Ensure that the underlying sstables exist on disk, creating each table
		// the first time it is seen.
		if !tables[key] {
			makeTable(FileNum(key))
			tables[key] = true
		}

		oldHits := atomic.LoadInt64(&cache.atomic.hits)
		v := cache.findNode(&fileMetadata{FileNum: FileNum(key)}, dbOpts)
		cache.unrefValue(v)

		hit := atomic.LoadInt64(&cache.atomic.hits) != oldHits
		wantHit := fields[1][0] == 'h'
		if hit != wantHit {
			t.Errorf("%d: cache hit mismatch: got %v, want %v", line, hit, wantHit)
		}
		line++
	}
	require.NoError(t, scanner.Err())
}