// github.com/df-mc/goleveldb@v1.1.9/leveldb/cache/cache_test.go

     1  // Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
     2  // All rights reserved.
     3  //
     4  // Use of this source code is governed by a BSD-style license that can be
     5  // found in the LICENSE file.
     6  
     7  package cache
     8  
     9  import (
    10  	"math/rand"
    11  	"runtime"
    12  	"sync"
    13  	"sync/atomic"
    14  	"testing"
    15  	"time"
    16  	"unsafe"
    17  )
    18  
    19  type int32o int32
    20  
    21  func (o *int32o) acquire() {
    22  	if atomic.AddInt32((*int32)(o), 1) != 1 {
    23  		panic("BUG: invalid ref")
    24  	}
    25  }
    26  
    27  func (o *int32o) Release() {
    28  	if atomic.AddInt32((*int32)(o), -1) != 0 {
    29  		panic("BUG: invalid ref")
    30  	}
    31  }
    32  
    33  type releaserFunc struct {
    34  	fn    func()
    35  	value Value
    36  }
    37  
    38  func (r releaserFunc) Release() {
    39  	if r.fn != nil {
    40  		r.fn()
    41  	}
    42  }
    43  
    44  func set(c *Cache, ns, key uint64, value Value, charge int, relf func()) *Handle {
    45  	return c.Get(ns, key, func() (int, Value) {
    46  		if relf != nil {
    47  			return charge, releaserFunc{relf, value}
    48  		}
    49  		return charge, value
    50  	})
    51  }
    52  
// cacheMapTestParams describes one TestCacheMap stress configuration:
// nobjects distinct keys, nhandles slots for parked handles, concurrent
// worker goroutines, and repeat passes over the key space per worker.
type cacheMapTestParams struct {
	nobjects, nhandles, concurrent, repeat int
}
    56  
    57  func TestCacheMap(t *testing.T) {
    58  	runtime.GOMAXPROCS(runtime.NumCPU())
    59  
    60  	var params []cacheMapTestParams
    61  	if testing.Short() {
    62  		params = []cacheMapTestParams{
    63  			{1000, 100, 20, 3},
    64  			{10000, 300, 50, 10},
    65  		}
    66  	} else {
    67  		params = []cacheMapTestParams{
    68  			{10000, 400, 50, 3},
    69  			{100000, 1000, 100, 10},
    70  		}
    71  	}
    72  
    73  	var (
    74  		objects [][]int32o
    75  		handles [][]unsafe.Pointer
    76  	)
    77  
    78  	for _, x := range params {
    79  		objects = append(objects, make([]int32o, x.nobjects))
    80  		handles = append(handles, make([]unsafe.Pointer, x.nhandles))
    81  	}
    82  
    83  	c := NewCache(nil)
    84  
    85  	wg := new(sync.WaitGroup)
    86  	var done int32
    87  
    88  	for ns, x := range params {
    89  		for i := 0; i < x.concurrent; i++ {
    90  			wg.Add(1)
    91  			go func(ns, i, repeat int, objects []int32o, handles []unsafe.Pointer) {
    92  				defer wg.Done()
    93  				r := rand.New(rand.NewSource(time.Now().UnixNano()))
    94  
    95  				for j := len(objects) * repeat; j >= 0; j-- {
    96  					key := uint64(r.Intn(len(objects)))
    97  					h := c.Get(uint64(ns), key, func() (int, Value) {
    98  						o := &objects[key]
    99  						o.acquire()
   100  						return 1, o
   101  					})
   102  					if v := h.Value().(*int32o); v != &objects[key] {
   103  						t.Fatalf("#%d invalid value: want=%p got=%p", ns, &objects[key], v)
   104  					}
   105  					if objects[key] != 1 {
   106  						t.Fatalf("#%d invalid object %d: %d", ns, key, objects[key])
   107  					}
   108  					if !atomic.CompareAndSwapPointer(&handles[r.Intn(len(handles))], nil, unsafe.Pointer(h)) {
   109  						h.Release()
   110  					}
   111  				}
   112  			}(ns, i, x.repeat, objects[ns], handles[ns])
   113  		}
   114  
   115  		go func(handles []unsafe.Pointer) {
   116  			r := rand.New(rand.NewSource(time.Now().UnixNano()))
   117  
   118  			for atomic.LoadInt32(&done) == 0 {
   119  				i := r.Intn(len(handles))
   120  				h := (*Handle)(atomic.LoadPointer(&handles[i]))
   121  				if h != nil && atomic.CompareAndSwapPointer(&handles[i], unsafe.Pointer(h), nil) {
   122  					h.Release()
   123  				}
   124  				time.Sleep(time.Millisecond)
   125  			}
   126  		}(handles[ns])
   127  	}
   128  
   129  	go func() {
   130  		handles := make([]*Handle, 100000)
   131  		for atomic.LoadInt32(&done) == 0 {
   132  			for i := range handles {
   133  				handles[i] = c.Get(999999999, uint64(i), func() (int, Value) {
   134  					return 1, 1
   135  				})
   136  			}
   137  			for _, h := range handles {
   138  				h.Release()
   139  			}
   140  		}
   141  	}()
   142  
   143  	wg.Wait()
   144  
   145  	atomic.StoreInt32(&done, 1)
   146  
   147  	for _, handles0 := range handles {
   148  		for i := range handles0 {
   149  			h := (*Handle)(atomic.LoadPointer(&handles0[i]))
   150  			if h != nil && atomic.CompareAndSwapPointer(&handles0[i], unsafe.Pointer(h), nil) {
   151  				h.Release()
   152  			}
   153  		}
   154  	}
   155  
   156  	for ns, objects0 := range objects {
   157  		for i, o := range objects0 {
   158  			if o != 0 {
   159  				t.Fatalf("invalid object #%d.%d: ref=%d", ns, i, o)
   160  			}
   161  		}
   162  	}
   163  }
   164  
   165  func TestCacheMap_NodesAndSize(t *testing.T) {
   166  	c := NewCache(nil)
   167  	if c.Nodes() != 0 {
   168  		t.Errorf("invalid nodes counter: want=%d got=%d", 0, c.Nodes())
   169  	}
   170  	if c.Size() != 0 {
   171  		t.Errorf("invalid size counter: want=%d got=%d", 0, c.Size())
   172  	}
   173  	set(c, 0, 1, 1, 1, nil)
   174  	set(c, 0, 2, 2, 2, nil)
   175  	set(c, 1, 1, 3, 3, nil)
   176  	set(c, 2, 1, 4, 1, nil)
   177  	if c.Nodes() != 4 {
   178  		t.Errorf("invalid nodes counter: want=%d got=%d", 4, c.Nodes())
   179  	}
   180  	if c.Size() != 7 {
   181  		t.Errorf("invalid size counter: want=%d got=%d", 4, c.Size())
   182  	}
   183  }
   184  
   185  func TestLRUCache_Capacity(t *testing.T) {
   186  	c := NewCache(NewLRU(10))
   187  	if c.Capacity() != 10 {
   188  		t.Errorf("invalid capacity: want=%d got=%d", 10, c.Capacity())
   189  	}
   190  	set(c, 0, 1, 1, 1, nil).Release()
   191  	set(c, 0, 2, 2, 2, nil).Release()
   192  	set(c, 1, 1, 3, 3, nil).Release()
   193  	set(c, 2, 1, 4, 1, nil).Release()
   194  	set(c, 2, 2, 5, 1, nil).Release()
   195  	set(c, 2, 3, 6, 1, nil).Release()
   196  	set(c, 2, 4, 7, 1, nil).Release()
   197  	set(c, 2, 5, 8, 1, nil).Release()
   198  	if c.Nodes() != 7 {
   199  		t.Errorf("invalid nodes counter: want=%d got=%d", 7, c.Nodes())
   200  	}
   201  	if c.Size() != 10 {
   202  		t.Errorf("invalid size counter: want=%d got=%d", 10, c.Size())
   203  	}
   204  	c.SetCapacity(9)
   205  	if c.Capacity() != 9 {
   206  		t.Errorf("invalid capacity: want=%d got=%d", 9, c.Capacity())
   207  	}
   208  	if c.Nodes() != 6 {
   209  		t.Errorf("invalid nodes counter: want=%d got=%d", 6, c.Nodes())
   210  	}
   211  	if c.Size() != 8 {
   212  		t.Errorf("invalid size counter: want=%d got=%d", 8, c.Size())
   213  	}
   214  }
   215  
   216  func TestCacheMap_NilValue(t *testing.T) {
   217  	c := NewCache(NewLRU(10))
   218  	h := c.Get(0, 0, func() (size int, value Value) {
   219  		return 1, nil
   220  	})
   221  	if h != nil {
   222  		t.Error("cache handle is non-nil")
   223  	}
   224  	if c.Nodes() != 0 {
   225  		t.Errorf("invalid nodes counter: want=%d got=%d", 0, c.Nodes())
   226  	}
   227  	if c.Size() != 0 {
   228  		t.Errorf("invalid size counter: want=%d got=%d", 0, c.Size())
   229  	}
   230  }
   231  
   232  func TestLRUCache_GetLatency(t *testing.T) {
   233  	runtime.GOMAXPROCS(runtime.NumCPU())
   234  
   235  	const (
   236  		concurrentSet = 30
   237  		concurrentGet = 3
   238  		duration      = 3 * time.Second
   239  		delay         = 3 * time.Millisecond
   240  		maxkey        = 100000
   241  	)
   242  
   243  	var (
   244  		set, getHit, getAll        int32
   245  		getMaxLatency, getDuration int64
   246  	)
   247  
   248  	c := NewCache(NewLRU(5000))
   249  	wg := &sync.WaitGroup{}
   250  	until := time.Now().Add(duration)
   251  	for i := 0; i < concurrentSet; i++ {
   252  		wg.Add(1)
   253  		go func(i int) {
   254  			defer wg.Done()
   255  			r := rand.New(rand.NewSource(time.Now().UnixNano()))
   256  			for time.Now().Before(until) {
   257  				c.Get(0, uint64(r.Intn(maxkey)), func() (int, Value) {
   258  					time.Sleep(delay)
   259  					atomic.AddInt32(&set, 1)
   260  					return 1, 1
   261  				}).Release()
   262  			}
   263  		}(i)
   264  	}
   265  	for i := 0; i < concurrentGet; i++ {
   266  		wg.Add(1)
   267  		go func(i int) {
   268  			defer wg.Done()
   269  			r := rand.New(rand.NewSource(time.Now().UnixNano()))
   270  			for {
   271  				mark := time.Now()
   272  				if mark.Before(until) {
   273  					h := c.Get(0, uint64(r.Intn(maxkey)), nil)
   274  					latency := int64(time.Now().Sub(mark))
   275  					m := atomic.LoadInt64(&getMaxLatency)
   276  					if latency > m {
   277  						atomic.CompareAndSwapInt64(&getMaxLatency, m, latency)
   278  					}
   279  					atomic.AddInt64(&getDuration, latency)
   280  					if h != nil {
   281  						atomic.AddInt32(&getHit, 1)
   282  						h.Release()
   283  					}
   284  					atomic.AddInt32(&getAll, 1)
   285  				} else {
   286  					break
   287  				}
   288  			}
   289  		}(i)
   290  	}
   291  
   292  	wg.Wait()
   293  	getAvglatency := time.Duration(getDuration) / time.Duration(getAll)
   294  	t.Logf("set=%d getHit=%d getAll=%d getMaxLatency=%v getAvgLatency=%v",
   295  		set, getHit, getAll, time.Duration(getMaxLatency), getAvglatency)
   296  
   297  	if getAvglatency > delay/3 {
   298  		t.Errorf("get avg latency > %v: got=%v", delay/3, getAvglatency)
   299  	}
   300  }
   301  
   302  func TestLRUCache_HitMiss(t *testing.T) {
   303  	cases := []struct {
   304  		key   uint64
   305  		value string
   306  	}{
   307  		{1, "vvvvvvvvv"},
   308  		{100, "v1"},
   309  		{0, "v2"},
   310  		{12346, "v3"},
   311  		{777, "v4"},
   312  		{999, "v5"},
   313  		{7654, "v6"},
   314  		{2, "v7"},
   315  		{3, "v8"},
   316  		{9, "v9"},
   317  	}
   318  
   319  	setfin := 0
   320  	c := NewCache(NewLRU(1000))
   321  	for i, x := range cases {
   322  		set(c, 0, x.key, x.value, len(x.value), func() {
   323  			setfin++
   324  		}).Release()
   325  		for j, y := range cases {
   326  			h := c.Get(0, y.key, nil)
   327  			if j <= i {
   328  				// should hit
   329  				if h == nil {
   330  					t.Errorf("case '%d' iteration '%d' is miss", i, j)
   331  				} else {
   332  					if x := h.Value().(releaserFunc).value.(string); x != y.value {
   333  						t.Errorf("case '%d' iteration '%d' has invalid value got '%s', want '%s'", i, j, x, y.value)
   334  					}
   335  				}
   336  			} else {
   337  				// should miss
   338  				if h != nil {
   339  					t.Errorf("case '%d' iteration '%d' is hit , value '%s'", i, j, h.Value().(releaserFunc).value.(string))
   340  				}
   341  			}
   342  			if h != nil {
   343  				h.Release()
   344  			}
   345  		}
   346  	}
   347  
   348  	for i, x := range cases {
   349  		finalizerOk := false
   350  		c.Delete(0, x.key, func() {
   351  			finalizerOk = true
   352  		})
   353  
   354  		if !finalizerOk {
   355  			t.Errorf("case %d delete finalizer not executed", i)
   356  		}
   357  
   358  		for j, y := range cases {
   359  			h := c.Get(0, y.key, nil)
   360  			if j > i {
   361  				// should hit
   362  				if h == nil {
   363  					t.Errorf("case '%d' iteration '%d' is miss", i, j)
   364  				} else {
   365  					if x := h.Value().(releaserFunc).value.(string); x != y.value {
   366  						t.Errorf("case '%d' iteration '%d' has invalid value got '%s', want '%s'", i, j, x, y.value)
   367  					}
   368  				}
   369  			} else {
   370  				// should miss
   371  				if h != nil {
   372  					t.Errorf("case '%d' iteration '%d' is hit, value '%s'", i, j, h.Value().(releaserFunc).value.(string))
   373  				}
   374  			}
   375  			if h != nil {
   376  				h.Release()
   377  			}
   378  		}
   379  	}
   380  
   381  	if setfin != len(cases) {
   382  		t.Errorf("some set finalizer may not be executed, want=%d got=%d", len(cases), setfin)
   383  	}
   384  }
   385  
   386  func TestLRUCache_Eviction(t *testing.T) {
   387  	c := NewCache(NewLRU(12))
   388  	o1 := set(c, 0, 1, 1, 1, nil)
   389  	set(c, 0, 2, 2, 1, nil).Release()
   390  	set(c, 0, 3, 3, 1, nil).Release()
   391  	set(c, 0, 4, 4, 1, nil).Release()
   392  	set(c, 0, 5, 5, 1, nil).Release()
   393  	if h := c.Get(0, 2, nil); h != nil { // 1,3,4,5,2
   394  		h.Release()
   395  	}
   396  	set(c, 0, 9, 9, 10, nil).Release() // 5,2,9
   397  
   398  	for _, key := range []uint64{9, 2, 5, 1} {
   399  		h := c.Get(0, key, nil)
   400  		if h == nil {
   401  			t.Errorf("miss for key '%d'", key)
   402  		} else {
   403  			if x := h.Value().(int); x != int(key) {
   404  				t.Errorf("invalid value for key '%d' want '%d', got '%d'", key, key, x)
   405  			}
   406  			h.Release()
   407  		}
   408  	}
   409  	o1.Release()
   410  	for _, key := range []uint64{1, 2, 5} {
   411  		h := c.Get(0, key, nil)
   412  		if h == nil {
   413  			t.Errorf("miss for key '%d'", key)
   414  		} else {
   415  			if x := h.Value().(int); x != int(key) {
   416  				t.Errorf("invalid value for key '%d' want '%d', got '%d'", key, key, x)
   417  			}
   418  			h.Release()
   419  		}
   420  	}
   421  	for _, key := range []uint64{3, 4, 9} {
   422  		h := c.Get(0, key, nil)
   423  		if h != nil {
   424  			t.Errorf("hit for key '%d'", key)
   425  			if x := h.Value().(int); x != int(key) {
   426  				t.Errorf("invalid value for key '%d' want '%d', got '%d'", key, key, x)
   427  			}
   428  			h.Release()
   429  		}
   430  	}
   431  }
   432  
   433  func TestLRUCache_Evict(t *testing.T) {
   434  	c := NewCache(NewLRU(6))
   435  	set(c, 0, 1, 1, 1, nil).Release()
   436  	set(c, 0, 2, 2, 1, nil).Release()
   437  	set(c, 1, 1, 4, 1, nil).Release()
   438  	set(c, 1, 2, 5, 1, nil).Release()
   439  	set(c, 2, 1, 6, 1, nil).Release()
   440  	set(c, 2, 2, 7, 1, nil).Release()
   441  
   442  	for ns := 0; ns < 3; ns++ {
   443  		for key := 1; key < 3; key++ {
   444  			if h := c.Get(uint64(ns), uint64(key), nil); h != nil {
   445  				h.Release()
   446  			} else {
   447  				t.Errorf("Cache.Get on #%d.%d return nil", ns, key)
   448  			}
   449  		}
   450  	}
   451  
   452  	if ok := c.Evict(0, 1); !ok {
   453  		t.Error("first Cache.Evict on #0.1 return false")
   454  	}
   455  	if ok := c.Evict(0, 1); ok {
   456  		t.Error("second Cache.Evict on #0.1 return true")
   457  	}
   458  	if h := c.Get(0, 1, nil); h != nil {
   459  		t.Errorf("Cache.Get on #0.1 return non-nil: %v", h.Value())
   460  	}
   461  
   462  	c.EvictNS(1)
   463  	if h := c.Get(1, 1, nil); h != nil {
   464  		t.Errorf("Cache.Get on #1.1 return non-nil: %v", h.Value())
   465  	}
   466  	if h := c.Get(1, 2, nil); h != nil {
   467  		t.Errorf("Cache.Get on #1.2 return non-nil: %v", h.Value())
   468  	}
   469  
   470  	c.EvictAll()
   471  	for ns := 0; ns < 3; ns++ {
   472  		for key := 1; key < 3; key++ {
   473  			if h := c.Get(uint64(ns), uint64(key), nil); h != nil {
   474  				t.Errorf("Cache.Get on #%d.%d return non-nil: %v", ns, key, h.Value())
   475  			}
   476  		}
   477  	}
   478  }
   479  
   480  func TestLRUCache_Delete(t *testing.T) {
   481  	delFuncCalled := 0
   482  	delFunc := func() {
   483  		delFuncCalled++
   484  	}
   485  
   486  	c := NewCache(NewLRU(2))
   487  	set(c, 0, 1, 1, 1, nil).Release()
   488  	set(c, 0, 2, 2, 1, nil).Release()
   489  
   490  	if ok := c.Delete(0, 1, delFunc); !ok {
   491  		t.Error("Cache.Delete on #1 return false")
   492  	}
   493  	if h := c.Get(0, 1, nil); h != nil {
   494  		t.Errorf("Cache.Get on #1 return non-nil: %v", h.Value())
   495  	}
   496  	if ok := c.Delete(0, 1, delFunc); ok {
   497  		t.Error("Cache.Delete on #1 return true")
   498  	}
   499  
   500  	h2 := c.Get(0, 2, nil)
   501  	if h2 == nil {
   502  		t.Error("Cache.Get on #2 return nil")
   503  	}
   504  	if ok := c.Delete(0, 2, delFunc); !ok {
   505  		t.Error("(1) Cache.Delete on #2 return false")
   506  	}
   507  	if ok := c.Delete(0, 2, delFunc); !ok {
   508  		t.Error("(2) Cache.Delete on #2 return false")
   509  	}
   510  
   511  	set(c, 0, 3, 3, 1, nil).Release()
   512  	set(c, 0, 4, 4, 1, nil).Release()
   513  	c.Get(0, 2, nil).Release()
   514  
   515  	for key := 2; key <= 4; key++ {
   516  		if h := c.Get(0, uint64(key), nil); h != nil {
   517  			h.Release()
   518  		} else {
   519  			t.Errorf("Cache.Get on #%d return nil", key)
   520  		}
   521  	}
   522  
   523  	h2.Release()
   524  	if h := c.Get(0, 2, nil); h != nil {
   525  		t.Errorf("Cache.Get on #2 return non-nil: %v", h.Value())
   526  	}
   527  
   528  	if delFuncCalled != 4 {
   529  		t.Errorf("delFunc isn't called 4 times: got=%d", delFuncCalled)
   530  	}
   531  }
   532  
   533  func TestLRUCache_Close(t *testing.T) {
   534  	relFuncCalled := 0
   535  	relFunc := func() {
   536  		relFuncCalled++
   537  	}
   538  	delFuncCalled := 0
   539  	delFunc := func() {
   540  		delFuncCalled++
   541  	}
   542  
   543  	c := NewCache(NewLRU(2))
   544  	set(c, 0, 1, 1, 1, relFunc).Release()
   545  	set(c, 0, 2, 2, 1, relFunc).Release()
   546  
   547  	h3 := set(c, 0, 3, 3, 1, relFunc)
   548  	if h3 == nil {
   549  		t.Error("Cache.Get on #3 return nil")
   550  	}
   551  	if ok := c.Delete(0, 3, delFunc); !ok {
   552  		t.Error("Cache.Delete on #3 return false")
   553  	}
   554  
   555  	c.Close()
   556  
   557  	if relFuncCalled != 3 {
   558  		t.Errorf("relFunc isn't called 3 times: got=%d", relFuncCalled)
   559  	}
   560  	if delFuncCalled != 1 {
   561  		t.Errorf("delFunc isn't called 1 times: got=%d", delFuncCalled)
   562  	}
   563  }