github.com/insionng/yougam@v0.0.0-20170714101924-2bc18d833463/libraries/syndtr/goleveldb/leveldb/cache/cache_test.go

// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

package cache

import (
	"math/rand"
	"runtime"
	"sync"
	"sync/atomic"
	"testing"
	"time"
	"unsafe"
)

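// int32o is a reference-counted test value; acquire and Release panic if the
// count ever deviates from the expected 0 -> 1 -> 0 sequence, so a double
// acquire or double release is caught immediately.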
type int32o int32

func (o *int32o) acquire() {
	if atomic.AddInt32((*int32)(o), 1) != 1 {
		panic("BUG: invalid ref")
	}
}

func (o *int32o) Release() {
	if atomic.AddInt32((*int32)(o), -1) != 0 {
		panic("BUG: invalid ref")
	}
}

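// releaserFunc pairs a Value with an optional callback that runs when the
// cache releases the stored value.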
type releaserFunc struct {
	fn    func()
	value Value
}

func (r releaserFunc) Release() {
	if r.fn != nil {
		r.fn()
	}
}

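// set stores value under (ns, key) via Cache.Get with a setter, charging
// `charge` against the cache capacity; if relf is non-nil the value is
// wrapped in a releaserFunc so relf runs when the entry is released.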
func set(c *Cache, ns, key uint64, value Value, charge int, relf func()) *Handle {
	return c.Get(ns, key, func() (int, Value) {
		if relf != nil {
			return charge, releaserFunc{relf, value}
		}
		return charge, value
	})
}

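// TestCacheMap stresses the cache map: per namespace, many goroutines insert
// and look up reference-counted objects while a background goroutine releases
// randomly stashed handles, and another goroutine churns a separate
// namespace. At the end every object's reference count must be zero.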
func TestCacheMap(t *testing.T) {
	runtime.GOMAXPROCS(runtime.NumCPU())

	nsx := []struct {
		nobjects, nhandles, concurrent, repeat int
	}{
		{10000, 400, 50, 3},
		{100000, 1000, 100, 10},
	}

	var (
		objects [][]int32o
		handles [][]unsafe.Pointer
	)

	for _, x := range nsx {
		objects = append(objects, make([]int32o, x.nobjects))
		handles = append(handles, make([]unsafe.Pointer, x.nhandles))
	}

	c := NewCache(nil)

	wg := new(sync.WaitGroup)
	var done int32

	for ns, x := range nsx {
		// Workers: concurrently insert and look up reference-counted objects,
		// stashing some handles for the releaser goroutine below.
		for i := 0; i < x.concurrent; i++ {
			wg.Add(1)
			go func(ns, i, repeat int, objects []int32o, handles []unsafe.Pointer) {
				defer wg.Done()
				r := rand.New(rand.NewSource(time.Now().UnixNano()))

				for j := len(objects) * repeat; j >= 0; j-- {
					key := uint64(r.Intn(len(objects)))
					h := c.Get(uint64(ns), key, func() (int, Value) {
						o := &objects[key]
						o.acquire()
						return 1, o
					})
					// Errorf, not Fatalf: Fatal may only be called from the
					// goroutine running the test function.
					if v := h.Value().(*int32o); v != &objects[key] {
						t.Errorf("#%d invalid value: want=%p got=%p", ns, &objects[key], v)
					}
					if objects[key] != 1 {
						t.Errorf("#%d invalid object %d: %d", ns, key, objects[key])
					}
					if !atomic.CompareAndSwapPointer(&handles[r.Intn(len(handles))], nil, unsafe.Pointer(h)) {
						h.Release()
					}
				}
			}(ns, i, x.repeat, objects[ns], handles[ns])
		}

		// Releaser: keep releasing randomly stashed handles until the test is done.
		go func(handles []unsafe.Pointer) {
			r := rand.New(rand.NewSource(time.Now().UnixNano()))

			for atomic.LoadInt32(&done) == 0 {
				i := r.Intn(len(handles))
				h := (*Handle)(atomic.LoadPointer(&handles[i]))
				if h != nil && atomic.CompareAndSwapPointer(&handles[i], unsafe.Pointer(h), nil) {
					h.Release()
				}
				time.Sleep(time.Millisecond)
			}
		}(handles[ns])
	}

	// Churn an unrelated namespace to add extra concurrent map activity.
	go func() {
		handles := make([]*Handle, 100000)
		for atomic.LoadInt32(&done) == 0 {
			for i := range handles {
				handles[i] = c.Get(999999999, uint64(i), func() (int, Value) {
					return 1, 1
				})
			}
			for _, h := range handles {
				h.Release()
			}
		}
	}()

	wg.Wait()

	atomic.StoreInt32(&done, 1)

	// Release any handles still stashed by the workers.
	for _, handles0 := range handles {
		for i := range handles0 {
			h := (*Handle)(atomic.LoadPointer(&handles0[i]))
			if h != nil && atomic.CompareAndSwapPointer(&handles0[i], unsafe.Pointer(h), nil) {
				h.Release()
			}
		}
	}

	// Every object's reference count must be back to zero.
	for ns, objects0 := range objects {
		for i, o := range objects0 {
			if o != 0 {
				t.Fatalf("invalid object #%d.%d: ref=%d", ns, i, o)
			}
		}
	}
}

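// TestCacheMap_NodesAndSize checks that Nodes and Size report the number of
// cached entries and the sum of their charges.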
func TestCacheMap_NodesAndSize(t *testing.T) {
	c := NewCache(nil)
	if c.Nodes() != 0 {
		t.Errorf("invalid nodes counter: want=%d got=%d", 0, c.Nodes())
	}
	if c.Size() != 0 {
		t.Errorf("invalid size counter: want=%d got=%d", 0, c.Size())
	}
	set(c, 0, 1, 1, 1, nil)
	set(c, 0, 2, 2, 2, nil)
	set(c, 1, 1, 3, 3, nil)
	set(c, 2, 1, 4, 1, nil)
	if c.Nodes() != 4 {
		t.Errorf("invalid nodes counter: want=%d got=%d", 4, c.Nodes())
	}
	if c.Size() != 7 {
		t.Errorf("invalid size counter: want=%d got=%d", 7, c.Size())
	}
}

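// TestLRUCache_Capacity verifies that the LRU backend keeps the total charge
// within capacity and that SetCapacity triggers further eviction.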
func TestLRUCache_Capacity(t *testing.T) {
	c := NewCache(NewLRU(10))
	if c.Capacity() != 10 {
		t.Errorf("invalid capacity: want=%d got=%d", 10, c.Capacity())
	}
	set(c, 0, 1, 1, 1, nil).Release()
	set(c, 0, 2, 2, 2, nil).Release()
	set(c, 1, 1, 3, 3, nil).Release()
	set(c, 2, 1, 4, 1, nil).Release()
	set(c, 2, 2, 5, 1, nil).Release()
	set(c, 2, 3, 6, 1, nil).Release()
	set(c, 2, 4, 7, 1, nil).Release()
	set(c, 2, 5, 8, 1, nil).Release()
	if c.Nodes() != 7 {
		t.Errorf("invalid nodes counter: want=%d got=%d", 7, c.Nodes())
	}
	if c.Size() != 10 {
		t.Errorf("invalid size counter: want=%d got=%d", 10, c.Size())
	}
	c.SetCapacity(9)
	if c.Capacity() != 9 {
		t.Errorf("invalid capacity: want=%d got=%d", 9, c.Capacity())
	}
	if c.Nodes() != 6 {
		t.Errorf("invalid nodes counter: want=%d got=%d", 6, c.Nodes())
	}
	if c.Size() != 8 {
		t.Errorf("invalid size counter: want=%d got=%d", 8, c.Size())
	}
}

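// TestCacheMap_NilValue verifies that a setter returning a nil value creates
// no entry and yields a nil handle.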
func TestCacheMap_NilValue(t *testing.T) {
	c := NewCache(NewLRU(10))
	h := c.Get(0, 0, func() (size int, value Value) {
		return 1, nil
	})
	if h != nil {
		t.Error("cache handle is non-nil")
	}
	if c.Nodes() != 0 {
		t.Errorf("invalid nodes counter: want=%d got=%d", 0, c.Nodes())
	}
	if c.Size() != 0 {
		t.Errorf("invalid size counter: want=%d got=%d", 0, c.Size())
	}
}

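// TestLRUCache_GetLatency checks that lookups are not blocked by slow setters
// on other keys: while many setters sleep inside Cache.Get, the average
// latency of plain Gets must stay well below the setter delay.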
func TestLRUCache_GetLatency(t *testing.T) {
	runtime.GOMAXPROCS(runtime.NumCPU())

	const (
		concurrentSet = 30
		concurrentGet = 3
		duration      = 3 * time.Second
		delay         = 3 * time.Millisecond
		maxkey        = 100000
	)

	var (
		set, getHit, getAll        int32
		getMaxLatency, getDuration int64
	)

	c := NewCache(NewLRU(5000))
	wg := &sync.WaitGroup{}
	until := time.Now().Add(duration)
	for i := 0; i < concurrentSet; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			r := rand.New(rand.NewSource(time.Now().UnixNano()))
			for time.Now().Before(until) {
				c.Get(0, uint64(r.Intn(maxkey)), func() (int, Value) {
					time.Sleep(delay)
					atomic.AddInt32(&set, 1)
					return 1, 1
				}).Release()
			}
		}(i)
	}
	for i := 0; i < concurrentGet; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			r := rand.New(rand.NewSource(time.Now().UnixNano()))
			for {
				mark := time.Now()
				if mark.Before(until) {
					h := c.Get(0, uint64(r.Intn(maxkey)), nil)
					latency := int64(time.Since(mark))
					m := atomic.LoadInt64(&getMaxLatency)
					if latency > m {
						atomic.CompareAndSwapInt64(&getMaxLatency, m, latency)
					}
					atomic.AddInt64(&getDuration, latency)
					if h != nil {
						atomic.AddInt32(&getHit, 1)
						h.Release()
					}
					atomic.AddInt32(&getAll, 1)
				} else {
					break
				}
			}
		}(i)
	}

	wg.Wait()
	getAvgLatency := time.Duration(getDuration) / time.Duration(getAll)
	t.Logf("set=%d getHit=%d getAll=%d getMaxLatency=%v getAvgLatency=%v",
		set, getHit, getAll, time.Duration(getMaxLatency), getAvgLatency)

	if getAvgLatency > delay/3 {
		t.Errorf("get avg latency > %v: got=%v", delay/3, getAvgLatency)
	}
}

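// TestLRUCache_HitMiss inserts the cases one by one, checking after each step
// that exactly the already-inserted keys hit, then deletes them one by one and
// checks the inverse; it also verifies that set and delete finalizers run.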
func TestLRUCache_HitMiss(t *testing.T) {
	cases := []struct {
		key   uint64
		value string
	}{
		{1, "vvvvvvvvv"},
		{100, "v1"},
		{0, "v2"},
		{12346, "v3"},
		{777, "v4"},
		{999, "v5"},
		{7654, "v6"},
		{2, "v7"},
		{3, "v8"},
		{9, "v9"},
	}

	setfin := 0
	c := NewCache(NewLRU(1000))
	for i, x := range cases {
		set(c, 0, x.key, x.value, len(x.value), func() {
			setfin++
		}).Release()
		for j, y := range cases {
			h := c.Get(0, y.key, nil)
			if j <= i {
				// should hit
				if h == nil {
					t.Errorf("case '%d' iteration '%d' is miss", i, j)
				} else {
					if x := h.Value().(releaserFunc).value.(string); x != y.value {
						t.Errorf("case '%d' iteration '%d' has invalid value got '%s', want '%s'", i, j, x, y.value)
					}
				}
			} else {
				// should miss
				if h != nil {
					t.Errorf("case '%d' iteration '%d' is hit, value '%s'", i, j, h.Value().(releaserFunc).value.(string))
				}
			}
			if h != nil {
				h.Release()
			}
		}
	}

	for i, x := range cases {
		finalizerOk := false
		c.Delete(0, x.key, func() {
			finalizerOk = true
		})

		if !finalizerOk {
			t.Errorf("case %d delete finalizer not executed", i)
		}

		for j, y := range cases {
			h := c.Get(0, y.key, nil)
			if j > i {
				// should hit
				if h == nil {
					t.Errorf("case '%d' iteration '%d' is miss", i, j)
				} else {
					if x := h.Value().(releaserFunc).value.(string); x != y.value {
						t.Errorf("case '%d' iteration '%d' has invalid value got '%s', want '%s'", i, j, x, y.value)
					}
				}
			} else {
				// should miss
				if h != nil {
					t.Errorf("case '%d' iteration '%d' is hit, value '%s'", i, j, h.Value().(releaserFunc).value.(string))
				}
			}
			if h != nil {
				h.Release()
			}
		}
	}

	if setfin != len(cases) {
		t.Errorf("some set finalizers were not executed, want=%d got=%d", len(cases), setfin)
	}
}

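// TestLRUCache_Eviction checks eviction order: recently used entries and
// entries with an outstanding handle survive a large insert, while the least
// recently used entries are evicted.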
func TestLRUCache_Eviction(t *testing.T) {
	c := NewCache(NewLRU(12))
	o1 := set(c, 0, 1, 1, 1, nil)
	set(c, 0, 2, 2, 1, nil).Release()
	set(c, 0, 3, 3, 1, nil).Release()
	set(c, 0, 4, 4, 1, nil).Release()
	set(c, 0, 5, 5, 1, nil).Release()
	if h := c.Get(0, 2, nil); h != nil { // 1,3,4,5,2
		h.Release()
	}
	set(c, 0, 9, 9, 10, nil).Release() // 5,2,9

	for _, key := range []uint64{9, 2, 5, 1} {
		h := c.Get(0, key, nil)
		if h == nil {
			t.Errorf("miss for key '%d'", key)
		} else {
			if x := h.Value().(int); x != int(key) {
				t.Errorf("invalid value for key '%d' want '%d', got '%d'", key, key, x)
			}
			h.Release()
		}
	}
	o1.Release()
	for _, key := range []uint64{1, 2, 5} {
		h := c.Get(0, key, nil)
		if h == nil {
			t.Errorf("miss for key '%d'", key)
		} else {
			if x := h.Value().(int); x != int(key) {
				t.Errorf("invalid value for key '%d' want '%d', got '%d'", key, key, x)
			}
			h.Release()
		}
	}
	for _, key := range []uint64{3, 4, 9} {
		h := c.Get(0, key, nil)
		if h != nil {
			t.Errorf("hit for key '%d'", key)
			if x := h.Value().(int); x != int(key) {
				t.Errorf("invalid value for key '%d' want '%d', got '%d'", key, key, x)
			}
			h.Release()
		}
	}
}

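// TestLRUCache_Evict covers explicit eviction of a single entry, a whole
// namespace, and the entire cache.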
func TestLRUCache_Evict(t *testing.T) {
	c := NewCache(NewLRU(6))
	set(c, 0, 1, 1, 1, nil).Release()
	set(c, 0, 2, 2, 1, nil).Release()
	set(c, 1, 1, 4, 1, nil).Release()
	set(c, 1, 2, 5, 1, nil).Release()
	set(c, 2, 1, 6, 1, nil).Release()
	set(c, 2, 2, 7, 1, nil).Release()

	for ns := 0; ns < 3; ns++ {
		for key := 1; key < 3; key++ {
			if h := c.Get(uint64(ns), uint64(key), nil); h != nil {
				h.Release()
			} else {
				t.Errorf("Cache.Get on #%d.%d returned nil", ns, key)
			}
		}
	}

	if ok := c.Evict(0, 1); !ok {
		t.Error("first Cache.Evict on #0.1 returned false")
	}
	if ok := c.Evict(0, 1); ok {
		t.Error("second Cache.Evict on #0.1 returned true")
	}
	if h := c.Get(0, 1, nil); h != nil {
		t.Errorf("Cache.Get on #0.1 returned non-nil: %v", h.Value())
	}

	c.EvictNS(1)
	if h := c.Get(1, 1, nil); h != nil {
		t.Errorf("Cache.Get on #1.1 returned non-nil: %v", h.Value())
	}
	if h := c.Get(1, 2, nil); h != nil {
		t.Errorf("Cache.Get on #1.2 returned non-nil: %v", h.Value())
	}

	c.EvictAll()
	for ns := 0; ns < 3; ns++ {
		for key := 1; key < 3; key++ {
			if h := c.Get(uint64(ns), uint64(key), nil); h != nil {
				t.Errorf("Cache.Get on #%d.%d returned non-nil: %v", ns, key, h.Value())
			}
		}
	}
}

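// TestLRUCache_Delete covers Delete with a finalizer: an unreferenced entry
// disappears immediately, while an entry with an outstanding handle stays
// retrievable until that handle is released.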
func TestLRUCache_Delete(t *testing.T) {
	delFuncCalled := 0
	delFunc := func() {
		delFuncCalled++
	}

	c := NewCache(NewLRU(2))
	set(c, 0, 1, 1, 1, nil).Release()
	set(c, 0, 2, 2, 1, nil).Release()

	if ok := c.Delete(0, 1, delFunc); !ok {
		t.Error("Cache.Delete on #1 returned false")
	}
	if h := c.Get(0, 1, nil); h != nil {
		t.Errorf("Cache.Get on #1 returned non-nil: %v", h.Value())
	}
	if ok := c.Delete(0, 1, delFunc); ok {
		t.Error("Cache.Delete on #1 returned true")
	}

	h2 := c.Get(0, 2, nil)
	if h2 == nil {
		t.Error("Cache.Get on #2 returned nil")
	}
	if ok := c.Delete(0, 2, delFunc); !ok {
		t.Error("(1) Cache.Delete on #2 returned false")
	}
	if ok := c.Delete(0, 2, delFunc); !ok {
		t.Error("(2) Cache.Delete on #2 returned false")
	}

	set(c, 0, 3, 3, 1, nil).Release()
	set(c, 0, 4, 4, 1, nil).Release()
	c.Get(0, 2, nil).Release()

	for key := 2; key <= 4; key++ {
		if h := c.Get(0, uint64(key), nil); h != nil {
			h.Release()
		} else {
			t.Errorf("Cache.Get on #%d returned nil", key)
		}
	}

	h2.Release()
	if h := c.Get(0, 2, nil); h != nil {
		t.Errorf("Cache.Get on #2 returned non-nil: %v", h.Value())
	}

	if delFuncCalled != 4 {
		t.Errorf("delFunc wasn't called 4 times: got=%d", delFuncCalled)
	}
}

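// TestLRUCache_Close verifies that Close releases every remaining entry,
// running their release callbacks and any pending delete finalizer.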
func TestLRUCache_Close(t *testing.T) {
	relFuncCalled := 0
	relFunc := func() {
		relFuncCalled++
	}
	delFuncCalled := 0
	delFunc := func() {
		delFuncCalled++
	}

	c := NewCache(NewLRU(2))
	set(c, 0, 1, 1, 1, relFunc).Release()
	set(c, 0, 2, 2, 1, relFunc).Release()

	h3 := set(c, 0, 3, 3, 1, relFunc)
	if h3 == nil {
		t.Error("Cache.Get on #3 returned nil")
	}
	if ok := c.Delete(0, 3, delFunc); !ok {
		t.Error("Cache.Delete on #3 returned false")
	}

	c.Close()

	if relFuncCalled != 3 {
		t.Errorf("relFunc wasn't called 3 times: got=%d", relFuncCalled)
	}
	if delFuncCalled != 1 {
		t.Errorf("delFunc wasn't called 1 time: got=%d", delFuncCalled)
	}
}