github.com/cockroachdb/cockroach@v20.2.0-alpha.1+incompatible/pkg/util/syncutil/int_map_bench_test.go

// Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in licenses/BSD-golang.txt.

// This code originated in Go's sync package.

package syncutil

import (
	"fmt"
	"reflect"
	"sync/atomic"
	"testing"
	"unsafe"
)

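// bench describes a single benchmark: an optional setup function that runs
// once per map implementation, and a perG function that each parallel
// goroutine runs during the measured portion.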
type bench struct {
	setup func(*testing.B, mapInterface)
	perG  func(b *testing.B, pb *testing.PB, i int, m mapInterface)
}

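// benchMap runs the given benchmark as a sub-benchmark against each map
// implementation under test: DeepCopyMap, RWMutexMap, and IntMap.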
func benchMap(b *testing.B, bench bench) {
	for _, m := range [...]mapInterface{&DeepCopyMap{}, &RWMutexMap{}, &IntMap{}} {
		b.Run(fmt.Sprintf("%T", m), func(b *testing.B) {
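			// Allocate a fresh, empty instance of the same concrete map type
			// so that sub-benchmark runs do not share state.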
			m = reflect.New(reflect.TypeOf(m).Elem()).Interface().(mapInterface)
			if bench.setup != nil {
				bench.setup(b, m)
			}

			b.ResetTimer()

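			// Give each goroutine a distinct starting index, offset by a
			// multiple of b.N, so the key ranges they touch do not overlap.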
			var i int64
			b.RunParallel(func(pb *testing.PB) {
				id := int(atomic.AddInt64(&i, 1) - 1)
				bench.perG(b, pb, id*b.N, m)
			})
		})
	}
}

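// BenchmarkLoadMostlyHits measures Load throughput when nearly every lookup
// (1023 of every 1024) finds an existing key.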
func BenchmarkLoadMostlyHits(b *testing.B) {
	const hits, misses = 1023, 1
	v := unsafe.Pointer(new(int))

	benchMap(b, bench{
		setup: func(_ *testing.B, m mapInterface) {
			for i := 0; i < hits; i++ {
				m.LoadOrStore(int64(i), v)
			}
			// Prime the map to get it into a steady state.
			for i := 0; i < hits*2; i++ {
				m.Load(int64(i % hits))
			}
		},

		perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
			for ; pb.Next(); i++ {
				m.Load(int64(i % (hits + misses)))
			}
		},
	})
}

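// BenchmarkLoadMostlyMisses measures Load throughput when nearly every lookup
// (1023 of every 1024) misses.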
func BenchmarkLoadMostlyMisses(b *testing.B) {
	const hits, misses = 1, 1023
	v := unsafe.Pointer(new(int))

	benchMap(b, bench{
		setup: func(_ *testing.B, m mapInterface) {
			for i := 0; i < hits; i++ {
				m.LoadOrStore(int64(i), v)
			}
			// Prime the map to get it into a steady state.
			for i := 0; i < hits*2; i++ {
				m.Load(int64(i % hits))
			}
		},

		perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
			for ; pb.Next(); i++ {
				m.Load(int64(i % (hits + misses)))
			}
		},
	})
}

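// BenchmarkLoadOrStoreBalanced measures LoadOrStore throughput with an even
// mix of loads of existing keys and stores of new keys.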
func BenchmarkLoadOrStoreBalanced(b *testing.B) {
	const hits, misses = 128, 128
	v := unsafe.Pointer(new(int))

	benchMap(b, bench{
		setup: func(b *testing.B, m mapInterface) {
			if _, ok := m.(*DeepCopyMap); ok {
				b.Skip("DeepCopyMap has quadratic running time.")
			}
			for i := 0; i < hits; i++ {
				m.LoadOrStore(int64(i), v)
			}
			// Prime the map to get it into a steady state.
			for i := 0; i < hits*2; i++ {
				m.Load(int64(i % hits))
			}
		},

		perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
			for ; pb.Next(); i++ {
				j := i % (hits + misses)
				if j < hits {
					if _, ok := m.LoadOrStore(int64(j), v); !ok {
						b.Fatalf("unexpected miss for %v", j)
					}
				} else {
					if e, loaded := m.LoadOrStore(int64(i), v); loaded {
						b.Fatalf("failed to store %v: existing value %v", i, e)
					}
				}
			}
		},
	})
}

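// BenchmarkLoadOrStoreUnique measures LoadOrStore throughput when every call
// stores a previously unseen key.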
func BenchmarkLoadOrStoreUnique(b *testing.B) {
	v := unsafe.Pointer(new(int))

	benchMap(b, bench{
		setup: func(b *testing.B, m mapInterface) {
			if _, ok := m.(*DeepCopyMap); ok {
				b.Skip("DeepCopyMap has quadratic running time.")
			}
		},

		perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
			for ; pb.Next(); i++ {
				m.LoadOrStore(int64(i), v)
			}
		},
	})
}

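// BenchmarkLoadOrStoreCollision measures LoadOrStore throughput when all
// goroutines contend on a single key that is already present.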
func BenchmarkLoadOrStoreCollision(b *testing.B) {
	v := unsafe.Pointer(new(int))

	benchMap(b, bench{
		setup: func(_ *testing.B, m mapInterface) {
			m.LoadOrStore(int64(0), v)
		},

		perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
			for ; pb.Next(); i++ {
				m.LoadOrStore(int64(0), v)
			}
		},
	})
}

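// BenchmarkRange measures the cost of a full iteration over a map holding
// 1<<10 entries.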
func BenchmarkRange(b *testing.B) {
	const mapSize = 1 << 10
	v := unsafe.Pointer(new(int))

	benchMap(b, bench{
		setup: func(_ *testing.B, m mapInterface) {
			for i := 0; i < mapSize; i++ {
				m.Store(int64(i), v)
			}
		},

		perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
			for ; pb.Next(); i++ {
				m.Range(func(_ int64, _ unsafe.Pointer) bool { return true })
			}
		},
	})
}

// BenchmarkAdversarialAlloc tests performance when we store a new value
// immediately whenever the map is promoted to clean and otherwise load a
// unique, missing key.
//
// This forces the Load calls to always acquire the map's mutex.
func BenchmarkAdversarialAlloc(b *testing.B) {
	v := unsafe.Pointer(new(int))

	benchMap(b, bench{
		perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
			var stores, loadsSinceStore int64
			for ; pb.Next(); i++ {
				m.Load(int64(i))
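				// Require one more intervening Load before each successive
				// store, approximating a store immediately after each
				// promotion of the read-only map.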
				if loadsSinceStore++; loadsSinceStore > stores {
					m.LoadOrStore(int64(i), v)
					loadsSinceStore = 0
					stores++
				}
			}
		},
	})
}

// BenchmarkAdversarialDelete tests performance when we periodically delete
// one key and add a different one in a large map.
//
// This forces the Load calls to always acquire the map's mutex and periodically
// makes a full copy of the map despite changing only one entry.
func BenchmarkAdversarialDelete(b *testing.B) {
	const mapSize = 1 << 10
	v := unsafe.Pointer(new(int))

	benchMap(b, bench{
		setup: func(_ *testing.B, m mapInterface) {
			for i := 0; i < mapSize; i++ {
				m.Store(int64(i), v)
			}
		},

		perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
			for ; pb.Next(); i++ {
				m.Load(int64(i))

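				// Once every mapSize iterations, delete a single arbitrary
				// key (Range stops after its first callback) and store a new
				// one, so only one entry changes per copy.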
				if i%mapSize == 0 {
					m.Range(func(k int64, _ unsafe.Pointer) bool {
						m.Delete(k)
						return false
					})
					m.Store(int64(i), v)
				}
			}
		},
	})
}