github.com/fufuok/utils@v1.0.10/xsync/rbmutex_test.go

// Copyright notice. Initial version of the following tests was based on
// the following file from the Go Programming Language core repo:
// https://github.com/golang/go/blob/831f9376d8d730b16fb33dfd775618dffe13ce7a/src/sync/rwmutex_test.go

package xsync_test

import (
	"fmt"
	"runtime"
	"sync"
	"sync/atomic"
	"testing"

	. "github.com/fufuok/utils/xsync"
)

func TestRBMutexSerialReader(t *testing.T) {
	const numIters = 10
	mu := NewRBMutex()
	var rtokens [numIters]*RToken
	for i := 0; i < numIters; i++ {
		rtokens[i] = mu.RLock()
	}
	for i := 0; i < numIters; i++ {
		mu.RUnlock(rtokens[i])
	}
}

func parallelReader(mu *RBMutex, clocked, cunlock, cdone chan bool) {
	tk := mu.RLock()
	clocked <- true
	<-cunlock
	mu.RUnlock(tk)
	cdone <- true
}

func doTestParallelReaders(numReaders, gomaxprocs int) {
	runtime.GOMAXPROCS(gomaxprocs)
	mu := NewRBMutex()
	clocked := make(chan bool)
	cunlock := make(chan bool)
	cdone := make(chan bool)
	for i := 0; i < numReaders; i++ {
		go parallelReader(mu, clocked, cunlock, cdone)
	}
	// Wait for all parallel RLock()s to succeed.
	for i := 0; i < numReaders; i++ {
		<-clocked
	}
	for i := 0; i < numReaders; i++ {
		cunlock <- true
	}
	// Wait for the goroutines to finish.
	for i := 0; i < numReaders; i++ {
		<-cdone
	}
}

func TestRBMutexParallelReaders(t *testing.T) {
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(0))
	doTestParallelReaders(1, 4)
	doTestParallelReaders(3, 4)
	doTestParallelReaders(4, 2)
}

func reader(mu *RBMutex, numIterations int, activity *int32, cdone chan bool) {
	for i := 0; i < numIterations; i++ {
		tk := mu.RLock()
		n := atomic.AddInt32(activity, 1)
		if n < 1 || n >= 10000 {
			mu.RUnlock(tk)
			panic(fmt.Sprintf("rlock(%d)\n", n))
		}
		for i := 0; i < 100; i++ {
		}
		atomic.AddInt32(activity, -1)
		mu.RUnlock(tk)
	}
	cdone <- true
}

func writer(mu *RBMutex, numIterations int, activity *int32, cdone chan bool) {
	for i := 0; i < numIterations; i++ {
		mu.Lock()
		n := atomic.AddInt32(activity, 10000)
		if n != 10000 {
			mu.Unlock()
			panic(fmt.Sprintf("wlock(%d)\n", n))
		}
		for i := 0; i < 100; i++ {
		}
		atomic.AddInt32(activity, -10000)
		mu.Unlock()
	}
	cdone <- true
}

func hammerRBMutex(gomaxprocs, numReaders, numIterations int) {
	runtime.GOMAXPROCS(gomaxprocs)
	// Number of active readers + 10000 * number of active writers.
	var activity int32
	mu := NewRBMutex()
	cdone := make(chan bool)
	go writer(mu, numIterations, &activity, cdone)
	var i int
	for i = 0; i < numReaders/2; i++ {
		go reader(mu, numIterations, &activity, cdone)
	}
	go writer(mu, numIterations, &activity, cdone)
	for ; i < numReaders; i++ {
		go reader(mu, numIterations, &activity, cdone)
	}
	// Wait for the 2 writers and all readers to finish.
	for i := 0; i < 2+numReaders; i++ {
		<-cdone
	}
}

func TestRBMutex(t *testing.T) {
	const n = 1000
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(0))
	hammerRBMutex(1, 1, n)
	hammerRBMutex(1, 3, n)
	hammerRBMutex(1, 10, n)
	hammerRBMutex(4, 1, n)
	hammerRBMutex(4, 3, n)
	hammerRBMutex(4, 10, n)
	hammerRBMutex(10, 1, n)
	hammerRBMutex(10, 3, n)
	hammerRBMutex(10, 10, n)
	hammerRBMutex(10, 5, n)
}

func benchmarkRBMutex(b *testing.B, parallelism, localWork, writeRatio int) {
	mu := NewRBMutex()
	b.SetParallelism(parallelism)
	runParallel(b, func(pb *testing.PB) {
		foo := 0
		for pb.Next() {
			foo++
			if writeRatio > 0 && foo%writeRatio == 0 {
				mu.Lock()
				for i := 0; i != localWork; i += 1 {
					foo *= 2
					foo /= 2
				}
				mu.Unlock()
			} else {
				tk := mu.RLock()
				for i := 0; i != localWork; i += 1 {
					foo *= 2
					foo /= 2
				}
				mu.RUnlock(tk)
			}
		}
		_ = foo
	})
}

func BenchmarkRBMutexWorkReadOnly_HighParallelism(b *testing.B) {
	benchmarkRBMutex(b, 1024, 100, -1)
}

func BenchmarkRBMutexWorkReadOnly(b *testing.B) {
	benchmarkRBMutex(b, -1, 100, -1)
}

func BenchmarkRBMutexWorkWrite100000(b *testing.B) {
	benchmarkRBMutex(b, -1, 100, 100000)
}

func BenchmarkRBMutexWorkWrite1000(b *testing.B) {
	benchmarkRBMutex(b, -1, 100, 1000)
}

func benchmarkRWMutex(b *testing.B, parallelism, localWork, writeRatio int) {
	var mu sync.RWMutex
	b.SetParallelism(parallelism)
	runParallel(b, func(pb *testing.PB) {
		foo := 0
		for pb.Next() {
			foo++
			if writeRatio > 0 && foo%writeRatio == 0 {
				mu.Lock()
				for i := 0; i != localWork; i += 1 {
					foo *= 2
					foo /= 2
				}
				mu.Unlock()
			} else {
				mu.RLock()
				for i := 0; i != localWork; i += 1 {
					foo *= 2
					foo /= 2
				}
				mu.RUnlock()
			}
		}
		_ = foo
	})
}

func BenchmarkRWMutexWorkReadOnly_HighParallelism(b *testing.B) {
	benchmarkRWMutex(b, 1024, 100, -1)
}

func BenchmarkRWMutexWorkReadOnly(b *testing.B) {
	benchmarkRWMutex(b, -1, 100, -1)
}

func BenchmarkRWMutexWorkWrite100000(b *testing.B) {
	benchmarkRWMutex(b, -1, 100, 100000)
}

func BenchmarkRWMutexWorkWrite1000(b *testing.B) {
	benchmarkRWMutex(b, -1, 100, 1000)
}
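
// The tests above exercise the full RBMutex surface: NewRBMutex, the
// token-based RLock/RUnlock pair, and the exclusive Lock/Unlock pair.
// The example below is not part of the original file; it is a minimal
// usage sketch assuming only that API, illustrating how the *RToken
// returned by RLock must be handed back to RUnlock (unlike
// sync.RWMutex, whose RUnlock takes no argument).
func ExampleRBMutex() {
	mu := NewRBMutex()
	counter := 0

	// Writer side: exclusive access, same shape as sync.RWMutex.
	mu.Lock()
	counter++
	mu.Unlock()

	// Reader side: keep the token and return it on unlock.
	tk := mu.RLock()
	fmt.Println(counter)
	mu.RUnlock(tk)

	// Output: 1
}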