// github.com/puzpuzpuz/xsync/v3@v3.1.1-0.20240225193106-cbe4ec1e954f/rbmutex_test.go

// Copyright notice. Initial version of the following tests was based on
// the following file from the Go Programming Language core repo:
// https://github.com/golang/go/blob/831f9376d8d730b16fb33dfd775618dffe13ce7a/src/sync/rwmutex_test.go

package xsync_test

import (
	"fmt"
	"runtime"
	"sync"
	"sync/atomic"
	"testing"

	. "github.com/puzpuzpuz/xsync/v3"
)

func TestRBMutexSerialReader(t *testing.T) {
	const numIters = 10
	mu := NewRBMutex()
	var rtokens [numIters]*RToken
	for i := 0; i < numIters; i++ {
		rtokens[i] = mu.RLock()
	}
	for i := 0; i < numIters; i++ {
		mu.RUnlock(rtokens[i])
	}
}

func parallelReader(mu *RBMutex, clocked, cunlock, cdone chan bool) {
	tk := mu.RLock()
	clocked <- true
	<-cunlock
	mu.RUnlock(tk)
	cdone <- true
}

func doTestParallelReaders(numReaders, gomaxprocs int) {
	runtime.GOMAXPROCS(gomaxprocs)
	mu := NewRBMutex()
	clocked := make(chan bool)
	cunlock := make(chan bool)
	cdone := make(chan bool)
	for i := 0; i < numReaders; i++ {
		go parallelReader(mu, clocked, cunlock, cdone)
	}
	// Wait for all parallel RLock()s to succeed.
	for i := 0; i < numReaders; i++ {
		<-clocked
	}
	for i := 0; i < numReaders; i++ {
		cunlock <- true
	}
	// Wait for the goroutines to finish.
	for i := 0; i < numReaders; i++ {
		<-cdone
	}
}

func TestRBMutexParallelReaders(t *testing.T) {
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(0))
	doTestParallelReaders(1, 4)
	doTestParallelReaders(3, 4)
	doTestParallelReaders(4, 2)
}

func reader(mu *RBMutex, numIterations int, activity *int32, cdone chan bool) {
	for i := 0; i < numIterations; i++ {
		tk := mu.RLock()
		n := atomic.AddInt32(activity, 1)
		if n < 1 || n >= 10000 {
			mu.RUnlock(tk)
			panic(fmt.Sprintf("rlock(%d)\n", n))
		}
		for i := 0; i < 100; i++ {
		}
		atomic.AddInt32(activity, -1)
		mu.RUnlock(tk)
	}
	cdone <- true
}

func writer(mu *RBMutex, numIterations int, activity *int32, cdone chan bool) {
	for i := 0; i < numIterations; i++ {
		mu.Lock()
		n := atomic.AddInt32(activity, 10000)
		if n != 10000 {
			mu.Unlock()
			panic(fmt.Sprintf("wlock(%d)\n", n))
		}
		for i := 0; i < 100; i++ {
		}
		atomic.AddInt32(activity, -10000)
		mu.Unlock()
	}
	cdone <- true
}
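
// TestRBMutexDeferredRUnlock is a minimal illustrative sketch, assuming only
// the RBMutex API already exercised above: the token returned by RLock is
// captured and handed to a deferred RUnlock, mirroring the usual
// sync.RWMutex defer pattern.
func TestRBMutexDeferredRUnlock(t *testing.T) {
	mu := NewRBMutex()
	value := 0

	// Writer path: exclusive lock, same shape as sync.RWMutex.
	mu.Lock()
	value = 42
	mu.Unlock()

	// Reader path: keep the token and pass it back via defer.
	readValue := func() int {
		tk := mu.RLock()
		defer mu.RUnlock(tk)
		return value
	}
	if got := readValue(); got != 42 {
		t.Fatalf("unexpected value read under RLock: %d", got)
	}
}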

func hammerRBMutex(gomaxprocs, numReaders, numIterations int) {
	runtime.GOMAXPROCS(gomaxprocs)
	// Number of active readers + 10000 * number of active writers.
	var activity int32
	mu := NewRBMutex()
	cdone := make(chan bool)
	go writer(mu, numIterations, &activity, cdone)
	var i int
	for i = 0; i < numReaders/2; i++ {
		go reader(mu, numIterations, &activity, cdone)
	}
	go writer(mu, numIterations, &activity, cdone)
	for ; i < numReaders; i++ {
		go reader(mu, numIterations, &activity, cdone)
	}
	// Wait for the 2 writers and all readers to finish.
	for i := 0; i < 2+numReaders; i++ {
		<-cdone
	}
}

func TestRBMutex(t *testing.T) {
	const n = 1000
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(0))
	hammerRBMutex(1, 1, n)
	hammerRBMutex(1, 3, n)
	hammerRBMutex(1, 10, n)
	hammerRBMutex(4, 1, n)
	hammerRBMutex(4, 3, n)
	hammerRBMutex(4, 10, n)
	hammerRBMutex(10, 1, n)
	hammerRBMutex(10, 3, n)
	hammerRBMutex(10, 10, n)
	hammerRBMutex(10, 5, n)
}

func benchmarkRBMutex(b *testing.B, parallelism, localWork, writeRatio int) {
	mu := NewRBMutex()
	b.SetParallelism(parallelism)
	runParallel(b, func(pb *testing.PB) {
		foo := 0
		for pb.Next() {
			foo++
			if writeRatio > 0 && foo%writeRatio == 0 {
				mu.Lock()
				for i := 0; i != localWork; i += 1 {
					foo *= 2
					foo /= 2
				}
				mu.Unlock()
			} else {
				tk := mu.RLock()
				for i := 0; i != localWork; i += 1 {
					foo *= 2
					foo /= 2
				}
				mu.RUnlock(tk)
			}
		}
		_ = foo
	})
}

func BenchmarkRBMutexWorkReadOnly_HighParallelism(b *testing.B) {
	benchmarkRBMutex(b, 1024, 100, -1)
}

func BenchmarkRBMutexWorkReadOnly(b *testing.B) {
	benchmarkRBMutex(b, -1, 100, -1)
}

func BenchmarkRBMutexWorkWrite100000(b *testing.B) {
	benchmarkRBMutex(b, -1, 100, 100000)
}

func BenchmarkRBMutexWorkWrite1000(b *testing.B) {
	benchmarkRBMutex(b, -1, 100, 1000)
}

func benchmarkRWMutex(b *testing.B, parallelism, localWork, writeRatio int) {
	var mu sync.RWMutex
	b.SetParallelism(parallelism)
	runParallel(b, func(pb *testing.PB) {
		foo := 0
		for pb.Next() {
			foo++
			if writeRatio > 0 && foo%writeRatio == 0 {
				mu.Lock()
				for i := 0; i != localWork; i += 1 {
					foo *= 2
					foo /= 2
				}
				mu.Unlock()
			} else {
				mu.RLock()
				for i := 0; i != localWork; i += 1 {
					foo *= 2
					foo /= 2
				}
				mu.RUnlock()
			}
		}
		_ = foo
	})
}

func BenchmarkRWMutexWorkReadOnly_HighParallelism(b *testing.B) {
	benchmarkRWMutex(b, 1024, 100, -1)
}

func BenchmarkRWMutexWorkReadOnly(b *testing.B) {
	benchmarkRWMutex(b, -1, 100, -1)
}

func BenchmarkRWMutexWorkWrite100000(b *testing.B) {
	benchmarkRWMutex(b, -1, 100, 100000)
}

func BenchmarkRWMutexWorkWrite1000(b *testing.B) {
	benchmarkRWMutex(b, -1, 100, 1000)
}
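
// BenchmarkRBMutexReadOnlyNoWork is a small illustrative sketch, assuming only
// the RBMutex API already used in this file: it measures the bare
// RLock/RUnlock round-trip with no local work, driven by the standard
// b.RunParallel helper rather than the package-local runParallel.
func BenchmarkRBMutexReadOnlyNoWork(b *testing.B) {
	mu := NewRBMutex()
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			tk := mu.RLock()
			mu.RUnlock(tk)
		}
	})
}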