// Copyright 2009 The Go Authors. All rights reserved.
// Copyright 2019 The gVisor Authors.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// GOMAXPROCS=10 go test

// Copy/pasted from the standard library's sync/rwmutex_test.go, except for the
// addition of downgradingWriter and the renaming of num_iterations to
// numIterations to shut up Golint.

package sync

import (
	"fmt"
	"runtime"
	"sync/atomic"
	"testing"
)

// parallelReader read-locks m and signals clocked, then holds the read lock
// until released via cunlock, and finally signals completion on cdone.
func parallelReader(m *RWMutex, clocked, cunlock, cdone chan bool) {
	m.RLock()
	clocked <- true
	<-cunlock
	m.RUnlock()
	cdone <- true
}

// doTestParallelReaders verifies that numReaders goroutines can all hold the
// read lock simultaneously under the given GOMAXPROCS setting: it waits for
// every reader to acquire the lock before releasing any of them.
func doTestParallelReaders(numReaders, gomaxprocs int) {
	runtime.GOMAXPROCS(gomaxprocs)
	var m RWMutex
	clocked := make(chan bool)
	cunlock := make(chan bool)
	cdone := make(chan bool)
	for i := 0; i < numReaders; i++ {
		go parallelReader(&m, clocked, cunlock, cdone)
	}
	// Wait for all parallel RLock()s to succeed.
	for i := 0; i < numReaders; i++ {
		<-clocked
	}
	for i := 0; i < numReaders; i++ {
		cunlock <- true
	}
	// Wait for the goroutines to finish.
	for i := 0; i < numReaders; i++ {
		<-cdone
	}
}

func TestParallelReaders(t *testing.T) {
	// Restore the original GOMAXPROCS when the test completes.
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(-1))
	doTestParallelReaders(1, 4)
	doTestParallelReaders(3, 4)
	doTestParallelReaders(4, 2)
}

// reader repeatedly read-locks rwm and checks, via the shared activity
// counter, that no writer holds the lock concurrently. activity encodes
// (number of active readers) + 10000*(number of active writers), so any
// value >= 10000 observed under a read lock indicates a locking bug.
func reader(rwm *RWMutex, numIterations int, activity *int32, cdone chan bool) {
	for i := 0; i < numIterations; i++ {
		rwm.RLock()
		n := atomic.AddInt32(activity, 1)
		if n < 1 || n >= 10000 {
			panic(fmt.Sprintf("wlock(%d)\n", n))
		}
		// Spin briefly while holding the lock to encourage overlap.
		for i := 0; i < 100; i++ {
		}
		atomic.AddInt32(activity, -1)
		rwm.RUnlock()
	}
	cdone <- true
}

// writer repeatedly write-locks rwm and checks, via the shared activity
// counter, that it holds the lock exclusively (no other readers or writers).
func writer(rwm *RWMutex, numIterations int, activity *int32, cdone chan bool) {
	for i := 0; i < numIterations; i++ {
		rwm.Lock()
		n := atomic.AddInt32(activity, 10000)
		if n != 10000 {
			// Any other value means a reader or another writer was active.
			panic(fmt.Sprintf("wlock(%d)\n", n))
		}
		// Spin briefly while holding the lock to encourage overlap.
		for i := 0; i < 100; i++ {
		}
		atomic.AddInt32(activity, -10000)
		rwm.Unlock()
	}
	cdone <- true
}

// downgradingWriter repeatedly write-locks rwm, verifies exclusivity, then
// downgrades to a read lock via DowngradeLock and verifies that only readers
// (no writers) are active before releasing with RUnlock. This is the gVisor
// addition relative to the standard library's test.
func downgradingWriter(rwm *RWMutex, numIterations int, activity *int32, cdone chan bool) {
	for i := 0; i < numIterations; i++ {
		rwm.Lock()
		n := atomic.AddInt32(activity, 10000)
		if n != 10000 {
			panic(fmt.Sprintf("wlock(%d)\n", n))
		}
		// Spin briefly while holding the write lock.
		for i := 0; i < 100; i++ {
		}
		atomic.AddInt32(activity, -10000)
		rwm.DowngradeLock()
		// Now holding a read lock; other readers may be active, but no writer.
		n = atomic.AddInt32(activity, 1)
		if n < 1 || n >= 10000 {
			panic(fmt.Sprintf("wlock(%d)\n", n))
		}
		// Spin briefly while holding the read lock.
		for i := 0; i < 100; i++ {
		}
		atomic.AddInt32(activity, -1)
		rwm.RUnlock()
	}
	cdone <- true
}

// HammerDowngradableRWMutex stress-tests one RWMutex with 2 plain writers,
// 2 downgrading writers, and numReaders readers running concurrently for
// numIterations iterations each, under the given GOMAXPROCS setting.
func HammerDowngradableRWMutex(gomaxprocs, numReaders, numIterations int) {
	runtime.GOMAXPROCS(gomaxprocs)
	// Number of active readers + 10000 * number of active writers.
	var activity int32
	var rwm RWMutex
	cdone := make(chan bool)
	go writer(&rwm, numIterations, &activity, cdone)
	go downgradingWriter(&rwm, numIterations, &activity, cdone)
	var i int
	// Start half the readers before the second writer pair, half after, to
	// interleave reader and writer arrival order.
	for i = 0; i < numReaders/2; i++ {
		go reader(&rwm, numIterations, &activity, cdone)
	}
	go writer(&rwm, numIterations, &activity, cdone)
	go downgradingWriter(&rwm, numIterations, &activity, cdone)
	for ; i < numReaders; i++ {
		go reader(&rwm, numIterations, &activity, cdone)
	}
	// Wait for the 4 writers and all readers to finish.
	for i := 0; i < 4+numReaders; i++ {
		<-cdone
	}
}

func TestDowngradableRWMutex(t *testing.T) {
	// Restore the original GOMAXPROCS when the test completes.
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(-1))
	n := 1000
	if testing.Short() {
		n = 5
	}
	HammerDowngradableRWMutex(1, 1, n)
	HammerDowngradableRWMutex(1, 3, n)
	HammerDowngradableRWMutex(1, 10, n)
	HammerDowngradableRWMutex(4, 1, n)
	HammerDowngradableRWMutex(4, 3, n)
	HammerDowngradableRWMutex(4, 10, n)
	HammerDowngradableRWMutex(10, 1, n)
	HammerDowngradableRWMutex(10, 3, n)
	HammerDowngradableRWMutex(10, 10, n)
	HammerDowngradableRWMutex(10, 5, n)
}

// TestRWDoubleTryLock verifies that TryLock succeeds on an unlocked mutex and
// fails on one that is already write-locked.
func TestRWDoubleTryLock(t *testing.T) {
	var rwm RWMutex
	if !rwm.TryLock() {
		t.Fatal("failed to acquire lock")
	}
	if rwm.TryLock() {
		t.Fatal("unexpectedly succeeded in acquiring locked mutex")
	}
}

// TestRWTryLockAfterLock verifies that TryLock fails while the mutex is held
// via a blocking Lock.
func TestRWTryLockAfterLock(t *testing.T) {
	var rwm RWMutex
	rwm.Lock()
	if rwm.TryLock() {
		t.Fatal("unexpectedly succeeded in acquiring locked mutex")
	}
}

// TestRWTryLockUnlock verifies that a mutex acquired via TryLock can be
// released with Unlock and then reacquired.
func TestRWTryLockUnlock(t *testing.T) {
	var rwm RWMutex
	if !rwm.TryLock() {
		t.Fatal("failed to acquire lock")
	}
	rwm.Unlock() // +checklocksforce
	if !rwm.TryLock() {
		t.Fatal("failed to acquire lock after unlock")
	}
}

// TestTryRLockAfterLock verifies that TryRLock fails while the mutex is
// write-locked.
func TestTryRLockAfterLock(t *testing.T) {
	var rwm RWMutex
	rwm.Lock()
	if rwm.TryRLock() {
		t.Fatal("unexpectedly succeeded in acquiring locked mutex")
	}
}

// TestTryLockAfterRLock verifies that TryLock fails while the mutex is
// read-locked.
func TestTryLockAfterRLock(t *testing.T) {
	var rwm RWMutex
	rwm.RLock()
	if rwm.TryLock() {
		t.Fatal("unexpectedly succeeded in acquiring locked mutex")
	}
}

// TestDoubleTryRLock verifies that multiple TryRLock calls can hold the read
// lock concurrently.
func TestDoubleTryRLock(t *testing.T) {
	var rwm RWMutex
	if !rwm.TryRLock() {
		t.Fatal("failed to acquire lock")
	}
	if !rwm.TryRLock() {
		t.Fatal("failed to read acquire read locked lock")
	}
}