github.com/primecitizens/pcz/std@v0.2.1/core/atomic/atomic_test.go

// SPDX-License-Identifier: Apache-2.0
// Copyright 2023 The Prime Citizens
//
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package atomic_test

import (
	"runtime"
	"testing"
	"unsafe"

	"github.com/primecitizens/pcz/std/core/arch"
	"github.com/primecitizens/pcz/std/core/atomic"
)

func runParallel(N, iter int, f func()) {
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(N))
	done := make(chan bool)
	for i := 0; i < N; i++ {
		go func() {
			for j := 0; j < iter; j++ {
				f()
			}
			done <- true
		}()
	}
	for i := 0; i < N; i++ {
		<-done
	}
}

func TestAddUintptr(t *testing.T) {
	N := 20
	iter := 100000
	if testing.Short() {
		N = 10
		iter = 10000
	}
	inc := uintptr(100)
	total := uintptr(0)
	runParallel(N, iter, func() {
		atomic.AddUintptr(&total, inc)
	})
	if want := uintptr(N*iter) * inc; want != total {
		t.Fatalf("xadduintptr error, want %d, got %d", want, total)
	}
	total = 0
	runParallel(N, iter, func() {
		atomic.AddUintptr(&total, inc)
		atomic.AddUintptr(&total, uintptr(-int64(inc)))
	})
	if total != 0 {
		t.Fatalf("xadduintptr total error, want %d, got %d", 0, total)
	}
}

// Tests that xadduintptr correctly updates 64-bit values. The place where
// we actually do so is mstats.go, functions mSysStat{Inc,Dec}.
func TestAddUintptrOnUint64(t *testing.T) {
	if arch.BigEndian {
		// On big endian architectures, we never use xadduintptr to update
		// 64-bit values and hence we skip the test. (Note that functions
		// mSysStat{Inc,Dec} in mstats.go have explicit checks for
		// big-endianness.)
		t.Skip("skip xadduintptr on big endian architecture")
	}
	const inc = 100
	val := uint64(0)
	atomic.AddUintptr((*uintptr)(unsafe.Pointer(&val)), inc)
	if inc != val {
		t.Fatalf("xadduintptr should increase lower-order bits, want %d, got %d", inc, val)
	}
}

func shouldPanic(t *testing.T, name string, f func()) {
	defer func() {
		// Check that all GC maps are sane.
		runtime.GC()

		err := recover()
		want := "unaligned 64-bit atomic operation"
		if err == nil {
			t.Errorf("%s did not panic", name)
		} else if s, _ := err.(string); s != want {
			t.Errorf("%s: wanted panic %q, got %q", name, want, err)
		}
	}()
	f()
}

// Variant of sync/atomic's TestUnaligned64:
func TestUnaligned64(t *testing.T) {
	// Unaligned 64-bit atomics on 32-bit systems are a continual source
	// of pain. Test that on 32-bit systems they crash instead of failing
	// silently.

	if unsafe.Sizeof(int(0)) != 4 {
		t.Skip("test only runs on 32-bit systems")
	}

	x := make([]uint32, 4)
	u := unsafe.Pointer(uintptr(unsafe.Pointer(&x[0])) | 4) // force an address that is 4-byte but not 8-byte aligned

	up64 := (*uint64)(u) // misaligned
	p64 := (*int64)(u)   // misaligned

	shouldPanic(t, "Load64", func() { atomic.Load64(up64) })
	shouldPanic(t, "LoadInt64", func() { atomic.LoadInt64(p64) })
	shouldPanic(t, "Store64", func() { atomic.Store64(up64, 0) })
	shouldPanic(t, "Add64", func() { atomic.Add64(up64, 1) })
	shouldPanic(t, "Swap64", func() { atomic.Swap64(up64, 1) })
	shouldPanic(t, "Cas64", func() { atomic.Cas64(up64, 1, 2) })
}
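
// Illustrative sketch, not part of the upstream tests: the aligned
// counterpart of TestUnaligned64. It uses only functions exercised
// above (atomic.Store64, atomic.Load64, atomic.Add64) and checks that a
// 64-bit value at an 8-byte-aligned address works on 32-bit systems,
// where only the misaligned accesses are expected to panic. The name
// TestAligned64 is hypothetical.
func TestAligned64(t *testing.T) {
	if unsafe.Sizeof(int(0)) != 4 {
		t.Skip("test only runs on 32-bit systems")
	}

	// Carve an 8-byte-aligned *uint64 out of a uint32 slice by rounding
	// the base address up to the next multiple of 8; the 16-byte slice
	// leaves room for the 8-byte value after rounding.
	x := make([]uint32, 4)
	u := (*uint64)(unsafe.Pointer((uintptr(unsafe.Pointer(&x[0])) + 7) &^ 7))

	atomic.Store64(u, 1<<40)
	if got := atomic.Load64(u); got != 1<<40 {
		t.Fatalf("aligned Load64: want %#x, got %#x", uint64(1)<<40, got)
	}
	atomic.Add64(u, 1)
	if got := atomic.Load64(u); got != (1<<40)+1 {
		t.Fatalf("aligned Add64: want %#x, got %#x", (uint64(1)<<40)+1, got)
	}
}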

func TestAnd8(t *testing.T) {
	// Basic sanity check.
	x := uint8(0xff)
	for i := uint8(0); i < 8; i++ {
		atomic.And8(&x, ^(1 << i))
		if r := uint8(0xff) << (i + 1); x != r {
			t.Fatalf("clearing bit %#x: want %#x, got %#x", uint8(1<<i), r, x)
		}
	}

	// Set every bit in array to 1.
	a := make([]uint8, 1<<12)
	for i := range a {
		a[i] = 0xff
	}

	// Clear array bit-by-bit in different goroutines.
	done := make(chan bool)
	for i := 0; i < 8; i++ {
		m := ^uint8(1 << i)
		go func() {
			for i := range a {
				atomic.And8(&a[i], m)
			}
			done <- true
		}()
	}
	for i := 0; i < 8; i++ {
		<-done
	}

	// Check that the array has been totally cleared.
	for i, v := range a {
		if v != 0 {
			t.Fatalf("a[%v] not cleared: want %#x, got %#x", i, uint8(0), v)
		}
	}
}

func TestAnd32(t *testing.T) {
	// Basic sanity check.
	x := uint32(0xffffffff)
	for i := uint32(0); i < 32; i++ {
		atomic.And32(&x, ^(1 << i))
		if r := uint32(0xffffffff) << (i + 1); x != r {
			t.Fatalf("clearing bit %#x: want %#x, got %#x", uint32(1<<i), r, x)
		}
	}

	// Set every bit in array to 1.
	a := make([]uint32, 1<<12)
	for i := range a {
		a[i] = 0xffffffff
	}

	// Clear array bit-by-bit in different goroutines.
	done := make(chan bool)
	for i := 0; i < 32; i++ {
		m := ^uint32(1 << i)
		go func() {
			for i := range a {
				atomic.And32(&a[i], m)
			}
			done <- true
		}()
	}
	for i := 0; i < 32; i++ {
		<-done
	}

	// Check that the array has been totally cleared.
	for i, v := range a {
		if v != 0 {
			t.Fatalf("a[%v] not cleared: want %#x, got %#x", i, uint32(0), v)
		}
	}
}

func TestOr8(t *testing.T) {
	// Basic sanity check.
	x := uint8(0)
	for i := uint8(0); i < 8; i++ {
		atomic.Or8(&x, 1<<i)
		if r := (uint8(1) << (i + 1)) - 1; x != r {
			t.Fatalf("setting bit %#x: want %#x, got %#x", uint8(1)<<i, r, x)
		}
	}

	// Start with every bit in array set to 0.
	a := make([]uint8, 1<<12)

	// Set every bit in array bit-by-bit in different goroutines.
	done := make(chan bool)
	for i := 0; i < 8; i++ {
		m := uint8(1 << i)
		go func() {
			for i := range a {
				atomic.Or8(&a[i], m)
			}
			done <- true
		}()
	}
	for i := 0; i < 8; i++ {
		<-done
	}

	// Check that the array has been totally set.
	for i, v := range a {
		if v != 0xff {
			t.Fatalf("a[%v] not fully set: want %#x, got %#x", i, uint8(0xff), v)
		}
	}
}

func TestOr32(t *testing.T) {
	// Basic sanity check.
	x := uint32(0)
	for i := uint32(0); i < 32; i++ {
		atomic.Or32(&x, 1<<i)
		if r := (uint32(1) << (i + 1)) - 1; x != r {
			t.Fatalf("setting bit %#x: want %#x, got %#x", uint32(1)<<i, r, x)
		}
	}

	// Start with every bit in array set to 0.
	a := make([]uint32, 1<<12)

	// Set every bit in array bit-by-bit in different goroutines.
	done := make(chan bool)
	for i := 0; i < 32; i++ {
		m := uint32(1 << i)
		go func() {
			for i := range a {
				atomic.Or32(&a[i], m)
			}
			done <- true
		}()
	}
	for i := 0; i < 32; i++ {
		<-done
	}

	// Check that the array has been totally set.
	for i, v := range a {
		if v != 0xffffffff {
			t.Fatalf("a[%v] not fully set: want %#x, got %#x", i, uint32(0xffffffff), v)
		}
	}
}
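
// Illustrative sketch, not part of the upstream tests: the And8/Or8
// tests above exercise the lock-free bitmap idiom, spelled out here as
// hypothetical helpers (setBit, clearBit, and hasBit are not package
// API). Or8 sets a bit, And8 with the complement mask clears it, and
// Load8 observes the current byte; all three are safe under concurrent
// use from multiple goroutines.
func setBit(b *uint8, i uint) { atomic.Or8(b, 1<<i) }

func clearBit(b *uint8, i uint) { atomic.And8(b, ^uint8(1<<i)) }

func hasBit(b *uint8, i uint) bool { return atomic.Load8(b)&(1<<i) != 0 }

func TestBitmapHelpers(t *testing.T) {
	var b uint8
	setBit(&b, 3)
	if !hasBit(&b, 3) {
		t.Fatalf("bit 3 not set: got %#x", b)
	}
	clearBit(&b, 3)
	if b != 0 {
		t.Fatalf("bit 3 not cleared: got %#x", b)
	}
}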

func TestBitwiseContended8(t *testing.T) {
	// Start with every bit in array set to 0.
	a := make([]uint8, 16)

	// Iterations to try.
	N := 1 << 16
	if testing.Short() {
		N = 1 << 10
	}

	// Set and then clear every bit in the array bit-by-bit in different goroutines.
	done := make(chan bool)
	for i := 0; i < 8; i++ {
		m := uint8(1 << i)
		go func() {
			for n := 0; n < N; n++ {
				for i := range a {
					atomic.Or8(&a[i], m)
					if atomic.Load8(&a[i])&m != m {
						t.Errorf("a[%v] bit %#x not set", i, m)
					}
					atomic.And8(&a[i], ^m)
					if atomic.Load8(&a[i])&m != 0 {
						t.Errorf("a[%v] bit %#x not clear", i, m)
					}
				}
			}
			done <- true
		}()
	}
	for i := 0; i < 8; i++ {
		<-done
	}

	// Check that the array has been totally cleared.
	for i, v := range a {
		if v != 0 {
			t.Fatalf("a[%v] not cleared: want %#x, got %#x", i, uint8(0), v)
		}
	}
}

func TestBitwiseContended32(t *testing.T) {
	// Start with every bit in array set to 0.
	a := make([]uint32, 16)

	// Iterations to try.
	N := 1 << 16
	if testing.Short() {
		N = 1 << 10
	}

	// Set and then clear every bit in the array bit-by-bit in different goroutines.
	done := make(chan bool)
	for i := 0; i < 32; i++ {
		m := uint32(1 << i)
		go func() {
			for n := 0; n < N; n++ {
				for i := range a {
					atomic.Or32(&a[i], m)
					if atomic.Load32(&a[i])&m != m {
						t.Errorf("a[%v] bit %#x not set", i, m)
					}
					atomic.And32(&a[i], ^m)
					if atomic.Load32(&a[i])&m != 0 {
						t.Errorf("a[%v] bit %#x not clear", i, m)
					}
				}
			}
			done <- true
		}()
	}
	for i := 0; i < 32; i++ {
		<-done
	}

	// Check that the array has been totally cleared.
	for i, v := range a {
		if v != 0 {
			t.Fatalf("a[%v] not cleared: want %#x, got %#x", i, uint32(0), v)
		}
	}
}

func TestCasRel32(t *testing.T) {
	const _magic = 0x5a5aa5a5
	var x struct {
		before uint32
		i      uint32
		after  uint32
		o      uint32
		n      uint32
	}

	x.before = _magic
	x.after = _magic
	for j := 0; j < 32; j++ {
		x.i = (1 << j) + 0
		x.o = (1 << j) + 0
		x.n = (1 << j) + 1
		if !atomic.CasRel32(&x.i, x.o, x.n) {
			t.Fatalf("should have swapped %#x %#x", x.o, x.n)
		}

		if x.i != x.n {
			t.Fatalf("wrong x.i after swap: x.i=%#x x.n=%#x", x.i, x.n)
		}

		if x.before != _magic || x.after != _magic {
			t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, _magic, _magic)
		}
	}
}

func TestStorePointer(t *testing.T) {
	var p [2]*int
	for i := range p {
		atomic.StorePointer(unsafe.Pointer(&p[i]), unsafe.Pointer(new(int)))
	}
	if p[0] == p[1] {
		t.Error("Bad escape analysis of StorePointer")
	}
}
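
// Illustrative sketch, not part of the upstream tests: the canonical
// compare-and-swap retry loop, built only from functions exercised
// above (atomic.Load64 and atomic.Cas64). atomicMax64 and
// TestCasRetryLoop are hypothetical names used for this example. The
// loop reloads the current value and retries the CAS until it either
// succeeds or observes a value that is already large enough.
func atomicMax64(addr *uint64, v uint64) {
	for {
		old := atomic.Load64(addr)
		if old >= v {
			return // current value is already at least v
		}
		if atomic.Cas64(addr, old, v) {
			return // swap succeeded
		}
		// Another goroutine updated *addr between the load and the CAS;
		// reload and try again.
	}
}

func TestCasRetryLoop(t *testing.T) {
	var x uint64
	for _, v := range []uint64{3, 1, 7, 5, 10, 2} {
		atomicMax64(&x, v)
	}
	if x != 10 {
		t.Fatalf("atomicMax64: want %d, got %d", 10, x)
	}
}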