github.com/s1s1ty/go@v0.0.0-20180207192209-104445e3140f/src/runtime/rwmutex_test.go

// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// GOMAXPROCS=10 go test

// This is a copy of sync/rwmutex_test.go rewritten to test the
// runtime rwmutex.

package runtime_test

import (
	"fmt"
	. "runtime"
	"runtime/debug"
	"sync/atomic"
	"testing"
)

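// parallelReader takes the read lock, reports on clocked that it holds it,
// then busy-waits until cunlock is set before releasing the lock and
// reporting completion on cdone.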
func parallelReader(m *RWMutex, clocked chan bool, cunlock *uint32, cdone chan bool) {
	m.RLock()
	clocked <- true
	for atomic.LoadUint32(cunlock) == 0 {
	}
	m.RUnlock()
	cdone <- true
}

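// doTestParallelReaders checks that numReaders goroutines can hold the read
// lock simultaneously: it starts the readers, waits until every RLock has
// succeeded, and only then lets them unlock.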
func doTestParallelReaders(numReaders int) {
	GOMAXPROCS(numReaders + 1)
	var m RWMutex
	clocked := make(chan bool, numReaders)
	var cunlock uint32
	cdone := make(chan bool)
	for i := 0; i < numReaders; i++ {
		go parallelReader(&m, clocked, &cunlock, cdone)
	}
	// Wait for all parallel RLock()s to succeed.
	for i := 0; i < numReaders; i++ {
		<-clocked
	}
	atomic.StoreUint32(&cunlock, 1)
	// Wait for the goroutines to finish.
	for i := 0; i < numReaders; i++ {
		<-cdone
	}
}

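// TestParallelRWMutexReaders verifies that multiple readers can hold the
// runtime rwmutex at the same time.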
func TestParallelRWMutexReaders(t *testing.T) {
	defer GOMAXPROCS(GOMAXPROCS(-1))
	// If the runtime triggers a forced GC during this test, it will deadlock,
	// since the goroutines can't be stopped/preempted.
	// Disable GC for this test (see issue #10958).
	defer debug.SetGCPercent(debug.SetGCPercent(-1))
	doTestParallelReaders(1)
	doTestParallelReaders(3)
	doTestParallelReaders(4)
}

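// reader repeatedly takes the read lock and uses the shared activity counter
// to check that no writer holds the lock at the same time: readers add 1 to
// the counter, writers add 10000, so any value outside [1, 10000) while a
// read lock is held indicates an active writer.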
func reader(rwm *RWMutex, numIterations int, activity *int32, cdone chan bool) {
	for i := 0; i < numIterations; i++ {
		rwm.RLock()
		n := atomic.AddInt32(activity, 1)
		if n < 1 || n >= 10000 {
			panic(fmt.Sprintf("rlock(%d)\n", n))
		}
		// Spin briefly while holding the lock.
		for i := 0; i < 100; i++ {
		}
		atomic.AddInt32(activity, -1)
		rwm.RUnlock()
	}
	cdone <- true
}

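// writer repeatedly takes the write lock and uses the activity counter to
// check that it holds the lock exclusively: the count must be exactly 10000
// (this writer alone, no readers) while the lock is held.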
func writer(rwm *RWMutex, numIterations int, activity *int32, cdone chan bool) {
	for i := 0; i < numIterations; i++ {
		rwm.Lock()
		n := atomic.AddInt32(activity, 10000)
		if n != 10000 {
			panic(fmt.Sprintf("wlock(%d)\n", n))
		}
		// Spin briefly while holding the lock.
		for i := 0; i < 100; i++ {
		}
		atomic.AddInt32(activity, -10000)
		rwm.Unlock()
	}
	cdone <- true
}

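// HammerRWMutex stress-tests a shared lock with two writers and numReaders
// readers, launching the second writer midway through the reader launches so
// the writers contend with readers on both sides.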
func HammerRWMutex(gomaxprocs, numReaders, numIterations int) {
	GOMAXPROCS(gomaxprocs)
	// Number of active readers + 10000 * number of active writers.
	var activity int32
	var rwm RWMutex
	cdone := make(chan bool)
	go writer(&rwm, numIterations, &activity, cdone)
	var i int
	for i = 0; i < numReaders/2; i++ {
		go reader(&rwm, numIterations, &activity, cdone)
	}
	go writer(&rwm, numIterations, &activity, cdone)
	for ; i < numReaders; i++ {
		go reader(&rwm, numIterations, &activity, cdone)
	}
	// Wait for the 2 writers and all readers to finish.
	for i := 0; i < 2+numReaders; i++ {
		<-cdone
	}
}

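// TestRWMutex runs the hammer across several GOMAXPROCS and reader-count
// combinations, with fewer iterations in short mode.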
func TestRWMutex(t *testing.T) {
	defer GOMAXPROCS(GOMAXPROCS(-1))
	n := 1000
	if testing.Short() {
		n = 5
	}
	HammerRWMutex(1, 1, n)
	HammerRWMutex(1, 3, n)
	HammerRWMutex(1, 10, n)
	HammerRWMutex(4, 1, n)
	HammerRWMutex(4, 3, n)
	HammerRWMutex(4, 10, n)
	HammerRWMutex(10, 1, n)
	HammerRWMutex(10, 3, n)
	HammerRWMutex(10, 10, n)
	HammerRWMutex(10, 5, n)
}

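// BenchmarkRWMutexUncontended measures lock/unlock cost with no contention:
// each goroutine gets its own mutex, padded so that mutexes belonging to
// different goroutines are unlikely to share a cache line.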
func BenchmarkRWMutexUncontended(b *testing.B) {
	type PaddedRWMutex struct {
		RWMutex
		pad [32]uint32
	}
	b.RunParallel(func(pb *testing.PB) {
		var rwm PaddedRWMutex
		for pb.Next() {
			rwm.RLock()
			rwm.RLock()
			rwm.RUnlock()
			rwm.RUnlock()
			rwm.Lock()
			rwm.Unlock()
		}
	})
}

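// benchmarkRWMutex measures a mixed workload on a shared lock: roughly one
// operation in writeRatio takes the write lock, and each read holds the read
// lock for localWork iterations of trivial arithmetic.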
func benchmarkRWMutex(b *testing.B, localWork, writeRatio int) {
	var rwm RWMutex
	b.RunParallel(func(pb *testing.PB) {
		foo := 0
		for pb.Next() {
			foo++
			if foo%writeRatio == 0 {
				rwm.Lock()
				rwm.Unlock()
			} else {
				rwm.RLock()
				for i := 0; i != localWork; i++ {
					foo *= 2
					foo /= 2
				}
				rwm.RUnlock()
			}
		}
		_ = foo
	})
}

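// The WriteN variants perform a write roughly every N operations; the Work
// variants additionally hold the read lock for 100 iterations of local work.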
func BenchmarkRWMutexWrite100(b *testing.B) {
	benchmarkRWMutex(b, 0, 100)
}

func BenchmarkRWMutexWrite10(b *testing.B) {
	benchmarkRWMutex(b, 0, 10)
}

func BenchmarkRWMutexWorkWrite100(b *testing.B) {
	benchmarkRWMutex(b, 100, 100)
}

func BenchmarkRWMutexWorkWrite10(b *testing.B) {
	benchmarkRWMutex(b, 100, 10)
}