golang.org/toolchain@v0.0.1-go1.9rc2.windows-amd64/src/runtime/rwmutex_test.go (about)

     1  // Copyright 2017 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // GOMAXPROCS=10 go test
     6  
     7  // This is a copy of sync/rwmutex_test.go rewritten to test the
     8  // runtime rwmutex.
     9  
    10  package runtime_test
    11  
    12  import (
    13  	"fmt"
    14  	. "runtime"
    15  	"sync/atomic"
    16  	"testing"
    17  )
    18  
    19  func parallelReader(m *RWMutex, clocked chan bool, cunlock *uint32, cdone chan bool) {
    20  	m.RLock()
    21  	clocked <- true
    22  	for atomic.LoadUint32(cunlock) == 0 {
    23  	}
    24  	m.RUnlock()
    25  	cdone <- true
    26  }
    27  
    28  func doTestParallelReaders(numReaders int) {
    29  	GOMAXPROCS(numReaders + 1)
    30  	var m RWMutex
    31  	clocked := make(chan bool, numReaders)
    32  	var cunlock uint32
    33  	cdone := make(chan bool)
    34  	for i := 0; i < numReaders; i++ {
    35  		go parallelReader(&m, clocked, &cunlock, cdone)
    36  	}
    37  	// Wait for all parallel RLock()s to succeed.
    38  	for i := 0; i < numReaders; i++ {
    39  		<-clocked
    40  	}
    41  	atomic.StoreUint32(&cunlock, 1)
    42  	// Wait for the goroutines to finish.
    43  	for i := 0; i < numReaders; i++ {
    44  		<-cdone
    45  	}
    46  }
    47  
    48  func TestParallelRWMutexReaders(t *testing.T) {
    49  	defer GOMAXPROCS(GOMAXPROCS(-1))
    50  	doTestParallelReaders(1)
    51  	doTestParallelReaders(3)
    52  	doTestParallelReaders(4)
    53  }
    54  
    55  func reader(rwm *RWMutex, num_iterations int, activity *int32, cdone chan bool) {
    56  	for i := 0; i < num_iterations; i++ {
    57  		rwm.RLock()
    58  		n := atomic.AddInt32(activity, 1)
    59  		if n < 1 || n >= 10000 {
    60  			panic(fmt.Sprintf("wlock(%d)\n", n))
    61  		}
    62  		for i := 0; i < 100; i++ {
    63  		}
    64  		atomic.AddInt32(activity, -1)
    65  		rwm.RUnlock()
    66  	}
    67  	cdone <- true
    68  }
    69  
    70  func writer(rwm *RWMutex, num_iterations int, activity *int32, cdone chan bool) {
    71  	for i := 0; i < num_iterations; i++ {
    72  		rwm.Lock()
    73  		n := atomic.AddInt32(activity, 10000)
    74  		if n != 10000 {
    75  			panic(fmt.Sprintf("wlock(%d)\n", n))
    76  		}
    77  		for i := 0; i < 100; i++ {
    78  		}
    79  		atomic.AddInt32(activity, -10000)
    80  		rwm.Unlock()
    81  	}
    82  	cdone <- true
    83  }
    84  
    85  func HammerRWMutex(gomaxprocs, numReaders, num_iterations int) {
    86  	GOMAXPROCS(gomaxprocs)
    87  	// Number of active readers + 10000 * number of active writers.
    88  	var activity int32
    89  	var rwm RWMutex
    90  	cdone := make(chan bool)
    91  	go writer(&rwm, num_iterations, &activity, cdone)
    92  	var i int
    93  	for i = 0; i < numReaders/2; i++ {
    94  		go reader(&rwm, num_iterations, &activity, cdone)
    95  	}
    96  	go writer(&rwm, num_iterations, &activity, cdone)
    97  	for ; i < numReaders; i++ {
    98  		go reader(&rwm, num_iterations, &activity, cdone)
    99  	}
   100  	// Wait for the 2 writers and all readers to finish.
   101  	for i := 0; i < 2+numReaders; i++ {
   102  		<-cdone
   103  	}
   104  }
   105  
   106  func TestRWMutex(t *testing.T) {
   107  	defer GOMAXPROCS(GOMAXPROCS(-1))
   108  	n := 1000
   109  	if testing.Short() {
   110  		n = 5
   111  	}
   112  	HammerRWMutex(1, 1, n)
   113  	HammerRWMutex(1, 3, n)
   114  	HammerRWMutex(1, 10, n)
   115  	HammerRWMutex(4, 1, n)
   116  	HammerRWMutex(4, 3, n)
   117  	HammerRWMutex(4, 10, n)
   118  	HammerRWMutex(10, 1, n)
   119  	HammerRWMutex(10, 3, n)
   120  	HammerRWMutex(10, 10, n)
   121  	HammerRWMutex(10, 5, n)
   122  }
   123  
   124  func BenchmarkRWMutexUncontended(b *testing.B) {
   125  	type PaddedRWMutex struct {
   126  		RWMutex
   127  		pad [32]uint32
   128  	}
   129  	b.RunParallel(func(pb *testing.PB) {
   130  		var rwm PaddedRWMutex
   131  		for pb.Next() {
   132  			rwm.RLock()
   133  			rwm.RLock()
   134  			rwm.RUnlock()
   135  			rwm.RUnlock()
   136  			rwm.Lock()
   137  			rwm.Unlock()
   138  		}
   139  	})
   140  }
   141  
   142  func benchmarkRWMutex(b *testing.B, localWork, writeRatio int) {
   143  	var rwm RWMutex
   144  	b.RunParallel(func(pb *testing.PB) {
   145  		foo := 0
   146  		for pb.Next() {
   147  			foo++
   148  			if foo%writeRatio == 0 {
   149  				rwm.Lock()
   150  				rwm.Unlock()
   151  			} else {
   152  				rwm.RLock()
   153  				for i := 0; i != localWork; i += 1 {
   154  					foo *= 2
   155  					foo /= 2
   156  				}
   157  				rwm.RUnlock()
   158  			}
   159  		}
   160  		_ = foo
   161  	})
   162  }
   163  
// BenchmarkRWMutexWrite100: 1 write per 100 ops, no read-side work.
func BenchmarkRWMutexWrite100(b *testing.B) {
	benchmarkRWMutex(b, 0, 100)
}
   167  
// BenchmarkRWMutexWrite10: 1 write per 10 ops, no read-side work.
func BenchmarkRWMutexWrite10(b *testing.B) {
	benchmarkRWMutex(b, 0, 10)
}
   171  
// BenchmarkRWMutexWorkWrite100: 1 write per 100 ops, 100 iterations of
// busywork under each read lock.
func BenchmarkRWMutexWorkWrite100(b *testing.B) {
	benchmarkRWMutex(b, 100, 100)
}
   175  
// BenchmarkRWMutexWorkWrite10: 1 write per 10 ops, 100 iterations of
// busywork under each read lock.
func BenchmarkRWMutexWorkWrite10(b *testing.B) {
	benchmarkRWMutex(b, 100, 10)
}