github.com/tidwall/go@v0.0.0-20170415222209-6694a6888b7d/src/sync/mutex_test.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// GOMAXPROCS=10 go test

package sync_test

import (
	"fmt"
	"internal/testenv"
	"os"
	"os/exec"
	"runtime"
	"strings"
	. "sync"
	"testing"
	"time"
)

func HammerSemaphore(s *uint32, loops int, cdone chan bool) {
	for i := 0; i < loops; i++ {
		Runtime_Semacquire(s)
		Runtime_Semrelease(s, false)
	}
	cdone <- true
}

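// TestSemaphore hammers a single semaphore from ten goroutines
// and waits for all of them to finish.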
func TestSemaphore(t *testing.T) {
	s := new(uint32)
	*s = 1
	c := make(chan bool)
	for i := 0; i < 10; i++ {
		go HammerSemaphore(s, 1000, c)
	}
	for i := 0; i < 10; i++ {
		<-c
	}
}

func BenchmarkUncontendedSemaphore(b *testing.B) {
	s := new(uint32)
	*s = 1
	HammerSemaphore(s, b.N, make(chan bool, 2))
}

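// BenchmarkContendedSemaphore measures the semaphore under contention:
// two goroutines hammer the same semaphore with GOMAXPROCS set to 2
// for the duration of the benchmark (the deferred call restores the
// previous value when the benchmark returns).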
func BenchmarkContendedSemaphore(b *testing.B) {
	b.StopTimer()
	s := new(uint32)
	*s = 1
	c := make(chan bool)
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
	b.StartTimer()

	go HammerSemaphore(s, b.N/2, c)
	go HammerSemaphore(s, b.N/2, c)
	<-c
	<-c
}

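// HammerMutex locks and unlocks m loops times,
// then signals completion on cdone.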
func HammerMutex(m *Mutex, loops int, cdone chan bool) {
	for i := 0; i < loops; i++ {
		m.Lock()
		m.Unlock()
	}
	cdone <- true
}

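// TestMutex hammers a single Mutex from ten goroutines. It also enables
// mutex profiling (restoring the zero rate on exit) so the contention
// profiling code path is exercised.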
func TestMutex(t *testing.T) {
	if n := runtime.SetMutexProfileFraction(1); n != 0 {
		t.Logf("got mutexrate %d expected 0", n)
	}
	defer runtime.SetMutexProfileFraction(0)
	m := new(Mutex)
	c := make(chan bool)
	for i := 0; i < 10; i++ {
		go HammerMutex(m, 1000, c)
	}
	for i := 0; i < 10; i++ {
		<-c
	}
}

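// misuseTests lists scenarios that misuse a Mutex or RWMutex (unlocking a
// lock that is not held). Each is expected to crash with a fatal error
// about an unlocked lock; they are run in a child process by TestMutexMisuse
// via the TESTMISUSE hook in init below.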
var misuseTests = []struct {
	name string
	f    func()
}{
	{
		"Mutex.Unlock",
		func() {
			var mu Mutex
			mu.Unlock()
		},
	},
	{
		"Mutex.Unlock2",
		func() {
			var mu Mutex
			mu.Lock()
			mu.Unlock()
			mu.Unlock()
		},
	},
	{
		"RWMutex.Unlock",
		func() {
			var mu RWMutex
			mu.Unlock()
		},
	},
	{
		"RWMutex.Unlock2",
		func() {
			var mu RWMutex
			mu.RLock()
			mu.Unlock()
		},
	},
	{
		"RWMutex.Unlock3",
		func() {
			var mu RWMutex
			mu.Lock()
			mu.Unlock()
			mu.Unlock()
		},
	},
	{
		"RWMutex.RUnlock",
		func() {
			var mu RWMutex
			mu.RUnlock()
		},
	},
	{
		"RWMutex.RUnlock2",
		func() {
			var mu RWMutex
			mu.Lock()
			mu.RUnlock()
		},
	},
	{
		"RWMutex.RUnlock3",
		func() {
			var mu RWMutex
			mu.RLock()
			mu.RUnlock()
			mu.RUnlock()
		},
	},
}

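// init implements the child side of TestMutexMisuse: when the test binary is
// re-executed as "binary TESTMISUSE <name>", it runs the named misuse case,
// which is expected to crash before "test completed" is printed.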
func init() {
	if len(os.Args) == 3 && os.Args[1] == "TESTMISUSE" {
		for _, test := range misuseTests {
			if test.name == os.Args[2] {
				test.f()
				fmt.Printf("test completed\n")
				os.Exit(0)
			}
		}
		fmt.Printf("unknown test\n")
		os.Exit(0)
	}
}

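// TestMutexMisuse re-executes the test binary in misuse mode for each case
// and verifies that the child fails with output mentioning an unlocked lock.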
func TestMutexMisuse(t *testing.T) {
	testenv.MustHaveExec(t)
	for _, test := range misuseTests {
		out, err := exec.Command(os.Args[0], "TESTMISUSE", test.name).CombinedOutput()
		if err == nil || !strings.Contains(string(out), "unlocked") {
			t.Errorf("%s: did not find failure with message about unlocked lock: %s\n%s\n", test.name, err, out)
		}
	}
}

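// TestMutexFairness checks that a goroutine that holds the mutex most of the
// time cannot starve another goroutine: the second goroutine, which briefly
// locks the mutex ten times, must finish within ten seconds.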
func TestMutexFairness(t *testing.T) {
	var mu Mutex
	stop := make(chan bool)
	defer close(stop)
	go func() {
		for {
			mu.Lock()
			time.Sleep(100 * time.Microsecond)
			mu.Unlock()
			select {
			case <-stop:
				return
			default:
			}
		}
	}()
	done := make(chan bool)
	go func() {
		for i := 0; i < 10; i++ {
			time.Sleep(100 * time.Microsecond)
			mu.Lock()
			mu.Unlock()
		}
		done <- true
	}()
	select {
	case <-done:
	case <-time.After(10 * time.Second):
		t.Fatalf("can't acquire Mutex in 10 seconds")
	}
}

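// BenchmarkMutexUncontended measures lock/unlock cost without contention:
// each parallel goroutine uses its own mutex, padded so that separate
// mutexes do not share a cache line.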
func BenchmarkMutexUncontended(b *testing.B) {
	type PaddedMutex struct {
		Mutex
		pad [128]uint8
	}
	b.RunParallel(func(pb *testing.PB) {
		var mu PaddedMutex
		for pb.Next() {
			mu.Lock()
			mu.Unlock()
		}
	})
}

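// benchmarkMutex measures lock/unlock of a single shared Mutex. If slack is
// set, more goroutines than procs are run (parallelism 10); if work is set,
// each iteration does some local computation outside the critical section.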
func benchmarkMutex(b *testing.B, slack, work bool) {
	var mu Mutex
	if slack {
		b.SetParallelism(10)
	}
	b.RunParallel(func(pb *testing.PB) {
		foo := 0
		for pb.Next() {
			mu.Lock()
			mu.Unlock()
			if work {
				for i := 0; i < 100; i++ {
					foo *= 2
					foo /= 2
				}
			}
		}
		_ = foo
	})
}

func BenchmarkMutex(b *testing.B) {
	benchmarkMutex(b, false, false)
}

func BenchmarkMutexSlack(b *testing.B) {
	benchmarkMutex(b, true, false)
}

func BenchmarkMutexWork(b *testing.B) {
	benchmarkMutex(b, false, true)
}

func BenchmarkMutexWorkSlack(b *testing.B) {
	benchmarkMutex(b, true, true)
}

func BenchmarkMutexNoSpin(b *testing.B) {
	// This benchmark models a situation where spinning in the mutex should not be
	// profitable, and confirms that spinning does no harm.
	// To achieve this we create an excess of goroutines, most of which do local work.
	// These goroutines yield during the local work, so switching from
	// a blocked goroutine to other goroutines is profitable.
	// As a matter of fact, this benchmark still triggers some spinning in the mutex.
	var m Mutex
	var acc0, acc1 uint64
	b.SetParallelism(4)
	b.RunParallel(func(pb *testing.PB) {
		c := make(chan bool)
		var data [4 << 10]uint64
		for i := 0; pb.Next(); i++ {
			if i%4 == 0 {
				m.Lock()
				acc0 -= 100
				acc1 += 100
				m.Unlock()
			} else {
				for i := 0; i < len(data); i += 4 {
					data[i]++
				}
				// An elaborate way to say runtime.Gosched
				// that does not put the goroutine onto the global run queue.
				go func() {
					c <- true
				}()
				<-c
			}
		}
	})
}

func BenchmarkMutexSpin(b *testing.B) {
	// This benchmark models a situation where spinning in the mutex should be
	// profitable. To achieve this we create one goroutine per proc.
	// These goroutines access a considerable amount of local data so that
	// unnecessary rescheduling is penalized by cache misses.
	var m Mutex
	var acc0, acc1 uint64
	b.RunParallel(func(pb *testing.PB) {
		var data [16 << 10]uint64
		for i := 0; pb.Next(); i++ {
			m.Lock()
			acc0 -= 100
			acc1 += 100
			m.Unlock()
			for i := 0; i < len(data); i += 4 {
				data[i]++
			}
		}
	})
}