github.com/FlowerWrong/netstack@v0.0.0-20191009141956-e5848263af28/tmutex/tmutex_test.go (about)

     1  // Copyright 2018 The gVisor Authors.
     2  //
     3  // Licensed under the Apache License, Version 2.0 (the "License");
     4  // you may not use this file except in compliance with the License.
     5  // You may obtain a copy of the License at
     6  //
     7  //     http://www.apache.org/licenses/LICENSE-2.0
     8  //
     9  // Unless required by applicable law or agreed to in writing, software
    10  // distributed under the License is distributed on an "AS IS" BASIS,
    11  // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    12  // See the License for the specific language governing permissions and
    13  // limitations under the License.
    14  
    15  package tmutex
    16  
    17  import (
    18  	"fmt"
    19  	"runtime"
    20  	"sync"
    21  	"sync/atomic"
    22  	"testing"
    23  	"time"
    24  )
    25  
// TestBasicLock verifies that Lock blocks while the mutex is held and
// unblocks once the holder calls Unlock.
func TestBasicLock(t *testing.T) {
	var m Mutex
	m.Init()

	m.Lock()

	// Attempt a blocking Lock from a different goroutine. The Lock call
	// must block because the mutex is already held, so nothing should
	// arrive on ch within the timeout below.
	ch := make(chan struct{}, 1)
	go func() {
		m.Lock()
		ch <- struct{}{}
		m.Unlock()
		// Second send signals that Unlock completed; the size-1 buffer
		// absorbs it if the main goroutine has already stopped receiving.
		ch <- struct{}{}
	}()

	select {
	case <-ch:
		t.Fatalf("Lock succeeded on locked mutex")
	case <-time.After(100 * time.Millisecond):
	}

	// Unlock the mutex and make sure that the goroutine waiting on Lock()
	// unblocks and succeeds.
	m.Unlock()

	select {
	case <-ch:
	case <-time.After(100 * time.Millisecond):
		t.Fatalf("Lock failed to acquire unlocked mutex")
	}

	// Make sure we can lock and unlock again.
	m.Lock()
	m.Unlock()
}
    62  
// TestTryLock verifies that TryLock succeeds exactly once on an unlocked
// mutex, fails while the mutex is held, and that a blocking Lock still
// works after a TryLock acquisition.
func TestTryLock(t *testing.T) {
	var m Mutex
	m.Init()

	// Try to lock. It should succeed.
	if !m.TryLock() {
		t.Fatalf("TryLock failed on unlocked mutex")
	}

	// Try to lock again, it should now fail.
	if m.TryLock() {
		t.Fatalf("TryLock succeeded on locked mutex")
	}

	// Attempt a blocking Lock from a different goroutine. The Lock call
	// must block because the mutex is held via TryLock, so nothing should
	// arrive on ch within the timeout below.
	ch := make(chan struct{}, 1)
	go func() {
		m.Lock()
		ch <- struct{}{}
		m.Unlock()
	}()

	select {
	case <-ch:
		t.Fatalf("Lock succeeded on locked mutex")
	case <-time.After(100 * time.Millisecond):
	}

	// Unlock the mutex and make sure that the goroutine waiting on Lock()
	// unblocks and succeeds.
	m.Unlock()

	select {
	case <-ch:
	case <-time.After(100 * time.Millisecond):
		t.Fatalf("Lock failed to acquire unlocked mutex")
	}
}
   102  
   103  func TestMutualExclusion(t *testing.T) {
   104  	var m Mutex
   105  	m.Init()
   106  
   107  	// Test mutual exclusion by running "gr" goroutines concurrently, and
   108  	// have each one increment a counter "iters" times within the critical
   109  	// section established by the mutex.
   110  	//
   111  	// If at the end the counter is not gr * iters, then we know that
   112  	// goroutines ran concurrently within the critical section.
   113  	//
   114  	// If one of the goroutines doesn't complete, it's likely a bug that
   115  	// causes to it to wait forever.
   116  	const gr = 1000
   117  	const iters = 100000
   118  	v := 0
   119  	var wg sync.WaitGroup
   120  	for i := 0; i < gr; i++ {
   121  		wg.Add(1)
   122  		go func() {
   123  			for j := 0; j < iters; j++ {
   124  				m.Lock()
   125  				v++
   126  				m.Unlock()
   127  			}
   128  			wg.Done()
   129  		}()
   130  	}
   131  
   132  	wg.Wait()
   133  
   134  	if v != gr*iters {
   135  		t.Fatalf("Bad count: got %v, want %v", v, gr*iters)
   136  	}
   137  }
   138  
   139  func TestMutualExclusionWithTryLock(t *testing.T) {
   140  	var m Mutex
   141  	m.Init()
   142  
   143  	// Similar to the previous, with the addition of some goroutines that
   144  	// only increment the count if TryLock succeeds.
   145  	const gr = 1000
   146  	const iters = 100000
   147  	total := int64(gr * iters)
   148  	var tryTotal int64
   149  	v := int64(0)
   150  	var wg sync.WaitGroup
   151  	for i := 0; i < gr; i++ {
   152  		wg.Add(2)
   153  		go func() {
   154  			for j := 0; j < iters; j++ {
   155  				m.Lock()
   156  				v++
   157  				m.Unlock()
   158  			}
   159  			wg.Done()
   160  		}()
   161  		go func() {
   162  			local := int64(0)
   163  			for j := 0; j < iters; j++ {
   164  				if m.TryLock() {
   165  					v++
   166  					m.Unlock()
   167  					local++
   168  				}
   169  			}
   170  			atomic.AddInt64(&tryTotal, local)
   171  			wg.Done()
   172  		}()
   173  	}
   174  
   175  	wg.Wait()
   176  
   177  	t.Logf("tryTotal = %d", tryTotal)
   178  	total += tryTotal
   179  
   180  	if v != total {
   181  		t.Fatalf("Bad count: got %v, want %v", v, total)
   182  	}
   183  }
   184  
   185  // BenchmarkTmutex is equivalent to TestMutualExclusion, with the following
   186  // differences:
   187  //
   188  // - The number of goroutines is variable, with the maximum value depending on
   189  // GOMAXPROCS.
   190  //
   191  // - The number of iterations per benchmark is controlled by the benchmarking
   192  // framework.
   193  //
   194  // - Care is taken to ensure that all goroutines participating in the benchmark
   195  // have been created before the benchmark begins.
   196  func BenchmarkTmutex(b *testing.B) {
   197  	for n, max := 1, 4*runtime.GOMAXPROCS(0); n > 0 && n <= max; n *= 2 {
   198  		b.Run(fmt.Sprintf("%d", n), func(b *testing.B) {
   199  			var m Mutex
   200  			m.Init()
   201  
   202  			var ready sync.WaitGroup
   203  			begin := make(chan struct{})
   204  			var end sync.WaitGroup
   205  			for i := 0; i < n; i++ {
   206  				ready.Add(1)
   207  				end.Add(1)
   208  				go func() {
   209  					ready.Done()
   210  					<-begin
   211  					for j := 0; j < b.N; j++ {
   212  						m.Lock()
   213  						m.Unlock()
   214  					}
   215  					end.Done()
   216  				}()
   217  			}
   218  
   219  			ready.Wait()
   220  			b.ResetTimer()
   221  			close(begin)
   222  			end.Wait()
   223  		})
   224  	}
   225  }
   226  
   227  // BenchmarkSyncMutex is equivalent to BenchmarkTmutex, but uses sync.Mutex as
   228  // a comparison point.
   229  func BenchmarkSyncMutex(b *testing.B) {
   230  	for n, max := 1, 4*runtime.GOMAXPROCS(0); n > 0 && n <= max; n *= 2 {
   231  		b.Run(fmt.Sprintf("%d", n), func(b *testing.B) {
   232  			var m sync.Mutex
   233  
   234  			var ready sync.WaitGroup
   235  			begin := make(chan struct{})
   236  			var end sync.WaitGroup
   237  			for i := 0; i < n; i++ {
   238  				ready.Add(1)
   239  				end.Add(1)
   240  				go func() {
   241  					ready.Done()
   242  					<-begin
   243  					for j := 0; j < b.N; j++ {
   244  						m.Lock()
   245  						m.Unlock()
   246  					}
   247  					end.Done()
   248  				}()
   249  			}
   250  
   251  			ready.Wait()
   252  			b.ResetTimer()
   253  			close(begin)
   254  			end.Wait()
   255  		})
   256  	}
   257  }