github.com/qgxpagamentos/mapmutex@v0.0.0-20200716162114-c133e97096b7/mutex_test.go

package mapmutex

import (
	"fmt"
	"math/rand"
	"sync"
	"testing"
	"time"
)

// MaxRetry bounds the number of lock attempts made by the plain
// map-plus-mutex benchmark helper (runWithMapWithMutex).
const MaxRetry = 100000

func TestLockSuccess(t *testing.T) {
	m := NewMapMutex()

	if !m.TryLock("123") {
		t.Error("fail to get lock")
	}
	m.Unlock("123")
}
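
// ExampleMutex is an illustrative sketch added alongside the original tests
// (not part of the upstream suite). It shows the basic TryLock/Unlock flow
// exercised by TestLockSuccess above; the key "order-42" is arbitrary.
func ExampleMutex() {
	m := NewMapMutex()
	if m.TryLock("order-42") {
		defer m.Unlock("order-42")
		fmt.Println("locked")
	}
	// Output: locked
}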

func TestLockFail(t *testing.T) {
	// customized to fail fast
	m := NewCustomizedMapMutex(1, 1, 1, 2, 0.1)

	c := make(chan bool)
	finish := make(chan bool)

	num := 5
	success := make([]int, num)

	for i := 0; i < num; i++ {
		go func(i int) {
			if m.TryLock("123") {
				<-c // block here while holding the lock
				success[i] = 1
				m.Unlock("123")
			}
			finish <- true
		}(i)
	}

	// all but the lock holder fail to get the lock and finish
	for i := 0; i < num-1; i++ {
		<-finish
	}

	sum := 0
	for _, s := range success {
		sum += s
	}

	if sum != 0 {
		t.Error("some other goroutine got the lock")
	}

	// let the successful goroutine finish
	c <- true
	// wait for it
	<-finish
	sum = 0
	for _, s := range success {
		sum += s
	}
	if sum != 1 {
		t.Error("no goroutine got the lock")
	}
}

func TestLockIndividually(t *testing.T) {
	m := NewMapMutex()

	if !m.TryLock(123) || !m.TryLock(456) {
		t.Error("different locks affect each other")
	}
}

// Benchmark names encode actionCount_keyCount_goroutineNum_averageTimeMs.
func BenchmarkMutex1000_100_20_20(b *testing.B)        { lockByOneMutex(1000, 100, 20, 20) }
func BenchmarkMapWithMutex1000_100_20_20(b *testing.B) { lockByMapWithMutex(1000, 100, 20, 20) }
func BenchmarkMapMutex1000_100_20_20(b *testing.B)     { lockByMapMutex(1000, 100, 20, 20) }

// fewer keys, more conflicts per key
func BenchmarkMutex1000_20_20_20(b *testing.B)        { lockByOneMutex(1000, 20, 20, 20) }
func BenchmarkMapWithMutex1000_20_20_20(b *testing.B) { lockByMapWithMutex(1000, 20, 20, 20) }
func BenchmarkMapMutex1000_20_20_20(b *testing.B)     { lockByMapMutex(1000, 20, 20, 20) }

// fewer keys, more goroutines, more conflicts per key
func BenchmarkMutex1000_20_40_20(b *testing.B)        { lockByOneMutex(1000, 20, 40, 20) }
func BenchmarkMapWithMutex1000_20_40_20(b *testing.B) { lockByMapWithMutex(1000, 20, 40, 20) }
func BenchmarkMapMutex1000_20_40_20(b *testing.B)     { lockByMapMutex(1000, 20, 40, 20) }

// even when a map is used to avoid unnecessary locking,
// only 2 keys still means heavy lock contention
func BenchmarkMutex1000_2_40_20(b *testing.B)        { lockByOneMutex(1000, 2, 40, 20) }
func BenchmarkMapWithMutex1000_2_40_20(b *testing.B) { lockByMapWithMutex(1000, 2, 40, 20) }
func BenchmarkMapMutex1000_2_40_20(b *testing.B)     { lockByMapMutex(1000, 2, 40, 20) }

// longer time per job, more conflicts per key
func BenchmarkMutex1000_20_40_60(b *testing.B)        { lockByOneMutex(1000, 20, 40, 60) }
func BenchmarkMapWithMutex1000_20_40_60(b *testing.B) { lockByMapWithMutex(1000, 20, 40, 60) }
func BenchmarkMapMutex1000_20_40_60(b *testing.B)     { lockByMapMutex(1000, 20, 40, 60) }

// many more actions
func BenchmarkMutex10000_20_40_20(b *testing.B)        { lockByOneMutex(10000, 20, 40, 20) }
func BenchmarkMapWithMutex10000_20_40_20(b *testing.B) { lockByMapWithMutex(10000, 20, 40, 20) }
func BenchmarkMapMutex10000_20_40_20(b *testing.B)     { lockByMapMutex(10000, 20, 40, 20) }
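
// Note: each benchmark above runs its fixed workload once per call and prints
// the number of completed actions via fmt.Println instead of looping over b.N,
// so compare the printed sums and wall-clock times rather than ns/op. They can
// be run with something like `go test -bench . -run XXX` (the exact flags are
// only a suggestion, not part of the original file).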

func min(a, b int) int {
	if a < b {
		return a
	}
	return b
}

// splitLoad splits load across buckets roughly evenly, then shuffles some work
// between adjacent buckets. load must be at least as large as buckets,
// otherwise rand.Intn below may panic on an empty bucket.
func splitLoad(load, buckets int) []int {
	result := make([]int, buckets)
	avg := load / buckets
	remain := load % buckets

	// split evenly, spreading the remainder over the first buckets
	for i := range result {
		result[i] = avg
		if remain > 0 {
			result[i]++
			remain--
		}
	}

	// randomize by moving a random amount between adjacent pairs of buckets
	for i := 0; i < buckets; i += 2 {
		if i+1 < buckets {
			r := rand.Intn(min(result[i], result[i+1]))
			if rand.Intn(r+1)%2 == 0 {
				result[i] -= r
				result[i+1] += r
			} else {
				result[i] += r
				result[i+1] -= r
			}
		}
	}

	return result
}
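
// TestSplitLoadSum is an added sanity-check sketch, not part of the original
// suite: whatever the randomized shuffling above does, the per-bucket loads
// returned by splitLoad must still sum to the total load passed in.
func TestSplitLoadSum(t *testing.T) {
	load, buckets := 1000, 7
	sum := 0
	for _, l := range splitLoad(load, buckets) {
		sum += l
	}
	if sum != load {
		t.Errorf("splitLoad changed the total load: got %d, want %d", sum, load)
	}
}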

// lockByOneMutex protects every key in sharedSlice with a single sync.Mutex.
func lockByOneMutex(actionCount, keyCount, goroutineNum, averageTime int) {
	sharedSlice := make([]int, keyCount)
	var m sync.Mutex

	loads := splitLoad(actionCount, goroutineNum)
	var wg sync.WaitGroup
	wg.Add(goroutineNum)
	success := make([]int, goroutineNum)
	for i, load := range loads {
		go func(i, load int) {
			success[i] = runWithOneMutex(load, keyCount, averageTime,
				sharedSlice, &m)
			wg.Done()
		}(i, load)
	}

	wg.Wait()
	sum := 0
	for _, s := range success {
		sum += s
	}
	fmt.Println("one mutex: ", actionCount, keyCount, goroutineNum, averageTime, "sum is: ", sum)
}

// lockByMapWithMutex protects each key with an entry in a plain map, which is
// itself guarded by a single sync.Mutex.
func lockByMapWithMutex(actionCount, keyCount, goroutineNum, averageTime int) {
	sharedSlice := make([]int, keyCount)
	locks := make(map[int]bool)
	var m sync.Mutex

	loads := splitLoad(actionCount, goroutineNum)
	var wg sync.WaitGroup
	wg.Add(goroutineNum)
	success := make([]int, goroutineNum)
	for i, load := range loads {
		go func(i, load int) {
			success[i] = runWithMapWithMutex(load, keyCount, averageTime,
				sharedSlice, &m, locks)
			wg.Done()
		}(i, load)
	}

	wg.Wait()
	sum := 0
	for _, s := range success {
		sum += s
	}
	fmt.Println("map with mutex: ", actionCount, keyCount, goroutineNum, averageTime, "sum is: ", sum)
}

// lockByMapMutex protects each key with the package's MapMutex.
func lockByMapMutex(actionCount, keyCount, goroutineNum, averageTime int) {
	sharedSlice := make([]int, keyCount)
	m := NewMapMutex()

	loads := splitLoad(actionCount, goroutineNum)
	var wg sync.WaitGroup
	wg.Add(goroutineNum)
	success := make([]int, goroutineNum)
	for i, load := range loads {
		go func(i, load int) {
			success[i] = runWithMapMutex(load, keyCount, averageTime,
				sharedSlice, m)
			wg.Done()
		}(i, load)
	}

	wg.Wait()
	sum := 0
	for _, s := range success {
		sum += s
	}
	fmt.Println("map mutex: ", actionCount, keyCount, goroutineNum, averageTime, "sum is: ", sum)
}

func runWithOneMutex(iterateNum, keyCount, averageTime int, sharedSlice []int,
	m *sync.Mutex) int {
	success := 0
	for ; iterateNum > 0; iterateNum-- {
		m.Lock()

		idx := rand.Intn(keyCount)
		doTheJob(averageTime, idx, sharedSlice)
		success++

		m.Unlock()
	}

	return success
}

func runWithMapWithMutex(iterateNum, keyCount, averageTime int,
	sharedSlice []int, m *sync.Mutex, locks map[int]bool) int {
	success := 0
	for ; iterateNum > 0; iterateNum-- {
		idx := rand.Intn(keyCount)
		goon := false
		for i := 0; i < MaxRetry; i++ {
			m.Lock()
			if locks[idx] { // already locked by someone else, back off and retry
				m.Unlock()
				time.Sleep(time.Duration(rand.Intn(100)*(i/100+1)) * time.Nanosecond)
			} else { // unlocked, so lock it
				locks[idx] = true
				m.Unlock()
				goon = true
				break
			}
		}

		if !goon {
			continue // failed to get the lock, go on to the next iteration
		}
		doTheJob(averageTime, idx, sharedSlice)
		success++

		m.Lock()
		delete(locks, idx)
		m.Unlock()
	}
	return success
}

func runWithMapMutex(iterateNum, keyCount, averageTime int,
	sharedSlice []int, m *Mutex) int {
	success := 0
	for ; iterateNum > 0; iterateNum-- {
		idx := rand.Intn(keyCount)
		// failed to get the lock, skip this iteration
		if !m.TryLock(idx) {
			continue
		}

		doTheJob(averageTime, idx, sharedSlice)
		success++

		m.Unlock(idx)
	}
	return success
}
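
// lockWithRetry is an illustrative sketch, not part of the original file: it
// layers an explicit retry-with-backoff loop on top of MapMutex's TryLock,
// mirroring the retry pattern runWithMapWithMutex uses for the plain
// map-plus-mutex variant. maxRetry and the nanosecond sleep are arbitrary.
func lockWithRetry(m *Mutex, key interface{}, maxRetry int) bool {
	for i := 0; i < maxRetry; i++ {
		if m.TryLock(key) {
			return true // caller must m.Unlock(key) when done
		}
		// brief random pause before trying again
		time.Sleep(time.Duration(rand.Intn(100)) * time.Nanosecond)
	}
	return false
}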

func doTheJob(averageTime, idx int, sharedSlice []int) {
	// simulate the real job: sleep for a while and record a value
	milliSec := rand.Intn(averageTime * 2)
	time.Sleep(time.Duration(milliSec) * time.Millisecond)
	sharedSlice[idx] = milliSec
}