github.com/qioalice/ekago/v3@v3.3.2-0.20221202205325-5c262d586ee4/ekafuture/mumap_test.go

// Copyright © 2020. All rights reserved.
// Author: Eagle Chen. Modifier: Ilya Stroy.
// Original: https://github.com/EagleChen/mapmutex (c133e97)
// Contacts: iyuryevich@pm.me, https://github.com/qioalice
// License: https://opensource.org/licenses/MIT

package ekafuture_test
     8  
     9  import (
    10  	"fmt"
    11  	"math/rand"
    12  	"sync"
    13  	"testing"
    14  	"time"
    15  
    16  	"github.com/qioalice/ekago/v3/ekafuture"
    17  
    18  	"github.com/stretchr/testify/assert"
    19  )
    20  
    21  const MaxRetry = 100000
    22  
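// TestLockSuccess checks the trivial case: a key nobody holds can be
// acquired with TryLock and then released with Unlock.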
func TestLockSuccess(t *testing.T) {
	m := ekafuture.NewMuMap()

	if !m.TryLock("123") {
		t.Error("fail to get lock")
	}
	m.Unlock("123")
}

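// TestLock checks how read and write locks on the same key interact:
// a held read lock blocks TryLock, Unlock of a read-locked key panics,
// and RUnlock releases it without panicking.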
func TestLock(t *testing.T) {
	m := ekafuture.NewMuMap()
	assert.True(t, m.RTryLock("abc"))
	assert.False(t, m.TryLock("abc"))
	assert.Panics(t, func() { m.Unlock("abc") })
	assert.NotPanics(t, func() { m.RUnlock("abc") })
}

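// TestLockFail checks that while one goroutine holds a key, the others
// cannot acquire it. The NewMuMapCustom arguments here are presumably
// retry/backoff tuning chosen so that TryLock gives up almost immediately
// ("fail fast"); the exact parameter semantics are defined by MuMap itself.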
func TestLockFail(t *testing.T) {
	// fail fast
	m := ekafuture.NewMuMapCustom(1, 1*time.Nanosecond, 1*time.Nanosecond, 2, 0.1)

	c := make(chan bool)
	finish := make(chan bool)

	num := 5
	success := make([]int, num)

	for i := 0; i < num; i++ {
		go func(i int) {
			if m.TryLock("123") {
				<-c // block here
				success[i] = 1
				m.Unlock("123")
			}
			finish <- true
		}(i)
	}

	// all goroutines but the holder fail to get the lock
	for i := 0; i < num-1; i++ {
		<-finish
	}

	sum := 0
	for _, s := range success {
		sum += s
	}

	if sum != 0 {
		t.Error("some other goroutine got the lock")
	}

	// let the successful one finish
	c <- true
	// wait
	<-finish
	for _, s := range success {
		sum += s
	}
	if sum != 1 {
		t.Error("no goroutine got the lock")
	}
}

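// TestLockIndividually checks that locks on different keys are independent:
// locking one key must not prevent locking another.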
func TestLockIndividually(t *testing.T) {
	m := ekafuture.NewMuMap()

	if !m.TryLock(123) || !m.TryLock(456) {
		t.Error("different locks affect each other")
	}
}

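// guardedUpdate is a minimal usage sketch (not part of the original test
// suite): it shows the pattern the benchmarks below exercise, i.e. claiming
// a key with TryLock, doing the work, and releasing it with Unlock, retrying
// with a short random sleep while the key is busy. The retry count and sleep
// duration are illustrative values, not library defaults.
func guardedUpdate(m *ekafuture.MuMap, key string, update func()) bool {
	for i := 0; i < MaxRetry; i++ {
		if m.TryLock(key) {
			update()
			m.Unlock(key)
			return true
		}
		time.Sleep(time.Duration(rand.Intn(100)+1) * time.Nanosecond)
	}
	return false
}

// The benchmark names below encode the workload as
// <actionCount>_<keyCount>_<goroutineNum>_<averageTime>:
// total jobs, distinct keys, worker goroutines, and average job duration
// in milliseconds.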
func BenchmarkMutex1000_100_20_20(b *testing.B)        { lockByOneMutex(1000, 100, 20, 20) }
func BenchmarkMapWithMutex1000_100_20_20(b *testing.B) { lockByMapWithMutex(1000, 100, 20, 20) }
func BenchmarkMapMutex1000_100_20_20(b *testing.B)     { lockByMapMutex(1000, 100, 20, 20) }

// fewer keys, more conflicts per map key
func BenchmarkMutex1000_20_20_20(b *testing.B)        { lockByOneMutex(1000, 20, 20, 20) }
func BenchmarkMapWithMutex1000_20_20_20(b *testing.B) { lockByMapWithMutex(1000, 20, 20, 20) }
func BenchmarkMapMutex1000_20_20_20(b *testing.B)     { lockByMapMutex(1000, 20, 20, 20) }

// fewer keys, more goroutines, more conflicts per map key
func BenchmarkMutex1000_20_40_20(b *testing.B)        { lockByOneMutex(1000, 20, 40, 20) }
func BenchmarkMapWithMutex1000_20_40_20(b *testing.B) { lockByMapWithMutex(1000, 20, 40, 20) }
func BenchmarkMapMutex1000_20_40_20(b *testing.B)     { lockByMapMutex(1000, 20, 40, 20) }

// even if we use a map to avoid unnecessary locking,
// with only 2 keys a lot of contention still occurs
func BenchmarkMutex1000_2_40_20(b *testing.B)        { lockByOneMutex(1000, 2, 40, 20) }
func BenchmarkMapWithMutex1000_2_40_20(b *testing.B) { lockByMapWithMutex(1000, 2, 40, 20) }
func BenchmarkMapMutex1000_2_40_20(b *testing.B)     { lockByMapMutex(1000, 2, 40, 20) }

// longer time per job, more conflicts per map key
func BenchmarkMutex1000_20_40_60(b *testing.B)        { lockByOneMutex(1000, 20, 40, 60) }
func BenchmarkMapWithMutex1000_20_40_60(b *testing.B) { lockByMapWithMutex(1000, 20, 40, 60) }
func BenchmarkMapMutex1000_20_40_60(b *testing.B)     { lockByMapMutex(1000, 20, 40, 60) }

// many more actions
func BenchmarkMutex10000_20_40_20(b *testing.B)        { lockByOneMutex(10000, 20, 40, 20) }
func BenchmarkMapWithMutex10000_20_40_20(b *testing.B) { lockByMapWithMutex(10000, 20, 40, 20) }
func BenchmarkMapMutex10000_20_40_20(b *testing.B)     { lockByMapMutex(10000, 20, 40, 20) }

func min(a, b int) int {
	if a < b {
		return a
	}
	return b
}

// splitLoad splits load across buckets roughly evenly, then randomly shifts
// work between adjacent pairs of buckets. load must be at least buckets;
// otherwise a zero-sized bucket makes rand.Intn panic below.
func splitLoad(load, buckets int) []int {
	result := make([]int, buckets)
	avg := load / buckets
	remain := load % buckets

	// split
	for i := range result {
		result[i] = avg
		if remain > 0 {
			result[i]++
			remain--
		}
	}

	// randomize
	for i := 0; i < buckets; i += 2 {
		if i+1 < buckets {
			r := rand.Intn(min(result[i], result[i+1]))
			if rand.Intn(r+1)%2 == 0 {
				result[i] -= r
				result[i+1] += r
			} else {
				result[i] += r
				result[i+1] -= r
			}
		}
	}

	return result
}

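// The three lockBy* drivers below run the same randomized workload with
// different locking strategies, so their benchmark results are directly
// comparable.

// lockByOneMutex guards all keys with a single shared sync.Mutex:
// the simplest strategy, and the one with the most contention.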
func lockByOneMutex(actionCount, keyCount, goroutineNum, averageTime int) {
	sharedSlice := make([]int, keyCount)
	var m sync.Mutex

	loads := splitLoad(actionCount, goroutineNum)
	var wg sync.WaitGroup
	wg.Add(goroutineNum)
	success := make([]int, goroutineNum)
	for i, load := range loads {
		go func(i, load int) {
			success[i] = runWithOneMutex(load, keyCount, averageTime,
				sharedSlice, &m)
			wg.Done()
		}(i, load)
	}

	wg.Wait()
	sum := 0
	for _, s := range success {
		sum += s
	}
	fmt.Println("one mutex: ", actionCount, keyCount, goroutineNum, averageTime, "sum is: ", sum)
}

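// lockByMapWithMutex emulates per-key locking with a plain map of "busy"
// flags guarded by a single sync.Mutex; see runWithMapWithMutex for the
// retry loop.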
func lockByMapWithMutex(actionCount, keyCount, goroutineNum, averageTime int) {
	sharedSlice := make([]int, keyCount)
	locks := make(map[int]bool)
	var m sync.Mutex

	loads := splitLoad(actionCount, goroutineNum)
	var wg sync.WaitGroup
	wg.Add(goroutineNum)
	success := make([]int, goroutineNum)
	for i, load := range loads {
		go func(i, load int) {
			success[i] = runWithMapWithMutex(load, keyCount, averageTime,
				sharedSlice, &m, locks)
			wg.Done()
		}(i, load)
	}

	wg.Wait()
	sum := 0
	for _, s := range success {
		sum += s
	}
	fmt.Println("map with mutex: ", actionCount, keyCount, goroutineNum, averageTime, "sum is: ", sum)
}

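// lockByMapMutex uses ekafuture.MuMap for per-key locking: the
// implementation these benchmarks are actually about.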
func lockByMapMutex(actionCount, keyCount, goroutineNum, averageTime int) {
	sharedSlice := make([]int, keyCount)
	m := ekafuture.NewMuMap()

	loads := splitLoad(actionCount, goroutineNum)
	var wg sync.WaitGroup
	wg.Add(goroutineNum)
	success := make([]int, goroutineNum)
	for i, load := range loads {
		go func(i, load int) {
			success[i] = runWithMapMutex(load, keyCount, averageTime,
				sharedSlice, m)
			wg.Done()
		}(i, load)
	}

	wg.Wait()
	sum := 0
	for _, s := range success {
		sum += s
	}
	fmt.Println("map mutex: ", actionCount, keyCount, goroutineNum, averageTime, "sum is: ", sum)
}

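// runWithOneMutex performs iterateNum jobs, holding the single shared mutex
// around each one; it never skips a job, so it returns iterateNum.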
func runWithOneMutex(iterateNum, keyCount, averageTime int, sharedSlice []int,
	m *sync.Mutex) int {
	success := 0
	for ; iterateNum > 0; iterateNum-- {
		m.Lock()

		idx := rand.Intn(keyCount)
		doTheJob(averageTime, idx, sharedSlice)
		success++

		m.Unlock()
	}

	return success
}

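// runWithMapWithMutex tries to perform iterateNum jobs. For each job it picks
// a random key and makes up to MaxRetry attempts to mark that key as busy in
// locks, sleeping slightly longer after each failed attempt; jobs whose key
// could not be claimed are skipped and not counted in the result.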
func runWithMapWithMutex(iterateNum, keyCount, averageTime int,
	sharedSlice []int, m *sync.Mutex, locks map[int]bool) int {
	success := 0
	for ; iterateNum > 0; iterateNum-- {
		idx := rand.Intn(keyCount)
		goon := false
		for i := 0; i < MaxRetry; i++ {
			m.Lock()
			if locks[idx] { // if locked
				m.Unlock()
				time.Sleep(time.Duration(rand.Intn(100)*(i/100+1)) * time.Nanosecond)
			} else { // if unlocked, lock it
				locks[idx] = true
				m.Unlock()
				goon = true
				break
			}
		}

		if !goon {
			continue // failed to get the lock, go on to the next iteration
		}
		doTheJob(averageTime, idx, sharedSlice)
		success++

		m.Lock()
		delete(locks, idx)
		m.Unlock()
	}
	return success
}

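// runWithMapMutex tries to perform iterateNum jobs, claiming each randomly
// chosen key with MuMap.TryLock; if the key cannot be locked, that job is
// skipped and not counted in the result.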
func runWithMapMutex(iterateNum, keyCount, averageTime int,
	sharedSlice []int, m *ekafuture.MuMap) int {
	success := 0
	for ; iterateNum > 0; iterateNum-- {
		idx := rand.Intn(keyCount)
		// failed to get the lock
		if !m.TryLock(idx) {
			continue
		}

		doTheJob(averageTime, idx, sharedSlice)
		success++

		m.Unlock(idx)
	}
	return success
}

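// doTheJob simulates real work: it sleeps a random number of milliseconds
// (on average averageTime) and stores that value in sharedSlice[idx].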
func doTheJob(averageTime, idx int, sharedSlice []int) {
	// do the real job: just sleep some time and set a value
	milliSec := rand.Intn(averageTime * 2)
	time.Sleep(time.Duration(milliSec) * time.Millisecond)
	sharedSlice[idx] = milliSec
}