github.com/whoyao/protocol@v0.0.0-20230519045905-2d8ace718ca5/utils/lock_tracker.go

package utils

import (
	"math"
	"runtime"
	"sync"
	"sync/atomic"
	"time"
	"unsafe"
)

var lockTrackerEnabled = false
var enableLockTrackerOnce sync.Once
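// lowResTime is a coarse, once-per-second clock maintained by the tracker's
// background worker; it lets trackLock stamp acquisitions without calling
// time.Now on every lock.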
var lowResTime uint32 = uint32(time.Now().Unix())

// EnableLockTracker enables the lock tracking background worker. This should
// be called during init, before any tracked mutexes are locked.
func EnableLockTracker() {
	enableLockTrackerOnce.Do(func() {
		lockTrackerEnabled = true
		go updateLowResTime()
	})
}

func updateLowResTime() {
	ticker := time.NewTicker(time.Second)
	for t := range ticker.C {
		atomic.StoreUint32(&lowResTime, uint32(t.Unix()))
	}
}

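// weakRefs holds uintptr "weak" references to every live lockTracker;
// finalized trackers have their slots zeroed and recycled via weakRefFree.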
var weakRefs []uintptr
var weakRefFree []int
var weakRefLock sync.Mutex

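// NumMutexes returns the number of currently tracked mutexes.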
func NumMutexes() int {
	weakRefLock.Lock()
	defer weakRefLock.Unlock()
	return len(weakRefs) - len(weakRefFree)
}

// ScanTrackedLocks checks all lock trackers and returns those that have been
// held for longer than threshold while other goroutines wait.
func ScanTrackedLocks(threshold time.Duration) []*StuckLock {
	minTS := uint32(time.Now().Add(-threshold).Unix())

	weakRefLock.Lock()
	defer weakRefLock.Unlock()
	return scanTrackedLocks(weakRefs, minTS)
}

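// nextScanMin is the cursor into weakRefs for incremental scans.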
var nextScanMin int

// ScanTrackedLocksI checks lock trackers incrementally, n at a time. A
// non-positive n defaults to 10000.
func ScanTrackedLocksI(threshold time.Duration, n int) []*StuckLock {
	minTS := uint32(time.Now().Add(-threshold).Unix())
	if n <= 0 {
		n = 10000
	}

	weakRefLock.Lock()
	defer weakRefLock.Unlock()

	min := nextScanMin
	max := nextScanMin + n
	if rl := len(weakRefs); rl <= max {
		max = rl
		nextScanMin = 0
	} else {
		nextScanMin = max
	}

	return scanTrackedLocks(weakRefs[min:max], minTS)
}

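// scanTrackedLocks dereferences the weak uintptr references directly;
// //go:norace silences the race detector for these intentionally
// unsynchronized reads, and //go:nosplit is presumably meant to avoid
// preemption while the raw pointers are converted back to *lockTracker.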
//go:norace
//go:nosplit
func scanTrackedLocks(refs []uintptr, minTS uint32) []*StuckLock {
	var stuck []*StuckLock
	for _, ref := range refs { // scan only the slice the caller selected
		if ref != 0 {
			t := (*lockTracker)(unsafe.Pointer(ref))
			ts := atomic.LoadUint32(&t.ts)
			waiting := atomic.LoadInt32(&t.waiting)
			if ts <= minTS && waiting > 0 {
				stuck = append(stuck, &StuckLock{
					stack:   append([]byte{}, t.stack...),
					ts:      ts,
					waiting: waiting,
					held:    atomic.LoadInt32(&t.held),
				})
			}
		}
	}
	return stuck
}

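// StuckLock is a snapshot of a tracked lock that has been held longer than
// the scan threshold while other goroutines were waiting on it.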
type StuckLock struct {
	stack   []byte
	ts      uint32
	waiting int32
	held    int32
}

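// FirstLockedAtStack returns the stack trace of the goroutine that first
// acquired the lock.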
func (d *StuckLock) FirstLockedAtStack() string {
	return string(d.stack)
}

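// HeldSince returns the low-resolution time at which the lock was acquired.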
func (d *StuckLock) HeldSince() time.Time {
	return time.Unix(int64(d.ts), 0)
}

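// NumGoroutineHeld returns how many goroutines held the lock at scan time
// (more than one is possible for read locks).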
func (d *StuckLock) NumGoroutineHeld() int {
	return int(d.held)
}

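// NumGoroutineWaiting returns how many goroutines were waiting to acquire
// the lock at scan time.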
func (d *StuckLock) NumGoroutineWaiting() int {
	return int(d.waiting)
}

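// lockTracker carries the bookkeeping for one tracked mutex: the first
// holder's stack, a low-resolution acquisition timestamp, waiting/held
// counters, and the tracker's slot index in weakRefs.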
type lockTracker struct {
	stack   []byte
	ts      uint32
	waiting int32
	held    int32
	ref     int
}

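// trackWait records that a goroutine has started waiting for the lock.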
func (t *lockTracker) trackWait() {
	if t != nil {
		atomic.AddInt32(&t.waiting, 1)
	}
}

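// trackLock records a successful acquisition. The first goroutine to hold
// the lock stamps the low-resolution time and captures its stack.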
func (t *lockTracker) trackLock() {
	if t != nil {
		atomic.AddInt32(&t.waiting, -1)
		if atomic.AddInt32(&t.held, 1) == 1 {
			atomic.StoreUint32(&t.ts, atomic.LoadUint32(&lowResTime))

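			// Grow the buffer until runtime.Stack can write the complete trace.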
			for {
				n := runtime.Stack(t.stack[:cap(t.stack)], false)
				if n < cap(t.stack) {
					t.stack = t.stack[:n]
					break
				}
				// Double the capacity; len may have been truncated by an
				// earlier capture, so doubling len could shrink the buffer.
				t.stack = make([]byte, cap(t.stack)*2)
			}
		}
	}
}

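// trackUnlock records a release; when the last holder unlocks, the timestamp
// is reset so the tracker no longer reports the lock as held.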
func (t *lockTracker) trackUnlock() {
	if t != nil {
		if atomic.AddInt32(&t.held, -1) == 0 {
			atomic.StoreUint32(&t.ts, math.MaxUint32)
		}
	}
}

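// newLockTracker allocates a tracker and registers a weak reference to it,
// reusing a free slot in weakRefs when one is available.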
func newLockTracker() *lockTracker {
	t := &lockTracker{
		stack: make([]byte, 1024),
	}

	runtime.SetFinalizer(t, finalizeLockTracker)

	weakRefLock.Lock()
	defer weakRefLock.Unlock()

	if fi := len(weakRefFree) - 1; fi >= 0 {
		t.ref = weakRefFree[fi]
		weakRefs[t.ref] = uintptr((unsafe.Pointer)(t))
		weakRefFree = weakRefFree[:fi]
	} else {
		t.ref = len(weakRefs)
		weakRefs = append(weakRefs, uintptr((unsafe.Pointer)(t)))
	}

	return t
}

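// finalizeLockTracker runs when the GC collects a tracker: it clears the
// tracker's weak reference and recycles its slot.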
func finalizeLockTracker(t *lockTracker) {
	weakRefLock.Lock()
	defer weakRefLock.Unlock()

	weakRefs[t.ref] = 0
	weakRefFree = append(weakRefFree, t.ref)
}

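// waiting is a sentinel published into a mutex's tracker pointer while
// another goroutine is still allocating the real tracker.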
var waiting lockTracker

//go:linkname sync_runtime_canSpin sync.runtime_canSpin
func sync_runtime_canSpin(int) bool

//go:linkname sync_runtime_doSpin sync.runtime_doSpin
func sync_runtime_doSpin()

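// lazyInitLockTracker returns the tracker for p, allocating it on first use.
// Losers of the CAS spin (or yield) until the winner replaces the waiting
// sentinel with the real tracker. It returns nil when tracking is disabled.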
func lazyInitLockTracker(p **lockTracker) *lockTracker {
	if !lockTrackerEnabled {
		return nil
	}
	up := (*unsafe.Pointer)(unsafe.Pointer(p))
	iter := 0
	for {
		if t := atomic.LoadPointer(up); t == nil {
			if atomic.CompareAndSwapPointer(up, nil, (unsafe.Pointer)(&waiting)) {
				atomic.StorePointer(up, (unsafe.Pointer)(newLockTracker()))
			}
		} else if t == (unsafe.Pointer)(&waiting) {
			if sync_runtime_canSpin(iter) {
				sync_runtime_doSpin()
				iter++
			} else {
				runtime.Gosched()
			}
		} else {
			return (*lockTracker)(t)
		}
	}
}

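// Mutex is a drop-in replacement for sync.Mutex that participates in lock
// tracking when EnableLockTracker has been called.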
type Mutex struct {
	sync.Mutex
	t *lockTracker
}

func (m *Mutex) Lock() {
	t := lazyInitLockTracker(&m.t)
	t.trackWait()
	m.Mutex.Lock()
	t.trackLock()
}

func (m *Mutex) Unlock() {
	t := lazyInitLockTracker(&m.t)
	t.trackUnlock()
	m.Mutex.Unlock()
}

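// RWMutex is a drop-in replacement for sync.RWMutex that participates in
// lock tracking when EnableLockTracker has been called. Read and write
// holders share the same waiting/held counters.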
type RWMutex struct {
	sync.RWMutex
	t *lockTracker
}

func (m *RWMutex) Lock() {
	t := lazyInitLockTracker(&m.t)
	t.trackWait()
	m.RWMutex.Lock()
	t.trackLock()
}

func (m *RWMutex) Unlock() {
	t := lazyInitLockTracker(&m.t)
	t.trackUnlock()
	m.RWMutex.Unlock()
}

func (m *RWMutex) RLock() {
	t := lazyInitLockTracker(&m.t)
	t.trackWait()
	m.RWMutex.RLock()
	t.trackLock()
}

func (m *RWMutex) RUnlock() {
	t := lazyInitLockTracker(&m.t)
	t.trackUnlock()
	m.RWMutex.RUnlock()
}
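// Usage sketch (not part of this file; the monitor function and its logging
// are assumptions): enable tracking during init, use utils.Mutex in place of
// sync.Mutex, and poll ScanTrackedLocks from a monitoring goroutine.
//
//	func init() {
//		utils.EnableLockTracker()
//	}
//
//	func monitor() {
//		for range time.Tick(time.Minute) {
//			for _, sl := range utils.ScanTrackedLocks(30 * time.Second) {
//				fmt.Printf("lock held since %v by %d goroutine(s), %d waiting:\n%s\n",
//					sl.HeldSince(), sl.NumGoroutineHeld(),
//					sl.NumGoroutineWaiting(), sl.FirstLockedAtStack())
//			}
//		}
//	}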