github.com/livekit/protocol@v1.16.1-0.20240517185851-47e4c6bba773/utils/lock_tracker.go

// Copyright 2023 LiveKit, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package utils

import (
	"math"
	"runtime"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"time"
	"unsafe"

	"golang.org/x/exp/slices"
)

const lockTrackerMaxStackDepth = 16

var lockTrackerEnabled = false
var enableLockTrackerOnce sync.Once

// lowResTime is a coarse Unix timestamp refreshed once per second by a
// background goroutine, so trackers can stamp acquisitions without calling
// time.Now on every Lock.
var lowResTime uint32 = uint32(time.Now().Unix())
var enableLockTrackerStackTrace uint32
// EnableLockTracker enables the lock tracking background worker. This should
// be called during init.
func EnableLockTracker() {
	enableLockTrackerOnce.Do(func() {
		lockTrackerEnabled = true
		go updateLowResTime()
	})
}

func ToggleLockTrackerStackTraces(enable bool) {
	var v uint32
	if enable {
		v = 1
	}
	atomic.StoreUint32(&enableLockTrackerStackTrace, v)
}
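
// Example (a sketch, not part of the original file): wiring the tracker up at
// process start, per the doc comment on EnableLockTracker. The function name
// is hypothetical.
func exampleEnableTracking() {
	EnableLockTracker()
	// optionally capture a stack trace on each first acquisition (extra cost)
	ToggleLockTrackerStackTraces(true)
}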

func updateLowResTime() {
	ticker := time.NewTicker(time.Second)
	for t := range ticker.C {
		atomic.StoreUint32(&lowResTime, uint32(t.Unix()))
	}
}

// weakRefs stores live trackers as uintptrs so the GC remains free to collect
// them; freed slots are recycled through weakRefFree.
var weakRefs []uintptr
var weakRefFree []int
var weakRefLock sync.Mutex

func NumMutexes() int {
	weakRefLock.Lock()
	defer weakRefLock.Unlock()
	return len(weakRefs) - len(weakRefFree)
}

// ScanTrackedLocks checks all lock trackers.
func ScanTrackedLocks(threshold time.Duration) []*StuckLock {
	minTS := uint32(time.Now().Add(-threshold).Unix())

	weakRefLock.Lock()
	defer weakRefLock.Unlock()
	return scanTrackedLocks(weakRefs, minTS)
}

var nextScanMin int

// ScanTrackedLocksI checks lock trackers incrementally, n at a time.
func ScanTrackedLocksI(threshold time.Duration, n int) []*StuckLock {
	minTS := uint32(time.Now().Add(-threshold).Unix())
	if n <= 0 {
		n = 10000
	}

	weakRefLock.Lock()
	defer weakRefLock.Unlock()

	min := nextScanMin
	max := nextScanMin + n
	if rl := len(weakRefs); rl <= max {
		max = rl
		nextScanMin = 0
	} else {
		nextScanMin = max
	}

	return scanTrackedLocks(weakRefs[min:max], minTS)
}
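
// Example (a sketch, not part of the original file): a watchdog goroutine that
// sweeps trackers in batches of 1000 once a minute and flags locks stuck for
// more than 30 seconds. The report callback is a hypothetical placeholder.
func exampleWatchStuckLocks(report func(*StuckLock)) {
	ticker := time.NewTicker(time.Minute)
	for range ticker.C {
		for _, sl := range ScanTrackedLocksI(30*time.Second, 1000) {
			report(sl)
		}
	}
}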

//go:norace
//go:nosplit
func scanTrackedLocks(refs []uintptr, minTS uint32) []*StuckLock {
	var stuck []*StuckLock
	for _, ref := range refs {
		if ref != 0 {
			t := (*lockTracker)(unsafe.Pointer(ref))
			ts := atomic.LoadUint32(&t.ts)
			waiting := atomic.LoadInt32(&t.waiting)
			if ts <= minTS && waiting > 0 {
				stuck = append(stuck, &StuckLock{
					stack:   slices.Clone(t.stack),
					ts:      ts,
					waiting: waiting,
					held:    atomic.LoadInt32(&t.held),
				})
			}
		}
	}
	return stuck
}

// StuckLock describes a tracked mutex that has been held since before the scan
// threshold while other goroutines wait on it.
type StuckLock struct {
	stack   []uintptr
	ts      uint32
	waiting int32
	held    int32
}

func (d *StuckLock) FirstLockedAtStack() string {
	fs := runtime.CallersFrames(d.stack)
	var b strings.Builder
	for {
		f, ok := fs.Next()
		if !ok {
			break
		}
		if f.Function != "" {
			b.WriteString(f.Function)
			b.WriteByte('\n')
		}
		if f.File != "" {
			b.WriteByte('\t')
			b.WriteString(f.File)
			b.WriteByte(':')
			b.WriteString(strconv.Itoa(f.Line))
			b.WriteByte('\n')
		}
	}
	return b.String()
}

func (d *StuckLock) HeldSince() time.Time {
	return time.Unix(int64(d.ts), 0)
}

func (d *StuckLock) NumGoroutineHeld() int {
	return int(d.held)
}

func (d *StuckLock) NumGoroutineWaiting() int {
	return int(d.waiting)
}
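
// Example (a sketch, not part of the original file): rendering a StuckLock with
// the accessors above. FirstLockedAtStack is empty unless stack traces were
// enabled before the lock was acquired. The function name is hypothetical.
func exampleFormatStuckLock(sl *StuckLock) string {
	var b strings.Builder
	b.WriteString("stuck lock: held since ")
	b.WriteString(sl.HeldSince().Format(time.RFC3339))
	b.WriteString(", holders=")
	b.WriteString(strconv.Itoa(sl.NumGoroutineHeld()))
	b.WriteString(", waiters=")
	b.WriteString(strconv.Itoa(sl.NumGoroutineWaiting()))
	b.WriteByte('\n')
	b.WriteString(sl.FirstLockedAtStack())
	return b.String()
}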

// lockTracker is the per-mutex state: ts holds the low-res Unix time of the
// first acquisition while held, and ref indexes this tracker's slot in weakRefs.
type lockTracker struct {
	stack   []uintptr
	ts      uint32
	waiting int32
	held    int32
	ref     int
}

func (t *lockTracker) trackWait() {
	if t != nil {
		atomic.AddInt32(&t.waiting, 1)
	}
}

func (t *lockTracker) trackLock() {
	if t != nil {
		atomic.AddInt32(&t.waiting, -1)
		// stamp the acquisition time only on the 0 -> 1 transition so that
		// overlapping holders keep the earliest hold time
		if atomic.AddInt32(&t.held, 1) == 1 {
			atomic.StoreUint32(&t.ts, atomic.LoadUint32(&lowResTime))

			if atomic.LoadUint32(&enableLockTrackerStackTrace) == 1 {
				n := runtime.Callers(2, t.stack[:lockTrackerMaxStackDepth])
				t.stack = t.stack[:n]
			}
		}
	}
}

func (t *lockTracker) trackUnlock() {
	if t != nil {
		// once fully released, push ts into the far future so scans never
		// report an unheld lock as stuck
		if atomic.AddInt32(&t.held, -1) == 0 {
			atomic.StoreUint32(&t.ts, math.MaxUint32)
		}
	}
}

func newLockTracker() *lockTracker {
	t := &lockTracker{
		stack: make([]uintptr, lockTrackerMaxStackDepth),
		ts:    math.MaxUint32,
	}

	// the finalizer releases this tracker's weak-ref slot once the owning
	// mutex becomes unreachable
	runtime.SetFinalizer(t, finalizeLockTracker)

	weakRefLock.Lock()
	defer weakRefLock.Unlock()

	// reuse a freed slot when available, otherwise grow the table
	if fi := len(weakRefFree) - 1; fi >= 0 {
		t.ref = weakRefFree[fi]
		weakRefs[t.ref] = uintptr((unsafe.Pointer)(t))
		weakRefFree = weakRefFree[:fi]
	} else {
		t.ref = len(weakRefs)
		weakRefs = append(weakRefs, uintptr((unsafe.Pointer)(t)))
	}

	return t
}

func finalizeLockTracker(t *lockTracker) {
	weakRefLock.Lock()
	defer weakRefLock.Unlock()

	weakRefs[t.ref] = 0
	weakRefFree = append(weakRefFree, t.ref)
}

// waiting is a sentinel published into a mutex's tracker pointer while another
// goroutine finishes allocating the real tracker.
var waiting lockTracker

//go:linkname sync_runtime_canSpin sync.runtime_canSpin
func sync_runtime_canSpin(int) bool

//go:linkname sync_runtime_doSpin sync.runtime_doSpin
func sync_runtime_doSpin()
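
// lazyInitLockTracker returns the tracker for p, allocating it on first use.
// The CAS winner installs the waiting sentinel, allocates the tracker, and
// publishes it; losers spin (or yield to the scheduler) until the real pointer
// appears. When tracking is disabled it returns nil, and the track* methods
// are no-ops on a nil receiver.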
func lazyInitLockTracker(p **lockTracker) *lockTracker {
	if !lockTrackerEnabled {
		return nil
	}
	up := (*unsafe.Pointer)(unsafe.Pointer(p))
	iter := 0
	for {
		if t := atomic.LoadPointer(up); t == nil {
			if atomic.CompareAndSwapPointer(up, nil, (unsafe.Pointer)(&waiting)) {
				atomic.StorePointer(up, (unsafe.Pointer)(newLockTracker()))
			}
		} else if t == (unsafe.Pointer)(&waiting) {
			if sync_runtime_canSpin(iter) {
				sync_runtime_doSpin()
				iter++
			} else {
				runtime.Gosched()
			}
		} else {
			return (*lockTracker)(t)
		}
	}
}

// Mutex is a drop-in replacement for sync.Mutex that reports to the lock
// tracker when tracking is enabled.
type Mutex struct {
	sync.Mutex
	t *lockTracker
}

func (m *Mutex) Lock() {
	t := lazyInitLockTracker(&m.t)
	t.trackWait()
	m.Mutex.Lock()
	t.trackLock()
}

func (m *Mutex) Unlock() {
	t := lazyInitLockTracker(&m.t)
	t.trackUnlock()
	m.Mutex.Unlock()
}
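
// Example (a sketch, not part of the original file): adopting tracking is a
// one-line change of field type from sync.Mutex to Mutex. The type and method
// below are hypothetical.
type exampleCounter struct {
	mu Mutex
	n  int
}

func (c *exampleCounter) inc() {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.n++
}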

// RWMutex is a drop-in replacement for sync.RWMutex with the same tracking.
// Readers and writers share one held counter, so the recorded timestamp is
// that of the first acquisition by any of them.
type RWMutex struct {
	sync.RWMutex
	t *lockTracker
}

func (m *RWMutex) Lock() {
	t := lazyInitLockTracker(&m.t)
	t.trackWait()
	m.RWMutex.Lock()
	t.trackLock()
}

func (m *RWMutex) Unlock() {
	t := lazyInitLockTracker(&m.t)
	t.trackUnlock()
	m.RWMutex.Unlock()
}

func (m *RWMutex) RLock() {
	t := lazyInitLockTracker(&m.t)
	t.trackWait()
	m.RWMutex.RLock()
	t.trackLock()
}

func (m *RWMutex) RUnlock() {
	t := lazyInitLockTracker(&m.t)
	t.trackUnlock()
	m.RWMutex.RUnlock()
}
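
// Example (a sketch, not part of the original file): a read-mostly map guarded
// by a tracked RWMutex. The type and method below are hypothetical.
type exampleRegistry struct {
	mu RWMutex
	m  map[string]int
}

func (r *exampleRegistry) get(k string) (int, bool) {
	r.mu.RLock()
	defer r.mu.RUnlock()
	v, ok := r.m[k]
	return v, ok
}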