github.com/olivere/camlistore@v0.0.0-20140121221811-1b7ac2da0199/pkg/syncutil/lock.go

/*
Copyright 2013 The Camlistore Authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

     http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package syncutil

import (
	"bytes"
	"fmt"
	"log"
	"runtime"
	"sync"
	"sync/atomic"
	"time"

	"camlistore.org/pkg/strutil"
)

// RWMutexTracker is a sync.RWMutex that tracks who owns the current
// exclusive lock.  It's used for debugging deadlocks.
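//
// It exposes the same Lock/Unlock/RLock/RUnlock methods as sync.RWMutex, so a
// rough usage sketch (hypothetical caller code) is to swap the type in while
// debugging:
//
//	var mu syncutil.RWMutexTracker // instead of sync.RWMutex
//	mu.Lock()
//	// ... critical section ...
//	mu.Unlock()
//
//	// If something wedges, dump the stack that took the write lock:
//	log.Printf("lock holder: %s", mu.Holder())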
type RWMutexTracker struct {
	mu sync.RWMutex

	// Atomic counters for number waiting and having read and write locks.
	nwaitr int32
	nwaitw int32
	nhaver int32
	nhavew int32 // should always be 0 or 1

	logOnce sync.Once

	hmu    sync.Mutex
	holder []byte
	holdr  map[int64]bool // goroutines holding read lock
}

const stackBufSize = 16 << 20

var stackBuf = make(chan []byte, 8)

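// getBuf returns a stackBufSize-byte buffer for runtime.Stack, reusing one
// from the stackBuf free list when available.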
func getBuf() []byte {
	select {
	case b := <-stackBuf:
		return b[:stackBufSize]
	default:
		return make([]byte, stackBufSize)
	}
}

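// putBuf returns b to the free list, dropping it if the list is full.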
func putBuf(b []byte) {
	select {
	case stackBuf <- b:
	default:
	}
}

var goroutineSpace = []byte("goroutine ")

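// GoroutineID returns the ID of the calling goroutine, parsed from the first
// line of its runtime.Stack output.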
func GoroutineID() int64 {
	b := getBuf()
	defer putBuf(b)
	b = b[:runtime.Stack(b, false)]
	// Parse the 4707 out of "goroutine 4707 ["
	b = bytes.TrimPrefix(b, goroutineSpace)
	i := bytes.IndexByte(b, ' ')
	if i < 0 {
		panic(fmt.Sprintf("No space found in %q", b))
	}
	b = b[:i]
	n, err := strutil.ParseUintBytes(b, 10, 64)
	if err != nil {
		panic(fmt.Sprintf("Failed to parse goroutine ID out of %q: %v", b, err))
	}
	return int64(n)
}

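// startLogger starts a goroutine that logs this mutex's waiter/holder counts
// and the goroutines holding read locks once per second.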
func (m *RWMutexTracker) startLogger() {
	go func() {
		var buf bytes.Buffer
		for {
			time.Sleep(1 * time.Second)
			buf.Reset()
			m.hmu.Lock()
			for gid := range m.holdr {
				fmt.Fprintf(&buf, " [%d]", gid)
			}
			m.hmu.Unlock()
			log.Printf("Mutex %p: waitW %d haveW %d   waitR %d haveR %d %s",
				m,
				atomic.LoadInt32(&m.nwaitw),
				atomic.LoadInt32(&m.nhavew),
				atomic.LoadInt32(&m.nwaitr),
				atomic.LoadInt32(&m.nhaver), buf.Bytes())
		}
	}()
}

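// Lock acquires the exclusive lock, records (and logs) the acquiring
// goroutine's stack, and makes it available via Holder.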
func (m *RWMutexTracker) Lock() {
	m.logOnce.Do(m.startLogger)
	atomic.AddInt32(&m.nwaitw, 1)
	m.mu.Lock()
	atomic.AddInt32(&m.nwaitw, -1)
	atomic.AddInt32(&m.nhavew, 1)

	m.hmu.Lock()
	defer m.hmu.Unlock()
	if len(m.holder) == 0 {
		m.holder = make([]byte, stackBufSize)
	}
	m.holder = m.holder[:runtime.Stack(m.holder[:stackBufSize], false)]
	log.Printf("Lock at %s", string(m.holder))
}

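// Unlock clears the recorded holder stack and releases the exclusive lock.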
func (m *RWMutexTracker) Unlock() {
	m.hmu.Lock()
	m.holder = nil
	m.hmu.Unlock()

	atomic.AddInt32(&m.nhavew, -1)
	m.mu.Unlock()
}

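// RLock acquires a read lock, logging a warning if the acquisition looks like
// a potential read-write-read deadlock and crashing on a recursive RLock from
// the same goroutine.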
func (m *RWMutexTracker) RLock() {
	m.logOnce.Do(m.startLogger)
	atomic.AddInt32(&m.nwaitr, 1)

	// Catch read-write-read lock. See if somebody (us? via
	// another goroutine?) already has a read lock, and then
	// somebody else is waiting to write, meaning our second read
	// will deadlock.
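	// For example (hypothetical caller code):
	//
	//	m.RLock()                // goroutine A holds a read lock
	//	go func() { m.Lock() }() // goroutine B queues a write lock
	//	m.RLock()                // A's second RLock blocks behind B's Lock,
	//	                         // while B waits for A's first RLock: deadlock.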
	if atomic.LoadInt32(&m.nhaver) > 0 && atomic.LoadInt32(&m.nwaitw) > 0 {
		buf := getBuf()
		buf = buf[:runtime.Stack(buf, false)]
		log.Printf("Potential R-W-R deadlock at: %s", buf)
		putBuf(buf)
	}

	m.mu.RLock()
	atomic.AddInt32(&m.nwaitr, -1)
	atomic.AddInt32(&m.nhaver, 1)

	gid := GoroutineID()
	m.hmu.Lock()
	defer m.hmu.Unlock()
	if m.holdr == nil {
		m.holdr = make(map[int64]bool)
	}
	if m.holdr[gid] {
		buf := getBuf()
		buf = buf[:runtime.Stack(buf, false)]
		log.Fatalf("Recursive call to RLock: %s", buf)
	}
	m.holdr[gid] = true
}

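// stack returns a short (at most 1KB) stack trace of the calling goroutine.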
func stack() []byte {
	buf := make([]byte, 1024)
	return buf[:runtime.Stack(buf, false)]
}

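// RUnlock releases a read lock and removes the calling goroutine from the set
// of read-lock holders.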
func (m *RWMutexTracker) RUnlock() {
	atomic.AddInt32(&m.nhaver, -1)

	gid := GoroutineID()
	m.hmu.Lock()
	delete(m.holdr, gid)
	m.hmu.Unlock()

	m.mu.RUnlock()
}

// Holder returns the stack trace of the current exclusive lock holder,
// captured when it acquired the lock (with Lock). It returns the empty
// string if the lock is not currently held.
func (m *RWMutexTracker) Holder() string {
	m.hmu.Lock()
	defer m.hmu.Unlock()
	return string(m.holder)
}