bitbucket.org/number571/tendermint@v0.8.14/internal/libs/clist/clist.go

package clist

/*

The purpose of CList is to provide a goroutine-safe linked-list.
This list can be traversed concurrently by any number of goroutines.
However, removed CElements cannot be added back.
NOTE: Not all methods of container/list are (yet) implemented.
NOTE: Callers must consistently call DetachPrev or DetachNext on removed
elements so that they can be garbage collected.

*/

import (
	"fmt"
	"sync"

	tmsync "bitbucket.org/number571/tendermint/internal/libs/sync"
)

// MaxLength is the maximum number of elements a linked list is allowed to
// contain. If more elements are pushed to the list it will panic.
const MaxLength = int(^uint(0) >> 1)
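
// The sketch below illustrates the basic usage described in the package
// comment: construct a list, push values, and walk it with Front/Next.
// It is an illustrative example only (the function name exampleBasicUsage is
// not part of the package API) and relies solely on the exported methods
// defined in this file.
func exampleBasicUsage() {
	l := New() // empty list with MaxLength capacity

	// Push a few values; PushBack returns the new element.
	for i := 0; i < 3; i++ {
		l.PushBack(i)
	}

	// Non-blocking traversal: Front/Next return nil at the boundaries.
	for e := l.Front(); e != nil; e = e.Next() {
		fmt.Println(e.Value)
	}

	fmt.Println("len:", l.Len())
}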

/*

CElement is an element of a linked-list.
Traversal from a CElement is goroutine-safe.

We can't avoid using WaitGroups or for-loops given the documentation
spec without re-implementing the primitives that already exist in
golang/sync. Notice that WaitGroup allows many goroutines to be
simultaneously released, which is what we want. Mutex doesn't do
this. RWMutex does this, but it's clumsy to use in the way that a
WaitGroup would be used -- and we'd end up having two RWMutexes for
prev/next each, which is doubly confusing.

sync.Cond would be sort-of useful, but we don't need a write-lock in
the for-loop. Use sync.Cond when you need serial access to the
"condition". In our case our condition is whether `next != nil || removed`,
and there's no reason to serialize that condition for goroutines
waiting on NextWait() (since it's just a read operation).

*/
type CElement struct {
	mtx        tmsync.RWMutex
	prev       *CElement
	prevWg     *sync.WaitGroup
	prevWaitCh chan struct{}
	next       *CElement
	nextWg     *sync.WaitGroup
	nextWaitCh chan struct{}
	removed    bool

	Value interface{} // immutable
}

// NextWait is the blocking version of Next().
// It may return nil only if the CElement was the tail and has been removed.
func (e *CElement) NextWait() *CElement {
	for {
		e.mtx.RLock()
		next := e.next
		nextWg := e.nextWg
		removed := e.removed
		e.mtx.RUnlock()

		if next != nil || removed {
			return next
		}

		nextWg.Wait()
		// e.next doesn't necessarily exist here.
		// That's why we need to continue a for-loop.
	}
}
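
// The blocking accessors are meant for consumer loops that want to sleep
// until the next element exists. The sketch below (illustrative only;
// exampleBlockingConsumer is not part of the package API) walks a list with
// FrontWait/NextWait. Note that it can block indefinitely on an idle list,
// so a real consumer would often prefer the *WaitChan variants together with
// a select statement.
func exampleBlockingConsumer(l *CList, n int) {
	e := l.FrontWait() // blocks until the list is non-empty
	for i := 0; i < n && e != nil; i++ {
		fmt.Println(e.Value)
		e = e.NextWait() // blocks; nil only if e was the removed tail
	}
}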

// PrevWait is the blocking version of Prev().
// It may return nil only if the CElement was the head and has been removed.
func (e *CElement) PrevWait() *CElement {
	for {
		e.mtx.RLock()
		prev := e.prev
		prevWg := e.prevWg
		removed := e.removed
		e.mtx.RUnlock()

		if prev != nil || removed {
			return prev
		}

		prevWg.Wait()
	}
}

// PrevWaitChan can be used to wait until Prev becomes non-nil. Once it does,
// the channel will be closed. The channel is also closed if the element is
// removed while it is still the head.
func (e *CElement) PrevWaitChan() <-chan struct{} {
	e.mtx.RLock()
	defer e.mtx.RUnlock()

	return e.prevWaitCh
}

// NextWaitChan can be used to wait until Next becomes non-nil. Once it does,
// the channel will be closed. The channel is also closed if the element is
// removed while it is still the tail.
func (e *CElement) NextWaitChan() <-chan struct{} {
	e.mtx.RLock()
	defer e.mtx.RUnlock()

	return e.nextWaitCh
}
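
// The *WaitChan accessors exist so that callers can wait for the next/prev
// element inside a select statement instead of blocking unconditionally in
// NextWait/PrevWait. A minimal sketch (illustrative only; exampleSelectNext
// and its quit parameter are not part of the package API):
func exampleSelectNext(e *CElement, quit <-chan struct{}) *CElement {
	select {
	case <-e.NextWaitChan():
		// Either e.Next() is now non-nil, or e was removed while it was
		// the tail; in the latter case Next() still returns nil.
		return e.Next()
	case <-quit:
		return nil
	}
}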

// Next returns the following element, without blocking.
// It may return nil if e is the last element.
func (e *CElement) Next() *CElement {
	e.mtx.RLock()
	val := e.next
	e.mtx.RUnlock()
	return val
}

// Prev returns the preceding element, without blocking.
// It may return nil if e is the first element.
func (e *CElement) Prev() *CElement {
	e.mtx.RLock()
	prev := e.prev
	e.mtx.RUnlock()
	return prev
}

// Removed reports whether the element has been removed from its list.
func (e *CElement) Removed() bool {
	e.mtx.RLock()
	isRemoved := e.removed
	e.mtx.RUnlock()
	return isRemoved
}

// DetachNext clears e's next pointer so the former neighbor can be garbage
// collected. It must only be called after Remove(e).
func (e *CElement) DetachNext() {
	e.mtx.Lock()
	if !e.removed {
		e.mtx.Unlock()
		panic("DetachNext() must be called after Remove(e)")
	}
	e.next = nil
	e.mtx.Unlock()
}

// DetachPrev clears e's prev pointer so the former neighbor can be garbage
// collected. It must only be called after Remove(e).
func (e *CElement) DetachPrev() {
	e.mtx.Lock()
	if !e.removed {
		e.mtx.Unlock()
		panic("DetachPrev() must be called after Remove(e)")
	}
	e.prev = nil
	e.mtx.Unlock()
}

// SetNext points e at newNext and wakes up or resets waiters as needed.
// NOTE: This function needs to be safe for
// concurrent goroutines waiting on nextWg.
func (e *CElement) SetNext(newNext *CElement) {
	e.mtx.Lock()

	oldNext := e.next
	e.next = newNext
	if oldNext != nil && newNext == nil {
		// See https://golang.org/pkg/sync/:
		//
		// If a WaitGroup is reused to wait for several independent sets of
		// events, new Add calls must happen after all previous Wait calls have
		// returned.
		e.nextWg = waitGroup1() // WaitGroups are difficult to re-use.
		e.nextWaitCh = make(chan struct{})
	}
	if oldNext == nil && newNext != nil {
		e.nextWg.Done()
		close(e.nextWaitCh)
	}
	e.mtx.Unlock()
}
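
// The comment inside SetNext quotes the sync package's rule for reusing a
// WaitGroup: new Add calls must happen only after all previous Wait calls
// have returned. Because goroutines blocked in NextWait/PrevWait may still be
// inside Wait when a pointer flips back to nil, SetNext/SetPrev allocate a
// fresh WaitGroup (via waitGroup1) rather than calling Add on the old one.
// A stand-alone sketch of that pattern (illustrative only;
// exampleFreshWaitGroup is not part of the package API):
func exampleFreshWaitGroup() {
	wg := waitGroup1() // counter at 1: "next is currently nil"
	done := make(chan struct{})
	go func() {
		wg.Wait() // a waiter, as in NextWait()
		close(done)
	}()
	wg.Done() // "next became non-nil": releases the waiter
	<-done

	// If next later becomes nil again, do not Add(1) to the old WaitGroup;
	// allocate a fresh one, exactly as SetNext does.
	wg = waitGroup1()
	_ = wg
}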

// SetPrev points e at newPrev and wakes up or resets waiters as needed.
// NOTE: This function needs to be safe for
// concurrent goroutines waiting on prevWg.
func (e *CElement) SetPrev(newPrev *CElement) {
	e.mtx.Lock()

	oldPrev := e.prev
	e.prev = newPrev
	if oldPrev != nil && newPrev == nil {
		e.prevWg = waitGroup1() // WaitGroups are difficult to re-use.
		e.prevWaitCh = make(chan struct{})
	}
	if oldPrev == nil && newPrev != nil {
		e.prevWg.Done()
		close(e.prevWaitCh)
	}
	e.mtx.Unlock()
}

// SetRemoved marks e as removed and wakes up any blocked waiters.
func (e *CElement) SetRemoved() {
	e.mtx.Lock()

	e.removed = true

	// This wakes up anyone waiting in either direction.
	if e.prev == nil {
		e.prevWg.Done()
		close(e.prevWaitCh)
	}
	if e.next == nil {
		e.nextWg.Done()
		close(e.nextWaitCh)
	}
	e.mtx.Unlock()
}

//--------------------------------------------------------------------------------

// CList represents a linked list.
// Use New to create a CList; the zero value is not ready to use because its
// internal wait structures are uninitialized.
// Operations are goroutine-safe.
// Panics if length grows beyond the max.
type CList struct {
	mtx    tmsync.RWMutex
	wg     *sync.WaitGroup
	waitCh chan struct{}
	head   *CElement // first element
	tail   *CElement // last element
	len    int       // list length
	maxLen int       // max list length
}

// New returns a CList with maximum length MaxLength.
// The list will panic if it grows beyond MaxLength.
func New() *CList { return newWithMax(MaxLength) }

// newWithMax returns a CList with the given maximum length.
// The list will panic if it grows beyond maxLength.
func newWithMax(maxLength int) *CList {
	l := new(CList)
	l.maxLen = maxLength

	l.wg = waitGroup1()
	l.waitCh = make(chan struct{})
	l.head = nil
	l.tail = nil
	l.len = 0

	return l
}

// Len returns the number of elements in the list.
func (l *CList) Len() int {
	l.mtx.RLock()
	n := l.len
	l.mtx.RUnlock()
	return n
}

// Front returns the first element of the list, or nil if the list is empty.
func (l *CList) Front() *CElement {
	l.mtx.RLock()
	head := l.head
	l.mtx.RUnlock()
	return head
}

// FrontWait blocks until the list is non-empty, then returns the first element.
func (l *CList) FrontWait() *CElement {
	// Loop until the head is non-nil; otherwise wait and try again.
	for {
		l.mtx.RLock()
		head := l.head
		wg := l.wg
		l.mtx.RUnlock()

		if head != nil {
			return head
		}
		wg.Wait()
		// NOTE: If you think l.head exists here, think harder.
	}
}

// Back returns the last element of the list, or nil if the list is empty.
func (l *CList) Back() *CElement {
	l.mtx.RLock()
	back := l.tail
	l.mtx.RUnlock()
	return back
}

// BackWait blocks until the list is non-empty, then returns the last element.
func (l *CList) BackWait() *CElement {
	for {
		l.mtx.RLock()
		tail := l.tail
		wg := l.wg
		l.mtx.RUnlock()

		if tail != nil {
			return tail
		}
		wg.Wait()
		// l.tail doesn't necessarily exist here.
		// That's why we need to continue a for-loop.
	}
}

// WaitChan can be used to wait until Front or Back becomes non-nil. Once it
// does, the channel will be closed.
func (l *CList) WaitChan() <-chan struct{} {
	l.mtx.Lock()
	defer l.mtx.Unlock()

	return l.waitCh
}
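
// WaitChan is the list-level analogue of NextWaitChan/PrevWaitChan: it lets a
// caller wait for the list to become non-empty inside a select statement
// instead of blocking in FrontWait/BackWait. A minimal sketch (illustrative
// only; exampleWaitForElements and its quit parameter are not part of the
// package API):
func exampleWaitForElements(l *CList, quit <-chan struct{}) *CElement {
	for {
		if e := l.Front(); e != nil {
			return e
		}
		select {
		case <-l.WaitChan():
			// The list became non-empty at some point; re-check Front, since
			// the element may already have been removed again.
		case <-quit:
			return nil
		}
	}
}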

// PushBack appends v to the end of the list and returns the new element.
// Panics if the list grows beyond its max length.
func (l *CList) PushBack(v interface{}) *CElement {
	l.mtx.Lock()

	// Construct a new element
	e := &CElement{
		prev:       nil,
		prevWg:     waitGroup1(),
		prevWaitCh: make(chan struct{}),
		next:       nil,
		nextWg:     waitGroup1(),
		nextWaitCh: make(chan struct{}),
		removed:    false,
		Value:      v,
	}

	// Release any waiters blocked in FrontWait/BackWait.
	if l.len == 0 {
		l.wg.Done()
		close(l.waitCh)
	}
	if l.len >= l.maxLen {
		panic(fmt.Sprintf("clist: maximum length list reached %d", l.maxLen))
	}
	l.len++

	// Modify the tail
	if l.tail == nil {
		l.head = e
		l.tail = e
	} else {
		e.SetPrev(l.tail) // We must init e first.
		l.tail.SetNext(e) // This will make e accessible.
		l.tail = e        // Update the list.
	}
	l.mtx.Unlock()
	return e
}

// Remove removes e from the list and returns its Value.
// CONTRACT: Caller must call e.DetachPrev() and/or e.DetachNext() to avoid memory leaks.
// NOTE: As per the contract of CList, removed elements cannot be added back.
func (l *CList) Remove(e *CElement) interface{} {
	l.mtx.Lock()

	prev := e.Prev()
	next := e.Next()

	if l.head == nil || l.tail == nil {
		l.mtx.Unlock()
		panic("Remove(e) on empty CList")
	}
	if prev == nil && l.head != e {
		l.mtx.Unlock()
		panic("Remove(e) with false head")
	}
	if next == nil && l.tail != e {
		l.mtx.Unlock()
		panic("Remove(e) with false tail")
	}

	// If we're removing the only item, make CList FrontWait/BackWait wait.
	if l.len == 1 {
		l.wg = waitGroup1() // WaitGroups are difficult to re-use.
		l.waitCh = make(chan struct{})
	}

	// Update l.len
	l.len--

	// Connect next/prev and set head/tail
	if prev == nil {
		l.head = next
	} else {
		prev.SetNext(next)
	}
	if next == nil {
		l.tail = prev
	} else {
		next.SetPrev(prev)
	}

	// Mark e as removed, otherwise waiters will wait forever.
	e.SetRemoved()

	l.mtx.Unlock()
	return e.Value
}
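
// The CONTRACT above, together with the package comment, means that a removed
// element must be detached from whichever neighbors the caller no longer
// needs so the garbage collector can reclaim them. A minimal sketch
// (illustrative only; exampleRemoveAndDetach is not part of the package API):
func exampleRemoveAndDetach(l *CList, e *CElement) interface{} {
	v := l.Remove(e) // unlinks e from the list and wakes up its waiters

	// e still points at its old neighbors; drop both references since this
	// caller keeps no iterator positioned at e.
	e.DetachNext()
	e.DetachPrev()

	return v
}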

// waitGroup1 returns a new WaitGroup with its counter already at 1.
func waitGroup1() *sync.WaitGroup {
	wg := &sync.WaitGroup{}
	wg.Add(1)
	return wg
}
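
// Putting the pieces together: a producer pushes values while a consumer
// walks the list from another goroutine, sleeping in FrontWait/NextWait until
// each element exists. This illustrates the concurrent traversal the package
// comment describes; the sketch itself is illustrative only, and
// exampleProducerConsumer is not part of the package API.
func exampleProducerConsumer() {
	l := New()
	done := make(chan struct{})
	const n = 3

	// Consumer: block until each of the n expected elements exists, in order.
	go func() {
		defer close(done)
		e := l.FrontWait() // blocks until the first element is pushed
		for i := 0; i < n; i++ {
			fmt.Println(e.Value)
			if i < n-1 {
				e = e.NextWait() // blocks until the next element is pushed
			}
		}
	}()

	// Producer: push n values from this goroutine.
	for i := 0; i < n; i++ {
		l.PushBack(i)
	}
	<-done
}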