github.com/llvm-mirror/llgo@v0.0.0-20190322182713-bf6f0a60fce1/third_party/gofrontend/libgo/go/sync/waitgroup.go

// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package sync

import (
	"sync/atomic"
	"unsafe"
)

// A WaitGroup waits for a collection of goroutines to finish.
// The main goroutine calls Add to set the number of
// goroutines to wait for.  Then each of the goroutines
// runs and calls Done when finished.  At the same time,
// Wait can be used to block until all goroutines have finished.
type WaitGroup struct {
	// 64-bit value: high 32 bits are counter, low 32 bits are waiter count.
	// 64-bit atomic operations require 64-bit alignment, but 32-bit
	// compilers do not ensure it. So we allocate 12 bytes and then use
	// the aligned 8 bytes in them as state.
	state1 [12]byte
	sema   uint32
}
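
// Illustrative usage sketch (not part of the original source): the pattern
// described in the type comment above, with a hypothetical fetchAll helper.
// Add is called before each goroutine starts, Done when it finishes, and
// Wait blocks the caller until the counter drops back to zero.
//
//	func fetchAll(urls []string, fetch func(string)) {
//		var wg WaitGroup
//		for _, u := range urls {
//			wg.Add(1) // register the worker before starting it
//			go func(u string) {
//				defer wg.Done() // decrement even if fetch panics or returns early
//				fetch(u)
//			}(u)
//		}
//		wg.Wait() // block until every worker has called Done
//	}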
    25  
func (wg *WaitGroup) state() *uint64 {
	if uintptr(unsafe.Pointer(&wg.state1))%8 == 0 {
		return (*uint64)(unsafe.Pointer(&wg.state1))
	} else {
		return (*uint64)(unsafe.Pointer(&wg.state1[4]))
	}
}
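
// A worked example of the alignment trick above (illustration only, not from
// the original source): state1 is at least 4-byte aligned, so its address
// modulo 8 is either 0 or 4.
//
//	addr := uintptr(unsafe.Pointer(&wg.state1))
//	// addr%8 == 0: bytes [0:8) of state1 are 64-bit aligned, use them as state
//	// addr%8 == 4: &state1[4] falls on an 8-byte boundary, use bytes [4:12)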
    33  
// Add adds delta, which may be negative, to the WaitGroup counter.
// If the counter becomes zero, all goroutines blocked on Wait are released.
// If the counter goes negative, Add panics.
//
// Note that calls with a positive delta that occur when the counter is zero
// must happen before a Wait. Calls with a negative delta, or calls with a
// positive delta that start when the counter is greater than zero, may happen
// at any time.
// Typically this means the calls to Add should execute before the statement
// creating the goroutine or other event to be waited for.
// If a WaitGroup is reused to wait for several independent sets of events,
// new Add calls must happen after all previous Wait calls have returned.
// See the WaitGroup example.
func (wg *WaitGroup) Add(delta int) {
	statep := wg.state()
	if raceenabled {
		_ = *statep // trigger nil deref early
		if delta < 0 {
			// Synchronize decrements with Wait.
			raceReleaseMerge(unsafe.Pointer(wg))
		}
		raceDisable()
		defer raceEnable()
	}
	state := atomic.AddUint64(statep, uint64(delta)<<32)
	v := int32(state >> 32)
	w := uint32(state)
	if raceenabled {
		if delta > 0 && v == int32(delta) {
			// The first increment must be synchronized with Wait.
			// Need to model this as a read, because there can be
			// several concurrent wg.counter transitions from 0.
			raceRead(unsafe.Pointer(&wg.sema))
		}
	}
	if v < 0 {
		panic("sync: negative WaitGroup counter")
	}
	if w != 0 && delta > 0 && v == int32(delta) {
		panic("sync: WaitGroup misuse: Add called concurrently with Wait")
	}
	if v > 0 || w == 0 {
		return
	}
	// This goroutine has set counter to 0 when waiters > 0.
	// Now there can't be concurrent mutations of state:
	// - Adds must not happen concurrently with Wait,
	// - Wait does not increment waiters if it sees counter == 0.
	// Still do a cheap sanity check to detect WaitGroup misuse.
	if *statep != state {
		panic("sync: WaitGroup misuse: Add called concurrently with Wait")
	}
	// Reset waiters count to 0.
	*statep = 0
	for ; w != 0; w-- {
		runtime_Semrelease(&wg.sema)
	}
}
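
// Packing illustration (not from the original source): the counter lives in
// the high 32 bits of the state word and the waiter count in the low 32 bits,
// so a single atomic add updates the counter without disturbing the waiters.
//
//	state := atomic.AddUint64(statep, uint64(2)<<32) // Add(2) on a fresh WaitGroup
//	v := int32(state >> 32)                          // counter: 2
//	w := uint32(state)                               // waiters: 0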
    92  
// Done decrements the WaitGroup counter.
func (wg *WaitGroup) Done() {
	wg.Add(-1)
}
    97  
// Wait blocks until the WaitGroup counter is zero.
func (wg *WaitGroup) Wait() {
	statep := wg.state()
	if raceenabled {
		_ = *statep // trigger nil deref early
		raceDisable()
	}
	for {
		state := atomic.LoadUint64(statep)
		v := int32(state >> 32)
		w := uint32(state)
		if v == 0 {
			// Counter is 0, no need to wait.
			if raceenabled {
				raceEnable()
				raceAcquire(unsafe.Pointer(wg))
			}
			return
		}
		// Increment waiters count.
		if atomic.CompareAndSwapUint64(statep, state, state+1) {
			if raceenabled && w == 0 {
				// Wait must be synchronized with the first Add.
				// Need to model this as a write to race with the read in Add.
				// As a consequence, can do the write only for the first waiter,
				// otherwise concurrent Waits will race with each other.
				raceWrite(unsafe.Pointer(&wg.sema))
			}
			runtime_Semacquire(&wg.sema)
			if *statep != 0 {
				panic("sync: WaitGroup is reused before previous Wait has returned")
			}
			if raceenabled {
				raceEnable()
				raceAcquire(unsafe.Pointer(wg))
			}
			return
		}
	}
}
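
// Reuse sketch (illustration only, not from the original source): a WaitGroup
// may be reused for successive, independent batches of work, but each new
// round of Add calls must start only after the previous Wait has returned,
// which is what the reuse panic above guards against.
//
//	var wg WaitGroup
//	for round := 0; round < 2; round++ {
//		for i := 0; i < 3; i++ {
//			wg.Add(1)
//			go func() {
//				defer wg.Done()
//				// ... work for this round ...
//			}()
//		}
//		wg.Wait() // must return before the next round's Add calls begin
//	}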