github.com/mdempsky/go@v0.0.0-20151201204031-5dd372bd1e70/src/sync/waitgroup.go (about) 1 // Copyright 2011 The Go Authors. All rights reserved. 2 // Use of this source code is governed by a BSD-style 3 // license that can be found in the LICENSE file. 4 5 package sync 6 7 import ( 8 "internal/race" 9 "sync/atomic" 10 "unsafe" 11 ) 12 13 // A WaitGroup waits for a collection of goroutines to finish. 14 // The main goroutine calls Add to set the number of 15 // goroutines to wait for. Then each of the goroutines 16 // runs and calls Done when finished. At the same time, 17 // Wait can be used to block until all goroutines have finished. 18 type WaitGroup struct { 19 // 64-bit value: high 32 bits are counter, low 32 bits are waiter count. 20 // 64-bit atomic operations require 64-bit alignment, but 32-bit 21 // compilers do not ensure it. So we allocate 12 bytes and then use 22 // the aligned 8 bytes in them as state. 23 state1 [12]byte 24 sema uint32 25 } 26 27 func (wg *WaitGroup) state() *uint64 { 28 if uintptr(unsafe.Pointer(&wg.state1))%8 == 0 { 29 return (*uint64)(unsafe.Pointer(&wg.state1)) 30 } else { 31 return (*uint64)(unsafe.Pointer(&wg.state1[4])) 32 } 33 } 34 35 // Add adds delta, which may be negative, to the WaitGroup counter. 36 // If the counter becomes zero, all goroutines blocked on Wait are released. 37 // If the counter goes negative, Add panics. 38 // 39 // Note that calls with a positive delta that occur when the counter is zero 40 // must happen before a Wait. Calls with a negative delta, or calls with a 41 // positive delta that start when the counter is greater than zero, may happen 42 // at any time. 43 // Typically this means the calls to Add should execute before the statement 44 // creating the goroutine or other event to be waited for. 45 // If a WaitGroup is reused to wait for several independent sets of events, 46 // new Add calls must happen after all previous Wait calls have returned. 47 // See the WaitGroup example. 
// Add adds delta, which may be negative, to the WaitGroup counter.
// If the counter becomes zero, all goroutines blocked on Wait are released.
// If the counter goes negative, Add panics.
//
// Note that calls with a positive delta that occur when the counter is zero
// must happen before a Wait. Calls with a negative delta, or calls with a
// positive delta that start when the counter is greater than zero, may happen
// at any time.
// Typically this means the calls to Add should execute before the statement
// creating the goroutine or other event to be waited for.
// If a WaitGroup is reused to wait for several independent sets of events,
// new Add calls must happen after all previous Wait calls have returned.
// See the WaitGroup example.
func (wg *WaitGroup) Add(delta int) {
	statep := wg.state()
	if race.Enabled {
		_ = *statep // trigger nil deref early
		if delta < 0 {
			// Synchronize decrements with Wait.
			race.ReleaseMerge(unsafe.Pointer(wg))
		}
		race.Disable()
		defer race.Enable()
	}
	// The counter occupies the high 32 bits of the packed state word,
	// so shift delta up before the atomic add.
	state := atomic.AddUint64(statep, uint64(delta)<<32)
	v := int32(state >> 32) // new counter value
	w := uint32(state)      // current waiter count
	if race.Enabled {
		if delta > 0 && v == int32(delta) {
			// The first increment must be synchronized with Wait.
			// Need to model this as a read, because there can be
			// several concurrent wg.counter transitions from 0.
			race.Read(unsafe.Pointer(&wg.sema))
		}
	}
	if v < 0 {
		panic("sync: negative WaitGroup counter")
	}
	if w != 0 && delta > 0 && v == int32(delta) {
		panic("sync: WaitGroup misuse: Add called concurrently with Wait")
	}
	if v > 0 || w == 0 {
		// Counter still positive, or nobody is waiting: nothing to release.
		return
	}
	// This goroutine has set counter to 0 when waiters > 0.
	// Now there can't be concurrent mutations of state:
	// - Adds must not happen concurrently with Wait,
	// - Wait does not increment waiters if it sees counter == 0.
	// Still do a cheap sanity check to detect WaitGroup misuse.
	if *statep != state {
		panic("sync: WaitGroup misuse: Add called concurrently with Wait")
	}
	// Reset waiters count to 0.
	*statep = 0
	// Wake every waiter blocked in Wait; each one is parked on sema.
	for ; w != 0; w-- {
		runtime_Semrelease(&wg.sema)
	}
}

// Done decrements the WaitGroup counter.
func (wg *WaitGroup) Done() {
	wg.Add(-1)
}

// Wait blocks until the WaitGroup counter is zero.
func (wg *WaitGroup) Wait() {
	statep := wg.state()
	if race.Enabled {
		_ = *statep // trigger nil deref early
		race.Disable()
	}
	// CAS loop: retry until we either observe counter == 0 or succeed in
	// registering ourselves as a waiter in the low 32 bits of state.
	for {
		state := atomic.LoadUint64(statep)
		v := int32(state >> 32) // counter
		w := uint32(state)      // waiter count
		if v == 0 {
			// Counter is 0, no need to wait.
			if race.Enabled {
				race.Enable()
				race.Acquire(unsafe.Pointer(wg))
			}
			return
		}
		// Increment waiters count.
		if atomic.CompareAndSwapUint64(statep, state, state+1) {
			if race.Enabled && w == 0 {
				// Wait must be synchronized with the first Add.
				// Need to model this as a write to race with the read in Add.
				// As a consequence, can do the write only for the first waiter,
				// otherwise concurrent Waits will race with each other.
				race.Write(unsafe.Pointer(&wg.sema))
			}
			// Park until the final Add (the one dropping counter to 0)
			// releases the semaphore.
			runtime_Semacquire(&wg.sema)
			// Add zeroed state before releasing us; a nonzero value here
			// means Add was called again before this Wait returned.
			if *statep != 0 {
				panic("sync: WaitGroup is reused before previous Wait has returned")
			}
			if race.Enabled {
				race.Enable()
				race.Acquire(unsafe.Pointer(wg))
			}
			return
		}
	}
}