github.com/xushiwei/go@v0.0.0-20130601165731-2b9d83f45bc9/src/pkg/sync/waitgroup.go

// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package sync

import (
	"sync/atomic"
	"unsafe"
)

// A WaitGroup waits for a collection of goroutines to finish.
// The main goroutine calls Add to set the number of
// goroutines to wait for. Then each of the goroutines
// runs and calls Done when finished. At the same time,
// Wait can be used to block until all goroutines have finished.
type WaitGroup struct {
	m       Mutex
	counter int32
	waiters int32
	sema    *uint32
}

// WaitGroup creates a new semaphore each time the old semaphore
// is released. This is to avoid the following race:
//
// G1: Add(1)
// G1: go G2()
// G1: Wait() // Context switch after Unlock() and before Semacquire().
// G2: Done() // Release semaphore: sema == 1, waiters == 0. G1 doesn't run yet.
// G3: Wait() // Finds counter == 0, waiters == 0, doesn't block.
// G3: Add(1) // Makes counter == 1, waiters == 0.
// G3: go G4()
// G3: Wait() // G1 still hasn't run, G3 finds sema == 1, unblocked! Bug.

// Add adds delta, which may be negative, to the WaitGroup counter.
// If the counter becomes zero, all goroutines blocked on Wait are released.
// If the counter goes negative, Add panics.
//
// Note that calls with positive delta must happen before the call to Wait,
// or else Wait may wait for too small a group. Typically this means the calls
// to Add should execute before the statement creating the goroutine or
// other event to be waited for. See the WaitGroup example.
func (wg *WaitGroup) Add(delta int) {
	if raceenabled {
		_ = wg.m.state // trigger nil deref early
		raceReleaseMerge(unsafe.Pointer(wg))
		raceDisable()
		defer raceEnable()
	}
	v := atomic.AddInt32(&wg.counter, int32(delta))
	if v < 0 {
		panic("sync: negative WaitGroup counter")
	}
	if v > 0 || atomic.LoadInt32(&wg.waiters) == 0 {
		return
	}
	wg.m.Lock()
	for i := int32(0); i < wg.waiters; i++ {
		runtime_Semrelease(wg.sema)
	}
	wg.waiters = 0
	wg.sema = nil
	wg.m.Unlock()
}

// Done decrements the WaitGroup counter.
func (wg *WaitGroup) Done() {
	wg.Add(-1)
}

// Wait blocks until the WaitGroup counter is zero.
func (wg *WaitGroup) Wait() {
	if raceenabled {
		_ = wg.m.state // trigger nil deref early
		raceDisable()
	}
	if atomic.LoadInt32(&wg.counter) == 0 {
		if raceenabled {
			raceEnable()
			raceAcquire(unsafe.Pointer(wg))
		}
		return
	}
	wg.m.Lock()
	atomic.AddInt32(&wg.waiters, 1)
	// This code is racing with the unlocked path in Add above.
	// The code above modifies counter and then reads waiters.
	// We must modify waiters and then read counter (the opposite order)
	// to avoid missing an Add.
	if atomic.LoadInt32(&wg.counter) == 0 {
		atomic.AddInt32(&wg.waiters, -1)
		if raceenabled {
			raceEnable()
			raceAcquire(unsafe.Pointer(wg))
			raceDisable()
		}
		wg.m.Unlock()
		if raceenabled {
			raceEnable()
		}
		return
	}
	if wg.sema == nil {
		wg.sema = new(uint32)
	}
	s := wg.sema
	wg.m.Unlock()
	runtime_Semacquire(s)
	if raceenabled {
		raceEnable()
		raceAcquire(unsafe.Pointer(wg))
	}
}
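
// A note on the ordering comment in Wait above (an informal sketch, not
// part of the original file): Add modifies counter and then reads
// waiters; Wait modifies waiters and then reads counter. Suppose the
// final Done races with a Wait. If Wait's read misses the decrement to
// zero, the decrement came after Wait's increment of waiters, so Add's
// later read of waiters sees the registration and releases the
// semaphore. If Wait's read does see zero, it deregisters and returns
// at once. Either way the waiter is woken. Had Wait instead read
// counter before registering, both sides could read before the other
// wrote: Add would see waiters == 0 and skip the wakeup, while Wait
// would see counter == 1 and block forever.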
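
// The WaitGroup doc comment above prescribes the call pattern: Add
// before each goroutine is created, Done when it finishes, Wait to
// block until all are done. A minimal sketch of that pattern follows
// (illustrative only; exampleWaitGroupUsage and its loop bounds are
// not part of the original file):
func exampleWaitGroupUsage() {
	var wg WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1) // count the goroutine before the go statement runs
		go func() {
			defer wg.Done() // decrement the counter when this goroutine exits
			// ... per-goroutine work ...
		}()
	}
	wg.Wait() // block until the counter reaches zero
}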
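
// The fresh-semaphore design motivated by the race comment above
// matters when a WaitGroup is reused for successive waits. A sketch of
// that reuse pattern (illustrative only; exampleWaitGroupReuse is not
// part of the original file):
func exampleWaitGroupReuse() {
	var wg WaitGroup
	for round := 0; round < 2; round++ {
		wg.Add(1)
		go wg.Done()
		// Because Add sets wg.sema to nil after releasing waiters, a
		// stale Semrelease from the previous round cannot wake this
		// round's Wait before its Done has run.
		wg.Wait()
	}
}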