github.com/lzhfromustc/gofuzz@v0.0.0-20211116160056-151b3108bbd1/sync/waitgroup.go (about)

     1  // Copyright 2011 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package sync
     6  
     7  import (
     8  	"internal/race"
     9  	"runtime"
    10  	"sync/atomic"
    11  	"unsafe"
    12  )
    13  
// A WaitGroup waits for a collection of goroutines to finish.
// The main goroutine calls Add to set the number of
// goroutines to wait for. Then each of the goroutines
// runs and calls Done when finished. At the same time,
// Wait can be used to block until all goroutines have finished.
//
// A WaitGroup must not be copied after first use.
type WaitGroup struct {
	noCopy noCopy

	// 64-bit value: high 32 bits are counter, low 32 bits are waiter count.
	// 64-bit atomic operations require 64-bit alignment, but 32-bit
	// compilers do not ensure it. So we allocate 12 bytes and then use
	// the aligned 8 bytes in them as state, and the other 4 as storage
	// for the sema.
	state1 [3]uint32

	///MYCODE
	// Record holds the recorded source location of Add/Wait call sites;
	// written via runtime.RecordTradOp when runtime.BoolRecordTrad is set.
	Record TradRecord
	// Info is the per-WaitGroup bookkeeping passed to the instrumented
	// runtime (runtime.Monitor / runtime.EnqueueBlockEntry) by Add and Wait.
	Info runtime.WgInfo
}
    35  
    36  // state returns pointers to the state and sema fields stored within wg.state1.
    37  func (wg *WaitGroup) state() (statep *uint64, semap *uint32) {
    38  	if uintptr(unsafe.Pointer(&wg.state1))%8 == 0 {
    39  		return (*uint64)(unsafe.Pointer(&wg.state1)), &wg.state1[2]
    40  	} else {
    41  		return (*uint64)(unsafe.Pointer(&wg.state1[1])), &wg.state1[0]
    42  	}
    43  }
    44  
// Add adds delta, which may be negative, to the WaitGroup counter.
// If the counter becomes zero, all goroutines blocked on Wait are released.
// If the counter goes negative, Add panics.
//
// Note that calls with a positive delta that occur when the counter is zero
// must happen before a Wait. Calls with a negative delta, or calls with a
// positive delta that start when the counter is greater than zero, may happen
// at any time.
// Typically this means the calls to Add should execute before the statement
// creating the goroutine or other event to be waited for.
// If a WaitGroup is reused to wait for several independent sets of events,
// new Add calls must happen after all previous Wait calls have returned.
// See the WaitGroup example.
func (wg *WaitGroup) Add(delta int) {
	///MYCODE:
	// Report this operation on wg to the instrumented runtime; when
	// traditional-record mode is on, also record the call-site location.
	runtime.Monitor(&wg.Info)
	if runtime.BoolRecordTrad {
		runtime.RecordTradOp(&wg.Record.PreLoc)
	}

	statep, semap := wg.state()
	if race.Enabled {
		_ = *statep // trigger nil deref early
		if delta < 0 {
			// Synchronize decrements with Wait.
			race.ReleaseMerge(unsafe.Pointer(wg))
		}
		race.Disable()
		defer race.Enable()
	}
	// Counter occupies the high 32 bits of state; waiter count the low 32.
	state := atomic.AddUint64(statep, uint64(delta)<<32)
	v := int32(state >> 32) // counter after the add
	w := uint32(state)      // number of waiters
	if race.Enabled && delta > 0 && v == int32(delta) {
		// The first increment must be synchronized with Wait.
		// Need to model this as a read, because there can be
		// several concurrent wg.counter transitions from 0.
		race.Read(unsafe.Pointer(semap))
	}
	if v < 0 {
		panic("sync: negative WaitGroup counter")
	}
	if w != 0 && delta > 0 && v == int32(delta) {
		// Counter went 0 -> delta while waiters exist: Add raced with Wait.
		panic("sync: WaitGroup misuse: Add called concurrently with Wait")
	}
	if v > 0 || w == 0 {
		// Work still outstanding, or nobody waiting: nothing to release.
		return
	}
	// This goroutine has set counter to 0 when waiters > 0.
	// Now there can't be concurrent mutations of state:
	// - Adds must not happen concurrently with Wait,
	// - Wait does not increment waiters if it sees counter == 0.
	// Still do a cheap sanity check to detect WaitGroup misuse.
	if *statep != state {
		panic("sync: WaitGroup misuse: Add called concurrently with Wait")
	}
	// Reset waiters count to 0.
	*statep = 0
	for ; w != 0; w-- {
		// Release one blocked Wait per recorded waiter.
		runtime_Semrelease(semap, false, 0)
	}
}
   107  
// Done decrements the WaitGroup counter by one.
// It is exactly Add(-1), so the gofuzz instrumentation performed at the
// top of Add (runtime.Monitor / RecordTradOp) also covers Done calls.
func (wg *WaitGroup) Done() {
	wg.Add(-1)
}
   112  
// Wait blocks until the WaitGroup counter is zero.
func (wg *WaitGroup) Wait() {
	///MYCODE:
	// Register with the instrumented runtime that this goroutine may block
	// on wg, and deregister on return.
	// NOTE(review): the operation kind passed is runtime.MuLock even though
	// this is a WaitGroup wait — confirm this is intended and not carried
	// over from the mutex instrumentation.
	blockEntry := runtime.EnqueueBlockEntry([]runtime.PrimInfo{&wg.Info}, runtime.MuLock)
	defer runtime.DequeueBlockEntry(blockEntry)
	runtime.Monitor(&wg.Info)
	if runtime.BoolRecordTrad {
		runtime.RecordTradOp(&wg.Record.PreLoc)
	}

	statep, semap := wg.state()
	if race.Enabled {
		_ = *statep // trigger nil deref early
		race.Disable()
	}
	for {
		state := atomic.LoadUint64(statep)
		v := int32(state >> 32) // counter
		w := uint32(state)      // waiter count
		if v == 0 {
			// Counter is 0, no need to wait.
			if race.Enabled {
				race.Enable()
				race.Acquire(unsafe.Pointer(wg))
			}
			return
		}
		// Increment waiters count.
		if atomic.CompareAndSwapUint64(statep, state, state+1) {
			if race.Enabled && w == 0 {
				// Wait must be synchronized with the first Add.
				// Need to model this as a write to race with the read in Add.
				// As a consequence, can do the write only for the first waiter,
				// otherwise concurrent Waits will race with each other.
				race.Write(unsafe.Pointer(semap))
			}
			// Block until the final Add releases the semaphore.
			runtime_Semacquire(semap)
			if *statep != 0 {
				// Add zeroes state before releasing waiters; a nonzero value
				// here means the WaitGroup was reused too early.
				panic("sync: WaitGroup is reused before previous Wait has returned")
			}
			if race.Enabled {
				race.Enable()
				race.Acquire(unsafe.Pointer(wg))
			}
			return
		}
		// CAS lost to a concurrent Add/Wait: reload state and retry.
	}
}