github.com/panjjo/go@v0.0.0-20161104043856-d62b31386338/src/runtime/mgcwork.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

const (
	_WorkbufSize = 2048 // in bytes; larger values result in less contention
)

// Garbage collector work pool abstraction.
//
// This implements a producer/consumer model for pointers to grey
// objects. A grey object is one that is marked and on a work
// queue. A black object is marked and not on a work queue.
//
// Write barriers, root discovery, stack scanning, and object scanning
// produce pointers to grey objects. Scanning consumes pointers to
// grey objects, thus blackening them, and then scans them,
// potentially producing new pointers to grey objects.

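// For illustration only (a sketch, not part of the original source), a
// mark worker's inner produce/consume loop has roughly this shape:
//
//	gcw := &getg().m.p.ptr().gcw
//	gcw.put(obj) // obj becomes grey: marked and queued
//	for p := gcw.tryGet(); p != 0; p = gcw.tryGet() {
//		scanobject(p, gcw) // blacken p; scanning may grey more objects
//	}
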
// A wbufptr holds a workbuf*, but protects it from write barriers.
// workbufs never live on the heap, so write barriers are unnecessary.
// Write barriers on workbuf pointers may also be dangerous in the GC.
//
// TODO: Since workbuf is now go:notinheap, this isn't necessary.
type wbufptr uintptr

func wbufptrOf(w *workbuf) wbufptr {
	return wbufptr(unsafe.Pointer(w))
}

func (wp wbufptr) ptr() *workbuf {
	return (*workbuf)(unsafe.Pointer(wp))
}

// A gcWork provides the interface to produce and consume work for the
// garbage collector.
//
// A gcWork can be used on the stack as follows:
//
//     (preemption must be disabled)
//     gcw := &getg().m.p.ptr().gcw
//     .. call gcw.put() to produce and gcw.get() to consume ..
//     if gcBlackenPromptly {
//         gcw.dispose()
//     }
//
// It's important that any use of gcWork during the mark phase prevent
// the garbage collector from transitioning to mark termination since
// gcWork may locally hold GC work buffers. This can be done by
// disabling preemption (systemstack or acquirem).
type gcWork struct {
	// wbuf1 and wbuf2 are the primary and secondary work buffers.
	//
	// This can be thought of as a stack of both work buffers'
	// pointers concatenated. When we pop the last pointer, we
	// shift the stack up by one work buffer by bringing in a new
	// full buffer and discarding an empty one. When we fill both
	// buffers, we shift the stack down by one work buffer by
	// bringing in a new empty buffer and discarding a full one.
	// This way we have one buffer's worth of hysteresis, which
	// amortizes the cost of getting or putting a work buffer over
	// at least one buffer of work and reduces contention on the
	// global work lists.
	//
	// wbuf1 is always the buffer we're currently pushing to and
	// popping from and wbuf2 is the buffer that will be discarded
	// next.
	//
	// Invariant: Both wbuf1 and wbuf2 are nil or neither are.
	wbuf1, wbuf2 wbufptr

	// Bytes marked (blackened) on this gcWork. This is aggregated
	// into work.bytesMarked by dispose.
	bytesMarked uint64

	// Scan work performed on this gcWork. This is aggregated into
	// gcController by dispose and may also be flushed by callers.
	scanWork int64
}
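
// Illustration of the hysteresis (not in the original source): if each
// buffer holds N pointers and both wbuf1 and wbuf2 are full, the next
// put swaps the buffers, finds the new wbuf1 still full, hands one
// full buffer to putfull, and pulls in a fresh one with getempty. The
// following N-1 puts then touch only the local buffer, so the global
// lists see on the order of one operation per buffer of work.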

// init initializes w's buffer pair: wbuf1 starts empty and wbuf2 is
// seeded from the global full list when work is available.
func (w *gcWork) init() {
	w.wbuf1 = wbufptrOf(getempty())
	wbuf2 := trygetfull()
	if wbuf2 == nil {
		wbuf2 = getempty()
	}
	w.wbuf2 = wbufptrOf(wbuf2)
}

// put enqueues a pointer for the garbage collector to trace.
// obj must point to the beginning of a heap object or an oblet.
//go:nowritebarrier
func (w *gcWork) put(obj uintptr) {
	wbuf := w.wbuf1.ptr()
	if wbuf == nil {
		w.init()
		wbuf = w.wbuf1.ptr()
		// wbuf is empty at this point.
	} else if wbuf.nobj == len(wbuf.obj) {
		w.wbuf1, w.wbuf2 = w.wbuf2, w.wbuf1
		wbuf = w.wbuf1.ptr()
		if wbuf.nobj == len(wbuf.obj) {
			putfull(wbuf)
			wbuf = getempty()
			w.wbuf1 = wbufptrOf(wbuf)
		}
	}

	wbuf.obj[wbuf.nobj] = obj
	wbuf.nobj++
}

// putFast does a put and returns true if it can be done quickly;
// otherwise it returns false and the caller needs to call put.
//go:nowritebarrier
func (w *gcWork) putFast(obj uintptr) bool {
	wbuf := w.wbuf1.ptr()
	if wbuf == nil {
		return false
	} else if wbuf.nobj == len(wbuf.obj) {
		return false
	}

	wbuf.obj[wbuf.nobj] = obj
	wbuf.nobj++
	return true
}
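
// Illustrative caller pattern (a sketch; callers such as greyobject
// pair the fast and slow paths this way):
//
//	if !gcw.putFast(obj) {
//		gcw.put(obj)
//	}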

// tryGet dequeues a pointer for the garbage collector to trace.
//
// If there are no pointers remaining in this gcWork or in the global
// queue, tryGet returns 0. Note that there may still be pointers in
// other gcWork instances or other caches.
//go:nowritebarrier
func (w *gcWork) tryGet() uintptr {
	wbuf := w.wbuf1.ptr()
	if wbuf == nil {
		w.init()
		wbuf = w.wbuf1.ptr()
		// wbuf is empty at this point.
	}
	if wbuf.nobj == 0 {
		w.wbuf1, w.wbuf2 = w.wbuf2, w.wbuf1
		wbuf = w.wbuf1.ptr()
		if wbuf.nobj == 0 {
			owbuf := wbuf
			wbuf = trygetfull()
			if wbuf == nil {
				return 0
			}
			putempty(owbuf)
			w.wbuf1 = wbufptrOf(wbuf)
		}
	}

	wbuf.nobj--
	return wbuf.obj[wbuf.nobj]
}

// tryGetFast dequeues a pointer for the garbage collector to trace
// if one is readily available. Otherwise it returns 0 and
// the caller is expected to call tryGet().
//go:nowritebarrier
func (w *gcWork) tryGetFast() uintptr {
	wbuf := w.wbuf1.ptr()
	if wbuf == nil {
		return 0
	}
	if wbuf.nobj == 0 {
		return 0
	}

	wbuf.nobj--
	return wbuf.obj[wbuf.nobj]
}
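
// Illustrative caller pattern (a sketch; this is the shape the mark
// drain loop uses): try the fast path first and fall back to tryGet:
//
//	p := gcw.tryGetFast()
//	if p == 0 {
//		p = gcw.tryGet()
//	}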

// get dequeues a pointer for the garbage collector to trace, blocking
// if necessary to ensure all pointers from all queues and caches have
// been retrieved. get returns 0 if there are no pointers remaining.
//go:nowritebarrier
func (w *gcWork) get() uintptr {
	wbuf := w.wbuf1.ptr()
	if wbuf == nil {
		w.init()
		wbuf = w.wbuf1.ptr()
		// wbuf is empty at this point.
	}
	if wbuf.nobj == 0 {
		w.wbuf1, w.wbuf2 = w.wbuf2, w.wbuf1
		wbuf = w.wbuf1.ptr()
		if wbuf.nobj == 0 {
			owbuf := wbuf
			wbuf = getfull()
			if wbuf == nil {
				return 0
			}
			putempty(owbuf)
			w.wbuf1 = wbufptrOf(wbuf)
		}
	}

	// TODO: This might be a good place to add prefetch code

	wbuf.nobj--
	return wbuf.obj[wbuf.nobj]
}

// dispose returns any cached pointers to the global queue.
// The buffers are being put on the full queue so that the
// write barriers will not simply reacquire them before the
// GC can inspect them. This helps reduce the mutator's
// ability to hide pointers during the concurrent mark phase.
//
//go:nowritebarrier
func (w *gcWork) dispose() {
	if wbuf := w.wbuf1.ptr(); wbuf != nil {
		if wbuf.nobj == 0 {
			putempty(wbuf)
		} else {
			putfull(wbuf)
		}
		w.wbuf1 = 0

		wbuf = w.wbuf2.ptr()
		if wbuf.nobj == 0 {
			putempty(wbuf)
		} else {
			putfull(wbuf)
		}
		w.wbuf2 = 0
	}
	if w.bytesMarked != 0 {
		// dispose happens relatively infrequently. If this
		// atomic becomes a problem, we should first try to
		// dispose less and if necessary aggregate in a per-P
		// counter.
		atomic.Xadd64(&work.bytesMarked, int64(w.bytesMarked))
		w.bytesMarked = 0
	}
	if w.scanWork != 0 {
		atomic.Xaddint64(&gcController.scanWork, w.scanWork)
		w.scanWork = 0
	}
}

// balance moves some work that's cached in this gcWork back on the
// global queue.
//go:nowritebarrier
func (w *gcWork) balance() {
	if w.wbuf1 == 0 {
		return
	}
	if wbuf := w.wbuf2.ptr(); wbuf.nobj != 0 {
		putfull(wbuf)
		w.wbuf2 = wbufptrOf(getempty())
	} else if wbuf := w.wbuf1.ptr(); wbuf.nobj > 4 {
		w.wbuf1 = wbufptrOf(handoff(wbuf))
	}
}
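
// Illustrative context (a sketch, not part of this file): the mark
// drain loop calls balance when the global full list runs dry, so
// spare local work is published for idle workers:
//
//	if work.full == 0 {
//		gcw.balance()
//	}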

// empty returns true if w has no mark work available.
//go:nowritebarrier
func (w *gcWork) empty() bool {
	return w.wbuf1 == 0 || (w.wbuf1.ptr().nobj == 0 && w.wbuf2.ptr().nobj == 0)
}

// Internally, the GC work pool is kept in arrays in work buffers.
// The gcWork interface caches a work buffer until full (or empty) to
// avoid contending on the global work buffer lists.

type workbufhdr struct {
	node lfnode // must be first
	nobj int
}

//go:notinheap
type workbuf struct {
	workbufhdr
	// account for the above fields
	obj [(_WorkbufSize - unsafe.Sizeof(workbufhdr{})) / sys.PtrSize]uintptr
}
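
// Size arithmetic (illustrative, assuming a 64-bit platform where
// lfnode is 16 bytes and int is 8): unsafe.Sizeof(workbufhdr{}) is 24
// bytes, so obj holds (2048-24)/8 = 253 pointers per buffer.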

// workbuf factory routines. These funcs are used to manage the
// workbufs.
// If the GC asks for some work, these are the only routines that
// make wbufs available to the GC.

// checknonempty throws if b is empty.
func (b *workbuf) checknonempty() {
	if b.nobj == 0 {
		throw("workbuf is empty")
	}
}

// checkempty throws if b is not empty.
func (b *workbuf) checkempty() {
	if b.nobj != 0 {
		throw("workbuf is not empty")
	}
}

// getempty pops an empty work buffer off the work.empty list,
// allocating new buffers if none are available.
//go:nowritebarrier
func getempty() *workbuf {
	var b *workbuf
	if work.empty != 0 {
		b = (*workbuf)(lfstackpop(&work.empty))
		if b != nil {
			b.checkempty()
		}
	}
	if b == nil {
		b = (*workbuf)(persistentalloc(unsafe.Sizeof(*b), sys.CacheLineSize, &memstats.gc_sys))
	}
	return b
}

// putempty puts a workbuf onto the work.empty list.
// Upon entry this goroutine owns b. The lfstackpush relinquishes ownership.
//go:nowritebarrier
func putempty(b *workbuf) {
	b.checkempty()
	lfstackpush(&work.empty, &b.node)
}

// putfull puts the workbuf on the work.full list for the GC.
// putfull accepts partially full buffers so the GC can avoid competing
// with the mutators for ownership of partially full buffers.
//go:nowritebarrier
func putfull(b *workbuf) {
	b.checknonempty()
	lfstackpush(&work.full, &b.node)

	// We just made more work available. Let the GC controller
	// know so it can encourage more workers to run.
	if gcphase == _GCmark {
		gcController.enlistWorker()
	}
}

// trygetfull tries to get a full or partially empty work buffer.
// If one is not immediately available, it returns nil.
//go:nowritebarrier
func trygetfull() *workbuf {
	b := (*workbuf)(lfstackpop(&work.full))
	if b != nil {
		b.checknonempty()
		return b
	}
	return b
}

// Get a full work buffer off the work.full list.
// If nothing is available, wait until all the other gc helpers have
// finished and then return nil.
// getfull acts as a barrier for work.nproc helpers. As long as one
// gchelper is actively marking objects, it may create a work buffer
// that the other helpers can work on.
// The for loop either exits when a work buffer is found
// or when _all_ of the work.nproc GC helpers are in the loop
// looking for work and thus not capable of creating new work.
// This is in fact the termination condition for the STW mark
// phase.
//go:nowritebarrier
func getfull() *workbuf {
	b := (*workbuf)(lfstackpop(&work.full))
	if b != nil {
		b.checknonempty()
		return b
	}

	incnwait := atomic.Xadd(&work.nwait, +1)
	if incnwait > work.nproc {
		println("runtime: work.nwait=", incnwait, "work.nproc=", work.nproc)
		throw("work.nwait > work.nproc")
	}
	for i := 0; ; i++ {
		if work.full != 0 {
			decnwait := atomic.Xadd(&work.nwait, -1)
			if decnwait == work.nproc {
				println("runtime: work.nwait=", decnwait, "work.nproc=", work.nproc)
				throw("work.nwait > work.nproc")
			}
			b = (*workbuf)(lfstackpop(&work.full))
			if b != nil {
				b.checknonempty()
				return b
			}
			incnwait := atomic.Xadd(&work.nwait, +1)
			if incnwait > work.nproc {
				println("runtime: work.nwait=", incnwait, "work.nproc=", work.nproc)
				throw("work.nwait > work.nproc")
			}
		}
		if work.nwait == work.nproc && work.markrootNext >= work.markrootJobs {
			return nil
		}
		_g_ := getg()
		if i < 10 {
			_g_.m.gcstats.nprocyield++
			procyield(20)
		} else if i < 20 {
			_g_.m.gcstats.nosyield++
			osyield()
		} else {
			_g_.m.gcstats.nsleep++
			usleep(100)
		}
	}
}
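
// Worked example (illustrative, not in the original source): with
// work.nproc == 4, the loop above can return nil only once work.nwait
// reaches 4, i.e. all four helpers are spinning here, and no root jobs
// remain (work.markrootNext >= work.markrootJobs). Because a helper
// decrements nwait before popping a buffer and re-increments it on
// failure, nwait == nproc implies no helper is about to publish new
// work.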

//go:nowritebarrier
func handoff(b *workbuf) *workbuf {
	// Make new buffer with half of b's pointers.
	b1 := getempty()
	n := b.nobj / 2
	b.nobj -= n
	b1.nobj = n
	memmove(unsafe.Pointer(&b1.obj[0]), unsafe.Pointer(&b.obj[b.nobj]), uintptr(n)*unsafe.Sizeof(b1.obj[0]))
	_g_ := getg()
	_g_.m.gcstats.nhandoff++
	_g_.m.gcstats.nhandoffcnt += uint64(n)

	// Put b on full list - let first half of b get stolen.
	putfull(b)
	return b1
}
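
// Example of the split (illustrative): with b.nobj == 9, n == 4, so b
// keeps obj[0:5] and goes on the full list, while the returned b1
// holds copies of the former obj[5:9].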