rsc.io/go@v0.0.0-20150416155037-e040fd465409/src/runtime/mgcwork.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import "unsafe"

const (
	_Debugwbufs  = true    // if true, check wbuf consistency
	_WorkbufSize = 1 * 256 // in bytes; if small, wbufs are passed to the GC in a timely fashion
)

// Garbage collector work pool abstraction.
//
// This implements a producer/consumer model for pointers to grey
// objects.  A grey object is one that is marked and on a work
// queue.  A black object is marked and not on a work queue.
//
// Write barriers, root discovery, stack scanning, and object scanning
// produce pointers to grey objects.  Scanning consumes pointers to
// grey objects, thus blackening them, and then scans them,
// potentially producing new pointers to grey objects.

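// In the usual tri-color terms (white is not used by name in this file,
// but is the standard third state, meaning not yet marked):
//
//	white: not yet marked
//	grey:  marked and on a work queue     (entered via put)
//	black: marked and off all work queues (entered via tryGet/get + scan)
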
// A wbufptr holds a workbuf*, but protects it from write barriers.
// workbufs never live on the heap, so write barriers are unnecessary.
// Write barriers on workbuf pointers may also be dangerous in the GC.
type wbufptr uintptr

func wbufptrOf(w *workbuf) wbufptr {
	return wbufptr(unsafe.Pointer(w))
}

func (wp wbufptr) ptr() *workbuf {
	return (*workbuf)(unsafe.Pointer(wp))
}

// A gcWork provides the interface to produce and consume work for the
// garbage collector.
//
// The usual pattern for using gcWork is:
//
//     var gcw gcWork
//     disable preemption
//     .. call gcw.put() to produce and gcw.get() to consume ..
//     gcw.dispose()
//     enable preemption
//
// It's important that any use of gcWork during the mark phase prevent
// the garbage collector from transitioning to mark termination since
// gcWork may locally hold GC work buffers. This can be done by
// disabling preemption (systemstack or acquirem).
type gcWork struct {
	// Invariant: wbuf is never full or empty
	wbuf wbufptr

	// Bytes marked (blackened) on this gcWork. This is aggregated
	// into work.bytesMarked by dispose.
	bytesMarked uint64
}
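
// As an illustrative sketch only (not code from this file), a mark
// worker running on the system stack might drain work like this, with
// the scan step left as a comment:
//
//	var gcw gcWork
//	gcw.initFromCache()
//	for {
//		obj := gcw.tryGet()
//		if obj == 0 {
//			break
//		}
//		// ... scan obj, calling gcw.put for each pointer found ...
//	}
//	gcw.dispose()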

// initFromCache fetches work from this M's currentwbuf cache.
//go:nowritebarrier
func (w *gcWork) initFromCache() {
	// TODO: Instead of making gcWork pull from the currentwbuf
	// cache, use a gcWork as the cache and make shade pass around
	// that gcWork.
	if w.wbuf == 0 {
		w.wbuf = wbufptr(xchguintptr(&getg().m.currentwbuf, 0))
	}
}

// put enqueues a pointer for the garbage collector to trace.
//go:nowritebarrier
func (ww *gcWork) put(obj uintptr) {
	w := (*gcWork)(noescape(unsafe.Pointer(ww))) // TODO: remove when escape analysis is fixed

	wbuf := w.wbuf.ptr()
	if wbuf == nil {
		wbuf = getpartialorempty(42)
		w.wbuf = wbufptrOf(wbuf)
	}

	wbuf.obj[wbuf.nobj] = obj
	wbuf.nobj++

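	// If the buffer is now full, hand it to the global work.full
	// list, preserving the invariant that w.wbuf is never full.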
	if wbuf.nobj == len(wbuf.obj) {
		putfull(wbuf, 50)
		w.wbuf = 0
	}
}

// tryGet dequeues a pointer for the garbage collector to trace.
//
// If there are no pointers remaining in this gcWork or in the global
// queue, tryGet returns 0.  Note that there may still be pointers in
// other gcWork instances or other caches.
//go:nowritebarrier
func (ww *gcWork) tryGet() uintptr {
	w := (*gcWork)(noescape(unsafe.Pointer(ww))) // TODO: remove when escape analysis is fixed

	wbuf := w.wbuf.ptr()
	if wbuf == nil {
		wbuf = trygetfull(74)
		if wbuf == nil {
			return 0
		}
		w.wbuf = wbufptrOf(wbuf)
	}

	wbuf.nobj--
	obj := wbuf.obj[wbuf.nobj]

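	// If the buffer is now empty, return it to the global work.empty
	// list, preserving the invariant that w.wbuf is never empty.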
	if wbuf.nobj == 0 {
		putempty(wbuf, 86)
		w.wbuf = 0
	}

	return obj
}

// get dequeues a pointer for the garbage collector to trace, blocking
// if necessary to ensure all pointers from all queues and caches have
// been retrieved.  get returns 0 if there are no pointers remaining.
//go:nowritebarrier
func (ww *gcWork) get() uintptr {
	w := (*gcWork)(noescape(unsafe.Pointer(ww))) // TODO: remove when escape analysis is fixed

	wbuf := w.wbuf.ptr()
	if wbuf == nil {
		wbuf = getfull(103)
		if wbuf == nil {
			return 0
		}
		wbuf.checknonempty()
		w.wbuf = wbufptrOf(wbuf)
	}

	// TODO: This might be a good place to add prefetch code

	wbuf.nobj--
	obj := wbuf.obj[wbuf.nobj]

	if wbuf.nobj == 0 {
		putempty(wbuf, 115)
		w.wbuf = 0
	}

	return obj
}

// dispose returns any cached pointers to the global queue.
//go:nowritebarrier
func (w *gcWork) dispose() {
	if wbuf := w.wbuf; wbuf != 0 {
		putpartial(wbuf.ptr(), 167)
		w.wbuf = 0
	}
	if w.bytesMarked != 0 {
		// dispose happens relatively infrequently. If this
		// atomic becomes a problem, we should first try to
		// dispose less and if necessary aggregate in a per-P
		// counter.
		xadd64(&work.bytesMarked, int64(w.bytesMarked))
		w.bytesMarked = 0
	}
}

// disposeToCache returns any cached pointers to this M's currentwbuf.
// It calls throw if currentwbuf is non-nil.
//go:nowritebarrier
func (w *gcWork) disposeToCache() {
	if wbuf := w.wbuf; wbuf != 0 {
		wbuf = wbufptr(xchguintptr(&getg().m.currentwbuf, uintptr(wbuf)))
		if wbuf != 0 {
			throw("m.currentwbuf non-nil in disposeToCache")
		}
		w.wbuf = 0
	}
	if w.bytesMarked != 0 {
		xadd64(&work.bytesMarked, int64(w.bytesMarked))
		w.bytesMarked = 0
	}
}

// balance moves some work that's cached in this gcWork back on the
// global queue.
//go:nowritebarrier
func (w *gcWork) balance() {
	if wbuf := w.wbuf; wbuf != 0 && wbuf.ptr().nobj > 4 {
		w.wbuf = wbufptrOf(handoff(wbuf.ptr()))
	}
}

// Internally, the GC work pool is kept in arrays in work buffers.
// The gcWork interface caches a work buffer until full (or empty) to
// avoid contending on the global work buffer lists.

type workbufhdr struct {
	node  lfnode // must be first
	nobj  int
	inuse bool   // This workbuf is in use by some goroutine and is not on the work.empty/partial/full queues.
	log   [4]int // line numbers forming a history of ownership changes to workbuf
}

type workbuf struct {
	workbufhdr
	// account for the above fields
	obj [(_WorkbufSize - unsafe.Sizeof(workbufhdr{})) / ptrSize]uintptr
}
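
// As a rough illustration only (assuming a 64-bit system where lfnode is
// 16 bytes and the header pads out to 64 bytes), each 256-byte workbuf
// would hold (256-64)/8 = 24 object pointers. The array length above is
// computed from the real field sizes, so this arithmetic is an estimate,
// not a guarantee.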

// workbuf factory routines. These funcs are used to manage the
// workbufs. They cache workbufs in the m struct field currentwbuf.
// If the GC asks for some work, these are the only routines that
// make partially full wbufs available to the GC.
// Each of the gets and puts also takes a distinct integer that is used
// to record a brief history of changes to ownership of the workbuf.
// The convention is to use a unique line number, but any encoding
// is permissible. For example, if you want to pass in 2 bits of information
// you could simply add lineno1*100000+lineno2.

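// getpartialorempty below appears to follow this convention when it
// forwards to getempty: it passes entry + 80700000, encoding both its
// caller's entry value and the fact that the buffer passed through it.
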
// logget records the past few values of entry to aid in debugging.
// logget checks that the buffer b is not currently in use.
func (b *workbuf) logget(entry int) {
	if !_Debugwbufs {
		return
	}
	if b.inuse {
		println("runtime: logget fails log entry=", entry,
			"b.log[0]=", b.log[0], "b.log[1]=", b.log[1],
			"b.log[2]=", b.log[2], "b.log[3]=", b.log[3])
		throw("logget: get not legal")
	}
	b.inuse = true
	copy(b.log[1:], b.log[:])
	b.log[0] = entry
}

// logput records the past few values of entry to aid in debugging.
// logput checks that the buffer b is currently in use.
func (b *workbuf) logput(entry int) {
	if !_Debugwbufs {
		return
	}
	if !b.inuse {
		println("runtime: logput fails log entry=", entry,
			"b.log[0]=", b.log[0], "b.log[1]=", b.log[1],
			"b.log[2]=", b.log[2], "b.log[3]=", b.log[3])
		throw("logput: put not legal")
	}
	b.inuse = false
	copy(b.log[1:], b.log[:])
	b.log[0] = entry
}

func (b *workbuf) checknonempty() {
	if b.nobj == 0 {
		println("runtime: nonempty check fails",
			"b.log[0]=", b.log[0], "b.log[1]=", b.log[1],
			"b.log[2]=", b.log[2], "b.log[3]=", b.log[3])
		throw("workbuf is empty")
	}
}

func (b *workbuf) checkempty() {
	if b.nobj != 0 {
		println("runtime: empty check fails",
			"b.log[0]=", b.log[0], "b.log[1]=", b.log[1],
			"b.log[2]=", b.log[2], "b.log[3]=", b.log[3])
		throw("workbuf is not empty")
	}
}

// checknocurrentwbuf checks that the m's currentwbuf field is empty
func checknocurrentwbuf() {
	if getg().m.currentwbuf != 0 {
		throw("unexpected currentwbuf")
	}
}

// getempty pops an empty work buffer off the work.empty list,
// allocating new buffers if none are available.
// entry is used to record a brief history of ownership.
//go:nowritebarrier
func getempty(entry int) *workbuf {
	var b *workbuf
	if work.empty != 0 {
		b = (*workbuf)(lfstackpop(&work.empty))
		if b != nil {
			b.checkempty()
		}
	}
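	// No empty buffer was available; allocate a fresh one from
	// persistent (never freed) memory, accounted to gc_sys.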
	if b == nil {
		b = (*workbuf)(persistentalloc(unsafe.Sizeof(*b), _CacheLineSize, &memstats.gc_sys))
	}
	b.logget(entry)
	return b
}

// putempty puts a workbuf onto the work.empty list.
// Upon entry this goroutine owns b. The lfstackpush relinquishes ownership.
//go:nowritebarrier
func putempty(b *workbuf, entry int) {
	b.checkempty()
	b.logput(entry)
	lfstackpush(&work.empty, &b.node)
}

// putfull puts the workbuf on the work.full list for the GC.
// putfull accepts partially full buffers so the GC can avoid competing
// with the mutators for ownership of partially full buffers.
//go:nowritebarrier
func putfull(b *workbuf, entry int) {
	b.checknonempty()
	b.logput(entry)
	lfstackpush(&work.full, &b.node)
}

// getpartialorempty tries to return a partially filled buffer
// and if none are available returns an empty one.
// entry is used to provide a brief history of ownership,
// using entry + xxx00000 to indicate
// two line numbers in the call chain.
//go:nowritebarrier
func getpartialorempty(entry int) *workbuf {
	var b *workbuf
	// If this m has a buf in currentwbuf then as an optimization
	// simply return that buffer. If it turns out currentwbuf
	// is full, put it on the work.full queue and get another
	// workbuf off the partial or empty queue.
	if getg().m.currentwbuf != 0 {
		b = (*workbuf)(unsafe.Pointer(xchguintptr(&getg().m.currentwbuf, 0)))
		if b != nil {
			if b.nobj < len(b.obj) {
				return b
			}
			putfull(b, entry+80100000)
		}
	}
	b = (*workbuf)(lfstackpop(&work.partial))
	if b != nil {
		b.logget(entry)
		return b
	}
	// Let getempty do the logget check but
	// use the entry to encode that it passed
	// through this routine.
	b = getempty(entry + 80700000)
	return b
}

// putpartial puts empty buffers on the work.empty queue,
// full buffers on the work.full queue and
// others on the work.partial queue.
// entry is used to provide a brief history of ownership,
// using entry + xxx00000 to indicate
// two call chain line numbers.
//go:nowritebarrier
func putpartial(b *workbuf, entry int) {
	if b.nobj == 0 {
		putempty(b, entry+81500000)
	} else if b.nobj < len(b.obj) {
		b.logput(entry)
		lfstackpush(&work.partial, &b.node)
	} else if b.nobj == len(b.obj) {
		b.logput(entry)
		lfstackpush(&work.full, &b.node)
	} else {
		throw("putpartial: bad Workbuf b.nobj")
	}
}

// trygetfull tries to get a full or partially filled workbuffer.
// If one is not immediately available, it returns nil.
//go:nowritebarrier
func trygetfull(entry int) *workbuf {
	b := (*workbuf)(lfstackpop(&work.full))
	if b == nil {
		b = (*workbuf)(lfstackpop(&work.partial))
	}
	if b != nil {
		b.logget(entry)
		b.checknonempty()
		return b
	}
	// full and partial are both empty so see if there
	// is any work available in currentwbuf.
	// This is an optimization to shift
	// processing from the STW marktermination phase into
	// the concurrent mark phase.
	if getg().m.currentwbuf != 0 {
		b = (*workbuf)(unsafe.Pointer(xchguintptr(&getg().m.currentwbuf, 0)))
		if b != nil {
			if b.nobj != 0 {
				return b
			}
			putempty(b, 839)
			b = nil
		}
	}
	return b
}

// getfull gets a full work buffer off the work.full list, or a partially
// filled one off the work.partial list. If nothing is available, it
// waits until all the other gc helpers have finished and then
// returns nil.
// getfull acts as a barrier for work.nproc helpers. As long as one
// gchelper is actively marking objects, it
// may create a workbuffer that the other helpers can work on.
// The for loop either exits when a work buffer is found
// or when _all_ of the work.nproc GC helpers are in the loop
// looking for work and thus not capable of creating new work.
// This is in fact the termination condition for the STW mark
// phase.
//go:nowritebarrier
func getfull(entry int) *workbuf {
	b := (*workbuf)(lfstackpop(&work.full))
	if b != nil {
		b.logget(entry)
		b.checknonempty()
		return b
	}
	b = (*workbuf)(lfstackpop(&work.partial))
	if b != nil {
		b.logget(entry)
		return b
	}
	// Make sure that currentwbuf is also not a source for pointers to be
	// processed. This is an optimization that shifts processing
	// from the mark termination STW phase to the concurrent mark phase.
	if getg().m.currentwbuf != 0 {
		b = (*workbuf)(unsafe.Pointer(xchguintptr(&getg().m.currentwbuf, 0)))
		if b != nil {
			if b.nobj != 0 {
				return b
			}
			putempty(b, 877)
			b = nil
		}
	}

	xadd(&work.nwait, +1)
	for i := 0; ; i++ {
		if work.full != 0 || work.partial != 0 {
			xadd(&work.nwait, -1)
			b = (*workbuf)(lfstackpop(&work.full))
			if b == nil {
				b = (*workbuf)(lfstackpop(&work.partial))
			}
			if b != nil {
				b.logget(entry)
				b.checknonempty()
				return b
			}
			xadd(&work.nwait, +1)
		}
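		// Every one of the work.nproc helpers is in this loop
		// looking for work and no buffers remain, so no helper
		// can produce new work: marking has terminated.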
		if work.nwait == work.nproc {
			return nil
		}
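		// Back off: spin briefly, then yield the OS thread, and
		// finally sleep, so waiting helpers don't burn CPU while
		// another helper is still producing work.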
		_g_ := getg()
		if i < 10 {
			_g_.m.gcstats.nprocyield++
			procyield(20)
		} else if i < 20 {
			_g_.m.gcstats.nosyield++
			osyield()
		} else {
			_g_.m.gcstats.nsleep++
			usleep(100)
		}
	}
}

//go:nowritebarrier
func handoff(b *workbuf) *workbuf {
	// Make new buffer with half of b's pointers.
	b1 := getempty(915)
	n := b.nobj / 2
	b.nobj -= n
	b1.nobj = n
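	// Copy the top half of b's object array into b1; b keeps the
	// bottom half in place.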
	memmove(unsafe.Pointer(&b1.obj[0]), unsafe.Pointer(&b.obj[b.nobj]), uintptr(n)*unsafe.Sizeof(b1.obj[0]))
	_g_ := getg()
	_g_.m.gcstats.nhandoff++
	_g_.m.gcstats.nhandoffcnt += uint64(n)

	// Put b on full list - let first half of b get stolen.
	putfull(b, 942)
	return b1
}

// harvestingwbufs is 1 while harvesting so that the write barrier code
// (shade) can detect calls during a presumably STW write barrier.
var harvestingwbufs uint32

// harvestwbufs moves non-empty workbufs from m.currentwbuf to work.full.
// Must be in a STW phase.
// xchguintptr is used since there are write barrier calls from the GC helper
// routines even during a STW phase.
// TODO: chase down write barrier calls in STW phase and understand and eliminate
// them.
//go:nowritebarrier
func harvestwbufs() {
	// announce to the write barrier that we are harvesting the currentwbufs
	atomicstore(&harvestingwbufs, 1)

	for mp := allm; mp != nil; mp = mp.alllink {
		wbuf := (*workbuf)(unsafe.Pointer(xchguintptr(&mp.currentwbuf, 0)))
		// TODO: beat write barriers out of the mark termination and eliminate xchg
		//		tempwbuf := (*workbuf)(unsafe.Pointer(tempm.currentwbuf))
		//		tempm.currentwbuf = 0
		if wbuf != nil {
			if wbuf.nobj == 0 {
				putempty(wbuf, 945)
			} else {
				putfull(wbuf, 947) // use full instead of partial so the GC doesn't compete to get the wbuf
			}
		}
	}

	atomicstore(&harvestingwbufs, 0)
}