github.com/letsencrypt/go@v0.0.0-20160714163537-4054769a31f6/src/runtime/mbarrier.go

// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Garbage collector: write barriers.
//
// For the concurrent garbage collector, the Go compiler implements
// updates to pointer-valued fields that may be in heap objects by
// emitting calls to write barriers. This file contains the actual write
// barrier implementation, gcmarkwb_m, and the various wrappers called by
// the compiler to implement pointer assignment, slice assignment,
// typed memmove, and so on.
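//
// For example (an illustrative sketch only; the exact lowering is up to
// the compiler), a pointer store in Go source such as
//
//	obj.field = ptr
//
// where obj may live in the heap is compiled, roughly, into a call to one
// of the wrappers below:
//
//	writebarrierptr(&obj.field, ptr)
//
// which performs the store and then, if a garbage collection is in
// progress, lets gcmarkwb_m shade ptr.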

package runtime

import (
	"runtime/internal/sys"
	"unsafe"
)

// gcmarkwb_m is the mark-phase write barrier, the only barrier we have.
// The rest of this file exists only to make calls to this function.
//
// This is the Dijkstra barrier coarsened to always shade the ptr (src) object.
// The original Dijkstra barrier only shaded ptrs being placed in black slots.
//
// shade indicates that it has seen a white pointer by adding the referent
// to wbuf as well as marking it.
//
// slot is the destination (dst) in the Go code.
// ptr is the value that goes into the slot (src) in the Go code.
//
// Dealing with memory ordering:
//
// Dijkstra pointed out that maintaining the invariant that there are no
// black to white pointers means that white to white pointers do not need
// to be noted by the write barrier. Furthermore, if either
// white object dies before it is reached by the
// GC then the object can be collected during this GC cycle
// instead of waiting for the next cycle. Unfortunately, the cost of
// ensuring that the object holding the slot doesn't concurrently
// change to black without the mutator noticing seems prohibitive.
//
// Consider the following example where the mutator writes into
// a slot and then loads the slot's mark bit while the GC thread
// writes to the slot's mark bit and then as part of scanning reads
// the slot.
//
// Initially both [slot] and [slotmark] are 0 (nil)
// Mutator thread          GC thread
// st [slot], ptr          st [slotmark], 1
//
// ld r1, [slotmark]       ld r2, [slot]
//
// Without an expensive memory barrier between the st and the ld, the final
// result on most HW (including 386/amd64) can be r1==r2==0. This is a classic
// example of what can happen when loads are allowed to be reordered with older
// stores (avoiding such reorderings lies at the heart of the classic
// Peterson/Dekker algorithms for mutual exclusion). Rather than require memory
// barriers, which would slow down both the mutator and the GC, we always grey
// the ptr object regardless of the slot's color.
//
// Another place where we intentionally omit memory barriers is when
// accessing mheap_.arena_used to check if a pointer points into the
// heap. On relaxed memory machines, it's possible for a mutator to
// extend the size of the heap by updating arena_used, allocate an
// object from this new region, and publish a pointer to that object,
// but for tracing running on another processor to observe the pointer
// but use the old value of arena_used. In this case, tracing will not
// mark the object, even though it's reachable. However, the mutator
// is guaranteed to execute a write barrier when it publishes the
// pointer, so it will take care of marking the object. A general
// consequence of this is that the garbage collector may cache the
// value of mheap_.arena_used. (See issue #9984.)
//
//
// Stack writes:
//
// The compiler omits write barriers for writes to the current frame,
// but if a stack pointer has been passed down the call stack, the
// compiler will generate a write barrier for writes through that
// pointer (because it doesn't know it's not a heap pointer).
//
// One might be tempted to ignore the write barrier if slot points
// into the stack. Don't do it! Mark termination only re-scans
// frames that have potentially been active since the concurrent scan,
// so it depends on write barriers to track changes to pointers in
// stack frames that have not been active.
//
//
// Global writes:
//
// The Go garbage collector requires write barriers when heap pointers
// are stored in globals. Many garbage collectors ignore writes to
// globals and instead pick up global -> heap pointers during
// termination. This increases pause time, so we instead rely on write
// barriers for writes to globals so that we don't have to rescan
// globals during mark termination.
//
//go:nowritebarrierrec
func gcmarkwb_m(slot *uintptr, ptr uintptr) {
	if writeBarrier.needed {
		if ptr != 0 && inheap(ptr) {
			shade(ptr)
		}
	}
}

// Write barrier calls must not happen during critical GC and scheduler
// related operations. In particular, there are times when the GC assumes
// that the world is stopped but scheduler-related code is still being
// executed: dealing with syscalls, putting gs on runnable
// queues, and so forth. This code cannot execute write barriers because
// the GC might drop them on the floor. Stopping the world involves removing
// the p associated with an m. We use the fact that m.p == nil to indicate
// that we are in one of these critical sections and throw if the write is of
// a pointer to a heap object.
//go:nosplit
func writebarrierptr_nostore1(dst *uintptr, src uintptr) {
	mp := acquirem()
	if mp.inwb || mp.dying > 0 {
		releasem(mp)
		return
	}
	systemstack(func() {
		if mp.p == 0 && memstats.enablegc && !mp.inwb && inheap(src) {
			throw("writebarrierptr_nostore1 called with mp.p == nil")
		}
		mp.inwb = true
		gcmarkwb_m(dst, src)
	})
	mp.inwb = false
	releasem(mp)
}

// NOTE: Really dst *unsafe.Pointer, src unsafe.Pointer,
// but if we do that, Go inserts a write barrier on *dst = src.
//go:nosplit
func writebarrierptr(dst *uintptr, src uintptr) {
	*dst = src
	if writeBarrier.cgo {
		cgoCheckWriteBarrier(dst, src)
	}
	if !writeBarrier.needed {
		return
	}
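	// Descriptive note: a non-zero value below the physical page size can
	// never be a valid heap pointer, so it almost certainly indicates a
	// corrupted or misidentified pointer; report it and abort rather than
	// shading a bad address.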
	if src != 0 && src < sys.PhysPageSize {
		systemstack(func() {
			print("runtime: writebarrierptr *", dst, " = ", hex(src), "\n")
			throw("bad pointer in write barrier")
		})
	}
	writebarrierptr_nostore1(dst, src)
}

// Like writebarrierptr, but the store has already been applied.
// Do not reapply.
//go:nosplit
func writebarrierptr_nostore(dst *uintptr, src uintptr) {
	if writeBarrier.cgo {
		cgoCheckWriteBarrier(dst, src)
	}
	if !writeBarrier.needed {
		return
	}
	if src != 0 && src < sys.PhysPageSize {
		systemstack(func() { throw("bad pointer in write barrier") })
	}
	writebarrierptr_nostore1(dst, src)
}

// typedmemmove copies a value of type typ to dst from src.
//go:nosplit
func typedmemmove(typ *_type, dst, src unsafe.Pointer) {
	memmove(dst, src, typ.size)
	if writeBarrier.cgo {
		cgoCheckMemmove(typ, dst, src, 0, typ.size)
	}
	if typ.kind&kindNoPointers != 0 {
		return
	}
	heapBitsBulkBarrier(uintptr(dst), typ.size)
}
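
// For example (an illustrative sketch; the compiler decides the exact
// lowering case by case), copying a pointer-containing value through
// pointers, as in
//
//	var d, s *T // T is a struct with pointer fields
//	*d = *s
//
// may be compiled into a call of the form typedmemmove(typ, unsafe.Pointer(d),
// unsafe.Pointer(s)), where typ describes T, so that every copied pointer
// word is covered by heapBitsBulkBarrier.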

//go:linkname reflect_typedmemmove reflect.typedmemmove
func reflect_typedmemmove(typ *_type, dst, src unsafe.Pointer) {
	typedmemmove(typ, dst, src)
}

// typedmemmovepartial is like typedmemmove but assumes that
// dst and src point off bytes into the value and only copies size bytes.
//go:linkname reflect_typedmemmovepartial reflect.typedmemmovepartial
func reflect_typedmemmovepartial(typ *_type, dst, src unsafe.Pointer, off, size uintptr) {
	memmove(dst, src, size)
	if writeBarrier.cgo {
		cgoCheckMemmove(typ, dst, src, off, size)
	}
	if !writeBarrier.needed || typ.kind&kindNoPointers != 0 || size < sys.PtrSize {
		return
	}

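	// Round dst up to the next pointer-aligned boundary before emitting
	// barrier bits: the unaligned head cannot contain a complete pointer.
	// Worked example (assuming 8-byte pointers): off == 4 gives
	// frag == -4&7 == 4, so dst advances 4 bytes and size shrinks by 4;
	// the size&^(sys.PtrSize-1) below then drops any unaligned tail.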
	if frag := -off & (sys.PtrSize - 1); frag != 0 {
		dst = add(dst, frag)
		size -= frag
	}
	heapBitsBulkBarrier(uintptr(dst), size&^(sys.PtrSize-1))
}

// callwritebarrier is invoked at the end of reflectcall, to execute
// write barrier operations to record the fact that a call's return
// values have just been copied to frame, starting at retoffset
// and continuing to framesize. The entire frame (not just the return
// values) is described by typ. Because the copy has already
// happened, we use the bulk barrier heapBitsBulkBarrier, which does not
// reapply the stores, and this is nosplit so
// the copy and write barrier appear atomic to GC.
//go:nosplit
func callwritebarrier(typ *_type, frame unsafe.Pointer, framesize, retoffset uintptr) {
	if !writeBarrier.needed || typ == nil || typ.kind&kindNoPointers != 0 || framesize-retoffset < sys.PtrSize {
		return
	}
	heapBitsBulkBarrier(uintptr(add(frame, retoffset)), framesize-retoffset)
}

//go:nosplit
func typedslicecopy(typ *_type, dst, src slice) int {
	// TODO(rsc): If typedslicecopy becomes faster than calling
	// typedmemmove repeatedly, consider using it in growslice.
	n := dst.len
	if n > src.len {
		n = src.len
	}
	if n == 0 {
		return 0
	}
	dstp := dst.array
	srcp := src.array

	if raceenabled {
		callerpc := getcallerpc(unsafe.Pointer(&typ))
		pc := funcPC(slicecopy)
		racewriterangepc(dstp, uintptr(n)*typ.size, callerpc, pc)
		racereadrangepc(srcp, uintptr(n)*typ.size, callerpc, pc)
	}
	if msanenabled {
		msanwrite(dstp, uintptr(n)*typ.size)
		msanread(srcp, uintptr(n)*typ.size)
	}

	if writeBarrier.cgo {
		cgoCheckSliceCopy(typ, dst, src, n)
	}

	// Note: No point in checking typ.kind&kindNoPointers here:
	// compiler only emits calls to typedslicecopy for types with pointers,
	// and growslice and reflect_typedslicecopy check for pointers
	// before calling typedslicecopy.
	if !writeBarrier.needed {
		memmove(dstp, srcp, uintptr(n)*typ.size)
		return n
	}

	systemstack(func() {
		if uintptr(srcp) < uintptr(dstp) && uintptr(srcp)+uintptr(n)*typ.size > uintptr(dstp) {
			// Overlap with src before dst.
			// Copy backward, being careful not to move dstp/srcp
			// out of the array they point into.
			dstp = add(dstp, uintptr(n-1)*typ.size)
			srcp = add(srcp, uintptr(n-1)*typ.size)
			i := 0
			for {
				typedmemmove(typ, dstp, srcp)
				if i++; i >= n {
					break
				}
				dstp = add(dstp, -typ.size)
				srcp = add(srcp, -typ.size)
			}
		} else {
			// Copy forward, being careful not to move dstp/srcp
			// out of the array they point into.
			i := 0
			for {
				typedmemmove(typ, dstp, srcp)
				if i++; i >= n {
					break
				}
				dstp = add(dstp, typ.size)
				srcp = add(srcp, typ.size)
			}
		}
	})
	return n
}
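
// For example (an illustrative sketch; the compiler decides when to emit
// this call), a copy between slices of pointer-typed elements such as
//
//	var d, s []*int
//	copy(d, s)
//
// is compiled into a call of the form typedslicecopy(typ, d, s), where typ
// describes *int, so that every copied element goes through typedmemmove
// and is therefore covered by a write barrier.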

//go:linkname reflect_typedslicecopy reflect.typedslicecopy
func reflect_typedslicecopy(elemType *_type, dst, src slice) int {
	if elemType.kind&kindNoPointers != 0 {
		n := dst.len
		if n > src.len {
			n = src.len
		}
		memmove(dst.array, src.array, uintptr(n)*elemType.size)
		return n
	}
	return typedslicecopy(elemType, dst, src)
}