github.com/fjballest/golang@v0.0.0-20151209143359-e4c5fe594ca8/src/runtime/mbarrier.go

// Copyright 2015 The Go Authors.  All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Garbage collector: write barriers.
//
// For the concurrent garbage collector, the Go compiler implements
// updates to pointer-valued fields that may be in heap objects by
// emitting calls to write barriers. This file contains the actual write barrier
// implementation, markwb, and the various wrappers called by the
// compiler to implement pointer assignment, slice assignment,
// typed memmove, and so on.

package runtime

import (
	"runtime/internal/sys"
	"unsafe"
)

// markwb is the mark-phase write barrier, the only barrier we have.
// The rest of this file exists only to make calls to this function.
//
// This is the Dijkstra barrier coarsened to always shade the ptr (src) object.
// The original Dijkstra barrier only shaded ptrs being placed in black slots.
//
// shade records that it has seen a white pointer by adding the referent
// to wbuf as well as marking it.
//
// slot is the destination (dst) in the Go code.
// ptr is the value that goes into the slot (src) in the Go code.
//
//
// Dealing with memory ordering:
//
// Dijkstra pointed out that maintaining the invariant that there are
// no black-to-white pointers means that white-to-white pointers do not
// need to be noted by the write barrier. Furthermore, if either
// white object dies before it is reached by the
// GC then the object can be collected during this GC cycle
// instead of waiting for the next cycle. Unfortunately, the cost of
// ensuring that the object holding the slot doesn't concurrently
// change to black without the mutator noticing seems prohibitive.
//
// Consider the following example where the mutator writes into
// a slot and then loads the slot's mark bit while the GC thread
// writes to the slot's mark bit and then as part of scanning reads
// the slot.
//
// Initially both [slot] and [slotmark] are 0 (nil)
// Mutator thread          GC thread
// st [slot], ptr          st [slotmark], 1
//
// ld r1, [slotmark]       ld r2, [slot]
//
// Without an expensive memory barrier between the st and the ld, the final
// result on most HW (including 386/amd64) can be r1==r2==0. This is a classic
// example of what can happen when loads are allowed to be reordered with older
// stores (avoiding such reorderings lies at the heart of the classic
// Peterson/Dekker algorithms for mutual exclusion). Rather than require memory
// barriers, which will slow down both the mutator and the GC, we always grey
// the ptr object regardless of the slot's color.
//
// Another place where we intentionally omit memory barriers is when
// accessing mheap_.arena_used to check if a pointer points into the
// heap. On relaxed memory machines, it's possible for a mutator to
// extend the size of the heap by updating arena_used, allocate an
// object from this new region, and publish a pointer to that object,
// but for tracing running on another processor to observe the pointer
// but use the old value of arena_used. In this case, tracing will not
// mark the object, even though it's reachable. However, the mutator
// is guaranteed to execute a write barrier when it publishes the
// pointer, so it will take care of marking the object. A general
// consequence of this is that the garbage collector may cache the
// value of mheap_.arena_used. (See issue #9984.)
//
//
// Stack writes:
//
// The compiler omits write barriers for writes to the current frame,
// but if a stack pointer has been passed down the call stack, the
// compiler will generate a write barrier for writes through that
// pointer (because it doesn't know it's not a heap pointer).
//
// One might be tempted to ignore the write barrier if slot points
// into the stack. Don't do it! Mark termination only re-scans
// frames that have potentially been active since the concurrent scan,
// so it depends on write barriers to track changes to pointers in
// stack frames that have not been active.
//go:nowritebarrierrec
func gcmarkwb_m(slot *uintptr, ptr uintptr) {
	if writeBarrier.needed {
		if ptr != 0 && inheap(ptr) {
			shade(ptr)
		}
	}
}
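
// As a rough illustration of how this barrier gets invoked (obj, field,
// and p below are made-up example names, not runtime code): for a store
// of a pointer into a heap object, such as
//
//	obj.field = p
//
// the compiler, when write barriers are enabled, emits approximately
//
//	writebarrierptr((*uintptr)(unsafe.Pointer(&obj.field)), uintptr(unsafe.Pointer(p)))
//
// writebarrierptr performs the store and then, via writebarrierptr_nostore1
// and gcmarkwb_m above, shades p. Writes to the current stack frame skip
// this path entirely, as described in the "Stack writes" section above.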

// Write barrier calls must not happen during critical GC and
// scheduler-related operations. In particular, there are times when the
// GC assumes that the world is stopped but scheduler-related code is
// still being executed: dealing with syscalls, putting gs on runnable
// queues, and so forth. This code cannot execute write barriers because
// the GC might drop them on the floor. Stopping the world involves removing
// the p associated with an m. We use the fact that m.p == nil to indicate
// that we are in one of these critical sections and throw if the write is of
// a pointer to a heap object.
//go:nosplit
func writebarrierptr_nostore1(dst *uintptr, src uintptr) {
	mp := acquirem()
	if mp.inwb || mp.dying > 0 {
		releasem(mp)
		return
	}
	systemstack(func() {
		if mp.p == 0 && memstats.enablegc && !mp.inwb && inheap(src) {
			throw("writebarrierptr_nostore1 called with mp.p == nil")
		}
		mp.inwb = true
		gcmarkwb_m(dst, src)
	})
	mp.inwb = false
	releasem(mp)
}

// NOTE: Really dst *unsafe.Pointer, src unsafe.Pointer,
// but if we do that, Go inserts a write barrier on *dst = src.
//go:nosplit
func writebarrierptr(dst *uintptr, src uintptr) {
	*dst = src
	if writeBarrier.cgo {
		cgoCheckWriteBarrier(dst, src)
	}
	if !writeBarrier.needed {
		return
	}
	if src != 0 && (src < sys.PhysPageSize || src == poisonStack) {
		systemstack(func() {
			print("runtime: writebarrierptr *", dst, " = ", hex(src), "\n")
			throw("bad pointer in write barrier")
		})
	}
	writebarrierptr_nostore1(dst, src)
}

// Like writebarrierptr, but the store has already been applied.
// Do not reapply.
//go:nosplit
func writebarrierptr_nostore(dst *uintptr, src uintptr) {
	if writeBarrier.cgo {
		cgoCheckWriteBarrier(dst, src)
	}
	if !writeBarrier.needed {
		return
	}
	if src != 0 && (src < sys.PhysPageSize || src == poisonStack) {
		systemstack(func() { throw("bad pointer in write barrier") })
	}
	writebarrierptr_nostore1(dst, src)
}

//go:nosplit
func writebarrierstring(dst *[2]uintptr, src [2]uintptr) {
	writebarrierptr(&dst[0], src[0])
	dst[1] = src[1]
}

//go:nosplit
func writebarrierslice(dst *[3]uintptr, src [3]uintptr) {
	writebarrierptr(&dst[0], src[0])
	dst[1] = src[1]
	dst[2] = src[2]
}

//go:nosplit
func writebarrieriface(dst *[2]uintptr, src [2]uintptr) {
	writebarrierptr(&dst[0], src[0])
	writebarrierptr(&dst[1], src[1])
}
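
// As a rough illustration of the wrappers above (dst and s below are
// made-up example names): a string header is {data pointer, length}, a
// slice header is {data pointer, length, capacity}, and an interface is
// two words. A slice assignment that needs a barrier, such as
//
//	dst = s // dst is a slice field in a heap object
//
// is therefore lowered, roughly, to writebarrierslice(&dst, s): word 0
// (the array pointer) goes through writebarrierptr, while the len and cap
// words are plain scalars and are stored directly. writebarrieriface
// barriers both of its words; gcmarkwb_m simply ignores any word that
// does not point into the heap.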

//go:generate go run wbfat_gen.go -- wbfat.go
//
// The above line generates multiword write barriers for
// all the combinations of ptr+scalar up to four words.
// The implementations are written to wbfat.go.

// typedmemmove copies a value of type typ to dst from src.
//go:nosplit
func typedmemmove(typ *_type, dst, src unsafe.Pointer) {
	memmove(dst, src, typ.size)
	if writeBarrier.cgo {
		cgoCheckMemmove(typ, dst, src, 0, typ.size)
	}
	if typ.kind&kindNoPointers != 0 {
		return
	}
	heapBitsBulkBarrier(uintptr(dst), typ.size)
}
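
// As a rough illustration (T, d, and s below are made-up example names):
// for a type whose values contain pointers, copying a whole value through
// a pointer, e.g.
//
//	type T struct {
//		p *int
//		n int
//	}
//	func set(d *T, s T) { *d = s }
//
// may be compiled into a call of the form typedmemmove(typeof(T),
// unsafe.Pointer(d), unsafe.Pointer(&s)), where typeof(T) stands for T's
// runtime type descriptor: one memmove of typ.size bytes followed by a
// bulk barrier over the destination's pointer words, which is what the
// body above does.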

//go:linkname reflect_typedmemmove reflect.typedmemmove
func reflect_typedmemmove(typ *_type, dst, src unsafe.Pointer) {
	typedmemmove(typ, dst, src)
}

// typedmemmovepartial is like typedmemmove but assumes that
// dst and src point off bytes into the value and only copies size bytes.
//go:linkname reflect_typedmemmovepartial reflect.typedmemmovepartial
func reflect_typedmemmovepartial(typ *_type, dst, src unsafe.Pointer, off, size uintptr) {
	memmove(dst, src, size)
	if writeBarrier.cgo {
		cgoCheckMemmove(typ, dst, src, off, size)
	}
	if !writeBarrier.needed || typ.kind&kindNoPointers != 0 || size < sys.PtrSize || !inheap(uintptr(dst)) {
		return
	}

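	// frag = -off & (sys.PtrSize-1) is the number of bytes needed to round
	// dst up to the next pointer-aligned boundary. For example, with 8-byte
	// pointers and off = 12, frag = -12 & 7 = 4: dst advances 4 bytes and
	// size shrinks by 4. The size &^ (sys.PtrSize-1) below then drops any
	// trailing partial word, so heapBitsBulkBarrier only sees whole,
	// aligned pointer slots.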
	if frag := -off & (sys.PtrSize - 1); frag != 0 {
		dst = add(dst, frag)
		size -= frag
	}
	heapBitsBulkBarrier(uintptr(dst), size&^(sys.PtrSize-1))
}

// callwritebarrier is invoked at the end of reflectcall, to execute
// write barrier operations to record the fact that a call's return
// values have just been copied to frame, starting at retoffset
// and continuing to framesize. The entire frame (not just the return
// values) is described by typ. Because the copy has already
// happened, we call writebarrierptr_nostore, and we must be careful
// not to be preempted before the write barriers have been run.
//go:nosplit
func callwritebarrier(typ *_type, frame unsafe.Pointer, framesize, retoffset uintptr) {
	if !writeBarrier.needed || typ == nil || typ.kind&kindNoPointers != 0 || framesize-retoffset < sys.PtrSize || !inheap(uintptr(frame)) {
		return
	}
	heapBitsBulkBarrier(uintptr(add(frame, retoffset)), framesize-retoffset)
}
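
// For illustration (f and T below are made-up example names): if
// reflectcall invokes
//
//	func f(x int) (int, *T)
//
// with an argument+result frame of framesize bytes, retoffset is the byte
// offset at which the results begin. callwritebarrier runs the bulk
// barrier over frame[retoffset:framesize] only, so the *T result pointer
// just copied into the frame is shaded without rescanning the arguments.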

//go:nosplit
func typedslicecopy(typ *_type, dst, src slice) int {
	// TODO(rsc): If typedslicecopy becomes faster than calling
	// typedmemmove repeatedly, consider using it during func growslice.
	n := dst.len
	if n > src.len {
		n = src.len
	}
	if n == 0 {
		return 0
	}
	dstp := unsafe.Pointer(dst.array)
	srcp := unsafe.Pointer(src.array)

	if raceenabled {
		callerpc := getcallerpc(unsafe.Pointer(&typ))
		pc := funcPC(slicecopy)
		racewriterangepc(dstp, uintptr(n)*typ.size, callerpc, pc)
		racereadrangepc(srcp, uintptr(n)*typ.size, callerpc, pc)
	}
	if msanenabled {
		msanwrite(dstp, uintptr(n)*typ.size)
		msanread(srcp, uintptr(n)*typ.size)
	}

	if writeBarrier.cgo {
		cgoCheckSliceCopy(typ, dst, src, n)
	}

	// Note: No point in checking typ.kind&kindNoPointers here:
	// compiler only emits calls to typedslicecopy for types with pointers,
	// and growslice and reflect_typedslicecopy check for pointers
	// before calling typedslicecopy.
	if !writeBarrier.needed {
		memmove(dstp, srcp, uintptr(n)*typ.size)
		return n
	}

	systemstack(func() {
		if uintptr(srcp) < uintptr(dstp) && uintptr(srcp)+uintptr(n)*typ.size > uintptr(dstp) {
			// Overlap with src before dst.
			// Copy backward, being careful not to move dstp/srcp
			// out of the array they point into.
			dstp = add(dstp, uintptr(n-1)*typ.size)
			srcp = add(srcp, uintptr(n-1)*typ.size)
			i := 0
			for {
				typedmemmove(typ, dstp, srcp)
				if i++; i >= n {
					break
				}
				dstp = add(dstp, -typ.size)
				srcp = add(srcp, -typ.size)
			}
		} else {
			// Copy forward, being careful not to move dstp/srcp
			// out of the array they point into.
			i := 0
			for {
				typedmemmove(typ, dstp, srcp)
				if i++; i >= n {
					break
				}
				dstp = add(dstp, typ.size)
				srcp = add(srcp, typ.size)
			}
		}
	})
	return int(n)
}
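
// As a rough illustration (dst and src below are just example slices):
// for element types that contain pointers, a plain
//
//	n := copy(dst, src) // dst, src []*int, for instance
//
// is compiled to a call to typedslicecopy rather than to the untyped
// slicecopy, so each copied element goes through typedmemmove and its
// write barrier while the GC is running.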

//go:linkname reflect_typedslicecopy reflect.typedslicecopy
func reflect_typedslicecopy(elemType *_type, dst, src slice) int {
	if elemType.kind&kindNoPointers != 0 {
		n := dst.len
		if n > src.len {
			n = src.len
		}
		memmove(dst.array, src.array, uintptr(n)*elemType.size)
		return n
	}
	return typedslicecopy(elemType, dst, src)
}