github.com/reiver/go@v0.0.0-20150109200633-1d0c7792f172/src/runtime/mgc0.go

// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import "unsafe"

// Called from C. Returns the Go type *m.
func gc_m_ptr(ret *interface{}) {
	*ret = (*m)(nil)
}

// Called from C. Returns the Go type *g.
func gc_g_ptr(ret *interface{}) {
	*ret = (*g)(nil)
}

// Called from C. Returns the Go type *itab.
func gc_itab_ptr(ret *interface{}) {
	*ret = (*itab)(nil)
}

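// gc_unixnanotime stores the current time in *now, in nanoseconds since the
// Unix epoch (sec*1e9 + nsec, as returned by time_now).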
func gc_unixnanotime(now *int64) {
	sec, nsec := time_now()
	*now = sec*1e9 + int64(nsec)
}

//go:linkname runtime_debug_freeOSMemory runtime/debug.freeOSMemory
func runtime_debug_freeOSMemory() {
	gogc(2) // force GC and do eager sweep
	systemstack(scavenge_m)
}

var poolcleanup func()

//go:linkname sync_runtime_registerPoolCleanup sync.runtime_registerPoolCleanup
func sync_runtime_registerPoolCleanup(f func()) {
	poolcleanup = f
}

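// clearpools drops the per-P caches that could otherwise keep dead objects
// alive across a collection: the sync.Pool contents (via the registered
// poolcleanup hook), the tiny-allocator block, and the cached sudog and
// defer free lists.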
func clearpools() {
	// clear sync.Pools
	if poolcleanup != nil {
		poolcleanup()
	}

	for _, p := range &allp {
		if p == nil {
			break
		}
		// clear tinyalloc pool
		if c := p.mcache; c != nil {
			c.tiny = nil
			c.tinysize = 0

			// disconnect cached list before dropping it on the floor,
			// so that a dangling ref to one entry does not pin all of them.
			var sg, sgnext *sudog
			for sg = c.sudogcache; sg != nil; sg = sgnext {
				sgnext = sg.next
				sg.next = nil
			}
			c.sudogcache = nil
		}

		// clear defer pools
		for i := range p.deferpool {
			// disconnect cached list before dropping it on the floor,
			// so that a dangling ref to one entry does not pin all of them.
			var d, dlink *_defer
			for d = p.deferpool[i]; d != nil; d = dlink {
				dlink = d.link
				d.link = nil
			}
			p.deferpool[i] = nil
		}
	}
}

// backgroundgc runs in its own goroutine and does the concurrent GC work.
// bggc holds the state of the background GC.
func backgroundgc() {
	bggc.g = getg()
	bggc.g.issystem = true
	for {
		gcwork(0)
		lock(&bggc.lock)
		bggc.working = 0
		goparkunlock(&bggc.lock, "Concurrent GC wait")
	}
}

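// bgsweep runs in its own goroutine and sweeps spans in the background,
// yielding between spans. Once all spans have been swept it parks on gclock
// until more sweep work becomes available.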
func bgsweep() {
	sweep.g = getg()
	getg().issystem = true
	for {
		for gosweepone() != ^uintptr(0) {
			sweep.nbgsweep++
			Gosched()
		}
		lock(&gclock)
		if !gosweepdone() {
			// This can happen if a GC runs between
			// gosweepone returning ^0 above
			// and the lock being acquired.
			unlock(&gclock)
			continue
		}
		sweep.parked = true
		goparkunlock(&gclock, "GC sweep wait")
	}
}

const (
	_PoisonGC    = 0xf969696969696969 & (1<<(8*ptrSize) - 1)
	_PoisonStack = 0x6868686868686868 & (1<<(8*ptrSize) - 1)
)

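// needwb reports whether write barriers must currently be executed: during
// the mark and mark termination phases, or whenever the shadow heap used to
// detect missed barriers is enabled.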
//go:nosplit
func needwb() bool {
	return gcphase == _GCmark || gcphase == _GCmarktermination || mheap_.shadow_enabled
}

// shadowptr returns a pointer to the shadow value for addr.
//go:nosplit
func shadowptr(addr uintptr) *uintptr {
	var shadow *uintptr
	if mheap_.data_start <= addr && addr < mheap_.data_end {
		shadow = (*uintptr)(unsafe.Pointer(addr + mheap_.shadow_data))
	} else if inheap(addr) {
		shadow = (*uintptr)(unsafe.Pointer(addr + mheap_.shadow_heap))
	}
	return shadow
}

// clearshadow clears the shadow copy associated with the n bytes of memory at addr.
func clearshadow(addr, n uintptr) {
	if !mheap_.shadow_enabled {
		return
	}
	p := shadowptr(addr)
	if p == nil || n <= ptrSize {
		return
	}
	memclr(unsafe.Pointer(p), n)
}

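// writebarrierptr performs the pointer write *dst = src and, when write
// barriers are enabled, records the write for the garbage collector. If the
// shadow heap is enabled, it also cross-checks and updates the shadow word
// to help catch missed write barriers.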
// NOTE: Really dst *unsafe.Pointer, src unsafe.Pointer,
// but if we do that, Go inserts a write barrier on *dst = src.
//go:nosplit
func writebarrierptr(dst *uintptr, src uintptr) {
	if !needwb() {
		*dst = src
		return
	}

	if src != 0 && (src < _PageSize || src == _PoisonGC || src == _PoisonStack) {
		systemstack(func() { throw("bad pointer in write barrier") })
	}

	if mheap_.shadow_enabled {
		systemstack(func() {
			addr := uintptr(unsafe.Pointer(dst))
			shadow := shadowptr(addr)
			if shadow == nil {
				return
			}
			// There is a race here but only if the program is using
			// racy writes instead of sync/atomic. In that case we
			// don't mind crashing.
			if *shadow != *dst && *shadow != noShadow && istrackedptr(*dst) {
				mheap_.shadow_enabled = false
				print("runtime: write barrier dst=", dst, " old=", hex(*dst), " shadow=", shadow, " old=", hex(*shadow), " new=", hex(src), "\n")
				throw("missed write barrier")
			}
			*shadow = src
		})
	}

	*dst = src
	writebarrierptr_nostore1(dst, src)
}

// istrackedptr reports whether the pointer value p requires a write barrier
// when stored into the heap.
func istrackedptr(p uintptr) bool {
	return inheap(p)
}

// checkwbshadow checks that p matches its shadow word.
// The garbage collector calls checkwbshadow for each pointer during the checkmark phase.
// It is only called when mheap_.shadow_enabled is true.
func checkwbshadow(p *uintptr) {
	addr := uintptr(unsafe.Pointer(p))
	shadow := shadowptr(addr)
	if shadow == nil {
		return
	}
	// There is no race on the accesses here, because the world is stopped,
	// but there may be racy writes that lead to the shadow and the
	// heap being inconsistent. If so, we will detect that here as a
	// missed write barrier and crash. We don't mind.
	// Code should use sync/atomic instead of racy pointer writes.
	if *shadow != *p && *shadow != noShadow && istrackedptr(*p) {
		mheap_.shadow_enabled = false
		print("runtime: checkwritebarrier p=", p, " *p=", hex(*p), " shadow=", shadow, " *shadow=", hex(*shadow), "\n")
		throw("missed write barrier")
	}
}

// noShadow is stored as the shadow pointer value to mark that there is no
// shadow word recorded. It matches any actual pointer word.
// noShadow is used when it is impossible to know the right word
// to store in the shadow heap, such as when the real heap word
// is being manipulated atomically.
const noShadow uintptr = 1

// writebarrierptr_noshadow records that the value in *dst
// has been written to using an atomic operation and the shadow
// has not been updated. (In general if dst must be manipulated
// atomically we cannot get the right bits for use in the shadow.)
//go:nosplit
func writebarrierptr_noshadow(dst *uintptr) {
	addr := uintptr(unsafe.Pointer(dst))
	shadow := shadowptr(addr)
	if shadow == nil {
		return
	}

	*shadow = noShadow
}

// Like writebarrierptr, but the store has already been applied.
// Do not reapply.
//go:nosplit
func writebarrierptr_nostore(dst *uintptr, src uintptr) {
	if !needwb() {
		return
	}

	if src != 0 && (src < _PageSize || src == _PoisonGC || src == _PoisonStack) {
		systemstack(func() { throw("bad pointer in write barrier") })
	}

	// Apply changes to shadow.
	// Since *dst has been overwritten already, we cannot check
	// whether there were any missed updates, but writebarrierptr_nostore
	// is only rarely used.
	if mheap_.shadow_enabled {
		systemstack(func() {
			addr := uintptr(unsafe.Pointer(dst))
			shadow := shadowptr(addr)
			if shadow == nil {
				return
			}
			*shadow = src
		})
	}

	writebarrierptr_nostore1(dst, src)
}

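// writebarrierptr_nostore1 is the common tail of writebarrierptr and
// writebarrierptr_nostore: it runs the mark-phase barrier work (gcmarkwb_m)
// on the system stack, using mp.inwb to avoid re-entering the barrier
// recursively.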
//go:nosplit
func writebarrierptr_nostore1(dst *uintptr, src uintptr) {
	mp := acquirem()
	if mp.inwb || mp.dying > 0 {
		releasem(mp)
		return
	}
	mp.inwb = true
	systemstack(func() {
		gcmarkwb_m(dst, src)
	})
	mp.inwb = false
	releasem(mp)
}

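// The helpers below implement write barriers for multiword values
// (strings, slices, interfaces). Only the words that can hold pointers go
// through writebarrierptr; scalar words such as lengths and capacities are
// copied directly.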
//go:nosplit
func writebarrierstring(dst *[2]uintptr, src [2]uintptr) {
	writebarrierptr(&dst[0], src[0])
	dst[1] = src[1]
}

//go:nosplit
func writebarrierslice(dst *[3]uintptr, src [3]uintptr) {
	writebarrierptr(&dst[0], src[0])
	dst[1] = src[1]
	dst[2] = src[2]
}

//go:nosplit
func writebarrieriface(dst *[2]uintptr, src [2]uintptr) {
	writebarrierptr(&dst[0], src[0])
	writebarrierptr(&dst[1], src[1])
}

//go:generate go run wbfat_gen.go -- wbfat.go
//
// The above line generates multiword write barriers for
// all the combinations of ptr+scalar up to four words.
// The implementations are written to wbfat.go.

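// reflect_typedmemmove is the runtime implementation behind
// reflect.typedmemmove; it forwards directly to typedmemmove.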
//go:linkname reflect_typedmemmove reflect.typedmemmove
func reflect_typedmemmove(typ *_type, dst, src unsafe.Pointer) {
	typedmemmove(typ, dst, src)
}

// typedmemmove copies a value of type t to dst from src.
//go:nosplit
func typedmemmove(typ *_type, dst, src unsafe.Pointer) {
	if !needwb() || (typ.kind&kindNoPointers) != 0 {
		memmove(dst, src, typ.size)
		return
	}

	systemstack(func() {
		mask := loadPtrMask(typ)
		nptr := typ.size / ptrSize
		for i := uintptr(0); i < nptr; i += 2 {
			bits := mask[i/2]
			if (bits>>2)&_BitsMask == _BitsPointer {
				writebarrierptr((*uintptr)(dst), *(*uintptr)(src))
			} else {
				*(*uintptr)(dst) = *(*uintptr)(src)
			}
			// TODO(rsc): The noescape calls should be unnecessary.
			dst = add(noescape(dst), ptrSize)
			src = add(noescape(src), ptrSize)
			if i+1 == nptr {
				break
			}
			bits >>= 4
			if (bits>>2)&_BitsMask == _BitsPointer {
				writebarrierptr((*uintptr)(dst), *(*uintptr)(src))
			} else {
				*(*uintptr)(dst) = *(*uintptr)(src)
			}
			dst = add(noescape(dst), ptrSize)
			src = add(noescape(src), ptrSize)
		}
	})
}

// typedmemmovepartial is like typedmemmove but assumes that
// dst and src point off bytes into the value and only copies size bytes.
//go:linkname reflect_typedmemmovepartial reflect.typedmemmovepartial
func reflect_typedmemmovepartial(typ *_type, dst, src unsafe.Pointer, off, size uintptr) {
	if !needwb() || (typ.kind&kindNoPointers) != 0 || size < ptrSize {
		memmove(dst, src, size)
		return
	}

	if off&(ptrSize-1) != 0 {
		frag := -off & (ptrSize - 1)
		// frag < size, because size >= ptrSize, checked above.
		memmove(dst, src, frag)
		size -= frag
		dst = add(noescape(dst), frag)
		src = add(noescape(src), frag)
		off += frag
	}

	mask := loadPtrMask(typ)
	nptr := (off + size) / ptrSize
	for i := uintptr(off / ptrSize); i < nptr; i++ {
		bits := mask[i/2] >> ((i & 1) << 2)
		if (bits>>2)&_BitsMask == _BitsPointer {
			writebarrierptr((*uintptr)(dst), *(*uintptr)(src))
		} else {
			*(*uintptr)(dst) = *(*uintptr)(src)
		}
		// TODO(rsc): The noescape calls should be unnecessary.
		dst = add(noescape(dst), ptrSize)
		src = add(noescape(src), ptrSize)
	}
	size &= ptrSize - 1
	if size > 0 {
		memmove(dst, src, size)
	}
}

// callwritebarrier is invoked at the end of reflectcall, to execute
// write barrier operations to record the fact that a call's return
// values have just been copied to frame, starting at retoffset
// and continuing to framesize. The entire frame (not just the return
// values) is described by typ. Because the copy has already
// happened, we call writebarrierptr_nostore, and we must be careful
// not to be preempted before the write barriers have been run.
//go:nosplit
func callwritebarrier(typ *_type, frame unsafe.Pointer, framesize, retoffset uintptr) {
	if !needwb() || typ == nil || (typ.kind&kindNoPointers) != 0 || framesize-retoffset < ptrSize {
		return
	}

	systemstack(func() {
		mask := loadPtrMask(typ)
		// retoffset is known to be pointer-aligned (at least).
		// TODO(rsc): The noescape call should be unnecessary.
		dst := add(noescape(frame), retoffset)
		nptr := framesize / ptrSize
		for i := uintptr(retoffset / ptrSize); i < nptr; i++ {
			bits := mask[i/2] >> ((i & 1) << 2)
			if (bits>>2)&_BitsMask == _BitsPointer {
				writebarrierptr_nostore((*uintptr)(dst), *(*uintptr)(dst))
			}
			// TODO(rsc): The noescape call should be unnecessary.
			dst = add(noescape(dst), ptrSize)
		}
	})
}

//go:linkname reflect_typedslicecopy reflect.typedslicecopy
func reflect_typedslicecopy(elemType *_type, dst, src slice) int {
	return typedslicecopy(elemType, dst, src)
}

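// typedslicecopy copies min(len(dst), len(src)) elements of type typ from
// src to dst. When write barriers are needed, it copies element by element
// through typedmemmove, walking backward when src precedes and overlaps dst
// so that elements are not clobbered before they are copied.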
//go:nosplit
func typedslicecopy(typ *_type, dst, src slice) int {
	n := dst.len
	if n > src.len {
		n = src.len
	}
	if n == 0 {
		return 0
	}
	dstp := unsafe.Pointer(dst.array)
	srcp := unsafe.Pointer(src.array)

	if !needwb() {
		memmove(dstp, srcp, uintptr(n)*typ.size)
		return int(n)
	}

	systemstack(func() {
		if uintptr(srcp) < uintptr(dstp) && uintptr(srcp)+uintptr(n)*typ.size > uintptr(dstp) {
			// Overlap with src before dst.
			// Copy backward, being careful not to move dstp/srcp
			// out of the array they point into.
			dstp = add(dstp, uintptr(n-1)*typ.size)
			srcp = add(srcp, uintptr(n-1)*typ.size)
			i := uint(0)
			for {
				typedmemmove(typ, dstp, srcp)
				if i++; i >= n {
					break
				}
				dstp = add(dstp, -typ.size)
				srcp = add(srcp, -typ.size)
			}
		} else {
			// Copy forward, being careful not to move dstp/srcp
			// out of the array they point into.
			i := uint(0)
			for {
				typedmemmove(typ, dstp, srcp)
				if i++; i >= n {
					break
				}
				dstp = add(dstp, typ.size)
				srcp = add(srcp, typ.size)
			}
		}
	})
	return int(n)
}