github.com/shijuvar/go@v0.0.0-20141209052335-e8f13700b70c/src/runtime/mgc0.go (about)

     1  // Copyright 2012 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package runtime
     6  
     7  import "unsafe"
     8  
     9  // Called from C. Returns the Go type *m.
    10  func gc_m_ptr(ret *interface{}) {
    11  	*ret = (*m)(nil)
    12  }
    13  
    14  // Called from C. Returns the Go type *g.
    15  func gc_g_ptr(ret *interface{}) {
    16  	*ret = (*g)(nil)
    17  }
    18  
    19  // Called from C. Returns the Go type *itab.
    20  func gc_itab_ptr(ret *interface{}) {
    21  	*ret = (*itab)(nil)
    22  }
    23  
    24  func gc_unixnanotime(now *int64) {
    25  	sec, nsec := timenow()
    26  	*now = sec*1e9 + int64(nsec)
    27  }
    28  
// freeOSMemory forces a garbage collection with an eager sweep and
// then runs the scavenger on the system stack to return unused
// memory to the operating system.
// NOTE(review): presumably the implementation behind
// runtime/debug.FreeOSMemory — confirm the linkname/caller.
func freeOSMemory() {
	gogc(2) // force GC and do eager sweep
	systemstack(scavenge_m)
}
    33  
// poolcleanup, if non-nil, is invoked by clearpools at the start of
// each GC to drain sync.Pools.
var poolcleanup func()

// registerPoolCleanup installs f as the hook that clearpools calls.
// NOTE(review): presumably called by the sync package via go:linkname
// to register its pool-clearing function — confirm against sync.
func registerPoolCleanup(f func()) {
	poolcleanup = f
}
    39  
// clearpools drains caches of dead-but-still-referenced objects
// before garbage collection: sync.Pool contents (via poolcleanup),
// and each P's tiny-allocator block, cached sudogs, and cached
// defer records.
func clearpools() {
	// clear sync.Pools
	if poolcleanup != nil {
		poolcleanup()
	}

	// Iterate the fixed allp array; a nil entry marks the end of
	// the in-use Ps.
	for _, p := range &allp {
		if p == nil {
			break
		}
		// clear tinyalloc pool
		if c := p.mcache; c != nil {
			c.tiny = nil
			c.tinysize = 0

			// disconnect cached list before dropping it on the floor,
			// so that a dangling ref to one entry does not pin all of them.
			var sg, sgnext *sudog
			for sg = c.sudogcache; sg != nil; sg = sgnext {
				sgnext = sg.next
				sg.next = nil
			}
			c.sudogcache = nil
		}

		// clear defer pools (one pool per defer size class).
		for i := range p.deferpool {
			// disconnect cached list before dropping it on the floor,
			// so that a dangling ref to one entry does not pin all of them.
			var d, dlink *_defer
			for d = p.deferpool[i]; d != nil; d = dlink {
				dlink = d.link
				d.link = nil
			}
			p.deferpool[i] = nil
		}
	}
}
    78  
// bgsweep is the background sweeper goroutine. It sweeps spans one
// at a time, yielding between spans, and parks on gclock when there
// is nothing left to sweep.
func bgsweep() {
	sweep.g = getg()
	getg().issystem = true // mark this goroutine as a system goroutine
	for {
		// gosweepone returns ^uintptr(0) when there are no more
		// spans to sweep.
		for gosweepone() != ^uintptr(0) {
			sweep.nbgsweep++
			Gosched() // yield so sweeping doesn't monopolize the P
		}
		lock(&gclock)
		if !gosweepdone() {
			// This can happen if a GC runs between
			// gosweepone returning ^0 above
			// and the lock being acquired.
			unlock(&gclock)
			continue
		}
		// Record that we are parked, then sleep until woken for the
		// next sweep cycle.
		sweep.parked = true
		goparkunlock(&gclock, "GC sweep wait")
	}
}
    99  
// Poison patterns treated as invalid pointers by the write barrier
// (see writebarrierptr_nostore). The mask truncates the 64-bit
// pattern to the platform's pointer size.
const (
	_PoisonGC    = 0xf969696969696969 & (1<<(8*ptrSize) - 1)
	_PoisonStack = 0x6868686868686868 & (1<<(8*ptrSize) - 1)
)
   104  
   105  func needwb() bool {
   106  	return gcphase == _GCmark || gcphase == _GCmarktermination
   107  }
   108  
// writebarrierptr performs *dst = src and then, when the collector
// is in a barrier-requiring phase, reports the write to the GC via
// writebarrierptr_nostore.
//
// NOTE: Really dst *unsafe.Pointer, src unsafe.Pointer,
// but if we do that, Go inserts a write barrier on *dst = src.
//go:nosplit
func writebarrierptr(dst *uintptr, src uintptr) {
	*dst = src // the store itself always happens; the barrier is extra bookkeeping
	if needwb() {
		writebarrierptr_nostore(dst, src)
	}
}
   118  
// Like writebarrierptr, but the store has already been applied.
// Do not reapply.
//go:nosplit
func writebarrierptr_nostore(dst *uintptr, src uintptr) {
	if getg() == nil || !needwb() { // very low-level startup
		return
	}

	// A non-zero value below _PageSize or equal to a poison pattern
	// cannot be a valid pointer; throw rather than corrupt GC state.
	if src != 0 && (src < _PageSize || src == _PoisonGC || src == _PoisonStack) {
		systemstack(func() { gothrow("bad pointer in write barrier") })
	}

	// Pin the M so the flags below refer to a stable M.
	mp := acquirem()
	// Skip if we are already inside a write barrier on this M
	// (prevents recursion) or if the M is crashing.
	if mp.inwb || mp.dying > 0 {
		releasem(mp)
		return
	}
	mp.inwb = true
	// The marking work must run on the system stack.
	systemstack(func() {
		gcmarkwb_m(dst, src)
	})
	mp.inwb = false
	releasem(mp)
}
   143  
// writebarrierstring copies a two-word string header: word 0 (the
// data pointer) goes through the write barrier, word 1 (the length)
// is a plain scalar store.
//go:nosplit
func writebarrierstring(dst *[2]uintptr, src [2]uintptr) {
	writebarrierptr(&dst[0], src[0])
	dst[1] = src[1]
}
   149  
// writebarrierslice copies a three-word slice header: word 0 (the
// data pointer) goes through the write barrier; words 1 and 2
// (len and cap) are plain scalar stores.
//go:nosplit
func writebarrierslice(dst *[3]uintptr, src [3]uintptr) {
	writebarrierptr(&dst[0], src[0])
	dst[1] = src[1]
	dst[2] = src[2]
}
   156  
// writebarrieriface copies a two-word interface value; both words
// are treated as pointers and go through the write barrier.
//go:nosplit
func writebarrieriface(dst *[2]uintptr, src [2]uintptr) {
	writebarrierptr(&dst[0], src[0])
	writebarrierptr(&dst[1], src[1])
}
   162  
   163  //go:generate go run wbfat_gen.go -- wbfat.go
   164  //
   165  // The above line generates multiword write barriers for
   166  // all the combinations of ptr+scalar up to four words.
   167  // The implementations are written to wbfat.go.
   168  
// writebarrierfat copies a value of type typ from src to dst,
// applying the write barrier to each word the type's pointer mask
// marks as a pointer and doing a plain store for scalar words.
//go:nosplit
func writebarrierfat(typ *_type, dst, src unsafe.Pointer) {
	// Fast path: no barrier phase active, just copy the bytes.
	if !needwb() {
		memmove(dst, src, typ.size)
		return
	}

	systemstack(func() {
		mask := loadPtrMask(typ)
		nptr := typ.size / ptrSize
		// Each mask byte describes two words (4 bits per word),
		// so walk the words in pairs.
		for i := uintptr(0); i < nptr; i += 2 {
			bits := mask[i/2]
			// First word of the pair: bits 2-3 hold its type bits.
			if (bits>>2)&_BitsMask == _BitsPointer {
				writebarrierptr((*uintptr)(dst), *(*uintptr)(src))
			} else {
				*(*uintptr)(dst) = *(*uintptr)(src)
			}
			dst = add(dst, ptrSize)
			src = add(src, ptrSize)
			if i+1 == nptr {
				break // odd word count: no second word in this pair
			}
			bits >>= 4 // shift down to the second word's 4 bits
			if (bits>>2)&_BitsMask == _BitsPointer {
				writebarrierptr((*uintptr)(dst), *(*uintptr)(src))
			} else {
				*(*uintptr)(dst) = *(*uintptr)(src)
			}
			dst = add(dst, ptrSize)
			src = add(src, ptrSize)
		}
	})
}
   202  
// writebarriercopy implements copy() for element types containing
// pointers while the write barrier is active: it copies
// min(dst.len, src.len) elements one at a time via writebarrierfat,
// choosing copy direction so overlapping ranges are handled like
// memmove. Returns the number of elements copied.
//go:nosplit
func writebarriercopy(typ *_type, dst, src slice) int {
	n := dst.len
	if n > src.len {
		n = src.len
	}
	if n == 0 {
		return 0
	}
	dstp := unsafe.Pointer(dst.array)
	srcp := unsafe.Pointer(src.array)

	// Fast path: no barrier phase active, copy all bytes at once.
	if !needwb() {
		memmove(dstp, srcp, uintptr(n)*typ.size)
		return int(n)
	}

	systemstack(func() {
		if uintptr(srcp) < uintptr(dstp) && uintptr(srcp)+uintptr(n)*typ.size > uintptr(dstp) {
			// Overlap with src before dst.
			// Copy backward, being careful not to move dstp/srcp
			// out of the array they point into.
			dstp = add(dstp, uintptr(n-1)*typ.size)
			srcp = add(srcp, uintptr(n-1)*typ.size)
			i := uint(0)
			for {
				writebarrierfat(typ, dstp, srcp)
				if i++; i >= n {
					break
				}
				dstp = add(dstp, -typ.size)
				srcp = add(srcp, -typ.size)
			}
		} else {
			// Copy forward, being careful not to move dstp/srcp
			// out of the array they point into.
			i := uint(0)
			for {
				writebarrierfat(typ, dstp, srcp)
				if i++; i >= n {
					break
				}
				dstp = add(dstp, typ.size)
				srcp = add(srcp, typ.size)
			}
		}
	})
	return int(n)
}