github.com/letsencrypt/go@v0.0.0-20160714163537-4054769a31f6/src/runtime/mprof.go

     1  // Copyright 2009 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // Malloc profiling.
     6  // Patterned after tcmalloc's algorithms; shorter code.
     7  
     8  package runtime
     9  
    10  import (
    11  	"runtime/internal/atomic"
    12  	"unsafe"
    13  )
    14  
    15  // NOTE(rsc): Everything here could use CAS (compare-and-swap) if contention became an issue.
    16  var proflock mutex
    17  
    18  // All memory allocations are local and do not escape outside of the profiler.
    19  // The profiler is forbidden from referring to garbage-collected memory.
    20  
    21  const (
    22  	// profile types
    23  	memProfile bucketType = 1 + iota
    24  	blockProfile
    25  
    26  	// size of bucket hash table
    27  	buckHashSize = 179999
    28  
    29  	// max depth of stack to record in bucket
    30  	maxStack = 32
    31  )
    32  
    33  type bucketType int
    34  
    35  // A bucket holds per-call-stack profiling information.
    36  // The representation is a bit sleazy, inherited from C.
    37  // This struct defines the bucket header. It is followed in
    38  // memory by the stack words and then the actual record
    39  // data, either a memRecord or a blockRecord.
    40  //
    41  // Buckets are looked up by hashing the call stack into a
    42  // linked-list hash table.
    43  type bucket struct {
    44  	next    *bucket
    45  	allnext *bucket
    46  	typ     bucketType // memProfile or blockProfile
    47  	hash    uintptr
    48  	size    uintptr
    49  	nstk    uintptr
    50  }
    51  
    52  // A memRecord is the bucket data for a bucket of type memProfile,
    53  // part of the memory profile.
    54  type memRecord struct {
    55  	// The following complex 3-stage scheme of stats accumulation
    56  	// is required to obtain a consistent picture of mallocs and frees
    57  	// for some point in time.
    58  	// The problem is that mallocs come in real time, while frees
    59  	// come only after a GC during concurrent sweeping. So if we
    60  	// counted them naively, we would get a skew toward mallocs.
    61  	//
    62  	// Mallocs are accounted in recent stats.
    63  	// Explicit frees are accounted in recent stats.
    64  	// GC frees are accounted in prev stats.
    65  	// After GC prev stats are added to final stats and
    66  	// recent stats are moved into prev stats.
    67  	allocs      uintptr
    68  	frees       uintptr
    69  	alloc_bytes uintptr
    70  	free_bytes  uintptr
    71  
    72  	// changes between next-to-last GC and last GC
    73  	prev_allocs      uintptr
    74  	prev_frees       uintptr
    75  	prev_alloc_bytes uintptr
    76  	prev_free_bytes  uintptr
    77  
    78  	// changes since last GC
    79  	recent_allocs      uintptr
    80  	recent_frees       uintptr
    81  	recent_alloc_bytes uintptr
    82  	recent_free_bytes  uintptr
    83  }
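
         // To make the 3-stage scheme above concrete: a sampled malloc is counted in
         // the recent stats, moves into the prev stats at the next GC, and reaches the
         // published totals one GC later, so it cannot outrun the frees that only
         // arrive at GC time. A minimal sketch of that rotation (illustration only,
         // not part of the runtime; the counters type and flush method are made up):
         //
         //	package main
         //
         //	import "fmt"
         //
         //	// counters mirrors one column of memRecord: published, prev, recent.
         //	type counters struct{ final, prev, recent uintptr }
         //
         //	// flush mimics what mprof_GC does for each bucket after a GC.
         //	func (c *counters) flush() {
         //		c.final += c.prev
         //		c.prev = c.recent
         //		c.recent = 0
         //	}
         //
         //	func main() {
         //		var allocs counters
         //		allocs.recent++           // a sampled malloc lands in the recent stats
         //		allocs.flush()            // after the next GC it sits in the prev stats
         //		allocs.flush()            // one more GC and it is published
         //		fmt.Println(allocs.final) // 1
         //	}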
    84  
    85  // A blockRecord is the bucket data for a bucket of type blockProfile,
    86  // part of the blocking profile.
    87  type blockRecord struct {
    88  	count  int64
    89  	cycles int64
    90  }
    91  
    92  var (
    93  	mbuckets  *bucket // memory profile buckets
    94  	bbuckets  *bucket // blocking profile buckets
    95  	buckhash  *[buckHashSize]*bucket
    96  	bucketmem uintptr
    97  )
    98  
    99  // newBucket allocates a bucket with the given type and number of stack entries.
   100  func newBucket(typ bucketType, nstk int) *bucket {
   101  	size := unsafe.Sizeof(bucket{}) + uintptr(nstk)*unsafe.Sizeof(uintptr(0))
   102  	switch typ {
   103  	default:
   104  		throw("invalid profile bucket type")
   105  	case memProfile:
   106  		size += unsafe.Sizeof(memRecord{})
   107  	case blockProfile:
   108  		size += unsafe.Sizeof(blockRecord{})
   109  	}
   110  
   111  	b := (*bucket)(persistentalloc(size, 0, &memstats.buckhash_sys))
   112  	bucketmem += size
   113  	b.typ = typ
   114  	b.nstk = uintptr(nstk)
   115  	return b
   116  }
   117  
   118  // stk returns the slice in b holding the stack.
   119  func (b *bucket) stk() []uintptr {
   120  	stk := (*[maxStack]uintptr)(add(unsafe.Pointer(b), unsafe.Sizeof(*b)))
   121  	return stk[:b.nstk:b.nstk]
   122  }
   123  
   124  // mp returns the memRecord associated with the memProfile bucket b.
   125  func (b *bucket) mp() *memRecord {
   126  	if b.typ != memProfile {
   127  		throw("bad use of bucket.mp")
   128  	}
   129  	data := add(unsafe.Pointer(b), unsafe.Sizeof(*b)+b.nstk*unsafe.Sizeof(uintptr(0)))
   130  	return (*memRecord)(data)
   131  }
   132  
   133  // bp returns the blockRecord associated with the blockProfile bucket b.
   134  func (b *bucket) bp() *blockRecord {
   135  	if b.typ != blockProfile {
   136  		throw("bad use of bucket.bp")
   137  	}
   138  	data := add(unsafe.Pointer(b), unsafe.Sizeof(*b)+b.nstk*unsafe.Sizeof(uintptr(0)))
   139  	return (*blockRecord)(data)
   140  }
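
         // stk, mp and bp all decode the same single-allocation layout: the bucket
         // header, then nstk stack words, then the record. A standalone sketch of that
         // header-plus-trailing-data technique (illustration only, not part of the
         // runtime; the header and payload types are invented):
         //
         //	package main
         //
         //	import (
         //		"fmt"
         //		"unsafe"
         //	)
         //
         //	type header struct{ nstk uintptr } // stands in for the bucket header
         //	type payload struct{ count int64 } // stands in for memRecord/blockRecord
         //
         //	func main() {
         //		const nstk = 3
         //		size := unsafe.Sizeof(header{}) + nstk*unsafe.Sizeof(uintptr(0)) + unsafe.Sizeof(payload{})
         //		buf := make([]byte, size) // one allocation, like persistentalloc above
         //
         //		h := (*header)(unsafe.Pointer(&buf[0]))
         //		h.nstk = nstk
         //
         //		// The stack words start right after the header (compare bucket.stk).
         //		stk := (*[nstk]uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(h)) + unsafe.Sizeof(*h)))
         //		stk[0], stk[1], stk[2] = 0x100, 0x200, 0x300
         //
         //		// The record follows the stack words (compare bucket.mp and bucket.bp).
         //		p := (*payload)(unsafe.Pointer(uintptr(unsafe.Pointer(h)) + unsafe.Sizeof(*h) + nstk*unsafe.Sizeof(uintptr(0))))
         //		p.count = 42
         //
         //		fmt.Println(h.nstk, stk[:], p.count)
         //	}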
   141  
    142  // stkbucket returns the bucket for the given stack and size, allocating a new bucket if needed.
   143  func stkbucket(typ bucketType, size uintptr, stk []uintptr, alloc bool) *bucket {
   144  	if buckhash == nil {
   145  		buckhash = (*[buckHashSize]*bucket)(sysAlloc(unsafe.Sizeof(*buckhash), &memstats.buckhash_sys))
   146  		if buckhash == nil {
   147  			throw("runtime: cannot allocate memory")
   148  		}
   149  	}
   150  
   151  	// Hash stack.
   152  	var h uintptr
   153  	for _, pc := range stk {
   154  		h += pc
   155  		h += h << 10
   156  		h ^= h >> 6
   157  	}
   158  	// hash in size
   159  	h += size
   160  	h += h << 10
   161  	h ^= h >> 6
   162  	// finalize
   163  	h += h << 3
   164  	h ^= h >> 11
   165  
   166  	i := int(h % buckHashSize)
   167  	for b := buckhash[i]; b != nil; b = b.next {
   168  		if b.typ == typ && b.hash == h && b.size == size && eqslice(b.stk(), stk) {
   169  			return b
   170  		}
   171  	}
   172  
   173  	if !alloc {
   174  		return nil
   175  	}
   176  
   177  	// Create new bucket.
   178  	b := newBucket(typ, len(stk))
   179  	copy(b.stk(), stk)
   180  	b.hash = h
   181  	b.size = size
   182  	b.next = buckhash[i]
   183  	buckhash[i] = b
   184  	if typ == memProfile {
   185  		b.allnext = mbuckets
   186  		mbuckets = b
   187  	} else {
   188  		b.allnext = bbuckets
   189  		bbuckets = b
   190  	}
   191  	return b
   192  }
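
         // The lookup above hashes the PCs and the size with a simple add/shift/xor
         // mix, then walks a single chain of the fixed-size table. A standalone sketch
         // of just the hashing step (illustration only, not part of the runtime; the
         // PCs are made up), showing that equal stacks always select the same chain:
         //
         //	package main
         //
         //	import "fmt"
         //
         //	const buckHashSize = 179999 // same table size as above
         //
         //	// hashStack repeats the mixing done by stkbucket.
         //	func hashStack(stk []uintptr, size uintptr) uintptr {
         //		var h uintptr
         //		for _, pc := range stk {
         //			h += pc
         //			h += h << 10
         //			h ^= h >> 6
         //		}
         //		h += size
         //		h += h << 10
         //		h ^= h >> 6
         //		h += h << 3
         //		h ^= h >> 11
         //		return h
         //	}
         //
         //	func main() {
         //		stk := []uintptr{0x4010a0, 0x402b33, 0x45c1f0} // made-up PCs
         //		h1 := hashStack(stk, 64)
         //		h2 := hashStack(append([]uintptr(nil), stk...), 64)
         //		fmt.Println(h1 == h2, h1%buckHashSize) // true, and the shared chain index
         //	}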
   193  
   194  func eqslice(x, y []uintptr) bool {
   195  	if len(x) != len(y) {
   196  		return false
   197  	}
   198  	for i, xi := range x {
   199  		if xi != y[i] {
   200  			return false
   201  		}
   202  	}
   203  	return true
   204  }
   205  
   206  func mprof_GC() {
   207  	for b := mbuckets; b != nil; b = b.allnext {
   208  		mp := b.mp()
   209  		mp.allocs += mp.prev_allocs
   210  		mp.frees += mp.prev_frees
   211  		mp.alloc_bytes += mp.prev_alloc_bytes
   212  		mp.free_bytes += mp.prev_free_bytes
   213  
   214  		mp.prev_allocs = mp.recent_allocs
   215  		mp.prev_frees = mp.recent_frees
   216  		mp.prev_alloc_bytes = mp.recent_alloc_bytes
   217  		mp.prev_free_bytes = mp.recent_free_bytes
   218  
   219  		mp.recent_allocs = 0
   220  		mp.recent_frees = 0
   221  		mp.recent_alloc_bytes = 0
   222  		mp.recent_free_bytes = 0
   223  	}
   224  }
   225  
   226  // Record that a gc just happened: all the 'recent' statistics are now real.
   227  func mProf_GC() {
   228  	lock(&proflock)
   229  	mprof_GC()
   230  	unlock(&proflock)
   231  }
   232  
   233  // Called by malloc to record a profiled block.
   234  func mProf_Malloc(p unsafe.Pointer, size uintptr) {
   235  	var stk [maxStack]uintptr
   236  	nstk := callers(4, stk[:])
   237  	lock(&proflock)
   238  	b := stkbucket(memProfile, size, stk[:nstk], true)
   239  	mp := b.mp()
   240  	mp.recent_allocs++
   241  	mp.recent_alloc_bytes += size
   242  	unlock(&proflock)
   243  
    244  	// setprofilebucket locks a bunch of other mutexes, so we call it outside of proflock.
    245  	// This reduces potential contention and the chance of deadlock.
    246  	// Since the object must be alive during the call to mProf_Malloc,
   247  	// it's fine to do this non-atomically.
   248  	systemstack(func() {
   249  		setprofilebucket(p, b)
   250  	})
   251  }
   252  
   253  // Called when freeing a profiled block.
   254  func mProf_Free(b *bucket, size uintptr) {
   255  	lock(&proflock)
   256  	mp := b.mp()
   257  	mp.prev_frees++
   258  	mp.prev_free_bytes += size
   259  	unlock(&proflock)
   260  }
   261  
   262  var blockprofilerate uint64 // in CPU ticks
   263  
   264  // SetBlockProfileRate controls the fraction of goroutine blocking events
   265  // that are reported in the blocking profile. The profiler aims to sample
   266  // an average of one blocking event per rate nanoseconds spent blocked.
   267  //
   268  // To include every blocking event in the profile, pass rate = 1.
   269  // To turn off profiling entirely, pass rate <= 0.
   270  func SetBlockProfileRate(rate int) {
   271  	var r int64
   272  	if rate <= 0 {
   273  		r = 0 // disable profiling
   274  	} else if rate == 1 {
   275  		r = 1 // profile everything
   276  	} else {
   277  		// convert ns to cycles, use float64 to prevent overflow during multiplication
   278  		r = int64(float64(rate) * float64(tickspersecond()) / (1000 * 1000 * 1000))
   279  		if r == 0 {
   280  			r = 1
   281  		}
   282  	}
   283  
   284  	atomic.Store64(&blockprofilerate, uint64(r))
   285  }
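
         // A usage sketch (illustration only, not part of the runtime): enable block
         // profiling early, create some contention, and dump the result through
         // runtime/pprof's "block" profile, which is backed by the buckets below.
         //
         //	package main
         //
         //	import (
         //		"os"
         //		"runtime"
         //		"runtime/pprof"
         //		"sync"
         //		"time"
         //	)
         //
         //	func main() {
         //		runtime.SetBlockProfileRate(1) // record every blocking event
         //
         //		var mu sync.Mutex
         //		mu.Lock()
         //		go func() {
         //			time.Sleep(10 * time.Millisecond)
         //			mu.Unlock()
         //		}()
         //		mu.Lock() // blocks for ~10ms and should appear in the profile
         //		mu.Unlock()
         //
         //		// debug=1 prints a human-readable form to stdout.
         //		pprof.Lookup("block").WriteTo(os.Stdout, 1)
         //	}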
   286  
   287  func blockevent(cycles int64, skip int) {
   288  	if cycles <= 0 {
   289  		cycles = 1
   290  	}
   291  	rate := int64(atomic.Load64(&blockprofilerate))
   292  	if rate <= 0 || (rate > cycles && int64(fastrand1())%rate > cycles) {
   293  		return
   294  	}
   295  	gp := getg()
   296  	var nstk int
   297  	var stk [maxStack]uintptr
   298  	if gp.m.curg == nil || gp.m.curg == gp {
   299  		nstk = callers(skip, stk[:])
   300  	} else {
   301  		nstk = gcallers(gp.m.curg, skip, stk[:])
   302  	}
   303  	lock(&proflock)
   304  	b := stkbucket(blockProfile, 0, stk[:nstk], true)
   305  	b.bp().count++
   306  	b.bp().cycles += cycles
   307  	unlock(&proflock)
   308  }
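
         // The check above drops an event only when a rate is set, the event is
         // shorter than the rate, and a random draw modulo the rate lands beyond its
         // length: events of at least rate cycles are always kept, shorter ones are
         // kept with probability roughly cycles/rate. A sketch of that decision in
         // isolation (illustration only, not part of the runtime; math/rand stands in
         // for fastrand1):
         //
         //	package main
         //
         //	import (
         //		"fmt"
         //		"math/rand"
         //	)
         //
         //	// sampled mirrors the decision in blockevent.
         //	func sampled(cycles, rate int64) bool {
         //		if rate <= 0 {
         //			return false
         //		}
         //		if rate <= cycles {
         //			return true // long events always pass
         //		}
         //		return rand.Int63()%rate <= cycles
         //	}
         //
         //	func main() {
         //		const rate = 1000 // pretend SetBlockProfileRate chose 1000 cycles
         //		hits := 0
         //		for i := 0; i < 1000000; i++ {
         //			if sampled(100, rate) {
         //				hits++
         //			}
         //		}
         //		fmt.Println(hits) // roughly cycles/rate of the trials, i.e. about 10%
         //	}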
   309  
   310  // Go interface to profile data.
   311  
   312  // A StackRecord describes a single execution stack.
   313  type StackRecord struct {
   314  	Stack0 [32]uintptr // stack trace for this record; ends at first 0 entry
   315  }
   316  
   317  // Stack returns the stack trace associated with the record,
   318  // a prefix of r.Stack0.
   319  func (r *StackRecord) Stack() []uintptr {
   320  	for i, v := range r.Stack0 {
   321  		if v == 0 {
   322  			return r.Stack0[0:i]
   323  		}
   324  	}
   325  	return r.Stack0[0:]
   326  }
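
         // A small sketch (illustration only, not part of the runtime) of how the raw
         // PCs in a StackRecord are usually rendered: fill one with runtime.Callers
         // and walk it with runtime.CallersFrames.
         //
         //	package main
         //
         //	import (
         //		"fmt"
         //		"runtime"
         //	)
         //
         //	func main() {
         //		var rec runtime.StackRecord
         //		runtime.Callers(1, rec.Stack0[:]) // unused entries stay 0, so Stack() works
         //
         //		frames := runtime.CallersFrames(rec.Stack())
         //		for {
         //			frame, more := frames.Next()
         //			fmt.Printf("%s\n\t%s:%d\n", frame.Function, frame.File, frame.Line)
         //			if !more {
         //				break
         //			}
         //		}
         //	}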
   327  
   328  // MemProfileRate controls the fraction of memory allocations
   329  // that are recorded and reported in the memory profile.
   330  // The profiler aims to sample an average of
   331  // one allocation per MemProfileRate bytes allocated.
   332  //
   333  // To include every allocated block in the profile, set MemProfileRate to 1.
   334  // To turn off profiling entirely, set MemProfileRate to 0.
   335  //
   336  // The tools that process the memory profiles assume that the
   337  // profile rate is constant across the lifetime of the program
   338  // and equal to the current value. Programs that change the
   339  // memory profiling rate should do so just once, as early as
   340  // possible in the execution of the program (for example,
   341  // at the beginning of main).
   342  var MemProfileRate int = 512 * 1024
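
         // A usage sketch (illustration only, not part of the runtime; "mem.pprof" is
         // an arbitrary name): raise the rate once at the top of main, then write the
         // heap profile through runtime/pprof, the usual consumer of this data.
         //
         //	package main
         //
         //	import (
         //		"os"
         //		"runtime"
         //		"runtime/pprof"
         //	)
         //
         //	var sink [][]byte // keeps allocations reachable so they show up as in-use
         //
         //	func main() {
         //		// Per the note above: set this once, as early as possible.
         //		runtime.MemProfileRate = 1 // sample every allocation (costly; fine for tests)
         //
         //		for i := 0; i < 1000; i++ {
         //			sink = append(sink, make([]byte, 1<<10))
         //		}
         //
         //		runtime.GC() // not required, but publishes the most recent stats (see mProf_GC)
         //		f, err := os.Create("mem.pprof")
         //		if err != nil {
         //			panic(err)
         //		}
         //		defer f.Close()
         //		if err := pprof.WriteHeapProfile(f); err != nil {
         //			panic(err)
         //		}
         //	}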
   343  
   344  // A MemProfileRecord describes the live objects allocated
   345  // by a particular call sequence (stack trace).
   346  type MemProfileRecord struct {
   347  	AllocBytes, FreeBytes     int64       // number of bytes allocated, freed
   348  	AllocObjects, FreeObjects int64       // number of objects allocated, freed
   349  	Stack0                    [32]uintptr // stack trace for this record; ends at first 0 entry
   350  }
   351  
   352  // InUseBytes returns the number of bytes in use (AllocBytes - FreeBytes).
   353  func (r *MemProfileRecord) InUseBytes() int64 { return r.AllocBytes - r.FreeBytes }
   354  
   355  // InUseObjects returns the number of objects in use (AllocObjects - FreeObjects).
   356  func (r *MemProfileRecord) InUseObjects() int64 {
   357  	return r.AllocObjects - r.FreeObjects
   358  }
   359  
   360  // Stack returns the stack trace associated with the record,
   361  // a prefix of r.Stack0.
   362  func (r *MemProfileRecord) Stack() []uintptr {
   363  	for i, v := range r.Stack0 {
   364  		if v == 0 {
   365  			return r.Stack0[0:i]
   366  		}
   367  	}
   368  	return r.Stack0[0:]
   369  }
   370  
   371  // MemProfile returns a profile of memory allocated and freed per allocation
   372  // site.
   373  //
   374  // MemProfile returns n, the number of records in the current memory profile.
   375  // If len(p) >= n, MemProfile copies the profile into p and returns n, true.
   376  // If len(p) < n, MemProfile does not change p and returns n, false.
   377  //
   378  // If inuseZero is true, the profile includes allocation records
   379  // where r.AllocBytes > 0 but r.AllocBytes == r.FreeBytes.
   380  // These are sites where memory was allocated, but it has all
   381  // been released back to the runtime.
   382  //
   383  // The returned profile may be up to two garbage collection cycles old.
   384  // This is to avoid skewing the profile toward allocations; because
   385  // allocations happen in real time but frees are delayed until the garbage
   386  // collector performs sweeping, the profile only accounts for allocations
   387  // that have had a chance to be freed by the garbage collector.
   388  //
   389  // Most clients should use the runtime/pprof package or
   390  // the testing package's -test.memprofile flag instead
   391  // of calling MemProfile directly.
   392  func MemProfile(p []MemProfileRecord, inuseZero bool) (n int, ok bool) {
   393  	lock(&proflock)
   394  	clear := true
   395  	for b := mbuckets; b != nil; b = b.allnext {
   396  		mp := b.mp()
   397  		if inuseZero || mp.alloc_bytes != mp.free_bytes {
   398  			n++
   399  		}
   400  		if mp.allocs != 0 || mp.frees != 0 {
   401  			clear = false
   402  		}
   403  	}
   404  	if clear {
    405  		// Absolutely no data, suggesting that a garbage collection
    406  		// has not yet happened. To allow profiling when garbage collection
    407  		// is disabled from the beginning of execution, accumulate stats
    408  		// as if two GCs just happened (recent -> prev -> final), and recount buckets.
   409  		mprof_GC()
   410  		mprof_GC()
   411  		n = 0
   412  		for b := mbuckets; b != nil; b = b.allnext {
   413  			mp := b.mp()
   414  			if inuseZero || mp.alloc_bytes != mp.free_bytes {
   415  				n++
   416  			}
   417  		}
   418  	}
   419  	if n <= len(p) {
   420  		ok = true
   421  		idx := 0
   422  		for b := mbuckets; b != nil; b = b.allnext {
   423  			mp := b.mp()
   424  			if inuseZero || mp.alloc_bytes != mp.free_bytes {
   425  				record(&p[idx], b)
   426  				idx++
   427  			}
   428  		}
   429  	}
   430  	unlock(&proflock)
   431  	return
   432  }
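
         // Calling MemProfile directly follows the sizing contract documented above:
         // ask for n, allocate a little extra, and retry until the copy succeeds,
         // since records can appear between the two calls. A sketch (illustration
         // only, not part of the runtime):
         //
         //	package main
         //
         //	import (
         //		"fmt"
         //		"runtime"
         //	)
         //
         //	func main() {
         //		var records []runtime.MemProfileRecord
         //		n, ok := runtime.MemProfile(nil, true)
         //		for !ok {
         //			records = make([]runtime.MemProfileRecord, n+50) // headroom for new records
         //			n, ok = runtime.MemProfile(records, true)
         //		}
         //		records = records[:n]
         //
         //		var inUse int64
         //		for i := range records {
         //			inUse += records[i].InUseBytes()
         //		}
         //		fmt.Printf("%d records, %d bytes in use\n", n, inUse)
         //	}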
   433  
   434  // Write b's data to r.
   435  func record(r *MemProfileRecord, b *bucket) {
   436  	mp := b.mp()
   437  	r.AllocBytes = int64(mp.alloc_bytes)
   438  	r.FreeBytes = int64(mp.free_bytes)
   439  	r.AllocObjects = int64(mp.allocs)
   440  	r.FreeObjects = int64(mp.frees)
   441  	copy(r.Stack0[:], b.stk())
   442  	for i := int(b.nstk); i < len(r.Stack0); i++ {
   443  		r.Stack0[i] = 0
   444  	}
   445  }
   446  
   447  func iterate_memprof(fn func(*bucket, uintptr, *uintptr, uintptr, uintptr, uintptr)) {
   448  	lock(&proflock)
   449  	for b := mbuckets; b != nil; b = b.allnext {
   450  		mp := b.mp()
   451  		fn(b, b.nstk, &b.stk()[0], b.size, mp.allocs, mp.frees)
   452  	}
   453  	unlock(&proflock)
   454  }
   455  
    456  // BlockProfileRecord describes blocking events originating
   457  // at a particular call sequence (stack trace).
   458  type BlockProfileRecord struct {
   459  	Count  int64
   460  	Cycles int64
   461  	StackRecord
   462  }
   463  
   464  // BlockProfile returns n, the number of records in the current blocking profile.
   465  // If len(p) >= n, BlockProfile copies the profile into p and returns n, true.
   466  // If len(p) < n, BlockProfile does not change p and returns n, false.
   467  //
   468  // Most clients should use the runtime/pprof package or
   469  // the testing package's -test.blockprofile flag instead
   470  // of calling BlockProfile directly.
   471  func BlockProfile(p []BlockProfileRecord) (n int, ok bool) {
   472  	lock(&proflock)
   473  	for b := bbuckets; b != nil; b = b.allnext {
   474  		n++
   475  	}
   476  	if n <= len(p) {
   477  		ok = true
   478  		for b := bbuckets; b != nil; b = b.allnext {
   479  			bp := b.bp()
   480  			r := &p[0]
   481  			r.Count = bp.count
   482  			r.Cycles = bp.cycles
   483  			i := copy(r.Stack0[:], b.stk())
   484  			for ; i < len(r.Stack0); i++ {
   485  				r.Stack0[i] = 0
   486  			}
   487  			p = p[1:]
   488  		}
   489  	}
   490  	unlock(&proflock)
   491  	return
   492  }
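
         // The same sizing contract applies here. A sketch (illustration only, not
         // part of the runtime) that prints per-site event counts and accumulated cycles:
         //
         //	package main
         //
         //	import (
         //		"fmt"
         //		"runtime"
         //	)
         //
         //	func main() {
         //		runtime.SetBlockProfileRate(1)
         //		// ... run a workload that blocks on channels, mutexes, and so on ...
         //
         //		n, ok := runtime.BlockProfile(nil)
         //		var records []runtime.BlockProfileRecord
         //		for !ok {
         //			records = make([]runtime.BlockProfileRecord, n+50)
         //			n, ok = runtime.BlockProfile(records)
         //		}
         //		for _, r := range records[:n] {
         //			fmt.Printf("count=%d cycles=%d frames=%d\n", r.Count, r.Cycles, len(r.Stack()))
         //		}
         //	}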
   493  
   494  // ThreadCreateProfile returns n, the number of records in the thread creation profile.
   495  // If len(p) >= n, ThreadCreateProfile copies the profile into p and returns n, true.
   496  // If len(p) < n, ThreadCreateProfile does not change p and returns n, false.
   497  //
   498  // Most clients should use the runtime/pprof package instead
   499  // of calling ThreadCreateProfile directly.
   500  func ThreadCreateProfile(p []StackRecord) (n int, ok bool) {
   501  	first := (*m)(atomic.Loadp(unsafe.Pointer(&allm)))
   502  	for mp := first; mp != nil; mp = mp.alllink {
   503  		n++
   504  	}
   505  	if n <= len(p) {
   506  		ok = true
   507  		i := 0
   508  		for mp := first; mp != nil; mp = mp.alllink {
   509  			p[i].Stack0 = mp.createstack
   510  			i++
   511  		}
   512  	}
   513  	return
   514  }
   515  
   516  // GoroutineProfile returns n, the number of records in the active goroutine stack profile.
   517  // If len(p) >= n, GoroutineProfile copies the profile into p and returns n, true.
   518  // If len(p) < n, GoroutineProfile does not change p and returns n, false.
   519  //
   520  // Most clients should use the runtime/pprof package instead
   521  // of calling GoroutineProfile directly.
   522  func GoroutineProfile(p []StackRecord) (n int, ok bool) {
   523  	gp := getg()
   524  
   525  	isOK := func(gp1 *g) bool {
   526  		// Checking isSystemGoroutine here makes GoroutineProfile
   527  		// consistent with both NumGoroutine and Stack.
   528  		return gp1 != gp && readgstatus(gp1) != _Gdead && !isSystemGoroutine(gp1)
   529  	}
   530  
   531  	stopTheWorld("profile")
   532  
   533  	n = 1
   534  	for _, gp1 := range allgs {
   535  		if isOK(gp1) {
   536  			n++
   537  		}
   538  	}
   539  
   540  	if n <= len(p) {
   541  		ok = true
   542  		r := p
   543  
   544  		// Save current goroutine.
   545  		sp := getcallersp(unsafe.Pointer(&p))
   546  		pc := getcallerpc(unsafe.Pointer(&p))
   547  		systemstack(func() {
   548  			saveg(pc, sp, gp, &r[0])
   549  		})
   550  		r = r[1:]
   551  
   552  		// Save other goroutines.
   553  		for _, gp1 := range allgs {
   554  			if isOK(gp1) {
   555  				if len(r) == 0 {
   556  					// Should be impossible, but better to return a
   557  					// truncated profile than to crash the entire process.
   558  					break
   559  				}
   560  				saveg(^uintptr(0), ^uintptr(0), gp1, &r[0])
   561  				r = r[1:]
   562  			}
   563  		}
   564  	}
   565  
   566  	startTheWorld()
   567  
   568  	return n, ok
   569  }
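
         // A sketch (illustration only, not part of the runtime): size the slice from
         // NumGoroutine plus some slack, since goroutines can be created between the
         // two calls, which is exactly the case the truncation check above guards against.
         //
         //	package main
         //
         //	import (
         //		"fmt"
         //		"runtime"
         //	)
         //
         //	func main() {
         //		for i := 0; i < 4; i++ {
         //			go func() { select {} }() // park a few goroutines, purely for illustration
         //		}
         //
         //		records := make([]runtime.StackRecord, runtime.NumGoroutine()+10)
         //		n, ok := runtime.GoroutineProfile(records)
         //		if !ok {
         //			return // more goroutines appeared than we allowed for; grow and retry in real code
         //		}
         //		for _, r := range records[:n] {
         //			fmt.Println(len(r.Stack()), "frames")
         //		}
         //	}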
   570  
   571  func saveg(pc, sp uintptr, gp *g, r *StackRecord) {
   572  	n := gentraceback(pc, sp, 0, gp, 0, &r.Stack0[0], len(r.Stack0), nil, nil, 0)
   573  	if n < len(r.Stack0) {
   574  		r.Stack0[n] = 0
   575  	}
   576  }
   577  
   578  // Stack formats a stack trace of the calling goroutine into buf
   579  // and returns the number of bytes written to buf.
   580  // If all is true, Stack formats stack traces of all other goroutines
   581  // into buf after the trace for the current goroutine.
   582  func Stack(buf []byte, all bool) int {
   583  	if all {
   584  		stopTheWorld("stack trace")
   585  	}
   586  
   587  	n := 0
   588  	if len(buf) > 0 {
   589  		gp := getg()
   590  		sp := getcallersp(unsafe.Pointer(&buf))
   591  		pc := getcallerpc(unsafe.Pointer(&buf))
   592  		systemstack(func() {
   593  			g0 := getg()
   594  			// Force traceback=1 to override GOTRACEBACK setting,
   595  			// so that Stack's results are consistent.
   596  			// GOTRACEBACK is only about crash dumps.
   597  			g0.m.traceback = 1
   598  			g0.writebuf = buf[0:0:len(buf)]
   599  			goroutineheader(gp)
   600  			traceback(pc, sp, 0, gp)
   601  			if all {
   602  				tracebackothers(gp)
   603  			}
   604  			g0.m.traceback = 0
   605  			n = len(g0.writebuf)
   606  			g0.writebuf = nil
   607  		})
   608  	}
   609  
   610  	if all {
   611  		startTheWorld()
   612  	}
   613  	return n
   614  }
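
         // A usage sketch (illustration only, not part of the runtime): capture traces
         // of every goroutine into a fixed buffer; 1 MB is an arbitrary size and the
         // output is truncated if it does not fit.
         //
         //	package main
         //
         //	import (
         //		"fmt"
         //		"runtime"
         //	)
         //
         //	func main() {
         //		buf := make([]byte, 1<<20) // 1 MB; Stack stops writing if this is too small
         //		n := runtime.Stack(buf, true)
         //		fmt.Printf("%s\n", buf[:n])
         //	}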
   615  
   616  // Tracing of alloc/free/gc.
   617  
   618  var tracelock mutex
   619  
   620  func tracealloc(p unsafe.Pointer, size uintptr, typ *_type) {
   621  	lock(&tracelock)
   622  	gp := getg()
   623  	gp.m.traceback = 2
   624  	if typ == nil {
   625  		print("tracealloc(", p, ", ", hex(size), ")\n")
   626  	} else {
   627  		print("tracealloc(", p, ", ", hex(size), ", ", typ.string(), ")\n")
   628  	}
   629  	if gp.m.curg == nil || gp == gp.m.curg {
   630  		goroutineheader(gp)
   631  		pc := getcallerpc(unsafe.Pointer(&p))
   632  		sp := getcallersp(unsafe.Pointer(&p))
   633  		systemstack(func() {
   634  			traceback(pc, sp, 0, gp)
   635  		})
   636  	} else {
   637  		goroutineheader(gp.m.curg)
   638  		traceback(^uintptr(0), ^uintptr(0), 0, gp.m.curg)
   639  	}
   640  	print("\n")
   641  	gp.m.traceback = 0
   642  	unlock(&tracelock)
   643  }
   644  
   645  func tracefree(p unsafe.Pointer, size uintptr) {
   646  	lock(&tracelock)
   647  	gp := getg()
   648  	gp.m.traceback = 2
   649  	print("tracefree(", p, ", ", hex(size), ")\n")
   650  	goroutineheader(gp)
   651  	pc := getcallerpc(unsafe.Pointer(&p))
   652  	sp := getcallersp(unsafe.Pointer(&p))
   653  	systemstack(func() {
   654  		traceback(pc, sp, 0, gp)
   655  	})
   656  	print("\n")
   657  	gp.m.traceback = 0
   658  	unlock(&tracelock)
   659  }
   660  
   661  func tracegc() {
   662  	lock(&tracelock)
   663  	gp := getg()
   664  	gp.m.traceback = 2
   665  	print("tracegc()\n")
   666  	// running on m->g0 stack; show all non-g0 goroutines
   667  	tracebackothers(gp)
   668  	print("end tracegc\n")
   669  	print("\n")
   670  	gp.m.traceback = 0
   671  	unlock(&tracelock)
   672  }