github.com/aloncn/graphics-go@v0.0.1/src/runtime/mprof.go

     1  // Copyright 2009 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // Malloc profiling.
     6  // Patterned after tcmalloc's algorithms; shorter code.
     7  
     8  package runtime
     9  
    10  import (
    11  	"runtime/internal/atomic"
    12  	"unsafe"
    13  )
    14  
    15  // NOTE(rsc): Everything here could use cas if contention became an issue.
    16  var proflock mutex
    17  
    18  // All memory allocations are local and do not escape outside of the profiler.
    19  // The profiler is forbidden from referring to garbage-collected memory.
    20  
    21  const (
    22  	// profile types
    23  	memProfile bucketType = 1 + iota
    24  	blockProfile
    25  
    26  	// size of bucket hash table
    27  	buckHashSize = 179999
    28  
    29  	// max depth of stack to record in bucket
    30  	maxStack = 32
    31  )
    32  
    33  type bucketType int
    34  
    35  // A bucket holds per-call-stack profiling information.
    36  // The representation is a bit sleazy, inherited from C.
    37  // This struct defines the bucket header. It is followed in
    38  // memory by the stack words and then the actual record
    39  // data, either a memRecord or a blockRecord.
    40  //
    41  // Buckets are looked up by hashing the call stack into a
    42  // linked-list hash table.
    43  type bucket struct {
    44  	next    *bucket
    45  	allnext *bucket
    46  	typ     bucketType // memProfile or blockProfile
    47  	hash    uintptr
    48  	size    uintptr
    49  	nstk    uintptr
    50  }
    51  
    52  // A memRecord is the bucket data for a bucket of type memProfile,
    53  // part of the memory profile.
    54  type memRecord struct {
    55  	// The following complex 3-stage scheme of stats accumulation
    56  	// is required to obtain a consistent picture of mallocs and frees
    57  	// for some point in time.
    58  	// The problem is that mallocs come in real time, while frees
    59  	// come only after a GC during concurrent sweeping. So if we
    60  	// counted them naively, we would get a skew toward mallocs.
    61  	//
    62  	// Mallocs are accounted in recent stats.
    63  	// Explicit frees are accounted in recent stats.
    64  	// GC frees are accounted in prev stats.
    65  	// After GC prev stats are added to final stats and
    66  	// recent stats are moved into prev stats.
    67  	allocs      uintptr
    68  	frees       uintptr
    69  	alloc_bytes uintptr
    70  	free_bytes  uintptr
    71  
    72  	// changes between next-to-last GC and last GC
    73  	prev_allocs      uintptr
    74  	prev_frees       uintptr
    75  	prev_alloc_bytes uintptr
    76  	prev_free_bytes  uintptr
    77  
    78  	// changes since last GC
    79  	recent_allocs      uintptr
    80  	recent_frees       uintptr
    81  	recent_alloc_bytes uintptr
    82  	recent_free_bytes  uintptr
    83  }
    84  
    85  // A blockRecord is the bucket data for a bucket of type blockProfile,
    86  // part of the blocking profile.
    87  type blockRecord struct {
    88  	count  int64
    89  	cycles int64
    90  }
    91  
    92  var (
    93  	mbuckets  *bucket // memory profile buckets
    94  	bbuckets  *bucket // blocking profile buckets
    95  	buckhash  *[buckHashSize]*bucket
    96  	bucketmem uintptr
    97  )
    98  
    99  // newBucket allocates a bucket with the given type and number of stack entries.
   100  func newBucket(typ bucketType, nstk int) *bucket {
   101  	size := unsafe.Sizeof(bucket{}) + uintptr(nstk)*unsafe.Sizeof(uintptr(0))
   102  	switch typ {
   103  	default:
   104  		throw("invalid profile bucket type")
   105  	case memProfile:
   106  		size += unsafe.Sizeof(memRecord{})
   107  	case blockProfile:
   108  		size += unsafe.Sizeof(blockRecord{})
   109  	}
   110  
   111  	b := (*bucket)(persistentalloc(size, 0, &memstats.buckhash_sys))
   112  	bucketmem += size
   113  	b.typ = typ
   114  	b.nstk = uintptr(nstk)
   115  	return b
   116  }
   117  
   118  // stk returns the slice in b holding the stack.
   119  func (b *bucket) stk() []uintptr {
   120  	stk := (*[maxStack]uintptr)(add(unsafe.Pointer(b), unsafe.Sizeof(*b)))
   121  	return stk[:b.nstk:b.nstk]
   122  }
   123  
   124  // mp returns the memRecord associated with the memProfile bucket b.
   125  func (b *bucket) mp() *memRecord {
   126  	if b.typ != memProfile {
   127  		throw("bad use of bucket.mp")
   128  	}
   129  	data := add(unsafe.Pointer(b), unsafe.Sizeof(*b)+b.nstk*unsafe.Sizeof(uintptr(0)))
   130  	return (*memRecord)(data)
   131  }
   132  
   133  // bp returns the blockRecord associated with the blockProfile bucket b.
   134  func (b *bucket) bp() *blockRecord {
   135  	if b.typ != blockProfile {
   136  		throw("bad use of bucket.bp")
   137  	}
   138  	data := add(unsafe.Pointer(b), unsafe.Sizeof(*b)+b.nstk*unsafe.Sizeof(uintptr(0)))
   139  	return (*blockRecord)(data)
   140  }
   141  
   142  // stkbucket returns the bucket for stk, allocating a new bucket if needed.
   143  func stkbucket(typ bucketType, size uintptr, stk []uintptr, alloc bool) *bucket {
   144  	if buckhash == nil {
   145  		buckhash = (*[buckHashSize]*bucket)(sysAlloc(unsafe.Sizeof(*buckhash), &memstats.buckhash_sys))
   146  		if buckhash == nil {
   147  			throw("runtime: cannot allocate memory")
   148  		}
   149  	}
   150  
   151  	// Hash stack.
   152  	var h uintptr
   153  	for _, pc := range stk {
   154  		h += pc
   155  		h += h << 10
   156  		h ^= h >> 6
   157  	}
   158  	// hash in size
   159  	h += size
   160  	h += h << 10
   161  	h ^= h >> 6
   162  	// finalize
   163  	h += h << 3
   164  	h ^= h >> 11
   165  
   166  	i := int(h % buckHashSize)
   167  	for b := buckhash[i]; b != nil; b = b.next {
   168  		if b.typ == typ && b.hash == h && b.size == size && eqslice(b.stk(), stk) {
   169  			return b
   170  		}
   171  	}
   172  
   173  	if !alloc {
   174  		return nil
   175  	}
   176  
   177  	// Create new bucket.
   178  	b := newBucket(typ, len(stk))
   179  	copy(b.stk(), stk)
   180  	b.hash = h
   181  	b.size = size
   182  	b.next = buckhash[i]
   183  	buckhash[i] = b
   184  	if typ == memProfile {
   185  		b.allnext = mbuckets
   186  		mbuckets = b
   187  	} else {
   188  		b.allnext = bbuckets
   189  		bbuckets = b
   190  	}
   191  	return b
   192  }
   193  
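// eqslice reports whether the two stacks x and y hold the same PCs.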
   194  func eqslice(x, y []uintptr) bool {
   195  	if len(x) != len(y) {
   196  		return false
   197  	}
   198  	for i, xi := range x {
   199  		if xi != y[i] {
   200  			return false
   201  		}
   202  	}
   203  	return true
   204  }
   205  
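// mprof_GC rotates the three-stage memory profile counters after a GC:
// prev counts are folded into the final (reported) counts, recent counts
// become prev, and recent is reset. The caller must hold proflock.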
   206  func mprof_GC() {
   207  	for b := mbuckets; b != nil; b = b.allnext {
   208  		mp := b.mp()
   209  		mp.allocs += mp.prev_allocs
   210  		mp.frees += mp.prev_frees
   211  		mp.alloc_bytes += mp.prev_alloc_bytes
   212  		mp.free_bytes += mp.prev_free_bytes
   213  
   214  		mp.prev_allocs = mp.recent_allocs
   215  		mp.prev_frees = mp.recent_frees
   216  		mp.prev_alloc_bytes = mp.recent_alloc_bytes
   217  		mp.prev_free_bytes = mp.recent_free_bytes
   218  
   219  		mp.recent_allocs = 0
   220  		mp.recent_frees = 0
   221  		mp.recent_alloc_bytes = 0
   222  		mp.recent_free_bytes = 0
   223  	}
   224  }
   225  
   226  // Record that a gc just happened: all the 'recent' statistics are now real.
   227  func mProf_GC() {
   228  	lock(&proflock)
   229  	mprof_GC()
   230  	unlock(&proflock)
   231  }
   232  
   233  // Called by malloc to record a profiled block.
   234  func mProf_Malloc(p unsafe.Pointer, size uintptr) {
   235  	var stk [maxStack]uintptr
   236  	nstk := callers(4, stk[:])
   237  	lock(&proflock)
   238  	b := stkbucket(memProfile, size, stk[:nstk], true)
   239  	mp := b.mp()
   240  	mp.recent_allocs++
   241  	mp.recent_alloc_bytes += size
   242  	unlock(&proflock)
   243  
   244  	// Setprofilebucket locks a bunch of other mutexes, so we call it outside of proflock.
   245  	// This reduces potential contention and chances of deadlocks.
   246  	// Since the object must be alive during call to mProf_Malloc,
   247  	// it's fine to do this non-atomically.
   248  	systemstack(func() {
   249  		setprofilebucket(p, b)
   250  	})
   251  }
   252  
   253  // Called when freeing a profiled block.
   254  func mProf_Free(b *bucket, size uintptr) {
   255  	lock(&proflock)
   256  	mp := b.mp()
   257  	mp.prev_frees++
   258  	mp.prev_free_bytes += size
   259  	unlock(&proflock)
   260  }
   261  
   262  var blockprofilerate uint64 // in CPU ticks
   263  
   264  // SetBlockProfileRate controls the fraction of goroutine blocking events
   265  // that are reported in the blocking profile.  The profiler aims to sample
   266  // an average of one blocking event per rate nanoseconds spent blocked.
   267  //
   268  // To include every blocking event in the profile, pass rate = 1.
   269  // To turn off profiling entirely, pass rate <= 0.
   270  func SetBlockProfileRate(rate int) {
   271  	var r int64
   272  	if rate <= 0 {
   273  		r = 0 // disable profiling
   274  	} else if rate == 1 {
   275  		r = 1 // profile everything
   276  	} else {
   277  		// convert ns to cycles, use float64 to prevent overflow during multiplication
   278  		r = int64(float64(rate) * float64(tickspersecond()) / (1000 * 1000 * 1000))
   279  		if r == 0 {
   280  			r = 1
   281  		}
   282  	}
   283  
   284  	atomic.Store64(&blockprofilerate, uint64(r))
   285  }
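// Illustrative usage sketch (not part of this file): a program outside the
// runtime would typically enable block profiling and then write the profile
// out through runtime/pprof. The destination (os.Stdout) and debug level (1)
// are arbitrary example choices.
//
//	package main
//
//	import (
//		"os"
//		"runtime"
//		"runtime/pprof"
//	)
//
//	func main() {
//		runtime.SetBlockProfileRate(1) // sample every blocking event
//		// ... run the workload of interest ...
//		pprof.Lookup("block").WriteTo(os.Stdout, 1)
//	}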
   286  
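// blockevent records a goroutine blocking event that lasted the given
// number of CPU cycles, subject to sampling by blockprofilerate.
// skip is the number of stack frames to omit from the recorded stack.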
   287  func blockevent(cycles int64, skip int) {
   288  	if cycles <= 0 {
   289  		cycles = 1
   290  	}
   291  	rate := int64(atomic.Load64(&blockprofilerate))
   292  	if rate <= 0 || (rate > cycles && int64(fastrand1())%rate > cycles) {
   293  		return
   294  	}
   295  	gp := getg()
   296  	var nstk int
   297  	var stk [maxStack]uintptr
   298  	if gp.m.curg == nil || gp.m.curg == gp {
   299  		nstk = callers(skip, stk[:])
   300  	} else {
   301  		nstk = gcallers(gp.m.curg, skip, stk[:])
   302  	}
   303  	lock(&proflock)
   304  	b := stkbucket(blockProfile, 0, stk[:nstk], true)
   305  	b.bp().count++
   306  	b.bp().cycles += cycles
   307  	unlock(&proflock)
   308  }
   309  
   310  // Go interface to profile data.
   311  
   312  // A StackRecord describes a single execution stack.
   313  type StackRecord struct {
   314  	Stack0 [32]uintptr // stack trace for this record; ends at first 0 entry
   315  }
   316  
   317  // Stack returns the stack trace associated with the record,
   318  // a prefix of r.Stack0.
   319  func (r *StackRecord) Stack() []uintptr {
   320  	for i, v := range r.Stack0 {
   321  		if v == 0 {
   322  			return r.Stack0[0:i]
   323  		}
   324  	}
   325  	return r.Stack0[0:]
   326  }
   327  
   328  // MemProfileRate controls the fraction of memory allocations
   329  // that are recorded and reported in the memory profile.
   330  // The profiler aims to sample an average of
   331  // one allocation per MemProfileRate bytes allocated.
   332  //
   333  // To include every allocated block in the profile, set MemProfileRate to 1.
   334  // To turn off profiling entirely, set MemProfileRate to 0.
   335  //
   336  // The tools that process the memory profiles assume that the
   337  // profile rate is constant across the lifetime of the program
   338  // and equal to the current value.  Programs that change the
   339  // memory profiling rate should do so just once, as early as
   340  // possible in the execution of the program (for example,
   341  // at the beginning of main).
   342  var MemProfileRate int = 512 * 1024
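// Illustrative sketch (not part of this file): per the note above, a program
// that wants a different sampling rate sets it once, at the very start of
// main; the 64 KiB value here is just an example.
//
//	func main() {
//		runtime.MemProfileRate = 64 * 1024 // ~1 sample per 64 KiB allocated
//		// ... rest of the program ...
//	}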
   343  
   344  // A MemProfileRecord describes the live objects allocated
   345  // by a particular call sequence (stack trace).
   346  type MemProfileRecord struct {
   347  	AllocBytes, FreeBytes     int64       // number of bytes allocated, freed
   348  	AllocObjects, FreeObjects int64       // number of objects allocated, freed
   349  	Stack0                    [32]uintptr // stack trace for this record; ends at first 0 entry
   350  }
   351  
   352  // InUseBytes returns the number of bytes in use (AllocBytes - FreeBytes).
   353  func (r *MemProfileRecord) InUseBytes() int64 { return r.AllocBytes - r.FreeBytes }
   354  
   355  // InUseObjects returns the number of objects in use (AllocObjects - FreeObjects).
   356  func (r *MemProfileRecord) InUseObjects() int64 {
   357  	return r.AllocObjects - r.FreeObjects
   358  }
   359  
   360  // Stack returns the stack trace associated with the record,
   361  // a prefix of r.Stack0.
   362  func (r *MemProfileRecord) Stack() []uintptr {
   363  	for i, v := range r.Stack0 {
   364  		if v == 0 {
   365  			return r.Stack0[0:i]
   366  		}
   367  	}
   368  	return r.Stack0[0:]
   369  }
   370  
   371  // MemProfile returns a profile of memory allocated and freed per allocation
   372  // site.
   373  //
   374  // MemProfile returns n, the number of records in the current memory profile.
   375  // If len(p) >= n, MemProfile copies the profile into p and returns n, true.
   376  // If len(p) < n, MemProfile does not change p and returns n, false.
   377  //
   378  // If inuseZero is true, the profile includes allocation records
   379  // where r.AllocBytes > 0 but r.AllocBytes == r.FreeBytes.
   380  // These are sites where memory was allocated, but it has all
   381  // been released back to the runtime.
   382  //
   383  // The returned profile may be up to two garbage collection cycles old.
   384  // This is to avoid skewing the profile toward allocations; because
   385  // allocations happen in real time but frees are delayed until the garbage
   386  // collector performs sweeping, the profile only accounts for allocations
   387  // that have had a chance to be freed by the garbage collector.
   388  //
   389  // Most clients should use the runtime/pprof package or
   390  // the testing package's -test.memprofile flag instead
   391  // of calling MemProfile directly.
   392  func MemProfile(p []MemProfileRecord, inuseZero bool) (n int, ok bool) {
   393  	lock(&proflock)
   394  	clear := true
   395  	for b := mbuckets; b != nil; b = b.allnext {
   396  		mp := b.mp()
   397  		if inuseZero || mp.alloc_bytes != mp.free_bytes {
   398  			n++
   399  		}
   400  		if mp.allocs != 0 || mp.frees != 0 {
   401  			clear = false
   402  		}
   403  	}
   404  	if clear {
   405  		// Absolutely no data, suggesting that a garbage collection
   406  		// has not yet happened. In order to allow profiling when
   407  		// garbage collection is disabled from the beginning of execution,
   408  		// accumulate stats as if a GC just happened, and recount buckets.
   409  		mprof_GC()
   410  		mprof_GC()
   411  		n = 0
   412  		for b := mbuckets; b != nil; b = b.allnext {
   413  			mp := b.mp()
   414  			if inuseZero || mp.alloc_bytes != mp.free_bytes {
   415  				n++
   416  			}
   417  		}
   418  	}
   419  	if n <= len(p) {
   420  		ok = true
   421  		idx := 0
   422  		for b := mbuckets; b != nil; b = b.allnext {
   423  			mp := b.mp()
   424  			if inuseZero || mp.alloc_bytes != mp.free_bytes {
   425  				record(&p[idx], b)
   426  				idx++
   427  			}
   428  		}
   429  	}
   430  	unlock(&proflock)
   431  	return
   432  }
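// Illustrative usage sketch (not part of this file): because new records can
// appear between the sizing call and the copying call, callers retry with a
// little slack until ok is true, much as runtime/pprof does:
//
//	var p []runtime.MemProfileRecord
//	n, ok := runtime.MemProfile(nil, true)
//	for !ok {
//		p = make([]runtime.MemProfileRecord, n+50)
//		n, ok = runtime.MemProfile(p, true)
//	}
//	p = p[:n] // consistent snapshot of the memory profile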
   433  
   434  // Write b's data to r.
   435  func record(r *MemProfileRecord, b *bucket) {
   436  	mp := b.mp()
   437  	r.AllocBytes = int64(mp.alloc_bytes)
   438  	r.FreeBytes = int64(mp.free_bytes)
   439  	r.AllocObjects = int64(mp.allocs)
   440  	r.FreeObjects = int64(mp.frees)
   441  	copy(r.Stack0[:], b.stk())
   442  	for i := int(b.nstk); i < len(r.Stack0); i++ {
   443  		r.Stack0[i] = 0
   444  	}
   445  }
   446  
   447  func iterate_memprof(fn func(*bucket, uintptr, *uintptr, uintptr, uintptr, uintptr)) {
   448  	lock(&proflock)
   449  	for b := mbuckets; b != nil; b = b.allnext {
   450  		mp := b.mp()
   451  		fn(b, uintptr(b.nstk), &b.stk()[0], b.size, mp.allocs, mp.frees)
   452  	}
   453  	unlock(&proflock)
   454  }
   455  
   456  // BlockProfileRecord describes blocking events originated
   457  // at a particular call sequence (stack trace).
   458  type BlockProfileRecord struct {
   459  	Count  int64
   460  	Cycles int64
   461  	StackRecord
   462  }
   463  
   464  // BlockProfile returns n, the number of records in the current blocking profile.
   465  // If len(p) >= n, BlockProfile copies the profile into p and returns n, true.
   466  // If len(p) < n, BlockProfile does not change p and returns n, false.
   467  //
   468  // Most clients should use the runtime/pprof package or
   469  // the testing package's -test.blockprofile flag instead
   470  // of calling BlockProfile directly.
   471  func BlockProfile(p []BlockProfileRecord) (n int, ok bool) {
   472  	lock(&proflock)
   473  	for b := bbuckets; b != nil; b = b.allnext {
   474  		n++
   475  	}
   476  	if n <= len(p) {
   477  		ok = true
   478  		for b := bbuckets; b != nil; b = b.allnext {
   479  			bp := b.bp()
   480  			r := &p[0]
   481  			r.Count = int64(bp.count)
   482  			r.Cycles = int64(bp.cycles)
   483  			i := copy(r.Stack0[:], b.stk())
   484  			for ; i < len(r.Stack0); i++ {
   485  				r.Stack0[i] = 0
   486  			}
   487  			p = p[1:]
   488  		}
   489  	}
   490  	unlock(&proflock)
   491  	return
   492  }
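// The two-call sizing pattern sketched after MemProfile above applies to
// BlockProfile as well; most programs instead read this data through
// pprof.Lookup("block").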
   493  
   494  // ThreadCreateProfile returns n, the number of records in the thread creation profile.
   495  // If len(p) >= n, ThreadCreateProfile copies the profile into p and returns n, true.
   496  // If len(p) < n, ThreadCreateProfile does not change p and returns n, false.
   497  //
   498  // Most clients should use the runtime/pprof package instead
   499  // of calling ThreadCreateProfile directly.
   500  func ThreadCreateProfile(p []StackRecord) (n int, ok bool) {
   501  	first := (*m)(atomic.Loadp(unsafe.Pointer(&allm)))
   502  	for mp := first; mp != nil; mp = mp.alllink {
   503  		n++
   504  	}
   505  	if n <= len(p) {
   506  		ok = true
   507  		i := 0
   508  		for mp := first; mp != nil; mp = mp.alllink {
   509  			for s := range mp.createstack {
   510  				p[i].Stack0[s] = uintptr(mp.createstack[s])
   511  			}
   512  			i++
   513  		}
   514  	}
   515  	return
   516  }
   517  
   518  // GoroutineProfile returns n, the number of records in the active goroutine stack profile.
   519  // If len(p) >= n, GoroutineProfile copies the profile into p and returns n, true.
   520  // If len(p) < n, GoroutineProfile does not change p and returns n, false.
   521  //
   522  // Most clients should use the runtime/pprof package instead
   523  // of calling GoroutineProfile directly.
   524  func GoroutineProfile(p []StackRecord) (n int, ok bool) {
   525  	gp := getg()
   526  
   527  	isOK := func(gp1 *g) bool {
   528  		// Checking isSystemGoroutine here makes GoroutineProfile
   529  		// consistent with both NumGoroutine and Stack.
   530  		return gp1 != gp && readgstatus(gp1) != _Gdead && !isSystemGoroutine(gp1)
   531  	}
   532  
   533  	stopTheWorld("profile")
   534  
   535  	n = 1
   536  	for _, gp1 := range allgs {
   537  		if isOK(gp1) {
   538  			n++
   539  		}
   540  	}
   541  
   542  	if n <= len(p) {
   543  		ok = true
   544  		r := p
   545  
   546  		// Save current goroutine.
   547  		sp := getcallersp(unsafe.Pointer(&p))
   548  		pc := getcallerpc(unsafe.Pointer(&p))
   549  		systemstack(func() {
   550  			saveg(pc, sp, gp, &r[0])
   551  		})
   552  		r = r[1:]
   553  
   554  		// Save other goroutines.
   555  		for _, gp1 := range allgs {
   556  			if isOK(gp1) {
   557  				if len(r) == 0 {
   558  					// Should be impossible, but better to return a
   559  					// truncated profile than to crash the entire process.
   560  					break
   561  				}
   562  				saveg(^uintptr(0), ^uintptr(0), gp1, &r[0])
   563  				r = r[1:]
   564  			}
   565  		}
   566  	}
   567  
   568  	startTheWorld()
   569  
   570  	return n, ok
   571  }
   572  
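// saveg records a traceback of gp, starting at pc/sp, into r.Stack0,
// zero-terminating the record when the trace is shorter than the buffer.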
   573  func saveg(pc, sp uintptr, gp *g, r *StackRecord) {
   574  	n := gentraceback(pc, sp, 0, gp, 0, &r.Stack0[0], len(r.Stack0), nil, nil, 0)
   575  	if n < len(r.Stack0) {
   576  		r.Stack0[n] = 0
   577  	}
   578  }
   579  
   580  // Stack formats a stack trace of the calling goroutine into buf
   581  // and returns the number of bytes written to buf.
   582  // If all is true, Stack formats stack traces of all other goroutines
   583  // into buf after the trace for the current goroutine.
   584  func Stack(buf []byte, all bool) int {
   585  	if all {
   586  		stopTheWorld("stack trace")
   587  	}
   588  
   589  	n := 0
   590  	if len(buf) > 0 {
   591  		gp := getg()
   592  		sp := getcallersp(unsafe.Pointer(&buf))
   593  		pc := getcallerpc(unsafe.Pointer(&buf))
   594  		systemstack(func() {
   595  			g0 := getg()
   596  			// Force traceback=1 to override GOTRACEBACK setting,
   597  			// so that Stack's results are consistent.
   598  			// GOTRACEBACK is only about crash dumps.
   599  			g0.m.traceback = 1
   600  			g0.writebuf = buf[0:0:len(buf)]
   601  			goroutineheader(gp)
   602  			traceback(pc, sp, 0, gp)
   603  			if all {
   604  				tracebackothers(gp)
   605  			}
   606  			g0.m.traceback = 0
   607  			n = len(g0.writebuf)
   608  			g0.writebuf = nil
   609  		})
   610  	}
   611  
   612  	if all {
   613  		startTheWorld()
   614  	}
   615  	return n
   616  }
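// Illustrative usage sketch (not part of this file): Stack never grows buf,
// so callers that want a complete trace typically retry with a larger buffer
// until the result fits:
//
//	buf := make([]byte, 1<<16)
//	for {
//		n := runtime.Stack(buf, true)
//		if n < len(buf) {
//			os.Stdout.Write(buf[:n])
//			break
//		}
//		buf = make([]byte, 2*len(buf))
//	}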
   617  
   618  // Tracing of alloc/free/gc.
   619  
   620  var tracelock mutex
   621  
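// tracealloc prints a trace line and the allocating goroutine's stack for
// an allocation of size bytes of type typ at address p.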
   622  func tracealloc(p unsafe.Pointer, size uintptr, typ *_type) {
   623  	lock(&tracelock)
   624  	gp := getg()
   625  	gp.m.traceback = 2
   626  	if typ == nil {
   627  		print("tracealloc(", p, ", ", hex(size), ")\n")
   628  	} else {
   629  		print("tracealloc(", p, ", ", hex(size), ", ", *typ._string, ")\n")
   630  	}
   631  	if gp.m.curg == nil || gp == gp.m.curg {
   632  		goroutineheader(gp)
   633  		pc := getcallerpc(unsafe.Pointer(&p))
   634  		sp := getcallersp(unsafe.Pointer(&p))
   635  		systemstack(func() {
   636  			traceback(pc, sp, 0, gp)
   637  		})
   638  	} else {
   639  		goroutineheader(gp.m.curg)
   640  		traceback(^uintptr(0), ^uintptr(0), 0, gp.m.curg)
   641  	}
   642  	print("\n")
   643  	gp.m.traceback = 0
   644  	unlock(&tracelock)
   645  }
   646  
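// tracefree prints a trace line and the calling goroutine's stack for a
// free of the size-byte block at address p.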
   647  func tracefree(p unsafe.Pointer, size uintptr) {
   648  	lock(&tracelock)
   649  	gp := getg()
   650  	gp.m.traceback = 2
   651  	print("tracefree(", p, ", ", hex(size), ")\n")
   652  	goroutineheader(gp)
   653  	pc := getcallerpc(unsafe.Pointer(&p))
   654  	sp := getcallersp(unsafe.Pointer(&p))
   655  	systemstack(func() {
   656  		traceback(pc, sp, 0, gp)
   657  	})
   658  	print("\n")
   659  	gp.m.traceback = 0
   660  	unlock(&tracelock)
   661  }
   662  
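// tracegc prints a trace line for a garbage collection, followed by the
// stacks of all non-g0 goroutines.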
   663  func tracegc() {
   664  	lock(&tracelock)
   665  	gp := getg()
   666  	gp.m.traceback = 2
   667  	print("tracegc()\n")
   668  	// running on m->g0 stack; show all non-g0 goroutines
   669  	tracebackothers(gp)
   670  	print("end tracegc\n")
   671  	print("\n")
   672  	gp.m.traceback = 0
   673  	unlock(&tracelock)
   674  }