github.com/mtsmfm/go/src@v0.0.0-20221020090648-44bdcb9f8fde/runtime/heapdump.go (about)

     1  // Copyright 2014 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // Implementation of runtime/debug.WriteHeapDump. Writes all
     6  // objects in the heap plus additional info (roots, threads,
     7  // finalizers, etc.) to a file.
     8  
     9  // The format of the dumped file is described at
    10  // https://golang.org/s/go15heapdump.
    11  
    12  package runtime
    13  
    14  import (
    15  	"internal/goarch"
    16  	"unsafe"
    17  )
    18  
// runtime_debug_WriteHeapDump writes a heap dump to fd. It is the
// runtime-side implementation of runtime/debug.WriteHeapDump, reached
// via go:linkname. The world is stopped for the duration of the dump.
//
//go:linkname runtime_debug_WriteHeapDump runtime/debug.WriteHeapDump
func runtime_debug_WriteHeapDump(fd uintptr) {
	stopTheWorld("write heap dump")

	// Keep m on this G's stack instead of the system stack.
	// Both readmemstats_m and writeheapdump_m have pretty large
	// peak stack depths and we risk blowing the system stack.
	// This is safe because the world is stopped, so we don't
	// need to worry about anyone shrinking and therefore moving
	// our stack.
	var m MemStats
	systemstack(func() {
		// Call readmemstats_m here instead of deeper in
		// writeheapdump_m because we might blow the system stack
		// otherwise.
		readmemstats_m(&m)
		writeheapdump_m(fd, &m)
	})

	startTheWorld()
}
    40  
const (
	// Field kinds. Each object/frame record is followed by a list of
	// (kind, offset) pairs terminated by a single fieldKindEol.
	fieldKindEol   = 0
	fieldKindPtr   = 1
	fieldKindIface = 2 // not emitted anywhere in this file; kept for format compatibility
	fieldKindEface = 3 // not emitted anywhere in this file; kept for format compatibility

	// Record tags. Every record in the dump stream starts with one of
	// these varint tags; tagEOF terminates the stream.
	tagEOF             = 0
	tagObject          = 1
	tagOtherRoot       = 2
	tagType            = 3
	tagGoroutine       = 4
	tagStackFrame      = 5
	tagParams          = 6
	tagFinalizer       = 7
	tagItab             = 8
	tagOSThread        = 9
	tagMemStats        = 10
	tagQueuedFinalizer = 11
	tagData            = 12
	tagBSS             = 13
	tagDefer           = 14
	tagPanic           = 15
	tagMemProf         = 16
	tagAllocSample     = 17
)
    65  
var dumpfd uintptr // fd to write the dump to.
var tmpbuf []byte  // scratch buffer reused by makeheapobjbv; freed in writeheapdump_m.
    68  
// buffer of pending write data, flushed to dumpfd by dwrite/flush.
const (
	bufSize = 4096
)

var buf [bufSize]byte
var nbuf uintptr // number of valid bytes currently buffered in buf
    76  
// dwrite appends len bytes starting at data to the dump, buffering
// small writes through buf and flushing to dumpfd when the buffer
// would overflow.
func dwrite(data unsafe.Pointer, len uintptr) {
	if len == 0 {
		return
	}
	// Fast path: the data fits in the remaining buffer space.
	if nbuf+len <= bufSize {
		copy(buf[nbuf:], (*[bufSize]byte)(data)[:len])
		nbuf += len
		return
	}

	// Buffer would overflow: flush what we have, then either write the
	// payload directly (large) or restart the buffer with it (small).
	write(dumpfd, unsafe.Pointer(&buf), int32(nbuf))
	if len >= bufSize {
		write(dumpfd, data, int32(len))
		nbuf = 0
	} else {
		copy(buf[:], (*[bufSize]byte)(data)[:len])
		nbuf = len
	}
}
    96  
    97  func dwritebyte(b byte) {
    98  	dwrite(unsafe.Pointer(&b), 1)
    99  }
   100  
// flush writes any buffered dump data to dumpfd and empties the buffer.
func flush() {
	write(dumpfd, unsafe.Pointer(&buf), int32(nbuf))
	nbuf = 0
}
   105  
// Cache of types that have been serialized already.
// We use a type's hash field to pick a bucket.
// Inside a bucket, we keep a list of types that
// have been serialized so far, most recently used first.
// Note: when a bucket overflows we may end up
// serializing a type more than once. That's ok.
const (
	typeCacheBuckets = 256 // must be a power of two; dumptype masks the hash with typeCacheBuckets-1
	typeCacheAssoc   = 4   // entries per bucket, kept in MRU order
)

type typeCacheBucket struct {
	t [typeCacheAssoc]*_type
}

// typecache is cleared by mdump (memclrNoHeapPointers) at the start of each dump.
var typecache [typeCacheBuckets]typeCacheBucket
   122  
   123  // dump a uint64 in a varint format parseable by encoding/binary
   124  func dumpint(v uint64) {
   125  	var buf [10]byte
   126  	var n int
   127  	for v >= 0x80 {
   128  		buf[n] = byte(v | 0x80)
   129  		n++
   130  		v >>= 7
   131  	}
   132  	buf[n] = byte(v)
   133  	n++
   134  	dwrite(unsafe.Pointer(&buf), uintptr(n))
   135  }
   136  
   137  func dumpbool(b bool) {
   138  	if b {
   139  		dumpint(1)
   140  	} else {
   141  		dumpint(0)
   142  	}
   143  }
   144  
   145  // dump varint uint64 length followed by memory contents
   146  func dumpmemrange(data unsafe.Pointer, len uintptr) {
   147  	dumpint(uint64(len))
   148  	dwrite(data, len)
   149  }
   150  
   151  func dumpslice(b []byte) {
   152  	dumpint(uint64(len(b)))
   153  	if len(b) > 0 {
   154  		dwrite(unsafe.Pointer(&b[0]), uintptr(len(b)))
   155  	}
   156  }
   157  
// dumpstr writes s as a varint length followed by its bytes.
func dumpstr(s string) {
	dumpmemrange(unsafe.Pointer(unsafe.StringData(s)), uintptr(len(s)))
}
   161  
// dumptype dumps information for a type, at most once per type (best
// effort, via typecache). Emits a tagType record: address, size, name
// ("pkgpath.name" when a package path is available), and whether the
// type is stored indirectly in interfaces.
func dumptype(t *_type) {
	if t == nil {
		return
	}

	// If we've definitely serialized the type before,
	// no need to do it again.
	b := &typecache[t.hash&(typeCacheBuckets-1)]
	if t == b.t[0] {
		return
	}
	for i := 1; i < typeCacheAssoc; i++ {
		if t == b.t[i] {
			// Move-to-front
			for j := i; j > 0; j-- {
				b.t[j] = b.t[j-1]
			}
			b.t[0] = t
			return
		}
	}

	// Might not have been dumped yet. Dump it and
	// remember we did so.
	// Shift the bucket down (evicting the LRU entry) and insert at front.
	for j := typeCacheAssoc - 1; j > 0; j-- {
		b.t[j] = b.t[j-1]
	}
	b.t[0] = t

	// dump the type
	dumpint(tagType)
	dumpint(uint64(uintptr(unsafe.Pointer(t))))
	dumpint(uint64(t.size))
	if x := t.uncommon(); x == nil || t.nameOff(x.pkgpath).name() == "" {
		// No package path: use the plain type string.
		dumpstr(t.string())
	} else {
		// Emit "pkgpath.name" as one length-prefixed string without
		// building an intermediate concatenation.
		pkgpath := t.nameOff(x.pkgpath).name()
		name := t.name()
		dumpint(uint64(uintptr(len(pkgpath)) + 1 + uintptr(len(name))))
		dwrite(unsafe.Pointer(unsafe.StringData(pkgpath)), uintptr(len(pkgpath)))
		dwritebyte('.')
		dwrite(unsafe.Pointer(unsafe.StringData(name)), uintptr(len(name)))
	}
	// True when interface values of this type hold a pointer to the data
	// rather than the data itself.
	dumpbool(t.kind&kindDirectIface == 0 || t.ptrdata != 0)
}
   208  
// dumpobj dumps a single heap object: its address, contents, and the
// pointer fields described by bv.
func dumpobj(obj unsafe.Pointer, size uintptr, bv bitvector) {
	dumpint(tagObject)
	dumpint(uint64(uintptr(obj)))
	dumpmemrange(obj, size)
	dumpfields(bv)
}
   216  
// dumpotherroot records a miscellaneous GC root: a description string
// and the address it points to.
func dumpotherroot(description string, to unsafe.Pointer) {
	dumpint(tagOtherRoot)
	dumpstr(description)
	dumpint(uint64(uintptr(to)))
}
   222  
// dumpfinalizer records a finalizer attached to obj: the funcval, its
// code pointer, the finalizer's first-argument type, and the object's
// pointer type.
func dumpfinalizer(obj unsafe.Pointer, fn *funcval, fint *_type, ot *ptrtype) {
	dumpint(tagFinalizer)
	dumpint(uint64(uintptr(obj)))
	dumpint(uint64(uintptr(unsafe.Pointer(fn))))
	dumpint(uint64(uintptr(unsafe.Pointer(fn.fn))))
	dumpint(uint64(uintptr(unsafe.Pointer(fint))))
	dumpint(uint64(uintptr(unsafe.Pointer(ot))))
}
   231  
// childInfo carries layout information from a callee frame up to its
// caller during the stack walk in dumpgoroutine/dumpframe.
type childInfo struct {
	// Information passed up from the callee frame about
	// the layout of the outargs region.
	argoff uintptr   // where the arguments start in the frame
	arglen uintptr   // size of args region
	args   bitvector // if args.n >= 0, pointer map of args region
	sp     *uint8    // callee sp (nil for the innermost frame)
	depth  uintptr   // depth in call stack (0 == most recent)
}
   241  
   242  // dump kinds & offsets of interesting fields in bv
   243  func dumpbv(cbv *bitvector, offset uintptr) {
   244  	for i := uintptr(0); i < uintptr(cbv.n); i++ {
   245  		if cbv.ptrbit(i) == 1 {
   246  			dumpint(fieldKindPtr)
   247  			dumpint(uint64(offset + i*goarch.PtrSize))
   248  		}
   249  	}
   250  }
   251  
// dumpframe dumps one stack frame and records in *arg (a *childInfo)
// the outargs layout that the parent frame will need on the next call.
// It is used as a gentraceback callback; returning true continues the
// walk.
func dumpframe(s *stkframe, arg unsafe.Pointer) bool {
	child := (*childInfo)(arg)
	f := s.fn

	// Figure out what we can about our stack map
	pc := s.pc
	pcdata := int32(-1) // Use the entry map at function entry
	if pc != f.entry() {
		// Back up to the call instruction so the stack map lookup
		// reflects the state at the call site.
		pc--
		pcdata = pcdatavalue(f, _PCDATA_StackMapIndex, pc, nil)
	}
	if pcdata == -1 {
		// We do not have a valid pcdata value but there might be a
		// stackmap for this function. It is likely that we are looking
		// at the function prologue, assume so and hope for the best.
		pcdata = 0
	}
	stkmap := (*stackmap)(funcdata(f, _FUNCDATA_LocalsPointerMaps))

	var bv bitvector
	if stkmap != nil && stkmap.n > 0 {
		bv = stackmapdata(stkmap, pcdata)
	} else {
		bv.n = -1 // no locals pointer map available
	}

	// Dump main body of stack frame.
	dumpint(tagStackFrame)
	dumpint(uint64(s.sp))                              // lowest address in frame
	dumpint(uint64(child.depth))                       // # of frames deep on the stack
	dumpint(uint64(uintptr(unsafe.Pointer(child.sp)))) // sp of child, or 0 if bottom of stack
	dumpmemrange(unsafe.Pointer(s.sp), s.fp-s.sp)      // frame contents
	dumpint(uint64(f.entry()))
	dumpint(uint64(s.pc))
	dumpint(uint64(s.continpc))
	name := funcname(f)
	if name == "" {
		name = "unknown function"
	}
	dumpstr(name)

	// Dump fields in the outargs section
	if child.args.n >= 0 {
		dumpbv(&child.args, child.argoff)
	} else {
		// conservative - everything might be a pointer
		for off := child.argoff; off < child.argoff+child.arglen; off += goarch.PtrSize {
			dumpint(fieldKindPtr)
			dumpint(uint64(off))
		}
	}

	// Dump fields in the local vars section
	if stkmap == nil {
		// No locals information, dump everything.
		for off := child.arglen; off < s.varp-s.sp; off += goarch.PtrSize {
			dumpint(fieldKindPtr)
			dumpint(uint64(off))
		}
	} else if stkmap.n < 0 {
		// Locals size information, dump just the locals.
		size := uintptr(-stkmap.n)
		for off := s.varp - size - s.sp; off < s.varp-s.sp; off += goarch.PtrSize {
			dumpint(fieldKindPtr)
			dumpint(uint64(off))
		}
	} else if stkmap.n > 0 {
		// Locals bitmap information, scan just the pointers in
		// locals.
		dumpbv(&bv, s.varp-uintptr(bv.n)*goarch.PtrSize-s.sp)
	}
	dumpint(fieldKindEol)

	// Record arg info for parent.
	child.argoff = s.argp - s.fp
	child.arglen = s.argBytes()
	child.sp = (*uint8)(unsafe.Pointer(s.sp))
	child.depth++
	stkmap = (*stackmap)(funcdata(f, _FUNCDATA_ArgsPointerMaps))
	if stkmap != nil {
		child.args = stackmapdata(stkmap, pcdata)
	} else {
		child.args.n = -1
	}
	return true
}
   338  
// dumpgoroutine dumps one goroutine: its tagGoroutine record, its stack
// frames (via gentraceback/dumpframe), and its pending defer and panic
// records.
func dumpgoroutine(gp *g) {
	// Pick the resume point: syscall state if the goroutine is in a
	// syscall, otherwise the scheduler-saved context.
	var sp, pc, lr uintptr
	if gp.syscallsp != 0 {
		sp = gp.syscallsp
		pc = gp.syscallpc
		lr = 0
	} else {
		sp = gp.sched.sp
		pc = gp.sched.pc
		lr = gp.sched.lr
	}

	dumpint(tagGoroutine)
	dumpint(uint64(uintptr(unsafe.Pointer(gp))))
	dumpint(uint64(sp))
	dumpint(gp.goid)
	dumpint(uint64(gp.gopc))
	dumpint(uint64(readgstatus(gp)))
	dumpbool(isSystemGoroutine(gp, false))
	dumpbool(false) // isbackground
	dumpint(uint64(gp.waitsince))
	dumpstr(gp.waitreason.String())
	dumpint(uint64(uintptr(gp.sched.ctxt)))
	dumpint(uint64(uintptr(unsafe.Pointer(gp.m))))
	dumpint(uint64(uintptr(unsafe.Pointer(gp._defer))))
	dumpint(uint64(uintptr(unsafe.Pointer(gp._panic))))

	// dump stack
	var child childInfo
	child.args.n = -1
	child.arglen = 0
	child.sp = nil
	child.depth = 0
	gentraceback(pc, sp, lr, gp, 0, nil, 0x7fffffff, dumpframe, noescape(unsafe.Pointer(&child)), 0)

	// dump defer & panic records
	for d := gp._defer; d != nil; d = d.link {
		dumpint(tagDefer)
		dumpint(uint64(uintptr(unsafe.Pointer(d))))
		dumpint(uint64(uintptr(unsafe.Pointer(gp))))
		dumpint(uint64(d.sp))
		dumpint(uint64(d.pc))
		// Reinterpret the func value as a *funcval to reach its code pointer.
		fn := *(**funcval)(unsafe.Pointer(&d.fn))
		dumpint(uint64(uintptr(unsafe.Pointer(fn))))
		if d.fn == nil {
			// d.fn can be nil for open-coded defers
			dumpint(uint64(0))
		} else {
			dumpint(uint64(uintptr(unsafe.Pointer(fn.fn))))
		}
		dumpint(uint64(uintptr(unsafe.Pointer(d.link))))
	}
	for p := gp._panic; p != nil; p = p.link {
		dumpint(tagPanic)
		dumpint(uint64(uintptr(unsafe.Pointer(p))))
		dumpint(uint64(uintptr(unsafe.Pointer(gp))))
		eface := efaceOf(&p.arg)
		dumpint(uint64(uintptr(unsafe.Pointer(eface._type))))
		dumpint(uint64(uintptr(unsafe.Pointer(eface.data))))
		dumpint(0) // was p->defer, no longer recorded
		dumpint(uint64(uintptr(unsafe.Pointer(p.link))))
	}
}
   402  
// dumpgs dumps every live goroutine and its stack. Dead goroutines are
// skipped; any unexpected status is fatal since the world is stopped.
func dumpgs() {
	assertWorldStopped()

	// goroutines & stacks
	forEachG(func(gp *g) {
		status := readgstatus(gp) // The world is stopped so gp will not be in a scan state.
		switch status {
		default:
			print("runtime: unexpected G.status ", hex(status), "\n")
			throw("dumpgs in STW - bad status")
		case _Gdead:
			// ok
		case _Grunnable,
			_Gsyscall,
			_Gwaiting:
			dumpgoroutine(gp)
		}
	})
}
   422  
// finq_callback records one queued (but not yet run) finalizer. It is
// invoked for each entry by iterate_finq in dumproots.
func finq_callback(fn *funcval, obj unsafe.Pointer, nret uintptr, fint *_type, ot *ptrtype) {
	dumpint(tagQueuedFinalizer)
	dumpint(uint64(uintptr(obj)))
	dumpint(uint64(uintptr(unsafe.Pointer(fn))))
	dumpint(uint64(uintptr(unsafe.Pointer(fn.fn))))
	dumpint(uint64(uintptr(unsafe.Pointer(fint))))
	dumpint(uint64(uintptr(unsafe.Pointer(ot))))
}
   431  
// dumproots dumps the GC roots: the data and bss segments with their
// pointer masks, all registered finalizers, and the queued-finalizer
// list.
func dumproots() {
	// To protect mheap_.allspans.
	assertWorldStopped()

	// TODO(mwhudson): dump datamask etc from all objects
	// data segment
	dumpint(tagData)
	dumpint(uint64(firstmoduledata.data))
	dumpmemrange(unsafe.Pointer(firstmoduledata.data), firstmoduledata.edata-firstmoduledata.data)
	dumpfields(firstmoduledata.gcdatamask)

	// bss segment
	dumpint(tagBSS)
	dumpint(uint64(firstmoduledata.bss))
	dumpmemrange(unsafe.Pointer(firstmoduledata.bss), firstmoduledata.ebss-firstmoduledata.bss)
	dumpfields(firstmoduledata.gcbssmask)

	// mspan.types
	for _, s := range mheap_.allspans {
		if s.state.get() == mSpanInUse {
			// Finalizers
			for sp := s.specials; sp != nil; sp = sp.next {
				if sp.kind != _KindSpecialFinalizer {
					continue
				}
				spf := (*specialfinalizer)(unsafe.Pointer(sp))
				p := unsafe.Pointer(s.base() + uintptr(spf.special.offset))
				dumpfinalizer(p, spf.fn, spf.fint, spf.ot)
			}
		}
	}

	// Finalizer queue
	iterate_finq(finq_callback)
}
   467  
// Bit vector of free marks, reused across spans by dumpobjs.
// Needs to be as big as the largest number of objects per span.
var freemark [_PageSize / 8]bool
   471  
// dumpobjs dumps every allocated object in every in-use span, using
// freemark to skip slots the allocator considers free.
func dumpobjs() {
	// To protect mheap_.allspans.
	assertWorldStopped()

	for _, s := range mheap_.allspans {
		if s.state.get() != mSpanInUse {
			continue
		}
		p := s.base()
		size := s.elemsize
		n := (s.npages << _PageShift) / size
		if n > uintptr(len(freemark)) {
			throw("freemark array doesn't have enough entries")
		}

		// Mark the free slots of this span.
		for freeIndex := uintptr(0); freeIndex < s.nelems; freeIndex++ {
			if s.isFree(freeIndex) {
				freemark[freeIndex] = true
			}
		}

		// Dump the allocated slots, clearing freemark as we go so the
		// array is all-false again for the next span.
		for j := uintptr(0); j < n; j, p = j+1, p+size {
			if freemark[j] {
				freemark[j] = false
				continue
			}
			dumpobj(unsafe.Pointer(p), size, makeheapobjbv(p, size))
		}
	}
}
   502  
// dumpparams dumps global parameters of the running program: pointer
// endianness and size, the heap's address range, GOARCH, the Go
// version, and the CPU count.
func dumpparams() {
	dumpint(tagParams)
	// Detect host byte order by inspecting the low byte of a uintptr.
	// The flag written means "pointers are big-endian".
	x := uintptr(1)
	if *(*byte)(unsafe.Pointer(&x)) == 1 {
		dumpbool(false) // little-endian ptrs
	} else {
		dumpbool(true) // big-endian ptrs
	}
	dumpint(goarch.PtrSize)
	// Compute the smallest address range covering all mapped heap arenas.
	var arenaStart, arenaEnd uintptr
	for i1 := range mheap_.arenas {
		if mheap_.arenas[i1] == nil {
			continue
		}
		for i, ha := range mheap_.arenas[i1] {
			if ha == nil {
				continue
			}
			base := arenaBase(arenaIdx(i1)<<arenaL1Shift | arenaIdx(i))
			if arenaStart == 0 || base < arenaStart {
				arenaStart = base
			}
			if base+heapArenaBytes > arenaEnd {
				arenaEnd = base + heapArenaBytes
			}
		}
	}
	dumpint(uint64(arenaStart))
	dumpint(uint64(arenaEnd))
	dumpstr(goarch.GOARCH)
	dumpstr(buildVersion)
	dumpint(uint64(ncpu))
}
   536  
// itab_callback records one itab and makes sure its concrete type has
// been dumped first. Invoked for each itab by iterate_itabs.
func itab_callback(tab *itab) {
	t := tab._type
	dumptype(t)
	dumpint(tagItab)
	dumpint(uint64(uintptr(unsafe.Pointer(tab))))
	dumpint(uint64(uintptr(unsafe.Pointer(t))))
}
   544  
// dumpitabs dumps every itab in the program via itab_callback.
func dumpitabs() {
	iterate_itabs(itab_callback)
}
   548  
// dumpms dumps a tagOSThread record for every M on the allm list.
func dumpms() {
	for mp := allm; mp != nil; mp = mp.alllink {
		dumpint(tagOSThread)
		dumpint(uint64(uintptr(unsafe.Pointer(mp))))
		dumpint(uint64(mp.id))
		dumpint(mp.procid)
	}
}
   557  
// dumpmemstats dumps the memory statistics snapshot taken earlier by
// readmemstats_m.
//
//go:systemstack
func dumpmemstats(m *MemStats) {
	assertWorldStopped()

	// These ints should be identical to the exported
	// MemStats structure and should be ordered the same
	// way too.
	dumpint(tagMemStats)
	dumpint(m.Alloc)
	dumpint(m.TotalAlloc)
	dumpint(m.Sys)
	dumpint(m.Lookups)
	dumpint(m.Mallocs)
	dumpint(m.Frees)
	dumpint(m.HeapAlloc)
	dumpint(m.HeapSys)
	dumpint(m.HeapIdle)
	dumpint(m.HeapInuse)
	dumpint(m.HeapReleased)
	dumpint(m.HeapObjects)
	dumpint(m.StackInuse)
	dumpint(m.StackSys)
	dumpint(m.MSpanInuse)
	dumpint(m.MSpanSys)
	dumpint(m.MCacheInuse)
	dumpint(m.MCacheSys)
	dumpint(m.BuckHashSys)
	dumpint(m.GCSys)
	dumpint(m.OtherSys)
	dumpint(m.NextGC)
	dumpint(m.LastGC)
	dumpint(m.PauseTotalNs)
	// PauseNs is a fixed 256-entry circular buffer; dump all of it.
	for i := 0; i < 256; i++ {
		dumpint(m.PauseNs[i])
	}
	dumpint(uint64(m.NumGC))
}
   595  
// dumpmemprof_callback records one memory-profile bucket: its stack
// (resolved to function/file/line where possible) and its alloc/free
// counts. Invoked for each bucket by iterate_memprof.
func dumpmemprof_callback(b *bucket, nstk uintptr, pstk *uintptr, size, allocs, frees uintptr) {
	stk := (*[100000]uintptr)(unsafe.Pointer(pstk))
	dumpint(tagMemProf)
	dumpint(uint64(uintptr(unsafe.Pointer(b))))
	dumpint(uint64(size))
	dumpint(uint64(nstk))
	for i := uintptr(0); i < nstk; i++ {
		pc := stk[i]
		f := findfunc(pc)
		if !f.valid() {
			// No symbol information: format the pc as "(0x<hex>)"
			// by filling buf backwards from the end.
			var buf [64]byte
			n := len(buf)
			n--
			buf[n] = ')'
			if pc == 0 {
				n--
				buf[n] = '0'
			} else {
				for pc > 0 {
					n--
					buf[n] = "0123456789abcdef"[pc&15]
					pc >>= 4
				}
			}
			n--
			buf[n] = 'x'
			n--
			buf[n] = '0'
			n--
			buf[n] = '('
			dumpslice(buf[n:])
			dumpstr("?") // unknown file
			dumpint(0)   // unknown line
		} else {
			dumpstr(funcname(f))
			// For frames above the innermost, back up to the call
			// instruction before resolving file/line.
			if i > 0 && pc > f.entry() {
				pc--
			}
			file, line := funcline(f, pc)
			dumpstr(file)
			dumpint(uint64(line))
		}
	}
	dumpint(uint64(allocs))
	dumpint(uint64(frees))
}
   642  
// dumpmemprof dumps the memory profile: every profile bucket, followed
// by a tagAllocSample record linking each profiled live object to its
// bucket.
func dumpmemprof() {
	// To protect mheap_.allspans.
	assertWorldStopped()

	iterate_memprof(dumpmemprof_callback)
	for _, s := range mheap_.allspans {
		if s.state.get() != mSpanInUse {
			continue
		}
		for sp := s.specials; sp != nil; sp = sp.next {
			if sp.kind != _KindSpecialProfile {
				continue
			}
			spp := (*specialprofile)(unsafe.Pointer(sp))
			p := s.base() + uintptr(spp.special.offset)
			dumpint(tagAllocSample)
			dumpint(uint64(p))
			dumpint(uint64(uintptr(unsafe.Pointer(spp.b))))
		}
	}
}
   664  
// dumphdr is the magic header written at the start of every heap dump.
var dumphdr = []byte("go1.7 heap dump\n")
   666  
// mdump writes the complete heap dump to dumpfd: header, params,
// itabs, objects, goroutines, threads, roots, memstats, and the memory
// profile, terminated by tagEOF.
func mdump(m *MemStats) {
	assertWorldStopped()

	// make sure we're done sweeping
	for _, s := range mheap_.allspans {
		if s.state.get() == mSpanInUse {
			s.ensureSwept()
		}
	}
	// Reset the type cache so this dump serializes every type it sees.
	memclrNoHeapPointers(unsafe.Pointer(&typecache), unsafe.Sizeof(typecache))
	dwrite(unsafe.Pointer(&dumphdr[0]), uintptr(len(dumphdr)))
	dumpparams()
	dumpitabs()
	dumpobjs()
	dumpgs()
	dumpms()
	dumproots()
	dumpmemstats(m)
	dumpmemprof()
	dumpint(tagEOF)
	flush()
}
   689  
// writeheapdump_m performs the dump to fd with the world stopped,
// marking the user goroutine as waiting for the duration and releasing
// the makeheapobjbv scratch buffer afterwards. Runs on the system
// stack (called via systemstack in runtime_debug_WriteHeapDump).
func writeheapdump_m(fd uintptr, m *MemStats) {
	assertWorldStopped()

	gp := getg()
	casGToWaiting(gp.m.curg, _Grunning, waitReasonDumpingHeap)

	// Set dump file.
	dumpfd = fd

	// Call dump routine.
	mdump(m)

	// Reset dump file.
	dumpfd = 0
	if tmpbuf != nil {
		sysFree(unsafe.Pointer(&tmpbuf[0]), uintptr(len(tmpbuf)), &memstats.other_sys)
		tmpbuf = nil
	}

	casgstatus(gp.m.curg, _Gwaiting, _Grunning)
}
   711  
// dumpfields dumpint()s the kind & offset of each pointer field in an
// object described by bv, terminated by fieldKindEol.
func dumpfields(bv bitvector) {
	dumpbv(&bv, 0)
	dumpint(fieldKindEol)
}
   717  
// makeheapobjbv builds a bitvector describing which pointer-sized
// slots of the heap object at [p, p+size) hold pointers, derived from
// the heap bitmap. The result's storage is tmpbuf, which is grown with
// sysAlloc as needed and reused across calls (freed in
// writeheapdump_m).
func makeheapobjbv(p uintptr, size uintptr) bitvector {
	// Extend the temp buffer if necessary.
	nptr := size / goarch.PtrSize
	if uintptr(len(tmpbuf)) < nptr/8+1 {
		if tmpbuf != nil {
			sysFree(unsafe.Pointer(&tmpbuf[0]), uintptr(len(tmpbuf)), &memstats.other_sys)
		}
		n := nptr/8 + 1
		p := sysAlloc(n, &memstats.other_sys)
		if p == nil {
			throw("heapdump: out of memory")
		}
		tmpbuf = (*[1 << 30]byte)(p)[:n]
	}
	// Convert heap bitmap to pointer bitmap.
	for i := uintptr(0); i < nptr/8+1; i++ {
		tmpbuf[i] = 0
	}

	// Set a bit for every pointer slot the heap bitmap reports.
	hbits := heapBitsForAddr(p, size)
	for {
		var addr uintptr
		hbits, addr = hbits.next()
		if addr == 0 {
			break
		}
		i := (addr - p) / goarch.PtrSize
		tmpbuf[i/8] |= 1 << (i % 8)
	}
	return bitvector{int32(nptr), &tmpbuf[0]}
}