github.com/brownsys/tracing-framework-go@v0.0.0-20161210174012-0542a62412fe/go/darwin_amd64/src/runtime/heapdump.go (about)

     1  // Copyright 2014 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // Implementation of runtime/debug.WriteHeapDump. Writes all
     6  // objects in the heap plus additional info (roots, threads,
     7  // finalizers, etc.) to a file.
     8  
     9  // The format of the dumped file is described at
    10  // https://golang.org/s/go15heapdump.
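         // For illustration only: a program typically reaches this code through
         // the exported wrapper in runtime/debug, roughly
         //
         //	f, err := os.Create("heapdump")
         //	if err != nil {
         //		panic(err)
         //	}
         //	debug.WriteHeapDump(f.Fd())
         //	f.Close()
         //
         // The exported debug.WriteHeapDump is bound to the
         // runtime_debug_WriteHeapDump function below by its go:linkname
         // directive.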
    11  
    12  package runtime
    13  
    14  import (
    15  	"runtime/internal/sys"
    16  	"unsafe"
    17  )
    18  
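         // stopTheWorld keeps the heap and the goroutine state from changing
         // while the dump is written; running writeheapdump_m on the system
         // stack also leaves the calling goroutine's own stack quiescent, so
         // it can be dumped along with all the others.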
    19  //go:linkname runtime_debug_WriteHeapDump runtime/debug.WriteHeapDump
    20  func runtime_debug_WriteHeapDump(fd uintptr) {
    21  	stopTheWorld("write heap dump")
    22  
    23  	systemstack(func() {
    24  		writeheapdump_m(fd)
    25  	})
    26  
    27  	startTheWorld()
    28  }
    29  
    30  const (
    31  	fieldKindEol       = 0
    32  	fieldKindPtr       = 1
    33  	fieldKindIface     = 2
    34  	fieldKindEface     = 3
    35  	tagEOF             = 0
    36  	tagObject          = 1
    37  	tagOtherRoot       = 2
    38  	tagType            = 3
    39  	tagGoroutine       = 4
    40  	tagStackFrame      = 5
    41  	tagParams          = 6
    42  	tagFinalizer       = 7
    43  	tagItab            = 8
    44  	tagOSThread        = 9
    45  	tagMemStats        = 10
    46  	tagQueuedFinalizer = 11
    47  	tagData            = 12
    48  	tagBSS             = 13
    49  	tagDefer           = 14
    50  	tagPanic           = 15
    51  	tagMemProf         = 16
    52  	tagAllocSample     = 17
    53  )
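         // Every record in the dump begins with one of the tags above, written
         // as a uvarint by dumpint, followed by tag-specific fields; the stream
         // is terminated by a single tagEOF record (see mdump).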
    54  
    55  var dumpfd uintptr // fd to write the dump to.
    56  var tmpbuf []byte
    57  
    58  // buffer of pending write data
    59  const (
    60  	bufSize = 4096
    61  )
    62  
    63  var buf [bufSize]byte
    64  var nbuf uintptr
    65  
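         // dwrite appends len bytes starting at data to the pending-write
         // buffer, flushing the buffer to dumpfd whenever the new data would
         // not fit; writes of bufSize bytes or more bypass the buffer entirely.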
    66  func dwrite(data unsafe.Pointer, len uintptr) {
    67  	if len == 0 {
    68  		return
    69  	}
    70  	if nbuf+len <= bufSize {
    71  		copy(buf[nbuf:], (*[bufSize]byte)(data)[:len])
    72  		nbuf += len
    73  		return
    74  	}
    75  
    76  	write(dumpfd, unsafe.Pointer(&buf), int32(nbuf))
    77  	if len >= bufSize {
    78  		write(dumpfd, data, int32(len))
    79  		nbuf = 0
    80  	} else {
    81  		copy(buf[:], (*[bufSize]byte)(data)[:len])
    82  		nbuf = len
    83  	}
    84  }
    85  
    86  func dwritebyte(b byte) {
    87  	dwrite(unsafe.Pointer(&b), 1)
    88  }
    89  
    90  func flush() {
    91  	write(dumpfd, unsafe.Pointer(&buf), int32(nbuf))
    92  	nbuf = 0
    93  }
    94  
    95  // Cache of types that have been serialized already.
    96  // We use a type's hash field to pick a bucket.
    97  // Inside a bucket, we keep a list of types that
    98  // have been serialized so far, most recently used first.
    99  // Note: when a bucket overflows we may end up
   100  // serializing a type more than once. That's ok.
   101  const (
   102  	typeCacheBuckets = 256
   103  	typeCacheAssoc   = 4
   104  )
   105  
   106  type typeCacheBucket struct {
   107  	t [typeCacheAssoc]*_type
   108  }
   109  
   110  var typecache [typeCacheBuckets]typeCacheBucket
   111  
   112  // dump a uint64 in a varint format parseable by encoding/binary
   113  func dumpint(v uint64) {
   114  	var buf [10]byte
   115  	var n int
   116  	for v >= 0x80 {
   117  		buf[n] = byte(v | 0x80)
   118  		n++
   119  		v >>= 7
   120  	}
   121  	buf[n] = byte(v)
   122  	n++
   123  	dwrite(unsafe.Pointer(&buf), uintptr(n))
   124  }
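         // For example, dumpint(300) emits the two bytes 0xAC 0x02:
         // 300 = 0b1_0010_1100, so the low seven bits (0x2C) are written first
         // with the continuation bit (0x80) set, followed by the remaining
         // bits (0x02) -- the same base-128 encoding that encoding/binary's
         // Uvarint functions decode.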
   125  
   126  func dumpbool(b bool) {
   127  	if b {
   128  		dumpint(1)
   129  	} else {
   130  		dumpint(0)
   131  	}
   132  }
   133  
   134  // dump varint uint64 length followed by memory contents
   135  func dumpmemrange(data unsafe.Pointer, len uintptr) {
   136  	dumpint(uint64(len))
   137  	dwrite(data, len)
   138  }
   139  
   140  func dumpslice(b []byte) {
   141  	dumpint(uint64(len(b)))
   142  	if len(b) > 0 {
   143  		dwrite(unsafe.Pointer(&b[0]), uintptr(len(b)))
   144  	}
   145  }
   146  
   147  func dumpstr(s string) {
   148  	sp := stringStructOf(&s)
   149  	dumpmemrange(sp.str, uintptr(sp.len))
   150  }
   151  
   152  // dump information for a type
   153  func dumptype(t *_type) {
   154  	if t == nil {
   155  		return
   156  	}
   157  
   158  	// If we've definitely serialized the type before,
   159  	// no need to do it again.
   160  	b := &typecache[t.hash&(typeCacheBuckets-1)]
   161  	if t == b.t[0] {
   162  		return
   163  	}
   164  	for i := 1; i < typeCacheAssoc; i++ {
   165  		if t == b.t[i] {
   166  			// Move-to-front
   167  			for j := i; j > 0; j-- {
   168  				b.t[j] = b.t[j-1]
   169  			}
   170  			b.t[0] = t
   171  			return
   172  		}
   173  	}
   174  
   175  	// Might not have been dumped yet. Dump it and
   176  	// remember we did so.
   177  	for j := typeCacheAssoc - 1; j > 0; j-- {
   178  		b.t[j] = b.t[j-1]
   179  	}
   180  	b.t[0] = t
   181  
   182  	// dump the type
   183  	dumpint(tagType)
   184  	dumpint(uint64(uintptr(unsafe.Pointer(t))))
   185  	dumpint(uint64(t.size))
   186  	if x := t.uncommon(); x == nil || t.nameOff(x.pkgpath).name() == "" {
   187  		dumpstr(t.string())
   188  	} else {
   189  		pkgpathstr := t.nameOff(x.pkgpath).name()
   190  		pkgpath := stringStructOf(&pkgpathstr)
   191  		namestr := t.name()
   192  		name := stringStructOf(&namestr)
   193  		dumpint(uint64(uintptr(pkgpath.len) + 1 + uintptr(name.len)))
   194  		dwrite(pkgpath.str, uintptr(pkgpath.len))
   195  		dwritebyte('.')
   196  		dwrite(name.str, uintptr(name.len))
   197  	}
   198  	dumpbool(t.kind&kindDirectIface == 0 || t.kind&kindNoPointers == 0)
   199  }
   200  
   201  // dump an object
   202  func dumpobj(obj unsafe.Pointer, size uintptr, bv bitvector) {
   203  	dumpbvtypes(&bv, obj)
   204  	dumpint(tagObject)
   205  	dumpint(uint64(uintptr(obj)))
   206  	dumpmemrange(obj, size)
   207  	dumpfields(bv)
   208  }
   209  
   210  func dumpotherroot(description string, to unsafe.Pointer) {
   211  	dumpint(tagOtherRoot)
   212  	dumpstr(description)
   213  	dumpint(uint64(uintptr(to)))
   214  }
   215  
   216  func dumpfinalizer(obj unsafe.Pointer, fn *funcval, fint *_type, ot *ptrtype) {
   217  	dumpint(tagFinalizer)
   218  	dumpint(uint64(uintptr(obj)))
   219  	dumpint(uint64(uintptr(unsafe.Pointer(fn))))
   220  	dumpint(uint64(uintptr(unsafe.Pointer(fn.fn))))
   221  	dumpint(uint64(uintptr(unsafe.Pointer(fint))))
   222  	dumpint(uint64(uintptr(unsafe.Pointer(ot))))
   223  }
   224  
   225  type childInfo struct {
   226  	// Information passed up from the callee frame about
   227  	// the layout of the outargs region.
   228  	argoff uintptr   // where the arguments start in the frame
   229  	arglen uintptr   // size of args region
   230  	args   bitvector // if args.n >= 0, pointer map of args region
   231  	sp     *uint8    // callee sp
   232  	depth  uintptr   // depth in call stack (0 == most recent)
   233  }
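         // gentraceback calls dumpframe for each frame starting with the
         // innermost one, so child describes the frame visited just before the
         // current one (its callee), or is empty (args.n < 0, sp == nil) for
         // the innermost frame. The callee's args bitmap tells the current
         // frame which words of its outargs region hold live pointers.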
   234  
   235  // dump kinds & offsets of interesting fields in bv
   236  func dumpbv(cbv *bitvector, offset uintptr) {
   237  	bv := gobv(*cbv)
   238  	for i := uintptr(0); i < bv.n; i++ {
   239  		if bv.bytedata[i/8]>>(i%8)&1 == 1 {
   240  			dumpint(fieldKindPtr)
   241  			dumpint(uint64(offset + i*sys.PtrSize))
   242  		}
   243  	}
   244  }
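         // For example, with offset 0, a set bit at position 3 of the bitmap
         // produces a fieldKindPtr record with offset 3*PtrSize (24 on amd64),
         // i.e. the fourth word of the region holds a live pointer.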
   245  
   246  func dumpframe(s *stkframe, arg unsafe.Pointer) bool {
   247  	child := (*childInfo)(arg)
   248  	f := s.fn
   249  
   250  	// Figure out what we can about our stack map
   251  	pc := s.pc
   252  	if pc != f.entry {
   253  		pc--
   254  	}
   255  	pcdata := pcdatavalue(f, _PCDATA_StackMapIndex, pc, nil)
   256  	if pcdata == -1 {
    257  		// We do not have a valid pcdata value, but there might be a
    258  		// stackmap for this function. It is likely that we are looking
    259  		// at the function prologue; assume so and hope for the best.
   260  		pcdata = 0
   261  	}
   262  	stkmap := (*stackmap)(funcdata(f, _FUNCDATA_LocalsPointerMaps))
   263  
   264  	// Dump any types we will need to resolve Efaces.
   265  	if child.args.n >= 0 {
   266  		dumpbvtypes(&child.args, unsafe.Pointer(s.sp+child.argoff))
   267  	}
   268  	var bv bitvector
   269  	if stkmap != nil && stkmap.n > 0 {
   270  		bv = stackmapdata(stkmap, pcdata)
   271  		dumpbvtypes(&bv, unsafe.Pointer(s.varp-uintptr(bv.n*sys.PtrSize)))
   272  	} else {
   273  		bv.n = -1
   274  	}
   275  
   276  	// Dump main body of stack frame.
   277  	dumpint(tagStackFrame)
   278  	dumpint(uint64(s.sp))                              // lowest address in frame
   279  	dumpint(uint64(child.depth))                       // # of frames deep on the stack
   280  	dumpint(uint64(uintptr(unsafe.Pointer(child.sp)))) // sp of child, or 0 if bottom of stack
   281  	dumpmemrange(unsafe.Pointer(s.sp), s.fp-s.sp)      // frame contents
   282  	dumpint(uint64(f.entry))
   283  	dumpint(uint64(s.pc))
   284  	dumpint(uint64(s.continpc))
   285  	name := funcname(f)
   286  	if name == "" {
   287  		name = "unknown function"
   288  	}
   289  	dumpstr(name)
   290  
   291  	// Dump fields in the outargs section
   292  	if child.args.n >= 0 {
   293  		dumpbv(&child.args, child.argoff)
   294  	} else {
   295  		// conservative - everything might be a pointer
   296  		for off := child.argoff; off < child.argoff+child.arglen; off += sys.PtrSize {
   297  			dumpint(fieldKindPtr)
   298  			dumpint(uint64(off))
   299  		}
   300  	}
   301  
   302  	// Dump fields in the local vars section
   303  	if stkmap == nil {
   304  		// No locals information, dump everything.
   305  		for off := child.arglen; off < s.varp-s.sp; off += sys.PtrSize {
   306  			dumpint(fieldKindPtr)
   307  			dumpint(uint64(off))
   308  		}
   309  	} else if stkmap.n < 0 {
   310  		// Locals size information, dump just the locals.
   311  		size := uintptr(-stkmap.n)
   312  		for off := s.varp - size - s.sp; off < s.varp-s.sp; off += sys.PtrSize {
   313  			dumpint(fieldKindPtr)
   314  			dumpint(uint64(off))
   315  		}
   316  	} else if stkmap.n > 0 {
   317  		// Locals bitmap information, scan just the pointers in
   318  		// locals.
   319  		dumpbv(&bv, s.varp-uintptr(bv.n)*sys.PtrSize-s.sp)
   320  	}
   321  	dumpint(fieldKindEol)
   322  
   323  	// Record arg info for parent.
   324  	child.argoff = s.argp - s.fp
   325  	child.arglen = s.arglen
   326  	child.sp = (*uint8)(unsafe.Pointer(s.sp))
   327  	child.depth++
   328  	stkmap = (*stackmap)(funcdata(f, _FUNCDATA_ArgsPointerMaps))
   329  	if stkmap != nil {
   330  		child.args = stackmapdata(stkmap, pcdata)
   331  	} else {
   332  		child.args.n = -1
   333  	}
   334  	return true
   335  }
   336  
   337  func dumpgoroutine(gp *g) {
   338  	var sp, pc, lr uintptr
   339  	if gp.syscallsp != 0 {
   340  		sp = gp.syscallsp
   341  		pc = gp.syscallpc
   342  		lr = 0
   343  	} else {
   344  		sp = gp.sched.sp
   345  		pc = gp.sched.pc
   346  		lr = gp.sched.lr
   347  	}
   348  
   349  	dumpint(tagGoroutine)
   350  	dumpint(uint64(uintptr(unsafe.Pointer(gp))))
   351  	dumpint(uint64(sp))
   352  	dumpint(uint64(gp.goid))
   353  	dumpint(uint64(gp.gopc))
   354  	dumpint(uint64(readgstatus(gp)))
   355  	dumpbool(isSystemGoroutine(gp))
   356  	dumpbool(false) // isbackground
   357  	dumpint(uint64(gp.waitsince))
   358  	dumpstr(gp.waitreason)
   359  	dumpint(uint64(uintptr(gp.sched.ctxt)))
   360  	dumpint(uint64(uintptr(unsafe.Pointer(gp.m))))
   361  	dumpint(uint64(uintptr(unsafe.Pointer(gp._defer))))
   362  	dumpint(uint64(uintptr(unsafe.Pointer(gp._panic))))
   363  
   364  	// dump stack
   365  	var child childInfo
   366  	child.args.n = -1
   367  	child.arglen = 0
   368  	child.sp = nil
   369  	child.depth = 0
   370  	gentraceback(pc, sp, lr, gp, 0, nil, 0x7fffffff, dumpframe, noescape(unsafe.Pointer(&child)), 0)
   371  
   372  	// dump defer & panic records
   373  	for d := gp._defer; d != nil; d = d.link {
   374  		dumpint(tagDefer)
   375  		dumpint(uint64(uintptr(unsafe.Pointer(d))))
   376  		dumpint(uint64(uintptr(unsafe.Pointer(gp))))
   377  		dumpint(uint64(d.sp))
   378  		dumpint(uint64(d.pc))
   379  		dumpint(uint64(uintptr(unsafe.Pointer(d.fn))))
   380  		dumpint(uint64(uintptr(unsafe.Pointer(d.fn.fn))))
   381  		dumpint(uint64(uintptr(unsafe.Pointer(d.link))))
   382  	}
   383  	for p := gp._panic; p != nil; p = p.link {
   384  		dumpint(tagPanic)
   385  		dumpint(uint64(uintptr(unsafe.Pointer(p))))
   386  		dumpint(uint64(uintptr(unsafe.Pointer(gp))))
   387  		eface := efaceOf(&p.arg)
   388  		dumpint(uint64(uintptr(unsafe.Pointer(eface._type))))
   389  		dumpint(uint64(uintptr(unsafe.Pointer(eface.data))))
   390  		dumpint(0) // was p->defer, no longer recorded
   391  		dumpint(uint64(uintptr(unsafe.Pointer(p.link))))
   392  	}
   393  }
   394  
   395  func dumpgs() {
   396  	// goroutines & stacks
   397  	for i := 0; uintptr(i) < allglen; i++ {
   398  		gp := allgs[i]
   399  		status := readgstatus(gp) // The world is stopped so gp will not be in a scan state.
   400  		switch status {
   401  		default:
   402  			print("runtime: unexpected G.status ", hex(status), "\n")
   403  			throw("dumpgs in STW - bad status")
   404  		case _Gdead:
   405  			// ok
   406  		case _Grunnable,
   407  			_Gsyscall,
   408  			_Gwaiting:
   409  			dumpgoroutine(gp)
   410  		}
   411  	}
   412  }
   413  
   414  func finq_callback(fn *funcval, obj unsafe.Pointer, nret uintptr, fint *_type, ot *ptrtype) {
   415  	dumpint(tagQueuedFinalizer)
   416  	dumpint(uint64(uintptr(obj)))
   417  	dumpint(uint64(uintptr(unsafe.Pointer(fn))))
   418  	dumpint(uint64(uintptr(unsafe.Pointer(fn.fn))))
   419  	dumpint(uint64(uintptr(unsafe.Pointer(fint))))
   420  	dumpint(uint64(uintptr(unsafe.Pointer(ot))))
   421  }
   422  
   423  func dumproots() {
   424  	// TODO(mwhudson): dump datamask etc from all objects
   425  	// data segment
   426  	dumpbvtypes(&firstmoduledata.gcdatamask, unsafe.Pointer(firstmoduledata.data))
   427  	dumpint(tagData)
   428  	dumpint(uint64(firstmoduledata.data))
   429  	dumpmemrange(unsafe.Pointer(firstmoduledata.data), firstmoduledata.edata-firstmoduledata.data)
   430  	dumpfields(firstmoduledata.gcdatamask)
   431  
   432  	// bss segment
   433  	dumpbvtypes(&firstmoduledata.gcbssmask, unsafe.Pointer(firstmoduledata.bss))
   434  	dumpint(tagBSS)
   435  	dumpint(uint64(firstmoduledata.bss))
   436  	dumpmemrange(unsafe.Pointer(firstmoduledata.bss), firstmoduledata.ebss-firstmoduledata.bss)
   437  	dumpfields(firstmoduledata.gcbssmask)
   438  
   439  	// MSpan.types
   440  	allspans := h_allspans
   441  	for spanidx := uint32(0); spanidx < mheap_.nspan; spanidx++ {
   442  		s := allspans[spanidx]
   443  		if s.state == _MSpanInUse {
   444  			// Finalizers
   445  			for sp := s.specials; sp != nil; sp = sp.next {
   446  				if sp.kind != _KindSpecialFinalizer {
   447  					continue
   448  				}
   449  				spf := (*specialfinalizer)(unsafe.Pointer(sp))
   450  				p := unsafe.Pointer(s.base() + uintptr(spf.special.offset))
   451  				dumpfinalizer(p, spf.fn, spf.fint, spf.ot)
   452  			}
   453  		}
   454  	}
   455  
   456  	// Finalizer queue
   457  	iterate_finq(finq_callback)
   458  }
   459  
   460  // Bit vector of free marks.
   461  // Needs to be as big as the largest number of objects per span.
   462  var freemark [_PageSize / 8]bool
   463  
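         // dumpobjs writes a tagObject record for every allocated object in
         // every in-use span: free slots are first recorded in freemark and
         // then skipped while walking the span.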
   464  func dumpobjs() {
   465  	for i := uintptr(0); i < uintptr(mheap_.nspan); i++ {
   466  		s := h_allspans[i]
   467  		if s.state != _MSpanInUse {
   468  			continue
   469  		}
   470  		p := s.base()
   471  		size := s.elemsize
   472  		n := (s.npages << _PageShift) / size
   473  		if n > uintptr(len(freemark)) {
   474  			throw("freemark array doesn't have enough entries")
   475  		}
   476  
   477  		for freeIndex := s.freeindex; freeIndex < s.nelems; freeIndex++ {
   478  			if s.isFree(freeIndex) {
   479  				freemark[freeIndex] = true
   480  			}
   481  		}
   482  
   483  		for j := uintptr(0); j < n; j, p = j+1, p+size {
   484  			if freemark[j] {
   485  				freemark[j] = false
   486  				continue
   487  			}
   488  			dumpobj(unsafe.Pointer(p), size, makeheapobjbv(p, size))
   489  		}
   490  	}
   491  }
   492  
   493  func dumpparams() {
   494  	dumpint(tagParams)
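         	// Byte-order probe: store uintptr(1) and inspect its first byte.
         	// On a little-endian machine the least significant byte is stored
         	// first, so that byte reads back as 1.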
   495  	x := uintptr(1)
   496  	if *(*byte)(unsafe.Pointer(&x)) == 1 {
   497  		dumpbool(false) // little-endian ptrs
   498  	} else {
   499  		dumpbool(true) // big-endian ptrs
   500  	}
   501  	dumpint(sys.PtrSize)
   502  	dumpint(uint64(mheap_.arena_start))
   503  	dumpint(uint64(mheap_.arena_used))
   504  	dumpstr(sys.GOARCH)
   505  	dumpstr(sys.Goexperiment)
   506  	dumpint(uint64(ncpu))
   507  }
   508  
   509  func itab_callback(tab *itab) {
   510  	t := tab._type
   511  	dumptype(t)
   512  	dumpint(tagItab)
   513  	dumpint(uint64(uintptr(unsafe.Pointer(tab))))
   514  	dumpint(uint64(uintptr(unsafe.Pointer(t))))
   515  }
   516  
   517  func dumpitabs() {
   518  	iterate_itabs(itab_callback)
   519  }
   520  
   521  func dumpms() {
   522  	for mp := allm; mp != nil; mp = mp.alllink {
   523  		dumpint(tagOSThread)
   524  		dumpint(uint64(uintptr(unsafe.Pointer(mp))))
   525  		dumpint(uint64(mp.id))
   526  		dumpint(mp.procid)
   527  	}
   528  }
   529  
   530  func dumpmemstats() {
   531  	dumpint(tagMemStats)
   532  	dumpint(memstats.alloc)
   533  	dumpint(memstats.total_alloc)
   534  	dumpint(memstats.sys)
   535  	dumpint(memstats.nlookup)
   536  	dumpint(memstats.nmalloc)
   537  	dumpint(memstats.nfree)
   538  	dumpint(memstats.heap_alloc)
   539  	dumpint(memstats.heap_sys)
   540  	dumpint(memstats.heap_idle)
   541  	dumpint(memstats.heap_inuse)
   542  	dumpint(memstats.heap_released)
   543  	dumpint(memstats.heap_objects)
   544  	dumpint(memstats.stacks_inuse)
   545  	dumpint(memstats.stacks_sys)
   546  	dumpint(memstats.mspan_inuse)
   547  	dumpint(memstats.mspan_sys)
   548  	dumpint(memstats.mcache_inuse)
   549  	dumpint(memstats.mcache_sys)
   550  	dumpint(memstats.buckhash_sys)
   551  	dumpint(memstats.gc_sys)
   552  	dumpint(memstats.other_sys)
   553  	dumpint(memstats.next_gc)
   554  	dumpint(memstats.last_gc)
   555  	dumpint(memstats.pause_total_ns)
   556  	for i := 0; i < 256; i++ {
   557  		dumpint(memstats.pause_ns[i])
   558  	}
   559  	dumpint(uint64(memstats.numgc))
   560  }
   561  
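         // dumpmemprof_callback writes one tagMemProf record per profile
         // bucket: the bucket address, allocation size, and the recorded call
         // stack with each pc resolved to a function name, file, and line,
         // followed by the allocation and free counts. Frames whose pc cannot
         // be resolved are written as "(0x<pc>)" with file "?" and line 0.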
   562  func dumpmemprof_callback(b *bucket, nstk uintptr, pstk *uintptr, size, allocs, frees uintptr) {
   563  	stk := (*[100000]uintptr)(unsafe.Pointer(pstk))
   564  	dumpint(tagMemProf)
   565  	dumpint(uint64(uintptr(unsafe.Pointer(b))))
   566  	dumpint(uint64(size))
   567  	dumpint(uint64(nstk))
   568  	for i := uintptr(0); i < nstk; i++ {
   569  		pc := stk[i]
   570  		f := findfunc(pc)
   571  		if f == nil {
   572  			var buf [64]byte
   573  			n := len(buf)
   574  			n--
   575  			buf[n] = ')'
   576  			if pc == 0 {
   577  				n--
   578  				buf[n] = '0'
   579  			} else {
   580  				for pc > 0 {
   581  					n--
   582  					buf[n] = "0123456789abcdef"[pc&15]
   583  					pc >>= 4
   584  				}
   585  			}
   586  			n--
   587  			buf[n] = 'x'
   588  			n--
   589  			buf[n] = '0'
   590  			n--
   591  			buf[n] = '('
   592  			dumpslice(buf[n:])
   593  			dumpstr("?")
   594  			dumpint(0)
   595  		} else {
   596  			dumpstr(funcname(f))
   597  			if i > 0 && pc > f.entry {
   598  				pc--
   599  			}
   600  			file, line := funcline(f, pc)
   601  			dumpstr(file)
   602  			dumpint(uint64(line))
   603  		}
   604  	}
   605  	dumpint(uint64(allocs))
   606  	dumpint(uint64(frees))
   607  }
   608  
   609  func dumpmemprof() {
   610  	iterate_memprof(dumpmemprof_callback)
   611  	allspans := h_allspans
   612  	for spanidx := uint32(0); spanidx < mheap_.nspan; spanidx++ {
   613  		s := allspans[spanidx]
   614  		if s.state != _MSpanInUse {
   615  			continue
   616  		}
   617  		for sp := s.specials; sp != nil; sp = sp.next {
   618  			if sp.kind != _KindSpecialProfile {
   619  				continue
   620  			}
   621  			spp := (*specialprofile)(unsafe.Pointer(sp))
   622  			p := s.base() + uintptr(spp.special.offset)
   623  			dumpint(tagAllocSample)
   624  			dumpint(uint64(p))
   625  			dumpint(uint64(uintptr(unsafe.Pointer(spp.b))))
   626  		}
   627  	}
   628  }
   629  
   630  var dumphdr = []byte("go1.7 heap dump\n")
   631  
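         // mdump writes the whole dump: the header string, then params, itabs,
         // heap objects, goroutines and their stacks, OS threads, roots, memory
         // statistics, and memory profile records, terminated by a tagEOF
         // record. It first makes sure every in-use span has been swept.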
   632  func mdump() {
   633  	// make sure we're done sweeping
   634  	for i := uintptr(0); i < uintptr(mheap_.nspan); i++ {
   635  		s := h_allspans[i]
   636  		if s.state == _MSpanInUse {
   637  			s.ensureSwept()
   638  		}
   639  	}
   640  	memclr(unsafe.Pointer(&typecache), unsafe.Sizeof(typecache))
   641  	dwrite(unsafe.Pointer(&dumphdr[0]), uintptr(len(dumphdr)))
   642  	dumpparams()
   643  	dumpitabs()
   644  	dumpobjs()
   645  	dumpgs()
   646  	dumpms()
   647  	dumproots()
   648  	dumpmemstats()
   649  	dumpmemprof()
   650  	dumpint(tagEOF)
   651  	flush()
   652  }
   653  
   654  func writeheapdump_m(fd uintptr) {
   655  	_g_ := getg()
   656  	casgstatus(_g_.m.curg, _Grunning, _Gwaiting)
   657  	_g_.waitreason = "dumping heap"
   658  
   659  	// Update stats so we can dump them.
   660  	// As a side effect, flushes all the MCaches so the MSpan.freelist
   661  	// lists contain all the free objects.
   662  	updatememstats(nil)
   663  
   664  	// Set dump file.
   665  	dumpfd = fd
   666  
   667  	// Call dump routine.
   668  	mdump()
   669  
   670  	// Reset dump file.
   671  	dumpfd = 0
   672  	if tmpbuf != nil {
   673  		sysFree(unsafe.Pointer(&tmpbuf[0]), uintptr(len(tmpbuf)), &memstats.other_sys)
   674  		tmpbuf = nil
   675  	}
   676  
   677  	casgstatus(_g_.m.curg, _Gwaiting, _Grunning)
   678  }
   679  
   680  // dumpint() the kind & offset of each field in an object.
   681  func dumpfields(bv bitvector) {
   682  	dumpbv(&bv, 0)
   683  	dumpint(fieldKindEol)
   684  }
   685  
   686  // The heap dump reader needs to be able to disambiguate
   687  // Eface entries. So it needs to know every type that might
   688  // appear in such an entry. The following routine accomplishes that.
   689  // TODO(rsc, khr): Delete - no longer possible.
   690  
   691  // Dump all the types that appear in the type field of
   692  // any Eface described by this bit vector.
   693  func dumpbvtypes(bv *bitvector, base unsafe.Pointer) {
   694  }
   695  
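         // makeheapobjbv builds a pointer bitmap for the object of the given
         // size at p by walking the heap bitmap: bit i of the result is set if
         // word i of the object holds a pointer. The bitmap is stored in
         // tmpbuf, which is grown with sysAlloc as needed and freed at the end
         // of writeheapdump_m.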
   696  func makeheapobjbv(p uintptr, size uintptr) bitvector {
   697  	// Extend the temp buffer if necessary.
   698  	nptr := size / sys.PtrSize
   699  	if uintptr(len(tmpbuf)) < nptr/8+1 {
   700  		if tmpbuf != nil {
   701  			sysFree(unsafe.Pointer(&tmpbuf[0]), uintptr(len(tmpbuf)), &memstats.other_sys)
   702  		}
   703  		n := nptr/8 + 1
   704  		p := sysAlloc(n, &memstats.other_sys)
   705  		if p == nil {
   706  			throw("heapdump: out of memory")
   707  		}
   708  		tmpbuf = (*[1 << 30]byte)(p)[:n]
   709  	}
   710  	// Convert heap bitmap to pointer bitmap.
   711  	for i := uintptr(0); i < nptr/8+1; i++ {
   712  		tmpbuf[i] = 0
   713  	}
   714  	i := uintptr(0)
   715  	hbits := heapBitsForAddr(p)
   716  	for ; i < nptr; i++ {
   717  		if i != 1 && !hbits.morePointers() {
   718  			break // end of object
   719  		}
   720  		if hbits.isPointer() {
   721  			tmpbuf[i/8] |= 1 << (i % 8)
   722  		}
   723  		hbits = hbits.next()
   724  	}
   725  	return bitvector{int32(i), &tmpbuf[0]}
   726  }